author    John W. Linville <linville@tuxdriver.com>  2013-11-04 14:51:28 -0500
committer John W. Linville <linville@tuxdriver.com>  2013-11-04 14:51:28 -0500
commit    87bc0728d462ae37841a550542829aa65a97e7c2 (patch)
tree      266afb90f501b814c0a79f10a7afd86a6a33d631 /drivers/net/wireless
parent    f421436a591d34fa5279b54a96ac07d70250cc8d (diff)
parent    01925efdf7e03b4b803b5c9f985163d687f7f017 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem
Conflicts:
	drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/ath/Kconfig | 17
-rw-r--r--  drivers/net/wireless/ath/Makefile | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 35
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h | 16
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 285
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 47
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 56
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 512
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 1087
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 968
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/Makefile | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c | 95
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 39
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 118
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs_debug.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs_debug.h | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw-ops.h | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 30
-rw-r--r--  drivers/net/wireless/ath/ath9k/link.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 160
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 149
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 56
-rw-r--r--  drivers/net/wireless/ath/dfs_pattern_detector.c (renamed from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c) | 23
-rw-r--r--  drivers/net/wireless/ath/dfs_pattern_detector.h (renamed from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h) | 28
-rw-r--r--  drivers/net/wireless/ath/dfs_pri_detector.c (renamed from drivers/net/wireless/ath/ath9k/dfs_pri_detector.c) | 8
-rw-r--r--  drivers/net/wireless/ath/dfs_pri_detector.h (renamed from drivers/net/wireless/ath/ath9k/dfs_pri_detector.h) | 2
-rw-r--r--  drivers/net/wireless/ath/regd.c | 140
-rw-r--r--  drivers/net/wireless/atmel.c | 2
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 186
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | 17
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 111
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h | 6
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-config.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h | 32
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw-file.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw.h | 26
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 22
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/bt-coex.c | 632
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/constants.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 515
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs.c | 206
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h | 149
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 69
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 29
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 34
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 75
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 242
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 88
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/nvm.c | 101
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 60
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/power.c | 70
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/quota.c | 42
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 793
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.h | 154
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 462
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 206
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/testmode.h | 95
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 49
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 127
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 34
-rw-r--r--  drivers/net/wireless/libertas/firmware.c | 5
-rw-r--r--  drivers/net/wireless/libertas/if_cs.c | 8
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 8
-rw-r--r--  drivers/net/wireless/libertas/if_spi.c | 4
-rw-r--r--  drivers/net/wireless/libertas/if_usb.c | 17
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 27
-rw-r--r--  drivers/net/wireless/rt2x00/Makefile | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 73
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800mmio.c | 873
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800mmio.h | 165
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 951
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.h | 97
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800soc.c | 263
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 12
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 3
108 files changed, 8703 insertions(+), 2771 deletions(-)
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index ba81d6292eeb..c63d1159db5c 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,6 +25,23 @@ config ATH_DEBUG
25 Say Y, if you want to debug atheros wireless drivers. 25 Say Y, if you want to debug atheros wireless drivers.
26 Right now only ath9k makes use of this. 26 Right now only ath9k makes use of this.
27 27
28config ATH_REG_DYNAMIC_USER_REG_HINTS
29 bool "Atheros dynamic user regulatory hints"
30 depends on CFG80211_CERTIFICATION_ONUS
31 default n
32 ---help---
33 Say N. This should only be enabled in countries where
34 this feature is explicitly allowed and only on cards that
35 specifically have been tested for this.
36
37config ATH_REG_DYNAMIC_USER_CERT_TESTING
38 bool "Atheros dynamic user regulatory testing"
39 depends on ATH_REG_DYNAMIC_USER_REG_HINTS && CFG80211_CERTIFICATION_ONUS
40 default n
41 ---help---
42 Say N. This should only be enabled on systems
43 undergoing certification testing.
44
28source "drivers/net/wireless/ath/ath5k/Kconfig" 45source "drivers/net/wireless/ath/ath5k/Kconfig"
29source "drivers/net/wireless/ath/ath9k/Kconfig" 46source "drivers/net/wireless/ath/ath9k/Kconfig"
30source "drivers/net/wireless/ath/carl9170/Kconfig" 47source "drivers/net/wireless/ath/carl9170/Kconfig"
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 363b05653c7e..7d023b0f13b4 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -12,7 +12,9 @@ obj-$(CONFIG_ATH_COMMON) += ath.o
12ath-objs := main.o \ 12ath-objs := main.o \
13 regd.o \ 13 regd.o \
14 hw.o \ 14 hw.o \
15 key.o 15 key.o \
16 dfs_pattern_detector.o \
17 dfs_pri_detector.o
16 18
17ath-$(CONFIG_ATH_DEBUG) += debug.o 19ath-$(CONFIG_ATH_DEBUG) += debug.o
18ccflags-y += -D__CHECK_ENDIAN__ 20ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 834e29ea236c..e46951b8fb92 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -283,7 +283,7 @@ static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
283 283
284 if (unlikely(CE_RING_DELTA(nentries_mask, 284 if (unlikely(CE_RING_DELTA(nentries_mask,
285 write_index, sw_index - 1) <= 0)) { 285 write_index, sw_index - 1) <= 0)) {
286 ret = -EIO; 286 ret = -ENOSR;
287 goto exit; 287 goto exit;
288 } 288 }
289 289
@@ -338,38 +338,19 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
338 return ret; 338 return ret;
339} 339}
340 340
341int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state, 341int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
342 void *per_transfer_context,
343 unsigned int transfer_id,
344 u32 paddr, unsigned int nbytes,
345 u32 flags)
346{ 342{
347 struct ath10k_ce_ring *src_ring = ce_state->src_ring; 343 struct ath10k *ar = pipe->ar;
348 struct ath10k *ar = ce_state->ar;
349 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 344 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
350 unsigned int nentries_mask = src_ring->nentries_mask; 345 int delta;
351 unsigned int sw_index;
352 unsigned int write_index;
353 int delta, ret = -ENOMEM;
354 346
355 spin_lock_bh(&ar_pci->ce_lock); 347 spin_lock_bh(&ar_pci->ce_lock);
356 348 delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
357 sw_index = src_ring->sw_index; 349 pipe->src_ring->write_index,
358 write_index = src_ring->write_index; 350 pipe->src_ring->sw_index - 1);
359
360 delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
361
362 if (delta >= 1) {
363 ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
364 paddr, nbytes,
365 transfer_id, flags);
366 if (ret)
367 ath10k_warn("CE send failed: %d\n", ret);
368 }
369
370 spin_unlock_bh(&ar_pci->ce_lock); 351 spin_unlock_bh(&ar_pci->ce_lock);
371 352
372 return ret; 353 return delta;
373} 354}
374 355
375int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state, 356int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index aec802868341..15d45b5b7615 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -156,21 +156,7 @@ void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
156 void (*send_cb)(struct ath10k_ce_pipe *), 156 void (*send_cb)(struct ath10k_ce_pipe *),
157 int disable_interrupts); 157 int disable_interrupts);
158 158
159/* 159int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
160 * Queue a "sendlist" of buffers to be sent using gather to a single
161 * anonymous destination buffer
162 * ce - which copy engine to use
163 * sendlist - list of simple buffers to send using gather
164 * transfer_id - arbitrary ID; reflected to destination
165 * Returns 0 on success; otherwise an error status.
166 *
167 * Implemenation note: Pushes multiple buffers with Gather to Source ring.
168 */
169int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
170 void *per_transfer_context,
171 unsigned int transfer_id,
172 u32 paddr, unsigned int nbytes,
173 u32 flags);
174 160
175/*==================Recv=======================*/ 161/*==================Recv=======================*/
176 162
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 76906d5a082e..1129994fb105 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -59,27 +59,6 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
59 wake_up(&ar->event_queue); 59 wake_up(&ar->event_queue);
60} 60}
61 61
62static int ath10k_check_fw_version(struct ath10k *ar)
63{
64 char version[32];
65
66 if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
67 ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
68 ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
69 ar->fw_version_build >= SUPPORTED_FW_BUILD)
70 return 0;
71
72 snprintf(version, sizeof(version), "%u.%u.%u.%u",
73 SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
74 SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
75
76 ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
77 ar->hw->wiphy->fw_version);
78 ath10k_warn("Please upgrade to version %s (or newer)\n", version);
79
80 return 0;
81}
82
83static int ath10k_init_connect_htc(struct ath10k *ar) 62static int ath10k_init_connect_htc(struct ath10k *ar)
84{ 63{
85 int status; 64 int status;
@@ -189,8 +168,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
189 return fw; 168 return fw;
190} 169}
191 170
192static int ath10k_push_board_ext_data(struct ath10k *ar, 171static int ath10k_push_board_ext_data(struct ath10k *ar)
193 const struct firmware *fw)
194{ 172{
195 u32 board_data_size = QCA988X_BOARD_DATA_SZ; 173 u32 board_data_size = QCA988X_BOARD_DATA_SZ;
196 u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ; 174 u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
@@ -210,14 +188,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
210 if (board_ext_data_addr == 0) 188 if (board_ext_data_addr == 0)
211 return 0; 189 return 0;
212 190
213 if (fw->size != (board_data_size + board_ext_data_size)) { 191 if (ar->board_len != (board_data_size + board_ext_data_size)) {
214 ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n", 192 ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
215 fw->size, board_data_size, board_ext_data_size); 193 ar->board_len, board_data_size, board_ext_data_size);
216 return -EINVAL; 194 return -EINVAL;
217 } 195 }
218 196
219 ret = ath10k_bmi_write_memory(ar, board_ext_data_addr, 197 ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
220 fw->data + board_data_size, 198 ar->board_data + board_data_size,
221 board_ext_data_size); 199 board_ext_data_size);
222 if (ret) { 200 if (ret) {
223 ath10k_err("could not write board ext data (%d)\n", ret); 201 ath10k_err("could not write board ext data (%d)\n", ret);
@@ -236,12 +214,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
236 214
237static int ath10k_download_board_data(struct ath10k *ar) 215static int ath10k_download_board_data(struct ath10k *ar)
238{ 216{
239 const struct firmware *fw = ar->board_data;
240 u32 board_data_size = QCA988X_BOARD_DATA_SZ; 217 u32 board_data_size = QCA988X_BOARD_DATA_SZ;
241 u32 address; 218 u32 address;
242 int ret; 219 int ret;
243 220
244 ret = ath10k_push_board_ext_data(ar, fw); 221 ret = ath10k_push_board_ext_data(ar);
245 if (ret) { 222 if (ret) {
246 ath10k_err("could not push board ext data (%d)\n", ret); 223 ath10k_err("could not push board ext data (%d)\n", ret);
247 goto exit; 224 goto exit;
@@ -253,8 +230,9 @@ static int ath10k_download_board_data(struct ath10k *ar)
253 goto exit; 230 goto exit;
254 } 231 }
255 232
256 ret = ath10k_bmi_write_memory(ar, address, fw->data, 233 ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
257 min_t(u32, board_data_size, fw->size)); 234 min_t(u32, board_data_size,
235 ar->board_len));
258 if (ret) { 236 if (ret) {
259 ath10k_err("could not write board data (%d)\n", ret); 237 ath10k_err("could not write board data (%d)\n", ret);
260 goto exit; 238 goto exit;
@@ -272,17 +250,16 @@ exit:
272 250
273static int ath10k_download_and_run_otp(struct ath10k *ar) 251static int ath10k_download_and_run_otp(struct ath10k *ar)
274{ 252{
275 const struct firmware *fw = ar->otp;
276 u32 address = ar->hw_params.patch_load_addr; 253 u32 address = ar->hw_params.patch_load_addr;
277 u32 exec_param; 254 u32 exec_param;
278 int ret; 255 int ret;
279 256
280 /* OTP is optional */ 257 /* OTP is optional */
281 258
282 if (!ar->otp) 259 if (!ar->otp_data || !ar->otp_len)
283 return 0; 260 return 0;
284 261
285 ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); 262 ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
286 if (ret) { 263 if (ret) {
287 ath10k_err("could not write otp (%d)\n", ret); 264 ath10k_err("could not write otp (%d)\n", ret);
288 goto exit; 265 goto exit;
@@ -301,13 +278,13 @@ exit:
301 278
302static int ath10k_download_fw(struct ath10k *ar) 279static int ath10k_download_fw(struct ath10k *ar)
303{ 280{
304 const struct firmware *fw = ar->firmware;
305 u32 address; 281 u32 address;
306 int ret; 282 int ret;
307 283
308 address = ar->hw_params.patch_load_addr; 284 address = ar->hw_params.patch_load_addr;
309 285
310 ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); 286 ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
287 ar->firmware_len);
311 if (ret) { 288 if (ret) {
312 ath10k_err("could not write fw (%d)\n", ret); 289 ath10k_err("could not write fw (%d)\n", ret);
313 goto exit; 290 goto exit;
@@ -319,8 +296,8 @@ exit:
319 296
320static void ath10k_core_free_firmware_files(struct ath10k *ar) 297static void ath10k_core_free_firmware_files(struct ath10k *ar)
321{ 298{
322 if (ar->board_data && !IS_ERR(ar->board_data)) 299 if (ar->board && !IS_ERR(ar->board))
323 release_firmware(ar->board_data); 300 release_firmware(ar->board);
324 301
325 if (ar->otp && !IS_ERR(ar->otp)) 302 if (ar->otp && !IS_ERR(ar->otp))
326 release_firmware(ar->otp); 303 release_firmware(ar->otp);
@@ -328,12 +305,20 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
328 if (ar->firmware && !IS_ERR(ar->firmware)) 305 if (ar->firmware && !IS_ERR(ar->firmware))
329 release_firmware(ar->firmware); 306 release_firmware(ar->firmware);
330 307
308 ar->board = NULL;
331 ar->board_data = NULL; 309 ar->board_data = NULL;
310 ar->board_len = 0;
311
332 ar->otp = NULL; 312 ar->otp = NULL;
313 ar->otp_data = NULL;
314 ar->otp_len = 0;
315
333 ar->firmware = NULL; 316 ar->firmware = NULL;
317 ar->firmware_data = NULL;
318 ar->firmware_len = 0;
334} 319}
335 320
336static int ath10k_core_fetch_firmware_files(struct ath10k *ar) 321static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
337{ 322{
338 int ret = 0; 323 int ret = 0;
339 324
@@ -347,15 +332,18 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
347 return -EINVAL; 332 return -EINVAL;
348 } 333 }
349 334
350 ar->board_data = ath10k_fetch_fw_file(ar, 335 ar->board = ath10k_fetch_fw_file(ar,
351 ar->hw_params.fw.dir, 336 ar->hw_params.fw.dir,
352 ar->hw_params.fw.board); 337 ar->hw_params.fw.board);
353 if (IS_ERR(ar->board_data)) { 338 if (IS_ERR(ar->board)) {
354 ret = PTR_ERR(ar->board_data); 339 ret = PTR_ERR(ar->board);
355 ath10k_err("could not fetch board data (%d)\n", ret); 340 ath10k_err("could not fetch board data (%d)\n", ret);
356 goto err; 341 goto err;
357 } 342 }
358 343
344 ar->board_data = ar->board->data;
345 ar->board_len = ar->board->size;
346
359 ar->firmware = ath10k_fetch_fw_file(ar, 347 ar->firmware = ath10k_fetch_fw_file(ar,
360 ar->hw_params.fw.dir, 348 ar->hw_params.fw.dir,
361 ar->hw_params.fw.fw); 349 ar->hw_params.fw.fw);
@@ -365,6 +353,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
365 goto err; 353 goto err;
366 } 354 }
367 355
356 ar->firmware_data = ar->firmware->data;
357 ar->firmware_len = ar->firmware->size;
358
368 /* OTP may be undefined. If so, don't fetch it at all */ 359 /* OTP may be undefined. If so, don't fetch it at all */
369 if (ar->hw_params.fw.otp == NULL) 360 if (ar->hw_params.fw.otp == NULL)
370 return 0; 361 return 0;
@@ -378,6 +369,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
378 goto err; 369 goto err;
379 } 370 }
380 371
372 ar->otp_data = ar->otp->data;
373 ar->otp_len = ar->otp->size;
374
381 return 0; 375 return 0;
382 376
383err: 377err:
@@ -385,6 +379,191 @@ err:
385 return ret; 379 return ret;
386} 380}
387 381
382static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
383{
384 size_t magic_len, len, ie_len;
385 int ie_id, i, index, bit, ret;
386 struct ath10k_fw_ie *hdr;
387 const u8 *data;
388 __le32 *timestamp;
389
390 /* first fetch the firmware file (firmware-*.bin) */
391 ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
392 if (IS_ERR(ar->firmware)) {
393 ath10k_err("Could not fetch firmware file '%s': %ld\n",
394 name, PTR_ERR(ar->firmware));
395 return PTR_ERR(ar->firmware);
396 }
397
398 data = ar->firmware->data;
399 len = ar->firmware->size;
400
401 /* magic also includes the null byte, check that as well */
402 magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
403
404 if (len < magic_len) {
405 ath10k_err("firmware image too small to contain magic: %zu\n",
406 len);
407 ret = -EINVAL;
408 goto err;
409 }
410
411 if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
412 ath10k_err("Invalid firmware magic\n");
413 ret = -EINVAL;
414 goto err;
415 }
416
417 /* jump over the padding */
418 magic_len = ALIGN(magic_len, 4);
419
420 len -= magic_len;
421 data += magic_len;
422
423 /* loop elements */
424 while (len > sizeof(struct ath10k_fw_ie)) {
425 hdr = (struct ath10k_fw_ie *)data;
426
427 ie_id = le32_to_cpu(hdr->id);
428 ie_len = le32_to_cpu(hdr->len);
429
430 len -= sizeof(*hdr);
431 data += sizeof(*hdr);
432
433 if (len < ie_len) {
434 ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
435 ie_id, len, ie_len);
436 ret = -EINVAL;
437 goto err;
438 }
439
440 switch (ie_id) {
441 case ATH10K_FW_IE_FW_VERSION:
442 if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
443 break;
444
445 memcpy(ar->hw->wiphy->fw_version, data, ie_len);
446 ar->hw->wiphy->fw_version[ie_len] = '\0';
447
448 ath10k_dbg(ATH10K_DBG_BOOT,
449 "found fw version %s\n",
450 ar->hw->wiphy->fw_version);
451 break;
452 case ATH10K_FW_IE_TIMESTAMP:
453 if (ie_len != sizeof(u32))
454 break;
455
456 timestamp = (__le32 *)data;
457
458 ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
459 le32_to_cpup(timestamp));
460 break;
461 case ATH10K_FW_IE_FEATURES:
462 ath10k_dbg(ATH10K_DBG_BOOT,
463 "found firmware features ie (%zd B)\n",
464 ie_len);
465
466 for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
467 index = i / 8;
468 bit = i % 8;
469
470 if (index == ie_len)
471 break;
472
473 if (data[index] & (1 << bit))
474 __set_bit(i, ar->fw_features);
475 }
476
477 ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
478 ar->fw_features,
479 sizeof(ar->fw_features));
480 break;
481 case ATH10K_FW_IE_FW_IMAGE:
482 ath10k_dbg(ATH10K_DBG_BOOT,
483 "found fw image ie (%zd B)\n",
484 ie_len);
485
486 ar->firmware_data = data;
487 ar->firmware_len = ie_len;
488
489 break;
490 case ATH10K_FW_IE_OTP_IMAGE:
491 ath10k_dbg(ATH10K_DBG_BOOT,
492 "found otp image ie (%zd B)\n",
493 ie_len);
494
495 ar->otp_data = data;
496 ar->otp_len = ie_len;
497
498 break;
499 default:
500 ath10k_warn("Unknown FW IE: %u\n",
501 le32_to_cpu(hdr->id));
502 break;
503 }
504
505 /* jump over the padding */
506 ie_len = ALIGN(ie_len, 4);
507
508 len -= ie_len;
509 data += ie_len;
510 }
511
512 if (!ar->firmware_data || !ar->firmware_len) {
513 ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
514 name);
515 ret = -ENOMEDIUM;
516 goto err;
517 }
518
519 /* now fetch the board file */
520 if (ar->hw_params.fw.board == NULL) {
521 ath10k_err("board data file not defined");
522 ret = -EINVAL;
523 goto err;
524 }
525
526 ar->board = ath10k_fetch_fw_file(ar,
527 ar->hw_params.fw.dir,
528 ar->hw_params.fw.board);
529 if (IS_ERR(ar->board)) {
530 ret = PTR_ERR(ar->board);
531 ath10k_err("could not fetch board data (%d)\n", ret);
532 goto err;
533 }
534
535 ar->board_data = ar->board->data;
536 ar->board_len = ar->board->size;
537
538 return 0;
539
540err:
541 ath10k_core_free_firmware_files(ar);
542 return ret;
543}
544
545static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
546{
547 int ret;
548
549 ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
550 if (ret == 0) {
551 ar->fw_api = 2;
552 goto out;
553 }
554
555 ret = ath10k_core_fetch_firmware_api_1(ar);
556 if (ret)
557 return ret;
558
559 ar->fw_api = 1;
560
561out:
562 ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
563
564 return 0;
565}
566
388static int ath10k_init_download_firmware(struct ath10k *ar) 567static int ath10k_init_download_firmware(struct ath10k *ar)
389{ 568{
390 int ret; 569 int ret;
@@ -541,6 +720,9 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
541 INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); 720 INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
542 skb_queue_head_init(&ar->offchan_tx_queue); 721 skb_queue_head_init(&ar->offchan_tx_queue);
543 722
723 INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
724 skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
725
544 init_waitqueue_head(&ar->event_queue); 726 init_waitqueue_head(&ar->event_queue);
545 727
546 INIT_WORK(&ar->restart_work, ath10k_core_restart); 728 INIT_WORK(&ar->restart_work, ath10k_core_restart);
@@ -555,6 +737,8 @@ EXPORT_SYMBOL(ath10k_core_create);
555 737
556void ath10k_core_destroy(struct ath10k *ar) 738void ath10k_core_destroy(struct ath10k *ar)
557{ 739{
740 ath10k_debug_destroy(ar);
741
558 flush_workqueue(ar->workqueue); 742 flush_workqueue(ar->workqueue);
559 destroy_workqueue(ar->workqueue); 743 destroy_workqueue(ar->workqueue);
560 744
@@ -566,6 +750,8 @@ int ath10k_core_start(struct ath10k *ar)
566{ 750{
567 int status; 751 int status;
568 752
753 lockdep_assert_held(&ar->conf_mutex);
754
569 ath10k_bmi_start(ar); 755 ath10k_bmi_start(ar);
570 756
571 if (ath10k_init_configure_target(ar)) { 757 if (ath10k_init_configure_target(ar)) {
@@ -616,10 +802,6 @@ int ath10k_core_start(struct ath10k *ar)
616 802
617 ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version); 803 ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
618 804
619 status = ath10k_check_fw_version(ar);
620 if (status)
621 goto err_disconnect_htc;
622
623 status = ath10k_wmi_cmd_init(ar); 805 status = ath10k_wmi_cmd_init(ar);
624 if (status) { 806 if (status) {
625 ath10k_err("could not send WMI init command (%d)\n", status); 807 ath10k_err("could not send WMI init command (%d)\n", status);
@@ -642,6 +824,7 @@ int ath10k_core_start(struct ath10k *ar)
642 goto err_disconnect_htc; 824 goto err_disconnect_htc;
643 825
644 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; 826 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
827 INIT_LIST_HEAD(&ar->arvifs);
645 828
646 return 0; 829 return 0;
647 830
@@ -658,6 +841,8 @@ EXPORT_SYMBOL(ath10k_core_start);
658 841
659void ath10k_core_stop(struct ath10k *ar) 842void ath10k_core_stop(struct ath10k *ar)
660{ 843{
844 lockdep_assert_held(&ar->conf_mutex);
845
661 ath10k_debug_stop(ar); 846 ath10k_debug_stop(ar);
662 ath10k_htc_stop(&ar->htc); 847 ath10k_htc_stop(&ar->htc);
663 ath10k_htt_detach(&ar->htt); 848 ath10k_htt_detach(&ar->htt);
@@ -705,15 +890,21 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
705 return ret; 890 return ret;
706 } 891 }
707 892
893 mutex_lock(&ar->conf_mutex);
894
708 ret = ath10k_core_start(ar); 895 ret = ath10k_core_start(ar);
709 if (ret) { 896 if (ret) {
710 ath10k_err("could not init core (%d)\n", ret); 897 ath10k_err("could not init core (%d)\n", ret);
711 ath10k_core_free_firmware_files(ar); 898 ath10k_core_free_firmware_files(ar);
712 ath10k_hif_power_down(ar); 899 ath10k_hif_power_down(ar);
900 mutex_unlock(&ar->conf_mutex);
713 return ret; 901 return ret;
714 } 902 }
715 903
716 ath10k_core_stop(ar); 904 ath10k_core_stop(ar);
905
906 mutex_unlock(&ar->conf_mutex);
907
717 ath10k_hif_power_down(ar); 908 ath10k_hif_power_down(ar);
718 return 0; 909 return 0;
719} 910}
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 292ad4577c98..0934f7633de3 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -43,15 +43,17 @@
43/* Antenna noise floor */ 43/* Antenna noise floor */
44#define ATH10K_DEFAULT_NOISE_FLOOR -95 44#define ATH10K_DEFAULT_NOISE_FLOOR -95
45 45
46#define ATH10K_MAX_NUM_MGMT_PENDING 16
47
46struct ath10k; 48struct ath10k;
47 49
48struct ath10k_skb_cb { 50struct ath10k_skb_cb {
49 dma_addr_t paddr; 51 dma_addr_t paddr;
50 bool is_mapped; 52 bool is_mapped;
51 bool is_aborted; 53 bool is_aborted;
54 u8 vdev_id;
52 55
53 struct { 56 struct {
54 u8 vdev_id;
55 u8 tid; 57 u8 tid;
56 bool is_offchan; 58 bool is_offchan;
57 59
@@ -102,11 +104,26 @@ struct ath10k_bmi {
102 bool done_sent; 104 bool done_sent;
103}; 105};
104 106
107#define ATH10K_MAX_MEM_REQS 16
108
109struct ath10k_mem_chunk {
110 void *vaddr;
111 dma_addr_t paddr;
112 u32 len;
113 u32 req_id;
114};
115
105struct ath10k_wmi { 116struct ath10k_wmi {
106 enum ath10k_htc_ep_id eid; 117 enum ath10k_htc_ep_id eid;
107 struct completion service_ready; 118 struct completion service_ready;
108 struct completion unified_ready; 119 struct completion unified_ready;
109 wait_queue_head_t tx_credits_wq; 120 wait_queue_head_t tx_credits_wq;
121 struct wmi_cmd_map *cmd;
122 struct wmi_vdev_param_map *vdev_param;
123 struct wmi_pdev_param_map *pdev_param;
124
125 u32 num_mem_chunks;
126 struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
110}; 127};
111 128
112struct ath10k_peer_stat { 129struct ath10k_peer_stat {
@@ -188,6 +205,8 @@ struct ath10k_peer {
188#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) 205#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
189 206
190struct ath10k_vif { 207struct ath10k_vif {
208 struct list_head list;
209
191 u32 vdev_id; 210 u32 vdev_id;
192 enum wmi_vdev_type vdev_type; 211 enum wmi_vdev_type vdev_type;
193 enum wmi_vdev_subtype vdev_subtype; 212 enum wmi_vdev_subtype vdev_subtype;
@@ -198,8 +217,10 @@ struct ath10k_vif {
198 struct ath10k *ar; 217 struct ath10k *ar;
199 struct ieee80211_vif *vif; 218 struct ieee80211_vif *vif;
200 219
220 struct work_struct wep_key_work;
201 struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; 221 struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
202 u8 def_wep_key_index; 222 u8 def_wep_key_idx;
223 u8 def_wep_key_newidx;
203 224
204 u16 tx_seq_no; 225 u16 tx_seq_no;
205 226
@@ -268,6 +289,12 @@ enum ath10k_fw_features {
268 /* wmi_mgmt_rx_hdr contains extra RSSI information */ 289 /* wmi_mgmt_rx_hdr contains extra RSSI information */
269 ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0, 290 ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
270 291
292 /* firmware from 10X branch */
293 ATH10K_FW_FEATURE_WMI_10X = 1,
294
295 /* firmware support tx frame management over WMI, otherwise it's HTT */
296 ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
297
271 /* keep last */ 298 /* keep last */
272 ATH10K_FW_FEATURE_COUNT, 299 ATH10K_FW_FEATURE_COUNT,
273}; 300};
@@ -324,9 +351,19 @@ struct ath10k {
324 } fw; 351 } fw;
325 } hw_params; 352 } hw_params;
326 353
327 const struct firmware *board_data; 354 const struct firmware *board;
355 const void *board_data;
356 size_t board_len;
357
328 const struct firmware *otp; 358 const struct firmware *otp;
359 const void *otp_data;
360 size_t otp_len;
361
329 const struct firmware *firmware; 362 const struct firmware *firmware;
363 const void *firmware_data;
364 size_t firmware_len;
365
366 int fw_api;
330 367
331 struct { 368 struct {
332 struct completion started; 369 struct completion started;
@@ -369,6 +406,7 @@ struct ath10k {
369 /* protects shared structure data */ 406 /* protects shared structure data */
370 spinlock_t data_lock; 407 spinlock_t data_lock;
371 408
409 struct list_head arvifs;
372 struct list_head peers; 410 struct list_head peers;
373 wait_queue_head_t peer_mapping_wq; 411 wait_queue_head_t peer_mapping_wq;
374 412
@@ -377,6 +415,9 @@ struct ath10k {
377 struct completion offchan_tx_completed; 415 struct completion offchan_tx_completed;
378 struct sk_buff *offchan_tx_skb; 416 struct sk_buff *offchan_tx_skb;
379 417
418 struct work_struct wmi_mgmt_tx_work;
419 struct sk_buff_head wmi_mgmt_tx_queue;
420
380 enum ath10k_state state; 421 enum ath10k_state state;
381 422
382 struct work_struct restart_work; 423 struct work_struct restart_work;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 59615c7f217e..760ff2289e3c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -618,6 +618,8 @@ int ath10k_debug_start(struct ath10k *ar)
618{ 618{
619 int ret; 619 int ret;
620 620
621 lockdep_assert_held(&ar->conf_mutex);
622
621 ret = ath10k_debug_htt_stats_req(ar); 623 ret = ath10k_debug_htt_stats_req(ar);
622 if (ret) 624 if (ret)
623 /* continue normally anyway, this isn't serious */ 625 /* continue normally anyway, this isn't serious */
@@ -628,7 +630,13 @@ int ath10k_debug_start(struct ath10k *ar)
628 630
629void ath10k_debug_stop(struct ath10k *ar) 631void ath10k_debug_stop(struct ath10k *ar)
630{ 632{
631 cancel_delayed_work_sync(&ar->debug.htt_stats_dwork); 633 lockdep_assert_held(&ar->conf_mutex);
634
635 /* Must not use _sync to avoid deadlock, we do that in
636 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
637 * warning from del_timer(). */
638 if (ar->debug.htt_stats_mask != 0)
639 cancel_delayed_work(&ar->debug.htt_stats_dwork);
632} 640}
633 641
634int ath10k_debug_create(struct ath10k *ar) 642int ath10k_debug_create(struct ath10k *ar)
@@ -662,6 +670,11 @@ int ath10k_debug_create(struct ath10k *ar)
662 return 0; 670 return 0;
663} 671}
664 672
673void ath10k_debug_destroy(struct ath10k *ar)
674{
675 cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
676}
677
665#endif /* CONFIG_ATH10K_DEBUGFS */ 678#endif /* CONFIG_ATH10K_DEBUGFS */
666 679
667#ifdef CONFIG_ATH10K_DEBUG 680#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 6576b82a8d86..3cfe3ee90dbe 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -46,6 +46,7 @@ __printf(1, 2) int ath10k_warn(const char *fmt, ...);
46int ath10k_debug_start(struct ath10k *ar); 46int ath10k_debug_start(struct ath10k *ar);
47void ath10k_debug_stop(struct ath10k *ar); 47void ath10k_debug_stop(struct ath10k *ar);
48int ath10k_debug_create(struct ath10k *ar); 48int ath10k_debug_create(struct ath10k *ar);
49void ath10k_debug_destroy(struct ath10k *ar);
49void ath10k_debug_read_service_map(struct ath10k *ar, 50void ath10k_debug_read_service_map(struct ath10k *ar,
50 void *service_map, 51 void *service_map,
51 size_t map_size); 52 size_t map_size);
@@ -67,6 +68,10 @@ static inline int ath10k_debug_create(struct ath10k *ar)
67 return 0; 68 return 0;
68} 69}
69 70
71static inline void ath10k_debug_destroy(struct ath10k *ar)
72{
73}
74
70static inline void ath10k_debug_read_service_map(struct ath10k *ar, 75static inline void ath10k_debug_read_service_map(struct ath10k *ar,
71 void *service_map, 76 void *service_map,
72 size_t map_size) 77 size_t map_size)
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 3b93c6a01c6c..d9335e9d0d04 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -308,7 +308,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
308 struct sk_buff *txdesc = NULL; 308 struct sk_buff *txdesc = NULL;
309 struct htt_cmd *cmd; 309 struct htt_cmd *cmd;
310 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); 310 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
311 u8 vdev_id = skb_cb->htt.vdev_id; 311 u8 vdev_id = skb_cb->vdev_id;
312 int len = 0; 312 int len = 0;
313 int msdu_id = -1; 313 int msdu_id = -1;
314 int res; 314 int res;
@@ -384,7 +384,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
384 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); 384 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
385 struct sk_buff *txdesc = NULL; 385 struct sk_buff *txdesc = NULL;
386 bool use_frags; 386 bool use_frags;
387 u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; 387 u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
388 u8 tid; 388 u8 tid;
389 int prefetch_len, desc_len; 389 int prefetch_len, desc_len;
390 int msdu_id = -1; 390 int msdu_id = -1;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 8c1be7685922..8aeb46d9b534 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -20,12 +20,6 @@
20 20
21#include "targaddrs.h" 21#include "targaddrs.h"
22 22
23/* Supported FW version */
24#define SUPPORTED_FW_MAJOR 1
25#define SUPPORTED_FW_MINOR 0
26#define SUPPORTED_FW_RELEASE 0
27#define SUPPORTED_FW_BUILD 636
28
29/* QCA988X 1.0 definitions (unsupported) */ 23/* QCA988X 1.0 definitions (unsupported) */
30#define QCA988X_HW_1_0_CHIP_ID_REV 0x0 24#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
31 25
@@ -38,6 +32,25 @@
38#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" 32#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
39#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 33#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
40 34
35#define ATH10K_FW_API2_FILE "firmware-2.bin"
36
37/* includes also the null byte */
38#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
39
40struct ath10k_fw_ie {
41 __le32 id;
42 __le32 len;
43 u8 data[0];
44};
45
46enum ath10k_fw_ie_type {
47 ATH10K_FW_IE_FW_VERSION = 0,
48 ATH10K_FW_IE_TIMESTAMP = 1,
49 ATH10K_FW_IE_FEATURES = 2,
50 ATH10K_FW_IE_FW_IMAGE = 3,
51 ATH10K_FW_IE_OTP_IMAGE = 4,
52};
53
41/* Known pecularities: 54/* Known pecularities:
42 * - current FW doesn't support raw rx mode (last tested v599) 55 * - current FW doesn't support raw rx mode (last tested v599)
43 * - current FW dumps upon raw tx mode (last tested v599) 56 * - current FW dumps upon raw tx mode (last tested v599)
@@ -59,6 +72,7 @@ enum ath10k_mcast2ucast_mode {
59 ATH10K_MCAST2UCAST_ENABLED = 1, 72 ATH10K_MCAST2UCAST_ENABLED = 1,
60}; 73};
61 74
75/* Target specific defines for MAIN firmware */
62#define TARGET_NUM_VDEVS 8 76#define TARGET_NUM_VDEVS 8
63#define TARGET_NUM_PEER_AST 2 77#define TARGET_NUM_PEER_AST 2
64#define TARGET_NUM_WDS_ENTRIES 32 78#define TARGET_NUM_WDS_ENTRIES 32
@@ -93,6 +107,36 @@ enum ath10k_mcast2ucast_mode {
93#define TARGET_NUM_MSDU_DESC (1024 + 400) 107#define TARGET_NUM_MSDU_DESC (1024 + 400)
94#define TARGET_MAX_FRAG_ENTRIES 0 108#define TARGET_MAX_FRAG_ENTRIES 0
95 109
110/* Target specific defines for 10.X firmware */
111#define TARGET_10X_NUM_VDEVS 16
112#define TARGET_10X_NUM_PEER_AST 2
113#define TARGET_10X_NUM_WDS_ENTRIES 32
114#define TARGET_10X_DMA_BURST_SIZE 0
115#define TARGET_10X_MAC_AGGR_DELIM 0
116#define TARGET_10X_AST_SKID_LIMIT 16
117#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
118#define TARGET_10X_NUM_OFFLOAD_PEERS 0
119#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
120#define TARGET_10X_NUM_PEER_KEYS 2
121#define TARGET_10X_NUM_TIDS 256
122#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
123#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
124#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
125#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
126#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
127#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
128#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
129#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
130#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
131#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
132#define TARGET_10X_NUM_MCAST_GROUPS 0
133#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
134#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
135#define TARGET_10X_TX_DBG_LOG_SIZE 1024
136#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
137#define TARGET_10X_VOW_CONFIG 0
138#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
139#define TARGET_10X_MAX_FRAG_ENTRIES 0
96 140
97/* Number of Copy Engines supported */ 141/* Number of Copy Engines supported */
98#define CE_COUNT 8 142#define CE_COUNT 8
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 99a9bad3f398..0b1cc516e778 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -334,25 +334,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
334 334
335static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) 335static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
336{ 336{
337 struct ath10k *ar = arvif->ar;
338 u32 vdev_param;
339
337 if (value != 0xFFFFFFFF) 340 if (value != 0xFFFFFFFF)
338 value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold, 341 value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
339 ATH10K_RTS_MAX); 342 ATH10K_RTS_MAX);
340 343
341 return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, 344 vdev_param = ar->wmi.vdev_param->rts_threshold;
342 WMI_VDEV_PARAM_RTS_THRESHOLD, 345 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
343 value);
344} 346}
345 347
346static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value) 348static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
347{ 349{
350 struct ath10k *ar = arvif->ar;
351 u32 vdev_param;
352
348 if (value != 0xFFFFFFFF) 353 if (value != 0xFFFFFFFF)
349 value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold, 354 value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
350 ATH10K_FRAGMT_THRESHOLD_MIN, 355 ATH10K_FRAGMT_THRESHOLD_MIN,
351 ATH10K_FRAGMT_THRESHOLD_MAX); 356 ATH10K_FRAGMT_THRESHOLD_MAX);
352 357
353 return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, 358 vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
354 WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, 359 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
355 value);
356} 360}
357 361
358static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) 362static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
@@ -562,12 +566,9 @@ static int ath10k_monitor_stop(struct ath10k *ar)
562 566
563 lockdep_assert_held(&ar->conf_mutex); 567 lockdep_assert_held(&ar->conf_mutex);
564 568
565 /* For some reasons, ath10k_wmi_vdev_down() here couse 569 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
566 * often ath10k_wmi_vdev_stop() to fail. Next we could 570 if (ret)
567 * not run monitor vdev and driver reload 571 ath10k_warn("Monitor vdev down failed: %d\n", ret);
568 * required. Don't see such problems we skip
569 * ath10k_wmi_vdev_down() here.
570 */
571 572
572 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 573 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
573 if (ret) 574 if (ret)
@@ -677,6 +678,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
677 struct ieee80211_bss_conf *info, 678 struct ieee80211_bss_conf *info,
678 const u8 self_peer[ETH_ALEN]) 679 const u8 self_peer[ETH_ALEN])
679{ 680{
681 u32 vdev_param;
680 int ret = 0; 682 int ret = 0;
681 683
682 lockdep_assert_held(&arvif->ar->conf_mutex); 684 lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -710,8 +712,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
710 return; 712 return;
711 } 713 }
712 714
713 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, 715 vdev_param = arvif->ar->wmi.vdev_param->atim_window;
714 WMI_VDEV_PARAM_ATIM_WINDOW, 716 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
715 ATH10K_DEFAULT_ATIM); 717 ATH10K_DEFAULT_ATIM);
716 if (ret) 718 if (ret)
717 ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n", 719 ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
@@ -721,35 +723,30 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
721/* 723/*
722 * Review this when mac80211 gains per-interface powersave support. 724 * Review this when mac80211 gains per-interface powersave support.
723 */ 725 */
724static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 726static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
725{ 727{
726 struct ath10k_generic_iter *ar_iter = data; 728 struct ath10k *ar = arvif->ar;
727 struct ieee80211_conf *conf = &ar_iter->ar->hw->conf; 729 struct ieee80211_conf *conf = &ar->hw->conf;
728 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
729 enum wmi_sta_powersave_param param; 730 enum wmi_sta_powersave_param param;
730 enum wmi_sta_ps_mode psmode; 731 enum wmi_sta_ps_mode psmode;
731 int ret; 732 int ret;
732 733
733 lockdep_assert_held(&arvif->ar->conf_mutex); 734 lockdep_assert_held(&arvif->ar->conf_mutex);
734 735
735 if (vif->type != NL80211_IFTYPE_STATION) 736 if (arvif->vif->type != NL80211_IFTYPE_STATION)
736 return; 737 return 0;
737 738
738 if (conf->flags & IEEE80211_CONF_PS) { 739 if (conf->flags & IEEE80211_CONF_PS) {
739 psmode = WMI_STA_PS_MODE_ENABLED; 740 psmode = WMI_STA_PS_MODE_ENABLED;
740 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 741 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
741 742
742 ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar, 743 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
743 arvif->vdev_id,
744 param,
745 conf->dynamic_ps_timeout); 744 conf->dynamic_ps_timeout);
746 if (ret) { 745 if (ret) {
747 ath10k_warn("Failed to set inactivity time for VDEV: %d\n", 746 ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
748 arvif->vdev_id); 747 arvif->vdev_id);
749 return; 748 return ret;
750 } 749 }
751
752 ar_iter->ret = ret;
753 } else { 750 } else {
754 psmode = WMI_STA_PS_MODE_DISABLED; 751 psmode = WMI_STA_PS_MODE_DISABLED;
755 } 752 }
@@ -757,11 +754,14 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
757 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", 754 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
758 arvif->vdev_id, psmode ? "enable" : "disable"); 755 arvif->vdev_id, psmode ? "enable" : "disable");
759 756
760 ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id, 757 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
761 psmode); 758 if (ret) {
762 if (ar_iter->ret)
763 ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n", 759 ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
764 psmode, arvif->vdev_id); 760 psmode, arvif->vdev_id);
761 return ret;
762 }
763
764 return 0;
765} 765}
766 766
767/**********************/ 767/**********************/
@@ -1031,14 +1031,27 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
1031 struct wmi_peer_assoc_complete_arg *arg) 1031 struct wmi_peer_assoc_complete_arg *arg)
1032{ 1032{
1033 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 1033 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
1034 u8 ampdu_factor;
1034 1035
1035 if (!vht_cap->vht_supported) 1036 if (!vht_cap->vht_supported)
1036 return; 1037 return;
1037 1038
1038 arg->peer_flags |= WMI_PEER_VHT; 1039 arg->peer_flags |= WMI_PEER_VHT;
1039
1040 arg->peer_vht_caps = vht_cap->cap; 1040 arg->peer_vht_caps = vht_cap->cap;
1041 1041
1042
1043 ampdu_factor = (vht_cap->cap &
1044 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
1045 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
1046
1047 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
1048 * zero in VHT IE. Using it would result in degraded throughput.
1049 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
1050 * it if VHT max_mpdu is smaller. */
1051 arg->peer_max_mpdu = max(arg->peer_max_mpdu,
1052 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
1053 ampdu_factor)) - 1);
1054
1042 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 1055 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
1043 arg->peer_flags |= WMI_PEER_80MHZ; 1056 arg->peer_flags |= WMI_PEER_80MHZ;
1044 1057
@@ -1124,26 +1137,25 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
1124 WARN_ON(phymode == MODE_UNKNOWN); 1137 WARN_ON(phymode == MODE_UNKNOWN);
1125} 1138}
1126 1139
1127static int ath10k_peer_assoc(struct ath10k *ar, 1140static int ath10k_peer_assoc_prepare(struct ath10k *ar,
1128 struct ath10k_vif *arvif, 1141 struct ath10k_vif *arvif,
1129 struct ieee80211_sta *sta, 1142 struct ieee80211_sta *sta,
1130 struct ieee80211_bss_conf *bss_conf) 1143 struct ieee80211_bss_conf *bss_conf,
1144 struct wmi_peer_assoc_complete_arg *arg)
1131{ 1145{
1132 struct wmi_peer_assoc_complete_arg arg;
1133
1134 lockdep_assert_held(&ar->conf_mutex); 1146 lockdep_assert_held(&ar->conf_mutex);
1135 1147
1136 memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg)); 1148 memset(arg, 0, sizeof(*arg));
1137 1149
1138 ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg); 1150 ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
1139 ath10k_peer_assoc_h_crypto(ar, arvif, &arg); 1151 ath10k_peer_assoc_h_crypto(ar, arvif, arg);
1140 ath10k_peer_assoc_h_rates(ar, sta, &arg); 1152 ath10k_peer_assoc_h_rates(ar, sta, arg);
1141 ath10k_peer_assoc_h_ht(ar, sta, &arg); 1153 ath10k_peer_assoc_h_ht(ar, sta, arg);
1142 ath10k_peer_assoc_h_vht(ar, sta, &arg); 1154 ath10k_peer_assoc_h_vht(ar, sta, arg);
1143 ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg); 1155 ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
1144 ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg); 1156 ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
1145 1157
1146 return ath10k_wmi_peer_assoc(ar, &arg); 1158 return 0;
1147} 1159}
1148 1160
1149/* can be called only in mac80211 callbacks due to `key_count` usage */ 1161/* can be called only in mac80211 callbacks due to `key_count` usage */
@@ -1153,6 +1165,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1153{ 1165{
1154 struct ath10k *ar = hw->priv; 1166 struct ath10k *ar = hw->priv;
1155 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 1167 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1168 struct wmi_peer_assoc_complete_arg peer_arg;
1156 struct ieee80211_sta *ap_sta; 1169 struct ieee80211_sta *ap_sta;
1157 int ret; 1170 int ret;
1158 1171
@@ -1168,15 +1181,24 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1168 return; 1181 return;
1169 } 1182 }
1170 1183
1171 ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf); 1184 ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
1185 bss_conf, &peer_arg);
1172 if (ret) { 1186 if (ret) {
1173 ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid); 1187 ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
1188 bss_conf->bssid, ret);
1174 rcu_read_unlock(); 1189 rcu_read_unlock();
1175 return; 1190 return;
1176 } 1191 }
1177 1192
1178 rcu_read_unlock(); 1193 rcu_read_unlock();
1179 1194
1195 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1196 if (ret) {
1197 ath10k_warn("Peer assoc failed for %pM\n: %d",
1198 bss_conf->bssid, ret);
1199 return;
1200 }
1201
1180 ath10k_dbg(ATH10K_DBG_MAC, 1202 ath10k_dbg(ATH10K_DBG_MAC,
1181 "mac vdev %d up (associated) bssid %pM aid %d\n", 1203 "mac vdev %d up (associated) bssid %pM aid %d\n",
1182 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 1204 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
@@ -1224,19 +1246,28 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
1224 /* FIXME: why don't we print error if wmi call fails? */ 1246 /* FIXME: why don't we print error if wmi call fails? */
1225 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1247 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1226 1248
1227 arvif->def_wep_key_index = 0; 1249 arvif->def_wep_key_idx = 0;
1228} 1250}
1229 1251
1230static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, 1252static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
1231 struct ieee80211_sta *sta) 1253 struct ieee80211_sta *sta)
1232{ 1254{
1255 struct wmi_peer_assoc_complete_arg peer_arg;
1233 int ret = 0; 1256 int ret = 0;
1234 1257
1235 lockdep_assert_held(&ar->conf_mutex); 1258 lockdep_assert_held(&ar->conf_mutex);
1236 1259
1237 ret = ath10k_peer_assoc(ar, arvif, sta, NULL); 1260 ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
1238 if (ret) { 1261 if (ret) {
1239 ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr); 1262 ath10k_warn("WMI peer assoc prepare failed for %pM\n",
1263 sta->addr);
1264 return ret;
1265 }
1266
1267 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1268 if (ret) {
1269 ath10k_warn("Peer assoc failed for STA %pM\n: %d",
1270 sta->addr, ret);
1240 return ret; 1271 return ret;
1241 } 1272 }
1242 1273
@@ -1405,6 +1436,33 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
1405/* TX handlers */ 1436/* TX handlers */
1406/***************/ 1437/***************/
1407 1438
1439static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
1440{
1441 if (ieee80211_is_mgmt(hdr->frame_control))
1442 return HTT_DATA_TX_EXT_TID_MGMT;
1443
1444 if (!ieee80211_is_data_qos(hdr->frame_control))
1445 return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1446
1447 if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
1448 return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1449
1450 return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1451}
1452
1453static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
1454 struct ieee80211_tx_info *info)
1455{
1456 if (info->control.vif)
1457 return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
1458
1459 if (ar->monitor_enabled)
1460 return ar->monitor_vdev_id;
1461
1462 ath10k_warn("could not resolve vdev id\n");
1463 return 0;
1464}
1465
1408/* 1466/*
1409 * Frames sent to the FW have to be in "Native Wifi" format. 1467 * Frames sent to the FW have to be in "Native Wifi" format.
1410 * Strip the QoS field from the 802.11 header. 1468 * Strip the QoS field from the 802.11 header.
@@ -1425,6 +1483,30 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
1425 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 1483 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
1426} 1484}
1427 1485
1486static void ath10k_tx_wep_key_work(struct work_struct *work)
1487{
1488 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1489 wep_key_work);
1490 int ret, keyidx = arvif->def_wep_key_newidx;
1491
1492 if (arvif->def_wep_key_idx == keyidx)
1493 return;
1494
1495 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
1496 arvif->vdev_id, keyidx);
1497
1498 ret = ath10k_wmi_vdev_set_param(arvif->ar,
1499 arvif->vdev_id,
1500 arvif->ar->wmi.vdev_param->def_keyid,
1501 keyidx);
1502 if (ret) {
1503 ath10k_warn("could not update wep keyidx (%d)\n", ret);
1504 return;
1505 }
1506
1507 arvif->def_wep_key_idx = keyidx;
1508}
1509
1428static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) 1510static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
1429{ 1511{
1430 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1512 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1433,7 +1515,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
1433 struct ath10k *ar = arvif->ar; 1515 struct ath10k *ar = arvif->ar;
1434 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1516 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1435 struct ieee80211_key_conf *key = info->control.hw_key; 1517 struct ieee80211_key_conf *key = info->control.hw_key;
1436 int ret;
1437 1518
1438 if (!ieee80211_has_protected(hdr->frame_control)) 1519 if (!ieee80211_has_protected(hdr->frame_control))
1439 return; 1520 return;
@@ -1445,21 +1526,14 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
1445 key->cipher != WLAN_CIPHER_SUITE_WEP104) 1526 key->cipher != WLAN_CIPHER_SUITE_WEP104)
1446 return; 1527 return;
1447 1528
1448 if (key->keyidx == arvif->def_wep_key_index) 1529 if (key->keyidx == arvif->def_wep_key_idx)
1449 return;
1450
1451 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d keyidx %d\n",
1452 arvif->vdev_id, key->keyidx);
1453
1454 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
1455 WMI_VDEV_PARAM_DEF_KEYID,
1456 key->keyidx);
1457 if (ret) {
1458 ath10k_warn("could not update wep keyidx (%d)\n", ret);
1459 return; 1530 return;
1460 }
1461 1531
1462 arvif->def_wep_key_index = key->keyidx; 1532 /* FIXME: Most likely a few frames will be TXed with an old key. Simply
1533 * queueing frames until key index is updated is not an option because
1534 * sk_buff may need more processing to be done, e.g. offchannel */
1535 arvif->def_wep_key_newidx = key->keyidx;
1536 ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
1463} 1537}
1464 1538
1465static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) 1539static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
@@ -1489,7 +1563,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
1489static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) 1563static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1490{ 1564{
1491 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1565 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1492 int ret; 1566 int ret = 0;
1493 1567
1494 if (ar->htt.target_version_major >= 3) { 1568 if (ar->htt.target_version_major >= 3) {
1495 /* Since HTT 3.0 there is no separate mgmt tx command */ 1569 /* Since HTT 3.0 there is no separate mgmt tx command */
@@ -1497,16 +1571,32 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1497 goto exit; 1571 goto exit;
1498 } 1572 }
1499 1573
1500 if (ieee80211_is_mgmt(hdr->frame_control)) 1574 if (ieee80211_is_mgmt(hdr->frame_control)) {
1501 ret = ath10k_htt_mgmt_tx(&ar->htt, skb); 1575 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
1502 else if (ieee80211_is_nullfunc(hdr->frame_control)) 1576 ar->fw_features)) {
1577 if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
1578 ATH10K_MAX_NUM_MGMT_PENDING) {
1579 ath10k_warn("wmi mgmt_tx queue limit reached\n");
1580 ret = -EBUSY;
1581 goto exit;
1582 }
1583
1584 skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
1585 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
1586 } else {
1587 ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
1588 }
1589 } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
1590 ar->fw_features) &&
1591 ieee80211_is_nullfunc(hdr->frame_control)) {
1503 /* FW does not report tx status properly for NullFunc frames 1592 /* FW does not report tx status properly for NullFunc frames
1504 * unless they are sent through mgmt tx path. mac80211 sends 1593 * unless they are sent through mgmt tx path. mac80211 sends
1505 * those frames when it detects link/beacon loss and depends on 1594 * those frames when it detects link/beacon loss and depends
1506 * the tx status to be correct. */ 1595 * on the tx status to be correct. */
1507 ret = ath10k_htt_mgmt_tx(&ar->htt, skb); 1596 ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
1508 else 1597 } else {
1509 ret = ath10k_htt_tx(&ar->htt, skb); 1598 ret = ath10k_htt_tx(&ar->htt, skb);
1599 }
1510 1600
1511exit: 1601exit:
1512 if (ret) { 1602 if (ret) {
@@ -1557,7 +1647,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
1557 1647
1558 hdr = (struct ieee80211_hdr *)skb->data; 1648 hdr = (struct ieee80211_hdr *)skb->data;
1559 peer_addr = ieee80211_get_DA(hdr); 1649 peer_addr = ieee80211_get_DA(hdr);
1560 vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id; 1650 vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
1561 1651
1562 spin_lock_bh(&ar->data_lock); 1652 spin_lock_bh(&ar->data_lock);
1563 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 1653 peer = ath10k_peer_find(ar, vdev_id, peer_addr);
@@ -1599,6 +1689,36 @@ void ath10k_offchan_tx_work(struct work_struct *work)
1599 } 1689 }
1600} 1690}
1601 1691
1692void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
1693{
1694 struct sk_buff *skb;
1695
1696 for (;;) {
1697 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
1698 if (!skb)
1699 break;
1700
1701 ieee80211_free_txskb(ar->hw, skb);
1702 }
1703}
1704
1705void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
1706{
1707 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
1708 struct sk_buff *skb;
1709 int ret;
1710
1711 for (;;) {
1712 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
1713 if (!skb)
1714 break;
1715
1716 ret = ath10k_wmi_mgmt_tx(ar, skb);
1717 if (ret)
1718 ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
1719 }
1720}
1721
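When the firmware advertises ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, the tx path above no longer hands management frames to HTT directly: it rejects frames once ATH10K_MAX_NUM_MGMT_PENDING are queued, otherwise tails them onto wmi_mgmt_tx_queue and lets ath10k_mgmt_over_wmi_tx_work drain the queue, while the purge helper frees anything still pending at teardown. The standalone C sketch below shows only that bounded enqueue / deferred-drain shape; the names, the array queue and the numeric limit are invented for illustration and are not the driver's data structures:

	#include <stdio.h>

	#define MAX_PENDING 4           /* stand-in for ATH10K_MAX_NUM_MGMT_PENDING */

	static int queue[MAX_PENDING];  /* stand-in for the sk_buff queue */
	static int queue_len;

	/* tx path: reject frames past the limit, otherwise defer to the worker */
	static int mgmt_tx_enqueue(int frame)
	{
		if (queue_len >= MAX_PENDING) {
			fprintf(stderr, "mgmt tx queue limit reached\n");
			return -1;
		}
		queue[queue_len++] = frame;
		return 0;
	}

	/* worker: drain everything queued so far and hand it to the WMI layer */
	static void mgmt_tx_work(void)
	{
		int i;

		for (i = 0; i < queue_len; i++)
			printf("wmi mgmt tx of frame %d\n", queue[i]);
		queue_len = 0;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 6; i++)
			mgmt_tx_enqueue(i);  /* the last two exceed the limit */
		mgmt_tx_work();              /* runs later, in process context */
		return 0;
	}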
1602/************/ 1722/************/
1603/* Scanning */ 1723/* Scanning */
1604/************/ 1724/************/
@@ -1722,16 +1842,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
1722 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1842 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1723 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1843 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1724 struct ath10k *ar = hw->priv; 1844 struct ath10k *ar = hw->priv;
1725 struct ath10k_vif *arvif = NULL; 1845 u8 tid, vdev_id;
1726 u32 vdev_id = 0;
1727 u8 tid;
1728
1729 if (info->control.vif) {
1730 arvif = ath10k_vif_to_arvif(info->control.vif);
1731 vdev_id = arvif->vdev_id;
1732 } else if (ar->monitor_enabled) {
1733 vdev_id = ar->monitor_vdev_id;
1734 }
1735 1846
1736 /* We should disable CCK RATE due to P2P */ 1847 /* We should disable CCK RATE due to P2P */
1737 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 1848 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
@@ -1739,14 +1850,8 @@ static void ath10k_tx(struct ieee80211_hw *hw,
1739 1850
1740 /* we must calculate tid before we apply qos workaround 1851 /* we must calculate tid before we apply qos workaround
1741 * as we'd lose the qos control field */ 1852 * as we'd lose the qos control field */
1742 tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; 1853 tid = ath10k_tx_h_get_tid(hdr);
1743 if (ieee80211_is_mgmt(hdr->frame_control)) { 1854 vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
1744 tid = HTT_DATA_TX_EXT_TID_MGMT;
1745 } else if (ieee80211_is_data_qos(hdr->frame_control) &&
1746 is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
1747 u8 *qc = ieee80211_get_qos_ctl(hdr);
1748 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1749 }
1750 1855
1751 /* it makes no sense to process injected frames like that */ 1856 /* it makes no sense to process injected frames like that */
1752 if (info->control.vif && 1857 if (info->control.vif &&
@@ -1757,14 +1862,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
1757 ath10k_tx_h_seq_no(skb); 1862 ath10k_tx_h_seq_no(skb);
1758 } 1863 }
1759 1864
1865 ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
1760 ATH10K_SKB_CB(skb)->htt.is_offchan = false; 1866 ATH10K_SKB_CB(skb)->htt.is_offchan = false;
1761 ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
1762 ATH10K_SKB_CB(skb)->htt.tid = tid; 1867 ATH10K_SKB_CB(skb)->htt.tid = tid;
1763 1868
1764 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 1869 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
1765 spin_lock_bh(&ar->data_lock); 1870 spin_lock_bh(&ar->data_lock);
1766 ATH10K_SKB_CB(skb)->htt.is_offchan = true; 1871 ATH10K_SKB_CB(skb)->htt.is_offchan = true;
1767 ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id; 1872 ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
1768 spin_unlock_bh(&ar->data_lock); 1873 spin_unlock_bh(&ar->data_lock);
1769 1874
1770 ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb); 1875 ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
@@ -1786,6 +1891,7 @@ void ath10k_halt(struct ath10k *ar)
1786 1891
1787 del_timer_sync(&ar->scan.timeout); 1892 del_timer_sync(&ar->scan.timeout);
1788 ath10k_offchan_tx_purge(ar); 1893 ath10k_offchan_tx_purge(ar);
1894 ath10k_mgmt_over_wmi_tx_purge(ar);
1789 ath10k_peer_cleanup_all(ar); 1895 ath10k_peer_cleanup_all(ar);
1790 ath10k_core_stop(ar); 1896 ath10k_core_stop(ar);
1791 ath10k_hif_power_down(ar); 1897 ath10k_hif_power_down(ar);
@@ -1832,12 +1938,12 @@ static int ath10k_start(struct ieee80211_hw *hw)
1832 else if (ar->state == ATH10K_STATE_RESTARTING) 1938 else if (ar->state == ATH10K_STATE_RESTARTING)
1833 ar->state = ATH10K_STATE_RESTARTED; 1939 ar->state = ATH10K_STATE_RESTARTED;
1834 1940
1835 ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); 1941 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
1836 if (ret) 1942 if (ret)
1837 ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", 1943 ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
1838 ret); 1944 ret);
1839 1945
1840 ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0); 1946 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
1841 if (ret) 1947 if (ret)
1842 ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", 1948 ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
1843 ret); 1949 ret);
@@ -1862,32 +1968,29 @@ static void ath10k_stop(struct ieee80211_hw *hw)
1862 ar->state = ATH10K_STATE_OFF; 1968 ar->state = ATH10K_STATE_OFF;
1863 mutex_unlock(&ar->conf_mutex); 1969 mutex_unlock(&ar->conf_mutex);
1864 1970
1971 ath10k_mgmt_over_wmi_tx_purge(ar);
1972
1865 cancel_work_sync(&ar->offchan_tx_work); 1973 cancel_work_sync(&ar->offchan_tx_work);
1974 cancel_work_sync(&ar->wmi_mgmt_tx_work);
1866 cancel_work_sync(&ar->restart_work); 1975 cancel_work_sync(&ar->restart_work);
1867} 1976}
1868 1977
1869static void ath10k_config_ps(struct ath10k *ar) 1978static int ath10k_config_ps(struct ath10k *ar)
1870{ 1979{
1871 struct ath10k_generic_iter ar_iter; 1980 struct ath10k_vif *arvif;
1981 int ret = 0;
1872 1982
1873 lockdep_assert_held(&ar->conf_mutex); 1983 lockdep_assert_held(&ar->conf_mutex);
1874 1984
1875 /* During HW reconfiguration mac80211 reports all interfaces that were 1985 list_for_each_entry(arvif, &ar->arvifs, list) {
1876 * running until reconfiguration was started. Since FW doesn't have any 1986 ret = ath10k_mac_vif_setup_ps(arvif);
1877 * vdevs at this point we must not iterate over this interface list. 1987 if (ret) {
1878 * This setting will be updated upon add_interface(). */ 1988 ath10k_warn("could not setup powersave (%d)\n", ret);
1879 if (ar->state == ATH10K_STATE_RESTARTED) 1989 break;
1880 return; 1990 }
1881 1991 }
1882 memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
1883 ar_iter.ar = ar;
1884
1885 ieee80211_iterate_active_interfaces_atomic(
1886 ar->hw, IEEE80211_IFACE_ITER_NORMAL,
1887 ath10k_ps_iter, &ar_iter);
1888 1992
1889 if (ar_iter.ret) 1993 return ret;
1890 ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
1891} 1994}
1892 1995
1893static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 1996static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1936,6 +2039,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
1936 int ret = 0; 2039 int ret = 0;
1937 u32 value; 2040 u32 value;
1938 int bit; 2041 int bit;
2042 u32 vdev_param;
1939 2043
1940 mutex_lock(&ar->conf_mutex); 2044 mutex_lock(&ar->conf_mutex);
1941 2045
@@ -1944,21 +2048,22 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
1944 arvif->ar = ar; 2048 arvif->ar = ar;
1945 arvif->vif = vif; 2049 arvif->vif = vif;
1946 2050
2051 INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
2052
1947 if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) { 2053 if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
1948 ath10k_warn("Only one monitor interface allowed\n"); 2054 ath10k_warn("Only one monitor interface allowed\n");
1949 ret = -EBUSY; 2055 ret = -EBUSY;
1950 goto exit; 2056 goto err;
1951 } 2057 }
1952 2058
1953 bit = ffs(ar->free_vdev_map); 2059 bit = ffs(ar->free_vdev_map);
1954 if (bit == 0) { 2060 if (bit == 0) {
1955 ret = -EBUSY; 2061 ret = -EBUSY;
1956 goto exit; 2062 goto err;
1957 } 2063 }
1958 2064
1959 arvif->vdev_id = bit - 1; 2065 arvif->vdev_id = bit - 1;
1960 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; 2066 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
1961 ar->free_vdev_map &= ~(1 << arvif->vdev_id);
1962 2067
1963 if (ar->p2p) 2068 if (ar->p2p)
1964 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; 2069 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
@@ -1994,25 +2099,34 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
1994 arvif->vdev_subtype, vif->addr); 2099 arvif->vdev_subtype, vif->addr);
1995 if (ret) { 2100 if (ret) {
1996 ath10k_warn("WMI vdev create failed: ret %d\n", ret); 2101 ath10k_warn("WMI vdev create failed: ret %d\n", ret);
1997 goto exit; 2102 goto err;
1998 } 2103 }
1999 2104
2000 ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID, 2105 ar->free_vdev_map &= ~BIT(arvif->vdev_id);
2001 arvif->def_wep_key_index); 2106 list_add(&arvif->list, &ar->arvifs);
2002 if (ret) 2107
2108 vdev_param = ar->wmi.vdev_param->def_keyid;
2109 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
2110 arvif->def_wep_key_idx);
2111 if (ret) {
2003 ath10k_warn("Failed to set default keyid: %d\n", ret); 2112 ath10k_warn("Failed to set default keyid: %d\n", ret);
2113 goto err_vdev_delete;
2114 }
2004 2115
2005 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2116 vdev_param = ar->wmi.vdev_param->tx_encap_type;
2006 WMI_VDEV_PARAM_TX_ENCAP_TYPE, 2117 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2007 ATH10K_HW_TXRX_NATIVE_WIFI); 2118 ATH10K_HW_TXRX_NATIVE_WIFI);
2008 if (ret) 2119 /* 10.X firmware does not support this VDEV parameter. Do not warn */
2120 if (ret && ret != -EOPNOTSUPP) {
2009 ath10k_warn("Failed to set TX encap: %d\n", ret); 2121 ath10k_warn("Failed to set TX encap: %d\n", ret);
2122 goto err_vdev_delete;
2123 }
2010 2124
2011 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2125 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2012 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); 2126 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
2013 if (ret) { 2127 if (ret) {
2014 ath10k_warn("Failed to create peer for AP: %d\n", ret); 2128 ath10k_warn("Failed to create peer for AP: %d\n", ret);
2015 goto exit; 2129 goto err_vdev_delete;
2016 } 2130 }
2017 } 2131 }
2018 2132
@@ -2021,39 +2135,62 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2021 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 2135 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
2022 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2136 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2023 param, value); 2137 param, value);
2024 if (ret) 2138 if (ret) {
2025 ath10k_warn("Failed to set RX wake policy: %d\n", ret); 2139 ath10k_warn("Failed to set RX wake policy: %d\n", ret);
2140 goto err_peer_delete;
2141 }
2026 2142
2027 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 2143 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
2028 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 2144 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
2029 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2145 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2030 param, value); 2146 param, value);
2031 if (ret) 2147 if (ret) {
2032 ath10k_warn("Failed to set TX wake thresh: %d\n", ret); 2148 ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
2149 goto err_peer_delete;
2150 }
2033 2151
2034 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 2152 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
2035 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 2153 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
2036 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2154 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2037 param, value); 2155 param, value);
2038 if (ret) 2156 if (ret) {
2039 ath10k_warn("Failed to set PSPOLL count: %d\n", ret); 2157 ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
2158 goto err_peer_delete;
2159 }
2040 } 2160 }
2041 2161
2042 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 2162 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
2043 if (ret) 2163 if (ret) {
2044 ath10k_warn("failed to set rts threshold for vdev %d (%d)\n", 2164 ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
2045 arvif->vdev_id, ret); 2165 arvif->vdev_id, ret);
2166 goto err_peer_delete;
2167 }
2046 2168
2047 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); 2169 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
2048 if (ret) 2170 if (ret) {
2049 ath10k_warn("failed to set frag threshold for vdev %d (%d)\n", 2171 ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
2050 arvif->vdev_id, ret); 2172 arvif->vdev_id, ret);
2173 goto err_peer_delete;
2174 }
2051 2175
2052 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) 2176 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2053 ar->monitor_present = true; 2177 ar->monitor_present = true;
2054 2178
2055exit:
2056 mutex_unlock(&ar->conf_mutex); 2179 mutex_unlock(&ar->conf_mutex);
2180 return 0;
2181
2182err_peer_delete:
2183 if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
2184 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
2185
2186err_vdev_delete:
2187 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
2188 ar->free_vdev_map &= ~BIT(arvif->vdev_id);
2189 list_del(&arvif->list);
2190
2191err:
2192 mutex_unlock(&ar->conf_mutex);
2193
2057 return ret; 2194 return ret;
2058} 2195}
2059 2196
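The reworked add_interface() above drops the single exit: label in favour of stacked error labels (err_peer_delete, err_vdev_delete, err), so each failure jumps to a point that tears down only what had already been set up: the AP peer, then the vdev together with its free_vdev_map bit and arvifs list entry, then the mutex. A minimal self-contained illustration of that unwind idiom follows; the resource names are invented and the real driver undoes firmware state rather than printing:

	#include <stdio.h>

	static int create_vdev(void)  { puts("vdev created"); return 0; }
	static void delete_vdev(void) { puts("vdev deleted"); }
	static int create_peer(void)  { puts("peer created"); return 0; }
	static void delete_peer(void) { puts("peer deleted"); }
	static int set_params(void)   { puts("setting params failed"); return -1; }

	/* Each label undoes one step; falling through runs the earlier teardowns too. */
	static int add_interface(void)
	{
		int ret;

		ret = create_vdev();
		if (ret)
			goto err;

		ret = create_peer();
		if (ret)
			goto err_vdev_delete;

		ret = set_params();
		if (ret)
			goto err_peer_delete;

		return 0;

	err_peer_delete:
		delete_peer();
	err_vdev_delete:
		delete_vdev();
	err:
		return ret;
	}

	int main(void)
	{
		return add_interface() ? 1 : 0;
	}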
@@ -2066,6 +2203,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2066 2203
2067 mutex_lock(&ar->conf_mutex); 2204 mutex_lock(&ar->conf_mutex);
2068 2205
2206 cancel_work_sync(&arvif->wep_key_work);
2207
2069 spin_lock_bh(&ar->data_lock); 2208 spin_lock_bh(&ar->data_lock);
2070 if (arvif->beacon) { 2209 if (arvif->beacon) {
2071 dev_kfree_skb_any(arvif->beacon); 2210 dev_kfree_skb_any(arvif->beacon);
@@ -2074,6 +2213,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2074 spin_unlock_bh(&ar->data_lock); 2213 spin_unlock_bh(&ar->data_lock);
2075 2214
2076 ar->free_vdev_map |= 1 << (arvif->vdev_id); 2215 ar->free_vdev_map |= 1 << (arvif->vdev_id);
2216 list_del(&arvif->list);
2077 2217
2078 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2218 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2079 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); 2219 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
@@ -2154,6 +2294,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2154 struct ath10k *ar = hw->priv; 2294 struct ath10k *ar = hw->priv;
2155 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2295 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2156 int ret = 0; 2296 int ret = 0;
2297 u32 vdev_param, pdev_param;
2157 2298
2158 mutex_lock(&ar->conf_mutex); 2299 mutex_lock(&ar->conf_mutex);
2159 2300
@@ -2162,8 +2303,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2162 2303
2163 if (changed & BSS_CHANGED_BEACON_INT) { 2304 if (changed & BSS_CHANGED_BEACON_INT) {
2164 arvif->beacon_interval = info->beacon_int; 2305 arvif->beacon_interval = info->beacon_int;
2165 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2306 vdev_param = ar->wmi.vdev_param->beacon_interval;
2166 WMI_VDEV_PARAM_BEACON_INTERVAL, 2307 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2167 arvif->beacon_interval); 2308 arvif->beacon_interval);
2168 ath10k_dbg(ATH10K_DBG_MAC, 2309 ath10k_dbg(ATH10K_DBG_MAC,
2169 "mac vdev %d beacon_interval %d\n", 2310 "mac vdev %d beacon_interval %d\n",
@@ -2179,8 +2320,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2179 "vdev %d set beacon tx mode to staggered\n", 2320 "vdev %d set beacon tx mode to staggered\n",
2180 arvif->vdev_id); 2321 arvif->vdev_id);
2181 2322
2182 ret = ath10k_wmi_pdev_set_param(ar, 2323 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
2183 WMI_PDEV_PARAM_BEACON_TX_MODE, 2324 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
2184 WMI_BEACON_STAGGERED_MODE); 2325 WMI_BEACON_STAGGERED_MODE);
2185 if (ret) 2326 if (ret)
2186 ath10k_warn("Failed to set beacon mode for VDEV: %d\n", 2327 ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
@@ -2194,8 +2335,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2194 "mac vdev %d dtim_period %d\n", 2335 "mac vdev %d dtim_period %d\n",
2195 arvif->vdev_id, arvif->dtim_period); 2336 arvif->vdev_id, arvif->dtim_period);
2196 2337
2197 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2338 vdev_param = ar->wmi.vdev_param->dtim_period;
2198 WMI_VDEV_PARAM_DTIM_PERIOD, 2339 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2199 arvif->dtim_period); 2340 arvif->dtim_period);
2200 if (ret) 2341 if (ret)
2201 ath10k_warn("Failed to set dtim period for VDEV: %d\n", 2342 ath10k_warn("Failed to set dtim period for VDEV: %d\n",
@@ -2262,8 +2403,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2262 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", 2403 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
2263 arvif->vdev_id, cts_prot); 2404 arvif->vdev_id, cts_prot);
2264 2405
2265 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2406 vdev_param = ar->wmi.vdev_param->enable_rtscts;
2266 WMI_VDEV_PARAM_ENABLE_RTSCTS, 2407 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2267 cts_prot); 2408 cts_prot);
2268 if (ret) 2409 if (ret)
2269 ath10k_warn("Failed to set CTS prot for VDEV: %d\n", 2410 ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
@@ -2281,8 +2422,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2281 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", 2422 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
2282 arvif->vdev_id, slottime); 2423 arvif->vdev_id, slottime);
2283 2424
2284 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2425 vdev_param = ar->wmi.vdev_param->slot_time;
2285 WMI_VDEV_PARAM_SLOT_TIME, 2426 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2286 slottime); 2427 slottime);
2287 if (ret) 2428 if (ret)
2288 ath10k_warn("Failed to set erp slot for VDEV: %d\n", 2429 ath10k_warn("Failed to set erp slot for VDEV: %d\n",
@@ -2300,8 +2441,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2300 "mac vdev %d preamble %d\n", 2441 "mac vdev %d preamble %d\n",
2301 arvif->vdev_id, preamble); 2442 arvif->vdev_id, preamble);
2302 2443
2303 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 2444 vdev_param = ar->wmi.vdev_param->preamble;
2304 WMI_VDEV_PARAM_PREAMBLE, 2445 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2305 preamble); 2446 preamble);
2306 if (ret) 2447 if (ret)
2307 ath10k_warn("Failed to set preamble for VDEV: %d\n", 2448 ath10k_warn("Failed to set preamble for VDEV: %d\n",
@@ -2751,86 +2892,51 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
2751 * Both RTS and Fragmentation threshold are interface-specific 2892 * Both RTS and Fragmentation threshold are interface-specific
2752 * in ath10k, but device-specific in mac80211. 2893 * in ath10k, but device-specific in mac80211.
2753 */ 2894 */
2754static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
2755{
2756 struct ath10k_generic_iter *ar_iter = data;
2757 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2758 u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
2759
2760 lockdep_assert_held(&arvif->ar->conf_mutex);
2761
2762 /* During HW reconfiguration mac80211 reports all interfaces that were
2763 * running until reconfiguration was started. Since FW doesn't have any
2764 * vdevs at this point we must not iterate over this interface list.
2765 * This setting will be updated upon add_interface(). */
2766 if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
2767 return;
2768
2769 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts_threshold %d\n",
2770 arvif->vdev_id, rts);
2771
2772 ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
2773 if (ar_iter->ret)
2774 ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
2775 arvif->vdev_id);
2776}
2777 2895
2778static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 2896static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2779{ 2897{
2780 struct ath10k_generic_iter ar_iter;
2781 struct ath10k *ar = hw->priv; 2898 struct ath10k *ar = hw->priv;
2782 2899 struct ath10k_vif *arvif;
2783 memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); 2900 int ret = 0;
2784 ar_iter.ar = ar;
2785 2901
2786 mutex_lock(&ar->conf_mutex); 2902 mutex_lock(&ar->conf_mutex);
2787 ieee80211_iterate_active_interfaces_atomic( 2903 list_for_each_entry(arvif, &ar->arvifs, list) {
2788 hw, IEEE80211_IFACE_ITER_NORMAL, 2904 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
2789 ath10k_set_rts_iter, &ar_iter); 2905 arvif->vdev_id, value);
2790 mutex_unlock(&ar->conf_mutex);
2791
2792 return ar_iter.ret;
2793}
2794
2795static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
2796{
2797 struct ath10k_generic_iter *ar_iter = data;
2798 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2799 u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
2800
2801 lockdep_assert_held(&arvif->ar->conf_mutex);
2802 2906
2803 /* During HW reconfiguration mac80211 reports all interfaces that were 2907 ret = ath10k_mac_set_rts(arvif, value);
2804 * running until reconfiguration was started. Since FW doesn't have any 2908 if (ret) {
2805 * vdevs at this point we must not iterate over this interface list. 2909 ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
2806 * This setting will be updated upon add_interface(). */ 2910 arvif->vdev_id, ret);
2807 if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) 2911 break;
2808 return; 2912 }
2809 2913 }
2810 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation_threshold %d\n", 2914 mutex_unlock(&ar->conf_mutex);
2811 arvif->vdev_id, frag);
2812 2915
2813 ar_iter->ret = ath10k_mac_set_frag(arvif, frag); 2916 return ret;
2814 if (ar_iter->ret)
2815 ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
2816 arvif->vdev_id);
2817} 2917}
2818 2918
2819static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 2919static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
2820{ 2920{
2821 struct ath10k_generic_iter ar_iter;
2822 struct ath10k *ar = hw->priv; 2921 struct ath10k *ar = hw->priv;
2823 2922 struct ath10k_vif *arvif;
2824 memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); 2923 int ret = 0;
2825 ar_iter.ar = ar;
2826 2924
2827 mutex_lock(&ar->conf_mutex); 2925 mutex_lock(&ar->conf_mutex);
2828 ieee80211_iterate_active_interfaces_atomic( 2926 list_for_each_entry(arvif, &ar->arvifs, list) {
2829 hw, IEEE80211_IFACE_ITER_NORMAL, 2927 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
2830 ath10k_set_frag_iter, &ar_iter); 2928 arvif->vdev_id, value);
2929
2930 ret = ath10k_mac_set_frag(arvif, value);
2931 if (ret) {
2932 ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
2933 arvif->vdev_id, ret);
2934 break;
2935 }
2936 }
2831 mutex_unlock(&ar->conf_mutex); 2937 mutex_unlock(&ar->conf_mutex);
2832 2938
2833 return ar_iter.ret; 2939 return ret;
2834} 2940}
2835 2941
2836static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 2942static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 6fce9bfb19a5..ba1021997b8f 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -34,6 +34,8 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
34void ath10k_reset_scan(unsigned long ptr); 34void ath10k_reset_scan(unsigned long ptr);
35void ath10k_offchan_tx_purge(struct ath10k *ar); 35void ath10k_offchan_tx_purge(struct ath10k *ar);
36void ath10k_offchan_tx_work(struct work_struct *work); 36void ath10k_offchan_tx_work(struct work_struct *work);
37void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
38void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
37void ath10k_halt(struct ath10k *ar); 39void ath10k_halt(struct ath10k *ar);
38 40
39static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) 41static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index dff23d97bed0..f8d59c7b9082 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -720,18 +720,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
720 "ath10k tx: data: ", 720 "ath10k tx: data: ",
721 nbuf->data, nbuf->len); 721 nbuf->data, nbuf->len);
722 722
723 /* Make sure we have resources to handle this request */ 723 ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
724 spin_lock_bh(&pipe_info->pipe_lock); 724 flags);
725 if (!pipe_info->num_sends_allowed) {
726 ath10k_warn("Pipe: %d is full\n", pipe_id);
727 spin_unlock_bh(&pipe_info->pipe_lock);
728 return -ENOSR;
729 }
730 pipe_info->num_sends_allowed--;
731 spin_unlock_bh(&pipe_info->pipe_lock);
732
733 ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
734 skb_cb->paddr, len, flags);
735 if (ret) 725 if (ret)
736 ath10k_warn("CE send failed: %p\n", nbuf); 726 ath10k_warn("CE send failed: %p\n", nbuf);
737 727
@@ -741,14 +731,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
741static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 731static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
742{ 732{
743 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 733 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
744 struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]); 734 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
745 int ret;
746
747 spin_lock_bh(&pipe_info->pipe_lock);
748 ret = pipe_info->num_sends_allowed;
749 spin_unlock_bh(&pipe_info->pipe_lock);
750
751 return ret;
752} 735}
753 736
754static void ath10k_pci_hif_dump_area(struct ath10k *ar) 737static void ath10k_pci_hif_dump_area(struct ath10k *ar)
@@ -863,7 +846,6 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
863 ath10k_pci_ce_send_done, 846 ath10k_pci_ce_send_done,
864 disable_interrupts); 847 disable_interrupts);
865 completions += attr->src_nentries; 848 completions += attr->src_nentries;
866 pipe_info->num_sends_allowed = attr->src_nentries - 1;
867 } 849 }
868 850
869 if (attr->dest_nentries) { 851 if (attr->dest_nentries) {
@@ -1033,7 +1015,6 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
1033 */ 1015 */
1034 spin_lock_bh(&compl->pipe_info->pipe_lock); 1016 spin_lock_bh(&compl->pipe_info->pipe_lock);
1035 list_add_tail(&compl->list, &compl->pipe_info->compl_free); 1017 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
1036 compl->pipe_info->num_sends_allowed += send_done;
1037 spin_unlock_bh(&compl->pipe_info->pipe_lock); 1018 spin_unlock_bh(&compl->pipe_info->pipe_lock);
1038 } 1019 }
1039 1020
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 7c49f6f96f70..52fb7b973571 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -178,9 +178,6 @@ struct ath10k_pci_pipe {
178 /* List of free CE completion slots */ 178 /* List of free CE completion slots */
179 struct list_head compl_free; 179 struct list_head compl_free;
180 180
181 /* Limit the number of outstanding send requests. */
182 int num_sends_allowed;
183
184 struct ath10k_pci *ar_pci; 181 struct ath10k_pci *ar_pci;
185 struct tasklet_struct intr; 182 struct tasklet_struct intr;
186}; 183};
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 33cb19eb3d89..ccf3597fd9e2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -23,6 +23,471 @@
23#include "wmi.h" 23#include "wmi.h"
24#include "mac.h" 24#include "mac.h"
25 25
26/* MAIN WMI cmd track */
27static struct wmi_cmd_map wmi_cmd_map = {
28 .init_cmdid = WMI_INIT_CMDID,
29 .start_scan_cmdid = WMI_START_SCAN_CMDID,
30 .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
31 .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
32 .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
33 .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
34 .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
35 .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
36 .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
37 .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
38 .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
39 .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
40 .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
41 .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
42 .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
43 .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
44 .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
45 .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
46 .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
47 .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
48 .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
49 .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
50 .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
51 .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
52 .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
53 .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
54 .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
55 .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
56 .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
57 .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
58 .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
59 .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
60 .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
61 .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
62 .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
63 .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
64 .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
65 .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
66 .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
67 .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
68 .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
69 .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
70 .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
71 .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
72 .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
73 .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
74 .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
75 .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
76 .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
77 .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
78 .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
79 .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
80 .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
81 .roam_scan_mode = WMI_ROAM_SCAN_MODE,
82 .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
83 .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
84 .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
85 .roam_ap_profile = WMI_ROAM_AP_PROFILE,
86 .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
87 .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
88 .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
89 .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
90 .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
91 .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
92 .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
93 .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
94 .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
95 .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
96 .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
97 .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
98 .wlan_profile_set_hist_intvl_cmdid =
99 WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
100 .wlan_profile_get_profile_data_cmdid =
101 WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
102 .wlan_profile_enable_profile_id_cmdid =
103 WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
104 .wlan_profile_list_profile_id_cmdid =
105 WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
106 .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
107 .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
108 .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
109 .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
110 .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
111 .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
112 .wow_enable_disable_wake_event_cmdid =
113 WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
114 .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
115 .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
116 .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
117 .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
118 .vdev_spectral_scan_configure_cmdid =
119 WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
120 .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
121 .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
122 .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
123 .network_list_offload_config_cmdid =
124 WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
125 .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
126 .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
127 .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
128 .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
129 .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
130 .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
131 .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
132 .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
133 .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
134 .echo_cmdid = WMI_ECHO_CMDID,
135 .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
136 .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
137 .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
138 .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
139 .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
140 .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
141 .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
142 .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
143 .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
144};
145
146/* 10.X WMI cmd track */
147static struct wmi_cmd_map wmi_10x_cmd_map = {
148 .init_cmdid = WMI_10X_INIT_CMDID,
149 .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
150 .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
151 .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
152 .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
153 .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
154 .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
155 .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
156 .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
157 .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
158 .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
159 .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
160 .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
161 .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
162 .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
163 .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
164 .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
165 .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
166 .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
167 .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
168 .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
169 .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
170 .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
171 .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
172 .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
173 .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
174 .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
175 .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
176 .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
177 .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
178 .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
179 .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
180 .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
181 .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
182 .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
183 .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
184 .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
185 .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
186 .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
187 .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
188 .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
189 .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
190 .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
191 .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
192 .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
193 .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
194 .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
195 .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
196 .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
197 .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
198 .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
199 .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
200 .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
201 .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
202 .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
203 .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
204 .roam_scan_rssi_change_threshold =
205 WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
206 .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
207 .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
208 .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
209 .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
210 .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
211 .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
212 .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
213 .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
214 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
215 .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
216 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
217 .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
218 .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
219 .wlan_profile_set_hist_intvl_cmdid =
220 WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
221 .wlan_profile_get_profile_data_cmdid =
222 WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
223 .wlan_profile_enable_profile_id_cmdid =
224 WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
225 .wlan_profile_list_profile_id_cmdid =
226 WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
227 .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
228 .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
229 .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
230 .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
231 .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
232 .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
233 .wow_enable_disable_wake_event_cmdid =
234 WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
235 .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
236 .wow_hostwakeup_from_sleep_cmdid =
237 WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
238 .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
239 .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
240 .vdev_spectral_scan_configure_cmdid =
241 WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
242 .vdev_spectral_scan_enable_cmdid =
243 WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
244 .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
245 .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
246 .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
247 .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
248 .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
249 .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
250 .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
251 .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
252 .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
253 .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
254 .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
255 .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
256 .echo_cmdid = WMI_10X_ECHO_CMDID,
257 .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
258 .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
259 .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
260 .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
261 .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
262 .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
263 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
264 .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
265 .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
266};
267
268/* MAIN WMI VDEV param map */
269static struct wmi_vdev_param_map wmi_vdev_param_map = {
270 .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
271 .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
272 .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
273 .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
274 .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
275 .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
276 .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
277 .preamble = WMI_VDEV_PARAM_PREAMBLE,
278 .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
279 .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
280 .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
281 .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
282 .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
283 .wmi_vdev_oc_scheduler_air_time_limit =
284 WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
285 .wds = WMI_VDEV_PARAM_WDS,
286 .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
287 .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
288 .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
289 .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
290 .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
291 .chwidth = WMI_VDEV_PARAM_CHWIDTH,
292 .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
293 .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
294 .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
295 .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
296 .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
297 .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
298 .sgi = WMI_VDEV_PARAM_SGI,
299 .ldpc = WMI_VDEV_PARAM_LDPC,
300 .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
301 .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
302 .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
303 .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
304 .nss = WMI_VDEV_PARAM_NSS,
305 .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
306 .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
307 .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
308 .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
309 .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
310 .ap_keepalive_min_idle_inactive_time_secs =
311 WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
312 .ap_keepalive_max_idle_inactive_time_secs =
313 WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
314 .ap_keepalive_max_unresponsive_time_secs =
315 WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
316 .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
317 .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
318 .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
319 .txbf = WMI_VDEV_PARAM_TXBF,
320 .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
321 .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
322 .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
323 .ap_detect_out_of_sync_sleeping_sta_time_secs =
324 WMI_VDEV_PARAM_UNSUPPORTED,
325};
326
327/* 10.X WMI VDEV param map */
328static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
329 .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
330 .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
331 .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
332 .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
333 .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
334 .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
335 .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
336 .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
337 .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
338 .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
339 .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
340 .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
341 .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
342 .wmi_vdev_oc_scheduler_air_time_limit =
343 WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
344 .wds = WMI_10X_VDEV_PARAM_WDS,
345 .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
346 .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
347 .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
348 .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
349 .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
350 .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
351 .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
352 .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
353 .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
354 .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
355 .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
356 .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
357 .sgi = WMI_10X_VDEV_PARAM_SGI,
358 .ldpc = WMI_10X_VDEV_PARAM_LDPC,
359 .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
360 .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
361 .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
362 .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
363 .nss = WMI_10X_VDEV_PARAM_NSS,
364 .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
365 .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
366 .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
367 .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
368 .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
369 .ap_keepalive_min_idle_inactive_time_secs =
370 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
371 .ap_keepalive_max_idle_inactive_time_secs =
372 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
373 .ap_keepalive_max_unresponsive_time_secs =
374 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
375 .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
376 .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
377 .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
378 .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
379 .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
380 .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
381 .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
382 .ap_detect_out_of_sync_sleeping_sta_time_secs =
383 WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
384};
385
386static struct wmi_pdev_param_map wmi_pdev_param_map = {
387 .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
388 .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
389 .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
390 .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
391 .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
392 .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
393 .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
394 .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
395 .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
396 .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
397 .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
398 .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
399 .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
400 .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
401 .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
402 .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
403 .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
404 .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
405 .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
406 .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
407 .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
408 .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
409 .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
410 .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
411 .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
412 .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
413 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
414 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
415 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
416 .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
417 .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
418 .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
419 .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
420 .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
421 .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
422 .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
423 .dcs = WMI_PDEV_PARAM_DCS,
424 .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
425 .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
426 .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
427 .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
428 .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
429 .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
430 .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
431 .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
432 .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
433 .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
434 .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
435 .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
436};
437
438static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
439 .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
440 .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
441 .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
442 .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
443 .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
444 .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
445 .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
446 .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
447 .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
448 .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
449 .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
450 .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
451 .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
452 .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
453 .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
454 .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
455 .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
456 .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
457 .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
458 .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
459 .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
460 .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
461 .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
462 .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
463 .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
464 .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
465 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
466 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
467 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
468 .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
469 .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
470 .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
471 .bcnflt_stats_update_period =
472 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
473 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
474 .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
475 .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
476 .dcs = WMI_10X_PDEV_PARAM_DCS,
477 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
478 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
479 .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
480 .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
481 .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
482 .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
483 .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
484 .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
485 .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
486 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
487 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
488 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
489};
490
26int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) 491int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
27{ 492{
28 int ret; 493 int ret;
@@ -64,7 +529,7 @@ static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
64} 529}
65 530
66static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, 531static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
67 enum wmi_cmd_id cmd_id) 532 u32 cmd_id)
68{ 533{
69 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 534 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
70 struct wmi_cmd_hdr *cmd_hdr; 535 struct wmi_cmd_hdr *cmd_hdr;
@@ -144,9 +609,17 @@ static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
144} 609}
145 610
146static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, 611static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
147 enum wmi_cmd_id cmd_id) 612 u32 cmd_id)
148{ 613{
149 int ret = -EINVAL; 614 int ret = -EOPNOTSUPP;
615
616 might_sleep();
617
618 if (cmd_id == WMI_CMD_UNSUPPORTED) {
619 ath10k_warn("wmi command %d is not supported by firmware\n",
620 cmd_id);
621 return ret;
622 }
150 623
151 wait_event_timeout(ar->wmi.tx_credits_wq, ({ 624 wait_event_timeout(ar->wmi.tx_credits_wq, ({
152 /* try to send pending beacons first. they take priority */ 625 /* try to send pending beacons first. they take priority */
@@ -162,6 +635,57 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
162 return ret; 635 return ret;
163} 636}
164 637
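The two hunks above switch ath10k_wmi_cmd_send_nowait() and ath10k_wmi_cmd_send() from taking an enum wmi_cmd_id to a plain u32 that callers look up in the per-firmware command map, with WMI_CMD_UNSUPPORTED (0) rejected up front with -EOPNOTSUPP. As a hedged illustration of the calling pattern (the helper name below is hypothetical, not part of the patch):

	/* Illustrative sketch only, not part of the diff: callers resolve the
	 * command id through ar->wmi.cmd, so a firmware branch that lacks the
	 * command (map entry == WMI_CMD_UNSUPPORTED) gets -EOPNOTSUPP instead
	 * of a bogus WMI send. */
	static int example_send_echo(struct ath10k *ar, struct sk_buff *skb)
	{
		u32 cmd_id = ar->wmi.cmd->echo_cmdid;

		return ath10k_wmi_cmd_send(ar, skb, cmd_id);
	}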
638int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
639{
640 int ret = 0;
641 struct wmi_mgmt_tx_cmd *cmd;
642 struct ieee80211_hdr *hdr;
643 struct sk_buff *wmi_skb;
644 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
645 int len;
646 u16 fc;
647
648 hdr = (struct ieee80211_hdr *)skb->data;
649 fc = le16_to_cpu(hdr->frame_control);
650
651 if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
652 return -EINVAL;
653
654 len = sizeof(cmd->hdr) + skb->len;
655 len = round_up(len, 4);
656
657 wmi_skb = ath10k_wmi_alloc_skb(len);
658 if (!wmi_skb)
659 return -ENOMEM;
660
661 cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
662
663 cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
664 cmd->hdr.tx_rate = 0;
665 cmd->hdr.tx_power = 0;
666 cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
667
668 memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
669 memcpy(cmd->buf, skb->data, skb->len);
670
671 ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
672 wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
673 fc & IEEE80211_FCTL_STYPE);
674
675 /* Send the management frame buffer to the target */
676 ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
677 if (ret) {
678 dev_kfree_skb_any(skb);
679 return ret;
680 }
681
682 /* TODO: report tx status to mac80211 - temporary just ACK */
683 info->flags |= IEEE80211_TX_STAT_ACK;
684 ieee80211_tx_status_irqsafe(ar->hw, skb);
685
686 return ret;
687}
688
165static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) 689static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
166{ 690{
167 struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data; 691 struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
@@ -964,6 +1488,55 @@ static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
964 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); 1488 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
965} 1489}
966 1490
1491static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
1492 struct sk_buff *skb)
1493{
1494 ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
1495}
1496
1497static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
1498 struct sk_buff *skb)
1499{
1500 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
1501}
1502
1503static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
1504 struct sk_buff *skb)
1505{
1506 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
1507}
1508
1509static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
1510 u32 num_units, u32 unit_len)
1511{
1512 dma_addr_t paddr;
1513 u32 pool_size;
1514 int idx = ar->wmi.num_mem_chunks;
1515
1516 pool_size = num_units * round_up(unit_len, 4);
1517
1518 if (!pool_size)
1519 return -EINVAL;
1520
1521 ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
1522 pool_size,
1523 &paddr,
1524 GFP_ATOMIC);
1525 if (!ar->wmi.mem_chunks[idx].vaddr) {
1526 ath10k_warn("failed to allocate memory chunk\n");
1527 return -ENOMEM;
1528 }
1529
1530 memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
1531
1532 ar->wmi.mem_chunks[idx].paddr = paddr;
1533 ar->wmi.mem_chunks[idx].len = pool_size;
1534 ar->wmi.mem_chunks[idx].req_id = req_id;
1535 ar->wmi.num_mem_chunks++;
1536
1537 return 0;
1538}
1539
967static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, 1540static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
968 struct sk_buff *skb) 1541 struct sk_buff *skb)
969{ 1542{
@@ -988,7 +1561,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
988 ar->phy_capability = __le32_to_cpu(ev->phy_capability); 1561 ar->phy_capability = __le32_to_cpu(ev->phy_capability);
989 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); 1562 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
990 1563
991 if (ar->fw_version_build > 636) 1564 /* only manually set fw features when not using FW IE format */
1565 if (ar->fw_api == 1 && ar->fw_version_build > 636)
992 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); 1566 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
993 1567
994 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 1568 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
@@ -1035,6 +1609,108 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
1035 complete(&ar->wmi.service_ready); 1609 complete(&ar->wmi.service_ready);
1036} 1610}
1037 1611
1612static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
1613 struct sk_buff *skb)
1614{
1615 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
1616 int ret;
1617 struct wmi_service_ready_event_10x *ev = (void *)skb->data;
1618
1619 if (skb->len < sizeof(*ev)) {
1620 ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
1621 skb->len, sizeof(*ev));
1622 return;
1623 }
1624
1625 ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
1626 ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
1627 ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
1628 ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
1629 ar->fw_version_major =
1630 (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
1631 ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
1632 ar->phy_capability = __le32_to_cpu(ev->phy_capability);
1633 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
1634
1635 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
1636 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
1637 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
1638 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
1639 }
1640
1641 ar->ath_common.regulatory.current_rd =
1642 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
1643
1644 ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
1645 sizeof(ev->wmi_service_bitmap));
1646
1647 if (strlen(ar->hw->wiphy->fw_version) == 0) {
1648 snprintf(ar->hw->wiphy->fw_version,
1649 sizeof(ar->hw->wiphy->fw_version),
1650 "%u.%u",
1651 ar->fw_version_major,
1652 ar->fw_version_minor);
1653 }
1654
1655 num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
1656
1657 if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
1658 ath10k_warn("requested memory chunks number (%d) exceeds the limit\n",
1659 num_mem_reqs);
1660 return;
1661 }
1662
1663 if (!num_mem_reqs)
1664 goto exit;
1665
1666 ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
1667 num_mem_reqs);
1668
1669 for (i = 0; i < num_mem_reqs; ++i) {
1670 req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
1671 num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
1672 unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
1673 num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
1674
1675 if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
 1676 /* number of units to allocate is the number of
 1677 * peers, plus 1 extra for the self peer on the target */
 1678 /* this needs to be kept in sync - host and target
 1679 * can otherwise drift apart */
1680 num_units = TARGET_10X_NUM_PEERS + 1;
1681 else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
1682 num_units = TARGET_10X_NUM_VDEVS + 1;
1683
1684 ath10k_dbg(ATH10K_DBG_WMI,
1685 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
1686 req_id,
1687 __le32_to_cpu(ev->mem_reqs[i].num_units),
1688 num_unit_info,
1689 unit_size,
1690 num_units);
1691
1692 ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
1693 unit_size);
1694 if (ret)
1695 return;
1696 }
1697
1698exit:
1699 ath10k_dbg(ATH10K_DBG_WMI,
1700 "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
1701 __le32_to_cpu(ev->sw_version),
1702 __le32_to_cpu(ev->abi_version),
1703 __le32_to_cpu(ev->phy_capability),
1704 __le32_to_cpu(ev->ht_cap_info),
1705 __le32_to_cpu(ev->vht_cap_info),
1706 __le32_to_cpu(ev->vht_supp_mcs),
1707 __le32_to_cpu(ev->sys_cap_info),
1708 __le32_to_cpu(ev->num_mem_reqs),
1709 __le32_to_cpu(ev->num_rf_chains));
1710
1711 complete(&ar->wmi.service_ready);
1712}
1713
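For reference, the 10.X service-ready handler above decodes sw_version the same way the main handler does: the top byte is the major number and the low 24 bits the minor. A small worked example with a made-up value:

	/* Example only (value is invented): sw_version 0x0a00007b -> "10.123" */
	u32 sw_version = 0x0a00007b;
	u32 major = (sw_version & 0xff000000) >> 24;	/* 10  */
	u32 minor =  sw_version & 0x00ffffff;		/* 123 */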
1038static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) 1714static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
1039{ 1715{
1040 struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; 1716 struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
@@ -1055,7 +1731,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
1055 return 0; 1731 return 0;
1056} 1732}
1057 1733
1058static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) 1734static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
1059{ 1735{
1060 struct wmi_cmd_hdr *cmd_hdr; 1736 struct wmi_cmd_hdr *cmd_hdr;
1061 enum wmi_event_id id; 1737 enum wmi_event_id id;
@@ -1174,9 +1850,138 @@ static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
1174 dev_kfree_skb(skb); 1850 dev_kfree_skb(skb);
1175} 1851}
1176 1852
1853static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
1854{
1855 struct wmi_cmd_hdr *cmd_hdr;
1856 enum wmi_10x_event_id id;
1857 u16 len;
1858
1859 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1860 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
1861
1862 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
1863 return;
1864
1865 len = skb->len;
1866
1867 trace_ath10k_wmi_event(id, skb->data, skb->len);
1868
1869 switch (id) {
1870 case WMI_10X_MGMT_RX_EVENTID:
1871 ath10k_wmi_event_mgmt_rx(ar, skb);
1872 /* mgmt_rx() owns the skb now! */
1873 return;
1874 case WMI_10X_SCAN_EVENTID:
1875 ath10k_wmi_event_scan(ar, skb);
1876 break;
1877 case WMI_10X_CHAN_INFO_EVENTID:
1878 ath10k_wmi_event_chan_info(ar, skb);
1879 break;
1880 case WMI_10X_ECHO_EVENTID:
1881 ath10k_wmi_event_echo(ar, skb);
1882 break;
1883 case WMI_10X_DEBUG_MESG_EVENTID:
1884 ath10k_wmi_event_debug_mesg(ar, skb);
1885 break;
1886 case WMI_10X_UPDATE_STATS_EVENTID:
1887 ath10k_wmi_event_update_stats(ar, skb);
1888 break;
1889 case WMI_10X_VDEV_START_RESP_EVENTID:
1890 ath10k_wmi_event_vdev_start_resp(ar, skb);
1891 break;
1892 case WMI_10X_VDEV_STOPPED_EVENTID:
1893 ath10k_wmi_event_vdev_stopped(ar, skb);
1894 break;
1895 case WMI_10X_PEER_STA_KICKOUT_EVENTID:
1896 ath10k_wmi_event_peer_sta_kickout(ar, skb);
1897 break;
1898 case WMI_10X_HOST_SWBA_EVENTID:
1899 ath10k_wmi_event_host_swba(ar, skb);
1900 break;
1901 case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
1902 ath10k_wmi_event_tbttoffset_update(ar, skb);
1903 break;
1904 case WMI_10X_PHYERR_EVENTID:
1905 ath10k_wmi_event_phyerr(ar, skb);
1906 break;
1907 case WMI_10X_ROAM_EVENTID:
1908 ath10k_wmi_event_roam(ar, skb);
1909 break;
1910 case WMI_10X_PROFILE_MATCH:
1911 ath10k_wmi_event_profile_match(ar, skb);
1912 break;
1913 case WMI_10X_DEBUG_PRINT_EVENTID:
1914 ath10k_wmi_event_debug_print(ar, skb);
1915 break;
1916 case WMI_10X_PDEV_QVIT_EVENTID:
1917 ath10k_wmi_event_pdev_qvit(ar, skb);
1918 break;
1919 case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
1920 ath10k_wmi_event_wlan_profile_data(ar, skb);
1921 break;
1922 case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
1923 ath10k_wmi_event_rtt_measurement_report(ar, skb);
1924 break;
1925 case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
1926 ath10k_wmi_event_tsf_measurement_report(ar, skb);
1927 break;
1928 case WMI_10X_RTT_ERROR_REPORT_EVENTID:
1929 ath10k_wmi_event_rtt_error_report(ar, skb);
1930 break;
1931 case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
1932 ath10k_wmi_event_wow_wakeup_host(ar, skb);
1933 break;
1934 case WMI_10X_DCS_INTERFERENCE_EVENTID:
1935 ath10k_wmi_event_dcs_interference(ar, skb);
1936 break;
1937 case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
1938 ath10k_wmi_event_pdev_tpc_config(ar, skb);
1939 break;
1940 case WMI_10X_INST_RSSI_STATS_EVENTID:
1941 ath10k_wmi_event_inst_rssi_stats(ar, skb);
1942 break;
1943 case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
1944 ath10k_wmi_event_vdev_standby_req(ar, skb);
1945 break;
1946 case WMI_10X_VDEV_RESUME_REQ_EVENTID:
1947 ath10k_wmi_event_vdev_resume_req(ar, skb);
1948 break;
1949 case WMI_10X_SERVICE_READY_EVENTID:
1950 ath10k_wmi_10x_service_ready_event_rx(ar, skb);
1951 break;
1952 case WMI_10X_READY_EVENTID:
1953 ath10k_wmi_ready_event_rx(ar, skb);
1954 break;
1955 default:
1956 ath10k_warn("Unknown eventid: %d\n", id);
1957 break;
1958 }
1959
1960 dev_kfree_skb(skb);
1961}
1962
1963
1964static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
1965{
1966 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
1967 ath10k_wmi_10x_process_rx(ar, skb);
1968 else
1969 ath10k_wmi_main_process_rx(ar, skb);
1970}
1971
1177/* WMI Initialization functions */ 1972/* WMI Initialization functions */
1178int ath10k_wmi_attach(struct ath10k *ar) 1973int ath10k_wmi_attach(struct ath10k *ar)
1179{ 1974{
1975 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
1976 ar->wmi.cmd = &wmi_10x_cmd_map;
1977 ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
1978 ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
1979 } else {
1980 ar->wmi.cmd = &wmi_cmd_map;
1981 ar->wmi.vdev_param = &wmi_vdev_param_map;
1982 ar->wmi.pdev_param = &wmi_pdev_param_map;
1983 }
1984
1180 init_completion(&ar->wmi.service_ready); 1985 init_completion(&ar->wmi.service_ready);
1181 init_completion(&ar->wmi.unified_ready); 1986 init_completion(&ar->wmi.unified_ready);
1182 init_waitqueue_head(&ar->wmi.tx_credits_wq); 1987 init_waitqueue_head(&ar->wmi.tx_credits_wq);
@@ -1186,6 +1991,17 @@ int ath10k_wmi_attach(struct ath10k *ar)
1186 1991
1187void ath10k_wmi_detach(struct ath10k *ar) 1992void ath10k_wmi_detach(struct ath10k *ar)
1188{ 1993{
1994 int i;
1995
1996 /* free the host memory chunks requested by firmware */
1997 for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
1998 dma_free_coherent(ar->dev,
1999 ar->wmi.mem_chunks[i].len,
2000 ar->wmi.mem_chunks[i].vaddr,
2001 ar->wmi.mem_chunks[i].paddr);
2002 }
2003
2004 ar->wmi.num_mem_chunks = 0;
1189} 2005}
1190 2006
1191int ath10k_wmi_connect_htc_service(struct ath10k *ar) 2007int ath10k_wmi_connect_htc_service(struct ath10k *ar)
@@ -1237,7 +2053,8 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
1237 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", 2053 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
1238 rd, rd2g, rd5g, ctl2g, ctl5g); 2054 rd, rd2g, rd5g, ctl2g, ctl5g);
1239 2055
1240 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); 2056 return ath10k_wmi_cmd_send(ar, skb,
2057 ar->wmi.cmd->pdev_set_regdomain_cmdid);
1241} 2058}
1242 2059
1243int ath10k_wmi_pdev_set_channel(struct ath10k *ar, 2060int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -1267,7 +2084,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
1267 "wmi set channel mode %d freq %d\n", 2084 "wmi set channel mode %d freq %d\n",
1268 arg->mode, arg->freq); 2085 arg->mode, arg->freq);
1269 2086
1270 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID); 2087 return ath10k_wmi_cmd_send(ar, skb,
2088 ar->wmi.cmd->pdev_set_channel_cmdid);
1271} 2089}
1272 2090
1273int ath10k_wmi_pdev_suspend_target(struct ath10k *ar) 2091int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
@@ -1282,7 +2100,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
1282 cmd = (struct wmi_pdev_suspend_cmd *)skb->data; 2100 cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1283 cmd->suspend_opt = WMI_PDEV_SUSPEND; 2101 cmd->suspend_opt = WMI_PDEV_SUSPEND;
1284 2102
1285 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID); 2103 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
1286} 2104}
1287 2105
1288int ath10k_wmi_pdev_resume_target(struct ath10k *ar) 2106int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
@@ -1293,15 +2111,19 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
1293 if (skb == NULL) 2111 if (skb == NULL)
1294 return -ENOMEM; 2112 return -ENOMEM;
1295 2113
1296 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID); 2114 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
1297} 2115}
1298 2116
1299int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, 2117int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
1300 u32 value)
1301{ 2118{
1302 struct wmi_pdev_set_param_cmd *cmd; 2119 struct wmi_pdev_set_param_cmd *cmd;
1303 struct sk_buff *skb; 2120 struct sk_buff *skb;
1304 2121
2122 if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
2123 ath10k_warn("pdev param %d not supported by firmware\n", id);
2124 return -EOPNOTSUPP;
2125 }
2126
1305 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2127 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1306 if (!skb) 2128 if (!skb)
1307 return -ENOMEM; 2129 return -ENOMEM;
@@ -1312,15 +2134,16 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
1312 2134
1313 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", 2135 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
1314 id, value); 2136 id, value);
1315 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID); 2137 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
1316} 2138}
1317 2139
1318int ath10k_wmi_cmd_init(struct ath10k *ar) 2140static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
1319{ 2141{
1320 struct wmi_init_cmd *cmd; 2142 struct wmi_init_cmd *cmd;
1321 struct sk_buff *buf; 2143 struct sk_buff *buf;
1322 struct wmi_resource_config config = {}; 2144 struct wmi_resource_config config = {};
1323 u32 val; 2145 u32 len, val;
2146 int i;
1324 2147
1325 config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); 2148 config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
1326 config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); 2149 config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
@@ -1373,23 +2196,158 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
1373 config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC); 2196 config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
1374 config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES); 2197 config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
1375 2198
1376 buf = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2199 len = sizeof(*cmd) +
2200 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
2201
2202 buf = ath10k_wmi_alloc_skb(len);
1377 if (!buf) 2203 if (!buf)
1378 return -ENOMEM; 2204 return -ENOMEM;
1379 2205
1380 cmd = (struct wmi_init_cmd *)buf->data; 2206 cmd = (struct wmi_init_cmd *)buf->data;
1381 cmd->num_host_mem_chunks = 0; 2207
2208 if (ar->wmi.num_mem_chunks == 0) {
2209 cmd->num_host_mem_chunks = 0;
2210 goto out;
2211 }
2212
2213 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
2214 ar->wmi.num_mem_chunks);
2215
2216 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
2217
2218 for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
2219 cmd->host_mem_chunks[i].ptr =
2220 __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
2221 cmd->host_mem_chunks[i].size =
2222 __cpu_to_le32(ar->wmi.mem_chunks[i].len);
2223 cmd->host_mem_chunks[i].req_id =
2224 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
2225
2226 ath10k_dbg(ATH10K_DBG_WMI,
2227 "wmi chunk %d len %d requested, addr 0x%x\n",
2228 i,
2229 cmd->host_mem_chunks[i].size,
2230 cmd->host_mem_chunks[i].ptr);
2231 }
2232out:
1382 memcpy(&cmd->resource_config, &config, sizeof(config)); 2233 memcpy(&cmd->resource_config, &config, sizeof(config));
1383 2234
1384 ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); 2235 ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
1385 return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID); 2236 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
2237}
2238
2239static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
2240{
2241 struct wmi_init_cmd_10x *cmd;
2242 struct sk_buff *buf;
2243 struct wmi_resource_config_10x config = {};
2244 u32 len, val;
2245 int i;
2246
2247 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
2248 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
2249 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
2250 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
2251 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
2252 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
2253 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
2254 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
2255 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
2256 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
2257 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
2258 config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
2259
2260 config.scan_max_pending_reqs =
2261 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
2262
2263 config.bmiss_offload_max_vdev =
2264 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
2265
2266 config.roam_offload_max_vdev =
2267 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
2268
2269 config.roam_offload_max_ap_profiles =
2270 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
2271
2272 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
2273 config.num_mcast_table_elems =
2274 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
2275
2276 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
2277 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
2278 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
2279 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
2280 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
2281
2282 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
2283 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
2284
2285 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
2286
2287 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
2288 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
2289
2290 len = sizeof(*cmd) +
2291 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
2292
2293 buf = ath10k_wmi_alloc_skb(len);
2294 if (!buf)
2295 return -ENOMEM;
2296
2297 cmd = (struct wmi_init_cmd_10x *)buf->data;
2298
2299 if (ar->wmi.num_mem_chunks == 0) {
2300 cmd->num_host_mem_chunks = 0;
2301 goto out;
2302 }
2303
2304 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
2305 ar->wmi.num_mem_chunks);
2306
2307 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
2308
2309 for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
2310 cmd->host_mem_chunks[i].ptr =
2311 __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
2312 cmd->host_mem_chunks[i].size =
2313 __cpu_to_le32(ar->wmi.mem_chunks[i].len);
2314 cmd->host_mem_chunks[i].req_id =
2315 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
2316
2317 ath10k_dbg(ATH10K_DBG_WMI,
2318 "wmi chunk %d len %d requested, addr 0x%x\n",
2319 i,
2320 cmd->host_mem_chunks[i].size,
2321 cmd->host_mem_chunks[i].ptr);
2322 }
2323out:
2324 memcpy(&cmd->resource_config, &config, sizeof(config));
2325
2326 ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
2327 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
2328}
2329
2330int ath10k_wmi_cmd_init(struct ath10k *ar)
2331{
2332 int ret;
2333
2334 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
2335 ret = ath10k_wmi_10x_cmd_init(ar);
2336 else
2337 ret = ath10k_wmi_main_cmd_init(ar);
2338
2339 return ret;
1386} 2340}
1387 2341
1388static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg) 2342static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
2343 const struct wmi_start_scan_arg *arg)
1389{ 2344{
1390 int len; 2345 int len;
1391 2346
1392 len = sizeof(struct wmi_start_scan_cmd); 2347 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
2348 len = sizeof(struct wmi_start_scan_cmd_10x);
2349 else
2350 len = sizeof(struct wmi_start_scan_cmd);
1393 2351
1394 if (arg->ie_len) { 2352 if (arg->ie_len) {
1395 if (!arg->ie) 2353 if (!arg->ie)
@@ -1449,7 +2407,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
1449 int len = 0; 2407 int len = 0;
1450 int i; 2408 int i;
1451 2409
1452 len = ath10k_wmi_start_scan_calc_len(arg); 2410 len = ath10k_wmi_start_scan_calc_len(ar, arg);
1453 if (len < 0) 2411 if (len < 0)
1454 return len; /* len contains error code here */ 2412 return len; /* len contains error code here */
1455 2413
@@ -1481,7 +2439,14 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
1481 cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); 2439 cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
1482 2440
1483 /* TLV list starts after fields included in the struct */ 2441 /* TLV list starts after fields included in the struct */
1484 off = sizeof(*cmd); 2442 /* There's just one field that differs between the two start_scan
2443 * structures - burst_duration, which we are not using anyway,
2444 * so there is no point in splitting here; just shift the buffer to
2445 * fit the given FW */
2446 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
2447 off = sizeof(struct wmi_start_scan_cmd_10x);
2448 else
2449 off = sizeof(struct wmi_start_scan_cmd);
1485 2450
1486 if (arg->n_channels) { 2451 if (arg->n_channels) {
1487 channels = (void *)skb->data + off; 2452 channels = (void *)skb->data + off;
@@ -1543,7 +2508,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
1543 } 2508 }
1544 2509
1545 ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); 2510 ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
1546 return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID); 2511 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
1547} 2512}
1548 2513
1549void ath10k_wmi_start_scan_init(struct ath10k *ar, 2514void ath10k_wmi_start_scan_init(struct ath10k *ar,
@@ -1559,7 +2524,7 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
1559 arg->repeat_probe_time = 0; 2524 arg->repeat_probe_time = 0;
1560 arg->probe_spacing_time = 0; 2525 arg->probe_spacing_time = 0;
1561 arg->idle_time = 0; 2526 arg->idle_time = 0;
1562 arg->max_scan_time = 5000; 2527 arg->max_scan_time = 20000;
1563 arg->probe_delay = 5; 2528 arg->probe_delay = 5;
1564 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED 2529 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
1565 | WMI_SCAN_EVENT_COMPLETED 2530 | WMI_SCAN_EVENT_COMPLETED
@@ -1603,7 +2568,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
1603 ath10k_dbg(ATH10K_DBG_WMI, 2568 ath10k_dbg(ATH10K_DBG_WMI,
1604 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", 2569 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
1605 arg->req_id, arg->req_type, arg->u.scan_id); 2570 arg->req_id, arg->req_type, arg->u.scan_id);
1606 return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID); 2571 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
1607} 2572}
1608 2573
1609int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, 2574int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
@@ -1628,7 +2593,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
1628 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", 2593 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
1629 vdev_id, type, subtype, macaddr); 2594 vdev_id, type, subtype, macaddr);
1630 2595
1631 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID); 2596 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
1632} 2597}
1633 2598
1634int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) 2599int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
@@ -1646,20 +2611,20 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
1646 ath10k_dbg(ATH10K_DBG_WMI, 2611 ath10k_dbg(ATH10K_DBG_WMI,
1647 "WMI vdev delete id %d\n", vdev_id); 2612 "WMI vdev delete id %d\n", vdev_id);
1648 2613
1649 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID); 2614 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
1650} 2615}
1651 2616
1652static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, 2617static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
1653 const struct wmi_vdev_start_request_arg *arg, 2618 const struct wmi_vdev_start_request_arg *arg,
1654 enum wmi_cmd_id cmd_id) 2619 u32 cmd_id)
1655{ 2620{
1656 struct wmi_vdev_start_request_cmd *cmd; 2621 struct wmi_vdev_start_request_cmd *cmd;
1657 struct sk_buff *skb; 2622 struct sk_buff *skb;
1658 const char *cmdname; 2623 const char *cmdname;
1659 u32 flags = 0; 2624 u32 flags = 0;
1660 2625
1661 if (cmd_id != WMI_VDEV_START_REQUEST_CMDID && 2626 if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
1662 cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID) 2627 cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
1663 return -EINVAL; 2628 return -EINVAL;
1664 if (WARN_ON(arg->ssid && arg->ssid_len == 0)) 2629 if (WARN_ON(arg->ssid && arg->ssid_len == 0))
1665 return -EINVAL; 2630 return -EINVAL;
@@ -1668,9 +2633,9 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
1668 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) 2633 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
1669 return -EINVAL; 2634 return -EINVAL;
1670 2635
1671 if (cmd_id == WMI_VDEV_START_REQUEST_CMDID) 2636 if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
1672 cmdname = "start"; 2637 cmdname = "start";
1673 else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID) 2638 else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
1674 cmdname = "restart"; 2639 cmdname = "restart";
1675 else 2640 else
1676 return -EINVAL; /* should not happen, we already check cmd_id */ 2641 return -EINVAL; /* should not happen, we already check cmd_id */
@@ -1721,15 +2686,17 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
1721int ath10k_wmi_vdev_start(struct ath10k *ar, 2686int ath10k_wmi_vdev_start(struct ath10k *ar,
1722 const struct wmi_vdev_start_request_arg *arg) 2687 const struct wmi_vdev_start_request_arg *arg)
1723{ 2688{
1724 return ath10k_wmi_vdev_start_restart(ar, arg, 2689 u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
1725 WMI_VDEV_START_REQUEST_CMDID); 2690
2691 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
1726} 2692}
1727 2693
1728int ath10k_wmi_vdev_restart(struct ath10k *ar, 2694int ath10k_wmi_vdev_restart(struct ath10k *ar,
1729 const struct wmi_vdev_start_request_arg *arg) 2695 const struct wmi_vdev_start_request_arg *arg)
1730{ 2696{
1731 return ath10k_wmi_vdev_start_restart(ar, arg, 2697 u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
1732 WMI_VDEV_RESTART_REQUEST_CMDID); 2698
2699 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
1733} 2700}
1734 2701
1735int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) 2702int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
@@ -1746,7 +2713,7 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
1746 2713
1747 ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); 2714 ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
1748 2715
1749 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID); 2716 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
1750} 2717}
1751 2718
1752int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) 2719int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
@@ -1767,7 +2734,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
1767 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 2734 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1768 vdev_id, aid, bssid); 2735 vdev_id, aid, bssid);
1769 2736
1770 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID); 2737 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
1771} 2738}
1772 2739
1773int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) 2740int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
@@ -1785,15 +2752,22 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
1785 ath10k_dbg(ATH10K_DBG_WMI, 2752 ath10k_dbg(ATH10K_DBG_WMI,
1786 "wmi mgmt vdev down id 0x%x\n", vdev_id); 2753 "wmi mgmt vdev down id 0x%x\n", vdev_id);
1787 2754
1788 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID); 2755 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
1789} 2756}
1790 2757
1791int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, 2758int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
1792 enum wmi_vdev_param param_id, u32 param_value) 2759 u32 param_id, u32 param_value)
1793{ 2760{
1794 struct wmi_vdev_set_param_cmd *cmd; 2761 struct wmi_vdev_set_param_cmd *cmd;
1795 struct sk_buff *skb; 2762 struct sk_buff *skb;
1796 2763
2764 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
2765 ath10k_dbg(ATH10K_DBG_WMI,
2766 "vdev param %d not supported by firmware\n",
2767 param_id);
2768 return -EOPNOTSUPP;
2769 }
2770
1797 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2771 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1798 if (!skb) 2772 if (!skb)
1799 return -ENOMEM; 2773 return -ENOMEM;
@@ -1807,7 +2781,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
1807 "wmi vdev id 0x%x set param %d value %d\n", 2781 "wmi vdev id 0x%x set param %d value %d\n",
1808 vdev_id, param_id, param_value); 2782 vdev_id, param_id, param_value);
1809 2783
1810 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID); 2784 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
1811} 2785}
1812 2786
1813int ath10k_wmi_vdev_install_key(struct ath10k *ar, 2787int ath10k_wmi_vdev_install_key(struct ath10k *ar,
@@ -1842,7 +2816,8 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
1842 ath10k_dbg(ATH10K_DBG_WMI, 2816 ath10k_dbg(ATH10K_DBG_WMI,
1843 "wmi vdev install key idx %d cipher %d len %d\n", 2817 "wmi vdev install key idx %d cipher %d len %d\n",
1844 arg->key_idx, arg->key_cipher, arg->key_len); 2818 arg->key_idx, arg->key_cipher, arg->key_len);
1845 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID); 2819 return ath10k_wmi_cmd_send(ar, skb,
2820 ar->wmi.cmd->vdev_install_key_cmdid);
1846} 2821}
1847 2822
1848int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 2823int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -1862,7 +2837,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
1862 ath10k_dbg(ATH10K_DBG_WMI, 2837 ath10k_dbg(ATH10K_DBG_WMI,
1863 "wmi peer create vdev_id %d peer_addr %pM\n", 2838 "wmi peer create vdev_id %d peer_addr %pM\n",
1864 vdev_id, peer_addr); 2839 vdev_id, peer_addr);
1865 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID); 2840 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
1866} 2841}
1867 2842
1868int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, 2843int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
@@ -1882,7 +2857,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
1882 ath10k_dbg(ATH10K_DBG_WMI, 2857 ath10k_dbg(ATH10K_DBG_WMI,
1883 "wmi peer delete vdev_id %d peer_addr %pM\n", 2858 "wmi peer delete vdev_id %d peer_addr %pM\n",
1884 vdev_id, peer_addr); 2859 vdev_id, peer_addr);
1885 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID); 2860 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
1886} 2861}
1887 2862
1888int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, 2863int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
@@ -1903,7 +2878,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
1903 ath10k_dbg(ATH10K_DBG_WMI, 2878 ath10k_dbg(ATH10K_DBG_WMI,
1904 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", 2879 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
1905 vdev_id, peer_addr, tid_bitmap); 2880 vdev_id, peer_addr, tid_bitmap);
1906 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID); 2881 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
1907} 2882}
1908 2883
1909int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, 2884int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
@@ -1927,7 +2902,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
1927 "wmi vdev %d peer 0x%pM set param %d value %d\n", 2902 "wmi vdev %d peer 0x%pM set param %d value %d\n",
1928 vdev_id, peer_addr, param_id, param_value); 2903 vdev_id, peer_addr, param_id, param_value);
1929 2904
1930 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID); 2905 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
1931} 2906}
1932 2907
1933int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, 2908int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
@@ -1948,7 +2923,8 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
1948 "wmi set powersave id 0x%x mode %d\n", 2923 "wmi set powersave id 0x%x mode %d\n",
1949 vdev_id, psmode); 2924 vdev_id, psmode);
1950 2925
1951 return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID); 2926 return ath10k_wmi_cmd_send(ar, skb,
2927 ar->wmi.cmd->sta_powersave_mode_cmdid);
1952} 2928}
1953 2929
1954int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, 2930int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
@@ -1970,7 +2946,8 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
1970 ath10k_dbg(ATH10K_DBG_WMI, 2946 ath10k_dbg(ATH10K_DBG_WMI,
1971 "wmi sta ps param vdev_id 0x%x param %d value %d\n", 2947 "wmi sta ps param vdev_id 0x%x param %d value %d\n",
1972 vdev_id, param_id, value); 2948 vdev_id, param_id, value);
1973 return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID); 2949 return ath10k_wmi_cmd_send(ar, skb,
2950 ar->wmi.cmd->sta_powersave_param_cmdid);
1974} 2951}
1975 2952
1976int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, 2953int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
@@ -1996,7 +2973,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1996 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", 2973 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
1997 vdev_id, param_id, value, mac); 2974 vdev_id, param_id, value, mac);
1998 2975
1999 return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID); 2976 return ath10k_wmi_cmd_send(ar, skb,
2977 ar->wmi.cmd->ap_ps_peer_param_cmdid);
2000} 2978}
2001 2979
2002int ath10k_wmi_scan_chan_list(struct ath10k *ar, 2980int ath10k_wmi_scan_chan_list(struct ath10k *ar,
@@ -2049,7 +3027,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
2049 ci->flags |= __cpu_to_le32(flags); 3027 ci->flags |= __cpu_to_le32(flags);
2050 } 3028 }
2051 3029
2052 return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID); 3030 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
2053} 3031}
2054 3032
2055int ath10k_wmi_peer_assoc(struct ath10k *ar, 3033int ath10k_wmi_peer_assoc(struct ath10k *ar,
@@ -2108,7 +3086,7 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
2108 ath10k_dbg(ATH10K_DBG_WMI, 3086 ath10k_dbg(ATH10K_DBG_WMI,
2109 "wmi peer assoc vdev %d addr %pM\n", 3087 "wmi peer assoc vdev %d addr %pM\n",
2110 arg->vdev_id, arg->addr); 3088 arg->vdev_id, arg->addr);
2111 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID); 3089 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
2112} 3090}
2113 3091
2114int ath10k_wmi_beacon_send_nowait(struct ath10k *ar, 3092int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
@@ -2128,7 +3106,7 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
2128 cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len); 3106 cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
2129 memcpy(cmd->bcn, arg->bcn, arg->bcn_len); 3107 memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
2130 3108
2131 return ath10k_wmi_cmd_send_nowait(ar, skb, WMI_BCN_TX_CMDID); 3109 return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
2132} 3110}
2133 3111
2134static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, 3112static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
@@ -2159,7 +3137,8 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
2159 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); 3137 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
2160 3138
2161 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); 3139 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
2162 return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID); 3140 return ath10k_wmi_cmd_send(ar, skb,
3141 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
2163} 3142}
2164 3143
2165int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) 3144int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
@@ -2175,7 +3154,7 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
2175 cmd->stats_id = __cpu_to_le32(stats_id); 3154 cmd->stats_id = __cpu_to_le32(stats_id);
2176 3155
2177 ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); 3156 ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
2178 return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID); 3157 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
2179} 3158}
2180 3159
2181int ath10k_wmi_force_fw_hang(struct ath10k *ar, 3160int ath10k_wmi_force_fw_hang(struct ath10k *ar,
@@ -2194,5 +3173,5 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
2194 3173
2195 ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", 3174 ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
2196 type, delay_ms); 3175 type, delay_ms);
2197 return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID); 3176 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
2198} 3177}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2c52c23107dd..78c991aec7f9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -208,6 +208,118 @@ struct wmi_mac_addr {
208 (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \ 208 (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
209 } while (0) 209 } while (0)
210 210
211struct wmi_cmd_map {
212 u32 init_cmdid;
213 u32 start_scan_cmdid;
214 u32 stop_scan_cmdid;
215 u32 scan_chan_list_cmdid;
216 u32 scan_sch_prio_tbl_cmdid;
217 u32 pdev_set_regdomain_cmdid;
218 u32 pdev_set_channel_cmdid;
219 u32 pdev_set_param_cmdid;
220 u32 pdev_pktlog_enable_cmdid;
221 u32 pdev_pktlog_disable_cmdid;
222 u32 pdev_set_wmm_params_cmdid;
223 u32 pdev_set_ht_cap_ie_cmdid;
224 u32 pdev_set_vht_cap_ie_cmdid;
225 u32 pdev_set_dscp_tid_map_cmdid;
226 u32 pdev_set_quiet_mode_cmdid;
227 u32 pdev_green_ap_ps_enable_cmdid;
228 u32 pdev_get_tpc_config_cmdid;
229 u32 pdev_set_base_macaddr_cmdid;
230 u32 vdev_create_cmdid;
231 u32 vdev_delete_cmdid;
232 u32 vdev_start_request_cmdid;
233 u32 vdev_restart_request_cmdid;
234 u32 vdev_up_cmdid;
235 u32 vdev_stop_cmdid;
236 u32 vdev_down_cmdid;
237 u32 vdev_set_param_cmdid;
238 u32 vdev_install_key_cmdid;
239 u32 peer_create_cmdid;
240 u32 peer_delete_cmdid;
241 u32 peer_flush_tids_cmdid;
242 u32 peer_set_param_cmdid;
243 u32 peer_assoc_cmdid;
244 u32 peer_add_wds_entry_cmdid;
245 u32 peer_remove_wds_entry_cmdid;
246 u32 peer_mcast_group_cmdid;
247 u32 bcn_tx_cmdid;
248 u32 pdev_send_bcn_cmdid;
249 u32 bcn_tmpl_cmdid;
250 u32 bcn_filter_rx_cmdid;
251 u32 prb_req_filter_rx_cmdid;
252 u32 mgmt_tx_cmdid;
253 u32 prb_tmpl_cmdid;
254 u32 addba_clear_resp_cmdid;
255 u32 addba_send_cmdid;
256 u32 addba_status_cmdid;
257 u32 delba_send_cmdid;
258 u32 addba_set_resp_cmdid;
259 u32 send_singleamsdu_cmdid;
260 u32 sta_powersave_mode_cmdid;
261 u32 sta_powersave_param_cmdid;
262 u32 sta_mimo_ps_mode_cmdid;
263 u32 pdev_dfs_enable_cmdid;
264 u32 pdev_dfs_disable_cmdid;
265 u32 roam_scan_mode;
266 u32 roam_scan_rssi_threshold;
267 u32 roam_scan_period;
268 u32 roam_scan_rssi_change_threshold;
269 u32 roam_ap_profile;
270 u32 ofl_scan_add_ap_profile;
271 u32 ofl_scan_remove_ap_profile;
272 u32 ofl_scan_period;
273 u32 p2p_dev_set_device_info;
274 u32 p2p_dev_set_discoverability;
275 u32 p2p_go_set_beacon_ie;
276 u32 p2p_go_set_probe_resp_ie;
277 u32 p2p_set_vendor_ie_data_cmdid;
278 u32 ap_ps_peer_param_cmdid;
279 u32 ap_ps_peer_uapsd_coex_cmdid;
280 u32 peer_rate_retry_sched_cmdid;
281 u32 wlan_profile_trigger_cmdid;
282 u32 wlan_profile_set_hist_intvl_cmdid;
283 u32 wlan_profile_get_profile_data_cmdid;
284 u32 wlan_profile_enable_profile_id_cmdid;
285 u32 wlan_profile_list_profile_id_cmdid;
286 u32 pdev_suspend_cmdid;
287 u32 pdev_resume_cmdid;
288 u32 add_bcn_filter_cmdid;
289 u32 rmv_bcn_filter_cmdid;
290 u32 wow_add_wake_pattern_cmdid;
291 u32 wow_del_wake_pattern_cmdid;
292 u32 wow_enable_disable_wake_event_cmdid;
293 u32 wow_enable_cmdid;
294 u32 wow_hostwakeup_from_sleep_cmdid;
295 u32 rtt_measreq_cmdid;
296 u32 rtt_tsf_cmdid;
297 u32 vdev_spectral_scan_configure_cmdid;
298 u32 vdev_spectral_scan_enable_cmdid;
299 u32 request_stats_cmdid;
300 u32 set_arp_ns_offload_cmdid;
301 u32 network_list_offload_config_cmdid;
302 u32 gtk_offload_cmdid;
303 u32 csa_offload_enable_cmdid;
304 u32 csa_offload_chanswitch_cmdid;
305 u32 chatter_set_mode_cmdid;
306 u32 peer_tid_addba_cmdid;
307 u32 peer_tid_delba_cmdid;
308 u32 sta_dtim_ps_method_cmdid;
309 u32 sta_uapsd_auto_trig_cmdid;
310 u32 sta_keepalive_cmd;
311 u32 echo_cmdid;
312 u32 pdev_utf_cmdid;
313 u32 dbglog_cfg_cmdid;
314 u32 pdev_qvit_cmdid;
315 u32 pdev_ftm_intg_cmdid;
316 u32 vdev_set_keepalive_cmdid;
317 u32 vdev_get_keepalive_cmdid;
318 u32 force_fw_hang_cmdid;
319 u32 gpio_config_cmdid;
320 u32 gpio_output_cmdid;
321};
322
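struct wmi_cmd_map is the table ar->wmi.cmd points at after ath10k_wmi_attach(). As a hedged illustration (a fragment, not the actual wmi_10x_cmd_map from wmi.c), a firmware-specific map fills each slot with that branch's command id, or WMI_CMD_UNSUPPORTED for commands the branch does not implement:

	/* Illustrative fragment only - the real maps are defined in wmi.c. */
	static struct wmi_cmd_map example_10x_cmd_map = {
		.init_cmdid       = WMI_10X_INIT_CMDID,
		.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
		.bcn_tmpl_cmdid   = WMI_CMD_UNSUPPORTED, /* no such command in 10.X */
		/* ... remaining slots filled the same way ... */
	};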
211/* 323/*
212 * wmi command groups. 324 * wmi command groups.
213 */ 325 */
@@ -247,7 +359,9 @@ enum wmi_cmd_group {
247#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1) 359#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
248#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1) 360#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
249 361
250/* Command IDs and commande events. */ 362#define WMI_CMD_UNSUPPORTED 0
363
364/* Command IDs and command events for MAIN FW. */
251enum wmi_cmd_id { 365enum wmi_cmd_id {
252 WMI_INIT_CMDID = 0x1, 366 WMI_INIT_CMDID = 0x1,
253 367
@@ -488,6 +602,217 @@ enum wmi_event_id {
488 WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO), 602 WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
489}; 603};
490 604
605/* Command IDs and command events for 10.X firmware */
606enum wmi_10x_cmd_id {
607 WMI_10X_START_CMDID = 0x9000,
608 WMI_10X_END_CMDID = 0x9FFF,
609
610 /* initialize the wlan sub system */
611 WMI_10X_INIT_CMDID,
612
613 /* Scan specific commands */
614
615 WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
616 WMI_10X_STOP_SCAN_CMDID,
617 WMI_10X_SCAN_CHAN_LIST_CMDID,
618 WMI_10X_ECHO_CMDID,
619
620 /* PDEV(physical device) specific commands */
621 WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
622 WMI_10X_PDEV_SET_CHANNEL_CMDID,
623 WMI_10X_PDEV_SET_PARAM_CMDID,
624 WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
625 WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
626 WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
627 WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
628 WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
629 WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
630 WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
631 WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
632 WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
633 WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
634
635 /* VDEV(virtual device) specific commands */
636 WMI_10X_VDEV_CREATE_CMDID,
637 WMI_10X_VDEV_DELETE_CMDID,
638 WMI_10X_VDEV_START_REQUEST_CMDID,
639 WMI_10X_VDEV_RESTART_REQUEST_CMDID,
640 WMI_10X_VDEV_UP_CMDID,
641 WMI_10X_VDEV_STOP_CMDID,
642 WMI_10X_VDEV_DOWN_CMDID,
643 WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
644 WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
645 WMI_10X_VDEV_SET_PARAM_CMDID,
646 WMI_10X_VDEV_INSTALL_KEY_CMDID,
647
648 /* peer specific commands */
649 WMI_10X_PEER_CREATE_CMDID,
650 WMI_10X_PEER_DELETE_CMDID,
651 WMI_10X_PEER_FLUSH_TIDS_CMDID,
652 WMI_10X_PEER_SET_PARAM_CMDID,
653 WMI_10X_PEER_ASSOC_CMDID,
654 WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
655 WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
656 WMI_10X_PEER_MCAST_GROUP_CMDID,
657
658 /* beacon/management specific commands */
659
660 WMI_10X_BCN_TX_CMDID,
661 WMI_10X_BCN_PRB_TMPL_CMDID,
662 WMI_10X_BCN_FILTER_RX_CMDID,
663 WMI_10X_PRB_REQ_FILTER_RX_CMDID,
664 WMI_10X_MGMT_TX_CMDID,
665
666 /* commands to directly control ba negotiation directly from host. */
667 WMI_10X_ADDBA_CLEAR_RESP_CMDID,
668 WMI_10X_ADDBA_SEND_CMDID,
669 WMI_10X_ADDBA_STATUS_CMDID,
670 WMI_10X_DELBA_SEND_CMDID,
671 WMI_10X_ADDBA_SET_RESP_CMDID,
672 WMI_10X_SEND_SINGLEAMSDU_CMDID,
673
674 /* Station power save specific config */
675 WMI_10X_STA_POWERSAVE_MODE_CMDID,
676 WMI_10X_STA_POWERSAVE_PARAM_CMDID,
677 WMI_10X_STA_MIMO_PS_MODE_CMDID,
678
679 /* set debug log config */
680 WMI_10X_DBGLOG_CFG_CMDID,
681
682 /* DFS-specific commands */
683 WMI_10X_PDEV_DFS_ENABLE_CMDID,
684 WMI_10X_PDEV_DFS_DISABLE_CMDID,
685
686 /* QVIT specific command id */
687 WMI_10X_PDEV_QVIT_CMDID,
688
689 /* Offload Scan and Roaming related commands */
690 WMI_10X_ROAM_SCAN_MODE,
691 WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
692 WMI_10X_ROAM_SCAN_PERIOD,
693 WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
694 WMI_10X_ROAM_AP_PROFILE,
695 WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
696 WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
697 WMI_10X_OFL_SCAN_PERIOD,
698
699 /* P2P specific commands */
700 WMI_10X_P2P_DEV_SET_DEVICE_INFO,
701 WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
702 WMI_10X_P2P_GO_SET_BEACON_IE,
703 WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
704
705 /* AP power save specific config */
706 WMI_10X_AP_PS_PEER_PARAM_CMDID,
707 WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
708
709 /* Rate-control specific commands */
710 WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
711
712 /* WLAN Profiling commands. */
713 WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
714 WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
715 WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
716 WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
717 WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
718
719 /* Suspend resume command Ids */
720 WMI_10X_PDEV_SUSPEND_CMDID,
721 WMI_10X_PDEV_RESUME_CMDID,
722
723 /* Beacon filter commands */
724 WMI_10X_ADD_BCN_FILTER_CMDID,
725 WMI_10X_RMV_BCN_FILTER_CMDID,
726
727 /* WOW Specific WMI commands*/
728 WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
729 WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
730 WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
731 WMI_10X_WOW_ENABLE_CMDID,
732 WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
733
734 /* RTT measurement related cmd */
735 WMI_10X_RTT_MEASREQ_CMDID,
736 WMI_10X_RTT_TSF_CMDID,
737
738 /* transmit beacon by value */
739 WMI_10X_PDEV_SEND_BCN_CMDID,
740
741 /* F/W stats */
742 WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
743 WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
744 WMI_10X_REQUEST_STATS_CMDID,
745
746 /* GPIO Configuration */
747 WMI_10X_GPIO_CONFIG_CMDID,
748 WMI_10X_GPIO_OUTPUT_CMDID,
749
750 WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
751};
752
753enum wmi_10x_event_id {
754 WMI_10X_SERVICE_READY_EVENTID = 0x8000,
755 WMI_10X_READY_EVENTID,
756 WMI_10X_START_EVENTID = 0x9000,
757 WMI_10X_END_EVENTID = 0x9FFF,
758
759 /* Scan specific events */
760 WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
761 WMI_10X_ECHO_EVENTID,
762 WMI_10X_DEBUG_MESG_EVENTID,
763 WMI_10X_UPDATE_STATS_EVENTID,
764
765 /* Instantaneous RSSI event */
766 WMI_10X_INST_RSSI_STATS_EVENTID,
767
768 /* VDEV specific events */
769 WMI_10X_VDEV_START_RESP_EVENTID,
770 WMI_10X_VDEV_STANDBY_REQ_EVENTID,
771 WMI_10X_VDEV_RESUME_REQ_EVENTID,
772 WMI_10X_VDEV_STOPPED_EVENTID,
773
774 /* peer specific events */
775 WMI_10X_PEER_STA_KICKOUT_EVENTID,
776
777 /* beacon/mgmt specific events */
778 WMI_10X_HOST_SWBA_EVENTID,
779 WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
780 WMI_10X_MGMT_RX_EVENTID,
781
782 /* Channel stats event */
783 WMI_10X_CHAN_INFO_EVENTID,
784
785 /* PHY Error specific WMI event */
786 WMI_10X_PHYERR_EVENTID,
787
788 /* Roam event to trigger roaming on host */
789 WMI_10X_ROAM_EVENTID,
790
791 /* matching AP found from list of profiles */
792 WMI_10X_PROFILE_MATCH,
793
794 /* debug print message used for tracing FW code while debugging */
795 WMI_10X_DEBUG_PRINT_EVENTID,
796 /* VI specific event */
797 WMI_10X_PDEV_QVIT_EVENTID,
798 /* FW code profile data in response to profile request */
799 WMI_10X_WLAN_PROFILE_DATA_EVENTID,
800
801 /*RTT related event ID*/
802 WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
803 WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
804 WMI_10X_RTT_ERROR_REPORT_EVENTID,
805
806 WMI_10X_WOW_WAKEUP_HOST_EVENTID,
807 WMI_10X_DCS_INTERFERENCE_EVENTID,
808
809 /* TPC config for the current operating channel */
810 WMI_10X_PDEV_TPC_CONFIG_EVENTID,
811
812 WMI_10X_GPIO_INPUT_EVENTID,
813 WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
814};
815
491enum wmi_phy_mode { 816enum wmi_phy_mode {
492 MODE_11A = 0, /* 11a Mode */ 817 MODE_11A = 0, /* 11a Mode */
493 MODE_11G = 1, /* 11b/g Mode */ 818 MODE_11G = 1, /* 11b/g Mode */
@@ -805,6 +1130,46 @@ struct wmi_service_ready_event {
805 struct wlan_host_mem_req mem_reqs[1]; 1130 struct wlan_host_mem_req mem_reqs[1];
806} __packed; 1131} __packed;
807 1132
1133/* This is the definition from 10.X firmware branch */
1134struct wmi_service_ready_event_10x {
1135 __le32 sw_version;
1136 __le32 abi_version;
1137
1138 /* WMI_PHY_CAPABILITY */
1139 __le32 phy_capability;
1140
1141 /* Maximum number of frag table entries that SW will populate less 1 */
1142 __le32 max_frag_entry;
1143 __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
1144 __le32 num_rf_chains;
1145
1146 /*
1147 * The following field is only valid for service type
1148 * WMI_SERVICE_11AC
1149 */
1150 __le32 ht_cap_info; /* WMI HT Capability */
1151 __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
1152 __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
1153 __le32 hw_min_tx_power;
1154 __le32 hw_max_tx_power;
1155
1156 struct hal_reg_capabilities hal_reg_capabilities;
1157
1158 __le32 sys_cap_info;
1159 __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
1160
1161 /*
1162 * request to host to allocate a chunk of memory and pass it down to FW
1163 * via WMI_INIT. FW uses this as FW extension memory for saving its
1164 * data structures. Only valid for low latency interfaces like PCIE
1165 * where FW can access this memory directly or by DMA.
1166 */
1167 __le32 num_mem_reqs;
1168
1169 struct wlan_host_mem_req mem_reqs[1];
1170} __packed;
1171
1172
808#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) 1173#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
809#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ) 1174#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
810 1175
@@ -1012,6 +1377,192 @@ struct wmi_resource_config {
1012 __le32 max_frag_entries; 1377 __le32 max_frag_entries;
1013} __packed; 1378} __packed;
1014 1379
1380struct wmi_resource_config_10x {
1381 /* number of virtual devices (VAPs) to support */
1382 __le32 num_vdevs;
1383
1384 /* number of peer nodes to support */
1385 __le32 num_peers;
1386
1387 /* number of keys per peer */
1388 __le32 num_peer_keys;
1389
1390 /* total number of TX/RX data TIDs */
1391 __le32 num_tids;
1392
1393 /*
1394 * max skid for resolving hash collisions
1395 *
1396 * The address search table is sparse, so that if two MAC addresses
1397 * result in the same hash value, the second of these conflicting
1398 * entries can slide to the next index in the address search table,
1399 * and use it, if it is unoccupied. This ast_skid_limit parameter
1400 * specifies the upper bound on how many subsequent indices to search
1401 * over to find an unoccupied space.
1402 */
1403 __le32 ast_skid_limit;
1404
1405 /*
1406 * the nominal chain mask for transmit
1407 *
1408 * The chain mask may be modified dynamically, e.g. to operate AP
1409 * tx with a reduced number of chains if no clients are associated.
1410 * This configuration parameter specifies the nominal chain-mask that
1411 * should be used when not operating with a reduced set of tx chains.
1412 */
1413 __le32 tx_chain_mask;
1414
1415 /*
1416 * the nominal chain mask for receive
1417 *
1418 * The chain mask may be modified dynamically, e.g. for a client
1419 * to use a reduced number of chains for receive if the traffic to
1420 * the client is low enough that it doesn't require downlink MIMO
1421 * or antenna diversity.
1422 * This configuration parameter specifies the nominal chain-mask that
1423 * should be used when not operating with a reduced set of rx chains.
1424 */
1425 __le32 rx_chain_mask;
1426
1427 /*
1428 * what rx reorder timeout (ms) to use for the AC
1429 *
1430 * Each WMM access class (voice, video, best-effort, background) will
1431 * have its own timeout value to dictate how long to wait for missing
1432 * rx MPDUs to arrive before flushing subsequent MPDUs that have
1433 * already been received.
1434 * This parameter specifies the timeout in milliseconds for each
1435 * class.
1436 */
1437 __le32 rx_timeout_pri_vi;
1438 __le32 rx_timeout_pri_vo;
1439 __le32 rx_timeout_pri_be;
1440 __le32 rx_timeout_pri_bk;
1441
1442 /*
1443 * what mode the rx should decap packets to
1444 *
1445 * MAC can decap to RAW (no decap), native wifi or Ethernet types
1446	 * This setting also determines the default TX behavior; however, TX
1447 * behavior can be modified on a per VAP basis during VAP init
1448 */
1449 __le32 rx_decap_mode;
1450
1451	/* maximum number of scan requests that can be queued */
1452 __le32 scan_max_pending_reqs;
1453
1454 /* maximum VDEV that could use BMISS offload */
1455 __le32 bmiss_offload_max_vdev;
1456
1457 /* maximum VDEV that could use offload roaming */
1458 __le32 roam_offload_max_vdev;
1459
1460 /* maximum AP profiles that would push to offload roaming */
1461 __le32 roam_offload_max_ap_profiles;
1462
1463 /*
1464 * how many groups to use for mcast->ucast conversion
1465 *
1466 * The target's WAL maintains a table to hold information regarding
1467 * which peers belong to a given multicast group, so that if
1468 * multicast->unicast conversion is enabled, the target can convert
1469 * multicast tx frames to a series of unicast tx frames, to each
1470 * peer within the multicast group.
1471	 * This num_mcast_groups configuration parameter tells the target how
1472 * many multicast groups to provide storage for within its multicast
1473 * group membership table.
1474 */
1475 __le32 num_mcast_groups;
1476
1477 /*
1478 * size to alloc for the mcast membership table
1479 *
1480 * This num_mcast_table_elems configuration parameter tells the
1481 * target how many peer elements it needs to provide storage for in
1482 * its multicast group membership table.
1483 * These multicast group membership table elements are shared by the
1484 * multicast groups stored within the table.
1485 */
1486 __le32 num_mcast_table_elems;
1487
1488 /*
1489 * whether/how to do multicast->unicast conversion
1490 *
1491 * This configuration parameter specifies whether the target should
1492 * perform multicast --> unicast conversion on transmit, and if so,
1493 * what to do if it finds no entries in its multicast group
1494 * membership table for the multicast IP address in the tx frame.
1495 * Configuration value:
1496 * 0 -> Do not perform multicast to unicast conversion.
1497 * 1 -> Convert multicast frames to unicast, if the IP multicast
1498 * address from the tx frame is found in the multicast group
1499 * membership table. If the IP multicast address is not found,
1500 * drop the frame.
1501 * 2 -> Convert multicast frames to unicast, if the IP multicast
1502 * address from the tx frame is found in the multicast group
1503 * membership table. If the IP multicast address is not found,
1504 * transmit the frame as multicast.
1505 */
1506 __le32 mcast2ucast_mode;
1507
1508 /*
1509 * how much memory to allocate for a tx PPDU dbg log
1510 *
1511 * This parameter controls how much memory the target will allocate
1512 * to store a log of tx PPDU meta-information (how large the PPDU
1513 * was, when it was sent, whether it was successful, etc.)
1514 */
1515 __le32 tx_dbg_log_size;
1516
1517 /* how many AST entries to be allocated for WDS */
1518 __le32 num_wds_entries;
1519
1520 /*
1521	 * MAC DMA burst size, e.g. for target PCI the limit can be
1522	 * 0 - default, 1 - 256 bytes
1523 */
1524 __le32 dma_burst_size;
1525
1526 /*
1527 * Fixed delimiters to be inserted after every MPDU to
1528 * account for interface latency to avoid underrun.
1529 */
1530 __le32 mac_aggr_delim;
1531
1532 /*
1533 * determine whether target is responsible for detecting duplicate
1534 * non-aggregate MPDU and timing out stale fragments.
1535 *
1536 * A-MPDU reordering is always performed on the target.
1537 *
1538 * 0: target responsible for frag timeout and dup checking
1539 * 1: host responsible for frag timeout and dup checking
1540 */
1541 __le32 rx_skip_defrag_timeout_dup_detection_check;
1542
1543 /*
1544	 * Configuration for VoW:
1545	 * number of video nodes to be supported
1546	 * and max number of descriptors for each video link (node).
1547 */
1548 __le32 vow_config;
1549
1550 /* Number of msdu descriptors target should use */
1551 __le32 num_msdu_desc;
1552
1553 /*
1554 * Max. number of Tx fragments per MSDU
1555 * This parameter controls the max number of Tx fragments per MSDU.
1556 * This is sent by the target as part of the WMI_SERVICE_READY event
1557	 * and is overridden by the OS shim as required.
1558 */
1559 __le32 max_frag_entries;
1560} __packed;
1561
1562
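A minimal sketch of populating this structure before sending WMI_INIT; the TARGET_10X_* limits are illustrative names for constants assumed to be defined elsewhere, not verbatim driver code:

struct wmi_resource_config_10x cfg = {};

cfg.num_vdevs        = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
cfg.num_peers        = __cpu_to_le32(TARGET_10X_NUM_PEERS);
cfg.num_tids         = __cpu_to_le32(TARGET_10X_NUM_TIDS);
cfg.rx_decap_mode    = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
cfg.mcast2ucast_mode = __cpu_to_le32(0);	/* no mcast->ucast conversion */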
1563#define NUM_UNITS_IS_NUM_VDEVS 0x1
1564#define NUM_UNITS_IS_NUM_PEERS 0x2
1565
1015/* structure describing host memory chunk. */ 1566/* structure describing host memory chunk. */
1016struct host_memory_chunk { 1567struct host_memory_chunk {
1017 /* id of the request that is passed up in service ready */ 1568 /* id of the request that is passed up in service ready */
@@ -1033,6 +1584,18 @@ struct wmi_init_cmd {
1033 struct host_memory_chunk host_mem_chunks[1]; 1584 struct host_memory_chunk host_mem_chunks[1];
1034} __packed; 1585} __packed;
1035 1586
1587/* _10x structure is from 10.X FW API */
1588struct wmi_init_cmd_10x {
1589 struct wmi_resource_config_10x resource_config;
1590 __le32 num_host_mem_chunks;
1591
1592 /*
1593 * variable number of host memory chunks.
1594 * This should be the last element in the structure
1595 */
1596 struct host_memory_chunk host_mem_chunks[1];
1597} __packed;
1598
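The one-element host_mem_chunks[1] member is the usual variable-length trailer idiom, so a command carrying n chunks (n >= 1) is sized roughly as follows (a sketch, not a quote of the driver):

size_t len = sizeof(struct wmi_init_cmd_10x) +
	     (n - 1) * sizeof(struct host_memory_chunk);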
1036/* TLV for channel list */ 1599/* TLV for channel list */
1037struct wmi_chan_list { 1600struct wmi_chan_list {
1038 __le32 tag; /* WMI_CHAN_LIST_TAG */ 1601 __le32 tag; /* WMI_CHAN_LIST_TAG */
@@ -1152,6 +1715,88 @@ struct wmi_start_scan_cmd {
1152 */ 1715 */
1153} __packed; 1716} __packed;
1154 1717
1718/* This is the definition from 10.X firmware branch */
1719struct wmi_start_scan_cmd_10x {
1720 /* Scan ID */
1721 __le32 scan_id;
1722
1723 /* Scan requestor ID */
1724 __le32 scan_req_id;
1725
1726 /* VDEV id(interface) that is requesting scan */
1727 __le32 vdev_id;
1728
1729 /* Scan Priority, input to scan scheduler */
1730 __le32 scan_priority;
1731
1732 /* Scan events subscription */
1733 __le32 notify_scan_events;
1734
1735 /* dwell time in msec on active channels */
1736 __le32 dwell_time_active;
1737
1738 /* dwell time in msec on passive channels */
1739 __le32 dwell_time_passive;
1740
1741 /*
1742	 * min time in msec on the BSS channel, only valid if at least one
1743 * VDEV is active
1744 */
1745 __le32 min_rest_time;
1746
1747 /*
1748	 * max rest time in msec on the BSS channel, only valid if at least
1749 * one VDEV is active
1750 */
1751 /*
1752	 * The scanner will rest on the BSS channel for at least min_rest_time.
1753	 * After min_rest_time the scanner will start checking for tx/rx
1754	 * activity on all VDEVs. If there is no activity the scanner will
1755	 * switch to off channel. If there is activity the scanner will leave
1756	 * the radio on the BSS channel until max_rest_time expires. At
1757	 * max_rest_time the scanner will switch to off channel irrespective of
1758	 * activity. Activity is determined by the idle_time parameter.
1759 */
1760 __le32 max_rest_time;
1761
1762 /*
1763	 * time before sending the next set of probe requests.
1764	 * The scanner keeps repeating probe request transmissions with the
1765	 * period specified by repeat_probe_time.
1766 * The number of probe requests specified depends on the ssid_list
1767 * and bssid_list
1768 */
1769 __le32 repeat_probe_time;
1770
1771	/* time in msec between 2 consecutive probe requests within a set. */
1772 __le32 probe_spacing_time;
1773
1774 /*
1775 * data inactivity time in msec on bss channel that will be used by
1776 * scanner for measuring the inactivity.
1777 */
1778 __le32 idle_time;
1779
1780 /* maximum time in msec allowed for scan */
1781 __le32 max_scan_time;
1782
1783 /*
1784 * delay in msec before sending first probe request after switching
1785 * to a channel
1786 */
1787 __le32 probe_delay;
1788
1789 /* Scan control flags */
1790 __le32 scan_ctrl_flags;
1791
1792 /*
1793	 * TLV (tag length value) parameters follow the scan_cmd structure.
1794	 * TLVs can contain a channel list, bssid list, ssid list and
1795	 * IEs. The TLV tags are defined above.
1796 */
1797} __packed;
1798
1799
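To make the dwell/rest-time semantics described above concrete, a hedged example of filling the timing fields (values are arbitrary illustrations, not driver defaults):

cmd->dwell_time_active  = __cpu_to_le32(50);	/* ms per active channel */
cmd->dwell_time_passive = __cpu_to_le32(150);	/* ms per passive channel */
cmd->min_rest_time      = __cpu_to_le32(50);	/* ms back on the BSS channel */
cmd->max_rest_time      = __cpu_to_le32(500);	/* hard cap regardless of activity */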
1155struct wmi_ssid_arg { 1800struct wmi_ssid_arg {
1156 int len; 1801 int len;
1157 const u8 *ssid; 1802 const u8 *ssid;
@@ -1509,6 +2154,60 @@ struct wmi_csa_event {
1509#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500 2154#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
1510#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500 2155#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
1511 2156
2157struct wmi_pdev_param_map {
2158 u32 tx_chain_mask;
2159 u32 rx_chain_mask;
2160 u32 txpower_limit2g;
2161 u32 txpower_limit5g;
2162 u32 txpower_scale;
2163 u32 beacon_gen_mode;
2164 u32 beacon_tx_mode;
2165 u32 resmgr_offchan_mode;
2166 u32 protection_mode;
2167 u32 dynamic_bw;
2168 u32 non_agg_sw_retry_th;
2169 u32 agg_sw_retry_th;
2170 u32 sta_kickout_th;
2171 u32 ac_aggrsize_scaling;
2172 u32 ltr_enable;
2173 u32 ltr_ac_latency_be;
2174 u32 ltr_ac_latency_bk;
2175 u32 ltr_ac_latency_vi;
2176 u32 ltr_ac_latency_vo;
2177 u32 ltr_ac_latency_timeout;
2178 u32 ltr_sleep_override;
2179 u32 ltr_rx_override;
2180 u32 ltr_tx_activity_timeout;
2181 u32 l1ss_enable;
2182 u32 dsleep_enable;
2183 u32 pcielp_txbuf_flush;
2184 u32 pcielp_txbuf_watermark;
2185 u32 pcielp_txbuf_tmo_en;
2186 u32 pcielp_txbuf_tmo_value;
2187 u32 pdev_stats_update_period;
2188 u32 vdev_stats_update_period;
2189 u32 peer_stats_update_period;
2190 u32 bcnflt_stats_update_period;
2191 u32 pmf_qos;
2192 u32 arp_ac_override;
2193 u32 arpdhcp_ac_override;
2194 u32 dcs;
2195 u32 ani_enable;
2196 u32 ani_poll_period;
2197 u32 ani_listen_period;
2198 u32 ani_ofdm_level;
2199 u32 ani_cck_level;
2200 u32 dyntxchain;
2201 u32 proxy_sta;
2202 u32 idle_ps_config;
2203 u32 power_gating_sleep;
2204 u32 fast_channel_reset;
2205 u32 burst_dur;
2206 u32 burst_enable;
2207};
2208
2209#define WMI_PDEV_PARAM_UNSUPPORTED 0
2210
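A sketch of how such a map is meant to be consumed; ar->wmi.pdev_param is an assumed pointer to whichever table (main or 10.X) matches the running firmware, and txpower is an assumed local:

u32 id = ar->wmi.pdev_param->txpower_limit2g;

if (id == WMI_PDEV_PARAM_UNSUPPORTED)
	return -EOPNOTSUPP;

return ath10k_wmi_pdev_set_param(ar, id, txpower);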
1512enum wmi_pdev_param { 2211enum wmi_pdev_param {
1513 /* TX chian mask */ 2212 /* TX chian mask */
1514 WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1, 2213 WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
@@ -1608,6 +2307,97 @@ enum wmi_pdev_param {
1608 WMI_PDEV_PARAM_POWER_GATING_SLEEP, 2307 WMI_PDEV_PARAM_POWER_GATING_SLEEP,
1609}; 2308};
1610 2309
2310enum wmi_10x_pdev_param {
2311	/* TX chain mask */
2312	WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
2313	/* RX chain mask */
2314 WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
2315 /* TX power limit for 2G Radio */
2316 WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
2317 /* TX power limit for 5G Radio */
2318 WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
2319 /* TX power scale */
2320 WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
2321	/* Beacon generation mode. 0: host, 1: target */
2322	WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
2323	/* Beacon generation mode. 0: staggered, 1: bursted */
2324 WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
2325 /*
2326	 * Resource manager off-channel mode.
2327	 * 0: turn off off-channel mode. 1: turn on off-channel mode
2328 */
2329 WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
2330 /*
2331 * Protection mode:
2332	 * 0: no protection, 1: use CTS-to-self, 2: use RTS/CTS
2333 */
2334 WMI_10X_PDEV_PARAM_PROTECTION_MODE,
2335 /* Dynamic bandwidth 0: disable 1: enable */
2336 WMI_10X_PDEV_PARAM_DYNAMIC_BW,
2337	/* Non-aggregate / 11g sw retry threshold. 0 - disable */
2338	WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
2339	/* aggregate sw retry threshold. 0 - disable */
2340	WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
2341	/* Station kickout threshold (number of consecutive failures). 0 - disable */
2342	WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
2343	/* Aggregate size scaling configuration per AC */
2344 WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
2345 /* LTR enable */
2346 WMI_10X_PDEV_PARAM_LTR_ENABLE,
2347 /* LTR latency for BE, in us */
2348 WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
2349 /* LTR latency for BK, in us */
2350 WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
2351 /* LTR latency for VI, in us */
2352 WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
2353 /* LTR latency for VO, in us */
2354 WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
2355 /* LTR AC latency timeout, in ms */
2356 WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
2357 /* LTR platform latency override, in us */
2358 WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
2359 /* LTR-RX override, in us */
2360 WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
2361 /* Tx activity timeout for LTR, in us */
2362 WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
2363 /* L1SS state machine enable */
2364 WMI_10X_PDEV_PARAM_L1SS_ENABLE,
2365 /* Deep sleep state machine enable */
2366 WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
2367 /* pdev level stats update period in ms */
2368 WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
2369 /* vdev level stats update period in ms */
2370 WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
2371 /* peer level stats update period in ms */
2372 WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
2373 /* beacon filter status update period */
2374 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
2375 /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
2376 WMI_10X_PDEV_PARAM_PMF_QOS,
2377 /* Access category on which ARP and DHCP frames are sent */
2378 WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
2379 /* DCS configuration */
2380 WMI_10X_PDEV_PARAM_DCS,
2381 /* Enable/Disable ANI on target */
2382 WMI_10X_PDEV_PARAM_ANI_ENABLE,
2383 /* configure the ANI polling period */
2384 WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
2385 /* configure the ANI listening period */
2386 WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
2387 /* configure OFDM immunity level */
2388 WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
2389 /* configure CCK immunity level */
2390 WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
2391 /* Enable/Disable CDD for 1x1 STAs in rate control module */
2392 WMI_10X_PDEV_PARAM_DYNTXCHAIN,
2393 /* Enable/Disable Fast channel reset*/
2394 WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
2395 /* Set Bursting DUR */
2396 WMI_10X_PDEV_PARAM_BURST_DUR,
2397 /* Set Bursting Enable*/
2398 WMI_10X_PDEV_PARAM_BURST_ENABLE,
2399};
2400
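These 10.X ids are meant to be plugged into struct wmi_pdev_param_map; an abbreviated, illustrative initializer (the real table covers every field) could look like:

static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
	.tx_chain_mask   = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask   = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.pmf_qos         = WMI_10X_PDEV_PARAM_PMF_QOS,
	/* fields with no 10.X equivalent stay 0, i.e.
	 * WMI_PDEV_PARAM_UNSUPPORTED */
	.arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
};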
1611struct wmi_pdev_set_param_cmd { 2401struct wmi_pdev_set_param_cmd {
1612 __le32 param_id; 2402 __le32 param_id;
1613 __le32 param_value; 2403 __le32 param_value;
@@ -2132,6 +2922,61 @@ enum wmi_rate_preamble {
2132/* Value to disable fixed rate setting */ 2922/* Value to disable fixed rate setting */
2133#define WMI_FIXED_RATE_NONE (0xff) 2923#define WMI_FIXED_RATE_NONE (0xff)
2134 2924
2925struct wmi_vdev_param_map {
2926 u32 rts_threshold;
2927 u32 fragmentation_threshold;
2928 u32 beacon_interval;
2929 u32 listen_interval;
2930 u32 multicast_rate;
2931 u32 mgmt_tx_rate;
2932 u32 slot_time;
2933 u32 preamble;
2934 u32 swba_time;
2935 u32 wmi_vdev_stats_update_period;
2936 u32 wmi_vdev_pwrsave_ageout_time;
2937 u32 wmi_vdev_host_swba_interval;
2938 u32 dtim_period;
2939 u32 wmi_vdev_oc_scheduler_air_time_limit;
2940 u32 wds;
2941 u32 atim_window;
2942 u32 bmiss_count_max;
2943 u32 bmiss_first_bcnt;
2944 u32 bmiss_final_bcnt;
2945 u32 feature_wmm;
2946 u32 chwidth;
2947 u32 chextoffset;
2948 u32 disable_htprotection;
2949 u32 sta_quickkickout;
2950 u32 mgmt_rate;
2951 u32 protection_mode;
2952 u32 fixed_rate;
2953 u32 sgi;
2954 u32 ldpc;
2955 u32 tx_stbc;
2956 u32 rx_stbc;
2957 u32 intra_bss_fwd;
2958 u32 def_keyid;
2959 u32 nss;
2960 u32 bcast_data_rate;
2961 u32 mcast_data_rate;
2962 u32 mcast_indicate;
2963 u32 dhcp_indicate;
2964 u32 unknown_dest_indicate;
2965 u32 ap_keepalive_min_idle_inactive_time_secs;
2966 u32 ap_keepalive_max_idle_inactive_time_secs;
2967 u32 ap_keepalive_max_unresponsive_time_secs;
2968 u32 ap_enable_nawds;
2969 u32 mcast2ucast_set;
2970 u32 enable_rtscts;
2971 u32 txbf;
2972 u32 packet_powersave;
2973 u32 drop_unencry;
2974 u32 tx_encap_type;
2975 u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
2976};
2977
2978#define WMI_VDEV_PARAM_UNSUPPORTED 0
2979
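The vdev map is consumed the same way as the pdev map above; a hedged fragment where ar->wmi.vdev_param, vdev_id and rts are assumed pointers/locals rather than verbatim driver code:

u32 id = ar->wmi.vdev_param->rts_threshold;

if (id == WMI_VDEV_PARAM_UNSUPPORTED)
	return -EOPNOTSUPP;

return ath10k_wmi_vdev_set_param(ar, vdev_id, id, rts);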
2135/* the definition of different VDEV parameters */ 2980/* the definition of different VDEV parameters */
2136enum wmi_vdev_param { 2981enum wmi_vdev_param {
2137 /* RTS Threshold */ 2982 /* RTS Threshold */
@@ -2263,6 +3108,121 @@ enum wmi_vdev_param {
2263 WMI_VDEV_PARAM_TX_ENCAP_TYPE, 3108 WMI_VDEV_PARAM_TX_ENCAP_TYPE,
2264}; 3109};
2265 3110
3111/* the definition of different VDEV parameters */
3112enum wmi_10x_vdev_param {
3113 /* RTS Threshold */
3114 WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
3115 /* Fragmentation threshold */
3116 WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
3117 /* beacon interval in TUs */
3118 WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
3119 /* Listen interval in TUs */
3120 WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
3121	/* multicast rate in Mbps */
3122 WMI_10X_VDEV_PARAM_MULTICAST_RATE,
3123 /* management frame rate in Mbps */
3124 WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
3125 /* slot time (long vs short) */
3126 WMI_10X_VDEV_PARAM_SLOT_TIME,
3127 /* preamble (long vs short) */
3128 WMI_10X_VDEV_PARAM_PREAMBLE,
3129 /* SWBA time (time before tbtt in msec) */
3130 WMI_10X_VDEV_PARAM_SWBA_TIME,
3131 /* time period for updating VDEV stats */
3132 WMI_10X_VDEV_STATS_UPDATE_PERIOD,
3133 /* age out time in msec for frames queued for station in power save */
3134 WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
3135 /*
3136 * Host SWBA interval (time in msec before tbtt for SWBA event
3137 * generation).
3138 */
3139 WMI_10X_VDEV_HOST_SWBA_INTERVAL,
3140 /* DTIM period (specified in units of num beacon intervals) */
3141 WMI_10X_VDEV_PARAM_DTIM_PERIOD,
3142 /*
3143 * scheduler air time limit for this VDEV. used by off chan
3144 * scheduler.
3145 */
3146 WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
3147	/* enable/disable WDS for this VDEV */
3148 WMI_10X_VDEV_PARAM_WDS,
3149 /* ATIM Window */
3150 WMI_10X_VDEV_PARAM_ATIM_WINDOW,
3151 /* BMISS max */
3152 WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
3153	/* WMM enable/disable */
3154 WMI_10X_VDEV_PARAM_FEATURE_WMM,
3155 /* Channel width */
3156 WMI_10X_VDEV_PARAM_CHWIDTH,
3157 /* Channel Offset */
3158 WMI_10X_VDEV_PARAM_CHEXTOFFSET,
3159 /* Disable HT Protection */
3160 WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
3161 /* Quick STA Kickout */
3162 WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
3163 /* Rate to be used with Management frames */
3164 WMI_10X_VDEV_PARAM_MGMT_RATE,
3165 /* Protection Mode */
3166 WMI_10X_VDEV_PARAM_PROTECTION_MODE,
3167 /* Fixed rate setting */
3168 WMI_10X_VDEV_PARAM_FIXED_RATE,
3169 /* Short GI Enable/Disable */
3170 WMI_10X_VDEV_PARAM_SGI,
3171 /* Enable LDPC */
3172 WMI_10X_VDEV_PARAM_LDPC,
3173 /* Enable Tx STBC */
3174 WMI_10X_VDEV_PARAM_TX_STBC,
3175 /* Enable Rx STBC */
3176 WMI_10X_VDEV_PARAM_RX_STBC,
3177 /* Intra BSS forwarding */
3178 WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
3179 /* Setting Default xmit key for Vdev */
3180 WMI_10X_VDEV_PARAM_DEF_KEYID,
3181 /* NSS width */
3182 WMI_10X_VDEV_PARAM_NSS,
3183 /* Set the custom rate for the broadcast data frames */
3184 WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
3185 /* Set the custom rate (rate-code) for multicast data frames */
3186 WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
3187 /* Tx multicast packet indicate Enable/Disable */
3188 WMI_10X_VDEV_PARAM_MCAST_INDICATE,
3189 /* Tx DHCP packet indicate Enable/Disable */
3190 WMI_10X_VDEV_PARAM_DHCP_INDICATE,
3191 /* Enable host inspection of Tx unicast packet to unknown destination */
3192 WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
3193
3194	/* The minimum amount of idle time before the AP considers a STA inactive */
3195 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
3196
3197 /*
3198 * An associated STA is considered inactive when there is no recent
3199 * TX/RX activity and no downlink frames are buffered for it. Once a
3200 * STA exceeds the maximum idle inactive time, the AP will send an
3201 * 802.11 data-null as a keep alive to verify the STA is still
3202	 * associated. If the STA does not ACK the data-null, or if the data-null
3203	 * is buffered and the STA does not retrieve it, the STA will be
3204	 * considered unresponsive
3205	 * (see WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
3206 */
3207 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
3208
3209 /*
3210 * An associated STA is considered unresponsive if there is no recent
3211 * TX/RX activity and downlink frames are buffered for it. Once a STA
3212 * exceeds the maximum unresponsive time, the AP will send a
3213 * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
3214 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
3215
3216 /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
3217 WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
3218
3219 WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
3220 /* Enable/Disable RTS-CTS */
3221 WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
3222
3223 WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
3224};
3225
2266/* slot time long */ 3226/* slot time long */
2267#define WMI_VDEV_SLOT_TIME_LONG 0x1 3227#define WMI_VDEV_SLOT_TIME_LONG 0x1
2268/* slot time short */ 3228/* slot time short */
@@ -3064,8 +4024,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
3064int ath10k_wmi_pdev_resume_target(struct ath10k *ar); 4024int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
3065int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, 4025int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
3066 u16 rd5g, u16 ctl2g, u16 ctl5g); 4026 u16 rd5g, u16 ctl2g, u16 ctl5g);
3067int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, 4027int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
3068 u32 value);
3069int ath10k_wmi_cmd_init(struct ath10k *ar); 4028int ath10k_wmi_cmd_init(struct ath10k *ar);
3070int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *); 4029int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
3071void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *); 4030void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
@@ -3085,7 +4044,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
3085 const u8 *bssid); 4044 const u8 *bssid);
3086int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id); 4045int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
3087int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, 4046int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
3088 enum wmi_vdev_param param_id, u32 param_value); 4047 u32 param_id, u32 param_value);
3089int ath10k_wmi_vdev_install_key(struct ath10k *ar, 4048int ath10k_wmi_vdev_install_key(struct ath10k *ar,
3090 const struct wmi_vdev_install_key_arg *arg); 4049 const struct wmi_vdev_install_key_arg *arg);
3091int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 4050int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -3115,5 +4074,6 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
3115int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); 4074int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
3116int ath10k_wmi_force_fw_hang(struct ath10k *ar, 4075int ath10k_wmi_force_fw_hang(struct ath10k *ar,
3117 enum wmi_force_fw_hang_type type, u32 delay_ms); 4076 enum wmi_force_fw_hang_type type, u32 delay_ms);
4077int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
3118 4078
3119#endif /* _WMI_H_ */ 4079#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 48161edec8de..69f58b073e85 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1663,15 +1663,15 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
1663 ah->stats.tx_bytes_count += skb->len; 1663 ah->stats.tx_bytes_count += skb->len;
1664 info = IEEE80211_SKB_CB(skb); 1664 info = IEEE80211_SKB_CB(skb);
1665 1665
1666 size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
1667 memcpy(info->status.rates, bf->rates, size);
1668
1666 tries[0] = info->status.rates[0].count; 1669 tries[0] = info->status.rates[0].count;
1667 tries[1] = info->status.rates[1].count; 1670 tries[1] = info->status.rates[1].count;
1668 tries[2] = info->status.rates[2].count; 1671 tries[2] = info->status.rates[2].count;
1669 1672
1670 ieee80211_tx_info_clear_status(info); 1673 ieee80211_tx_info_clear_status(info);
1671 1674
1672 size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
1673 memcpy(info->status.rates, bf->rates, size);
1674
1675 for (i = 0; i < ts->ts_final_idx; i++) { 1675 for (i = 0; i < ts->ts_final_idx; i++) {
1676 struct ieee80211_tx_rate *r = 1676 struct ieee80211_tx_rate *r =
1677 &info->status.rates[i]; 1677 &info->status.rates[i];
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index a2c8ff809793..14cab1403dd6 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -60,7 +60,7 @@
60/* disable credit flow control on a specific service */ 60/* disable credit flow control on a specific service */
61#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3) 61#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
62#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8 62#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
63#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00 63#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00U
64 64
65/* connect response status codes */ 65/* connect response status codes */
66#define HTC_SERVICE_SUCCESS 0 66#define HTC_SERVICE_SUCCESS 0
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7944c25c9a43..32f139e2e897 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -84,6 +84,26 @@ config ATH9K_DFS_CERTIFIED
84 developed. At this point enabling this option won't do anything 84 developed. At this point enabling this option won't do anything
85 except increase code size. 85 except increase code size.
86 86
87config ATH9K_TX99
88 bool "Atheros ath9k TX99 testing support"
89 depends on CFG80211_CERTIFICATION_ONUS
90 default n
91 ---help---
92 Say N. This should only be enabled on systems undergoing
93 certification testing and evaluation in a controlled environment.
94	  Enabling this will only enable TX99 support; all other modes of
95 operation will be disabled.
96
97 TX99 support enables Specific Absorption Rate (SAR) testing.
98	  SAR is the unit of measurement for the amount of radio frequency (RF)
99	  absorbed by the body when using a wireless device. The RF exposure
100	  limits used are expressed in terms of SAR, which is a measure
101	  of the electric and magnetic field strength and power density for
102	  transmitters operating at frequencies from 300 kHz to 100 GHz.
103	  Regulatory bodies around the world require that wireless devices
104 be evaluated to meet the RF exposure limits set forth in the
105 governmental SAR regulations.
106
87config ATH9K_LEGACY_RATE_CONTROL 107config ATH9K_LEGACY_RATE_CONTROL
88 bool "Atheros ath9k rate control" 108 bool "Atheros ath9k rate control"
89 depends on ATH9K 109 depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 75ee9e7704ce..6205ef5a9321 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -14,9 +14,7 @@ ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
14ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 14ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
15ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o 15ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
16ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \ 16ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
17 dfs.o \ 17 dfs.o
18 dfs_pattern_detector.o \
19 dfs_pri_detector.o
20ath9k-$(CONFIG_PM_SLEEP) += wow.o 18ath9k-$(CONFIG_PM_SLEEP) += wow.o
21 19
22obj-$(CONFIG_ATH9K) += ath9k.o 20obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 17970d49d858..f087117b2e6b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -680,6 +680,26 @@ static void ar9002_hw_spectral_scan_wait(struct ath_hw *ah)
680 } 680 }
681} 681}
682 682
683static void ar9002_hw_tx99_start(struct ath_hw *ah, u32 qnum)
684{
685 REG_SET_BIT(ah, 0x9864, 0x7f000);
686 REG_SET_BIT(ah, 0x9924, 0x7f00fe);
687 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
688 REG_WRITE(ah, AR_CR, AR_CR_RXD);
689 REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
690 REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20);
691 REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
692 REG_WRITE(ah, AR_D_FPCTL, 0x10|qnum);
693 REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
694 REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
695 REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
696}
697
698static void ar9002_hw_tx99_stop(struct ath_hw *ah)
699{
700 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
701}
702
683void ar9002_hw_attach_phy_ops(struct ath_hw *ah) 703void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
684{ 704{
685 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 705 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -701,6 +721,8 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
701#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 721#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
702 ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity; 722 ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
703#endif 723#endif
724 ops->tx99_start = ar9002_hw_tx99_start;
725 ops->tx99_stop = ar9002_hw_tx99_stop;
704 726
705 ar9002_hw_set_nf_limits(ah); 727 ar9002_hw_set_nf_limits(ah);
706} 728}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index f3adafd33704..11f53589a3f3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1617,6 +1617,98 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
1617 } 1617 }
1618} 1618}
1619 1619
1620static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
1621{
1622 REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
1623 REG_SET_BIT(ah, 0x9864, 0x7f000);
1624 REG_SET_BIT(ah, 0x9924, 0x7f00fe);
1625 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
1626 REG_WRITE(ah, AR_CR, AR_CR_RXD);
1627 REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
1628 REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20); /* 50 OK */
1629 REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
1630 REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
1631 REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
1632 REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
1633}
1634
1635static void ar9003_hw_tx99_stop(struct ath_hw *ah)
1636{
1637 REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
1638 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
1639}
1640
1641static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
1642{
1643 static s16 p_pwr_array[ar9300RateSize] = { 0 };
1644 unsigned int i;
1645
1646 if (txpower <= MAX_RATE_POWER) {
1647 for (i = 0; i < ar9300RateSize; i++)
1648 p_pwr_array[i] = txpower;
1649 } else {
1650 for (i = 0; i < ar9300RateSize; i++)
1651 p_pwr_array[i] = MAX_RATE_POWER;
1652 }
1653
1654 REG_WRITE(ah, 0xa458, 0);
1655
1656 REG_WRITE(ah, 0xa3c0,
1657 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
1658 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
1659 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 8) |
1660 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
1661 REG_WRITE(ah, 0xa3c4,
1662 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54], 24) |
1663 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48], 16) |
1664 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36], 8) |
1665 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
1666 REG_WRITE(ah, 0xa3c8,
1667 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
1668 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
1669 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
1670 REG_WRITE(ah, 0xa3cc,
1671 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S], 24) |
1672 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L], 16) |
1673 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S], 8) |
1674 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
1675 REG_WRITE(ah, 0xa3d0,
1676 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5], 24) |
1677 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4], 16) |
1678 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
1679 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
1680 REG_WRITE(ah, 0xa3d4,
1681 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
1682 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
1683 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7], 8) |
1684 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6], 0));
1685 REG_WRITE(ah, 0xa3e4,
1686 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
1687 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
1688 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15], 8) |
1689 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14], 0));
1690 REG_WRITE(ah, 0xa3e8,
1691 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
1692 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
1693 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23], 8) |
1694 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22], 0));
1695 REG_WRITE(ah, 0xa3d8,
1696 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
1697 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
1698 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
1699 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
1700 REG_WRITE(ah, 0xa3dc,
1701 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
1702 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
1703 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7], 8) |
1704 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6], 0));
1705 REG_WRITE(ah, 0xa3ec,
1706 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
1707 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
1708 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15], 8) |
1709 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
1710}
1711
1620void ar9003_hw_attach_phy_ops(struct ath_hw *ah) 1712void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1621{ 1713{
1622 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1714 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1656,6 +1748,9 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1656#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1748#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1657 ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity; 1749 ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
1658#endif 1750#endif
1751 ops->tx99_start = ar9003_hw_tx99_start;
1752 ops->tx99_stop = ar9003_hw_tx99_stop;
1753 ops->tx99_set_txpower = ar9003_hw_tx99_set_txpower;
1659 1754
1660 ar9003_hw_set_nf_limits(ah); 1755 ar9003_hw_set_nf_limits(ah);
1661 ar9003_hw_set_radar_conf(ah); 1756 ar9003_hw_set_radar_conf(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 14ff7e9dde4c..e7a38d844a6a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -778,6 +778,11 @@ struct ath_softc {
778 enum spectral_mode spectral_mode; 778 enum spectral_mode spectral_mode;
779 struct ath_spec_scan spec_config; 779 struct ath_spec_scan spec_config;
780 780
781 struct ieee80211_vif *tx99_vif;
782 struct sk_buff *tx99_skb;
783 bool tx99_state;
784 s16 tx99_power;
785
781#ifdef CONFIG_PM_SLEEP 786#ifdef CONFIG_PM_SLEEP
782 atomic_t wow_got_bmiss_intr; 787 atomic_t wow_got_bmiss_intr;
783 atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */ 788 atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
@@ -886,6 +891,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
886 */ 891 */
887enum ath_fft_sample_type { 892enum ath_fft_sample_type {
888 ATH_FFT_SAMPLE_HT20 = 1, 893 ATH_FFT_SAMPLE_HT20 = 1,
894 ATH_FFT_SAMPLE_HT20_40,
889}; 895};
890 896
891struct fft_sample_tlv { 897struct fft_sample_tlv {
@@ -912,6 +918,39 @@ struct fft_sample_ht20 {
912 u8 data[SPECTRAL_HT20_NUM_BINS]; 918 u8 data[SPECTRAL_HT20_NUM_BINS];
913} __packed; 919} __packed;
914 920
921struct fft_sample_ht20_40 {
922 struct fft_sample_tlv tlv;
923
924 u8 channel_type;
925 __be16 freq;
926
927 s8 lower_rssi;
928 s8 upper_rssi;
929
930 __be64 tsf;
931
932 s8 lower_noise;
933 s8 upper_noise;
934
935 __be16 lower_max_magnitude;
936 __be16 upper_max_magnitude;
937
938 u8 lower_max_index;
939 u8 upper_max_index;
940
941 u8 lower_bitmap_weight;
942 u8 upper_bitmap_weight;
943
944 u8 max_exp;
945
946 u8 data[SPECTRAL_HT20_40_NUM_BINS];
947} __packed;
948
949int ath9k_tx99_init(struct ath_softc *sc);
950void ath9k_tx99_deinit(struct ath_softc *sc);
951int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
952 struct ath_tx_control *txctl);
953
915void ath9k_tasklet(unsigned long data); 954void ath9k_tasklet(unsigned long data);
916int ath_cabq_update(struct ath_softc *); 955int ath_cabq_update(struct ath_softc *);
917 956
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index d8db74b0ef66..278365b8a895 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -63,13 +63,13 @@ static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
63 return ath9k_hw_get_nf_limits(ah, chan)->nominal; 63 return ath9k_hw_get_nf_limits(ah, chan)->nominal;
64} 64}
65 65
66s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan) 66s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
67 s16 nf)
67{ 68{
68 s8 noise = ATH_DEFAULT_NOISE_FLOOR; 69 s8 noise = ATH_DEFAULT_NOISE_FLOOR;
69 70
70 if (chan && chan->noisefloor) { 71 if (nf) {
71 s8 delta = chan->noisefloor - 72 s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
72 ATH9K_NF_CAL_NOISE_THRESH -
73 ath9k_hw_get_default_nf(ah, chan); 73 ath9k_hw_get_default_nf(ah, chan);
74 if (delta > 0) 74 if (delta > 0)
75 noise += delta; 75 noise += delta;
@@ -392,7 +392,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
392 clear_bit(NFCAL_PENDING, &caldata->cal_flags); 392 clear_bit(NFCAL_PENDING, &caldata->cal_flags);
393 ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray); 393 ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
394 chan->noisefloor = h[0].privNF; 394 chan->noisefloor = h[0].privNF;
395 ah->noise = ath9k_hw_getchan_noise(ah, chan); 395 ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
396 return true; 396 return true;
397} 397}
398EXPORT_SYMBOL(ath9k_hw_getnf); 398EXPORT_SYMBOL(ath9k_hw_getnf);
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 3d70b8c2bcdd..b8ed95e9a335 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -116,7 +116,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
116void ath9k_hw_bstuck_nfcal(struct ath_hw *ah); 116void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
117void ath9k_hw_reset_calibration(struct ath_hw *ah, 117void ath9k_hw_reset_calibration(struct ath_hw *ah,
118 struct ath9k_cal_list *currCal); 118 struct ath9k_cal_list *currCal);
119s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); 119s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
120 s16 nf);
120 121
121 122
122#endif /* CALIB_H */ 123#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 1be2c787aac9..83a2c59f680b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1050,6 +1050,9 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
1050 char buf[32]; 1050 char buf[32];
1051 ssize_t len; 1051 ssize_t len;
1052 1052
1053 if (config_enabled(CONFIG_ATH9K_TX99))
1054 return -EOPNOTSUPP;
1055
1053 len = min(count, sizeof(buf) - 1); 1056 len = min(count, sizeof(buf) - 1);
1054 if (copy_from_user(buf, user_buf, len)) 1057 if (copy_from_user(buf, user_buf, len))
1055 return -EFAULT; 1058 return -EFAULT;
@@ -1775,6 +1778,111 @@ void ath9k_deinit_debug(struct ath_softc *sc)
1775 } 1778 }
1776} 1779}
1777 1780
1781static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
1782 size_t count, loff_t *ppos)
1783{
1784 struct ath_softc *sc = file->private_data;
1785 char buf[3];
1786 unsigned int len;
1787
1788 len = sprintf(buf, "%d\n", sc->tx99_state);
1789 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1790}
1791
1792static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
1793 size_t count, loff_t *ppos)
1794{
1795 struct ath_softc *sc = file->private_data;
1796 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1797 char buf[32];
1798 bool start;
1799 ssize_t len;
1800 int r;
1801
1802 if (sc->nvifs > 1)
1803 return -EOPNOTSUPP;
1804
1805 len = min(count, sizeof(buf) - 1);
1806 if (copy_from_user(buf, user_buf, len))
1807 return -EFAULT;
1808
1809 if (strtobool(buf, &start))
1810 return -EINVAL;
1811
1812 if (start == sc->tx99_state) {
1813 if (!start)
1814 return count;
1815 ath_dbg(common, XMIT, "Resetting TX99\n");
1816 ath9k_tx99_deinit(sc);
1817 }
1818
1819 if (!start) {
1820 ath9k_tx99_deinit(sc);
1821 return count;
1822 }
1823
1824 r = ath9k_tx99_init(sc);
1825 if (r)
1826 return r;
1827
1828 return count;
1829}
1830
1831static const struct file_operations fops_tx99 = {
1832 .read = read_file_tx99,
1833 .write = write_file_tx99,
1834 .open = simple_open,
1835 .owner = THIS_MODULE,
1836 .llseek = default_llseek,
1837};
1838
1839static ssize_t read_file_tx99_power(struct file *file,
1840 char __user *user_buf,
1841 size_t count, loff_t *ppos)
1842{
1843 struct ath_softc *sc = file->private_data;
1844 char buf[32];
1845 unsigned int len;
1846
1847 len = sprintf(buf, "%d (%d dBm)\n",
1848 sc->tx99_power,
1849 sc->tx99_power / 2);
1850
1851 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1852}
1853
1854static ssize_t write_file_tx99_power(struct file *file,
1855 const char __user *user_buf,
1856 size_t count, loff_t *ppos)
1857{
1858 struct ath_softc *sc = file->private_data;
1859 int r;
1860 u8 tx_power;
1861
1862 r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
1863 if (r)
1864 return r;
1865
1866 if (tx_power > MAX_RATE_POWER)
1867 return -EINVAL;
1868
1869 sc->tx99_power = tx_power;
1870
1871 ath9k_ps_wakeup(sc);
1872 ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
1873 ath9k_ps_restore(sc);
1874
1875 return count;
1876}
1877
1878static const struct file_operations fops_tx99_power = {
1879 .read = read_file_tx99_power,
1880 .write = write_file_tx99_power,
1881 .open = simple_open,
1882 .owner = THIS_MODULE,
1883 .llseek = default_llseek,
1884};
1885
1778int ath9k_init_debug(struct ath_hw *ah) 1886int ath9k_init_debug(struct ath_hw *ah)
1779{ 1887{
1780 struct ath_common *common = ath9k_hw_common(ah); 1888 struct ath_common *common = ath9k_hw_common(ah);
@@ -1866,5 +1974,15 @@ int ath9k_init_debug(struct ath_hw *ah)
1866 debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc, 1974 debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
1867 &fops_btcoex); 1975 &fops_btcoex);
1868#endif 1976#endif
1977 if (config_enabled(CONFIG_ATH9K_TX99) &&
1978 AR_SREV_9300_20_OR_LATER(ah)) {
1979 debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
1980 sc->debug.debugfs_phy, sc,
1981 &fops_tx99);
1982 debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
1983 sc->debug.debugfs_phy, sc,
1984 &fops_tx99_power);
1985 }
1986
1869 return 0; 1987 return 0;
1870} 1988}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
index 3c839f06a06a..c6fa3d5b5d74 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.h
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -17,7 +17,7 @@
17 17
18#ifndef ATH9K_DFS_H 18#ifndef ATH9K_DFS_H
19#define ATH9K_DFS_H 19#define ATH9K_DFS_H
20#include "dfs_pattern_detector.h" 20#include "../dfs_pattern_detector.h"
21 21
22#if defined(CONFIG_ATH9K_DFS_CERTIFIED) 22#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
23/** 23/**
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 821599135d8a..90b8342d1ed4 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -20,16 +20,16 @@
20 20
21#include "ath9k.h" 21#include "ath9k.h"
22#include "dfs_debug.h" 22#include "dfs_debug.h"
23#include "../dfs_pattern_detector.h"
23 24
24 25static struct ath_dfs_pool_stats dfs_pool_stats = { 0 };
25struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
26 26
27#define ATH9K_DFS_STAT(s, p) \ 27#define ATH9K_DFS_STAT(s, p) \
28 len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \ 28 len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
29 sc->debug.stats.dfs_stats.p); 29 sc->debug.stats.dfs_stats.p);
30#define ATH9K_DFS_POOL_STAT(s, p) \ 30#define ATH9K_DFS_POOL_STAT(s, p) \
31 len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \ 31 len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
32 global_dfs_pool_stats.p); 32 dfs_pool_stats.p);
33 33
34static ssize_t read_file_dfs(struct file *file, char __user *user_buf, 34static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos) 35 size_t count, loff_t *ppos)
@@ -44,6 +44,9 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
44 if (buf == NULL) 44 if (buf == NULL)
45 return -ENOMEM; 45 return -ENOMEM;
46 46
47 if (sc->dfs_detector)
48 dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector);
49
47 len += scnprintf(buf + len, size - len, "DFS support for " 50 len += scnprintf(buf + len, size - len, "DFS support for "
48 "macVersion = 0x%x, macRev = 0x%x: %s\n", 51 "macVersion = 0x%x, macRev = 0x%x: %s\n",
49 hw_ver->macVersion, hw_ver->macRev, 52 hw_ver->macVersion, hw_ver->macRev,
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index e36810a4b585..0a7ddf4c88c9 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -51,25 +51,11 @@ struct ath_dfs_stats {
51 u32 radar_detected; 51 u32 radar_detected;
52}; 52};
53 53
54/**
55 * struct ath_dfs_pool_stats - DFS Statistics for global pools
56 */
57struct ath_dfs_pool_stats {
58 u32 pool_reference;
59 u32 pulse_allocated;
60 u32 pulse_alloc_error;
61 u32 pulse_used;
62 u32 pseq_allocated;
63 u32 pseq_alloc_error;
64 u32 pseq_used;
65};
66#if defined(CONFIG_ATH9K_DFS_DEBUGFS) 54#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
67 55
68#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++) 56#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
69void ath9k_dfs_init_debug(struct ath_softc *sc); 57void ath9k_dfs_init_debug(struct ath_softc *sc);
70 58
71#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
72#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
73extern struct ath_dfs_pool_stats global_dfs_pool_stats; 59extern struct ath_dfs_pool_stats global_dfs_pool_stats;
74 60
75#else 61#else
@@ -77,8 +63,6 @@ extern struct ath_dfs_pool_stats global_dfs_pool_stats;
77#define DFS_STAT_INC(sc, c) do { } while (0) 63#define DFS_STAT_INC(sc, c) do { } while (0)
78static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { } 64static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
79 65
80#define DFS_POOL_STAT_INC(c) do { } while (0)
81#define DFS_POOL_STAT_DEC(c) do { } while (0)
82#endif /* CONFIG_ATH9K_DFS_DEBUGFS */ 66#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
83 67
84#endif /* ATH9K_DFS_DEBUG_H */ 68#endif /* ATH9K_DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 83f4927aeaca..4f9378ddf07f 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,6 +78,22 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
78 ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf); 78 ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
79} 79}
80 80
81static inline void ath9k_hw_tx99_start(struct ath_hw *ah, u32 qnum)
82{
83 ath9k_hw_ops(ah)->tx99_start(ah, qnum);
84}
85
86static inline void ath9k_hw_tx99_stop(struct ath_hw *ah)
87{
88 ath9k_hw_ops(ah)->tx99_stop(ah);
89}
90
91static inline void ath9k_hw_tx99_set_txpower(struct ath_hw *ah, u8 power)
92{
93 if (ath9k_hw_ops(ah)->tx99_set_txpower)
94 ath9k_hw_ops(ah)->tx99_set_txpower(ah, power);
95}
96
81#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 97#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
82 98
83static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) 99static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index dcdbab48709e..54b04155e43b 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1885,7 +1885,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1885 } else if (caldata) { 1885 } else if (caldata) {
1886 clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags); 1886 clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
1887 } 1887 }
1888 ah->noise = ath9k_hw_getchan_noise(ah, chan); 1888 ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
1889 1889
1890 if (fastcc) { 1890 if (fastcc) {
1891 r = ath9k_hw_do_fastcc(ah, chan); 1891 r = ath9k_hw_do_fastcc(ah, chan);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 81fcbc756122..9ea24f1cba73 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -703,6 +703,10 @@ struct ath_hw_ops {
703 void (*spectral_scan_trigger)(struct ath_hw *ah); 703 void (*spectral_scan_trigger)(struct ath_hw *ah);
704 void (*spectral_scan_wait)(struct ath_hw *ah); 704 void (*spectral_scan_wait)(struct ath_hw *ah);
705 705
706 void (*tx99_start)(struct ath_hw *ah, u32 qnum);
707 void (*tx99_stop)(struct ath_hw *ah);
708 void (*tx99_set_txpower)(struct ath_hw *ah, u8 power);
709
706#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 710#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
707 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable); 711 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
708#endif 712#endif
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 7df728f36330..e89db64532f5 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -680,7 +680,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
680 sc->sc_ah = ah; 680 sc->sc_ah = ah;
681 pCap = &ah->caps; 681 pCap = &ah->caps;
682 682
683 sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET); 683 common = ath9k_hw_common(ah);
684 sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
685 sc->tx99_power = MAX_RATE_POWER + 1;
684 686
685 if (!pdata) { 687 if (!pdata) {
686 ah->ah_flags |= AH_USE_EEPROM; 688 ah->ah_flags |= AH_USE_EEPROM;
@@ -694,7 +696,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
694 ah->external_reset = pdata->external_reset; 696 ah->external_reset = pdata->external_reset;
695 } 697 }
696 698
697 common = ath9k_hw_common(ah);
698 common->ops = &ah->reg_ops; 699 common->ops = &ah->reg_ops;
699 common->bus_ops = bus_ops; 700 common->bus_ops = bus_ops;
700 common->ah = ah; 701 common->ah = ah;
@@ -785,6 +786,7 @@ err_queues:
785 ath9k_hw_deinit(ah); 786 ath9k_hw_deinit(ah);
786err_hw: 787err_hw:
787 ath9k_eeprom_release(sc); 788 ath9k_eeprom_release(sc);
789 dev_kfree_skb_any(sc->tx99_skb);
788 return ret; 790 return ret;
789} 791}
790 792
@@ -842,7 +844,6 @@ static const struct ieee80211_iface_limit if_limits[] = {
842 BIT(NL80211_IFTYPE_P2P_GO) }, 844 BIT(NL80211_IFTYPE_P2P_GO) },
843}; 845};
844 846
845
846static const struct ieee80211_iface_limit if_dfs_limits[] = { 847static const struct ieee80211_iface_limit if_dfs_limits[] = {
847 { .max = 1, .types = BIT(NL80211_IFTYPE_AP) }, 848 { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
848}; 849};
@@ -903,17 +904,18 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
903 904
904 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; 905 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
905 906
906 hw->wiphy->interface_modes = 907 if (!config_enabled(CONFIG_ATH9K_TX99)) {
907 BIT(NL80211_IFTYPE_P2P_GO) | 908 hw->wiphy->interface_modes =
908 BIT(NL80211_IFTYPE_P2P_CLIENT) | 909 BIT(NL80211_IFTYPE_P2P_GO) |
909 BIT(NL80211_IFTYPE_AP) | 910 BIT(NL80211_IFTYPE_P2P_CLIENT) |
910 BIT(NL80211_IFTYPE_WDS) | 911 BIT(NL80211_IFTYPE_AP) |
911 BIT(NL80211_IFTYPE_STATION) | 912 BIT(NL80211_IFTYPE_WDS) |
912 BIT(NL80211_IFTYPE_ADHOC) | 913 BIT(NL80211_IFTYPE_STATION) |
913 BIT(NL80211_IFTYPE_MESH_POINT); 914 BIT(NL80211_IFTYPE_ADHOC) |
914 915 BIT(NL80211_IFTYPE_MESH_POINT);
915 hw->wiphy->iface_combinations = if_comb; 916 hw->wiphy->iface_combinations = if_comb;
916 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 917 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
918 }
917 919
918 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 920 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
919 921
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 84a60644f93a..aed7e29dc50f 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -28,6 +28,13 @@ void ath_tx_complete_poll_work(struct work_struct *work)
28 int i; 28 int i;
29 bool needreset = false; 29 bool needreset = false;
30 30
31
32 if (sc->tx99_state) {
33 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
34 "skip tx hung detection on tx99\n");
35 return;
36 }
37
31 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 38 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
32 txq = sc->tx.txq_map[i]; 39 txq = sc->tx.txq_map[i];
33 40
@@ -70,7 +77,7 @@ void ath_hw_check(struct work_struct *work)
70 ath9k_ps_wakeup(sc); 77 ath9k_ps_wakeup(sc);
71 is_alive = ath9k_hw_check_alive(sc->sc_ah); 78 is_alive = ath9k_hw_check_alive(sc->sc_ah);
72 79
73 if (is_alive && !AR_SREV_9300(sc->sc_ah)) 80 if ((is_alive && !AR_SREV_9300(sc->sc_ah)) || sc->tx99_state)
74 goto out; 81 goto out;
75 else if (!is_alive && AR_SREV_9300(sc->sc_ah)) { 82 else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
76 ath_dbg(common, RESET, 83 ath_dbg(common, RESET,
@@ -141,6 +148,9 @@ void ath_hw_pll_work(struct work_struct *work)
141 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) 148 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
142 return; 149 return;
143 150
151 if (sc->tx99_state)
152 return;
153
144 ath9k_ps_wakeup(sc); 154 ath9k_ps_wakeup(sc);
145 pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah); 155 pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
146 ath9k_ps_restore(sc); 156 ath9k_ps_restore(sc);
@@ -518,7 +528,8 @@ void ath_update_survey_nf(struct ath_softc *sc, int channel)
518 528
519 if (chan->noisefloor) { 529 if (chan->noisefloor) {
520 survey->filled |= SURVEY_INFO_NOISE_DBM; 530 survey->filled |= SURVEY_INFO_NOISE_DBM;
521 survey->noise = ath9k_hw_getchan_noise(ah, chan); 531 survey->noise = ath9k_hw_getchan_noise(ah, chan,
532 chan->noisefloor);
522 } 533 }
523} 534}
524 535
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 20a2fbc1e34f..74f452c7b166 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1046,6 +1046,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1046 1046
1047 mutex_lock(&sc->mutex); 1047 mutex_lock(&sc->mutex);
1048 1048
1049 if (config_enabled(CONFIG_ATH9K_TX99)) {
1050 if (sc->nvifs >= 1) {
1051 mutex_unlock(&sc->mutex);
1052 return -EOPNOTSUPP;
1053 }
1054 sc->tx99_vif = vif;
1055 }
1056
1049 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); 1057 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
1050 sc->nvifs++; 1058 sc->nvifs++;
1051 1059
@@ -1074,9 +1082,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1074 struct ath_softc *sc = hw->priv; 1082 struct ath_softc *sc = hw->priv;
1075 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1083 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1076 1084
1077 ath_dbg(common, CONFIG, "Change Interface\n");
1078 mutex_lock(&sc->mutex); 1085 mutex_lock(&sc->mutex);
1079 1086
1087 if (config_enabled(CONFIG_ATH9K_TX99)) {
1088 mutex_unlock(&sc->mutex);
1089 return -EOPNOTSUPP;
1090 }
1091
1092 ath_dbg(common, CONFIG, "Change Interface\n");
1093
1080 if (ath9k_uses_beacons(vif->type)) 1094 if (ath9k_uses_beacons(vif->type))
1081 ath9k_beacon_remove_slot(sc, vif); 1095 ath9k_beacon_remove_slot(sc, vif);
1082 1096
@@ -1106,6 +1120,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1106 mutex_lock(&sc->mutex); 1120 mutex_lock(&sc->mutex);
1107 1121
1108 sc->nvifs--; 1122 sc->nvifs--;
1123 sc->tx99_vif = NULL;
1109 1124
1110 if (ath9k_uses_beacons(vif->type)) 1125 if (ath9k_uses_beacons(vif->type))
1111 ath9k_beacon_remove_slot(sc, vif); 1126 ath9k_beacon_remove_slot(sc, vif);
@@ -1127,6 +1142,9 @@ static void ath9k_enable_ps(struct ath_softc *sc)
1127 struct ath_hw *ah = sc->sc_ah; 1142 struct ath_hw *ah = sc->sc_ah;
1128 struct ath_common *common = ath9k_hw_common(ah); 1143 struct ath_common *common = ath9k_hw_common(ah);
1129 1144
1145 if (config_enabled(CONFIG_ATH9K_TX99))
1146 return;
1147
1130 sc->ps_enabled = true; 1148 sc->ps_enabled = true;
1131 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1149 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1132 if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) { 1150 if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -1143,6 +1161,9 @@ static void ath9k_disable_ps(struct ath_softc *sc)
1143 struct ath_hw *ah = sc->sc_ah; 1161 struct ath_hw *ah = sc->sc_ah;
1144 struct ath_common *common = ath9k_hw_common(ah); 1162 struct ath_common *common = ath9k_hw_common(ah);
1145 1163
1164 if (config_enabled(CONFIG_ATH9K_TX99))
1165 return;
1166
1146 sc->ps_enabled = false; 1167 sc->ps_enabled = false;
1147 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE); 1168 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
1148 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1169 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
@@ -1166,6 +1187,9 @@ void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
1166 struct ath_common *common = ath9k_hw_common(ah); 1187 struct ath_common *common = ath9k_hw_common(ah);
1167 u32 rxfilter; 1188 u32 rxfilter;
1168 1189
1190 if (config_enabled(CONFIG_ATH9K_TX99))
1191 return;
1192
1169 if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { 1193 if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
1170 ath_err(common, "spectrum analyzer not implemented on this hardware\n"); 1194 ath_err(common, "spectrum analyzer not implemented on this hardware\n");
1171 return; 1195 return;
@@ -1745,6 +1769,9 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
1745 unsigned long flags; 1769 unsigned long flags;
1746 int pos; 1770 int pos;
1747 1771
1772 if (config_enabled(CONFIG_ATH9K_TX99))
1773 return -EOPNOTSUPP;
1774
1748 spin_lock_irqsave(&common->cc_lock, flags); 1775 spin_lock_irqsave(&common->cc_lock, flags);
1749 if (idx == 0) 1776 if (idx == 0)
1750 ath_update_survey_stats(sc); 1777 ath_update_survey_stats(sc);
@@ -1777,6 +1804,9 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
1777 struct ath_softc *sc = hw->priv; 1804 struct ath_softc *sc = hw->priv;
1778 struct ath_hw *ah = sc->sc_ah; 1805 struct ath_hw *ah = sc->sc_ah;
1779 1806
1807 if (config_enabled(CONFIG_ATH9K_TX99))
1808 return;
1809
1780 mutex_lock(&sc->mutex); 1810 mutex_lock(&sc->mutex);
1781 ah->coverage_class = coverage_class; 1811 ah->coverage_class = coverage_class;
1782 1812
@@ -2343,6 +2373,134 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
2343 sc->csa_vif = vif; 2373 sc->csa_vif = vif;
2344} 2374}
2345 2375
2376static void ath9k_tx99_stop(struct ath_softc *sc)
2377{
2378 struct ath_hw *ah = sc->sc_ah;
2379 struct ath_common *common = ath9k_hw_common(ah);
2380
2381 ath_drain_all_txq(sc);
2382 ath_startrecv(sc);
2383
2384 ath9k_hw_set_interrupts(ah);
2385 ath9k_hw_enable_interrupts(ah);
2386
2387 ieee80211_wake_queues(sc->hw);
2388
2389 kfree_skb(sc->tx99_skb);
2390 sc->tx99_skb = NULL;
2391 sc->tx99_state = false;
2392
2393 ath9k_hw_tx99_stop(sc->sc_ah);
2394 ath_dbg(common, XMIT, "TX99 stopped\n");
2395}
2396
2397static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
2398{
2399 static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
2400 0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
2401 0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
2402 0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
2403 0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
2404 0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
2405 0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
2406 0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
2407 u32 len = 1200;
2408 struct ieee80211_hw *hw = sc->hw;
2409 struct ieee80211_hdr *hdr;
2410 struct ieee80211_tx_info *tx_info;
2411 struct sk_buff *skb;
2412
2413 skb = alloc_skb(len, GFP_KERNEL);
2414 if (!skb)
2415 return NULL;
2416
2417 skb_put(skb, len);
2418
2419 memset(skb->data, 0, len);
2420
2421 hdr = (struct ieee80211_hdr *)skb->data;
2422 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
2423 hdr->duration_id = 0;
2424
2425 memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
2426 memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
2427 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
2428
2429 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2430
2431 tx_info = IEEE80211_SKB_CB(skb);
2432 memset(tx_info, 0, sizeof(*tx_info));
2433 tx_info->band = hw->conf.chandef.chan->band;
2434 tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
2435 tx_info->control.vif = sc->tx99_vif;
2436
2437 memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
2438
2439 return skb;
2440}
2441
2442void ath9k_tx99_deinit(struct ath_softc *sc)
2443{
2444 ath_reset(sc);
2445
2446 ath9k_ps_wakeup(sc);
2447 ath9k_tx99_stop(sc);
2448 ath9k_ps_restore(sc);
2449}
2450
2451int ath9k_tx99_init(struct ath_softc *sc)
2452{
2453 struct ieee80211_hw *hw = sc->hw;
2454 struct ath_hw *ah = sc->sc_ah;
2455 struct ath_common *common = ath9k_hw_common(ah);
2456 struct ath_tx_control txctl;
2457 int r;
2458
2459 if (sc->sc_flags & SC_OP_INVALID) {
2460 ath_err(common,
2461 "driver is in invalid state unable to use TX99");
2462 return -EINVAL;
2463 }
2464
2465 sc->tx99_skb = ath9k_build_tx99_skb(sc);
2466 if (!sc->tx99_skb)
2467 return -ENOMEM;
2468
2469 memset(&txctl, 0, sizeof(txctl));
2470 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
2471
2472 ath_reset(sc);
2473
2474 ath9k_ps_wakeup(sc);
2475
2476 ath9k_hw_disable_interrupts(ah);
2477 atomic_set(&ah->intr_ref_cnt, -1);
2478 ath_drain_all_txq(sc);
2479 ath_stoprecv(sc);
2480
2481 sc->tx99_state = true;
2482
2483 ieee80211_stop_queues(hw);
2484
2485 if (sc->tx99_power == MAX_RATE_POWER + 1)
2486 sc->tx99_power = MAX_RATE_POWER;
2487
2488 ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
2489 r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
2490 if (r) {
2491 ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
2492 return r;
2493 }
2494
2495	ath_dbg(common, XMIT, "TX99 xmit started using %d (%d dBm)\n",
2496 sc->tx99_power,
2497 sc->tx99_power / 2);
2498
2499	/* We leave the hardware awake as it will be chugging on */
2500
2501 return 0;
2502}
2503
2346struct ieee80211_ops ath9k_ops = { 2504struct ieee80211_ops ath9k_ops = {
2347 .tx = ath9k_tx, 2505 .tx = ath9k_tx,
2348 .start = ath9k_start, 2506 .start = ath9k_start,
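
The frame that ath9k_build_tx99_skb() above puts on the air is simple to describe: a 1200-byte, three-address 802.11 data frame whose addr1/addr2/addr3 are all the device's permanent MAC, queued with the no-ack flag, with one 64-byte PN9 block copied in right after the header and the rest of the payload left zeroed. The following user-space sketch mirrors only that byte layout (build_tx99_frame(), the struct name and the placeholder MAC are illustrative, not driver API); note that sc->tx99_power is kept in half-dB steps, which is why the log line above prints power / 2 dBm.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TX99_FRAME_LEN 1200

struct ieee80211_hdr3 {			/* minimal 3-address 802.11 data header (24 bytes) */
	uint16_t frame_control;
	uint16_t duration_id;
	uint8_t  addr1[6], addr2[6], addr3[6];
	uint16_t seq_ctrl;
} __attribute__((packed));

static const uint8_t pn9[64] = {	/* same PN9 block as the driver's table */
	0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
	0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
	0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
	0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
	0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
	0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
	0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
	0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0,
};

static void build_tx99_frame(uint8_t *buf, const uint8_t mac[6], uint16_t seq)
{
	struct ieee80211_hdr3 *hdr = (struct ieee80211_hdr3 *)buf;

	memset(buf, 0, TX99_FRAME_LEN);
	hdr->frame_control = 0x0008;	/* data frame; little-endian host assumed */
	memcpy(hdr->addr1, mac, 6);	/* all three addresses are the device MAC */
	memcpy(hdr->addr2, mac, 6);
	memcpy(hdr->addr3, mac, 6);
	hdr->seq_ctrl = seq;
	/* payload starts with one PN9 block, the remainder stays zero */
	memcpy(buf + sizeof(*hdr), pn9, sizeof(pn9));
}

int main(void)
{
	uint8_t frame[TX99_FRAME_LEN];
	uint8_t mac[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };	/* placeholder MAC */
	int tx99_power = 40;		/* half-dB units, as sc->tx99_power is */

	build_tx99_frame(frame, mac, 0);
	printf("tx99 frame ready, %d bytes, %d dBm\n", TX99_FRAME_LEN, tx99_power / 2);
	return 0;
}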
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 8b788efb41fd..95ddca5495d4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -375,6 +375,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
375{ 375{
376 u32 rfilt; 376 u32 rfilt;
377 377
378 if (config_enabled(CONFIG_ATH9K_TX99))
379 return 0;
380
378 rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 381 rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
379 | ATH9K_RX_FILTER_MCAST; 382 | ATH9K_RX_FILTER_MCAST;
380 383
@@ -972,14 +975,15 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
972{ 975{
973#ifdef CONFIG_ATH9K_DEBUGFS 976#ifdef CONFIG_ATH9K_DEBUGFS
974 struct ath_hw *ah = sc->sc_ah; 977 struct ath_hw *ah = sc->sc_ah;
975 u8 bins[SPECTRAL_HT20_NUM_BINS]; 978 u8 num_bins, *bins, *vdata = (u8 *)hdr;
976 u8 *vdata = (u8 *)hdr; 979 struct fft_sample_ht20 fft_sample_20;
977 struct fft_sample_ht20 fft_sample; 980 struct fft_sample_ht20_40 fft_sample_40;
981 struct fft_sample_tlv *tlv;
978 struct ath_radar_info *radar_info; 982 struct ath_radar_info *radar_info;
979 struct ath_ht20_mag_info *mag_info;
980 int len = rs->rs_datalen; 983 int len = rs->rs_datalen;
981 int dc_pos; 984 int dc_pos;
982 u16 length, max_magnitude; 985 u16 fft_len, length, freq = ah->curchan->chan->center_freq;
986 enum nl80211_channel_type chan_type;
983 987
984 /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer 988 /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
985 * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT 989 * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -997,45 +1001,44 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
997 if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK)) 1001 if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
998 return 0; 1002 return 0;
999 1003
1000 /* Variation in the data length is possible and will be fixed later. 1004 chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
1001 * Note that we only support HT20 for now. 1005 if ((chan_type == NL80211_CHAN_HT40MINUS) ||
1002 * 1006 (chan_type == NL80211_CHAN_HT40PLUS)) {
1003 * TODO: add HT20_40 support as well. 1007 fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
1004 */ 1008 num_bins = SPECTRAL_HT20_40_NUM_BINS;
1005 if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) || 1009 bins = (u8 *)fft_sample_40.data;
1006 (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1)) 1010 } else {
1007 return 1; 1011 fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
1008 1012 num_bins = SPECTRAL_HT20_NUM_BINS;
1009 fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20; 1013 bins = (u8 *)fft_sample_20.data;
1010 length = sizeof(fft_sample) - sizeof(fft_sample.tlv); 1014 }
1011 fft_sample.tlv.length = __cpu_to_be16(length);
1012 1015
1013 fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq); 1016 /* Variation in the data length is possible and will be fixed later */
1014 fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0); 1017 if ((len > fft_len + 2) || (len < fft_len - 1))
1015 fft_sample.noise = ah->noise; 1018 return 1;
1016 1019
1017 switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) { 1020 switch (len - fft_len) {
1018 case 0: 1021 case 0:
1019 /* length correct, nothing to do. */ 1022 /* length correct, nothing to do. */
1020 memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS); 1023 memcpy(bins, vdata, num_bins);
1021 break; 1024 break;
1022 case -1: 1025 case -1:
1023 /* first byte missing, duplicate it. */ 1026 /* first byte missing, duplicate it. */
1024 memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1); 1027 memcpy(&bins[1], vdata, num_bins - 1);
1025 bins[0] = vdata[0]; 1028 bins[0] = vdata[0];
1026 break; 1029 break;
1027 case 2: 1030 case 2:
1028 /* MAC added 2 extra bytes at bin 30 and 32, remove them. */ 1031 /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
1029 memcpy(bins, vdata, 30); 1032 memcpy(bins, vdata, 30);
1030 bins[30] = vdata[31]; 1033 bins[30] = vdata[31];
1031 memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31); 1034 memcpy(&bins[31], &vdata[33], num_bins - 31);
1032 break; 1035 break;
1033 case 1: 1036 case 1:
1034 /* MAC added 2 extra bytes AND first byte is missing. */ 1037 /* MAC added 2 extra bytes AND first byte is missing. */
1035 bins[0] = vdata[0]; 1038 bins[0] = vdata[0];
1036 memcpy(&bins[0], vdata, 30); 1039 memcpy(&bins[1], vdata, 30);
1037 bins[31] = vdata[31]; 1040 bins[31] = vdata[31];
1038 memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32); 1041 memcpy(&bins[32], &vdata[33], num_bins - 32);
1039 break; 1042 break;
1040 default: 1043 default:
1041 return 1; 1044 return 1;
@@ -1044,23 +1047,93 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
1044 /* DC value (value in the middle) is the blind spot of the spectral 1047 /* DC value (value in the middle) is the blind spot of the spectral
1045 * sample and invalid, interpolate it. 1048 * sample and invalid, interpolate it.
1046 */ 1049 */
1047 dc_pos = SPECTRAL_HT20_NUM_BINS / 2; 1050 dc_pos = num_bins / 2;
1048 bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2; 1051 bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
1049 1052
1050 /* mag data is at the end of the frame, in front of radar_info */ 1053 if ((chan_type == NL80211_CHAN_HT40MINUS) ||
1051 mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1; 1054 (chan_type == NL80211_CHAN_HT40PLUS)) {
1055 s8 lower_rssi, upper_rssi;
1056 s16 ext_nf;
1057 u8 lower_max_index, upper_max_index;
1058 u8 lower_bitmap_w, upper_bitmap_w;
1059 u16 lower_mag, upper_mag;
1060 struct ath9k_hw_cal_data *caldata = ah->caldata;
1061 struct ath_ht20_40_mag_info *mag_info;
1062
1063 if (caldata)
1064 ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
1065 caldata->nfCalHist[3].privNF);
1066 else
1067 ext_nf = ATH_DEFAULT_NOISE_FLOOR;
1068
1069 length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
1070 fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
1071 fft_sample_40.tlv.length = __cpu_to_be16(length);
1072 fft_sample_40.freq = __cpu_to_be16(freq);
1073 fft_sample_40.channel_type = chan_type;
1074
1075 if (chan_type == NL80211_CHAN_HT40PLUS) {
1076 lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
1077 upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
1052 1078
1053 /* copy raw bins without scaling them */ 1079 fft_sample_40.lower_noise = ah->noise;
1054 memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS); 1080 fft_sample_40.upper_noise = ext_nf;
1055 fft_sample.max_exp = mag_info->max_exp & 0xf; 1081 } else {
1082 lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
1083 upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
1056 1084
1057 max_magnitude = spectral_max_magnitude(mag_info->all_bins); 1085 fft_sample_40.lower_noise = ext_nf;
1058 fft_sample.max_magnitude = __cpu_to_be16(max_magnitude); 1086 fft_sample_40.upper_noise = ah->noise;
1059 fft_sample.max_index = spectral_max_index(mag_info->all_bins); 1087 }
1060 fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins); 1088 fft_sample_40.lower_rssi = lower_rssi;
1061 fft_sample.tsf = __cpu_to_be64(tsf); 1089 fft_sample_40.upper_rssi = upper_rssi;
1090
1091 mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
1092 lower_mag = spectral_max_magnitude(mag_info->lower_bins);
1093 upper_mag = spectral_max_magnitude(mag_info->upper_bins);
1094 fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
1095 fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
1096 lower_max_index = spectral_max_index(mag_info->lower_bins);
1097 upper_max_index = spectral_max_index(mag_info->upper_bins);
1098 fft_sample_40.lower_max_index = lower_max_index;
1099 fft_sample_40.upper_max_index = upper_max_index;
1100 lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
1101 upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
1102 fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
1103 fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
1104 fft_sample_40.max_exp = mag_info->max_exp & 0xf;
1105
1106 fft_sample_40.tsf = __cpu_to_be64(tsf);
1107
1108 tlv = (struct fft_sample_tlv *)&fft_sample_40;
1109 } else {
1110 u8 max_index, bitmap_w;
1111 u16 magnitude;
1112 struct ath_ht20_mag_info *mag_info;
1113
1114 length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
1115 fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
1116 fft_sample_20.tlv.length = __cpu_to_be16(length);
1117 fft_sample_20.freq = __cpu_to_be16(freq);
1118
1119 fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
1120 fft_sample_20.noise = ah->noise;
1121
1122 mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
1123 magnitude = spectral_max_magnitude(mag_info->all_bins);
1124 fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
1125 max_index = spectral_max_index(mag_info->all_bins);
1126 fft_sample_20.max_index = max_index;
1127 bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
1128 fft_sample_20.bitmap_weight = bitmap_w;
1129 fft_sample_20.max_exp = mag_info->max_exp & 0xf;
1130
1131 fft_sample_20.tsf = __cpu_to_be64(tsf);
1132
1133 tlv = (struct fft_sample_tlv *)&fft_sample_20;
1134 }
1062 1135
1063 ath_debug_send_fft_sample(sc, &fft_sample.tlv); 1136 ath_debug_send_fft_sample(sc, tlv);
1064 return 1; 1137 return 1;
1065#else 1138#else
1066 return 0; 1139 return 0;
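
The rewritten length handling in ath_process_fft() boils down to one small repair pass that now serves both the HT20 and the HT20/40 report formats: depending on whether the MAC dropped the first byte and/or inserted two spurious bytes around bin 30, the magnitude bins are re-packed into a num_bins buffer, and the DC bin in the middle (a blind spot of the spectral sample) is interpolated from its neighbours. A standalone sketch of that pass, under the assumption that fixup_fft_bins() is an illustrative name and delta stands for the difference between the reported and the expected data length:

#include <stdint.h>
#include <string.h>

/* Normalise a spectral report into num_bins magnitude bins.
 * Returns 0 on success, 1 if the sample should be discarded,
 * matching the convention of the driver code above. */
static int fixup_fft_bins(uint8_t *bins, const uint8_t *vdata,
			  int delta, int num_bins)
{
	int dc_pos = num_bins / 2;

	switch (delta) {
	case 0:				/* length correct, copy as-is */
		memcpy(bins, vdata, num_bins);
		break;
	case -1:			/* first byte missing, duplicate it */
		bins[0] = vdata[0];
		memcpy(&bins[1], vdata, num_bins - 1);
		break;
	case 2:				/* two extra bytes at bins 30 and 32, drop them */
		memcpy(bins, vdata, 30);
		bins[30] = vdata[31];
		memcpy(&bins[31], &vdata[33], num_bins - 31);
		break;
	case 1:				/* extra bytes AND missing first byte */
		bins[0] = vdata[0];
		memcpy(&bins[1], vdata, 30);
		bins[31] = vdata[31];
		memcpy(&bins[32], &vdata[33], num_bins - 32);
		break;
	default:
		return 1;		/* unexpected length, discard the sample */
	}

	/* DC bin is invalid; use the mean of its two neighbours */
	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
	return 0;
}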
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 47696d29743c..09cdbcd09739 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1241,12 +1241,13 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1241 if (bf->bf_next) 1241 if (bf->bf_next)
1242 info.link = bf->bf_next->bf_daddr; 1242 info.link = bf->bf_next->bf_daddr;
1243 else 1243 else
1244 info.link = 0; 1244 info.link = (sc->tx99_state) ? bf->bf_daddr : 0;
1245 1245
1246 if (!bf_first) { 1246 if (!bf_first) {
1247 bf_first = bf; 1247 bf_first = bf;
1248 1248
1249 info.flags = ATH9K_TXDESC_INTREQ; 1249 if (!sc->tx99_state)
1250 info.flags = ATH9K_TXDESC_INTREQ;
1250 if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) || 1251 if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
1251 txq == sc->tx.uapsdq) 1252 txq == sc->tx.uapsdq)
1252 info.flags |= ATH9K_TXDESC_CLRDMASK; 1253 info.flags |= ATH9K_TXDESC_CLRDMASK;
@@ -1941,7 +1942,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1941 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1942 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1942 } 1943 }
1943 1944
1944 if (!edma) { 1945 if (!edma || sc->tx99_state) {
1945 TX_STAT_INC(txq->axq_qnum, txstart); 1946 TX_STAT_INC(txq->axq_qnum, txstart);
1946 ath9k_hw_txstart(ah, txq->axq_qnum); 1947 ath9k_hw_txstart(ah, txq->axq_qnum);
1947 } 1948 }
@@ -2020,6 +2021,9 @@ static void setup_frame_info(struct ieee80211_hw *hw,
2020 fi->keyix = ATH9K_TXKEYIX_INVALID; 2021 fi->keyix = ATH9K_TXKEYIX_INVALID;
2021 fi->keytype = keytype; 2022 fi->keytype = keytype;
2022 fi->framelen = framelen; 2023 fi->framelen = framelen;
2024
2025 if (!rate)
2026 return;
2023 fi->rtscts_rate = rate->hw_value; 2027 fi->rtscts_rate = rate->hw_value;
2024 if (short_preamble) 2028 if (short_preamble)
2025 fi->rtscts_rate |= rate->hw_value_short; 2029 fi->rtscts_rate |= rate->hw_value_short;
@@ -2371,6 +2375,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
2371 2375
2372 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE); 2376 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
2373 bf->bf_buf_addr = 0; 2377 bf->bf_buf_addr = 0;
2378 if (sc->tx99_state)
2379 goto skip_tx_complete;
2374 2380
2375 if (bf->bf_state.bfs_paprd) { 2381 if (bf->bf_state.bfs_paprd) {
2376 if (time_after(jiffies, 2382 if (time_after(jiffies,
@@ -2383,6 +2389,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
2383 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags); 2389 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
2384 ath_tx_complete(sc, skb, tx_flags, txq); 2390 ath_tx_complete(sc, skb, tx_flags, txq);
2385 } 2391 }
2392skip_tx_complete:
2386 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't 2393 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2387 * accidentally reference it later. 2394 * accidentally reference it later.
2388 */ 2395 */
@@ -2741,3 +2748,46 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2741 ath_txq_unlock(sc, txq); 2748 ath_txq_unlock(sc, txq);
2742 } 2749 }
2743} 2750}
2751
2752int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
2753 struct ath_tx_control *txctl)
2754{
2755 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2756 struct ath_frame_info *fi = get_frame_info(skb);
2757 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2758 struct ath_buf *bf;
2759 int padpos, padsize;
2760
2761 padpos = ieee80211_hdrlen(hdr->frame_control);
2762 padsize = padpos & 3;
2763
2764 if (padsize && skb->len > padpos) {
2765 if (skb_headroom(skb) < padsize) {
2766 ath_dbg(common, XMIT,
2767 "tx99 padding failed\n");
2768 return -EINVAL;
2769 }
2770
2771 skb_push(skb, padsize);
2772 memmove(skb->data, skb->data + padsize, padpos);
2773 }
2774
2775 fi->keyix = ATH9K_TXKEYIX_INVALID;
2776 fi->framelen = skb->len + FCS_LEN;
2777 fi->keytype = ATH9K_KEY_TYPE_CLEAR;
2778
2779 bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
2780 if (!bf) {
2781 ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
2782 return -EINVAL;
2783 }
2784
2785 ath_set_rates(sc->tx99_vif, NULL, bf);
2786
2787 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
2788 ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
2789
2790 ath_tx_send_normal(sc, txctl->txq, NULL, skb);
2791
2792 return 0;
2793}
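
The padding step at the top of ath9k_tx99_send() is the driver's usual trick for keeping the frame body 4-byte aligned: grow the buffer at the front by hdrlen & 3 bytes and slide the header into the new space, so the pad ends up between header and payload. A quick user-space illustration of the same pointer arithmetic, assuming pad_80211_header() is an illustrative helper working on a plain buffer rather than an skb:

#include <stdint.h>
#include <string.h>
#include <assert.h>

/* buf holds a frame of frame_len bytes starting at offset data_off and has
 * at least 3 bytes of headroom in front of it. Returns the new offset of
 * the frame start after the header has been slid down by (hdrlen & 3). */
static size_t pad_80211_header(uint8_t *buf, size_t data_off,
			       size_t frame_len, size_t hdrlen)
{
	size_t padsize = hdrlen & 3;

	if (!padsize || frame_len <= hdrlen)
		return data_off;		/* already aligned, nothing to do */

	assert(data_off >= padsize);		/* headroom check, as in the driver */
	/* copy the header padsize bytes earlier; the stale bytes left behind
	 * become the pad between header and payload */
	memmove(buf + data_off - padsize, buf + data_off, hdrlen);
	return data_off - padsize;
}

For the TX99 frame itself the 24-byte header is already aligned (24 & 3 == 0), so no bytes are inserted; the path matters for header lengths such as the 26-byte QoS variant.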
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 491305c81fce..a1a69c5db409 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -19,7 +19,7 @@
19 19
20#include "dfs_pattern_detector.h" 20#include "dfs_pattern_detector.h"
21#include "dfs_pri_detector.h" 21#include "dfs_pri_detector.h"
22#include "ath9k.h" 22#include "ath.h"
23 23
24/* 24/*
25 * tolerated deviation of radar time stamp in usecs on both sides 25 * tolerated deviation of radar time stamp in usecs on both sides
@@ -143,7 +143,6 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
143{ 143{
144 u32 sz, i; 144 u32 sz, i;
145 struct channel_detector *cd; 145 struct channel_detector *cd;
146 struct ath_common *common = ath9k_hw_common(dpd->ah);
147 146
148 cd = kmalloc(sizeof(*cd), GFP_ATOMIC); 147 cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
149 if (cd == NULL) 148 if (cd == NULL)
@@ -167,7 +166,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
167 return cd; 166 return cd;
168 167
169fail: 168fail:
170 ath_dbg(common, DFS, 169 ath_dbg(dpd->common, DFS,
171 "failed to allocate channel_detector for freq=%d\n", freq); 170 "failed to allocate channel_detector for freq=%d\n", freq);
172 channel_detector_exit(dpd, cd); 171 channel_detector_exit(dpd, cd);
173 return NULL; 172 return NULL;
@@ -242,7 +241,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
242 struct pri_detector *pd = cd->detectors[i]; 241 struct pri_detector *pd = cd->detectors[i];
243 struct pri_sequence *ps = pd->add_pulse(pd, event); 242 struct pri_sequence *ps = pd->add_pulse(pd, event);
244 if (ps != NULL) { 243 if (ps != NULL) {
245 ath_dbg(ath9k_hw_common(dpd->ah), DFS, 244 ath_dbg(dpd->common, DFS,
246 "DFS: radar found on freq=%d: id=%d, pri=%d, " 245 "DFS: radar found on freq=%d: id=%d, pri=%d, "
247 "count=%d, count_false=%d\n", 246 "count=%d, count_false=%d\n",
248 event->freq, pd->rs->type_id, 247 event->freq, pd->rs->type_id,
@@ -254,6 +253,12 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
254 return false; 253 return false;
255} 254}
256 255
256static struct ath_dfs_pool_stats
257dpd_get_stats(struct dfs_pattern_detector *dpd)
258{
259 return global_dfs_pool_stats;
260}
261
257static bool dpd_set_domain(struct dfs_pattern_detector *dpd, 262static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
258 enum nl80211_dfs_regions region) 263 enum nl80211_dfs_regions region)
259{ 264{
@@ -284,14 +289,18 @@ static struct dfs_pattern_detector default_dpd = {
284 .exit = dpd_exit, 289 .exit = dpd_exit,
285 .set_dfs_domain = dpd_set_domain, 290 .set_dfs_domain = dpd_set_domain,
286 .add_pulse = dpd_add_pulse, 291 .add_pulse = dpd_add_pulse,
292 .get_stats = dpd_get_stats,
287 .region = NL80211_DFS_UNSET, 293 .region = NL80211_DFS_UNSET,
288}; 294};
289 295
290struct dfs_pattern_detector * 296struct dfs_pattern_detector *
291dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region) 297dfs_pattern_detector_init(struct ath_common *common,
298 enum nl80211_dfs_regions region)
292{ 299{
293 struct dfs_pattern_detector *dpd; 300 struct dfs_pattern_detector *dpd;
294 struct ath_common *common = ath9k_hw_common(ah); 301
302 if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
303 return NULL;
295 304
296 dpd = kmalloc(sizeof(*dpd), GFP_KERNEL); 305 dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
297 if (dpd == NULL) 306 if (dpd == NULL)
@@ -300,7 +309,7 @@ dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
300 *dpd = default_dpd; 309 *dpd = default_dpd;
301 INIT_LIST_HEAD(&dpd->channel_detectors); 310 INIT_LIST_HEAD(&dpd->channel_detectors);
302 311
303 dpd->ah = ah; 312 dpd->common = common;
304 if (dpd->set_dfs_domain(dpd, region)) 313 if (dpd->set_dfs_domain(dpd, region))
305 return dpd; 314 return dpd;
306 315
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
index 90a5abcc4265..dde2652b787c 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -22,6 +22,19 @@
22#include <linux/nl80211.h> 22#include <linux/nl80211.h>
23 23
24/** 24/**
25 * struct ath_dfs_pool_stats - DFS Statistics for global pools
26 */
27struct ath_dfs_pool_stats {
28 u32 pool_reference;
29 u32 pulse_allocated;
30 u32 pulse_alloc_error;
31 u32 pulse_used;
32 u32 pseq_allocated;
33 u32 pseq_alloc_error;
34 u32 pseq_used;
35};
36
37/**
25 * struct pulse_event - describing pulses reported by PHY 38 * struct pulse_event - describing pulses reported by PHY
26 * @ts: pulse time stamp in us 39 * @ts: pulse time stamp in us
27 * @freq: channel frequency in MHz 40 * @freq: channel frequency in MHz
@@ -77,11 +90,12 @@ struct dfs_pattern_detector {
77 bool (*add_pulse)(struct dfs_pattern_detector *dpd, 90 bool (*add_pulse)(struct dfs_pattern_detector *dpd,
78 struct pulse_event *pe); 91 struct pulse_event *pe);
79 92
93 struct ath_dfs_pool_stats (*get_stats)(struct dfs_pattern_detector *dpd);
80 enum nl80211_dfs_regions region; 94 enum nl80211_dfs_regions region;
81 u8 num_radar_types; 95 u8 num_radar_types;
82 u64 last_pulse_ts; 96 u64 last_pulse_ts;
83 /* needed for ath_dbg() */ 97 /* needed for ath_dbg() */
84 struct ath_hw *ah; 98 struct ath_common *common;
85 99
86 const struct radar_detector_specs *radar_spec; 100 const struct radar_detector_specs *radar_spec;
87 struct list_head channel_detectors; 101 struct list_head channel_detectors;
@@ -92,15 +106,7 @@ struct dfs_pattern_detector {
92 * @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation 106 * @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
93 * @return instance pointer on success, NULL otherwise 107 * @return instance pointer on success, NULL otherwise
94 */ 108 */
95#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
96extern struct dfs_pattern_detector * 109extern struct dfs_pattern_detector *
97dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region); 110dfs_pattern_detector_init(struct ath_common *common,
98#else 111 enum nl80211_dfs_regions region);
99static inline struct dfs_pattern_detector *
100dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
101{
102 return NULL;
103}
104#endif /* CONFIG_ATH9K_DFS_CERTIFIED */
105
106#endif /* DFS_PATTERN_DETECTOR_H */ 112#endif /* DFS_PATTERN_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index c718fc379a10..43b608178884 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -17,10 +17,14 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19 19
20#include "ath9k.h" 20#include "ath.h"
21#include "dfs_pattern_detector.h" 21#include "dfs_pattern_detector.h"
22#include "dfs_pri_detector.h" 22#include "dfs_pri_detector.h"
23#include "dfs_debug.h" 23
24struct ath_dfs_pool_stats global_dfs_pool_stats = {};
25
26#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
27#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
24 28
25/** 29/**
26 * struct pulse_elem - elements in pulse queue 30 * struct pulse_elem - elements in pulse queue
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/dfs_pri_detector.h
index 723962d1abc6..79f0fff4d1e6 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
+++ b/drivers/net/wireless/ath/dfs_pri_detector.h
@@ -19,6 +19,8 @@
19 19
20#include <linux/list.h> 20#include <linux/list.h>
21 21
22extern struct ath_dfs_pool_stats global_dfs_pool_stats;
23
22/** 24/**
23 * struct pri_sequence - sequence of pulses matching one PRI 25 * struct pri_sequence - sequence of pulses matching one PRI
24 * @head: list_head 26 * @head: list_head
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 7d077c752dd5..c00687e05688 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -356,14 +356,131 @@ static u16 ath_regd_find_country_by_name(char *alpha2)
356 return -1; 356 return -1;
357} 357}
358 358
359static int __ath_reg_dyn_country(struct wiphy *wiphy,
360 struct ath_regulatory *reg,
361 struct regulatory_request *request)
362{
363 u16 country_code;
364
365 if (!ath_is_world_regd(reg))
366 return -EINVAL;
367
368 country_code = ath_regd_find_country_by_name(request->alpha2);
369 if (country_code == (u16) -1)
370 return -EINVAL;
371
372 reg->current_rd = COUNTRY_ERD_FLAG;
373 reg->current_rd |= country_code;
374
375 __ath_regd_init(reg);
376
377 ath_reg_apply_world_flags(wiphy, request->initiator, reg);
378
379 return 0;
380}
381
382static void ath_reg_dyn_country(struct wiphy *wiphy,
383 struct ath_regulatory *reg,
384 struct regulatory_request *request)
385{
386 if (__ath_reg_dyn_country(wiphy, reg, request))
387 return;
388
389 printk(KERN_DEBUG "ath: regdomain 0x%0x "
390 "dynamically updated by %s\n",
391 reg->current_rd,
392 reg_initiator_name(request->initiator));
393}
394
395static bool dynamic_country_user_possible(struct ath_regulatory *reg)
396{
397 if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
398 return true;
399
400 switch (reg->country_code) {
401 case CTRY_UNITED_STATES:
402 case CTRY_JAPAN1:
403 case CTRY_JAPAN2:
404 case CTRY_JAPAN3:
405 case CTRY_JAPAN4:
406 case CTRY_JAPAN5:
407 case CTRY_JAPAN6:
408 case CTRY_JAPAN7:
409 case CTRY_JAPAN8:
410 case CTRY_JAPAN9:
411 case CTRY_JAPAN10:
412 case CTRY_JAPAN11:
413 case CTRY_JAPAN12:
414 case CTRY_JAPAN13:
415 case CTRY_JAPAN14:
416 case CTRY_JAPAN15:
417 case CTRY_JAPAN16:
418 case CTRY_JAPAN17:
419 case CTRY_JAPAN18:
420 case CTRY_JAPAN19:
421 case CTRY_JAPAN20:
422 case CTRY_JAPAN21:
423 case CTRY_JAPAN22:
424 case CTRY_JAPAN23:
425 case CTRY_JAPAN24:
426 case CTRY_JAPAN25:
427 case CTRY_JAPAN26:
428 case CTRY_JAPAN27:
429 case CTRY_JAPAN28:
430 case CTRY_JAPAN29:
431 case CTRY_JAPAN30:
432 case CTRY_JAPAN31:
433 case CTRY_JAPAN32:
434 case CTRY_JAPAN33:
435 case CTRY_JAPAN34:
436 case CTRY_JAPAN35:
437 case CTRY_JAPAN36:
438 case CTRY_JAPAN37:
439 case CTRY_JAPAN38:
440 case CTRY_JAPAN39:
441 case CTRY_JAPAN40:
442 case CTRY_JAPAN41:
443 case CTRY_JAPAN42:
444 case CTRY_JAPAN43:
445 case CTRY_JAPAN44:
446 case CTRY_JAPAN45:
447 case CTRY_JAPAN46:
448 case CTRY_JAPAN47:
449 case CTRY_JAPAN48:
450 case CTRY_JAPAN49:
451 case CTRY_JAPAN50:
452 case CTRY_JAPAN51:
453 case CTRY_JAPAN52:
454 case CTRY_JAPAN53:
455 case CTRY_JAPAN54:
456 case CTRY_JAPAN55:
457 case CTRY_JAPAN56:
458 case CTRY_JAPAN57:
459 case CTRY_JAPAN58:
460 case CTRY_JAPAN59:
461 return false;
462 }
463
464 return true;
465}
466
467static void ath_reg_dyn_country_user(struct wiphy *wiphy,
468 struct ath_regulatory *reg,
469 struct regulatory_request *request)
470{
471 if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
472 return;
473 if (!dynamic_country_user_possible(reg))
474 return;
475 ath_reg_dyn_country(wiphy, reg, request);
476}
477
359void ath_reg_notifier_apply(struct wiphy *wiphy, 478void ath_reg_notifier_apply(struct wiphy *wiphy,
360 struct regulatory_request *request, 479 struct regulatory_request *request,
361 struct ath_regulatory *reg) 480 struct ath_regulatory *reg)
362{ 481{
363 struct ath_common *common = container_of(reg, struct ath_common, 482 struct ath_common *common = container_of(reg, struct ath_common,
364 regulatory); 483 regulatory);
365 u16 country_code;
366
367 /* We always apply this */ 484 /* We always apply this */
368 ath_reg_apply_radar_flags(wiphy); 485 ath_reg_apply_radar_flags(wiphy);
369 486
@@ -388,25 +505,12 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
388 sizeof(struct ath_regulatory)); 505 sizeof(struct ath_regulatory));
389 break; 506 break;
390 case NL80211_REGDOM_SET_BY_DRIVER: 507 case NL80211_REGDOM_SET_BY_DRIVER:
508 break;
391 case NL80211_REGDOM_SET_BY_USER: 509 case NL80211_REGDOM_SET_BY_USER:
510 ath_reg_dyn_country_user(wiphy, reg, request);
392 break; 511 break;
393 case NL80211_REGDOM_SET_BY_COUNTRY_IE: 512 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
394 if (!ath_is_world_regd(reg)) 513 ath_reg_dyn_country(wiphy, reg, request);
395 break;
396
397 country_code = ath_regd_find_country_by_name(request->alpha2);
398 if (country_code == (u16) -1)
399 break;
400
401 reg->current_rd = COUNTRY_ERD_FLAG;
402 reg->current_rd |= country_code;
403
404 printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
405 reg->current_rd);
406 __ath_regd_init(reg);
407
408 ath_reg_apply_world_flags(wiphy, request->initiator, reg);
409
410 break; 514 break;
411 } 515 }
412} 516}
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index a55ae6494c3b..0d950f209dae 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -3212,7 +3212,7 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
3212 if (subtype == IEEE80211_STYPE_REASSOC_RESP && 3212 if (subtype == IEEE80211_STYPE_REASSOC_RESP &&
3213 status != WLAN_STATUS_ASSOC_DENIED_RATES && 3213 status != WLAN_STATUS_ASSOC_DENIED_RATES &&
3214 status != WLAN_STATUS_CAPS_UNSUPPORTED && 3214 status != WLAN_STATUS_CAPS_UNSUPPORTED &&
3215 priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { 3215 priv->ReAssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
3216 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); 3216 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3217 priv->ReAssociationRequestRetryCnt++; 3217 priv->ReAssociationRequestRetryCnt++;
3218 send_association_request(priv, 1); 3218 send_association_request(priv, 1);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 7c970d3ae358..05ee7f10cc8f 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -164,7 +164,8 @@ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
164 } 164 }
165 en_addr = en_addrs[override][i]; 165 en_addr = en_addrs[override][i];
166 166
167 val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1; 167 if (e)
168 val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
168 169
169 if (off) { 170 if (off) {
170 b43_phy_mask(dev, en_addr, ~en_mask); 171 b43_phy_mask(dev, en_addr, ~en_mask);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e13b1a65c65f..3e10b801eee8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -26,7 +26,6 @@
26#include <linux/mmc/sdio.h> 26#include <linux/mmc/sdio.h>
27#include <linux/mmc/sdio_func.h> 27#include <linux/mmc/sdio_func.h>
28#include <linux/mmc/card.h> 28#include <linux/mmc/card.h>
29#include <linux/mmc/host.h>
30#include <linux/platform_data/brcmfmac-sdio.h> 29#include <linux/platform_data/brcmfmac-sdio.h>
31 30
32#include <defs.h> 31#include <defs.h>
@@ -239,7 +238,9 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
239 func_num = SDIO_FUNC_1; 238 func_num = SDIO_FUNC_1;
240 reg_size = 4; 239 reg_size = 4;
241 240
242 brcmf_sdio_addrprep(sdiodev, reg_size, &addr); 241 ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
242 if (ret)
243 goto done;
243 } 244 }
244 245
245 do { 246 do {
@@ -255,6 +256,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
255 func_num, addr, data, 4); 256 func_num, addr, data, 4);
256 } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT); 257 } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
257 258
259done:
258 if (ret != 0) 260 if (ret != 0)
259 brcmf_err("failed with %d\n", ret); 261 brcmf_err("failed with %d\n", ret);
260 262
@@ -315,8 +317,36 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
315 *ret = retval; 317 *ret = retval;
316} 318}
317 319
320static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
321 bool write, u32 addr, struct sk_buff *pkt)
322{
323 unsigned int req_sz;
324
325 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
326 if (brcmf_pm_resume_error(sdiodev))
327 return -EIO;
328
329 /* Single skb use the standard mmc interface */
330 req_sz = pkt->len + 3;
331 req_sz &= (uint)~3;
332
333 if (write)
334 return sdio_memcpy_toio(sdiodev->func[fn], addr,
335 ((u8 *)(pkt->data)),
336 req_sz);
337 else if (fn == 1)
338 return sdio_memcpy_fromio(sdiodev->func[fn],
339 ((u8 *)(pkt->data)),
340 addr, req_sz);
341 else
342 /* function 2 read is FIFO operation */
343 return sdio_readsb(sdiodev->func[fn],
344 ((u8 *)(pkt->data)), addr,
345 req_sz);
346}
347
318/** 348/**
319 * brcmf_sdio_buffrw - SDIO interface function for block data access 349 * brcmf_sdio_sglist_rw - SDIO interface function for block data access
320 * @sdiodev: brcmfmac sdio device 350 * @sdiodev: brcmfmac sdio device
321 * @fn: SDIO function number 351 * @fn: SDIO function number
322 * @write: direction flag 352 * @write: direction flag
@@ -327,12 +357,13 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
327 * stack for block data access. It assumes that the skb passed down by the 357 * stack for block data access. It assumes that the skb passed down by the
328 * caller has already been padded and aligned. 358 * caller has already been padded and aligned.
329 */ 359 */
330static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, 360static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
331 bool write, u32 addr, struct sk_buff_head *pktlist) 361 bool write, u32 addr,
362 struct sk_buff_head *pktlist)
332{ 363{
333 unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset; 364 unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
334 unsigned int max_blks, max_req_sz, orig_offset, dst_offset; 365 unsigned int max_req_sz, orig_offset, dst_offset;
335 unsigned short max_seg_sz, seg_sz; 366 unsigned short max_seg_cnt, seg_sz;
336 unsigned char *pkt_data, *orig_data, *dst_data; 367 unsigned char *pkt_data, *orig_data, *dst_data;
337 struct sk_buff *pkt_next = NULL, *local_pkt_next; 368 struct sk_buff *pkt_next = NULL, *local_pkt_next;
338 struct sk_buff_head local_list, *target_list; 369 struct sk_buff_head local_list, *target_list;
@@ -341,7 +372,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
341 struct mmc_data mmc_dat; 372 struct mmc_data mmc_dat;
342 struct sg_table st; 373 struct sg_table st;
343 struct scatterlist *sgl; 374 struct scatterlist *sgl;
344 struct mmc_host *host;
345 int ret = 0; 375 int ret = 0;
346 376
347 if (!pktlist->qlen) 377 if (!pktlist->qlen)
@@ -351,27 +381,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
351 if (brcmf_pm_resume_error(sdiodev)) 381 if (brcmf_pm_resume_error(sdiodev))
352 return -EIO; 382 return -EIO;
353 383
354 /* Single skb use the standard mmc interface */
355 if (pktlist->qlen == 1) {
356 pkt_next = pktlist->next;
357 req_sz = pkt_next->len + 3;
358 req_sz &= (uint)~3;
359
360 if (write)
361 return sdio_memcpy_toio(sdiodev->func[fn], addr,
362 ((u8 *)(pkt_next->data)),
363 req_sz);
364 else if (fn == 1)
365 return sdio_memcpy_fromio(sdiodev->func[fn],
366 ((u8 *)(pkt_next->data)),
367 addr, req_sz);
368 else
369 /* function 2 read is FIFO operation */
370 return sdio_readsb(sdiodev->func[fn],
371 ((u8 *)(pkt_next->data)), addr,
372 req_sz);
373 }
374
375 target_list = pktlist; 384 target_list = pktlist;
376 /* for host with broken sg support, prepare a page aligned list */ 385 /* for host with broken sg support, prepare a page aligned list */
377 __skb_queue_head_init(&local_list); 386 __skb_queue_head_init(&local_list);
@@ -398,38 +407,46 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
398 target_list = &local_list; 407 target_list = &local_list;
399 } 408 }
400 409
401 host = sdiodev->func[fn]->card->host;
402 func_blk_sz = sdiodev->func[fn]->cur_blksize; 410 func_blk_sz = sdiodev->func[fn]->cur_blksize;
403 /* Blocks per command is limited by host count, host transfer 411 max_req_sz = sdiodev->max_request_size;
404 * size and the maximum for IO_RW_EXTENDED of 511 blocks. 412 max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
405 */ 413 target_list->qlen);
406 max_blks = min_t(unsigned int, host->max_blk_count, 511u);
407 max_req_sz = min_t(unsigned int, host->max_req_size,
408 max_blks * func_blk_sz);
409 max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
410 max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
411 seg_sz = target_list->qlen; 414 seg_sz = target_list->qlen;
412 pkt_offset = 0; 415 pkt_offset = 0;
413 pkt_next = target_list->next; 416 pkt_next = target_list->next;
414 417
415 if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) { 418 if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
416 ret = -ENOMEM; 419 ret = -ENOMEM;
417 goto exit; 420 goto exit;
418 } 421 }
419 422
423 memset(&mmc_req, 0, sizeof(struct mmc_request));
424 memset(&mmc_cmd, 0, sizeof(struct mmc_command));
425 memset(&mmc_dat, 0, sizeof(struct mmc_data));
426
427 mmc_dat.sg = st.sgl;
428 mmc_dat.blksz = func_blk_sz;
429 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
430 mmc_cmd.opcode = SD_IO_RW_EXTENDED;
431 mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
432 mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
433 mmc_cmd.arg |= 1<<27; /* block mode */
434 /* for function 1 the addr will be incremented */
435 mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
436 mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
437 mmc_req.cmd = &mmc_cmd;
438 mmc_req.data = &mmc_dat;
439
420 while (seg_sz) { 440 while (seg_sz) {
421 req_sz = 0; 441 req_sz = 0;
422 sg_cnt = 0; 442 sg_cnt = 0;
423 memset(&mmc_req, 0, sizeof(struct mmc_request));
424 memset(&mmc_cmd, 0, sizeof(struct mmc_command));
425 memset(&mmc_dat, 0, sizeof(struct mmc_data));
426 sgl = st.sgl; 443 sgl = st.sgl;
427 /* prep sg table */ 444 /* prep sg table */
428 while (pkt_next != (struct sk_buff *)target_list) { 445 while (pkt_next != (struct sk_buff *)target_list) {
429 pkt_data = pkt_next->data + pkt_offset; 446 pkt_data = pkt_next->data + pkt_offset;
430 sg_data_sz = pkt_next->len - pkt_offset; 447 sg_data_sz = pkt_next->len - pkt_offset;
431 if (sg_data_sz > host->max_seg_size) 448 if (sg_data_sz > sdiodev->max_segment_size)
432 sg_data_sz = host->max_seg_size; 449 sg_data_sz = sdiodev->max_segment_size;
433 if (sg_data_sz > max_req_sz - req_sz) 450 if (sg_data_sz > max_req_sz - req_sz)
434 sg_data_sz = max_req_sz - req_sz; 451 sg_data_sz = max_req_sz - req_sz;
435 452
@@ -444,7 +461,7 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
444 pkt_next = pkt_next->next; 461 pkt_next = pkt_next->next;
445 } 462 }
446 463
447 if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz) 464 if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
448 break; 465 break;
449 } 466 }
450 seg_sz -= sg_cnt; 467 seg_sz -= sg_cnt;
@@ -455,27 +472,17 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
455 ret = -ENOTBLK; 472 ret = -ENOTBLK;
456 goto exit; 473 goto exit;
457 } 474 }
458 mmc_dat.sg = st.sgl; 475
459 mmc_dat.sg_len = sg_cnt; 476 mmc_dat.sg_len = sg_cnt;
460 mmc_dat.blksz = func_blk_sz;
461 mmc_dat.blocks = req_sz / func_blk_sz; 477 mmc_dat.blocks = req_sz / func_blk_sz;
462 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
463 mmc_cmd.opcode = SD_IO_RW_EXTENDED;
464 mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
465 mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
466 mmc_cmd.arg |= 1<<27; /* block mode */
467 /* incrementing addr for function 1 */
468 mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
469 mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */ 478 mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
470 mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */ 479 mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
471 mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; 480 /* incrementing addr for function 1 */
472 mmc_req.cmd = &mmc_cmd;
473 mmc_req.data = &mmc_dat;
474 if (fn == 1) 481 if (fn == 1)
475 addr += req_sz; 482 addr += req_sz;
476 483
477 mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card); 484 mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
478 mmc_wait_for_req(host, &mmc_req); 485 mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
479 486
480 ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error; 487 ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
481 if (ret != 0) { 488 if (ret != 0) {
@@ -546,7 +553,6 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
546{ 553{
547 uint width; 554 uint width;
548 int err = 0; 555 int err = 0;
549 struct sk_buff_head pkt_list;
550 556
551 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", 557 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
552 fn, addr, pkt->len); 558 fn, addr, pkt->len);
@@ -556,19 +562,17 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
556 if (err) 562 if (err)
557 goto done; 563 goto done;
558 564
559 skb_queue_head_init(&pkt_list); 565 err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
560 skb_queue_tail(&pkt_list, pkt);
561 err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
562 skb_dequeue_tail(&pkt_list);
563 566
564done: 567done:
565 return err; 568 return err;
566} 569}
567 570
568int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 571int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
569 uint flags, struct sk_buff_head *pktq) 572 uint flags, struct sk_buff_head *pktq, uint totlen)
570{ 573{
571 uint incr_fix; 574 struct sk_buff *glom_skb;
575 struct sk_buff *skb;
572 uint width; 576 uint width;
573 int err = 0; 577 int err = 0;
574 578
@@ -580,8 +584,22 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
580 if (err) 584 if (err)
581 goto done; 585 goto done;
582 586
583 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; 587 if (pktq->qlen == 1)
584 err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq); 588 err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
589 else if (!sdiodev->sg_support) {
590 glom_skb = brcmu_pkt_buf_get_skb(totlen);
591 if (!glom_skb)
592 return -ENOMEM;
593 err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
594 if (err)
595 goto done;
596
597 skb_queue_walk(pktq, skb) {
598 memcpy(skb->data, glom_skb->data, skb->len);
599 skb_pull(glom_skb, skb->len);
600 }
601 } else
602 err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
585 603
586done: 604done:
587 return err; 605 return err;
@@ -592,7 +610,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
592 uint flags, u8 *buf, uint nbytes) 610 uint flags, u8 *buf, uint nbytes)
593{ 611{
594 struct sk_buff *mypkt; 612 struct sk_buff *mypkt;
595 struct sk_buff_head pktq; 613 uint width;
596 int err; 614 int err;
597 615
598 mypkt = brcmu_pkt_buf_get_skb(nbytes); 616 mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -603,10 +621,12 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
603 } 621 }
604 622
605 memcpy(mypkt->data, buf, nbytes); 623 memcpy(mypkt->data, buf, nbytes);
606 __skb_queue_head_init(&pktq); 624
607 __skb_queue_tail(&pktq, mypkt); 625 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
608 err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq); 626 err = brcmf_sdio_addrprep(sdiodev, width, &addr);
609 __skb_dequeue_tail(&pktq); 627
628 if (!err)
629 err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
610 630
611 brcmu_pkt_buf_free_skb(mypkt); 631 brcmu_pkt_buf_free_skb(mypkt);
612 return err; 632 return err;
@@ -617,16 +637,26 @@ int
617brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 637brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
618 uint flags, struct sk_buff_head *pktq) 638 uint flags, struct sk_buff_head *pktq)
619{ 639{
640 struct sk_buff *skb;
620 uint width; 641 uint width;
621 int err = 0; 642 int err;
622 643
623 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", 644 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
624 fn, addr, pktq->qlen); 645 fn, addr, pktq->qlen);
625 646
626 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 647 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
627 brcmf_sdio_addrprep(sdiodev, width, &addr); 648 err = brcmf_sdio_addrprep(sdiodev, width, &addr);
649 if (err)
650 return err;
628 651
629 err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq); 652 if (pktq->qlen == 1 || !sdiodev->sg_support)
653 skb_queue_walk(pktq, skb) {
654 err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
655 if (err)
656 break;
657 }
658 else
659 err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
630 660
631 return err; 661 return err;
632} 662}
@@ -639,7 +669,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
639 struct sk_buff *pkt; 669 struct sk_buff *pkt;
640 u32 sdaddr; 670 u32 sdaddr;
641 uint dsize; 671 uint dsize;
642 struct sk_buff_head pkt_list;
643 672
644 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size); 673 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
645 pkt = dev_alloc_skb(dsize); 674 pkt = dev_alloc_skb(dsize);
@@ -648,7 +677,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
648 return -EIO; 677 return -EIO;
649 } 678 }
650 pkt->priority = 0; 679 pkt->priority = 0;
651 skb_queue_head_init(&pkt_list);
652 680
653 /* Determine initial transfer parameters */ 681 /* Determine initial transfer parameters */
654 sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; 682 sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@@ -676,10 +704,8 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
676 skb_put(pkt, dsize); 704 skb_put(pkt, dsize);
677 if (write) 705 if (write)
678 memcpy(pkt->data, data, dsize); 706 memcpy(pkt->data, data, dsize);
679 skb_queue_tail(&pkt_list, pkt);
680 bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write, 707 bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
681 sdaddr, &pkt_list); 708 sdaddr, pkt);
682 skb_dequeue_tail(&pkt_list);
683 if (bcmerror) { 709 if (bcmerror) {
684 brcmf_err("membytes transfer failed\n"); 710 brcmf_err("membytes transfer failed\n");
685 break; 711 break;
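
The hand-rolled mmc_cmd.arg in brcmf_sdio_sglist_rw() is simply the SDIO CMD53 (IO_RW_EXTENDED) argument word: R/W flag in bit 31, function number in bits 28-30, block mode in bit 27, the incrementing-address OP code in bit 26, the 17-bit register address in bits 9-25 and the block count in bits 0-8. A compact sketch of the same encoding, with sdio_cmd53_arg() as an illustrative helper rather than a kernel API:

#include <stdint.h>
#include <stdbool.h>

/* Build a CMD53 argument word with the field layout used above. */
static uint32_t sdio_cmd53_arg(bool write, unsigned int fn, bool block_mode,
			       bool incr_addr, uint32_t addr, uint32_t count)
{
	uint32_t arg = 0;

	arg |= write ? 1u << 31 : 0;		/* R/W flag */
	arg |= (fn & 0x7) << 28;		/* SDIO function number */
	arg |= block_mode ? 1u << 27 : 0;	/* block (vs byte) mode */
	arg |= incr_addr ? 1u << 26 : 0;	/* OP code: incrementing address */
	arg |= (addr & 0x1ffff) << 9;		/* 17-bit register address */
	arg |= count & 0x1ff;			/* block or byte count */
	return arg;
}

/* e.g. a function-2, block-mode, fixed-address read of 8 blocks:
 *	sdio_cmd53_arg(false, 2, true, false, addr, 8)
 * which is why only function-1 transfers set the incrementing-address bit
 * in the code above. */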
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 2a23bf2b904d..905704e335d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -21,6 +21,7 @@
21#include <linux/mmc/sdio_func.h> 21#include <linux/mmc/sdio_func.h>
22#include <linux/mmc/sdio_ids.h> 22#include <linux/mmc/sdio_ids.h>
23#include <linux/mmc/card.h> 23#include <linux/mmc/card.h>
24#include <linux/mmc/host.h>
24#include <linux/suspend.h> 25#include <linux/suspend.h>
25#include <linux/errno.h> 26#include <linux/errno.h>
26#include <linux/sched.h> /* request_irq() */ 27#include <linux/sched.h> /* request_irq() */
@@ -315,6 +316,8 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
315 int err; 316 int err;
316 struct brcmf_sdio_dev *sdiodev; 317 struct brcmf_sdio_dev *sdiodev;
317 struct brcmf_bus *bus_if; 318 struct brcmf_bus *bus_if;
319 struct mmc_host *host;
320 uint max_blocks;
318 321
319 brcmf_dbg(SDIO, "Enter\n"); 322 brcmf_dbg(SDIO, "Enter\n");
320 brcmf_dbg(SDIO, "Class=%x\n", func->class); 323 brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -361,6 +364,20 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
361 brcmf_err("F2 error, probe failed %d...\n", err); 364 brcmf_err("F2 error, probe failed %d...\n", err);
362 goto fail; 365 goto fail;
363 } 366 }
367
368 /*
369 * determine host related variables after brcmf_sdio_probe()
370 * as func->cur_blksize is properly set and F2 init has been
371 * completed successfully.
372 */
373 host = func->card->host;
374 sdiodev->sg_support = host->max_segs > 1;
375 max_blocks = min_t(uint, host->max_blk_count, 511u);
376 sdiodev->max_request_size = min_t(uint, host->max_req_size,
377 max_blocks * func->cur_blksize);
378 sdiodev->max_segment_count = min_t(uint, host->max_segs,
379 SG_MAX_SINGLE_ALLOC);
380 sdiodev->max_segment_size = host->max_seg_size;
364 brcmf_dbg(SDIO, "F2 init completed...\n"); 381 brcmf_dbg(SDIO, "F2 init completed...\n");
365 return 0; 382 return 0;
366 383
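
The probe-time capability snapshot added above exists so the transfer path no longer has to reach into struct mmc_host on every request: scatter-gather is assumed only when the host offers more than one segment, and the request size is clamped both by the host and by the 511-block ceiling of CMD53's 9-bit block count. A minimal sketch of the same derivation, where the struct and macro names are illustrative stand-ins for the mmc_host fields and for SG_MAX_SINGLE_ALLOC:

#include <stdbool.h>

struct host_caps {			/* stand-in for the mmc_host values */
	unsigned int max_blk_count;
	unsigned int max_req_size;
	unsigned int max_segs;
	unsigned int max_seg_size;
};

struct sdio_limits {
	bool sg_support;
	unsigned int max_request_size;
	unsigned int max_segment_count;
	unsigned int max_segment_size;
};

#define CMD53_MAX_BLOCKS	511u	/* 9-bit block count field */
#define SG_TABLE_MAX_SEGS	128u	/* illustrative stand-in for SG_MAX_SINGLE_ALLOC */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static struct sdio_limits derive_limits(const struct host_caps *host,
					unsigned int cur_blksize)
{
	struct sdio_limits lim;
	unsigned int max_blocks = min_u(host->max_blk_count, CMD53_MAX_BLOCKS);

	lim.sg_support = host->max_segs > 1;
	lim.max_request_size = min_u(host->max_req_size,
				     max_blocks * cur_blksize);
	lim.max_segment_count = min_u(host->max_segs, SG_TABLE_MAX_SEGS);
	lim.max_segment_size = host->max_seg_size;
	return lim;
}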
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 67f05db4b9b8..b02953c4ade7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -1147,6 +1147,8 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
1147 u8 rx_seq, fc, tx_seq_max; 1147 u8 rx_seq, fc, tx_seq_max;
1148 u32 swheader; 1148 u32 swheader;
1149 1149
1150 trace_brcmf_sdpcm_hdr(false, header);
1151
1150 /* hw header */ 1152 /* hw header */
1151 len = get_unaligned_le16(header); 1153 len = get_unaligned_le16(header);
1152 checksum = get_unaligned_le16(header + sizeof(u16)); 1154 checksum = get_unaligned_le16(header + sizeof(u16));
@@ -1269,6 +1271,7 @@ static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
1269 SDPCM_DOFFSET_MASK; 1271 SDPCM_DOFFSET_MASK;
1270 *(((__le32 *)header) + 1) = cpu_to_le32(sw_header); 1272 *(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
1271 *(((__le32 *)header) + 2) = 0; 1273 *(((__le32 *)header) + 2) = 0;
1274 trace_brcmf_sdpcm_hdr(true, header);
1272} 1275}
1273 1276
1274static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) 1277static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
@@ -1389,7 +1392,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1389 sdio_claim_host(bus->sdiodev->func[1]); 1392 sdio_claim_host(bus->sdiodev->func[1]);
1390 errcode = brcmf_sdcard_recv_chain(bus->sdiodev, 1393 errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
1391 bus->sdiodev->sbwad, 1394 bus->sdiodev->sbwad,
1392 SDIO_FUNC_2, F2SYNC, &bus->glom); 1395 SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
1393 sdio_release_host(bus->sdiodev->func[1]); 1396 sdio_release_host(bus->sdiodev->func[1]);
1394 bus->sdcnt.f2rxdata++; 1397 bus->sdcnt.f2rxdata++;
1395 1398
@@ -1877,6 +1880,56 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
 /* bit mask of data length chopped from the previous packet */
 #define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
 
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
+				    struct sk_buff_head *pktq,
+				    struct sk_buff *pkt, uint chan)
+{
+	struct sk_buff *pkt_pad;
+	u16 tail_pad, tail_chop, sg_align;
+	unsigned int blksize;
+	u8 *dat_buf;
+	int ntail;
+
+	blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+	sg_align = 4;
+	if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
+		sg_align = sdiodev->pdata->sd_sgentry_align;
+	/* sg entry alignment should be a divisor of block size */
+	WARN_ON(blksize % sg_align);
+
+	/* Check tail padding */
+	pkt_pad = NULL;
+	tail_chop = pkt->len % sg_align;
+	tail_pad = sg_align - tail_chop;
+	tail_pad += blksize - (pkt->len + tail_pad) % blksize;
+	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
+		pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+		if (pkt_pad == NULL)
+			return -ENOMEM;
+		memcpy(pkt_pad->data,
+		       pkt->data + pkt->len - tail_chop,
+		       tail_chop);
+		*(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+		skb_trim(pkt, pkt->len - tail_chop);
+		__skb_queue_after(pktq, pkt, pkt_pad);
+	} else {
+		ntail = pkt->data_len + tail_pad -
+			(pkt->end - pkt->tail);
+		if (skb_cloned(pkt) || ntail > 0)
+			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
+				return -ENOMEM;
+		if (skb_linearize(pkt))
+			return -ENOMEM;
+		dat_buf = (u8 *)(pkt->data);
+		__skb_put(pkt, tail_pad);
+	}
+
+	if (pkt_pad)
+		return pkt->len + tail_chop;
+	else
+		return pkt->len - tail_pad;
+}
+
 /**
  * brcmf_sdio_txpkt_prep - packet preparation for transmit
  * @bus: brcmf_sdio structure pointer
@@ -1893,24 +1946,16 @@ static int
 brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
 		      uint chan)
 {
-	u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
-	int ntail;
-	struct sk_buff *pkt_next, *pkt_new;
+	u16 head_pad, head_align;
+	struct sk_buff *pkt_next;
 	u8 *dat_buf;
-	unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+	int err;
 	struct brcmf_sdio_hdrinfo hd_info = {0};
 
 	/* SDIO ADMA requires at least 32 bit alignment */
 	head_align = 4;
-	sg_align = 4;
-	if (bus->sdiodev->pdata) {
-		head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
-			     bus->sdiodev->pdata->sd_head_align : 4;
-		sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
-			   bus->sdiodev->pdata->sd_sgentry_align : 4;
-	}
-	/* sg entry alignment should be a divisor of block size */
-	WARN_ON(blksize % sg_align);
+	if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
+		head_align = bus->sdiodev->pdata->sd_head_align;
 
 	pkt_next = pktq->next;
 	dat_buf = (u8 *)(pkt_next->data);
@@ -1929,40 +1974,20 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
 		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
 	}
 
-	/* Check tail padding */
-	pkt_new = NULL;
-	tail_chop = pkt_next->len % sg_align;
-	tail_pad = sg_align - tail_chop;
-	tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
-	if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
-		pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
-		if (pkt_new == NULL)
-			return -ENOMEM;
-		memcpy(pkt_new->data,
-		       pkt_next->data + pkt_next->len - tail_chop,
-		       tail_chop);
-		*(u32 *)(pkt_new->cb) = ALIGN_SKB_FLAG + tail_chop;
-		skb_trim(pkt_next, pkt_next->len - tail_chop);
-		__skb_queue_after(pktq, pkt_next, pkt_new);
+	if (bus->sdiodev->sg_support && pktq->qlen > 1) {
+		err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
+					       pkt_next, chan);
+		if (err < 0)
+			return err;
+		hd_info.len = (u16)err;
 	} else {
-		ntail = pkt_next->data_len + tail_pad -
-			(pkt_next->end - pkt_next->tail);
-		if (skb_cloned(pkt_next) || ntail > 0)
-			if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
-				return -ENOMEM;
-		if (skb_linearize(pkt_next))
-			return -ENOMEM;
-		dat_buf = (u8 *)(pkt_next->data);
-		__skb_put(pkt_next, tail_pad);
+		hd_info.len = pkt_next->len;
 	}
 
-	/* Now prep the header */
-	if (pkt_new)
-		hd_info.len = pkt_next->len + tail_chop;
-	else
-		hd_info.len = pkt_next->len - tail_pad;
 	hd_info.channel = chan;
 	hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+
+	/* Now fill the header */
 	brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
 
 	if (BRCMF_BYTES_ON() &&
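The tail-padding arithmetic introduced in brcmf_sdio_txpkt_prep_sg() first rounds the packet up to the scatter-gather entry alignment and then up to a whole SDIO block, so the controller never sees a partial sg entry or a partial block. A stand-alone sketch of that rounding in plain C (the block size, alignment, and packet length below are example values, not taken from this diff):

#include <stdio.h>

int main(void)
{
	/* Assumed host parameters: 512-byte F2 block, 8-byte sg alignment. */
	unsigned int blksize = 512;
	unsigned int sg_align = 8;
	unsigned int pkt_len = 1510;	/* example skb length */

	/* Same arithmetic as the kernel hunk above. */
	unsigned int tail_chop = pkt_len % sg_align;
	unsigned int tail_pad = sg_align - tail_chop;
	tail_pad += blksize - (pkt_len + tail_pad) % blksize;

	/* Prints: tail_chop=6 tail_pad=26 padded=1536 (three 512-byte blocks) */
	printf("tail_chop=%u tail_pad=%u padded=%u\n",
	       tail_chop, tail_pad, pkt_len + tail_pad);
	return 0;
}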
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index c9b06b4e71f7..fc0d4f0129db 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -178,6 +178,10 @@ struct brcmf_sdio_dev {
 	bool irq_en;			/* irq enable flags */
 	spinlock_t irq_en_lock;
 	bool irq_wake;			/* irq wake enable flags */
+	bool sg_support;
+	uint max_request_size;
+	ushort max_segment_count;
+	uint max_segment_size;
 };
 
 /* Register/deregister interrupt handler. */
@@ -216,7 +220,7 @@ int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 			  uint flags, u8 *buf, uint nbytes);
 int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-			    uint flags, struct sk_buff_head *pktq);
+			    uint flags, struct sk_buff_head *pktq, uint totlen);
 
 /* Flags bits */
 
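The new sg_support and max_* fields record the host controller's scatter-gather limits so the bus layer can split chained requests accordingly. A hedged sketch of how they could be filled from the standard mmc_host limits (the helper below is illustrative only; the actual probe code is not part of this hunk):

/* Sketch: copy MMC host transfer limits into the fields added above. */
static void brcmf_sdiod_set_sg_limits(struct brcmf_sdio_dev *sdiodev,
				      struct mmc_host *host)
{
	sdiodev->sg_support = host->max_segs > 1;
	sdiodev->max_request_size = host->max_req_size;
	sdiodev->max_segment_count = host->max_segs;
	sdiodev->max_segment_size = host->max_seg_size;
}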
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index bc2917112899..3c67529b9074 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -78,13 +78,15 @@ TRACE_EVENT(brcmf_hexdump,
 	TP_ARGS(data, len),
 	TP_STRUCT__entry(
 		__field(unsigned long, len)
+		__field(unsigned long, addr)
 		__dynamic_array(u8, hdata, len)
 	),
 	TP_fast_assign(
 		__entry->len = len;
+		__entry->addr = (unsigned long)data;
 		memcpy(__get_dynamic_array(hdata), data, len);
 	),
-	TP_printk("hexdump [length=%lu]", __entry->len)
+	TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
 );
 
 TRACE_EVENT(brcmf_bdchdr,
@@ -108,6 +110,23 @@ TRACE_EVENT(brcmf_bdchdr,
 	TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
 );
 
+TRACE_EVENT(brcmf_sdpcm_hdr,
+	TP_PROTO(bool tx, void *data),
+	TP_ARGS(tx, data),
+	TP_STRUCT__entry(
+		__field(u8, tx)
+		__field(u16, len)
+		__array(u8, hdr, 12)
+	),
+	TP_fast_assign(
+		memcpy(__entry->hdr, data, 12);
+		__entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
+		__entry->tx = tx ? 1 : 0;
+	),
+	TP_printk("sdpcm: %s len %u, seq %d", __entry->tx ? "TX" : "RX",
+		  __entry->len, __entry->hdr[4])
+);
+
 #ifdef CONFIG_BRCM_TRACING
 
 #undef TRACE_INCLUDE_PATH
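The brcmf_sdpcm_hdr event snapshots the first 12 bytes of the SDPCM header on both the RX (hdparse) and TX (hdpack) paths; the frame length is the leading little-endian 16-bit word and the sequence number sits at byte 4. The same decode outside the tracing macros, as a sketch (the helper name is invented):

/* Sketch: decode the fields the brcmf_sdpcm_hdr tracepoint prints. */
static void sdpcm_hdr_peek(const u8 *hdr, bool tx)
{
	u16 len = hdr[0] | (hdr[1] << 8);	/* hw header: frame length (LE) */
	u8 seq = hdr[4];			/* sw header: sequence number */

	pr_debug("sdpcm: %s len %u, seq %u\n", tx ? "TX" : "RX", len, seq);
}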
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index da442b81370a..1fef5240e6ad 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -433,27 +433,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
433 /* Copy MAC header from skb into command buffer */ 433 /* Copy MAC header from skb into command buffer */
434 memcpy(tx_cmd->hdr, hdr, hdr_len); 434 memcpy(tx_cmd->hdr, hdr, hdr_len);
435 435
436 txq_id = info->hw_queue;
437
436 if (is_agg) 438 if (is_agg)
437 txq_id = priv->tid_data[sta_id][tid].agg.txq_id; 439 txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
438 else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { 440 else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
439 /* 441 /*
440 * Send this frame after DTIM -- there's a special queue
441 * reserved for this for contexts that support AP mode.
442 */
443 txq_id = ctx->mcast_queue;
444
445 /*
446 * The microcode will clear the more data 442 * The microcode will clear the more data
447 * bit in the last frame it transmits. 443 * bit in the last frame it transmits.
448 */ 444 */
449 hdr->frame_control |= 445 hdr->frame_control |=
450 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 446 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
451 } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) 447 }
452 txq_id = IWL_AUX_QUEUE;
453 else
454 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
455 448
456 WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
457 WARN_ON_ONCE(is_agg && 449 WARN_ON_ONCE(is_agg &&
458 priv->queue_to_mac80211[txq_id] != info->hw_queue); 450 priv->queue_to_mac80211[txq_id] != info->hw_queue);
459 451
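The net effect of this hunk is that the transmit queue now defaults to mac80211's choice (info->hw_queue) and is only overridden for aggregation; the old special cases for after-DTIM multicast and off-channel frames go away because mac80211 already steers those frames to the right hardware queue. Reduced to a sketch (not the literal function body):

	/* Sketch of the simplified queue selection after this change. */
	txq_id = info->hw_queue;			/* mac80211's default */
	if (is_agg)
		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);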
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 76e14c046d94..85879dbaa402 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -83,6 +83,8 @@
83#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ 83#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */
84#define IWL3160_NVM_VERSION 0x709 84#define IWL3160_NVM_VERSION 0x709
85#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ 85#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
86#define IWL7265_NVM_VERSION 0x0a1d
87#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
86 88
87#define IWL7260_FW_PRE "iwlwifi-7260-" 89#define IWL7260_FW_PRE "iwlwifi-7260-"
88#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode" 90#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@@ -90,6 +92,9 @@
90#define IWL3160_FW_PRE "iwlwifi-3160-" 92#define IWL3160_FW_PRE "iwlwifi-3160-"
91#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" 93#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
92 94
95#define IWL7265_FW_PRE "iwlwifi-7265-"
96#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
97
93static const struct iwl_base_params iwl7000_base_params = { 98static const struct iwl_base_params iwl7000_base_params = {
94 .eeprom_size = OTP_LOW_IMAGE_SIZE, 99 .eeprom_size = OTP_LOW_IMAGE_SIZE,
95 .num_of_queues = IWLAGN_NUM_QUEUES, 100 .num_of_queues = IWLAGN_NUM_QUEUES,
@@ -182,5 +187,14 @@ const struct iwl_cfg iwl3160_n_cfg = {
182 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 187 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
183}; 188};
184 189
190const struct iwl_cfg iwl7265_2ac_cfg = {
191 .name = "Intel(R) Dual Band Wireless AC 7265",
192 .fw_name_pre = IWL7265_FW_PRE,
193 IWL_DEVICE_7000,
194 .ht_params = &iwl7000_ht_params,
195 .nvm_ver = IWL7265_NVM_VERSION,
196 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
197};
198
185MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 199MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
186MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 200MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
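IWL7265_MODULE_FIRMWARE() composes the requested firmware file name from the device prefix and the uCode API level via __stringify. A minimal illustration with renamed macros (the API number 7 is only an example, not the value the driver actually requests):

#include <linux/stringify.h>

#define EXAMPLE_FW_PRE "iwlwifi-7265-"
#define EXAMPLE_MODULE_FIRMWARE(api) EXAMPLE_FW_PRE __stringify(api) ".ucode"

/* Expands to the string literal "iwlwifi-7265-7.ucode". */
static const char *example_fw_name = EXAMPLE_MODULE_FIRMWARE(7);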
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index b03c25e14903..18f232e8e812 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -293,6 +293,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
293extern const struct iwl_cfg iwl3160_2ac_cfg; 293extern const struct iwl_cfg iwl3160_2ac_cfg;
294extern const struct iwl_cfg iwl3160_2n_cfg; 294extern const struct iwl_cfg iwl3160_2n_cfg;
295extern const struct iwl_cfg iwl3160_n_cfg; 295extern const struct iwl_cfg iwl3160_n_cfg;
296extern const struct iwl_cfg iwl7265_2ac_cfg;
296#endif /* CONFIG_IWLMVM */ 297#endif /* CONFIG_IWLMVM */
297 298
298#endif /* __IWL_CONFIG_H__ */ 299#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a276af476e2d..54a4fdc631b7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -394,6 +394,38 @@
394#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) 394#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
395#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) 395#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
396 396
397/* SECURE boot registers */
398#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
399enum secure_boot_config_reg {
400 CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
401 CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
402};
403
404#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
405#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
406enum secure_boot_status_reg {
407 CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
408 CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
409 CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
410 CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
411 CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
412};
413
414#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
415enum secure_load_status_reg {
416 CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
417 CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
418 CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
419 CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
420};
421
422#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
423#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
424
425#define CSR_SECURE_TIME_OUT (100)
426
427#define FH_TCSR_0_REG0 (0x1D00)
428
397/* 429/*
398 * HBUS (Host-side Bus) 430 * HBUS (Host-side Bus)
399 * 431 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 99e1da3123c9..ff570027e9dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -483,6 +483,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
483 const u8 *tlv_data; 483 const u8 *tlv_data;
484 char buildstr[25]; 484 char buildstr[25];
485 u32 build; 485 u32 build;
486 int num_of_cpus;
486 487
487 if (len < sizeof(*ucode)) { 488 if (len < sizeof(*ucode)) {
488 IWL_ERR(drv, "uCode has invalid length: %zd\n", len); 489 IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -692,6 +693,42 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
692 goto invalid_tlv_len; 693 goto invalid_tlv_len;
693 drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data); 694 drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
694 break; 695 break;
696 case IWL_UCODE_TLV_SECURE_SEC_RT:
697 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
698 tlv_len);
699 drv->fw.mvm_fw = true;
700 drv->fw.img[IWL_UCODE_REGULAR].is_secure = true;
701 break;
702 case IWL_UCODE_TLV_SECURE_SEC_INIT:
703 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
704 tlv_len);
705 drv->fw.mvm_fw = true;
706 drv->fw.img[IWL_UCODE_INIT].is_secure = true;
707 break;
708 case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
709 iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
710 tlv_len);
711 drv->fw.mvm_fw = true;
712 drv->fw.img[IWL_UCODE_WOWLAN].is_secure = true;
713 break;
714 case IWL_UCODE_TLV_NUM_OF_CPU:
715 if (tlv_len != sizeof(u32))
716 goto invalid_tlv_len;
717 num_of_cpus =
718 le32_to_cpup((__le32 *)tlv_data);
719
720 if (num_of_cpus == 2) {
721 drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
722 true;
723 drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
724 true;
725 drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
726 true;
727 } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
728 IWL_ERR(drv, "Driver support upto 2 CPUs\n");
729 return -EINVAL;
730 }
731 break;
695 default: 732 default:
696 IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); 733 IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
697 break; 734 break;
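Each firmware TLV is a (type, length, payload) record; the IWL_UCODE_TLV_NUM_OF_CPU handler above first checks that the payload is exactly one u32 before trusting it. The same validation pattern in isolation, as a sketch (helper name invented):

/* Sketch: validate and read a little-endian u32 TLV payload. */
static int read_u32_tlv(const u8 *tlv_data, u32 tlv_len, u32 *out)
{
	if (tlv_len != sizeof(u32))
		return -EINVAL;			/* malformed TLV */
	*out = le32_to_cpup((const __le32 *)tlv_data);
	return 0;
}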
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 8b6c6fd95ed0..6c6c35c5228c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -121,6 +121,10 @@ enum iwl_ucode_tlv_type {
121 IWL_UCODE_TLV_SEC_WOWLAN = 21, 121 IWL_UCODE_TLV_SEC_WOWLAN = 21,
122 IWL_UCODE_TLV_DEF_CALIB = 22, 122 IWL_UCODE_TLV_DEF_CALIB = 22,
123 IWL_UCODE_TLV_PHY_SKU = 23, 123 IWL_UCODE_TLV_PHY_SKU = 23,
124 IWL_UCODE_TLV_SECURE_SEC_RT = 24,
125 IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
126 IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
127 IWL_UCODE_TLV_NUM_OF_CPU = 27,
124}; 128};
125 129
126struct iwl_ucode_tlv { 130struct iwl_ucode_tlv {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index a1223680bc70..87b66a821ec8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -75,11 +75,23 @@
75 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. 75 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
76 * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS 76 * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
77 * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD 77 * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
78 * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
79 * offload profile config command.
78 * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api 80 * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
79 * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API. 81 * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
80 * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six 82 * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
81 * (rather than two) IPv6 addresses 83 * (rather than two) IPv6 addresses
82 * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API 84 * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
85 * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
86 * from the probe request template.
87 * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
88 * connection when going back to D0
89 * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
90 * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
91 * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
92 * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
93 * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
94 * containing CAM (Continuous Active Mode) indication.
83 */ 95 */
84enum iwl_ucode_tlv_flag { 96enum iwl_ucode_tlv_flag {
85 IWL_UCODE_TLV_FLAGS_PAN = BIT(0), 97 IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -87,11 +99,20 @@ enum iwl_ucode_tlv_flag {
87 IWL_UCODE_TLV_FLAGS_MFP = BIT(2), 99 IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
88 IWL_UCODE_TLV_FLAGS_P2P = BIT(3), 100 IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
89 IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), 101 IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
102 IWL_UCODE_TLV_FLAGS_NEWBT_COEX = BIT(5),
90 IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6), 103 IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
104 IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
91 IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8), 105 IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
92 IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9), 106 IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
93 IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), 107 IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
94 IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11), 108 IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
109 IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
110 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
111 IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
112 IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
113 IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
114 IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
115 IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
95}; 116};
96 117
97/* The default calibrate table size if not specified by firmware file */ 118/* The default calibrate table size if not specified by firmware file */
@@ -133,7 +154,8 @@ enum iwl_ucode_sec {
133 * For 16.0 uCode and above, there is no differentiation between sections, 154 * For 16.0 uCode and above, there is no differentiation between sections,
134 * just an offset to the HW address. 155 * just an offset to the HW address.
135 */ 156 */
136#define IWL_UCODE_SECTION_MAX 4 157#define IWL_UCODE_SECTION_MAX 6
158#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2)
137 159
138struct iwl_ucode_capabilities { 160struct iwl_ucode_capabilities {
139 u32 max_probe_length; 161 u32 max_probe_length;
@@ -150,6 +172,8 @@ struct fw_desc {
150 172
151struct fw_img { 173struct fw_img {
152 struct fw_desc sec[IWL_UCODE_SECTION_MAX]; 174 struct fw_desc sec[IWL_UCODE_SECTION_MAX];
175 bool is_secure;
176 bool is_dual_cpus;
153}; 177};
154 178
155/* uCode version contains 4 values: Major/Minor/API/Serial */ 179/* uCode version contains 4 values: Major/Minor/API/Serial */
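Raising IWL_UCODE_SECTION_MAX to 6 leaves room for two CPUs' worth of sections, with IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU marking where the second CPU's half begins when is_dual_cpus is set. A hedged sketch of the split (assuming a single-CPU image keeps the whole array for the first CPU, which this hunk does not spell out):

/* Sketch: how many section slots belong to each CPU of an image. */
static int fw_img_sections_for_cpu(const struct fw_img *img, int cpu)
{
	int half = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;	/* = 3 */

	if (!img->is_dual_cpus)
		return cpu == 1 ? IWL_UCODE_SECTION_MAX : 0;
	return cpu == 1 ? half : IWL_UCODE_SECTION_MAX - half;
}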
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index ff8cc75c189d..a70c7b9d9bad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,6 +97,8 @@
97 97
98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
99 99
100#define APMG_RTC_INT_STT_RFKILL (0x10000000)
101
100/* Device system time */ 102/* Device system time */
101#define DEVICE_SYSTEM_TIME_REG 0xA0206C 103#define DEVICE_SYSTEM_TIME_REG 0xA0206C
102 104
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 80b47508647c..c6bac7c90b00 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -601,7 +601,7 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
601{ 601{
602 int ret; 602 int ret;
603 603
604 if (trans->state != IWL_TRANS_FW_ALIVE) { 604 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
605 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state); 605 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
606 return -EIO; 606 return -EIO;
607 } 607 }
@@ -640,8 +640,8 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
640static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, 640static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
641 struct iwl_device_cmd *dev_cmd, int queue) 641 struct iwl_device_cmd *dev_cmd, int queue)
642{ 642{
643 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 643 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
644 "%s bad state = %d", __func__, trans->state); 644 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
645 645
646 return trans->ops->tx(trans, skb, dev_cmd, queue); 646 return trans->ops->tx(trans, skb, dev_cmd, queue);
647} 647}
@@ -649,16 +649,16 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
649static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, 649static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
650 int ssn, struct sk_buff_head *skbs) 650 int ssn, struct sk_buff_head *skbs)
651{ 651{
652 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 652 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
653 "%s bad state = %d", __func__, trans->state); 653 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
654 654
655 trans->ops->reclaim(trans, queue, ssn, skbs); 655 trans->ops->reclaim(trans, queue, ssn, skbs);
656} 656}
657 657
658static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue) 658static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
659{ 659{
660 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 660 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
661 "%s bad state = %d", __func__, trans->state); 661 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
662 662
663 trans->ops->txq_disable(trans, queue); 663 trans->ops->txq_disable(trans, queue);
664} 664}
@@ -669,8 +669,8 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
669{ 669{
670 might_sleep(); 670 might_sleep();
671 671
672 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 672 if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
673 "%s bad state = %d", __func__, trans->state); 673 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
674 674
675 trans->ops->txq_enable(trans, queue, fifo, sta_id, tid, 675 trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
676 frame_limit, ssn); 676 frame_limit, ssn);
@@ -685,8 +685,8 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
685 685
686static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) 686static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
687{ 687{
688 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 688 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
689 "%s bad state = %d", __func__, trans->state); 689 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
690 690
691 return trans->ops->wait_tx_queue_empty(trans); 691 return trans->ops->wait_tx_queue_empty(trans);
692} 692}
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 0fad98b85f60..5b630f12bbff 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -98,126 +98,258 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
98 98
99#undef EVENT_PRIO_ANT 99#undef EVENT_PRIO_ANT
100 100
101/* BT Antenna Coupling Threshold (dB) */
102#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
103#define IWL_BT_LOAD_FORCE_SISO_THRESHOLD (3)
104
105#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62) 101#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62)
106#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65) 102#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
107#define BT_REDUCED_TX_POWER_BIT BIT(7) 103#define BT_ANTENNA_COUPLING_THRESHOLD (30)
108
109static inline bool is_loose_coex(void)
110{
111 return iwlwifi_mod_params.ant_coupling >
112 IWL_BT_ANTENNA_COUPLING_THRESHOLD;
113}
114 104
115int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) 105int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
116{ 106{
107 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
108 return 0;
109
117 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC, 110 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
118 sizeof(struct iwl_bt_coex_prio_tbl_cmd), 111 sizeof(struct iwl_bt_coex_prio_tbl_cmd),
119 &iwl_bt_prio_tbl); 112 &iwl_bt_prio_tbl);
120} 113}
121 114
122static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type) 115const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
123{
124 struct iwl_bt_coex_prot_env_cmd env_cmd;
125 int ret;
126
127 env_cmd.action = action;
128 env_cmd.type = type;
129 ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC,
130 sizeof(env_cmd), &env_cmd);
131 if (ret)
132 IWL_ERR(mvm, "failed to send BT env command\n");
133 return ret;
134}
135
136enum iwl_bt_kill_msk {
137 BT_KILL_MSK_DEFAULT,
138 BT_KILL_MSK_SCO_HID_A2DP,
139 BT_KILL_MSK_REDUCED_TXPOW,
140 BT_KILL_MSK_MAX,
141};
142
143static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
144 [BT_KILL_MSK_DEFAULT] = 0xffff0000, 116 [BT_KILL_MSK_DEFAULT] = 0xffff0000,
145 [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff, 117 [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
146 [BT_KILL_MSK_REDUCED_TXPOW] = 0, 118 [BT_KILL_MSK_REDUCED_TXPOW] = 0,
147}; 119};
148 120
149static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = { 121const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
150 [BT_KILL_MSK_DEFAULT] = 0xffff0000, 122 [BT_KILL_MSK_DEFAULT] = 0xffff0000,
151 [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff, 123 [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
152 [BT_KILL_MSK_REDUCED_TXPOW] = 0, 124 [BT_KILL_MSK_REDUCED_TXPOW] = 0,
153}; 125};
154 126
155#define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0) 127static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
156 128 cpu_to_le32(0xf0f0f0f0),
157/* Tight Coex */ 129 cpu_to_le32(0xc0c0c0c0),
158static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = { 130 cpu_to_le32(0xfcfcfcfc),
159 cpu_to_le32(0xaaaaaaaa), 131 cpu_to_le32(0xff00ff00),
160 cpu_to_le32(0xaaaaaaaa),
161 cpu_to_le32(0xaeaaaaaa),
162 cpu_to_le32(0xaaaaaaaa),
163 cpu_to_le32(0xcc00ff28),
164 cpu_to_le32(0x0000aaaa),
165 cpu_to_le32(0xcc00aaaa),
166 cpu_to_le32(0x0000aaaa),
167 cpu_to_le32(0xc0004000),
168 cpu_to_le32(0x00000000),
169 cpu_to_le32(0xf0005000),
170 cpu_to_le32(0xf0005000),
171}; 132};
172 133
173/* Loose Coex */ 134static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
174static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = { 135 {
175 cpu_to_le32(0xaaaaaaaa), 136 cpu_to_le32(0x40000000),
176 cpu_to_le32(0xaaaaaaaa), 137 cpu_to_le32(0x00000000),
177 cpu_to_le32(0xaaaaaaaa), 138 cpu_to_le32(0x44000000),
178 cpu_to_le32(0xaaaaaaaa), 139 cpu_to_le32(0x00000000),
179 cpu_to_le32(0xcc00ff28), 140 cpu_to_le32(0x40000000),
180 cpu_to_le32(0x0000aaaa), 141 cpu_to_le32(0x00000000),
181 cpu_to_le32(0xcc00aaaa), 142 cpu_to_le32(0x44000000),
182 cpu_to_le32(0x0000aaaa), 143 cpu_to_le32(0x00000000),
183 cpu_to_le32(0x00000000), 144 cpu_to_le32(0xc0004000),
184 cpu_to_le32(0x00000000), 145 cpu_to_le32(0xf0005000),
185 cpu_to_le32(0xf0005000), 146 cpu_to_le32(0xc0004000),
186 cpu_to_le32(0xf0005000), 147 cpu_to_le32(0xf0005000),
148 },
149 {
150 cpu_to_le32(0x40000000),
151 cpu_to_le32(0x00000000),
152 cpu_to_le32(0x44000000),
153 cpu_to_le32(0x00000000),
154 cpu_to_le32(0x40000000),
155 cpu_to_le32(0x00000000),
156 cpu_to_le32(0x44000000),
157 cpu_to_le32(0x00000000),
158 cpu_to_le32(0xc0004000),
159 cpu_to_le32(0xf0005000),
160 cpu_to_le32(0xc0004000),
161 cpu_to_le32(0xf0005000),
162 },
163 {
164 cpu_to_le32(0x40000000),
165 cpu_to_le32(0x00000000),
166 cpu_to_le32(0x44000000),
167 cpu_to_le32(0x00000000),
168 cpu_to_le32(0x40000000),
169 cpu_to_le32(0x00000000),
170 cpu_to_le32(0x44000000),
171 cpu_to_le32(0x00000000),
172 cpu_to_le32(0xc0004000),
173 cpu_to_le32(0xf0005000),
174 cpu_to_le32(0xc0004000),
175 cpu_to_le32(0xf0005000),
176 },
187}; 177};
188 178
189/* Full concurrency */ 179static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
190static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = { 180 {
191 cpu_to_le32(0xaaaaaaaa), 181 /* Tight */
192 cpu_to_le32(0xaaaaaaaa), 182 cpu_to_le32(0xaaaaaaaa),
193 cpu_to_le32(0xaaaaaaaa), 183 cpu_to_le32(0xaaaaaaaa),
194 cpu_to_le32(0xaaaaaaaa), 184 cpu_to_le32(0xaeaaaaaa),
195 cpu_to_le32(0xaaaaaaaa), 185 cpu_to_le32(0xaaaaaaaa),
196 cpu_to_le32(0xaaaaaaaa), 186 cpu_to_le32(0xcc00ff28),
197 cpu_to_le32(0xaaaaaaaa), 187 cpu_to_le32(0x0000aaaa),
198 cpu_to_le32(0xaaaaaaaa), 188 cpu_to_le32(0xcc00aaaa),
199 cpu_to_le32(0x00000000), 189 cpu_to_le32(0x0000aaaa),
200 cpu_to_le32(0x00000000), 190 cpu_to_le32(0xc0004000),
201 cpu_to_le32(0x00000000), 191 cpu_to_le32(0x00000000),
202 cpu_to_le32(0x00000000), 192 cpu_to_le32(0xf0005000),
193 cpu_to_le32(0xf0005000),
194 },
195 {
196 /* Loose */
197 cpu_to_le32(0xaaaaaaaa),
198 cpu_to_le32(0xaaaaaaaa),
199 cpu_to_le32(0xaaaaaaaa),
200 cpu_to_le32(0xaaaaaaaa),
201 cpu_to_le32(0xcc00ff28),
202 cpu_to_le32(0x0000aaaa),
203 cpu_to_le32(0xcc00aaaa),
204 cpu_to_le32(0x0000aaaa),
205 cpu_to_le32(0x00000000),
206 cpu_to_le32(0x00000000),
207 cpu_to_le32(0xf0005000),
208 cpu_to_le32(0xf0005000),
209 },
210 {
211 /* Tx Tx disabled */
212 cpu_to_le32(0xaaaaaaaa),
213 cpu_to_le32(0xaaaaaaaa),
214 cpu_to_le32(0xaaaaaaaa),
215 cpu_to_le32(0xaaaaaaaa),
216 cpu_to_le32(0xcc00ff28),
217 cpu_to_le32(0x0000aaaa),
218 cpu_to_le32(0xcc00aaaa),
219 cpu_to_le32(0x0000aaaa),
220 cpu_to_le32(0xC0004000),
221 cpu_to_le32(0xC0004000),
222 cpu_to_le32(0xF0005000),
223 cpu_to_le32(0xF0005000),
224 },
203}; 225};
204 226
205/* single shared antenna */ 227/* 20MHz / 40MHz below / 40Mhz above*/
206static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = { 228static const __le64 iwl_ci_mask[][3] = {
207 cpu_to_le32(0x40000000), 229 /* dummy entry for channel 0 */
208 cpu_to_le32(0x00000000), 230 {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
209 cpu_to_le32(0x44000000), 231 {
210 cpu_to_le32(0x00000000), 232 cpu_to_le64(0x0000001FFFULL),
211 cpu_to_le32(0x40000000), 233 cpu_to_le64(0x0ULL),
212 cpu_to_le32(0x00000000), 234 cpu_to_le64(0x00007FFFFFULL),
213 cpu_to_le32(0x44000000), 235 },
214 cpu_to_le32(0x00000000), 236 {
215 cpu_to_le32(0xC0004000), 237 cpu_to_le64(0x000000FFFFULL),
216 cpu_to_le32(0xF0005000), 238 cpu_to_le64(0x0ULL),
217 cpu_to_le32(0xC0004000), 239 cpu_to_le64(0x0003FFFFFFULL),
218 cpu_to_le32(0xF0005000), 240 },
241 {
242 cpu_to_le64(0x000003FFFCULL),
243 cpu_to_le64(0x0ULL),
244 cpu_to_le64(0x000FFFFFFCULL),
245 },
246 {
247 cpu_to_le64(0x00001FFFE0ULL),
248 cpu_to_le64(0x0ULL),
249 cpu_to_le64(0x007FFFFFE0ULL),
250 },
251 {
252 cpu_to_le64(0x00007FFF80ULL),
253 cpu_to_le64(0x00007FFFFFULL),
254 cpu_to_le64(0x01FFFFFF80ULL),
255 },
256 {
257 cpu_to_le64(0x0003FFFC00ULL),
258 cpu_to_le64(0x0003FFFFFFULL),
259 cpu_to_le64(0x0FFFFFFC00ULL),
260 },
261 {
262 cpu_to_le64(0x000FFFF000ULL),
263 cpu_to_le64(0x000FFFFFFCULL),
264 cpu_to_le64(0x3FFFFFF000ULL),
265 },
266 {
267 cpu_to_le64(0x007FFF8000ULL),
268 cpu_to_le64(0x007FFFFFE0ULL),
269 cpu_to_le64(0xFFFFFF8000ULL),
270 },
271 {
272 cpu_to_le64(0x01FFFE0000ULL),
273 cpu_to_le64(0x01FFFFFF80ULL),
274 cpu_to_le64(0xFFFFFE0000ULL),
275 },
276 {
277 cpu_to_le64(0x0FFFF00000ULL),
278 cpu_to_le64(0x0FFFFFFC00ULL),
279 cpu_to_le64(0x0ULL),
280 },
281 {
282 cpu_to_le64(0x3FFFC00000ULL),
283 cpu_to_le64(0x3FFFFFF000ULL),
284 cpu_to_le64(0x0)
285 },
286 {
287 cpu_to_le64(0xFFFE000000ULL),
288 cpu_to_le64(0xFFFFFF8000ULL),
289 cpu_to_le64(0x0)
290 },
291 {
292 cpu_to_le64(0xFFF8000000ULL),
293 cpu_to_le64(0xFFFFFE0000ULL),
294 cpu_to_le64(0x0)
295 },
296 {
297 cpu_to_le64(0xFE00000000ULL),
298 cpu_to_le64(0x0ULL),
299 cpu_to_le64(0x0)
300 },
219}; 301};
220 302
303static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
304 cpu_to_le32(0x22002200),
305 cpu_to_le32(0x33113311),
306};
307
308static enum iwl_bt_coex_lut_type
309iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
310{
311 struct ieee80211_chanctx_conf *chanctx_conf;
312 enum iwl_bt_coex_lut_type ret;
313 u16 phy_ctx_id;
314
315 /*
316 * Checking that we hold mvm->mutex is a good idea, but the rate
317 * control can't acquire the mutex since it runs in Tx path.
318 * So this is racy in that case, but in the worst case, the AMPDU
319 * size limit will be wrong for a short time which is not a big
320 * issue.
321 */
322
323 rcu_read_lock();
324
325 chanctx_conf = rcu_dereference(vif->chanctx_conf);
326
327 if (!chanctx_conf ||
328 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
329 rcu_read_unlock();
330 return BT_COEX_LOOSE_LUT;
331 }
332
333 ret = BT_COEX_TX_DIS_LUT;
334
335 if (mvm->cfg->bt_shared_single_ant) {
336 rcu_read_unlock();
337 return ret;
338 }
339
340 phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
341
342 if (mvm->last_bt_ci_cmd.primary_ch_phy_id == phy_ctx_id)
343 ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
344 else if (mvm->last_bt_ci_cmd.secondary_ch_phy_id == phy_ctx_id)
345 ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
346 /* else - default = TX TX disallowed */
347
348 rcu_read_unlock();
349
350 return ret;
351}
352
221int iwl_send_bt_init_conf(struct iwl_mvm *mvm) 353int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
222{ 354{
223 struct iwl_bt_coex_cmd *bt_cmd; 355 struct iwl_bt_coex_cmd *bt_cmd;
@@ -228,17 +360,10 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
228 .flags = CMD_SYNC, 360 .flags = CMD_SYNC,
229 }; 361 };
230 int ret; 362 int ret;
363 u32 flags;
231 364
232 /* go to CALIB state in internal BT-Coex state machine */ 365 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
233 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN, 366 return 0;
234 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
235 if (ret)
236 return ret;
237
238 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
239 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
240 if (ret)
241 return ret;
242 367
243 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); 368 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
244 if (!bt_cmd) 369 if (!bt_cmd)
@@ -246,40 +371,52 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
246 cmd.data[0] = bt_cmd; 371 cmd.data[0] = bt_cmd;
247 372
248 bt_cmd->max_kill = 5; 373 bt_cmd->max_kill = 5;
249 bt_cmd->bt3_time_t7_value = 1; 374 bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
250 bt_cmd->bt3_prio_sample_time = 2; 375 bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
251 bt_cmd->bt3_timer_t2_value = 0xc; 376 bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
377 bt_cmd->bt4_tx_rx_max_freq0 = 15,
252 378
253 bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ? 379 flags = iwlwifi_mod_params.bt_coex_active ?
254 BT_COEX_NW : BT_COEX_DISABLE; 380 BT_COEX_NW : BT_COEX_DISABLE;
255 bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE; 381 flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
382 bt_cmd->flags = cpu_to_le32(flags);
256 383
257 bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE | 384 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
258 BT_VALID_BT_PRIO_BOOST | 385 BT_VALID_BT_PRIO_BOOST |
259 BT_VALID_MAX_KILL | 386 BT_VALID_MAX_KILL |
260 BT_VALID_3W_TMRS | 387 BT_VALID_3W_TMRS |
261 BT_VALID_KILL_ACK | 388 BT_VALID_KILL_ACK |
262 BT_VALID_KILL_CTS | 389 BT_VALID_KILL_CTS |
263 BT_VALID_REDUCED_TX_POWER | 390 BT_VALID_REDUCED_TX_POWER |
264 BT_VALID_LUT); 391 BT_VALID_LUT |
392 BT_VALID_WIFI_RX_SW_PRIO_BOOST |
393 BT_VALID_WIFI_TX_SW_PRIO_BOOST |
394 BT_VALID_MULTI_PRIO_LUT |
395 BT_VALID_CORUN_LUT_20 |
396 BT_VALID_CORUN_LUT_40 |
397 BT_VALID_ANT_ISOLATION |
398 BT_VALID_ANT_ISOLATION_THRS |
399 BT_VALID_TXTX_DELTA_FREQ_THRS |
400 BT_VALID_TXRX_MAX_FREQ_0);
265 401
266 if (mvm->cfg->bt_shared_single_ant) 402 if (mvm->cfg->bt_shared_single_ant)
267 memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup, 403 memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
268 sizeof(iwl_single_shared_ant_lookup)); 404 sizeof(iwl_single_shared_ant));
269 else if (is_loose_coex())
270 memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
271 sizeof(iwl_tight_lookup));
272 else 405 else
273 memcpy(&bt_cmd->decision_lut, iwl_tight_lookup, 406 memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
274 sizeof(iwl_tight_lookup)); 407 sizeof(iwl_combined_lookup));
275 408
276 bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST); 409 memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
410 sizeof(iwl_bt_prio_boost));
411 memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
412 sizeof(iwl_bt_mprio_lut));
277 bt_cmd->kill_ack_msk = 413 bt_cmd->kill_ack_msk =
278 cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]); 414 cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
279 bt_cmd->kill_cts_msk = 415 bt_cmd->kill_cts_msk =
280 cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]); 416 cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
281 417
282 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 418 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
419 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
283 420
284 ret = iwl_mvm_send_cmd(mvm, &cmd); 421 ret = iwl_mvm_send_cmd(mvm, &cmd);
285 422
@@ -334,13 +471,17 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
334 if (!bt_cmd) 471 if (!bt_cmd)
335 return -ENOMEM; 472 return -ENOMEM;
336 cmd.data[0] = bt_cmd; 473 cmd.data[0] = bt_cmd;
474 bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
337 475
338 bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]); 476 bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
339 bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]); 477 bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
340 bt_cmd->valid_bit_msk = 478 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
341 cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS); 479 BT_VALID_KILL_ACK |
480 BT_VALID_KILL_CTS);
342 481
343 IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk); 482 IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
483 iwl_bt_ack_kill_msk[bt_kill_msk],
484 iwl_bt_cts_kill_msk[bt_kill_msk]);
344 485
345 ret = iwl_mvm_send_cmd(mvm, &cmd); 486 ret = iwl_mvm_send_cmd(mvm, &cmd);
346 487
@@ -380,8 +521,10 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
380 if (!bt_cmd) 521 if (!bt_cmd)
381 return -ENOMEM; 522 return -ENOMEM;
382 cmd.data[0] = bt_cmd; 523 cmd.data[0] = bt_cmd;
524 bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
383 525
384 bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER), 526 bt_cmd->valid_bit_msk =
527 cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
385 bt_cmd->bt_reduced_tx_power = sta_id; 528 bt_cmd->bt_reduced_tx_power = sta_id;
386 529
387 if (enable) 530 if (enable)
@@ -403,8 +546,25 @@ struct iwl_bt_iterator_data {
403 struct iwl_mvm *mvm; 546 struct iwl_mvm *mvm;
404 u32 num_bss_ifaces; 547 u32 num_bss_ifaces;
405 bool reduced_tx_power; 548 bool reduced_tx_power;
549 struct ieee80211_chanctx_conf *primary;
550 struct ieee80211_chanctx_conf *secondary;
406}; 551};
407 552
553static inline
554void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
555 struct ieee80211_vif *vif,
556 bool enable, int rssi)
557{
558 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
559
560 mvmvif->bf_data.last_bt_coex_event = rssi;
561 mvmvif->bf_data.bt_coex_max_thold =
562 enable ? BT_ENABLE_REDUCED_TXPOWER_THRESHOLD : 0;
563 mvmvif->bf_data.bt_coex_min_thold =
564 enable ? BT_DISABLE_REDUCED_TXPOWER_THRESHOLD : 0;
565}
566
567/* must be called under rcu_read_lock */
408static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, 568static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
409 struct ieee80211_vif *vif) 569 struct ieee80211_vif *vif)
410{ 570{
@@ -413,65 +573,94 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
413 struct iwl_mvm *mvm = data->mvm; 573 struct iwl_mvm *mvm = data->mvm;
414 struct ieee80211_chanctx_conf *chanctx_conf; 574 struct ieee80211_chanctx_conf *chanctx_conf;
415 enum ieee80211_smps_mode smps_mode; 575 enum ieee80211_smps_mode smps_mode;
416 enum ieee80211_band band;
417 int ave_rssi; 576 int ave_rssi;
418 577
419 lockdep_assert_held(&mvm->mutex); 578 lockdep_assert_held(&mvm->mutex);
420 if (vif->type != NL80211_IFTYPE_STATION)
421 return;
422 579
423 rcu_read_lock(); 580 if (vif->type != NL80211_IFTYPE_STATION &&
424 chanctx_conf = rcu_dereference(vif->chanctx_conf); 581 vif->type != NL80211_IFTYPE_AP)
425 if (chanctx_conf && chanctx_conf->def.chan) 582 return;
426 band = chanctx_conf->def.chan->band;
427 else
428 band = -1;
429 rcu_read_unlock();
430 583
431 smps_mode = IEEE80211_SMPS_AUTOMATIC; 584 smps_mode = IEEE80211_SMPS_AUTOMATIC;
432 585
433 /* non associated BSSes aren't to be considered */ 586 chanctx_conf = rcu_dereference(vif->chanctx_conf);
434 if (!vif->bss_conf.assoc) 587
588 /* If channel context is invalid or not on 2.4GHz .. */
589 if ((!chanctx_conf ||
590 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
591 /* ... and it is an associated STATION, relax constraints */
592 if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
593 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
594 smps_mode);
595 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
435 return; 596 return;
597 }
598
599 /* SoftAP / GO will always be primary */
600 if (vif->type == NL80211_IFTYPE_AP) {
601 if (!mvmvif->ap_ibss_active)
602 return;
603
604 /* the Ack / Cts kill mask must be default if AP / GO */
605 data->reduced_tx_power = false;
436 606
437 if (band != IEEE80211_BAND_2GHZ) { 607 if (chanctx_conf == data->primary)
438 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, 608 return;
439 smps_mode); 609
610 /* downgrade the current primary no matter what its type is */
611 data->secondary = data->primary;
612 data->primary = chanctx_conf;
440 return; 613 return;
441 } 614 }
442 615
443 if (data->notif->bt_status) 616 data->num_bss_ifaces++;
444 smps_mode = IEEE80211_SMPS_DYNAMIC; 617
618 /* we are now a STA / P2P Client, and take associated ones only */
619 if (!vif->bss_conf.assoc)
620 return;
621
622 /* STA / P2P Client, try to be primary if first vif */
623 if (!data->primary || data->primary == chanctx_conf)
624 data->primary = chanctx_conf;
625 else if (!data->secondary)
626 /* if secondary is not NULL, it might be a GO */
627 data->secondary = chanctx_conf;
445 628
446 if (data->notif->bt_traffic_load >= IWL_BT_LOAD_FORCE_SISO_THRESHOLD) 629 if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
447 smps_mode = IEEE80211_SMPS_STATIC; 630 smps_mode = IEEE80211_SMPS_STATIC;
631 else if (le32_to_cpu(data->notif->bt_activity_grading) >=
632 BT_LOW_TRAFFIC)
633 smps_mode = IEEE80211_SMPS_DYNAMIC;
448 634
449 IWL_DEBUG_COEX(data->mvm, 635 IWL_DEBUG_COEX(data->mvm,
450 "mac %d: bt_status %d traffic_load %d smps_req %d\n", 636 "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
451 mvmvif->id, data->notif->bt_status, 637 mvmvif->id, data->notif->bt_status,
452 data->notif->bt_traffic_load, smps_mode); 638 data->notif->bt_activity_grading, smps_mode);
453 639
454 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode); 640 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
455 641
456 /* don't reduce the Tx power if in loose scheme */ 642 /* don't reduce the Tx power if in loose scheme */
457 if (is_loose_coex()) 643 if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
644 mvm->cfg->bt_shared_single_ant) {
645 data->reduced_tx_power = false;
646 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
458 return; 647 return;
648 }
459 649
460 data->num_bss_ifaces++; 650 /* reduced Txpower only if BT is on, so ...*/
461 651 if (!data->notif->bt_status) {
462 /* reduced Txpower only if there are open BT connections, so ...*/
463 if (!BT_MBOX_MSG(data->notif, 3, OPEN_CON_2)) {
464 /* ... cancel reduced Tx power ... */ 652 /* ... cancel reduced Tx power ... */
465 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) 653 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
466 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); 654 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
467 data->reduced_tx_power = false; 655 data->reduced_tx_power = false;
468 656
469 /* ... and there is no need to get reports on RSSI any more. */ 657 /* ... and there is no need to get reports on RSSI any more. */
470 ieee80211_disable_rssi_reports(vif); 658 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
471 return; 659 return;
472 } 660 }
473 661
474 ave_rssi = ieee80211_ave_rssi(vif); 662 /* try to get the avg rssi from fw */
663 ave_rssi = mvmvif->bf_data.ave_beacon_signal;
475 664
476 /* if the RSSI isn't valid, fake it is very low */ 665 /* if the RSSI isn't valid, fake it is very low */
477 if (!ave_rssi) 666 if (!ave_rssi)
@@ -499,8 +688,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
499 } 688 }
500 689
501 /* Begin to monitor the RSSI: it may influence the reduced Tx power */ 690 /* Begin to monitor the RSSI: it may influence the reduced Tx power */
502 ieee80211_enable_rssi_reports(vif, BT_DISABLE_REDUCED_TXPOWER_THRESHOLD, 691 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
503 BT_ENABLE_REDUCED_TXPOWER_THRESHOLD);
504} 692}
505 693
506static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) 694static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
@@ -510,11 +698,72 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
510 .notif = &mvm->last_bt_notif, 698 .notif = &mvm->last_bt_notif,
511 .reduced_tx_power = true, 699 .reduced_tx_power = true,
512 }; 700 };
701 struct iwl_bt_coex_ci_cmd cmd = {};
702 u8 ci_bw_idx;
513 703
704 rcu_read_lock();
514 ieee80211_iterate_active_interfaces_atomic( 705 ieee80211_iterate_active_interfaces_atomic(
515 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 706 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
516 iwl_mvm_bt_notif_iterator, &data); 707 iwl_mvm_bt_notif_iterator, &data);
517 708
709 if (data.primary) {
710 struct ieee80211_chanctx_conf *chan = data.primary;
711 if (WARN_ON(!chan->def.chan)) {
712 rcu_read_unlock();
713 return;
714 }
715
716 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
717 ci_bw_idx = 0;
718 cmd.co_run_bw_primary = 0;
719 } else {
720 cmd.co_run_bw_primary = 1;
721 if (chan->def.center_freq1 >
722 chan->def.chan->center_freq)
723 ci_bw_idx = 2;
724 else
725 ci_bw_idx = 1;
726 }
727
728 cmd.bt_primary_ci =
729 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
730 cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
731 }
732
733 if (data.secondary) {
734 struct ieee80211_chanctx_conf *chan = data.secondary;
735 if (WARN_ON(!data.secondary->def.chan)) {
736 rcu_read_unlock();
737 return;
738 }
739
740 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
741 ci_bw_idx = 0;
742 cmd.co_run_bw_secondary = 0;
743 } else {
744 cmd.co_run_bw_secondary = 1;
745 if (chan->def.center_freq1 >
746 chan->def.chan->center_freq)
747 ci_bw_idx = 2;
748 else
749 ci_bw_idx = 1;
750 }
751
752 cmd.bt_secondary_ci =
753 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
754 cmd.secondary_ch_phy_id = *((u16 *)data.primary->drv_priv);
755 }
756
757 rcu_read_unlock();
758
759 /* Don't spam the fw with the same command over and over */
760 if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
761 if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
762 sizeof(cmd), &cmd))
763 IWL_ERR(mvm, "Failed to send BT_CI cmd");
764 memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
765 }
766
518 /* 767 /*
519 * If there are no BSS / P2P client interfaces, reduced Tx Power is 768 * If there are no BSS / P2P client interfaces, reduced Tx Power is
520 * irrelevant since it is based on the RSSI coming from the beacon. 769 * irrelevant since it is based on the RSSI coming from the beacon.
@@ -536,12 +785,18 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
536 785
537 786
538 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); 787 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
539 IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not "); 788 IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
789 notif->bt_status ? "ON" : "OFF");
540 IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn); 790 IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
541 IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load); 791 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
792 IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
793 le32_to_cpu(notif->primary_ch_lut));
794 IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
795 le32_to_cpu(notif->secondary_ch_lut));
796 IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
797 le32_to_cpu(notif->bt_activity_grading));
542 IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n", 798 IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
543 notif->bt_agg_traffic_load); 799 notif->bt_agg_traffic_load);
544 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
545 800
546 /* remember this notification for future use: rssi fluctuations */ 801 /* remember this notification for future use: rssi fluctuations */
547 memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); 802 memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
@@ -565,6 +820,18 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
565 struct ieee80211_sta *sta; 820 struct ieee80211_sta *sta;
566 struct iwl_mvm_sta *mvmsta; 821 struct iwl_mvm_sta *mvmsta;
567 822
823 struct ieee80211_chanctx_conf *chanctx_conf;
824
825 rcu_read_lock();
826 chanctx_conf = rcu_dereference(vif->chanctx_conf);
827 /* If channel context is invalid or not on 2.4GHz - don't count it */
828 if (!chanctx_conf ||
829 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
830 rcu_read_unlock();
831 return;
832 }
833 rcu_read_unlock();
834
568 if (vif->type != NL80211_IFTYPE_STATION || 835 if (vif->type != NL80211_IFTYPE_STATION ||
569 mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) 836 mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
570 return; 837 return;
@@ -594,15 +861,15 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
594 }; 861 };
595 int ret; 862 int ret;
596 863
597 mutex_lock(&mvm->mutex); 864 lockdep_assert_held(&mvm->mutex);
598 865
599 /* Rssi update while not associated ?! */ 866 /* Rssi update while not associated ?! */
600 if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) 867 if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
601 goto out_unlock; 868 return;
602 869
603 /* No open connection - reports should be disabled */ 870 /* No BT - reports should be disabled */
604 if (!BT_MBOX_MSG(&mvm->last_bt_notif, 3, OPEN_CON_2)) 871 if (!mvm->last_bt_notif.bt_status)
605 goto out_unlock; 872 return;
606 873
607 IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, 874 IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
608 rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); 875 rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
@@ -611,7 +878,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
611 * Check if rssi is good enough for reduced Tx power, but not in loose 878 * Check if rssi is good enough for reduced Tx power, but not in loose
612 * scheme. 879 * scheme.
613 */ 880 */
614 if (rssi_event == RSSI_EVENT_LOW || is_loose_coex()) 881 if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
882 iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
615 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, 883 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
616 false); 884 false);
617 else 885 else
@@ -633,12 +901,52 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
633 901
634 if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power)) 902 if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
635 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); 903 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
904}
905
906#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
907#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
636 908
637 out_unlock: 909u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
638 mutex_unlock(&mvm->mutex); 910 struct ieee80211_sta *sta)
911{
912 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
913 enum iwl_bt_coex_lut_type lut_type;
914
915 if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
916 BT_LOW_TRAFFIC)
917 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
918
919 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
920
921 if (lut_type == BT_COEX_LOOSE_LUT)
922 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
923
924 /* tight coex, high bt traffic, reduce AGG time limit */
925 return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
926}
927
928bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
929 struct ieee80211_sta *sta)
930{
931 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
932
933 if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
934 BT_HIGH_TRAFFIC)
935 return true;
936
937 /*
938 * In Tight, BT can't Rx while we Tx, so use both antennas since BT is
939 * already killed.
940 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while we
941 * Tx.
942 */
943 return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
639} 944}
640 945
641void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 946void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
642{ 947{
948 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
949 return;
950
643 iwl_mvm_bt_coex_notif_handle(mvm); 951 iwl_mvm_bt_coex_notif_handle(mvm);
644} 952}
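In iwl_mvm_bt_coex_notif_handle() above, the channel-inhibition mask sent to the firmware is picked from iwl_ci_mask by the channel's hw_value and a bandwidth column: 0 for 20 MHz, 1 for 40 MHz with the extension channel below the primary, 2 for 40 MHz with it above. The column selection, detached from the chanctx plumbing, as a sketch:

/* Sketch: pick the 20 MHz / 40 MHz-below / 40 MHz-above column. */
static u8 bt_ci_bw_index(const struct cfg80211_chan_def *def)
{
	if (def->width < NL80211_CHAN_WIDTH_40)
		return 0;				/* 20 MHz */
	if (def->center_freq1 > def->chan->center_freq)
		return 2;				/* extension channel above */
	return 1;					/* extension channel below */
}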
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 2bf29f7992ee..4b6d670c3509 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -70,7 +70,9 @@
70#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) 70#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
71#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) 71#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
72#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20 72#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
73#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20 73#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8
74#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
75#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
74#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50 76#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
75#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50 77#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
76#define IWL_MVM_PS_SNOOZE_INTERVAL 25 78#define IWL_MVM_PS_SNOOZE_INTERVAL 25
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 417639f77b01..6f45966817bb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -67,6 +67,7 @@
67#include <net/cfg80211.h> 67#include <net/cfg80211.h>
68#include <net/ipv6.h> 68#include <net/ipv6.h>
69#include <net/tcp.h> 69#include <net/tcp.h>
70#include <net/addrconf.h>
70#include "iwl-modparams.h" 71#include "iwl-modparams.h"
71#include "fw-api.h" 72#include "fw-api.h"
72#include "mvm.h" 73#include "mvm.h"
@@ -381,14 +382,74 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
381 union { 382 union {
382 struct iwl_proto_offload_cmd_v1 v1; 383 struct iwl_proto_offload_cmd_v1 v1;
383 struct iwl_proto_offload_cmd_v2 v2; 384 struct iwl_proto_offload_cmd_v2 v2;
385 struct iwl_proto_offload_cmd_v3_small v3s;
386 struct iwl_proto_offload_cmd_v3_large v3l;
384 } cmd = {}; 387 } cmd = {};
388 struct iwl_host_cmd hcmd = {
389 .id = PROT_OFFLOAD_CONFIG_CMD,
390 .flags = CMD_SYNC,
391 .data[0] = &cmd,
392 .dataflags[0] = IWL_HCMD_DFL_DUP,
393 };
385 struct iwl_proto_offload_cmd_common *common; 394 struct iwl_proto_offload_cmd_common *common;
386 u32 enabled = 0, size; 395 u32 enabled = 0, size;
396 u32 capa_flags = mvm->fw->ucode_capa.flags;
387#if IS_ENABLED(CONFIG_IPV6) 397#if IS_ENABLED(CONFIG_IPV6)
388 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 398 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
389 int i; 399 int i;
390 400
391 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { 401 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
402 capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
403 struct iwl_ns_config *nsc;
404 struct iwl_targ_addr *addrs;
405 int n_nsc, n_addrs;
406 int c;
407
408 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
409 nsc = cmd.v3s.ns_config;
410 n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
411 addrs = cmd.v3s.targ_addrs;
412 n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
413 } else {
414 nsc = cmd.v3l.ns_config;
415 n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
416 addrs = cmd.v3l.targ_addrs;
417 n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
418 }
419
420 if (mvmvif->num_target_ipv6_addrs)
421 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
422
423 /*
424 * For each address we have (and that will fit) fill a target
425 * address struct and combine for NS offload structs with the
426 * solicited node addresses.
427 */
428 for (i = 0, c = 0;
429 i < mvmvif->num_target_ipv6_addrs &&
430 i < n_addrs && c < n_nsc; i++) {
431 struct in6_addr solicited_addr;
432 int j;
433
434 addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
435 &solicited_addr);
436 for (j = 0; j < c; j++)
437 if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
438 &solicited_addr) == 0)
439 break;
440 if (j == c)
441 c++;
442 addrs[i].addr = mvmvif->target_ipv6_addrs[i];
443 addrs[i].config_num = cpu_to_le32(j);
444 nsc[j].dest_ipv6_addr = solicited_addr;
445 memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
446 }
447
448 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
449 cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
450 else
451 cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
452 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
392 if (mvmvif->num_target_ipv6_addrs) { 453 if (mvmvif->num_target_ipv6_addrs) {
393 enabled |= IWL_D3_PROTO_OFFLOAD_NS; 454 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
394 memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN); 455 memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
@@ -419,7 +480,13 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
419 } 480 }
420#endif 481#endif
421 482
422 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { 483 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
484 common = &cmd.v3s.common;
485 size = sizeof(cmd.v3s);
486 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
487 common = &cmd.v3l.common;
488 size = sizeof(cmd.v3l);
489 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
423 common = &cmd.v2.common; 490 common = &cmd.v2.common;
424 size = sizeof(cmd.v2); 491 size = sizeof(cmd.v2);
425 } else { 492 } else {
@@ -438,8 +505,8 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
438 505
439 common->enabled = cpu_to_le32(enabled); 506 common->enabled = cpu_to_le32(enabled);
440 507
441 return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC, 508 hcmd.len[0] = size;
442 size, &cmd); 509 return iwl_mvm_send_cmd(mvm, &hcmd);
443} 510}
444 511
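
The NS-offload loop in iwl_mvm_send_proto_offload() above groups target addresses by their solicited-node multicast address via addrconf_addr_solict_mult(). A small userspace sketch of that derivation, shown here only to illustrate why several targets can share one ns_config entry (ff02::1:ffXX:XXXX keeps just the low 24 bits of the target):

#include <netinet/in.h>
#include <string.h>

static void solicited_node_addr(const struct in6_addr *target,
                                struct in6_addr *solicited)
{
        memset(solicited, 0, sizeof(*solicited));
        solicited->s6_addr[0]  = 0xff;          /* ff02:: link-local multicast */
        solicited->s6_addr[1]  = 0x02;
        solicited->s6_addr[11] = 0x01;          /* ::1:ff00:0/104 prefix */
        solicited->s6_addr[12] = 0xff;
        /* last 24 bits are copied from the target address */
        memcpy(&solicited->s6_addr[13], &target->s6_addr[13], 3);
}
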
445enum iwl_mvm_tcp_packet_type { 512enum iwl_mvm_tcp_packet_type {
@@ -793,6 +860,74 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
793 return 0; 860 return 0;
794} 861}
795 862
863static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
864 struct ieee80211_vif *vif)
865{
866 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
867 struct iwl_nonqos_seq_query_cmd query_cmd = {
868 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
869 .mac_id_n_color =
870 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
871 mvmvif->color)),
872 };
873 struct iwl_host_cmd cmd = {
874 .id = NON_QOS_TX_COUNTER_CMD,
875 .flags = CMD_SYNC | CMD_WANT_SKB,
876 };
877 int err;
878 u32 size;
879
880 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
881 cmd.data[0] = &query_cmd;
882 cmd.len[0] = sizeof(query_cmd);
883 }
884
885 err = iwl_mvm_send_cmd(mvm, &cmd);
886 if (err)
887 return err;
888
889 size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
890 size -= sizeof(cmd.resp_pkt->hdr);
891 if (size < sizeof(__le16)) {
892 err = -EINVAL;
893 } else {
894 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
895 /* new API returns next, not last-used seqno */
896 if (mvm->fw->ucode_capa.flags &
897 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
898 err -= 0x10;
899 }
900
901 iwl_free_resp(&cmd);
902 return err;
903}
904
905void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
906{
907 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
908 struct iwl_nonqos_seq_query_cmd query_cmd = {
909 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
910 .mac_id_n_color =
911 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
912 mvmvif->color)),
913 .value = cpu_to_le16(mvmvif->seqno),
914 };
915
916 /* return if called during restart, not resume from D3 */
917 if (!mvmvif->seqno_valid)
918 return;
919
920 mvmvif->seqno_valid = false;
921
922 if (!(mvm->fw->ucode_capa.flags &
923 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
924 return;
925
926 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
927 sizeof(query_cmd), &query_cmd))
928 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
929}
930
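
Both helpers above, like the QoS counters handled in __iwl_mvm_suspend() below, convert between "last used" and "next to use" sequence numbers by adding or subtracting 0x10. That constant is not arbitrary: the 802.11 sequence-control field keeps the fragment number in bits 0-3 and the sequence number in bits 4-15, so the sequence counter advances in steps of 0x10. A one-line illustration (0xfff0 is the IEEE80211_SCTL_SEQ mask used elsewhere in mac80211):

/* Next sequence-control value after 'last_used', keeping fragment bits clear. */
static unsigned short next_seq_ctl(unsigned short last_used)
{
        return (unsigned short)((last_used + 0x10) & 0xfff0);
}
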
796static int __iwl_mvm_suspend(struct ieee80211_hw *hw, 931static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
797 struct cfg80211_wowlan *wowlan, 932 struct cfg80211_wowlan *wowlan,
798 bool test) 933 bool test)
@@ -829,7 +964,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
829 }; 964 };
830 int ret, i; 965 int ret, i;
831 int len __maybe_unused; 966 int len __maybe_unused;
832 u16 seq;
833 u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT; 967 u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
834 968
835 if (!wowlan) { 969 if (!wowlan) {
@@ -872,26 +1006,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
872 1006
873 mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv; 1007 mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
874 1008
875 /*
876 * The D3 firmware still hardcodes the AP station ID for the
877 * BSS we're associated with as 0. Store the real STA ID here
878 * and assign 0. When we leave this function, we'll restore
879 * the original value for the resume code.
880 */
881 old_ap_sta_id = mvm_ap_sta->sta_id;
882 mvm_ap_sta->sta_id = 0;
883 mvmvif->ap_sta_id = 0;
884
885 /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */ 1009 /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
886 1010
887 wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported; 1011 wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
888 1012
889 /* 1013 /* Query the last used seqno and set it */
890 * We know the last used seqno, and the uCode expects to know that 1014 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
891 * one, it will increment before TX. 1015 if (ret < 0)
892 */ 1016 goto out_noreset;
893 seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ; 1017 wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
894 wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq);
895 1018
896 /* 1019 /*
897 * For QoS counters, we store the one to use next, so subtract 0x10 1020 * For QoS counters, we store the one to use next, so subtract 0x10
@@ -899,7 +1022,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
899 * increment after using the value (i.e. store the next value to use). 1022 * increment after using the value (i.e. store the next value to use).
900 */ 1023 */
901 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 1024 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
902 seq = mvm_ap_sta->tid_data[i].seq_number; 1025 u16 seq = mvm_ap_sta->tid_data[i].seq_number;
903 seq -= 0x10; 1026 seq -= 0x10;
904 wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq); 1027 wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
905 } 1028 }
@@ -945,6 +1068,16 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
945 iwl_trans_stop_device(mvm->trans); 1068 iwl_trans_stop_device(mvm->trans);
946 1069
947 /* 1070 /*
1071 * The D3 firmware still hardcodes the AP station ID for the
1072 * BSS we're associated with as 0. Store the real STA ID here
1073 * and assign 0. When we leave this function, we'll restore
1074 * the original value for the resume code.
1075 */
1076 old_ap_sta_id = mvm_ap_sta->sta_id;
1077 mvm_ap_sta->sta_id = 0;
1078 mvmvif->ap_sta_id = 0;
1079
1080 /*
948 * Set the HW restart bit -- this is mostly true as we're 1081 * Set the HW restart bit -- this is mostly true as we're
949 * going to load new firmware and reprogram that, though 1082 * going to load new firmware and reprogram that, though
950 * the reprogramming is going to be manual to avoid adding 1083 * the reprogramming is going to be manual to avoid adding
@@ -1059,6 +1192,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1059 if (ret) 1192 if (ret)
1060 goto out; 1193 goto out;
1061 1194
1195 ret = iwl_mvm_power_update_device_mode(mvm);
1196 if (ret)
1197 goto out;
1198
1062 ret = iwl_mvm_power_update_mode(mvm, vif); 1199 ret = iwl_mvm_power_update_mode(mvm, vif);
1063 if (ret) 1200 if (ret)
1064 goto out; 1201 goto out;
@@ -1109,16 +1246,26 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1109 return __iwl_mvm_suspend(hw, wowlan, false); 1246 return __iwl_mvm_suspend(hw, wowlan, false);
1110} 1247}
1111 1248
1249/* converted data from the different status responses */
1250struct iwl_wowlan_status_data {
1251 u16 pattern_number;
1252 u16 qos_seq_ctr[8];
1253 u32 wakeup_reasons;
1254 u32 wake_packet_length;
1255 u32 wake_packet_bufsize;
1256 const u8 *wake_packet;
1257};
1258
1112static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, 1259static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1113 struct ieee80211_vif *vif, 1260 struct ieee80211_vif *vif,
1114 struct iwl_wowlan_status *status) 1261 struct iwl_wowlan_status_data *status)
1115{ 1262{
1116 struct sk_buff *pkt = NULL; 1263 struct sk_buff *pkt = NULL;
1117 struct cfg80211_wowlan_wakeup wakeup = { 1264 struct cfg80211_wowlan_wakeup wakeup = {
1118 .pattern_idx = -1, 1265 .pattern_idx = -1,
1119 }; 1266 };
1120 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; 1267 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1121 u32 reasons = le32_to_cpu(status->wakeup_reasons); 1268 u32 reasons = status->wakeup_reasons;
1122 1269
1123 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { 1270 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
1124 wakeup_report = NULL; 1271 wakeup_report = NULL;
@@ -1130,7 +1277,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1130 1277
1131 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) 1278 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
1132 wakeup.pattern_idx = 1279 wakeup.pattern_idx =
1133 le16_to_cpu(status->pattern_number); 1280 status->pattern_number;
1134 1281
1135 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | 1282 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1136 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) 1283 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
@@ -1158,8 +1305,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1158 wakeup.tcp_match = true; 1305 wakeup.tcp_match = true;
1159 1306
1160 if (status->wake_packet_bufsize) { 1307 if (status->wake_packet_bufsize) {
1161 int pktsize = le32_to_cpu(status->wake_packet_bufsize); 1308 int pktsize = status->wake_packet_bufsize;
1162 int pktlen = le32_to_cpu(status->wake_packet_length); 1309 int pktlen = status->wake_packet_length;
1163 const u8 *pktdata = status->wake_packet; 1310 const u8 *pktdata = status->wake_packet;
1164 struct ieee80211_hdr *hdr = (void *)pktdata; 1311 struct ieee80211_hdr *hdr = (void *)pktdata;
1165 int truncated = pktlen - pktsize; 1312 int truncated = pktlen - pktsize;
@@ -1239,8 +1386,229 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1239 kfree_skb(pkt); 1386 kfree_skb(pkt);
1240} 1387}
1241 1388
1389static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
1390 struct ieee80211_key_seq *seq)
1391{
1392 u64 pn;
1393
1394 pn = le64_to_cpu(sc->pn);
1395 seq->ccmp.pn[0] = pn >> 40;
1396 seq->ccmp.pn[1] = pn >> 32;
1397 seq->ccmp.pn[2] = pn >> 24;
1398 seq->ccmp.pn[3] = pn >> 16;
1399 seq->ccmp.pn[4] = pn >> 8;
1400 seq->ccmp.pn[5] = pn;
1401}
1402
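
As a worked example of the byte split in iwl_mvm_aes_sc_to_seq() just above: a 48-bit CCMP packet number of 0x0123456789AB unpacks to pn[0..5] = 01 23 45 67 89 AB, i.e. most-significant byte first, which is the order mac80211 expects in struct ieee80211_key_seq.
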
1403static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
1404 struct ieee80211_key_seq *seq)
1405{
1406 seq->tkip.iv32 = le32_to_cpu(sc->iv32);
1407 seq->tkip.iv16 = le16_to_cpu(sc->iv16);
1408}
1409
1410static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs,
1411 struct ieee80211_key_conf *key)
1412{
1413 int tid;
1414
1415 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1416
1417 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1418 struct ieee80211_key_seq seq = {};
1419
1420 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1421 ieee80211_set_key_rx_seq(key, tid, &seq);
1422 }
1423}
1424
1425static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1426 struct ieee80211_key_conf *key)
1427{
1428 int tid;
1429
1430 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1431
1432 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1433 struct ieee80211_key_seq seq = {};
1434
1435 iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
1436 ieee80211_set_key_rx_seq(key, tid, &seq);
1437 }
1438}
1439
1440static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
1441 struct iwl_wowlan_status_v6 *status)
1442{
1443 union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
1444
1445 switch (key->cipher) {
1446 case WLAN_CIPHER_SUITE_CCMP:
1447 iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key);
1448 break;
1449 case WLAN_CIPHER_SUITE_TKIP:
1450 iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
1451 break;
1452 default:
1453 WARN_ON(1);
1454 }
1455}
1456
1457struct iwl_mvm_d3_gtk_iter_data {
1458 struct iwl_wowlan_status_v6 *status;
1459 void *last_gtk;
1460 u32 cipher;
1461 bool find_phase, unhandled_cipher;
1462 int num_keys;
1463};
1464
1465static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
1466 struct ieee80211_vif *vif,
1467 struct ieee80211_sta *sta,
1468 struct ieee80211_key_conf *key,
1469 void *_data)
1470{
1471 struct iwl_mvm_d3_gtk_iter_data *data = _data;
1472
1473 if (data->unhandled_cipher)
1474 return;
1475
1476 switch (key->cipher) {
1477 case WLAN_CIPHER_SUITE_WEP40:
1478 case WLAN_CIPHER_SUITE_WEP104:
1479 /* ignore WEP completely, nothing to do */
1480 return;
1481 case WLAN_CIPHER_SUITE_CCMP:
1482 case WLAN_CIPHER_SUITE_TKIP:
1483 /* we support these */
1484 break;
1485 default:
1486 /* everything else (even CMAC for MFP) - disconnect from AP */
1487 data->unhandled_cipher = true;
1488 return;
1489 }
1490
1491 data->num_keys++;
1492
1493 /*
1494 * pairwise key - update sequence counters only;
1495 * note that this assumes no TDLS sessions are active
1496 */
1497 if (sta) {
1498 struct ieee80211_key_seq seq = {};
1499 union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
1500
1501 if (data->find_phase)
1502 return;
1503
1504 switch (key->cipher) {
1505 case WLAN_CIPHER_SUITE_CCMP:
1506 iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
1507 iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
1508 break;
1509 case WLAN_CIPHER_SUITE_TKIP:
1510 iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
1511 iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
1512 break;
1513 }
1514 ieee80211_set_key_tx_seq(key, &seq);
1515
1516 /* that's it for this key */
1517 return;
1518 }
1519
1520 if (data->find_phase) {
1521 data->last_gtk = key;
1522 data->cipher = key->cipher;
1523 return;
1524 }
1525
1526 if (data->status->num_of_gtk_rekeys)
1527 ieee80211_remove_key(key);
1528 else if (data->last_gtk == key)
1529 iwl_mvm_set_key_rx_seq(key, data->status);
1530}
1531
1532static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1533 struct ieee80211_vif *vif,
1534 struct iwl_wowlan_status_v6 *status)
1535{
1536 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1537 struct iwl_mvm_d3_gtk_iter_data gtkdata = {
1538 .status = status,
1539 };
1540
1541 if (!status || !vif->bss_conf.bssid)
1542 return false;
1543
1544 /* find last GTK that we used initially, if any */
1545 gtkdata.find_phase = true;
1546 ieee80211_iter_keys(mvm->hw, vif,
1547 iwl_mvm_d3_update_gtks, &gtkdata);
1548 /* not trying to keep connections with MFP/unhandled ciphers */
1549 if (gtkdata.unhandled_cipher)
1550 return false;
1551 if (!gtkdata.num_keys)
1552 return true;
1553 if (!gtkdata.last_gtk)
1554 return false;
1555
1556 /*
1557 * invalidate all other GTKs that might still exist and update
1558 * the one that we used
1559 */
1560 gtkdata.find_phase = false;
1561 ieee80211_iter_keys(mvm->hw, vif,
1562 iwl_mvm_d3_update_gtks, &gtkdata);
1563
1564 if (status->num_of_gtk_rekeys) {
1565 struct ieee80211_key_conf *key;
1566 struct {
1567 struct ieee80211_key_conf conf;
1568 u8 key[32];
1569 } conf = {
1570 .conf.cipher = gtkdata.cipher,
1571 .conf.keyidx = status->gtk.key_index,
1572 };
1573
1574 switch (gtkdata.cipher) {
1575 case WLAN_CIPHER_SUITE_CCMP:
1576 conf.conf.keylen = WLAN_KEY_LEN_CCMP;
1577 memcpy(conf.conf.key, status->gtk.decrypt_key,
1578 WLAN_KEY_LEN_CCMP);
1579 break;
1580 case WLAN_CIPHER_SUITE_TKIP:
1581 conf.conf.keylen = WLAN_KEY_LEN_TKIP;
1582 memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
1583 /* leave TX MIC key zeroed, we don't use it anyway */
1584 memcpy(conf.conf.key +
1585 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
1586 status->gtk.tkip_mic_key, 8);
1587 break;
1588 }
1589
1590 key = ieee80211_gtk_rekey_add(vif, &conf.conf);
1591 if (IS_ERR(key))
1592 return false;
1593 iwl_mvm_set_key_rx_seq(key, status);
1594 }
1595
1596 if (status->num_of_gtk_rekeys) {
1597 __be64 replay_ctr =
1598 cpu_to_be64(le64_to_cpu(status->replay_ctr));
1599 ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
1600 (void *)&replay_ctr, GFP_KERNEL);
1601 }
1602
1603 mvmvif->seqno_valid = true;
1604 /* +0x10 because the set API expects next-to-use, not last-used */
1605 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
1606
1607 return true;
1608}
1609
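
One detail worth spelling out from the TKIP branch of iwl_mvm_setup_connection_keep() above: the rekeyed group key handed to mac80211 is a 32-byte buffer laid out as temporal key | TX MIC key | RX MIC key, and only the RX MIC half matters on this receive-only path. A sketch of that assembly under those assumptions (the offsets mirror the NL80211_TKIP_DATA_OFFSET_* definitions; the helper itself is illustrative):

#include <string.h>

#define TK_LEN          16
#define MIC_LEN         8
#define OFF_TX_MIC      16      /* NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY */
#define OFF_RX_MIC      24      /* NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY */

/* Build the 32-byte TKIP key blob: TK first, TX MIC left zeroed (unused
 * when we only need to receive), RX MIC from the firmware status at
 * offset 24. */
static void build_tkip_key(unsigned char out[32],
                           const unsigned char tk[TK_LEN],
                           const unsigned char rx_mic[MIC_LEN])
{
        memset(out, 0, 32);
        memcpy(out, tk, TK_LEN);
        memcpy(out + OFF_RX_MIC, rx_mic, MIC_LEN);
}
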
1242/* releases the MVM mutex */ 1610/* releases the MVM mutex */
1243static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, 1611static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1244 struct ieee80211_vif *vif) 1612 struct ieee80211_vif *vif)
1245{ 1613{
1246 u32 base = mvm->error_event_table; 1614 u32 base = mvm->error_event_table;
@@ -1253,8 +1621,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1253 .id = WOWLAN_GET_STATUSES, 1621 .id = WOWLAN_GET_STATUSES,
1254 .flags = CMD_SYNC | CMD_WANT_SKB, 1622 .flags = CMD_SYNC | CMD_WANT_SKB,
1255 }; 1623 };
1256 struct iwl_wowlan_status *status; 1624 struct iwl_wowlan_status_data status;
1257 int ret, len; 1625 struct iwl_wowlan_status_v6 *status_v6;
1626 int ret, len, status_size, i;
1627 bool keep;
1628 struct ieee80211_sta *ap_sta;
1629 struct iwl_mvm_sta *mvm_ap_sta;
1258 1630
1259 iwl_trans_read_mem_bytes(mvm->trans, base, 1631 iwl_trans_read_mem_bytes(mvm->trans, base,
1260 &err_info, sizeof(err_info)); 1632 &err_info, sizeof(err_info));
@@ -1287,32 +1659,83 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1287 if (!cmd.resp_pkt) 1659 if (!cmd.resp_pkt)
1288 goto out_unlock; 1660 goto out_unlock;
1289 1661
1662 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
1663 status_size = sizeof(struct iwl_wowlan_status_v6);
1664 else
1665 status_size = sizeof(struct iwl_wowlan_status_v4);
1666
1290 len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 1667 len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1291 if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) { 1668 if (len - sizeof(struct iwl_cmd_header) < status_size) {
1292 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1669 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1293 goto out_free_resp; 1670 goto out_free_resp;
1294 } 1671 }
1295 1672
1296 status = (void *)cmd.resp_pkt->data; 1673 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
1674 status_v6 = (void *)cmd.resp_pkt->data;
1675
1676 status.pattern_number = le16_to_cpu(status_v6->pattern_number);
1677 for (i = 0; i < 8; i++)
1678 status.qos_seq_ctr[i] =
1679 le16_to_cpu(status_v6->qos_seq_ctr[i]);
1680 status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
1681 status.wake_packet_length =
1682 le32_to_cpu(status_v6->wake_packet_length);
1683 status.wake_packet_bufsize =
1684 le32_to_cpu(status_v6->wake_packet_bufsize);
1685 status.wake_packet = status_v6->wake_packet;
1686 } else {
1687 struct iwl_wowlan_status_v4 *status_v4;
1688 status_v6 = NULL;
1689 status_v4 = (void *)cmd.resp_pkt->data;
1690
1691 status.pattern_number = le16_to_cpu(status_v4->pattern_number);
1692 for (i = 0; i < 8; i++)
1693 status.qos_seq_ctr[i] =
1694 le16_to_cpu(status_v4->qos_seq_ctr[i]);
1695 status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
1696 status.wake_packet_length =
1697 le32_to_cpu(status_v4->wake_packet_length);
1698 status.wake_packet_bufsize =
1699 le32_to_cpu(status_v4->wake_packet_bufsize);
1700 status.wake_packet = status_v4->wake_packet;
1701 }
1297 1702
1298 if (len - sizeof(struct iwl_cmd_header) != 1703 if (len - sizeof(struct iwl_cmd_header) !=
1299 sizeof(*status) + 1704 status_size + ALIGN(status.wake_packet_bufsize, 4)) {
1300 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
1301 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1705 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1302 goto out_free_resp; 1706 goto out_free_resp;
1303 } 1707 }
1304 1708
1709 /* still at hard-coded place 0 for D3 image */
1710 ap_sta = rcu_dereference_protected(
1711 mvm->fw_id_to_mac_id[0],
1712 lockdep_is_held(&mvm->mutex));
1713 if (IS_ERR_OR_NULL(ap_sta))
1714 goto out_free_resp;
1715
1716 mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
1717 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1718 u16 seq = status.qos_seq_ctr[i];
1719 /* firmware stores last-used value, we store next value */
1720 seq += 0x10;
1721 mvm_ap_sta->tid_data[i].seq_number = seq;
1722 }
1723
1305 /* now we have all the data we need, unlock to avoid mac80211 issues */ 1724 /* now we have all the data we need, unlock to avoid mac80211 issues */
1306 mutex_unlock(&mvm->mutex); 1725 mutex_unlock(&mvm->mutex);
1307 1726
1308 iwl_mvm_report_wakeup_reasons(mvm, vif, status); 1727 iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
1728
1729 keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
1730
1309 iwl_free_resp(&cmd); 1731 iwl_free_resp(&cmd);
1310 return; 1732 return keep;
1311 1733
1312 out_free_resp: 1734 out_free_resp:
1313 iwl_free_resp(&cmd); 1735 iwl_free_resp(&cmd);
1314 out_unlock: 1736 out_unlock:
1315 mutex_unlock(&mvm->mutex); 1737 mutex_unlock(&mvm->mutex);
1738 return false;
1316} 1739}
1317 1740
1318static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) 1741static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1335,6 +1758,17 @@ static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
1335#endif 1758#endif
1336} 1759}
1337 1760
1761static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
1762 struct ieee80211_vif *vif)
1763{
1764 /* skip the one we keep connection on */
1765 if (data == vif)
1766 return;
1767
1768 if (vif->type == NL80211_IFTYPE_STATION)
1769 ieee80211_resume_disconnect(vif);
1770}
1771
1338static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) 1772static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1339{ 1773{
1340 struct iwl_d3_iter_data resume_iter_data = { 1774 struct iwl_d3_iter_data resume_iter_data = {
@@ -1343,6 +1777,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1343 struct ieee80211_vif *vif = NULL; 1777 struct ieee80211_vif *vif = NULL;
1344 int ret; 1778 int ret;
1345 enum iwl_d3_status d3_status; 1779 enum iwl_d3_status d3_status;
1780 bool keep = false;
1346 1781
1347 mutex_lock(&mvm->mutex); 1782 mutex_lock(&mvm->mutex);
1348 1783
@@ -1368,7 +1803,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1368 /* query SRAM first in case we want event logging */ 1803 /* query SRAM first in case we want event logging */
1369 iwl_mvm_read_d3_sram(mvm); 1804 iwl_mvm_read_d3_sram(mvm);
1370 1805
1371 iwl_mvm_query_wakeup_reasons(mvm, vif); 1806 keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
1372 /* has unlocked the mutex, so skip that */ 1807 /* has unlocked the mutex, so skip that */
1373 goto out; 1808 goto out;
1374 1809
@@ -1376,8 +1811,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1376 mutex_unlock(&mvm->mutex); 1811 mutex_unlock(&mvm->mutex);
1377 1812
1378 out: 1813 out:
1379 if (!test && vif) 1814 if (!test)
1380 ieee80211_resume_disconnect(vif); 1815 ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
1816 IEEE80211_IFACE_ITER_NORMAL,
1817 iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
1381 1818
1382 /* return 1 to reconfigure the device */ 1819 /* return 1 to reconfigure the device */
1383 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1820 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index aac81b8984b0..0675f0c8ef93 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -246,58 +246,56 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
246 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 246 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
247} 247}
248 248
249static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file, 249static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
250 const char __user *user_buf, 250 char __user *user_buf,
251 size_t count, loff_t *ppos) 251 size_t count, loff_t *ppos)
252{ 252{
253 struct iwl_mvm *mvm = file->private_data; 253 struct iwl_mvm *mvm = file->private_data;
254 char buf[8] = {}; 254 char buf[64];
255 int allow; 255 int bufsz = sizeof(buf);
256 256 int pos = 0;
257 if (!mvm->ucode_loaded)
258 return -EIO;
259
260 if (copy_from_user(buf, user_buf, sizeof(buf)))
261 return -EFAULT;
262
263 if (sscanf(buf, "%d", &allow) != 1)
264 return -EINVAL;
265
266 IWL_DEBUG_POWER(mvm, "%s device power down\n",
267 allow ? "allow" : "prevent");
268 257
269 /* 258 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
270 * TODO: Send REPLY_DEBUG_CMD (0xf0) when FW support it 259 mvm->disable_power_off);
271 */ 260 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
261 mvm->disable_power_off_d3);
272 262
273 return count; 263 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
274} 264}
275 265
276static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file, 266static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
277 const char __user *user_buf, 267 const char __user *user_buf,
278 size_t count, loff_t *ppos) 268 size_t count, loff_t *ppos)
279{ 269{
280 struct iwl_mvm *mvm = file->private_data; 270 struct iwl_mvm *mvm = file->private_data;
281 char buf[8] = {}; 271 char buf[64] = {};
282 int allow; 272 int ret;
273 int val;
283 274
284 if (copy_from_user(buf, user_buf, sizeof(buf))) 275 if (!mvm->ucode_loaded)
276 return -EIO;
277
278 count = min_t(size_t, count, sizeof(buf) - 1);
279 if (copy_from_user(buf, user_buf, count))
285 return -EFAULT; 280 return -EFAULT;
286 281
287 if (sscanf(buf, "%d", &allow) != 1) 282 if (!strncmp("disable_power_off_d0=", buf, 21)) {
283 if (sscanf(buf + 21, "%d", &val) != 1)
284 return -EINVAL;
285 mvm->disable_power_off = val;
286 } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
287 if (sscanf(buf + 21, "%d", &val) != 1)
288 return -EINVAL;
289 mvm->disable_power_off_d3 = val;
290 } else {
288 return -EINVAL; 291 return -EINVAL;
292 }
289 293
290 IWL_DEBUG_POWER(mvm, "%s device power down in d3\n", 294 mutex_lock(&mvm->mutex);
291 allow ? "allow" : "prevent"); 295 ret = iwl_mvm_power_update_device_mode(mvm);
292 296 mutex_unlock(&mvm->mutex);
293 /*
294 * TODO: When WoWLAN FW alive notification happens, driver will send
295 * REPLY_DEBUG_CMD setting power_down_allow flag according to
296 * mvm->prevent_power_down_d3
297 */
298 mvm->prevent_power_down_d3 = !allow;
299 297
300 return count; 298 return ret ?: count;
301} 299}
302 300
303static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, 301static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -371,7 +369,8 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
371 int val; 369 int val;
372 int ret; 370 int ret;
373 371
374 if (copy_from_user(buf, user_buf, sizeof(buf))) 372 count = min_t(size_t, count, sizeof(buf) - 1);
373 if (copy_from_user(buf, user_buf, count))
375 return -EFAULT; 374 return -EFAULT;
376 375
377 if (!strncmp("keep_alive=", buf, 11)) { 376 if (!strncmp("keep_alive=", buf, 11)) {
@@ -394,7 +393,9 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
394 if (sscanf(buf + 16, "%d", &val) != 1) 393 if (sscanf(buf + 16, "%d", &val) != 1)
395 return -EINVAL; 394 return -EINVAL;
396 param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT; 395 param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
397 } else if (!strncmp("disable_power_off=", buf, 18)) { 396 } else if (!strncmp("disable_power_off=", buf, 18) &&
397 !(mvm->fw->ucode_capa.flags &
398 IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
398 if (sscanf(buf + 18, "%d", &val) != 1) 399 if (sscanf(buf + 18, "%d", &val) != 1)
399 return -EINVAL; 400 return -EINVAL;
400 param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF; 401 param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
@@ -581,15 +582,21 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
581 BT_MBOX_PRINT(3, UPDATE_REQUEST, true); 582 BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
582 583
583 pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n", 584 pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
584 notif->bt_status); 585 notif->bt_status);
585 pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n", 586 pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
586 notif->bt_open_conn); 587 notif->bt_open_conn);
587 pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n", 588 pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
588 notif->bt_traffic_load); 589 notif->bt_traffic_load);
589 pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n", 590 pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
590 notif->bt_agg_traffic_load); 591 notif->bt_agg_traffic_load);
591 pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", 592 pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
592 notif->bt_ci_compliance); 593 notif->bt_ci_compliance);
594 pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
595 le32_to_cpu(notif->primary_ch_lut));
596 pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
597 le32_to_cpu(notif->secondary_ch_lut));
598 pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
599 le32_to_cpu(notif->bt_activity_grading));
593 600
594 mutex_unlock(&mvm->mutex); 601 mutex_unlock(&mvm->mutex);
595 602
@@ -600,6 +607,38 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
600} 607}
601#undef BT_MBOX_PRINT 608#undef BT_MBOX_PRINT
602 609
610static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
611 size_t count, loff_t *ppos)
612{
613 struct iwl_mvm *mvm = file->private_data;
614 struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
615 char buf[256];
616 int bufsz = sizeof(buf);
617 int pos = 0;
618
619 mutex_lock(&mvm->mutex);
620
621 pos += scnprintf(buf+pos, bufsz-pos, "Channel inhibition CMD\n");
622 pos += scnprintf(buf+pos, bufsz-pos,
623 "\tPrimary Channel Bitmap 0x%016llx Fat: %d\n",
624 le64_to_cpu(cmd->bt_primary_ci),
625 !!cmd->co_run_bw_primary);
626 pos += scnprintf(buf+pos, bufsz-pos,
627 "\tSecondary Channel Bitmap 0x%016llx Fat: %d\n",
628 le64_to_cpu(cmd->bt_secondary_ci),
629 !!cmd->co_run_bw_secondary);
630
631 pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
632 pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
633 iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
634 pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
635 iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
636
637 mutex_unlock(&mvm->mutex);
638
639 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
640}
641
603#define PRINT_STATS_LE32(_str, _val) \ 642#define PRINT_STATS_LE32(_str, _val) \
604 pos += scnprintf(buf + pos, bufsz - pos, \ 643 pos += scnprintf(buf + pos, bufsz - pos, \
605 fmt_table, _str, \ 644 fmt_table, _str, \
@@ -615,9 +654,11 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
615 int pos = 0; 654 int pos = 0;
616 char *buf; 655 char *buf;
617 int ret; 656 int ret;
618 int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 + 657 /* 43 is the size of each data line, 33 is the size of each header */
619 sizeof(struct mvm_statistics_rx_non_phy) * 10 + 658 size_t bufsz =
620 sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200; 659 ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) +
660 (4 * 33) + 1;
661
621 struct mvm_statistics_rx_phy *ofdm; 662 struct mvm_statistics_rx_phy *ofdm;
622 struct mvm_statistics_rx_phy *cck; 663 struct mvm_statistics_rx_phy *cck;
623 struct mvm_statistics_rx_non_phy *general; 664 struct mvm_statistics_rx_non_phy *general;
@@ -712,6 +753,7 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
712 PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b); 753 PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
713 PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c); 754 PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
714 PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills); 755 PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
756 PRINT_STATS_LE32("mac_id", general->mac_id);
715 PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu); 757 PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
716 758
717 pos += scnprintf(buf + pos, bufsz - pos, fmt_header, 759 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
@@ -757,6 +799,59 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
757 return count; 799 return count;
758} 800}
759 801
802static ssize_t
803iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
804 char __user *user_buf,
805 size_t count, loff_t *ppos)
806{
807 struct iwl_mvm *mvm = file->private_data;
808 int pos = 0;
809 char buf[32];
810 const size_t bufsz = sizeof(buf);
811
812 /* print which antennas were set for the scan command by the user */
813 pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
814 if (mvm->scan_rx_ant & ANT_A)
815 pos += scnprintf(buf + pos, bufsz - pos, "A");
816 if (mvm->scan_rx_ant & ANT_B)
817 pos += scnprintf(buf + pos, bufsz - pos, "B");
818 if (mvm->scan_rx_ant & ANT_C)
819 pos += scnprintf(buf + pos, bufsz - pos, "C");
820 pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
821
822 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
823}
824
825static ssize_t
826iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
827 const char __user *user_buf,
828 size_t count, loff_t *ppos)
829{
830 struct iwl_mvm *mvm = file->private_data;
831 char buf[8];
832 int buf_size;
833 u8 scan_rx_ant;
834
835 memset(buf, 0, sizeof(buf));
836 buf_size = min(count, sizeof(buf) - 1);
837
838 /* get the argument from the user and check if it is valid */
839 if (copy_from_user(buf, user_buf, buf_size))
840 return -EFAULT;
841 if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
842 return -EINVAL;
843 if (scan_rx_ant > ANT_ABC)
844 return -EINVAL;
845 if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
846 return -EINVAL;
847
848 /* change the rx antennas for scan command */
849 mvm->scan_rx_ant = scan_rx_ant;
850
851 return count;
852}
853
854
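
Usage note for the new scan_ant_rxchain file above: the value is parsed as a hex antenna bitmask (bit 0 = ANT_A, bit 1 = ANT_B, bit 2 = ANT_C in iwlwifi), so for example writing 3 selects antennas A and B for subsequent scan commands; values above ANT_ABC or antennas not advertised as valid RX antennas by the firmware are rejected with -EINVAL.
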
760static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, 855static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
761 enum iwl_dbgfs_bf_mask param, int value) 856 enum iwl_dbgfs_bf_mask param, int value)
762{ 857{
@@ -968,7 +1063,8 @@ static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
968 char buf[8] = {}; 1063 char buf[8] = {};
969 int store; 1064 int store;
970 1065
971 if (copy_from_user(buf, user_buf, sizeof(buf))) 1066 count = min_t(size_t, count, sizeof(buf) - 1);
1067 if (copy_from_user(buf, user_buf, count))
972 return -EFAULT; 1068 return -EFAULT;
973 1069
974 if (sscanf(buf, "%d", &store) != 1) 1070 if (sscanf(buf, "%d", &store) != 1)
@@ -1063,10 +1159,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
1063MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram); 1159MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
1064MVM_DEBUGFS_READ_FILE_OPS(stations); 1160MVM_DEBUGFS_READ_FILE_OPS(stations);
1065MVM_DEBUGFS_READ_FILE_OPS(bt_notif); 1161MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
1066MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow); 1162MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
1067MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow); 1163MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
1068MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); 1164MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
1069MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart); 1165MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
1166MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
1167
1070#ifdef CONFIG_PM_SLEEP 1168#ifdef CONFIG_PM_SLEEP
1071MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram); 1169MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
1072#endif 1170#endif
@@ -1087,10 +1185,14 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
1087 MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); 1185 MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
1088 MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); 1186 MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
1089 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); 1187 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
1090 MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR); 1188 MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
1091 MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR); 1189 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
1190 MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
1191 S_IRUSR | S_IWUSR);
1092 MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR); 1192 MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
1093 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); 1193 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
1194 MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
1195 S_IWUSR | S_IRUSR);
1094#ifdef CONFIG_PM_SLEEP 1196#ifdef CONFIG_PM_SLEEP
1095 MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR); 1197 MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
1096 MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR); 1198 MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 05c61d6f384e..4ea5e24ca92d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -82,6 +82,8 @@
82 * @BT_USE_DEFAULTS: 82 * @BT_USE_DEFAULTS:
83 * @BT_SYNC_2_BT_DISABLE: 83 * @BT_SYNC_2_BT_DISABLE:
84 * @BT_COEX_CORUNNING_TBL_EN: 84 * @BT_COEX_CORUNNING_TBL_EN:
85 *
 86 * The COEX_MODE must be set for each command, even if it is not changed.
85 */ 87 */
86enum iwl_bt_coex_flags { 88enum iwl_bt_coex_flags {
87 BT_CH_PRIMARY_EN = BIT(0), 89 BT_CH_PRIMARY_EN = BIT(0),
@@ -95,14 +97,16 @@ enum iwl_bt_coex_flags {
95 BT_COEX_NW = 0x3 << BT_COEX_MODE_POS, 97 BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
96 BT_USE_DEFAULTS = BIT(6), 98 BT_USE_DEFAULTS = BIT(6),
97 BT_SYNC_2_BT_DISABLE = BIT(7), 99 BT_SYNC_2_BT_DISABLE = BIT(7),
98 /* 100 BT_COEX_CORUNNING_TBL_EN = BIT(8),
99 * For future use - when the flags will be enlarged 101 BT_COEX_MPLUT_TBL_EN = BIT(9),
100 * BT_COEX_CORUNNING_TBL_EN = BIT(8), 102 /* Bit 10 is reserved */
101 */ 103 BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
102}; 104};
103 105
104/* 106/*
105 * indicates what has changed in the BT_COEX command. 107 * indicates what has changed in the BT_COEX command.
 108 * BT_VALID_ENABLE must be set for each command. Commands without this bit will be
109 * discarded by the firmware
106 */ 110 */
107enum iwl_bt_coex_valid_bit_msk { 111enum iwl_bt_coex_valid_bit_msk {
108 BT_VALID_ENABLE = BIT(0), 112 BT_VALID_ENABLE = BIT(0),
@@ -121,11 +125,8 @@ enum iwl_bt_coex_valid_bit_msk {
121 BT_VALID_CORUN_LUT_40 = BIT(13), 125 BT_VALID_CORUN_LUT_40 = BIT(13),
122 BT_VALID_ANT_ISOLATION = BIT(14), 126 BT_VALID_ANT_ISOLATION = BIT(14),
123 BT_VALID_ANT_ISOLATION_THRS = BIT(15), 127 BT_VALID_ANT_ISOLATION_THRS = BIT(15),
124 /* 128 BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
125 * For future use - when the valid flags will be enlarged 129 BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
126 * BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
127 * BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
128 */
129}; 130};
130 131
131/** 132/**
@@ -142,48 +143,88 @@ enum iwl_bt_reduced_tx_power {
142 BT_REDUCED_TX_POWER_DATA = BIT(1), 143 BT_REDUCED_TX_POWER_DATA = BIT(1),
143}; 144};
144 145
146enum iwl_bt_coex_lut_type {
147 BT_COEX_TIGHT_LUT = 0,
148 BT_COEX_LOOSE_LUT,
149 BT_COEX_TX_DIS_LUT,
150
151 BT_COEX_MAX_LUT,
152};
153
145#define BT_COEX_LUT_SIZE (12) 154#define BT_COEX_LUT_SIZE (12)
155#define BT_COEX_CORUN_LUT_SIZE (32)
156#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
157#define BT_COEX_BOOST_SIZE (4)
158#define BT_REDUCED_TX_POWER_BIT BIT(7)
146 159
147/** 160/**
148 * struct iwl_bt_coex_cmd - bt coex configuration command 161 * struct iwl_bt_coex_cmd - bt coex configuration command
149 * @flags:&enum iwl_bt_coex_flags 162 * @flags:&enum iwl_bt_coex_flags
150 * @lead_time:
151 * @max_kill: 163 * @max_kill:
152 * @bt3_time_t7_value:
153 * @kill_ack_msk:
154 * @kill_cts_msk:
155 * @bt3_prio_sample_time:
156 * @bt3_timer_t2_value:
157 * @bt4_reaction_time:
158 * @decision_lut[12]:
159 * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power 164 * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
160 * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk 165 * @bt4_antenna_isolation:
161 * @bt_prio_boost: values for PTA boost register 166 * @bt4_antenna_isolation_thr:
167 * @bt4_tx_tx_delta_freq_thr:
168 * @bt4_tx_rx_max_freq0:
169 * @bt_prio_boost:
162 * @wifi_tx_prio_boost: SW boost of wifi tx priority 170 * @wifi_tx_prio_boost: SW boost of wifi tx priority
163 * @wifi_rx_prio_boost: SW boost of wifi rx priority 171 * @wifi_rx_prio_boost: SW boost of wifi rx priority
172 * @kill_ack_msk:
173 * @kill_cts_msk:
174 * @decision_lut:
175 * @bt4_multiprio_lut:
176 * @bt4_corun_lut20:
177 * @bt4_corun_lut40:
178 * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
164 * 179 *
165 * The structure is used for the BT_COEX command. 180 * The structure is used for the BT_COEX command.
166 */ 181 */
167struct iwl_bt_coex_cmd { 182struct iwl_bt_coex_cmd {
168 u8 flags; 183 __le32 flags;
169 u8 lead_time;
170 u8 max_kill; 184 u8 max_kill;
171 u8 bt3_time_t7_value; 185 u8 bt_reduced_tx_power;
186 u8 reserved[2];
187
188 u8 bt4_antenna_isolation;
189 u8 bt4_antenna_isolation_thr;
190 u8 bt4_tx_tx_delta_freq_thr;
191 u8 bt4_tx_rx_max_freq0;
192
193 __le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
194 __le32 wifi_tx_prio_boost;
195 __le32 wifi_rx_prio_boost;
172 __le32 kill_ack_msk; 196 __le32 kill_ack_msk;
173 __le32 kill_cts_msk; 197 __le32 kill_cts_msk;
174 u8 bt3_prio_sample_time; 198
175 u8 bt3_timer_t2_value; 199 __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
176 __le16 bt4_reaction_time; 200 __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
177 __le32 decision_lut[BT_COEX_LUT_SIZE]; 201 __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
178 u8 bt_reduced_tx_power; 202 __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
179 u8 reserved; 203
180 __le16 valid_bit_msk; 204 __le32 valid_bit_msk;
181 __le32 bt_prio_boost;
182 u8 reserved2;
183 u8 wifi_tx_prio_boost;
184 __le16 wifi_rx_prio_boost;
185} __packed; /* BT_COEX_CMD_API_S_VER_3 */ 205} __packed; /* BT_COEX_CMD_API_S_VER_3 */
186 206
207/**
208 * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
209 * @bt_primary_ci:
210 * @bt_secondary_ci:
211 * @co_run_bw_primary:
212 * @co_run_bw_secondary:
213 * @primary_ch_phy_id:
214 * @secondary_ch_phy_id:
215 *
216 * Used for BT_COEX_CI command
217 */
218struct iwl_bt_coex_ci_cmd {
219 __le64 bt_primary_ci;
220 __le64 bt_secondary_ci;
221
222 u8 co_run_bw_primary;
223 u8 co_run_bw_secondary;
224 u8 primary_ch_phy_id;
225 u8 secondary_ch_phy_id;
226} __packed; /* BT_CI_MSG_API_S_VER_1 */
227
187#define BT_MBOX(n_dw, _msg, _pos, _nbits) \ 228#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
188 BT_MBOX##n_dw##_##_msg##_POS = (_pos), \ 229 BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
189 BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS 230 BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
@@ -244,23 +285,39 @@ enum iwl_bt_mxbox_dw3 {
244 ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ 285 ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
245 >> BT_MBOX##_num##_##_field##_POS) 286 >> BT_MBOX##_num##_##_field##_POS)
246 287
288enum iwl_bt_activity_grading {
289 BT_OFF = 0,
290 BT_ON_NO_CONNECTION = 1,
291 BT_LOW_TRAFFIC = 2,
292 BT_HIGH_TRAFFIC = 3,
293};
294
247/** 295/**
248 * struct iwl_bt_coex_profile_notif - notification about BT coex 296 * struct iwl_bt_coex_profile_notif - notification about BT coex
249 * @mbox_msg: message from BT to WiFi 297 * @mbox_msg: message from BT to WiFi
250 * @:bt_status: 0 - off, 1 - on 298 * @msg_idx: the index of the message
251 * @:bt_open_conn: number of BT connections open 299 * @bt_status: 0 - off, 1 - on
252 * @:bt_traffic_load: load of BT traffic 300 * @bt_open_conn: number of BT connections open
253 * @:bt_agg_traffic_load: aggregated load of BT traffic 301 * @bt_traffic_load: load of BT traffic
254 * @:bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant 302 * @bt_agg_traffic_load: aggregated load of BT traffic
303 * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
304 * @primary_ch_lut: LUT used for primary channel
305 * @secondary_ch_lut: LUT used for secondary channel
306 * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
255 */ 307 */
256struct iwl_bt_coex_profile_notif { 308struct iwl_bt_coex_profile_notif {
257 __le32 mbox_msg[4]; 309 __le32 mbox_msg[4];
310 __le32 msg_idx;
258 u8 bt_status; 311 u8 bt_status;
259 u8 bt_open_conn; 312 u8 bt_open_conn;
260 u8 bt_traffic_load; 313 u8 bt_traffic_load;
261 u8 bt_agg_traffic_load; 314 u8 bt_agg_traffic_load;
262 u8 bt_ci_compliance; 315 u8 bt_ci_compliance;
263 u8 reserved[3]; 316 u8 reserved[3];
317
318 __le32 primary_ch_lut;
319 __le32 secondary_ch_lut;
320 __le32 bt_activity_grading;
264} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */ 321} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
265 322
266enum iwl_bt_coex_prio_table_event { 323enum iwl_bt_coex_prio_table_event {
@@ -300,20 +357,4 @@ struct iwl_bt_coex_prio_tbl_cmd {
300 u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX]; 357 u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
301} __packed; 358} __packed;
302 359
303enum iwl_bt_coex_env_action {
304 BT_COEX_ENV_CLOSE = 0,
305 BT_COEX_ENV_OPEN = 1,
306}; /* BT_COEX_PROT_ENV_ACTION_API_E_VER_1 */
307
308/**
309 * struct iwl_bt_coex_prot_env_cmd - BT Protection Envelope
310 * @action: enum %iwl_bt_coex_env_action
311 * @type: enum %iwl_bt_coex_prio_table_event
312 */
313struct iwl_bt_coex_prot_env_cmd {
314 u8 action; /* 0 = closed, 1 = open */
315 u8 type; /* 0 .. 15 */
316 u8 reserved[2];
317} __packed;
318
319#endif /* __fw_api_bt_coex_h__ */ 360#endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index df72fcdf8170..4e7dd8cf87dc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -100,7 +100,12 @@ enum iwl_proto_offloads {
100 100
101#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 101#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
102#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6 102#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
103#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 6 103#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12
104#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4
105#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12
106
107#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4
108#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2
104 109
105/** 110/**
106 * struct iwl_proto_offload_cmd_common - ARP/NS offload common part 111 * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
@@ -155,6 +160,43 @@ struct iwl_proto_offload_cmd_v2 {
155 u8 reserved2[3]; 160 u8 reserved2[3];
156} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */ 161} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
157 162
163struct iwl_ns_config {
164 struct in6_addr source_ipv6_addr;
165 struct in6_addr dest_ipv6_addr;
166 u8 target_mac_addr[ETH_ALEN];
167 __le16 reserved;
168} __packed; /* NS_OFFLOAD_CONFIG */
169
170struct iwl_targ_addr {
171 struct in6_addr addr;
172 __le32 config_num;
173} __packed; /* TARGET_IPV6_ADDRESS */
174
175/**
176 * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
177 * @common: common/IPv4 configuration
 178 * @targ_addrs: target IPv6 addresses
179 * @ns_config: NS offload configurations
180 */
181struct iwl_proto_offload_cmd_v3_small {
182 struct iwl_proto_offload_cmd_common common;
183 __le32 num_valid_ipv6_addrs;
184 struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
185 struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
186} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
187
188/**
189 * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
190 * @common: common/IPv4 configuration
 191 * @targ_addrs: target IPv6 addresses
192 * @ns_config: NS offload configurations
193 */
194struct iwl_proto_offload_cmd_v3_large {
195 struct iwl_proto_offload_cmd_common common;
196 __le32 num_valid_ipv6_addrs;
197 struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
198 struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
199} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
158 200
159/* 201/*
160 * WOWLAN_PATTERNS 202 * WOWLAN_PATTERNS
@@ -293,7 +335,7 @@ enum iwl_wowlan_wakeup_reason {
293 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), 335 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
294}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ 336}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
295 337
296struct iwl_wowlan_status { 338struct iwl_wowlan_status_v4 {
297 __le64 replay_ctr; 339 __le64 replay_ctr;
298 __le16 pattern_number; 340 __le16 pattern_number;
299 __le16 non_qos_seq_ctr; 341 __le16 non_qos_seq_ctr;
@@ -308,6 +350,29 @@ struct iwl_wowlan_status {
308 u8 wake_packet[]; /* can be truncated from _length to _bufsize */ 350 u8 wake_packet[]; /* can be truncated from _length to _bufsize */
309} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */ 351} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
310 352
353struct iwl_wowlan_gtk_status {
354 u8 key_index;
355 u8 reserved[3];
356 u8 decrypt_key[16];
357 u8 tkip_mic_key[8];
358 struct iwl_wowlan_rsc_tsc_params_cmd rsc;
359} __packed;
360
361struct iwl_wowlan_status_v6 {
362 struct iwl_wowlan_gtk_status gtk;
363 __le64 replay_ctr;
364 __le16 pattern_number;
365 __le16 non_qos_seq_ctr;
366 __le16 qos_seq_ctr[8];
367 __le32 wakeup_reasons;
368 __le32 num_of_gtk_rekeys;
369 __le32 transmitted_ndps;
370 __le32 received_beacons;
371 __le32 wake_packet_length;
372 __le32 wake_packet_bufsize;
373 u8 wake_packet[]; /* can be truncated from _length to _bufsize */
374} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
375
311#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64 376#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
312#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128 377#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
313#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048 378#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index 98b1feb43d38..39c3148bdfa8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -170,12 +170,14 @@ struct iwl_mac_data_ap {
170 * @beacon_tsf: beacon transmit time in TSF 170 * @beacon_tsf: beacon transmit time in TSF
171 * @bi: beacon interval in TU 171 * @bi: beacon interval in TU
172 * @bi_reciprocal: 2^32 / bi 172 * @bi_reciprocal: 2^32 / bi
173 * @beacon_template: beacon template ID
173 */ 174 */
174struct iwl_mac_data_ibss { 175struct iwl_mac_data_ibss {
175 __le32 beacon_time; 176 __le32 beacon_time;
176 __le64 beacon_tsf; 177 __le64 beacon_tsf;
177 __le32 bi; 178 __le32 bi;
178 __le32 bi_reciprocal; 179 __le32 bi_reciprocal;
180 __le32 beacon_template;
179} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */ 181} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
180 182
181/** 183/**
@@ -372,4 +374,13 @@ static inline u32 iwl_mvm_reciprocal(u32 v)
372 return 0xFFFFFFFF / v; 374 return 0xFFFFFFFF / v;
373} 375}
374 376
377#define IWL_NONQOS_SEQ_GET 0x1
378#define IWL_NONQOS_SEQ_SET 0x2
379struct iwl_nonqos_seq_query_cmd {
380 __le32 get_set_flag;
381 __le32 mac_id_n_color;
382 __le16 value;
383 __le16 reserved;
384} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
385
375#endif /* __fw_api_mac_h__ */ 386#endif /* __fw_api_mac_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 8e7ab41079ca..5cb93ae5cd2f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -132,6 +132,33 @@ struct iwl_powertable_cmd {
132} __packed; 132} __packed;
133 133
134/** 134/**
135 * enum iwl_device_power_flags - masks for device power command flags
 136 * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
 137 * receiver and transmitter. '0' - does not allow. This flag should
 138 * always be set to '1' unless one needs to disable actual power down for debug
139 * purposes.
140 * @DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
141 * that power management is disabled. '0' Power management is enabled, one
142 * of power schemes is applied.
143*/
144enum iwl_device_power_flags {
145 DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
146 DEVICE_POWER_FLAGS_CAM_MSK = BIT(13),
147};
148
149/**
150 * struct iwl_device_power_cmd - device wide power command.
151 * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
152 *
153 * @flags: Power table command flags from DEVICE_POWER_FLAGS_*
154 */
155struct iwl_device_power_cmd {
156 /* PM_POWER_TABLE_CMD_API_S_VER_6 */
157 __le16 flags;
158 __le16 reserved;
159} __packed;
160
161/**
135 * struct iwl_mac_power_cmd - New power command containing uAPSD support 162 * struct iwl_mac_power_cmd - New power command containing uAPSD support
136 * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response) 163 * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
137 * @id_and_color: MAC context identifier 164 * @id_and_color: MAC context identifier
@@ -290,7 +317,7 @@ struct iwl_beacon_filter_cmd {
290#define IWL_BF_ESCAPE_TIMER_MIN 0 317#define IWL_BF_ESCAPE_TIMER_MIN 0
291 318
292#define IWL_BA_ESCAPE_TIMER_DEFAULT 6 319#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
293#define IWL_BA_ESCAPE_TIMER_D3 6 320#define IWL_BA_ESCAPE_TIMER_D3 9
294#define IWL_BA_ESCAPE_TIMER_MAX 1024 321#define IWL_BA_ESCAPE_TIMER_MAX 1024
295#define IWL_BA_ESCAPE_TIMER_MIN 0 322#define IWL_BA_ESCAPE_TIMER_MIN 0
296 323
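A hedged example of filling the new DEVICE_POWER command (0x77) defined above; the sending helper, the CMD_SYNC flag and the command-ID symbol used here are assumptions taken from the surrounding driver, not part of this hunk:

static int example_send_device_power_cmd(struct iwl_mvm *mvm, bool cam)
{
	struct iwl_device_power_cmd cmd = {
		/* per the comment above, power-save stays enabled unless
		 * power down must be disabled for debugging */
		.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
	};

	if (cam)	/* continuous active mode: no power management */
		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);

	return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD /* 0x77 */, CMD_SYNC,
				    sizeof(cmd), &cmd);
}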
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index fdd33bc0a594..538f1c7a5966 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -68,6 +68,7 @@
68/* 68/*
69 * These serve as indexes into 69 * These serve as indexes into
70 * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; 70 * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
71 * TODO: avoid overlap between legacy and HT rates
71 */ 72 */
72enum { 73enum {
73 IWL_RATE_1M_INDEX = 0, 74 IWL_RATE_1M_INDEX = 0,
@@ -78,18 +79,31 @@ enum {
78 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, 79 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
79 IWL_RATE_6M_INDEX, 80 IWL_RATE_6M_INDEX,
80 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, 81 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
82 IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
83 IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
84 IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
81 IWL_RATE_9M_INDEX, 85 IWL_RATE_9M_INDEX,
82 IWL_RATE_12M_INDEX, 86 IWL_RATE_12M_INDEX,
87 IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
83 IWL_RATE_18M_INDEX, 88 IWL_RATE_18M_INDEX,
89 IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
84 IWL_RATE_24M_INDEX, 90 IWL_RATE_24M_INDEX,
91 IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
85 IWL_RATE_36M_INDEX, 92 IWL_RATE_36M_INDEX,
93 IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
86 IWL_RATE_48M_INDEX, 94 IWL_RATE_48M_INDEX,
95 IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
87 IWL_RATE_54M_INDEX, 96 IWL_RATE_54M_INDEX,
97 IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
88 IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX, 98 IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
89 IWL_RATE_60M_INDEX, 99 IWL_RATE_60M_INDEX,
90 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, 100 IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
101 IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
102 IWL_RATE_MCS_8_INDEX,
103 IWL_RATE_MCS_9_INDEX,
104 IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
91 IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, 105 IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
92 IWL_RATE_COUNT, 106 IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
93}; 107};
94 108
95#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) 109#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -108,6 +122,7 @@ enum {
108 IWL_RATE_2M_PLCP = 20, 122 IWL_RATE_2M_PLCP = 20,
109 IWL_RATE_5M_PLCP = 55, 123 IWL_RATE_5M_PLCP = 55,
110 IWL_RATE_11M_PLCP = 110, 124 IWL_RATE_11M_PLCP = 110,
125 IWL_RATE_INVM_PLCP = -1,
111}; 126};
112 127
113/* 128/*
@@ -164,6 +179,8 @@ enum {
164 * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) 179 * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
165 */ 180 */
166#define RATE_HT_MCS_RATE_CODE_MSK 0x7 181#define RATE_HT_MCS_RATE_CODE_MSK 0x7
182#define RATE_HT_MCS_NSS_POS 3
183#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS)
167 184
168/* Bit 10: (1) Use Green Field preamble */ 185/* Bit 10: (1) Use Green Field preamble */
169#define RATE_HT_MCS_GF_POS 10 186#define RATE_HT_MCS_GF_POS 10
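Two quick illustrations of the rate-table changes above (sketches, not from this patch). First, the new aliases make HT MCS 0-7 share slots with the legacy OFDM rates, so a single fw_rate_idx_to_plcp[IWL_RATE_COUNT] table now spans legacy, HT and VHT, with IWL_RATE_COUNT = IWL_RATE_MCS_9_INDEX + 1. Second, extracting the stream count from an HT rate code with the new NSS mask, assuming the field stores the number of spatial streams minus one:

static inline u8 example_ht_nss(u32 rate_n_flags)
{
	return ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
		RATE_HT_MCS_NSS_POS) + 1;
}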
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 83cb9b992ea4..c3782b48ded1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -356,6 +356,7 @@ struct iwl_scan_complete_notif {
356/* scan offload */ 356/* scan offload */
357#define IWL_MAX_SCAN_CHANNELS 40 357#define IWL_MAX_SCAN_CHANNELS 40
358#define IWL_SCAN_MAX_BLACKLIST_LEN 64 358#define IWL_SCAN_MAX_BLACKLIST_LEN 64
359#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
359#define IWL_SCAN_MAX_PROFILES 11 360#define IWL_SCAN_MAX_PROFILES 11
360#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512 361#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
361 362
@@ -368,6 +369,12 @@ struct iwl_scan_complete_notif {
368#define IWL_FULL_SCAN_MULTIPLIER 5 369#define IWL_FULL_SCAN_MULTIPLIER 5
369#define IWL_FAST_SCHED_SCAN_ITERATIONS 3 370#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
370 371
372enum scan_framework_client {
373 SCAN_CLIENT_SCHED_SCAN = BIT(0),
374 SCAN_CLIENT_NETDETECT = BIT(1),
375 SCAN_CLIENT_ASSET_TRACKING = BIT(2),
376};
377
371/** 378/**
372 * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6 379 * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
373 * @scan_flags: see enum iwl_scan_flags 380 * @scan_flags: see enum iwl_scan_flags
@@ -449,11 +456,12 @@ struct iwl_scan_offload_cfg {
449 * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S 456 * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
450 * @ssid: MAC address to filter out 457 * @ssid: MAC address to filter out
451 * @reported_rssi: AP rssi reported to the host 458 * @reported_rssi: AP rssi reported to the host
459 * @client_bitmap: clients ignore this entry - enum scan_framework_client
452 */ 460 */
453struct iwl_scan_offload_blacklist { 461struct iwl_scan_offload_blacklist {
454 u8 ssid[ETH_ALEN]; 462 u8 ssid[ETH_ALEN];
455 u8 reported_rssi; 463 u8 reported_rssi;
456 u8 reserved; 464 u8 client_bitmap;
457} __packed; 465} __packed;
458 466
459enum iwl_scan_offload_network_type { 467enum iwl_scan_offload_network_type {
@@ -475,6 +483,7 @@ enum iwl_scan_offload_band_selection {
475 * @auth_alg: authentication algorithm to match - bitmap 483 * @auth_alg: authentication algorithm to match - bitmap
476 * @network_type: enum iwl_scan_offload_network_type 484 * @network_type: enum iwl_scan_offload_network_type
477 * @band_selection: enum iwl_scan_offload_band_selection 485 * @band_selection: enum iwl_scan_offload_band_selection
486 * @client_bitmap: clients waiting for match - enum scan_framework_client
478 */ 487 */
479struct iwl_scan_offload_profile { 488struct iwl_scan_offload_profile {
480 u8 ssid_index; 489 u8 ssid_index;
@@ -482,7 +491,8 @@ struct iwl_scan_offload_profile {
482 u8 auth_alg; 491 u8 auth_alg;
483 u8 network_type; 492 u8 network_type;
484 u8 band_selection; 493 u8 band_selection;
485 u8 reserved[3]; 494 u8 client_bitmap;
495 u8 reserved[2];
486} __packed; 496} __packed;
487 497
488/** 498/**
@@ -491,13 +501,18 @@ struct iwl_scan_offload_profile {
491 * @profiles: profiles to search for match 501 * @profiles: profiles to search for match
492 * @blacklist_len: length of blacklist 502 * @blacklist_len: length of blacklist
493 * @num_profiles: num of profiles in the list 503 * @num_profiles: num of profiles in the list
504 * @match_notify: clients waiting for match found notification
505 * @pass_match: clients waiting for the results
506 * @active_clients: active clients bitmap - enum scan_framework_client
494 */ 507 */
495struct iwl_scan_offload_profile_cfg { 508struct iwl_scan_offload_profile_cfg {
496 struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN];
497 struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; 509 struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
498 u8 blacklist_len; 510 u8 blacklist_len;
499 u8 num_profiles; 511 u8 num_profiles;
500 u8 reserved[2]; 512 u8 match_notify;
513 u8 pass_match;
514 u8 active_clients;
515 u8 reserved[3];
501} __packed; 516} __packed;
502 517
503/** 518/**
@@ -560,4 +575,15 @@ struct iwl_scan_offload_complete {
560 u8 reserved; 575 u8 reserved;
561} __packed; 576} __packed;
562 577
578/**
579 * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
580 * @ssid_bitmap: SSIDs indexes found in this iteration
581 * @client_bitmap: clients that are active and wait for this notification
582 */
583struct iwl_sched_scan_results {
584 __le16 ssid_bitmap;
585 u8 client_bitmap;
586 u8 reserved;
587};
588
563#endif 589#endif
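A hedged sketch of how a blacklist entry might be filled now that the reserved byte has become client_bitmap (the helper is hypothetical; only the struct and the scan_framework_client enum come from this hunk):

static void example_fill_blacklist_entry(struct iwl_scan_offload_blacklist *bl,
					 const u8 *bssid)
{
	/* note: despite its name, the ssid field carries a MAC address */
	memcpy(bl->ssid, bssid, ETH_ALEN);
	bl->reported_rssi = 0;
	/* only the scheduled-scan client should honour this entry */
	bl->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
}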
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index a30691a8a85b..4aca5933a65d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -247,7 +247,7 @@ struct iwl_mvm_keyinfo {
247} __packed; 247} __packed;
248 248
249/** 249/**
250 * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table 250 * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
251 * ( REPLY_ADD_STA = 0x18 ) 251 * ( REPLY_ADD_STA = 0x18 )
252 * @add_modify: 1: modify existing, 0: add new station 252 * @add_modify: 1: modify existing, 0: add new station
253 * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent 253 * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
@@ -286,7 +286,7 @@ struct iwl_mvm_keyinfo {
286 * ADD_STA sets up the table entry for one station, either creating a new 286 * ADD_STA sets up the table entry for one station, either creating a new
287 * entry, or modifying a pre-existing one. 287 * entry, or modifying a pre-existing one.
288 */ 288 */
289struct iwl_mvm_add_sta_cmd { 289struct iwl_mvm_add_sta_cmd_v5 {
290 u8 add_modify; 290 u8 add_modify;
291 u8 unicast_tx_key_id; 291 u8 unicast_tx_key_id;
292 u8 multicast_tx_key_id; 292 u8 multicast_tx_key_id;
@@ -313,6 +313,57 @@ struct iwl_mvm_add_sta_cmd {
313} __packed; /* ADD_STA_CMD_API_S_VER_5 */ 313} __packed; /* ADD_STA_CMD_API_S_VER_5 */
314 314
315/** 315/**
316 * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
317 * VER_6 of this command is quite similar to VER_5, except for the
318 * exclusion of all fields related to security key installation.
319 */
320struct iwl_mvm_add_sta_cmd_v6 {
321 u8 add_modify;
322 u8 reserved1;
323 __le16 tid_disable_tx;
324 __le32 mac_id_n_color;
325 u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
326 __le16 reserved2;
327 u8 sta_id;
328 u8 modify_mask;
329 __le16 reserved3;
330 __le32 station_flags;
331 __le32 station_flags_msk;
332 u8 add_immediate_ba_tid;
333 u8 remove_immediate_ba_tid;
334 __le16 add_immediate_ba_ssn;
335 __le16 sleep_tx_count;
336 __le16 sleep_state_flags;
337 __le16 assoc_id;
338 __le16 beamform_flags;
339 __le32 tfd_queue_msk;
340} __packed; /* ADD_STA_CMD_API_S_VER_6 */
341
342/**
343 * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
344 * ( REPLY_ADD_STA_KEY = 0x17 )
345 * @sta_id: index of station in uCode's station table
346 * @key_offset: key offset in key storage
347 * @key_flags: type %iwl_sta_key_flag
348 * @key: key material data
349 * @key2: key material data
350 * @rx_secur_seq_cnt: RX security sequence counter for the key
351 * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
352 * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
353 */
354struct iwl_mvm_add_sta_key_cmd {
355 u8 sta_id;
356 u8 key_offset;
357 __le16 key_flags;
358 u8 key[16];
359 u8 key2[16];
360 u8 rx_secur_seq_cnt[16];
361 u8 tkip_rx_tsc_byte2;
362 u8 reserved;
363 __le16 tkip_rx_ttak[5];
364} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
365
366/**
316 * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command 367 * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
317 * @ADD_STA_SUCCESS: operation was executed successfully 368 * @ADD_STA_SUCCESS: operation was executed successfully
318 * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table 369 * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
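A hedged sketch of populating the new ADD_STA_KEY command (hypothetical helper; how the key material is split between key and key2 is an assumption, and keys longer than 32 bytes are rejected here):

static int example_fill_sta_key_cmd(struct iwl_mvm_add_sta_key_cmd *cmd,
				    u8 sta_id, u8 key_offset, u16 key_flags,
				    const u8 *key, int key_len)
{
	if (WARN_ON(key_len > 32))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sta_id = sta_id;
	cmd->key_offset = key_offset;
	cmd->key_flags = cpu_to_le16(key_flags);
	memcpy(cmd->key, key, min(key_len, 16));
	if (key_len > 16)
		memcpy(cmd->key2, key + 16, key_len - 16);
	return 0;
}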
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 66264cc5a016..bad5a552dd8d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -72,17 +72,17 @@
72#include "fw-api-d3.h" 72#include "fw-api-d3.h"
73#include "fw-api-bt-coex.h" 73#include "fw-api-bt-coex.h"
74 74
75/* queue and FIFO numbers by usage */ 75/* maximal number of Tx queues in any platform */
76#define IWL_MVM_MAX_QUEUES 20
77
78/* Tx queue numbers */
76enum { 79enum {
77 IWL_MVM_OFFCHANNEL_QUEUE = 8, 80 IWL_MVM_OFFCHANNEL_QUEUE = 8,
78 IWL_MVM_CMD_QUEUE = 9, 81 IWL_MVM_CMD_QUEUE = 9,
79 IWL_MVM_AUX_QUEUE = 15,
80 IWL_MVM_FIRST_AGG_QUEUE = 16,
81 IWL_MVM_NUM_QUEUES = 20,
82 IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1,
83 IWL_MVM_CMD_FIFO = 7
84}; 82};
85 83
84#define IWL_MVM_CMD_FIFO 7
85
86#define IWL_MVM_STATION_COUNT 16 86#define IWL_MVM_STATION_COUNT 16
87 87
88/* commands */ 88/* commands */
@@ -97,6 +97,7 @@ enum {
97 DBG_CFG = 0x9, 97 DBG_CFG = 0x9,
98 98
99 /* station table */ 99 /* station table */
100 ADD_STA_KEY = 0x17,
100 ADD_STA = 0x18, 101 ADD_STA = 0x18,
101 REMOVE_STA = 0x19, 102 REMOVE_STA = 0x19,
102 103
@@ -114,6 +115,7 @@ enum {
114 TIME_EVENT_NOTIFICATION = 0x2a, 115 TIME_EVENT_NOTIFICATION = 0x2a,
115 BINDING_CONTEXT_CMD = 0x2b, 116 BINDING_CONTEXT_CMD = 0x2b,
116 TIME_QUOTA_CMD = 0x2c, 117 TIME_QUOTA_CMD = 0x2c,
118 NON_QOS_TX_COUNTER_CMD = 0x2d,
117 119
118 LQ_CMD = 0x4e, 120 LQ_CMD = 0x4e,
119 121
@@ -130,6 +132,7 @@ enum {
130 SCAN_OFFLOAD_COMPLETE = 0x6D, 132 SCAN_OFFLOAD_COMPLETE = 0x6D,
131 SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, 133 SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
132 SCAN_OFFLOAD_CONFIG_CMD = 0x6f, 134 SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
135 MATCH_FOUND_NOTIFICATION = 0xd9,
133 136
134 /* Phy */ 137 /* Phy */
135 PHY_CONFIGURATION_CMD = 0x6a, 138 PHY_CONFIGURATION_CMD = 0x6a,
@@ -178,6 +181,7 @@ enum {
178 BT_COEX_PRIO_TABLE = 0xcc, 181 BT_COEX_PRIO_TABLE = 0xcc,
179 BT_COEX_PROT_ENV = 0xcd, 182 BT_COEX_PROT_ENV = 0xcd,
180 BT_PROFILE_NOTIFICATION = 0xce, 183 BT_PROFILE_NOTIFICATION = 0xce,
184 BT_COEX_CI = 0x5d,
181 185
182 REPLY_BEACON_FILTERING_CMD = 0xd2, 186 REPLY_BEACON_FILTERING_CMD = 0xd2,
183 187
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c76299a3a1e0..83fc5ca04433 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -199,7 +199,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
199 */ 199 */
200 200
201 for (i = 0; i < IWL_MAX_HW_QUEUES; i++) { 201 for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
202 if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE) 202 if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
203 mvm->queue_to_mac80211[i] = i; 203 mvm->queue_to_mac80211[i] = i;
204 else 204 else
205 mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; 205 mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
@@ -243,7 +243,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
243 243
244 lockdep_assert_held(&mvm->mutex); 244 lockdep_assert_held(&mvm->mutex);
245 245
246 if (mvm->init_ucode_run) 246 if (mvm->init_ucode_complete)
247 return 0; 247 return 0;
248 248
249 iwl_init_notification_wait(&mvm->notif_wait, 249 iwl_init_notification_wait(&mvm->notif_wait,
@@ -264,6 +264,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
264 if (ret) 264 if (ret)
265 goto error; 265 goto error;
266 266
267 /* Read the NVM only at driver load time, no need to do this twice */
267 if (read_nvm) { 268 if (read_nvm) {
268 /* Read nvm */ 269 /* Read nvm */
269 ret = iwl_nvm_init(mvm); 270 ret = iwl_nvm_init(mvm);
@@ -273,6 +274,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
273 } 274 }
274 } 275 }
275 276
277 /* In case we read the NVM from external file, load it to the NIC */
278 if (iwlwifi_mod_params.nvm_file)
279 iwl_mvm_load_nvm_to_nic(mvm);
280
276 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); 281 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
277 WARN_ON(ret); 282 WARN_ON(ret);
278 283
@@ -310,7 +315,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
310 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, 315 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
311 MVM_UCODE_CALIB_TIMEOUT); 316 MVM_UCODE_CALIB_TIMEOUT);
312 if (!ret) 317 if (!ret)
313 mvm->init_ucode_run = true; 318 mvm->init_ucode_complete = true;
314 goto out; 319 goto out;
315 320
316error: 321error:
@@ -353,8 +358,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
353 if (ret) 358 if (ret)
354 return ret; 359 return ret;
355 360
356 /* If we were in RFKILL during module loading, load init ucode now */ 361 /*
357 if (!mvm->init_ucode_run) { 362 * If we haven't completed the run of the init ucode during
363 * module loading, load init ucode now
364 * (for example, if we were in RFKILL)
365 */
366 if (!mvm->init_ucode_complete) {
358 ret = iwl_run_init_mvm_ucode(mvm, false); 367 ret = iwl_run_init_mvm_ucode(mvm, false);
359 if (ret && !iwlmvm_mod_params.init_dbg) { 368 if (ret && !iwlmvm_mod_params.init_dbg) {
360 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); 369 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
@@ -424,6 +433,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
424 goto error; 433 goto error;
425 } 434 }
426 435
436 ret = iwl_mvm_power_update_device_mode(mvm);
437 if (ret)
438 goto error;
439
427 IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); 440 IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
428 return 0; 441 return 0;
429 error: 442 error:
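With the hard-coded IWL_MVM_FIRST_AGG_QUEUE replaced by mvm->first_agg_queue, the mapping loop above still produces the same layout on today's devices (an illustration, assuming first_agg_queue == 16 and the command queue == 9, as in the previous constants):

/*
 * hw queues 0-8 and 10-15            -> mac80211 queues 0-8 and 10-15
 * hw queue 9 (command)               -> IWL_INVALID_MAC80211_QUEUE
 * hw queues 16 and up (aggregation)  -> IWL_INVALID_MAC80211_QUEUE
 */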
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 5fe23a5ea9b6..ab5a7ac90dcd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -80,7 +80,7 @@ struct iwl_mvm_mac_iface_iterator_data {
80 struct ieee80211_vif *vif; 80 struct ieee80211_vif *vif;
81 unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; 81 unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
82 unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; 82 unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
83 unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)]; 83 unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_MAX_QUEUES)];
84 enum iwl_tsf_id preferred_tsf; 84 enum iwl_tsf_id preferred_tsf;
85 bool found_vif; 85 bool found_vif;
86}; 86};
@@ -218,7 +218,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
218 .preferred_tsf = NUM_TSF_IDS, 218 .preferred_tsf = NUM_TSF_IDS,
219 .used_hw_queues = { 219 .used_hw_queues = {
220 BIT(IWL_MVM_OFFCHANNEL_QUEUE) | 220 BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
221 BIT(IWL_MVM_AUX_QUEUE) | 221 BIT(mvm->aux_queue) |
222 BIT(IWL_MVM_CMD_QUEUE) 222 BIT(IWL_MVM_CMD_QUEUE)
223 }, 223 },
224 .found_vif = false, 224 .found_vif = false,
@@ -242,9 +242,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
242 * that we should share it with another interface. 242 * that we should share it with another interface.
243 */ 243 */
244 244
245 /* Currently, MAC ID 0 should be used only for the managed vif */ 245 /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
246 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 246 switch (vif->type) {
247 case NL80211_IFTYPE_ADHOC:
248 break;
249 case NL80211_IFTYPE_STATION:
250 if (!vif->p2p)
251 break;
252 /* fall through */
253 default:
247 __clear_bit(0, data.available_mac_ids); 254 __clear_bit(0, data.available_mac_ids);
255 }
248 256
249 ieee80211_iterate_active_interfaces_atomic( 257 ieee80211_iterate_active_interfaces_atomic(
250 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, 258 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -302,9 +310,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
302 /* Find available queues, and allocate them to the ACs */ 310 /* Find available queues, and allocate them to the ACs */
303 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 311 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
304 u8 queue = find_first_zero_bit(data.used_hw_queues, 312 u8 queue = find_first_zero_bit(data.used_hw_queues,
305 IWL_MVM_FIRST_AGG_QUEUE); 313 mvm->first_agg_queue);
306 314
307 if (queue >= IWL_MVM_FIRST_AGG_QUEUE) { 315 if (queue >= mvm->first_agg_queue) {
308 IWL_ERR(mvm, "Failed to allocate queue\n"); 316 IWL_ERR(mvm, "Failed to allocate queue\n");
309 ret = -EIO; 317 ret = -EIO;
310 goto exit_fail; 318 goto exit_fail;
@@ -317,9 +325,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
317 /* Allocate the CAB queue for softAP and GO interfaces */ 325 /* Allocate the CAB queue for softAP and GO interfaces */
318 if (vif->type == NL80211_IFTYPE_AP) { 326 if (vif->type == NL80211_IFTYPE_AP) {
319 u8 queue = find_first_zero_bit(data.used_hw_queues, 327 u8 queue = find_first_zero_bit(data.used_hw_queues,
320 IWL_MVM_FIRST_AGG_QUEUE); 328 mvm->first_agg_queue);
321 329
322 if (queue >= IWL_MVM_FIRST_AGG_QUEUE) { 330 if (queue >= mvm->first_agg_queue) {
323 IWL_ERR(mvm, "Failed to allocate cab queue\n"); 331 IWL_ERR(mvm, "Failed to allocate cab queue\n");
324 ret = -EIO; 332 ret = -EIO;
325 goto exit_fail; 333 goto exit_fail;
@@ -559,8 +567,12 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
559 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); 567 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
560 568
561 /* Don't use cts to self as the fw doesn't support it currently. */ 569 /* Don't use cts to self as the fw doesn't support it currently. */
562 if (vif->bss_conf.use_cts_prot) 570 if (vif->bss_conf.use_cts_prot) {
563 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); 571 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
572 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
573 cmd->protection_flags |=
574 cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
575 }
564 576
565 /* 577 /*
566 * I think that we should enable these 2 flags regardless of the HT PROT 578 * I think that we should enable these 2 flags regardless of the HT PROT
@@ -712,6 +724,31 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
712 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); 724 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
713} 725}
714 726
727static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
728 struct ieee80211_vif *vif,
729 u32 action)
730{
731 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
732 struct iwl_mac_ctx_cmd cmd = {};
733
734 WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
735
736 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
737
738 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
739 MAC_FILTER_IN_PROBE_REQUEST);
740
741 /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
742 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
743 cmd.ibss.bi_reciprocal =
744 cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
745
746 /* TODO: Assumes that the beacon id == mac context id */
747 cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
748
749 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
750}
751
715struct iwl_mvm_go_iterator_data { 752struct iwl_mvm_go_iterator_data {
716 bool go_active; 753 bool go_active;
717}; 754};
@@ -721,7 +758,8 @@ static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
721 struct iwl_mvm_go_iterator_data *data = _data; 758 struct iwl_mvm_go_iterator_data *data = _data;
722 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 759 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
723 760
724 if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active) 761 if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
762 mvmvif->ap_ibss_active)
725 data->go_active = true; 763 data->go_active = true;
726} 764}
727 765
@@ -833,9 +871,10 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
833 cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); 871 cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
834 872
835 /* Set up TX beacon command fields */ 873 /* Set up TX beacon command fields */
836 iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd, 874 if (vif->type == NL80211_IFTYPE_AP)
837 beacon->data, 875 iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
838 beacon_skb_len); 876 beacon->data,
877 beacon_skb_len);
839 878
840 /* Submit command */ 879 /* Submit command */
841 cmd.len[0] = sizeof(beacon_cmd); 880 cmd.len[0] = sizeof(beacon_cmd);
@@ -848,14 +887,15 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
848 return iwl_mvm_send_cmd(mvm, &cmd); 887 return iwl_mvm_send_cmd(mvm, &cmd);
849} 888}
850 889
851/* The beacon template for the AP/GO context has changed and needs update */ 890/* The beacon template for the AP/GO/IBSS has changed and needs update */
852int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, 891int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
853 struct ieee80211_vif *vif) 892 struct ieee80211_vif *vif)
854{ 893{
855 struct sk_buff *beacon; 894 struct sk_buff *beacon;
856 int ret; 895 int ret;
857 896
858 WARN_ON(vif->type != NL80211_IFTYPE_AP); 897 WARN_ON(vif->type != NL80211_IFTYPE_AP &&
898 vif->type != NL80211_IFTYPE_ADHOC);
859 899
860 beacon = ieee80211_beacon_get(mvm->hw, vif); 900 beacon = ieee80211_beacon_get(mvm->hw, vif);
861 if (!beacon) 901 if (!beacon)
@@ -1018,6 +1058,8 @@ static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1018 return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action); 1058 return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
1019 case NL80211_IFTYPE_P2P_DEVICE: 1059 case NL80211_IFTYPE_P2P_DEVICE:
1020 return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action); 1060 return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
1061 case NL80211_IFTYPE_ADHOC:
1062 return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
1021 default: 1063 default:
1022 break; 1064 break;
1023 } 1065 }
@@ -1038,6 +1080,9 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1038 if (ret) 1080 if (ret)
1039 return ret; 1081 return ret;
1040 1082
1083 /* will only do anything at resume from D3 time */
1084 iwl_mvm_set_last_nonqos_seq(mvm, vif);
1085
1041 mvmvif->uploaded = true; 1086 mvmvif->uploaded = true;
1042 return 0; 1087 return 0;
1043} 1088}
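The IBSS MAC context added above reuses the bi/bi_reciprocal pair. As a worked example (illustration only, not driver or firmware code): for the common 100 TU beacon interval, iwl_mvm_reciprocal(100) = 0xFFFFFFFF / 100 = 42949672, roughly 2^32 / 100, which lets the firmware turn a division by the beacon interval into a multiply-and-shift:

/* approximate time / bi using the precomputed reciprocal; may be off by
 * one for some inputs because 0xFFFFFFFF is used instead of 2^32 */
static inline u32 example_div_by_bi(u32 time, u32 bi_reciprocal)
{
	return (u32)(((u64)time * bi_reciprocal) >> 32);
}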
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 9833cdf6177c..f40685c3764e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -77,6 +77,7 @@
77#include "iwl-eeprom-parse.h" 77#include "iwl-eeprom-parse.h"
78#include "fw-api-scan.h" 78#include "fw-api-scan.h"
79#include "iwl-phy-db.h" 79#include "iwl-phy-db.h"
80#include "testmode.h"
80 81
81static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 82static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
82 { 83 {
@@ -138,6 +139,14 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
138 } 139 }
139} 140}
140 141
142static int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
143{
144 /* we create the 802.11 header and SSID element */
145 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID)
146 return mvm->fw->ucode_capa.max_probe_length - 24 - 2;
147 return mvm->fw->ucode_capa.max_probe_length - 24 - 34;
148}
149
141int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 150int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
142{ 151{
143 struct ieee80211_hw *hw = mvm->hw; 152 struct ieee80211_hw *hw = mvm->hw;
@@ -158,7 +167,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
158 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 167 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
159 IEEE80211_HW_SUPPORTS_UAPSD; 168 IEEE80211_HW_SUPPORTS_UAPSD;
160 169
161 hw->queues = IWL_MVM_FIRST_AGG_QUEUE; 170 hw->queues = mvm->first_agg_queue;
162 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 171 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
163 hw->rate_control_algorithm = "iwl-mvm-rs"; 172 hw->rate_control_algorithm = "iwl-mvm-rs";
164 173
@@ -181,6 +190,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
181 BIT(NL80211_IFTYPE_P2P_GO) | 190 BIT(NL80211_IFTYPE_P2P_GO) |
182 BIT(NL80211_IFTYPE_P2P_DEVICE); 191 BIT(NL80211_IFTYPE_P2P_DEVICE);
183 192
193 /* IBSS has bugs in older versions */
194 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
195 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
196
184 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 197 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
185 WIPHY_FLAG_DISABLE_BEACON_HINTS | 198 WIPHY_FLAG_DISABLE_BEACON_HINTS |
186 WIPHY_FLAG_IBSS_RSN; 199 WIPHY_FLAG_IBSS_RSN;
@@ -212,9 +225,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
212 225
213 iwl_mvm_reset_phy_ctxts(mvm); 226 iwl_mvm_reset_phy_ctxts(mvm);
214 227
215 /* we create the 802.11 header and a max-length SSID element */ 228 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
216 hw->wiphy->max_scan_ie_len = 229
217 mvm->fw->ucode_capa.max_probe_length - 24 - 34;
218 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 230 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
219 231
220 if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) 232 if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
@@ -231,6 +243,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
231 else 243 else
232 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 244 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
233 245
246 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
247 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
248 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
249 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
250 /* we create the 802.11 header and zero length SSID IE. */
251 hw->wiphy->max_sched_scan_ie_len =
252 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
253 }
254
234 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 255 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
235 NL80211_FEATURE_P2P_GO_OPPPS; 256 NL80211_FEATURE_P2P_GO_OPPPS;
236 257
@@ -548,7 +569,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
548 * In short: there's not much we can do at this point, other than 569 * In short: there's not much we can do at this point, other than
549 * allocating resources :) 570 * allocating resources :)
550 */ 571 */
551 if (vif->type == NL80211_IFTYPE_AP) { 572 if (vif->type == NL80211_IFTYPE_AP ||
573 vif->type == NL80211_IFTYPE_ADHOC) {
552 u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); 574 u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
553 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 575 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
554 qmask); 576 qmask);
@@ -698,7 +720,14 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
698 * For AP/GO interface, the tear down of the resources allocated to the 720 * For AP/GO interface, the tear down of the resources allocated to the
699 * interface is be handled as part of the stop_ap flow. 721 * interface is be handled as part of the stop_ap flow.
700 */ 722 */
701 if (vif->type == NL80211_IFTYPE_AP) { 723 if (vif->type == NL80211_IFTYPE_AP ||
724 vif->type == NL80211_IFTYPE_ADHOC) {
725#ifdef CONFIG_NL80211_TESTMODE
726 if (vif == mvm->noa_vif) {
727 mvm->noa_vif = NULL;
728 mvm->noa_duration = 0;
729 }
730#endif
702 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); 731 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
703 goto out_release; 732 goto out_release;
704 } 733 }
@@ -796,6 +825,27 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
796 return; 825 return;
797 } 826 }
798 iwl_mvm_configure_mcast_filter(mvm, vif); 827 iwl_mvm_configure_mcast_filter(mvm, vif);
828
829 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
830 &mvm->status)) {
831 /*
832 * If we're restarting then the firmware will
833 * obviously have lost synchronisation with
834 * the AP. It will attempt to synchronise by
835 * itself, but we can make it more reliable by
836 * scheduling a session protection time event.
837 *
838 * The firmware needs to receive a beacon to
839 * catch up with synchronisation, so use 110% of
840 * the beacon interval.
841 *
842 * Set a large maximum delay to allow for more
843 * than a single interface.
844 */
845 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
846 iwl_mvm_protect_session(mvm, vif, dur, dur,
847 5 * dur);
848 }
799 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { 849 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
800 /* remove AP station now that the MAC is unassoc */ 850 /* remove AP station now that the MAC is unassoc */
801 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); 851 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@@ -819,7 +869,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
819 if (ret) 869 if (ret)
820 IWL_ERR(mvm, "failed to update power mode\n"); 870 IWL_ERR(mvm, "failed to update power mode\n");
821 } 871 }
822 iwl_mvm_bt_coex_vif_assoc(mvm, vif); 872 iwl_mvm_bt_coex_vif_change(mvm);
823 } else if (changes & BSS_CHANGED_BEACON_INFO) { 873 } else if (changes & BSS_CHANGED_BEACON_INFO) {
824 /* 874 /*
825 * We received a beacon _after_ association so 875 * We received a beacon _after_ association so
@@ -848,7 +898,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
848 } 898 }
849} 899}
850 900
851static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 901static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
902 struct ieee80211_vif *vif)
852{ 903{
853 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 904 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
854 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 905 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -871,7 +922,7 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
871 if (ret) 922 if (ret)
872 goto out_remove; 923 goto out_remove;
873 924
874 mvmvif->ap_active = true; 925 mvmvif->ap_ibss_active = true;
875 926
876 /* Send the bcast station. At this stage the TBTT and DTIM time events 927 /* Send the bcast station. At this stage the TBTT and DTIM time events
877 * are added and applied to the scheduler */ 928 * are added and applied to the scheduler */
@@ -883,10 +934,12 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
883 if (ret) 934 if (ret)
884 goto out_rm_bcast; 935 goto out_rm_bcast;
885 936
886 /* Need to update the P2P Device MAC */ 937 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
887 if (vif->p2p && mvm->p2p_device_vif) 938 if (vif->p2p && mvm->p2p_device_vif)
888 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); 939 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
889 940
941 iwl_mvm_bt_coex_vif_change(mvm);
942
890 mutex_unlock(&mvm->mutex); 943 mutex_unlock(&mvm->mutex);
891 return 0; 944 return 0;
892 945
@@ -901,7 +954,8 @@ out_unlock:
901 return ret; 954 return ret;
902} 955}
903 956
904static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 957static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
958 struct ieee80211_vif *vif)
905{ 959{
906 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 960 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
907 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 961 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -910,9 +964,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
910 964
911 mutex_lock(&mvm->mutex); 965 mutex_lock(&mvm->mutex);
912 966
913 mvmvif->ap_active = false; 967 mvmvif->ap_ibss_active = false;
968
969 iwl_mvm_bt_coex_vif_change(mvm);
914 970
915 /* Need to update the P2P Device MAC */ 971 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
916 if (vif->p2p && mvm->p2p_device_vif) 972 if (vif->p2p && mvm->p2p_device_vif)
917 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); 973 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
918 974
@@ -924,10 +980,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
924 mutex_unlock(&mvm->mutex); 980 mutex_unlock(&mvm->mutex);
925} 981}
926 982
927static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm, 983static void
928 struct ieee80211_vif *vif, 984iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
929 struct ieee80211_bss_conf *bss_conf, 985 struct ieee80211_vif *vif,
930 u32 changes) 986 struct ieee80211_bss_conf *bss_conf,
987 u32 changes)
931{ 988{
932 /* Need to send a new beacon template to the FW */ 989 /* Need to send a new beacon template to the FW */
933 if (changes & BSS_CHANGED_BEACON) { 990 if (changes & BSS_CHANGED_BEACON) {
@@ -950,7 +1007,8 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
950 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 1007 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
951 break; 1008 break;
952 case NL80211_IFTYPE_AP: 1009 case NL80211_IFTYPE_AP:
953 iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes); 1010 case NL80211_IFTYPE_ADHOC:
1011 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
954 break; 1012 break;
955 default: 1013 default:
956 /* shouldn't happen */ 1014 /* shouldn't happen */
@@ -1163,7 +1221,54 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
1163 1221
1164 mutex_lock(&mvm->mutex); 1222 mutex_lock(&mvm->mutex);
1165 /* Try really hard to protect the session and hear a beacon */ 1223 /* Try really hard to protect the session and hear a beacon */
1166 iwl_mvm_protect_session(mvm, vif, duration, min_duration); 1224 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500);
1225 mutex_unlock(&mvm->mutex);
1226}
1227
1228static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
1229 struct ieee80211_vif *vif,
1230 struct cfg80211_sched_scan_request *req,
1231 struct ieee80211_sched_scan_ies *ies)
1232{
1233 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1234 int ret;
1235
1236 mutex_lock(&mvm->mutex);
1237
1238 if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
1239 IWL_DEBUG_SCAN(mvm,
1240 "SCHED SCAN request during internal scan - abort\n");
1241 ret = -EBUSY;
1242 goto out;
1243 }
1244
1245 mvm->scan_status = IWL_MVM_SCAN_SCHED;
1246
1247 ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
1248 if (ret)
1249 goto err;
1250
1251 ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
1252 if (ret)
1253 goto err;
1254
1255 ret = iwl_mvm_sched_scan_start(mvm, req);
1256 if (!ret)
1257 goto out;
1258err:
1259 mvm->scan_status = IWL_MVM_SCAN_NONE;
1260out:
1261 mutex_unlock(&mvm->mutex);
1262 return ret;
1263}
1264
1265static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
1266 struct ieee80211_vif *vif)
1267{
1268 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1269
1270 mutex_lock(&mvm->mutex);
1271 iwl_mvm_sched_scan_stop(mvm);
1167 mutex_unlock(&mvm->mutex); 1272 mutex_unlock(&mvm->mutex);
1168} 1273}
1169 1274
@@ -1207,8 +1312,13 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
1207 1312
1208 switch (cmd) { 1313 switch (cmd) {
1209 case SET_KEY: 1314 case SET_KEY:
1210 if (vif->type == NL80211_IFTYPE_AP && !sta) { 1315 if ((vif->type == NL80211_IFTYPE_ADHOC ||
1211 /* GTK on AP interface is a TX-only key, return 0 */ 1316 vif->type == NL80211_IFTYPE_AP) && !sta) {
1317 /*
1318 * GTK on AP interface is a TX-only key, return 0;
1319 * on IBSS they're per-station and because we're lazy
1320 * we don't support them for RX, so do the same.
1321 */
1212 ret = 0; 1322 ret = 0;
1213 key->hw_key_idx = STA_KEY_IDX_INVALID; 1323 key->hw_key_idx = STA_KEY_IDX_INVALID;
1214 break; 1324 break;
@@ -1252,6 +1362,9 @@ static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
1252{ 1362{
1253 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1363 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1254 1364
1365 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
1366 return;
1367
1255 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); 1368 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
1256} 1369}
1257 1370
@@ -1445,6 +1558,7 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
1445 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def, 1558 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
1446 ctx->rx_chains_static, 1559 ctx->rx_chains_static,
1447 ctx->rx_chains_dynamic); 1560 ctx->rx_chains_dynamic);
1561 iwl_mvm_bt_coex_vif_change(mvm);
1448 mutex_unlock(&mvm->mutex); 1562 mutex_unlock(&mvm->mutex);
1449} 1563}
1450 1564
@@ -1464,14 +1578,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
1464 1578
1465 switch (vif->type) { 1579 switch (vif->type) {
1466 case NL80211_IFTYPE_AP: 1580 case NL80211_IFTYPE_AP:
1581 case NL80211_IFTYPE_ADHOC:
1467 /* 1582 /*
1468 * The AP binding flow is handled as part of the start_ap flow 1583 * The AP binding flow is handled as part of the start_ap flow
1469 * (in bss_info_changed). 1584 * (in bss_info_changed), similarly for IBSS.
1470 */ 1585 */
1471 ret = 0; 1586 ret = 0;
1472 goto out_unlock; 1587 goto out_unlock;
1473 case NL80211_IFTYPE_STATION: 1588 case NL80211_IFTYPE_STATION:
1474 case NL80211_IFTYPE_ADHOC:
1475 case NL80211_IFTYPE_MONITOR: 1589 case NL80211_IFTYPE_MONITOR:
1476 break; 1590 break;
1477 default: 1591 default:
@@ -1517,10 +1631,10 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
1517 1631
1518 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 1632 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
1519 1633
1520 if (vif->type == NL80211_IFTYPE_AP)
1521 goto out_unlock;
1522
1523 switch (vif->type) { 1634 switch (vif->type) {
1635 case NL80211_IFTYPE_AP:
1636 case NL80211_IFTYPE_ADHOC:
1637 goto out_unlock;
1524 case NL80211_IFTYPE_MONITOR: 1638 case NL80211_IFTYPE_MONITOR:
1525 mvmvif->monitor_active = false; 1639 mvmvif->monitor_active = false;
1526 iwl_mvm_update_quotas(mvm, NULL); 1640 iwl_mvm_update_quotas(mvm, NULL);
@@ -1550,14 +1664,72 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
1550 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); 1664 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
1551} 1665}
1552 1666
1553static void iwl_mvm_mac_rssi_callback(struct ieee80211_hw *hw, 1667#ifdef CONFIG_NL80211_TESTMODE
1668static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
1669 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
1670 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
1671 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
1672};
1673
1674static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
1554 struct ieee80211_vif *vif, 1675 struct ieee80211_vif *vif,
1555 enum ieee80211_rssi_event rssi_event) 1676 void *data, int len)
1677{
1678 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
1679 int err;
1680 u32 noa_duration;
1681
1682 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
1683 if (err)
1684 return err;
1685
1686 if (!tb[IWL_MVM_TM_ATTR_CMD])
1687 return -EINVAL;
1688
1689 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
1690 case IWL_MVM_TM_CMD_SET_NOA:
1691 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
1692 !vif->bss_conf.enable_beacon ||
1693 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
1694 return -EINVAL;
1695
1696 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
1697 if (noa_duration >= vif->bss_conf.beacon_int)
1698 return -EINVAL;
1699
1700 mvm->noa_duration = noa_duration;
1701 mvm->noa_vif = vif;
1702
1703 return iwl_mvm_update_quotas(mvm, NULL);
1704 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
1705 /* must be associated client vif - ignore authorized */
1706 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
1707 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
1708 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
1709 return -EINVAL;
1710
1711 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
1712 return iwl_mvm_enable_beacon_filter(mvm, vif);
1713 return iwl_mvm_disable_beacon_filter(mvm, vif);
1714 }
1715
1716 return -EOPNOTSUPP;
1717}
1718
1719static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
1720 struct ieee80211_vif *vif,
1721 void *data, int len)
1556{ 1722{
1557 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1723 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1724 int err;
1558 1725
1559 iwl_mvm_bt_rssi_event(mvm, vif, rssi_event); 1726 mutex_lock(&mvm->mutex);
1727 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
1728 mutex_unlock(&mvm->mutex);
1729
1730 return err;
1560} 1731}
1732#endif
1561 1733
1562struct ieee80211_ops iwl_mvm_hw_ops = { 1734struct ieee80211_ops iwl_mvm_hw_ops = {
1563 .tx = iwl_mvm_mac_tx, 1735 .tx = iwl_mvm_mac_tx,
@@ -1578,23 +1750,27 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
1578 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, 1750 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
1579 .conf_tx = iwl_mvm_mac_conf_tx, 1751 .conf_tx = iwl_mvm_mac_conf_tx,
1580 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, 1752 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
1753 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
1754 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
1581 .set_key = iwl_mvm_mac_set_key, 1755 .set_key = iwl_mvm_mac_set_key,
1582 .update_tkip_key = iwl_mvm_mac_update_tkip_key, 1756 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
1583 .remain_on_channel = iwl_mvm_roc, 1757 .remain_on_channel = iwl_mvm_roc,
1584 .cancel_remain_on_channel = iwl_mvm_cancel_roc, 1758 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
1585 .rssi_callback = iwl_mvm_mac_rssi_callback,
1586
1587 .add_chanctx = iwl_mvm_add_chanctx, 1759 .add_chanctx = iwl_mvm_add_chanctx,
1588 .remove_chanctx = iwl_mvm_remove_chanctx, 1760 .remove_chanctx = iwl_mvm_remove_chanctx,
1589 .change_chanctx = iwl_mvm_change_chanctx, 1761 .change_chanctx = iwl_mvm_change_chanctx,
1590 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, 1762 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
1591 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, 1763 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
1592 1764
1593 .start_ap = iwl_mvm_start_ap, 1765 .start_ap = iwl_mvm_start_ap_ibss,
1594 .stop_ap = iwl_mvm_stop_ap, 1766 .stop_ap = iwl_mvm_stop_ap_ibss,
1767 .join_ibss = iwl_mvm_start_ap_ibss,
1768 .leave_ibss = iwl_mvm_stop_ap_ibss,
1595 1769
1596 .set_tim = iwl_mvm_set_tim, 1770 .set_tim = iwl_mvm_set_tim,
1597 1771
1772 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
1773
1598#ifdef CONFIG_PM_SLEEP 1774#ifdef CONFIG_PM_SLEEP
1599 /* look at d3.c */ 1775 /* look at d3.c */
1600 .suspend = iwl_mvm_suspend, 1776 .suspend = iwl_mvm_suspend,
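The iwl_mvm_max_scan_ie_len() helper added above only changes the arithmetic when the firmware advertises IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID. A worked example, assuming a 512-byte probe request budget (the SCAN_OFFLOAD_PROBE_REQ_SIZE value; the real max_probe_length may differ):

/*
 * without NO_BASIC_SSID: 512 - 24 (802.11 header) - 34 (2-byte SSID IE
 *                        header + 32-byte max SSID)       = 454 bytes
 * with NO_BASIC_SSID:    512 - 24 - 2 (empty SSID IE)     = 486 bytes
 * i.e. 32 extra bytes of user-supplied IEs per probe request.
 */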
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b0389279cc1e..6235cb729f5c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -162,6 +162,7 @@ enum iwl_power_scheme {
162struct iwl_mvm_power_ops { 162struct iwl_mvm_power_ops {
163 int (*power_update_mode)(struct iwl_mvm *mvm, 163 int (*power_update_mode)(struct iwl_mvm *mvm,
164 struct ieee80211_vif *vif); 164 struct ieee80211_vif *vif);
165 int (*power_update_device_mode)(struct iwl_mvm *mvm);
165 int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 166 int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
166#ifdef CONFIG_IWLWIFI_DEBUGFS 167#ifdef CONFIG_IWLWIFI_DEBUGFS
167 int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 168 int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -241,12 +242,18 @@ enum iwl_mvm_smps_type_request {
241* @last_beacon_signal: last beacon rssi signal in dbm 242* @last_beacon_signal: last beacon rssi signal in dbm
242* @ave_beacon_signal: average beacon signal 243* @ave_beacon_signal: average beacon signal
243* @last_cqm_event: rssi of the last cqm event 244* @last_cqm_event: rssi of the last cqm event
245* @bt_coex_min_thold: minimum threshold for BT coex
246* @bt_coex_max_thold: maximum threshold for BT coex
247* @last_bt_coex_event: rssi of the last BT coex event
244*/ 248*/
245struct iwl_mvm_vif_bf_data { 249struct iwl_mvm_vif_bf_data {
246 bool bf_enabled; 250 bool bf_enabled;
247 bool ba_enabled; 251 bool ba_enabled;
248 s8 ave_beacon_signal; 252 s8 ave_beacon_signal;
249 s8 last_cqm_event; 253 s8 last_cqm_event;
254 s8 bt_coex_min_thold;
255 s8 bt_coex_max_thold;
256 s8 last_bt_coex_event;
250}; 257};
251 258
252/** 259/**
@@ -255,8 +262,8 @@ struct iwl_mvm_vif_bf_data {
255 * @color: to solve races upon MAC addition and removal 262 * @color: to solve races upon MAC addition and removal
256 * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA 263 * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
257 * @uploaded: indicates the MAC context has been added to the device 264 * @uploaded: indicates the MAC context has been added to the device
258 * @ap_active: indicates that ap context is configured, and that the interface 265 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
259 * should get quota etc. 266 * should get quota etc.
260 * @monitor_active: indicates that monitor context is configured, and that the 267 * @monitor_active: indicates that monitor context is configured, and that the
261 * interface should get quota etc. 268 * interface should get quota etc.
262 * @queue_params: QoS params for this MAC 269 * @queue_params: QoS params for this MAC
@@ -272,7 +279,7 @@ struct iwl_mvm_vif {
272 u8 ap_sta_id; 279 u8 ap_sta_id;
273 280
274 bool uploaded; 281 bool uploaded;
275 bool ap_active; 282 bool ap_ibss_active;
276 bool monitor_active; 283 bool monitor_active;
277 struct iwl_mvm_vif_bf_data bf_data; 284 struct iwl_mvm_vif_bf_data bf_data;
278 285
@@ -306,6 +313,9 @@ struct iwl_mvm_vif {
306 313
307 int tx_key_idx; 314 int tx_key_idx;
308 315
316 bool seqno_valid;
317 u16 seqno;
318
309#if IS_ENABLED(CONFIG_IPV6) 319#if IS_ENABLED(CONFIG_IPV6)
310 /* IPv6 addresses for WoWLAN */ 320 /* IPv6 addresses for WoWLAN */
311 struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; 321 struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
@@ -333,6 +343,7 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
333enum iwl_scan_status { 343enum iwl_scan_status {
334 IWL_MVM_SCAN_NONE, 344 IWL_MVM_SCAN_NONE,
335 IWL_MVM_SCAN_OS, 345 IWL_MVM_SCAN_OS,
346 IWL_MVM_SCAN_SCHED,
336}; 347};
337 348
338/** 349/**
@@ -434,7 +445,7 @@ struct iwl_mvm {
434 445
435 enum iwl_ucode_type cur_ucode; 446 enum iwl_ucode_type cur_ucode;
436 bool ucode_loaded; 447 bool ucode_loaded;
437 bool init_ucode_run; 448 bool init_ucode_complete;
438 u32 error_event_table; 449 u32 error_event_table;
439 u32 log_event_table; 450 u32 log_event_table;
440 451
@@ -470,6 +481,9 @@ struct iwl_mvm {
470 enum iwl_scan_status scan_status; 481 enum iwl_scan_status scan_status;
471 struct iwl_scan_cmd *scan_cmd; 482 struct iwl_scan_cmd *scan_cmd;
472 483
484 /* rx chain antennas set through debugfs for the scan command */
485 u8 scan_rx_ant;
486
473 /* Internal station */ 487 /* Internal station */
474 struct iwl_mvm_int_sta aux_sta; 488 struct iwl_mvm_int_sta aux_sta;
475 489
@@ -479,7 +493,8 @@ struct iwl_mvm {
479#ifdef CONFIG_IWLWIFI_DEBUGFS 493#ifdef CONFIG_IWLWIFI_DEBUGFS
480 struct dentry *debugfs_dir; 494 struct dentry *debugfs_dir;
481 u32 dbgfs_sram_offset, dbgfs_sram_len; 495 u32 dbgfs_sram_offset, dbgfs_sram_len;
482 bool prevent_power_down_d3; 496 bool disable_power_off;
497 bool disable_power_off_d3;
483#endif 498#endif
484 499
485 struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; 500 struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -523,12 +538,23 @@ struct iwl_mvm {
523 /* BT-Coex */ 538 /* BT-Coex */
524 u8 bt_kill_msk; 539 u8 bt_kill_msk;
525 struct iwl_bt_coex_profile_notif last_bt_notif; 540 struct iwl_bt_coex_profile_notif last_bt_notif;
541 struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
526 542
527 /* Thermal Throttling and CTkill */ 543 /* Thermal Throttling and CTkill */
528 struct iwl_mvm_tt_mgmt thermal_throttle; 544 struct iwl_mvm_tt_mgmt thermal_throttle;
529 s32 temperature; /* Celsius */ 545 s32 temperature; /* Celsius */
530 546
531 const struct iwl_mvm_power_ops *pm_ops; 547 const struct iwl_mvm_power_ops *pm_ops;
548
549#ifdef CONFIG_NL80211_TESTMODE
550 u32 noa_duration;
551 struct ieee80211_vif *noa_vif;
552#endif
553
554 /* Tx queues */
555 u8 aux_queue;
556 u8 first_agg_queue;
557 u8 last_agg_queue;
532}; 558};
533 559
534/* Extract MVM priv from op_mode and _hw */ 560/* Extract MVM priv from op_mode and _hw */
@@ -570,6 +596,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
570/* Utils */ 596/* Utils */
571int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, 597int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
572 enum ieee80211_band band); 598 enum ieee80211_band band);
599void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
600 enum ieee80211_band band,
601 struct ieee80211_tx_rate *r);
573u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); 602u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
574void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); 603void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
575void iwl_mvm_dump_sram(struct iwl_mvm *mvm); 604void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
@@ -608,6 +637,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
608 637
609/* NVM */ 638/* NVM */
610int iwl_nvm_init(struct iwl_mvm *mvm); 639int iwl_nvm_init(struct iwl_mvm *mvm);
640int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
611 641
612int iwl_mvm_up(struct iwl_mvm *mvm); 642int iwl_mvm_up(struct iwl_mvm *mvm);
613int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); 643int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
@@ -682,6 +712,23 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
682 struct iwl_device_cmd *cmd); 712 struct iwl_device_cmd *cmd);
683void iwl_mvm_cancel_scan(struct iwl_mvm *mvm); 713void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
684 714
715/* Scheduled scan */
716int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
717 struct iwl_rx_cmd_buffer *rxb,
718 struct iwl_device_cmd *cmd);
719int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
720 struct ieee80211_vif *vif,
721 struct cfg80211_sched_scan_request *req,
722 struct ieee80211_sched_scan_ies *ies);
723int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
724 struct cfg80211_sched_scan_request *req);
725int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
726 struct cfg80211_sched_scan_request *req);
727void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
728int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
729 struct iwl_rx_cmd_buffer *rxb,
730 struct iwl_device_cmd *cmd);
731
685/* MVM debugfs */ 732/* MVM debugfs */
686#ifdef CONFIG_IWLWIFI_DEBUGFS 733#ifdef CONFIG_IWLWIFI_DEBUGFS
687int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); 734int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -720,6 +767,13 @@ static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
720 return mvm->pm_ops->power_disable(mvm, vif); 767 return mvm->pm_ops->power_disable(mvm, vif);
721} 768}
722 769
770static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
771{
772 if (mvm->pm_ops->power_update_device_mode)
773 return mvm->pm_ops->power_update_device_mode(mvm);
774 return 0;
775}
776
723#ifdef CONFIG_IWLWIFI_DEBUGFS 777#ifdef CONFIG_IWLWIFI_DEBUGFS
724static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm, 778static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
725 struct ieee80211_vif *vif, 779 struct ieee80211_vif *vif,
@@ -745,6 +799,15 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
745void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, 799void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
746 struct ieee80211_vif *vif, int idx); 800 struct ieee80211_vif *vif, int idx);
747extern const struct file_operations iwl_dbgfs_d3_test_ops; 801extern const struct file_operations iwl_dbgfs_d3_test_ops;
802#ifdef CONFIG_PM_SLEEP
803void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
804 struct ieee80211_vif *vif);
805#else
806static inline void
807iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
808{
809}
810#endif
748 811
749/* BT Coex */ 812/* BT Coex */
750int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm); 813int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -754,7 +817,20 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
 			 struct iwl_device_cmd *cmd);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   enum ieee80211_rssi_event rssi_event);
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+				     struct ieee80211_sta *sta);
+
+enum iwl_bt_kill_msk {
+	BT_KILL_MSK_DEFAULT,
+	BT_KILL_MSK_SCO_HID_A2DP,
+	BT_KILL_MSK_REDUCED_TXPOW,
+	BT_KILL_MSK_MAX,
+};
+extern const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX];
+extern const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX];
 
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
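The power_update_device_mode hook added to the pm_ops table above is optional: the inline wrapper calls it only when the backend provides it and otherwise reports success. A minimal standalone sketch of that optional-callback pattern, using simplified stand-in types (struct dev and struct power_ops here are illustrative, not the driver's real structures):

	#include <stddef.h>

	struct dev;					/* opaque stand-in for struct iwl_mvm */

	struct power_ops {
		int (*update_device_mode)(struct dev *d);	/* may be NULL */
	};

	static int power_update_device_mode(const struct power_ops *ops, struct dev *d)
	{
		if (ops->update_device_mode)
			return ops->update_device_mode(d);
		return 0;	/* a missing optional op is not an error */
	}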
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index edb94ea31654..2beffd028b67 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -77,7 +77,7 @@ static const int nvm_to_read[] = {
 
 /* Default NVM size to read */
 #define IWL_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
-#define IWL_MAX_NVM_SECTION_SIZE	6000
+#define IWL_MAX_NVM_SECTION_SIZE	7000
 
 #define NVM_WRITE_OPCODE 1
 #define NVM_READ_OPCODE 0
@@ -259,6 +259,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 #define MAX_NVM_FILE_LEN	16384
 
 /*
+ * Reads external NVM from a file into mvm->nvm_sections
+ *
  * HOW TO CREATE THE NVM FILE FORMAT:
  * ------------------------------
  * 1. create hex file, format:
@@ -277,20 +279,23 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
  *
  * 4. save as "iNVM_xxx.bin" under /lib/firmware
  */
-static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
+static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 {
-	int ret, section_id, section_size;
+	int ret, section_size;
+	u16 section_id;
 	const struct firmware *fw_entry;
 	const struct {
 		__le16 word1;
 		__le16 word2;
 		u8 data[];
 	} *file_sec;
-	const u8 *eof;
+	const u8 *eof, *temp;
 
 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
 #define NVM_WORD2_ID(x) (x >> 12)
 
+	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
+
 	/*
 	 * Obtain NVM image via request_firmware. Since we already used
 	 * request_firmware_nowait() for the firmware binary load and only
@@ -362,12 +367,18 @@ static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
 			break;
 		}
 
-		ret = iwl_nvm_write_section(mvm, section_id, file_sec->data,
-					    section_size);
-		if (ret < 0) {
-			IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
+		if (!temp) {
+			ret = -ENOMEM;
+			break;
+		}
+		if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) {
+			IWL_ERR(mvm, "Invalid NVM section ID\n");
+			ret = -EINVAL;
 			break;
 		}
+		mvm->nvm_sections[section_id].data = temp;
+		mvm->nvm_sections[section_id].length = section_size;
 
 		/* advance to the next section */
 		file_sec = (void *)(file_sec->data + section_size);
@@ -377,6 +388,28 @@ out:
 	return ret;
 }
 
+/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
+{
+	int i, ret;
+	u16 section_id;
+	struct iwl_nvm_section *sections = mvm->nvm_sections;
+
+	IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n");
+
+	for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+		section_id = nvm_to_read[i];
+		ret = iwl_nvm_write_section(mvm, section_id,
+					    sections[section_id].data,
+					    sections[section_id].length);
+		if (ret < 0) {
+			IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+			break;
+		}
+	}
+	return ret;
+}
+
 int iwl_nvm_init(struct iwl_mvm *mvm)
 {
 	int ret, i, section;
@@ -385,36 +418,36 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
 	/* load external NVM if configured */
 	if (iwlwifi_mod_params.nvm_file) {
 		/* move to External NVM flow */
-		ret = iwl_mvm_load_external_nvm(mvm);
+		ret = iwl_mvm_read_external_nvm(mvm);
 		if (ret)
 			return ret;
-	}
-
-	/* Read From FW NVM */
-	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
-
-	/* TODO: find correct NVM max size for a section */
-	nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
-			     GFP_KERNEL);
-	if (!nvm_buffer)
-		return -ENOMEM;
-	for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
-		section = nvm_to_read[i];
-		/* we override the constness for initial read */
-		ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
-		if (ret < 0)
-			break;
-		temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
-		if (!temp) {
-			ret = -ENOMEM;
-			break;
+	} else {
+		/* Read From FW NVM */
+		IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
+		/* TODO: find correct NVM max size for a section */
+		nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+				     GFP_KERNEL);
+		if (!nvm_buffer)
+			return -ENOMEM;
+		for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+			section = nvm_to_read[i];
+			/* we override the constness for initial read */
+			ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+			if (ret < 0)
+				break;
+			temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+			if (!temp) {
+				ret = -ENOMEM;
+				break;
+			}
+			mvm->nvm_sections[section].data = temp;
+			mvm->nvm_sections[section].length = ret;
 		}
-		mvm->nvm_sections[section].data = temp;
-		mvm->nvm_sections[section].length = ret;
+		kfree(nvm_buffer);
+		if (ret < 0)
+			return ret;
 	}
-	kfree(nvm_buffer);
-	if (ret < 0)
-		return ret;
 
 	mvm->nvm_data = iwl_parse_nvm_sections(mvm);
 	if (!mvm->nvm_data)
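The external NVM read above walks per-section headers in the iNVM_xxx.bin file: the low 10 bits of word1 give the section length in 8-byte units and the top 4 bits of word2 give the section id, exactly as the NVM_WORD1_LEN/NVM_WORD2_ID macros encode. A minimal user-space sketch of that walk, assuming a little-endian host; nvm_file_sec and parse_nvm_image are illustrative names, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	struct nvm_file_sec {
		uint16_t word1;		/* bits 0..9: length in 8-byte units */
		uint16_t word2;		/* bits 12..15: section id */
		uint8_t data[];
	};

	#define NVM_WORD1_LEN(x) (8 * ((x) & 0x03FF))
	#define NVM_WORD2_ID(x)  ((x) >> 12)

	static int parse_nvm_image(const uint8_t *buf, size_t len)
	{
		const uint8_t *eof = buf + len;

		while (buf + sizeof(struct nvm_file_sec) <= eof) {
			const struct nvm_file_sec *sec = (const void *)buf;
			unsigned int sec_len = NVM_WORD1_LEN(sec->word1);
			unsigned int sec_id = NVM_WORD2_ID(sec->word2);

			if (sec->data + sec_len > eof)
				return -1;		/* truncated section */
			printf("section %u: %u bytes\n", sec_id, sec_len);
			buf = sec->data + sec_len;	/* advance to the next header */
		}
		return 0;
	}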
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 2fcc8ef88a68..59b7cb3c6134 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -224,6 +224,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 
 	RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
 	RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
+		   iwl_mvm_rx_scan_offload_complete_notif, false),
+	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
+		   false),
 
 	RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
 	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@@ -249,6 +253,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
 	CMD(TIME_EVENT_NOTIFICATION),
 	CMD(BINDING_CONTEXT_CMD),
 	CMD(TIME_QUOTA_CMD),
+	CMD(NON_QOS_TX_COUNTER_CMD),
 	CMD(RADIO_VERSION_NOTIFICATION),
 	CMD(SCAN_REQUEST_CMD),
 	CMD(SCAN_ABORT_CMD),
@@ -260,10 +265,12 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
 	CMD(CALIB_RES_NOTIF_PHY_DB),
 	CMD(SET_CALIB_DEFAULT_CMD),
 	CMD(CALIBRATION_COMPLETE_NOTIFICATION),
+	CMD(ADD_STA_KEY),
 	CMD(ADD_STA),
 	CMD(REMOVE_STA),
 	CMD(LQ_CMD),
 	CMD(SCAN_OFFLOAD_CONFIG_CMD),
+	CMD(MATCH_FOUND_NOTIFICATION),
 	CMD(SCAN_OFFLOAD_REQUEST_CMD),
 	CMD(SCAN_OFFLOAD_ABORT_CMD),
 	CMD(SCAN_OFFLOAD_COMPLETE),
@@ -303,6 +310,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
 	CMD(REPLY_BEACON_FILTERING_CMD),
 	CMD(REPLY_THERMAL_MNG_BACKOFF),
 	CMD(MAC_PM_POWER_TABLE),
+	CMD(BT_COEX_CI),
 };
 #undef CMD
 
@@ -344,6 +352,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 	mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
 
+	mvm->aux_queue = 15;
+	mvm->first_agg_queue = 16;
+	mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+	if (mvm->cfg->base_params->num_of_queues == 16) {
+		mvm->aux_queue = 11;
+		mvm->first_agg_queue = 12;
+	}
+
 	mutex_init(&mvm->mutex);
 	spin_lock_init(&mvm->async_handlers_lock);
 	INIT_LIST_HEAD(&mvm->time_event_list);
@@ -401,24 +417,32 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
401 IWL_INFO(mvm, "Detected %s, REV=0x%X\n", 417 IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
402 mvm->cfg->name, mvm->trans->hw_rev); 418 mvm->cfg->name, mvm->trans->hw_rev);
403 419
404 err = iwl_trans_start_hw(mvm->trans);
405 if (err)
406 goto out_free;
407
408 iwl_mvm_tt_initialize(mvm); 420 iwl_mvm_tt_initialize(mvm);
409 421
410 mutex_lock(&mvm->mutex); 422 /*
411 err = iwl_run_init_mvm_ucode(mvm, true); 423 * If the NVM exists in an external file,
412 mutex_unlock(&mvm->mutex); 424 * there is no need to unnecessarily power up the NIC at driver load
413 /* returns 0 if successful, 1 if success but in rfkill */ 425 */
414 if (err < 0 && !iwlmvm_mod_params.init_dbg) { 426 if (iwlwifi_mod_params.nvm_file) {
415 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); 427 iwl_nvm_init(mvm);
416 goto out_free; 428 } else {
417 } 429 err = iwl_trans_start_hw(mvm->trans);
430 if (err)
431 goto out_free;
432
433 mutex_lock(&mvm->mutex);
434 err = iwl_run_init_mvm_ucode(mvm, true);
435 mutex_unlock(&mvm->mutex);
436 /* returns 0 if successful, 1 if success but in rfkill */
437 if (err < 0 && !iwlmvm_mod_params.init_dbg) {
438 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
439 goto out_free;
440 }
418 441
419 /* Stop the hw after the ALIVE and NVM has been read */ 442 /* Stop the hw after the ALIVE and NVM has been read */
420 if (!iwlmvm_mod_params.init_dbg) 443 if (!iwlmvm_mod_params.init_dbg)
421 iwl_trans_stop_hw(mvm->trans, false); 444 iwl_trans_stop_hw(mvm->trans, false);
445 }
422 446
423 scan_size = sizeof(struct iwl_scan_cmd) + 447 scan_size = sizeof(struct iwl_scan_cmd) +
424 mvm->fw->ucode_capa.max_probe_length + 448 mvm->fw->ucode_capa.max_probe_length +
@@ -449,7 +473,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
  out_free:
 	iwl_phy_db_free(mvm->phy_db);
 	kfree(mvm->scan_cmd);
-	iwl_trans_stop_hw(trans, true);
+	if (!iwlwifi_mod_params.nvm_file)
+		iwl_trans_stop_hw(trans, true);
 	ieee80211_free_hw(mvm->hw);
 	return NULL;
 }
@@ -715,6 +740,9 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
 	case IWL_MVM_SCAN_OS:
 		ieee80211_scan_completed(mvm->hw, true);
 		break;
+	case IWL_MVM_SCAN_SCHED:
+		ieee80211_sched_scan_stopped(mvm->hw);
+		break;
 	}
 
 	if (mvm->restart_fw > 0)
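The queue bookkeeping added in iwl_op_mode_mvm_start() above reserves an aux queue and an aggregation queue range, shifting the split down on 16-queue hardware. A small self-contained sketch of that assignment; the struct and the 31-queue example value are illustrative assumptions, not taken from the patch:

	#include <stdio.h>

	struct queue_layout {
		int aux_queue;
		int first_agg_queue;
		int last_agg_queue;
	};

	static void assign_queues(struct queue_layout *q, int num_of_queues)
	{
		q->aux_queue = 15;			/* defaults */
		q->first_agg_queue = 16;
		q->last_agg_queue = num_of_queues - 1;
		if (num_of_queues == 16) {		/* smaller devices shift the split down */
			q->aux_queue = 11;
			q->first_agg_queue = 12;
		}
	}

	int main(void)
	{
		struct queue_layout q;

		assign_queues(&q, 31);			/* example queue count */
		printf("aux=%d agg=%d..%d\n", q.aux_queue, q.first_agg_queue,
		       q.last_agg_queue);
		assign_queues(&q, 16);
		printf("aux=%d agg=%d..%d\n", q.aux_queue, q.first_agg_queue,
		       q.last_agg_queue);
		return 0;
	}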
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d58e393324ef..550824aa84ea 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -300,11 +300,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
300 } 300 }
301 301
302 if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { 302 if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
303 cmd->rx_data_timeout_uapsd =
304 cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
305 cmd->tx_data_timeout_uapsd =
306 cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
307
308 if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | 303 if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
309 BIT(IEEE80211_AC_VI) | 304 BIT(IEEE80211_AC_VI) |
310 BIT(IEEE80211_AC_BE) | 305 BIT(IEEE80211_AC_BE) |
@@ -319,10 +314,31 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
319 } 314 }
320 315
321 cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP; 316 cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
322 cmd->heavy_tx_thld_packets = 317
323 IWL_MVM_PS_HEAVY_TX_THLD_PACKETS; 318 if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
324 cmd->heavy_rx_thld_packets = 319 cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
325 IWL_MVM_PS_HEAVY_RX_THLD_PACKETS; 320 cmd->rx_data_timeout_uapsd =
321 cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
322 cmd->tx_data_timeout_uapsd =
323 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
324 } else {
325 cmd->rx_data_timeout_uapsd =
326 cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
327 cmd->tx_data_timeout_uapsd =
328 cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
329 }
330
331 if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
332 cmd->heavy_tx_thld_packets =
333 IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
334 cmd->heavy_rx_thld_packets =
335 IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
336 } else {
337 cmd->heavy_tx_thld_packets =
338 IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
339 cmd->heavy_rx_thld_packets =
340 IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
341 }
326 cmd->heavy_tx_thld_percentage = 342 cmd->heavy_tx_thld_percentage =
327 IWL_MVM_PS_HEAVY_TX_THLD_PERCENT; 343 IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
328 cmd->heavy_rx_thld_percentage = 344 cmd->heavy_rx_thld_percentage =
@@ -430,6 +446,32 @@ static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
430 sizeof(cmd), &cmd); 446 sizeof(cmd), &cmd);
431} 447}
432 448
449static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
450{
451 struct iwl_device_power_cmd cmd = {
452 .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
453 };
454
455 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
456 return 0;
457
458 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
459 cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
460
461#ifdef CONFIG_IWLWIFI_DEBUGFS
462 if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
463 mvm->disable_power_off)
464 cmd.flags &=
465 cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
466#endif
467 IWL_DEBUG_POWER(mvm,
468 "Sending device power command with flags = 0x%X\n",
469 cmd.flags);
470
471 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd),
472 &cmd);
473}
474
433#ifdef CONFIG_IWLWIFI_DEBUGFS 475#ifdef CONFIG_IWLWIFI_DEBUGFS
434static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, 476static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
435 struct ieee80211_vif *vif, char *buf, 477 struct ieee80211_vif *vif, char *buf,
@@ -440,10 +482,11 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
440 482
441 iwl_mvm_power_build_cmd(mvm, vif, &cmd); 483 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
442 484
443 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n", 485 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
444 (cmd.flags & 486 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
445 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ? 487 (cmd.flags &
446 0 : 1); 488 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
489 0 : 1);
447 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", 490 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
448 iwlmvm_mod_params.power_scheme); 491 iwlmvm_mod_params.power_scheme);
449 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", 492 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -609,6 +652,7 @@ int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
609 652
610const struct iwl_mvm_power_ops pm_mac_ops = { 653const struct iwl_mvm_power_ops pm_mac_ops = {
611 .power_update_mode = iwl_mvm_power_mac_update_mode, 654 .power_update_mode = iwl_mvm_power_mac_update_mode,
655 .power_update_device_mode = iwl_mvm_power_update_device,
612 .power_disable = iwl_mvm_power_mac_disable, 656 .power_disable = iwl_mvm_power_mac_disable,
613#ifdef CONFIG_IWLWIFI_DEBUGFS 657#ifdef CONFIG_IWLWIFI_DEBUGFS
614 .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read, 658 .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
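The new device power command above enables power save by default, adds a CAM flag when the power scheme is CAM, and lets a debugfs knob clear power save again. A condensed sketch of that flag selection, using plain ints and made-up bit values instead of the driver's le16 fields:

	#define PS_ENA_FLAG	0x1	/* illustrative bit values, not the firmware's */
	#define CAM_FLAG	0x2

	static unsigned int device_power_flags(int scheme_is_cam, int force_power_off)
	{
		unsigned int flags = PS_ENA_FLAG;	/* power save on by default */

		if (scheme_is_cam)
			flags |= CAM_FLAG;		/* continuously awake mode */
		if (force_power_off)
			flags &= ~PS_ENA_FLAG;		/* debugfs override */
		return flags;
	}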
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 5c6ae16ec52b..17e2bc827f9a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -110,7 +110,8 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
110 data->n_interfaces[id]++; 110 data->n_interfaces[id]++;
111 break; 111 break;
112 case NL80211_IFTYPE_AP: 112 case NL80211_IFTYPE_AP:
113 if (mvmvif->ap_active) 113 case NL80211_IFTYPE_ADHOC:
114 if (mvmvif->ap_ibss_active)
114 data->n_interfaces[id]++; 115 data->n_interfaces[id]++;
115 break; 116 break;
116 case NL80211_IFTYPE_MONITOR: 117 case NL80211_IFTYPE_MONITOR:
@@ -119,16 +120,45 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
 		break;
 	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
-	case NL80211_IFTYPE_ADHOC:
-		if (vif->bss_conf.ibss_joined)
-			data->n_interfaces[id]++;
-		break;
 	default:
 		WARN_ON_ONCE(1);
 		break;
 	}
 }
 
+static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
+					 struct iwl_time_quota_cmd *cmd)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+	struct iwl_mvm_vif *mvmvif;
+	int i, phy_id = -1, beacon_int = 0;
+
+	if (!mvm->noa_duration || !mvm->noa_vif)
+		return;
+
+	mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
+	if (!mvmvif->ap_ibss_active)
+		return;
+
+	phy_id = mvmvif->phy_ctxt->id;
+	beacon_int = mvm->noa_vif->bss_conf.beacon_int;
+
+	for (i = 0; i < MAX_BINDINGS; i++) {
+		u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color);
+		u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+		u32 quota = le32_to_cpu(cmd->quotas[i].quota);
+
+		if (id != phy_id)
+			continue;
+
+		quota *= (beacon_int - mvm->noa_duration);
+		quota /= beacon_int;
+
+		cmd->quotas[i].quota = cpu_to_le32(quota);
+	}
+#endif
+}
+
 int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 {
 	struct iwl_time_quota_cmd cmd = {};
@@ -196,6 +226,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 	/* Give the remainder of the session to the first binding */
 	le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
 
+	iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
+
 	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
 				   sizeof(cmd), &cmd);
 	if (ret)
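The iwl_mvm_adjust_quota_for_noa() helper above shrinks a binding's airtime quota by the fraction of each beacon interval covered by the notice-of-absence period: quota' = quota * (beacon_int - noa_duration) / beacon_int. A tiny worked example with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int beacon_int = 100;		/* example beacon interval */
		unsigned int noa_duration = 25;		/* example NoA duration */
		unsigned int quota = 128;		/* example original quota */

		quota = quota * (beacon_int - noa_duration) / beacon_int;
		printf("scaled quota = %u\n", quota);	/* 128 * 75 / 100 = 96 */
		return 0;
	}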
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 4ffaa3fa153f..a0b4cc8d9c3b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -82,13 +82,24 @@ static const u8 ant_toggle_lookup[] = {
82 [ANT_ABC] = ANT_ABC, 82 [ANT_ABC] = ANT_ABC,
83}; 83};
84 84
85#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \ 85#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
86 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 86 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
87 IWL_RATE_SISO_##s##M_PLCP, \ 87 IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
88 IWL_RATE_MIMO2_##s##M_PLCP,\ 88 IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
89 IWL_RATE_##rp##M_INDEX, \ 89 IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
90 IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
91 IWL_RATE_##rp##M_INDEX, \
90 IWL_RATE_##rn##M_INDEX } 92 IWL_RATE_##rn##M_INDEX }
91 93
94#define IWL_DECLARE_MCS_RATE(s) \
95 [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \
96 IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
97 IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
98 IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
99 IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
100 IWL_RATE_INVM_INDEX, \
101 IWL_RATE_INVM_INDEX }
102
92/* 103/*
93 * Parameter order: 104 * Parameter order:
94 * rate, ht rate, prev rate, next rate 105 * rate, ht rate, prev rate, next rate
@@ -102,16 +113,17 @@ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
102 IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */ 113 IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
103 IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */ 114 IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */
104 IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */ 115 IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
105 IWL_DECLARE_RATE_INFO(6, 6, 5, 11), /* 6mbps */ 116 IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */
106 IWL_DECLARE_RATE_INFO(9, 6, 6, 11), /* 9mbps */ 117 IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */
107 IWL_DECLARE_RATE_INFO(12, 12, 11, 18), /* 12mbps */ 118 IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */
108 IWL_DECLARE_RATE_INFO(18, 18, 12, 24), /* 18mbps */ 119 IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */
109 IWL_DECLARE_RATE_INFO(24, 24, 18, 36), /* 24mbps */ 120 IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */
110 IWL_DECLARE_RATE_INFO(36, 36, 24, 48), /* 36mbps */ 121 IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */
111 IWL_DECLARE_RATE_INFO(48, 48, 36, 54), /* 48mbps */ 122 IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */
112 IWL_DECLARE_RATE_INFO(54, 54, 48, INV), /* 54mbps */ 123 IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */
113 IWL_DECLARE_RATE_INFO(60, 60, 48, INV), /* 60mbps */ 124 IWL_DECLARE_MCS_RATE(7), /* MCS 7 */
114 /* FIXME:RS: ^^ should be INV (legacy) */ 125 IWL_DECLARE_MCS_RATE(8), /* MCS 8 */
126 IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
115}; 127};
116 128
117static inline u8 rs_extract_rate(u32 rate_n_flags) 129static inline u8 rs_extract_rate(u32 rate_n_flags)
@@ -124,26 +136,30 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
124{ 136{
125 int idx = 0; 137 int idx = 0;
126 138
127 /* HT rate format */
128 if (rate_n_flags & RATE_MCS_HT_MSK) { 139 if (rate_n_flags & RATE_MCS_HT_MSK) {
129 idx = rs_extract_rate(rate_n_flags); 140 idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
130 141 idx += IWL_RATE_MCS_0_INDEX;
131 WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
132 if (idx >= IWL_RATE_MIMO2_6M_PLCP)
133 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
134 142
135 idx += IWL_FIRST_OFDM_RATE; 143 /* skip 9M not supported in HT*/
136 /* skip 9M not supported in ht*/
137 if (idx >= IWL_RATE_9M_INDEX) 144 if (idx >= IWL_RATE_9M_INDEX)
138 idx += 1; 145 idx += 1;
139 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) 146 if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
140 return idx; 147 return idx;
148 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
149 idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
150 idx += IWL_RATE_MCS_0_INDEX;
141 151
142 /* legacy rate format, search for match in table */ 152 /* skip 9M not supported in VHT*/
153 if (idx >= IWL_RATE_9M_INDEX)
154 idx++;
155 if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
156 return idx;
143 } else { 157 } else {
158 /* legacy rate format, search for match in table */
159
160 u8 legacy_rate = rs_extract_rate(rate_n_flags);
144 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) 161 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
145 if (iwl_rates[idx].plcp == 162 if (iwl_rates[idx].plcp == legacy_rate)
146 rs_extract_rate(rate_n_flags))
147 return idx; 163 return idx;
148 } 164 }
149 165
@@ -155,6 +171,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
155 struct ieee80211_sta *sta, 171 struct ieee80211_sta *sta,
156 struct iwl_lq_sta *lq_sta); 172 struct iwl_lq_sta *lq_sta);
157static void rs_fill_link_cmd(struct iwl_mvm *mvm, 173static void rs_fill_link_cmd(struct iwl_mvm *mvm,
174 struct ieee80211_sta *sta,
158 struct iwl_lq_sta *lq_sta, u32 rate_n_flags); 175 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
159static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); 176static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
160 177
@@ -180,35 +197,52 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
180 */ 197 */
181 198
182static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { 199static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
183 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 200 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
184}; 201};
185 202
186static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { 203/* Expected TpT tables. 4 indexes:
187 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */ 204 * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
188 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */ 205 */
189 {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */ 206static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
190 {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */ 207 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
208 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
209 {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
210 {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
191}; 211};
192 212
193static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { 213static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
194 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ 214 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
195 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ 215 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
196 {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */ 216 {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
197 {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */ 217 {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
218};
219
220static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
221 {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
222 {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
223 {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
224 {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
198}; 225};
199 226
200static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { 227static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
201 {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */ 228 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
202 {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */ 229 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
203 {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */ 230 {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
204 {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/ 231 {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
205}; 232};
206 233
207static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { 234static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
208 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ 235 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
209 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ 236 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
210 {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */ 237 {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
211 {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */ 238 {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
239};
240
241static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
242 {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
243 {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
244 {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
245 {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
212}; 246};
213 247
214/* mbps, mcs */ 248/* mbps, mcs */
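The expected-throughput tables above each have four rows, indexed per the comment's convention (0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI), and one column per entry in the enlarged rate list. A one-line sketch of how a row could be chosen from the SGI and aggregation state, assuming that indexing convention:

	static inline int expected_tpt_row(int is_sgi, int is_agg)
	{
		return (is_agg ? 2 : 0) + (is_sgi ? 1 : 0);	/* 0..3 per the comment */
	}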
@@ -263,7 +297,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
263 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); 297 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
264 298
265 if (lq_sta->dbg_fixed_rate) { 299 if (lq_sta->dbg_fixed_rate) {
266 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); 300 rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
267 iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false); 301 iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
268 } 302 }
269} 303}
@@ -275,17 +309,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
275{ 309{
276 int ret = -EAGAIN; 310 int ret = -EAGAIN;
277 311
278 /*
279 * Don't create TX aggregation sessions when in high
280 * BT traffic, as they would just be disrupted by BT.
281 */
282 if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) {
283 IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n",
284 BT_MBOX_MSG(&mvm->last_bt_notif,
285 3, TRAFFIC_LOAD));
286 return ret;
287 }
288
289 IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", 312 IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
290 sta->addr, tid); 313 sta->addr, tid);
291 ret = ieee80211_start_tx_ba_session(sta, tid, 5000); 314 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -416,49 +439,54 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
416 */ 439 */
417/* FIXME:RS:remove this function and put the flags statically in the table */ 440/* FIXME:RS:remove this function and put the flags statically in the table */
418static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm, 441static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
419 struct iwl_scale_tbl_info *tbl, 442 struct iwl_scale_tbl_info *tbl, int index)
420 int index, u8 use_green)
421{ 443{
422 u32 rate_n_flags = 0; 444 u32 rate_n_flags = 0;
423 445
446 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
447 RATE_MCS_ANT_ABC_MSK);
448
424 if (is_legacy(tbl->lq_type)) { 449 if (is_legacy(tbl->lq_type)) {
425 rate_n_flags = iwl_rates[index].plcp; 450 rate_n_flags |= iwl_rates[index].plcp;
426 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) 451 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
427 rate_n_flags |= RATE_MCS_CCK_MSK; 452 rate_n_flags |= RATE_MCS_CCK_MSK;
428 } else if (is_Ht(tbl->lq_type)) { 453 return rate_n_flags;
429 if (index > IWL_LAST_OFDM_RATE) { 454 }
455
456 if (is_ht(tbl->lq_type)) {
457 if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
430 IWL_ERR(mvm, "Invalid HT rate index %d\n", index); 458 IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
431 index = IWL_LAST_OFDM_RATE; 459 index = IWL_LAST_HT_RATE;
432 } 460 }
433 rate_n_flags = RATE_MCS_HT_MSK; 461 rate_n_flags |= RATE_MCS_HT_MSK;
434 462
435 if (is_siso(tbl->lq_type)) 463 if (is_ht_siso(tbl->lq_type))
436 rate_n_flags |= iwl_rates[index].plcp_siso; 464 rate_n_flags |= iwl_rates[index].plcp_ht_siso;
437 else if (is_mimo2(tbl->lq_type)) 465 else if (is_ht_mimo2(tbl->lq_type))
438 rate_n_flags |= iwl_rates[index].plcp_mimo2; 466 rate_n_flags |= iwl_rates[index].plcp_ht_mimo2;
439 else 467 else
440 WARN_ON_ONCE(1); 468 WARN_ON_ONCE(1);
469 } else if (is_vht(tbl->lq_type)) {
470 if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
471 IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
472 index = IWL_LAST_VHT_RATE;
473 }
474 rate_n_flags |= RATE_MCS_VHT_MSK;
475 if (is_vht_siso(tbl->lq_type))
476 rate_n_flags |= iwl_rates[index].plcp_vht_siso;
477 else if (is_vht_mimo2(tbl->lq_type))
478 rate_n_flags |= iwl_rates[index].plcp_vht_mimo2;
479 else
480 WARN_ON_ONCE(1);
481
441 } else { 482 } else {
442 IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type); 483 IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
443 } 484 }
444 485
445 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & 486 rate_n_flags |= tbl->bw;
446 RATE_MCS_ANT_ABC_MSK); 487 if (tbl->is_SGI)
447 488 rate_n_flags |= RATE_MCS_SGI_MSK;
448 if (is_Ht(tbl->lq_type)) { 489
449 if (tbl->is_ht40)
450 rate_n_flags |= RATE_MCS_CHAN_WIDTH_40;
451 if (tbl->is_SGI)
452 rate_n_flags |= RATE_MCS_SGI_MSK;
453
454 if (use_green) {
455 rate_n_flags |= RATE_HT_MCS_GF_MSK;
456 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
457 rate_n_flags &= ~RATE_MCS_SGI_MSK;
458 IWL_ERR(mvm, "GF was set with SGI:SISO\n");
459 }
460 }
461 }
462 return rate_n_flags; 490 return rate_n_flags;
463} 491}
464 492
@@ -473,7 +501,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
473{ 501{
474 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); 502 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
475 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags); 503 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
476 u8 mcs; 504 u8 nss;
477 505
478 memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win)); 506 memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
479 *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); 507 *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
@@ -483,41 +511,62 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
483 return -EINVAL; 511 return -EINVAL;
484 } 512 }
485 tbl->is_SGI = 0; /* default legacy setup */ 513 tbl->is_SGI = 0; /* default legacy setup */
486 tbl->is_ht40 = 0; 514 tbl->bw = 0;
487 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); 515 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
488 tbl->lq_type = LQ_NONE; 516 tbl->lq_type = LQ_NONE;
489 tbl->max_search = IWL_MAX_SEARCH; 517 tbl->max_search = IWL_MAX_SEARCH;
490 518
491 /* legacy rate format */ 519 /* Legacy */
492 if (!(rate_n_flags & RATE_MCS_HT_MSK)) { 520 if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
521 !(rate_n_flags & RATE_MCS_VHT_MSK)) {
493 if (num_of_ant == 1) { 522 if (num_of_ant == 1) {
494 if (band == IEEE80211_BAND_5GHZ) 523 if (band == IEEE80211_BAND_5GHZ)
495 tbl->lq_type = LQ_A; 524 tbl->lq_type = LQ_LEGACY_A;
496 else 525 else
497 tbl->lq_type = LQ_G; 526 tbl->lq_type = LQ_LEGACY_G;
498 } 527 }
499 /* HT rate format */ 528
500 } else { 529 return 0;
501 if (rate_n_flags & RATE_MCS_SGI_MSK) 530 }
502 tbl->is_SGI = 1; 531
503 532 /* HT or VHT */
504 if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */ 533 if (rate_n_flags & RATE_MCS_SGI_MSK)
505 tbl->is_ht40 = 1; 534 tbl->is_SGI = 1;
506 535
507 mcs = rs_extract_rate(rate_n_flags); 536 tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
508 537
509 /* SISO */ 538 if (rate_n_flags & RATE_MCS_HT_MSK) {
510 if (mcs <= IWL_RATE_SISO_60M_PLCP) { 539 nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
511 if (num_of_ant == 1) 540 RATE_HT_MCS_NSS_POS) + 1;
512 tbl->lq_type = LQ_SISO; /*else NONE*/ 541
513 /* MIMO2 */ 542 if (nss == 1) {
514 } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) { 543 tbl->lq_type = LQ_HT_SISO;
515 if (num_of_ant == 2) 544 WARN_ON_ONCE(num_of_ant != 1);
516 tbl->lq_type = LQ_MIMO2; 545 } else if (nss == 2) {
546 tbl->lq_type = LQ_HT_MIMO2;
547 WARN_ON_ONCE(num_of_ant != 2);
548 } else {
549 WARN_ON_ONCE(1);
550 }
551 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
552 nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
553 RATE_VHT_MCS_NSS_POS) + 1;
554
555 if (nss == 1) {
556 tbl->lq_type = LQ_VHT_SISO;
557 WARN_ON_ONCE(num_of_ant != 1);
558 } else if (nss == 2) {
559 tbl->lq_type = LQ_VHT_MIMO2;
560 WARN_ON_ONCE(num_of_ant != 2);
517 } else { 561 } else {
518 WARN_ON_ONCE(num_of_ant == 3); 562 WARN_ON_ONCE(1);
519 } 563 }
520 } 564 }
565
566 WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
567 WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
568 !is_vht(tbl->lq_type));
569
521 return 0; 570 return 0;
522} 571}
523 572
@@ -550,22 +599,6 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
550} 599}
551 600
552/** 601/**
553 * Green-field mode is valid if the station supports it and
554 * there are no non-GF stations present in the BSS.
555 */
556static bool rs_use_green(struct ieee80211_sta *sta)
557{
558 /*
559 * There's a bug somewhere in this code that causes the
560 * scaling to get stuck because GF+SGI can't be combined
561 * in SISO rates. Until we find that bug, disable GF, it
562 * has only limited benefit and we still interoperate with
563 * GF APs since we can always receive GF transmissions.
564 */
565 return false;
566}
567
568/**
569 * rs_get_supported_rates - get the available rates 602 * rs_get_supported_rates - get the available rates
570 * 603 *
571 * if management frame or broadcast frame only return 604 * if management frame or broadcast frame only return
@@ -576,16 +609,15 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
576 struct ieee80211_hdr *hdr, 609 struct ieee80211_hdr *hdr,
577 enum iwl_table_type rate_type) 610 enum iwl_table_type rate_type)
578{ 611{
579 if (is_legacy(rate_type)) { 612 if (is_legacy(rate_type))
580 return lq_sta->active_legacy_rate; 613 return lq_sta->active_legacy_rate;
581 } else { 614 else if (is_siso(rate_type))
582 if (is_siso(rate_type)) 615 return lq_sta->active_siso_rate;
583 return lq_sta->active_siso_rate; 616 else if (is_mimo2(rate_type))
584 else { 617 return lq_sta->active_mimo2_rate;
585 WARN_ON_ONCE(!is_mimo2(rate_type)); 618
586 return lq_sta->active_mimo2_rate; 619 WARN_ON_ONCE(1);
587 } 620 return 0;
588 }
589} 621}
590 622
591static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask, 623static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
@@ -652,7 +684,6 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
652 u16 rate_mask; 684 u16 rate_mask;
653 u16 high_low; 685 u16 high_low;
654 u8 switch_to_legacy = 0; 686 u8 switch_to_legacy = 0;
655 u8 is_green = lq_sta->is_green;
656 struct iwl_mvm *mvm = lq_sta->drv; 687 struct iwl_mvm *mvm = lq_sta->drv;
657 688
658 /* check if we need to switch from HT to legacy rates. 689 /* check if we need to switch from HT to legacy rates.
@@ -662,15 +693,15 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
662 switch_to_legacy = 1; 693 switch_to_legacy = 1;
663 scale_index = rs_ht_to_legacy[scale_index]; 694 scale_index = rs_ht_to_legacy[scale_index];
664 if (lq_sta->band == IEEE80211_BAND_5GHZ) 695 if (lq_sta->band == IEEE80211_BAND_5GHZ)
665 tbl->lq_type = LQ_A; 696 tbl->lq_type = LQ_LEGACY_A;
666 else 697 else
667 tbl->lq_type = LQ_G; 698 tbl->lq_type = LQ_LEGACY_G;
668 699
669 if (num_of_ant(tbl->ant_type) > 1) 700 if (num_of_ant(tbl->ant_type) > 1)
670 tbl->ant_type = 701 tbl->ant_type =
671 first_antenna(iwl_fw_valid_tx_ant(mvm->fw)); 702 first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
672 703
673 tbl->is_ht40 = 0; 704 tbl->bw = 0;
674 tbl->is_SGI = 0; 705 tbl->is_SGI = 0;
675 tbl->max_search = IWL_MAX_SEARCH; 706 tbl->max_search = IWL_MAX_SEARCH;
676 } 707 }
@@ -701,7 +732,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
701 low = scale_index; 732 low = scale_index;
702 733
703out: 734out:
704 return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); 735 return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
705} 736}
706 737
707/* 738/*
@@ -714,6 +745,18 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
714 (a->is_SGI == b->is_SGI); 745 (a->is_SGI == b->is_SGI);
715} 746}
716 747
748static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
749{
750 if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
751 return RATE_MCS_CHAN_WIDTH_40;
752 else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
753 return RATE_MCS_CHAN_WIDTH_80;
754 else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
755 return RATE_MCS_CHAN_WIDTH_160;
756
757 return RATE_MCS_CHAN_WIDTH_20;
758}
759
717/* 760/*
718 * mac80211 sends us Tx status 761 * mac80211 sends us Tx status
719 */ 762 */
@@ -783,16 +826,23 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
783 */ 826 */
784 if (info->band == IEEE80211_BAND_2GHZ) 827 if (info->band == IEEE80211_BAND_2GHZ)
785 mac_index += IWL_FIRST_OFDM_RATE; 828 mac_index += IWL_FIRST_OFDM_RATE;
829 } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
830 mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
831 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
832 mac_index++;
786 } 833 }
834
787 /* Here we actually compare this rate to the latest LQ command */ 835 /* Here we actually compare this rate to the latest LQ command */
788 if ((mac_index < 0) || 836 if ((mac_index < 0) ||
789 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || 837 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
790 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || 838 (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
791 (tbl_type.ant_type != info->status.antenna) || 839 (tbl_type.ant_type != info->status.antenna) ||
792 (!!(tx_rate & RATE_MCS_HT_MSK) != 840 (!!(tx_rate & RATE_MCS_HT_MSK) !=
793 !!(mac_flags & IEEE80211_TX_RC_MCS)) || 841 !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
842 (!!(tx_rate & RATE_MCS_VHT_MSK) !=
843 !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
794 (!!(tx_rate & RATE_HT_MCS_GF_MSK) != 844 (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
795 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || 845 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
796 (rs_index != mac_index)) { 846 (rs_index != mac_index)) {
797 IWL_DEBUG_RATE(mvm, 847 IWL_DEBUG_RATE(mvm,
798 "initial rate %d does not match %d (0x%x)\n", 848 "initial rate %d does not match %d (0x%x)\n",
@@ -947,7 +997,8 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
947 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT]; 997 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
948 998
949 /* Check for invalid LQ type */ 999 /* Check for invalid LQ type */
950 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { 1000 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
1001 !(is_vht(tbl->lq_type)))) {
951 tbl->expected_tpt = expected_tpt_legacy; 1002 tbl->expected_tpt = expected_tpt_legacy;
952 return; 1003 return;
953 } 1004 }
@@ -958,18 +1009,40 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
958 return; 1009 return;
959 } 1010 }
960 1011
1012 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
961 /* Choose among many HT tables depending on number of streams 1013 /* Choose among many HT tables depending on number of streams
962 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation 1014 * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
963 * status */ 1015 * status */
964 if (is_siso(tbl->lq_type) && !tbl->is_ht40) 1016 if (is_siso(tbl->lq_type)) {
965 ht_tbl_pointer = expected_tpt_siso20MHz; 1017 switch (tbl->bw) {
966 else if (is_siso(tbl->lq_type)) 1018 case RATE_MCS_CHAN_WIDTH_20:
967 ht_tbl_pointer = expected_tpt_siso40MHz; 1019 ht_tbl_pointer = expected_tpt_siso_20MHz;
968 else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40) 1020 break;
969 ht_tbl_pointer = expected_tpt_mimo2_20MHz; 1021 case RATE_MCS_CHAN_WIDTH_40:
970 else { 1022 ht_tbl_pointer = expected_tpt_siso_40MHz;
971 WARN_ON_ONCE(!is_mimo2(tbl->lq_type)); 1023 break;
972 ht_tbl_pointer = expected_tpt_mimo2_40MHz; 1024 case RATE_MCS_CHAN_WIDTH_80:
1025 ht_tbl_pointer = expected_tpt_siso_80MHz;
1026 break;
1027 default:
1028 WARN_ON_ONCE(1);
1029 }
1030 } else if (is_mimo2(tbl->lq_type)) {
1031 switch (tbl->bw) {
1032 case RATE_MCS_CHAN_WIDTH_20:
1033 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1034 break;
1035 case RATE_MCS_CHAN_WIDTH_40:
1036 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1037 break;
1038 case RATE_MCS_CHAN_WIDTH_80:
1039 ht_tbl_pointer = expected_tpt_mimo2_80MHz;
1040 break;
1041 default:
1042 WARN_ON_ONCE(1);
1043 }
1044 } else {
1045 WARN_ON_ONCE(1);
973 } 1046 }
974 1047
975 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ 1048 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
@@ -1084,9 +1157,47 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
1084 return new_rate; 1157 return new_rate;
1085} 1158}
1086 1159
1087static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta) 1160/* Move to the next action and wrap around to the first action in case
1161 * we're at the last action. Assumes actions start at 0.
1162 */
1163static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
1164 u8 last_action)
1165{
1166 BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
1167 BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
1168 BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
1169
1170 tbl->action = (tbl->action + 1) % (last_action + 1);
1171}
1172
1173static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
1174 struct ieee80211_sta *sta)
1175{
1176 if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
1177 tbl->bw = RATE_MCS_CHAN_WIDTH_80;
1178 else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
1179 tbl->bw = RATE_MCS_CHAN_WIDTH_40;
1180 else
1181 tbl->bw = RATE_MCS_CHAN_WIDTH_20;
1182}
1183
1184static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
1185 struct ieee80211_sta *sta)
1088{ 1186{
1089 return sta->bandwidth >= IEEE80211_STA_RX_BW_40; 1187 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1188 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
1189
1190 if (is_ht20(tbl) && (ht_cap->cap &
1191 IEEE80211_HT_CAP_SGI_20))
1192 return true;
1193 if (is_ht40(tbl) && (ht_cap->cap &
1194 IEEE80211_HT_CAP_SGI_40))
1195 return true;
1196 if (is_ht80(tbl) && (vht_cap->cap &
1197 IEEE80211_VHT_CAP_SHORT_GI_80))
1198 return true;
1199
1200 return false;
1090} 1201}
1091 1202
1092/* 1203/*
@@ -1099,7 +1210,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
1099{ 1210{
1100 u16 rate_mask; 1211 u16 rate_mask;
1101 s32 rate; 1212 s32 rate;
1102 s8 is_green = lq_sta->is_green;
1103 1213
1104 if (!sta->ht_cap.ht_supported) 1214 if (!sta->ht_cap.ht_supported)
1105 return -1; 1215 return -1;
@@ -1113,16 +1223,12 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
1113 1223
1114 IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n"); 1224 IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
1115 1225
1116 tbl->lq_type = LQ_MIMO2; 1226 tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
1117 tbl->action = 0; 1227 tbl->action = 0;
1118 tbl->max_search = IWL_MAX_SEARCH; 1228 tbl->max_search = IWL_MAX_SEARCH;
1119 rate_mask = lq_sta->active_mimo2_rate; 1229 rate_mask = lq_sta->active_mimo2_rate;
1120 1230
1121 if (iwl_is_ht40_tx_allowed(sta)) 1231 rs_set_bw_from_sta(tbl, sta);
1122 tbl->is_ht40 = 1;
1123 else
1124 tbl->is_ht40 = 0;
1125
1126 rs_set_expected_tpt_table(lq_sta, tbl); 1232 rs_set_expected_tpt_table(lq_sta, tbl);
1127 1233
1128 rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index); 1234 rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
@@ -1134,10 +1240,10 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
1134 rate, rate_mask); 1240 rate, rate_mask);
1135 return -1; 1241 return -1;
1136 } 1242 }
1137 tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green); 1243 tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
1138 1244
1139 IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n", 1245 IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
1140 tbl->current_rate, is_green); 1246 tbl->current_rate);
1141 return 0; 1247 return 0;
1142} 1248}
1143 1249
@@ -1150,7 +1256,6 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
1150 struct iwl_scale_tbl_info *tbl, int index) 1256 struct iwl_scale_tbl_info *tbl, int index)
1151{ 1257{
1152 u16 rate_mask; 1258 u16 rate_mask;
1153 u8 is_green = lq_sta->is_green;
1154 s32 rate; 1259 s32 rate;
1155 1260
1156 if (!sta->ht_cap.ht_supported) 1261 if (!sta->ht_cap.ht_supported)
@@ -1158,19 +1263,12 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
1158 1263
1159 IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n"); 1264 IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
1160 1265
1161 tbl->lq_type = LQ_SISO; 1266 tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
1162 tbl->action = 0; 1267 tbl->action = 0;
1163 tbl->max_search = IWL_MAX_SEARCH; 1268 tbl->max_search = IWL_MAX_SEARCH;
1164 rate_mask = lq_sta->active_siso_rate; 1269 rate_mask = lq_sta->active_siso_rate;
1165 1270
1166 if (iwl_is_ht40_tx_allowed(sta)) 1271 rs_set_bw_from_sta(tbl, sta);
1167 tbl->is_ht40 = 1;
1168 else
1169 tbl->is_ht40 = 0;
1170
1171 if (is_green)
1172 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1173
1174 rs_set_expected_tpt_table(lq_sta, tbl); 1272 rs_set_expected_tpt_table(lq_sta, tbl);
1175 rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index); 1273 rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
1176 1274
@@ -1181,9 +1279,9 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
1181 rate, rate_mask); 1279 rate, rate_mask);
1182 return -1; 1280 return -1;
1183 } 1281 }
1184 tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green); 1282 tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
1185 IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n", 1283 IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
1186 tbl->current_rate, is_green); 1284 tbl->current_rate);
1187 return 0; 1285 return 0;
1188} 1286}
1189 1287
@@ -1211,14 +1309,10 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
1211 while (1) { 1309 while (1) {
1212 lq_sta->action_counter++; 1310 lq_sta->action_counter++;
1213 switch (tbl->action) { 1311 switch (tbl->action) {
1214 case IWL_LEGACY_SWITCH_ANTENNA1: 1312 case IWL_LEGACY_SWITCH_ANTENNA:
1215 case IWL_LEGACY_SWITCH_ANTENNA2:
1216 IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n"); 1313 IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
1217 1314
1218 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 && 1315 if (tx_chains_num <= 1)
1219 tx_chains_num <= 1) ||
1220 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1221 tx_chains_num <= 2))
1222 break; 1316 break;
1223 1317
1224 /* Don't change antenna if success has been great */ 1318 /* Don't change antenna if success has been great */
@@ -1273,9 +1367,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
1273 default: 1367 default:
1274 WARN_ON_ONCE(1); 1368 WARN_ON_ONCE(1);
1275 } 1369 }
1276 tbl->action++; 1370 rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
1277 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
1278 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1279 1371
1280 if (tbl->action == start_action) 1372 if (tbl->action == start_action)
1281 break; 1373 break;
@@ -1285,9 +1377,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
1285 1377
1286out: 1378out:
1287 lq_sta->search_better_tbl = 1; 1379 lq_sta->search_better_tbl = 1;
1288 tbl->action++; 1380 rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
1289 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
1290 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1291 if (update_search_tbl_counter) 1381 if (update_search_tbl_counter)
1292 search_tbl->action = tbl->action; 1382 search_tbl->action = tbl->action;
1293 return 0; 1383 return 0;
@@ -1300,12 +1390,10 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
1300 struct iwl_lq_sta *lq_sta, 1390 struct iwl_lq_sta *lq_sta,
1301 struct ieee80211_sta *sta, int index) 1391 struct ieee80211_sta *sta, int index)
1302{ 1392{
1303 u8 is_green = lq_sta->is_green;
1304 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 1393 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1305 struct iwl_scale_tbl_info *search_tbl = 1394 struct iwl_scale_tbl_info *search_tbl =
1306 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1395 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1307 struct iwl_rate_scale_data *window = &(tbl->win[index]); 1396 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1308 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1309 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1397 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1310 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1398 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1311 u8 start_action; 1399 u8 start_action;
@@ -1314,40 +1402,17 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
1314 u8 update_search_tbl_counter = 0; 1402 u8 update_search_tbl_counter = 0;
1315 int ret; 1403 int ret;
1316 1404
1317 switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { 1405 if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
1318 case IWL_BT_COEX_TRAFFIC_LOAD_NONE: 1406 !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
1319 /* nothing */ 1407 tbl->action = IWL_SISO_SWITCH_ANTENNA;
1320 break;
1321 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1322 /* avoid antenna B unless MIMO */
1323 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
1324 tbl->action = IWL_SISO_SWITCH_MIMO2;
1325 break;
1326 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1327 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1328 /* avoid antenna B and MIMO */
1329 valid_tx_ant =
1330 first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
1331 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1332 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1333 break;
1334 default:
1335 IWL_ERR(mvm, "Invalid BT load %d",
1336 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
1337 break;
1338 }
1339 1408
1340 start_action = tbl->action; 1409 start_action = tbl->action;
1341 while (1) { 1410 while (1) {
1342 lq_sta->action_counter++; 1411 lq_sta->action_counter++;
1343 switch (tbl->action) { 1412 switch (tbl->action) {
1344 case IWL_SISO_SWITCH_ANTENNA1: 1413 case IWL_SISO_SWITCH_ANTENNA:
1345 case IWL_SISO_SWITCH_ANTENNA2:
1346 IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n"); 1414 IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
1347 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && 1415 if (tx_chains_num <= 1)
1348 tx_chains_num <= 1) ||
1349 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1350 tx_chains_num <= 2))
1351 break; 1416 break;
1352 1417
1353 if (window->success_ratio >= IWL_RS_GOOD_RATIO && 1418 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
@@ -1380,23 +1445,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
1380 goto out; 1445 goto out;
1381 break; 1446 break;
1382 case IWL_SISO_SWITCH_GI: 1447 case IWL_SISO_SWITCH_GI:
1383 if (!tbl->is_ht40 && !(ht_cap->cap & 1448 if (!rs_sgi_allowed(tbl, sta))
1384 IEEE80211_HT_CAP_SGI_20))
1385 break;
1386 if (tbl->is_ht40 && !(ht_cap->cap &
1387 IEEE80211_HT_CAP_SGI_40))
1388 break; 1449 break;
1389 1450
1390 IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n"); 1451 IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
1391 1452
1392 memcpy(search_tbl, tbl, sz); 1453 memcpy(search_tbl, tbl, sz);
1393 if (is_green) {
1394 if (!tbl->is_SGI)
1395 break;
1396 else
1397 IWL_ERR(mvm,
1398 "SGI was set in GF+SISO\n");
1399 }
1400 search_tbl->is_SGI = !tbl->is_SGI; 1454 search_tbl->is_SGI = !tbl->is_SGI;
1401 rs_set_expected_tpt_table(lq_sta, search_tbl); 1455 rs_set_expected_tpt_table(lq_sta, search_tbl);
1402 if (tbl->is_SGI) { 1456 if (tbl->is_SGI) {
@@ -1405,16 +1459,13 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
1405 break; 1459 break;
1406 } 1460 }
1407 search_tbl->current_rate = 1461 search_tbl->current_rate =
1408 rate_n_flags_from_tbl(mvm, search_tbl, 1462 rate_n_flags_from_tbl(mvm, search_tbl, index);
1409 index, is_green);
1410 update_search_tbl_counter = 1; 1463 update_search_tbl_counter = 1;
1411 goto out; 1464 goto out;
1412 default: 1465 default:
1413 WARN_ON_ONCE(1); 1466 WARN_ON_ONCE(1);
1414 } 1467 }
1415 tbl->action++; 1468 rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
1416 if (tbl->action > IWL_SISO_SWITCH_GI)
1417 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1418 1469
1419 if (tbl->action == start_action) 1470 if (tbl->action == start_action)
1420 break; 1471 break;
@@ -1424,9 +1475,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
1424 1475
1425 out: 1476 out:
1426 lq_sta->search_better_tbl = 1; 1477 lq_sta->search_better_tbl = 1;
1427 tbl->action++; 1478 rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
1428 if (tbl->action > IWL_SISO_SWITCH_GI)
1429 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1430 if (update_search_tbl_counter) 1479 if (update_search_tbl_counter)
1431 search_tbl->action = tbl->action; 1480 search_tbl->action = tbl->action;
1432 1481
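rs_move_next_action() itself is not part of this diff; from the call sites above it presumably advances tbl->action and wraps past the table's last action back to the first one (index 0 in the new enums). A minimal sketch of that assumed behaviour, with an illustrative constant value:

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour only: step the action index and wrap past the last
 * valid action back to 0. */
static void rs_move_next_action(uint8_t *action, uint8_t last_action)
{
	*action = (*action + 1) % (last_action + 1);
}

int main(void)
{
	uint8_t action = 2;	/* e.g. IWL_SISO_SWITCH_GI == IWL_SISO_LAST_ACTION */

	rs_move_next_action(&action, 2);
	printf("%u\n", (unsigned)action);	/* wraps back to IWL_SISO_SWITCH_ANTENNA (0) */
	return 0;
}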
@@ -1440,63 +1489,20 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
1440 struct iwl_lq_sta *lq_sta, 1489 struct iwl_lq_sta *lq_sta,
1441 struct ieee80211_sta *sta, int index) 1490 struct ieee80211_sta *sta, int index)
1442{ 1491{
1443 s8 is_green = lq_sta->is_green;
1444 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 1492 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1445 struct iwl_scale_tbl_info *search_tbl = 1493 struct iwl_scale_tbl_info *search_tbl =
1446 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1494 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1447 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1448 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1449 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1495 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1450 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1496 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1451 u8 start_action; 1497 u8 start_action;
1452 u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw); 1498 u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
1453 u8 tx_chains_num = num_of_ant(valid_tx_ant);
1454 u8 update_search_tbl_counter = 0; 1499 u8 update_search_tbl_counter = 0;
1455 int ret; 1500 int ret;
1456 1501
1457 switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
1458 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1459 /* nothing */
1460 break;
1461 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1462 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1463 /* avoid antenna B and MIMO */
1464 if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
1465 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1466 break;
1467 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1468 /* avoid antenna B unless MIMO */
1469 if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1470 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1471 break;
1472 default:
1473 IWL_ERR(mvm, "Invalid BT load %d",
1474 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
1475 break;
1476 }
1477
1478 start_action = tbl->action; 1502 start_action = tbl->action;
1479 while (1) { 1503 while (1) {
1480 lq_sta->action_counter++; 1504 lq_sta->action_counter++;
1481 switch (tbl->action) { 1505 switch (tbl->action) {
1482 case IWL_MIMO2_SWITCH_ANTENNA1:
1483 case IWL_MIMO2_SWITCH_ANTENNA2:
1484 IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n");
1485
1486 if (tx_chains_num <= 2)
1487 break;
1488
1489 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1490 break;
1491
1492 memcpy(search_tbl, tbl, sz);
1493 if (rs_toggle_antenna(valid_tx_ant,
1494 &search_tbl->current_rate,
1495 search_tbl)) {
1496 update_search_tbl_counter = 1;
1497 goto out;
1498 }
1499 break;
1500 case IWL_MIMO2_SWITCH_SISO_A: 1506 case IWL_MIMO2_SWITCH_SISO_A:
1501 case IWL_MIMO2_SWITCH_SISO_B: 1507 case IWL_MIMO2_SWITCH_SISO_B:
1502 IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n"); 1508 IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
@@ -1521,11 +1527,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
1521 break; 1527 break;
1522 1528
1523 case IWL_MIMO2_SWITCH_GI: 1529 case IWL_MIMO2_SWITCH_GI:
1524 if (!tbl->is_ht40 && !(ht_cap->cap & 1530 if (!rs_sgi_allowed(tbl, sta))
1525 IEEE80211_HT_CAP_SGI_20))
1526 break;
1527 if (tbl->is_ht40 && !(ht_cap->cap &
1528 IEEE80211_HT_CAP_SGI_40))
1529 break; 1531 break;
1530 1532
1531 IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n"); 1533 IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
@@ -1546,16 +1548,13 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
1546 break; 1548 break;
1547 } 1549 }
1548 search_tbl->current_rate = 1550 search_tbl->current_rate =
1549 rate_n_flags_from_tbl(mvm, search_tbl, 1551 rate_n_flags_from_tbl(mvm, search_tbl, index);
1550 index, is_green);
1551 update_search_tbl_counter = 1; 1552 update_search_tbl_counter = 1;
1552 goto out; 1553 goto out;
1553 default: 1554 default:
1554 WARN_ON_ONCE(1); 1555 WARN_ON_ONCE(1);
1555 } 1556 }
1556 tbl->action++; 1557 rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
1557 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1558 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1559 1558
1560 if (tbl->action == start_action) 1559 if (tbl->action == start_action)
1561 break; 1560 break;
@@ -1564,9 +1563,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
1564 return 0; 1563 return 0;
1565 out: 1564 out:
1566 lq_sta->search_better_tbl = 1; 1565 lq_sta->search_better_tbl = 1;
1567 tbl->action++; 1566 rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
1568 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1569 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1570 if (update_search_tbl_counter) 1567 if (update_search_tbl_counter)
1571 search_tbl->action = tbl->action; 1568 search_tbl->action = tbl->action;
1572 1569
@@ -1660,15 +1657,16 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1660 * setup rate table in uCode 1657 * setup rate table in uCode
1661 */ 1658 */
1662static void rs_update_rate_tbl(struct iwl_mvm *mvm, 1659static void rs_update_rate_tbl(struct iwl_mvm *mvm,
1660 struct ieee80211_sta *sta,
1663 struct iwl_lq_sta *lq_sta, 1661 struct iwl_lq_sta *lq_sta,
1664 struct iwl_scale_tbl_info *tbl, 1662 struct iwl_scale_tbl_info *tbl,
1665 int index, u8 is_green) 1663 int index)
1666{ 1664{
1667 u32 rate; 1665 u32 rate;
1668 1666
1669 /* Update uCode's rate table. */ 1667 /* Update uCode's rate table. */
1670 rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green); 1668 rate = rate_n_flags_from_tbl(mvm, tbl, index);
1671 rs_fill_link_cmd(mvm, lq_sta, rate); 1669 rs_fill_link_cmd(mvm, sta, lq_sta, rate);
1672 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false); 1670 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
1673} 1671}
1674 1672
@@ -1712,7 +1710,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
1712 u8 update_lq = 0; 1710 u8 update_lq = 0;
1713 struct iwl_scale_tbl_info *tbl, *tbl1; 1711 struct iwl_scale_tbl_info *tbl, *tbl1;
1714 u16 rate_scale_index_msk = 0; 1712 u16 rate_scale_index_msk = 0;
1715 u8 is_green = 0;
1716 u8 active_tbl = 0; 1713 u8 active_tbl = 0;
1717 u8 done_search = 0; 1714 u8 done_search = 0;
1718 u16 high_low; 1715 u16 high_low;
@@ -1754,11 +1751,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
1754 active_tbl = 1 - lq_sta->active_tbl; 1751 active_tbl = 1 - lq_sta->active_tbl;
1755 1752
1756 tbl = &(lq_sta->lq_info[active_tbl]); 1753 tbl = &(lq_sta->lq_info[active_tbl]);
1757 if (is_legacy(tbl->lq_type))
1758 lq_sta->is_green = 0;
1759 else
1760 lq_sta->is_green = rs_use_green(sta);
1761 is_green = lq_sta->is_green;
1762 1754
1763 /* current tx rate */ 1755 /* current tx rate */
1764 index = lq_sta->last_txrate_idx; 1756 index = lq_sta->last_txrate_idx;
@@ -1797,7 +1789,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
1797 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 1789 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1798 /* get "active" rate info */ 1790 /* get "active" rate info */
1799 index = iwl_hwrate_to_plcp_idx(tbl->current_rate); 1791 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
1800 rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green); 1792 rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
1801 } 1793 }
1802 return; 1794 return;
1803 } 1795 }
@@ -1978,24 +1970,24 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
1978 (current_tpt > (100 * tbl->expected_tpt[low])))) 1970 (current_tpt > (100 * tbl->expected_tpt[low]))))
1979 scale_action = 0; 1971 scale_action = 0;
1980 1972
1981 if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 1973 if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
1982 IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) { 1974 IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
1983 if (lq_sta->last_bt_traffic > 1975 if (lq_sta->last_bt_traffic >
1984 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { 1976 le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
1985 /* 1977 /*
1986 * don't set scale_action, don't want to scale up if 1978 * don't set scale_action, don't want to scale up if
1987 * the rate scale doesn't otherwise think that is a 1979 * the rate scale doesn't otherwise think that is a
1988 * good idea. 1980 * good idea.
1989 */ 1981 */
1990 } else if (lq_sta->last_bt_traffic <= 1982 } else if (lq_sta->last_bt_traffic <=
1991 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { 1983 le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
1992 scale_action = -1; 1984 scale_action = -1;
1993 } 1985 }
1994 } 1986 }
1995 lq_sta->last_bt_traffic = 1987 lq_sta->last_bt_traffic =
1996 BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD); 1988 le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
1997 1989
1998 if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 1990 if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
1999 IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) { 1991 IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
2000 /* search for a new modulation */ 1992 /* search for a new modulation */
2001 rs_stay_in_table(lq_sta, true); 1993 rs_stay_in_table(lq_sta, true);
@@ -2032,7 +2024,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
2032lq_update: 2024lq_update:
2033 /* Replace uCode's rate table for the destination station. */ 2025 /* Replace uCode's rate table for the destination station. */
2034 if (update_lq) 2026 if (update_lq)
2035 rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green); 2027 rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
2036 2028
2037 rs_stay_in_table(lq_sta, false); 2029 rs_stay_in_table(lq_sta, false);
2038 2030
@@ -2071,7 +2063,7 @@ lq_update:
2071 IWL_DEBUG_RATE(mvm, 2063 IWL_DEBUG_RATE(mvm,
2072 "Switch current mcs: %X index: %d\n", 2064 "Switch current mcs: %X index: %d\n",
2073 tbl->current_rate, index); 2065 tbl->current_rate, index);
2074 rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate); 2066 rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
2075 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false); 2067 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
2076 } else { 2068 } else {
2077 done_search = 1; 2069 done_search = 1;
@@ -2113,7 +2105,7 @@ lq_update:
2113 } 2105 }
2114 2106
2115out: 2107out:
2116 tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green); 2108 tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
2117 lq_sta->last_txrate_idx = index; 2109 lq_sta->last_txrate_idx = index;
2118} 2110}
2119 2111
@@ -2140,7 +2132,6 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
2140 int rate_idx; 2132 int rate_idx;
2141 int i; 2133 int i;
2142 u32 rate; 2134 u32 rate;
2143 u8 use_green = rs_use_green(sta);
2144 u8 active_tbl = 0; 2135 u8 active_tbl = 0;
2145 u8 valid_tx_ant; 2136 u8 valid_tx_ant;
2146 2137
@@ -2172,10 +2163,10 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
2172 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) 2163 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2173 rs_toggle_antenna(valid_tx_ant, &rate, tbl); 2164 rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2174 2165
2175 rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green); 2166 rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
2176 tbl->current_rate = rate; 2167 tbl->current_rate = rate;
2177 rs_set_expected_tpt_table(lq_sta, tbl); 2168 rs_set_expected_tpt_table(lq_sta, tbl);
2178 rs_fill_link_cmd(NULL, lq_sta, rate); 2169 rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
2179 /* TODO restore station should remember the lq cmd */ 2170 /* TODO restore station should remember the lq cmd */
2180 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true); 2171 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
2181} 2172}
@@ -2190,7 +2181,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2190 struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); 2181 struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
2191 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2182 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2192 struct iwl_lq_sta *lq_sta = mvm_sta; 2183 struct iwl_lq_sta *lq_sta = mvm_sta;
2193 int rate_idx;
2194 2184
2195 IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n"); 2185 IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
2196 2186
@@ -2215,36 +2205,9 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2215 if (rate_control_send_low(sta, mvm_sta, txrc)) 2205 if (rate_control_send_low(sta, mvm_sta, txrc))
2216 return; 2206 return;
2217 2207
2218 rate_idx = lq_sta->last_txrate_idx; 2208 iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
2219 2209 info->band, &info->control.rates[0]);
2220 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { 2210
2221 rate_idx -= IWL_FIRST_OFDM_RATE;
2222 /* 6M and 9M shared same MCS index */
2223 rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
2224 WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
2225 IWL_RATE_MIMO3_6M_PLCP);
2226 if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
2227 IWL_RATE_MIMO2_6M_PLCP)
2228 rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
2229 info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
2230 if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
2231 info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
2232 if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
2233 info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2234 if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK)
2235 info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
2236 } else {
2237 /* Check for invalid rates */
2238 if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
2239 ((sband->band == IEEE80211_BAND_5GHZ) &&
2240 (rate_idx < IWL_FIRST_OFDM_RATE)))
2241 rate_idx = rate_lowest_index(sband, sta);
2242 /* On valid 5 GHz rate, adjust index */
2243 else if (sband->band == IEEE80211_BAND_5GHZ)
2244 rate_idx -= IWL_FIRST_OFDM_RATE;
2245 info->control.rates[0].flags = 0;
2246 }
2247 info->control.rates[0].idx = rate_idx;
2248 info->control.rates[0].count = 1; 2211 info->control.rates[0].count = 1;
2249} 2212}
2250 2213
@@ -2261,6 +2224,24 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
2261 return &sta_priv->lq_sta; 2224 return &sta_priv->lq_sta;
2262} 2225}
2263 2226
2227static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
2228 int nss)
2229{
2230 u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
2231 (0x3 << (2 * (nss - 1)));
2232 rx_mcs >>= (2 * (nss - 1));
2233
2234 if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
2235 return IWL_RATE_MCS_7_INDEX;
2236 else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
2237 return IWL_RATE_MCS_8_INDEX;
2238 else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
2239 return IWL_RATE_MCS_9_INDEX;
2240
2241 WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
2242 return -1;
2243}
2244
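The helper above reads the VHT rx_mcs_map, which packs a 2-bit "highest supported MCS" field per spatial stream. A standalone sketch of the same extraction, using a made-up map value (the field encodings 0/1/2/3 meaning MCS 0-7 / 0-8 / 0-9 / unsupported are taken as given):

#include <stdio.h>

int main(void)
{
	unsigned short rx_mcs_map = 0xfff6;	/* NSS1: MCS 0-9, NSS2: MCS 0-8, rest unsupported */
	int nss;

	for (nss = 1; nss <= 8; nss++) {
		unsigned int field = (rx_mcs_map >> (2 * (nss - 1))) & 0x3;

		/* 0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported */
		printf("NSS %d: field %u\n", nss, field);
	}
	return 0;
}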
2264/* 2245/*
2265 * Called after adding a new station to initialize rate scaling 2246 * Called after adding a new station to initialize rate scaling
2266 */ 2247 */
@@ -2270,6 +2251,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2270 int i, j; 2251 int i, j;
2271 struct ieee80211_hw *hw = mvm->hw; 2252 struct ieee80211_hw *hw = mvm->hw;
2272 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2253 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2254 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2273 struct iwl_mvm_sta *sta_priv; 2255 struct iwl_mvm_sta *sta_priv;
2274 struct iwl_lq_sta *lq_sta; 2256 struct iwl_lq_sta *lq_sta;
2275 struct ieee80211_supported_band *sband; 2257 struct ieee80211_supported_band *sband;
@@ -2298,7 +2280,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2298 2280
2299 lq_sta->max_rate_idx = -1; 2281 lq_sta->max_rate_idx = -1;
2300 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; 2282 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2301 lq_sta->is_green = rs_use_green(sta);
2302 lq_sta->band = sband->band; 2283 lq_sta->band = sband->band;
2303 /* 2284 /*
2304 * active legacy rates as per supported rates bitmap 2285 * active legacy rates as per supported rates bitmap
@@ -2308,25 +2289,54 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2308 for_each_set_bit(i, &supp, BITS_PER_LONG) 2289 for_each_set_bit(i, &supp, BITS_PER_LONG)
2309 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); 2290 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
2310 2291
2311 /* 2292 /* TODO: should probably account for rx_highest for both HT/VHT */
2312 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2293 if (!vht_cap || !vht_cap->vht_supported) {
2313 * supp_rates[] does not; shift to convert format, force 9 MBits off. 2294 /* active_siso_rate mask includes 9 MBits (bit 5),
2314 */ 2295 * and CCK (bits 0-3), supp_rates[] does not;
2315 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; 2296 * shift to convert format, force 9 MBits off.
2316 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; 2297 */
2317 lq_sta->active_siso_rate &= ~((u16)0x2); 2298 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2318 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; 2299 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2300 lq_sta->active_siso_rate &= ~((u16)0x2);
2301 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2302
2303 /* Same here */
2304 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2305 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2306 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2307 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2308
2309 lq_sta->is_vht = false;
2310 } else {
2311 int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
2312 if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
2313 for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
2314 if (i == IWL_RATE_9M_INDEX)
2315 continue;
2316
2317 lq_sta->active_siso_rate |= BIT(i);
2318 }
2319 }
2320
2321 highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
2322 if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
2323 for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
2324 if (i == IWL_RATE_9M_INDEX)
2325 continue;
2319 2326
2320 /* Same here */ 2327 lq_sta->active_mimo2_rate |= BIT(i);
2321 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; 2328 }
2322 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; 2329 }
2323 lq_sta->active_mimo2_rate &= ~((u16)0x2); 2330
2324 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; 2331 /* TODO: avoid MCS9 in 20Mhz which isn't valid for 11ac */
2332 lq_sta->is_vht = true;
2333 }
2325 2334
2326 IWL_DEBUG_RATE(mvm, 2335 IWL_DEBUG_RATE(mvm,
2327 "SISO-RATE=%X MIMO2-RATE=%X\n", 2336 "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n",
2328 lq_sta->active_siso_rate, 2337 lq_sta->active_siso_rate,
2329 lq_sta->active_mimo2_rate); 2338 lq_sta->active_mimo2_rate,
2339 lq_sta->is_vht);
2330 2340
2331 /* These values will be overridden later */ 2341 /* These values will be overridden later */
2332 lq_sta->lq.single_stream_ant_msk = 2342 lq_sta->lq.single_stream_ant_msk =
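For the HT branch in the hunk above, a worked example of the mask conversion may help. It assumes IWL_FIRST_OFDM_RATE is the 6 Mbps index (4) and that bit 5 of the internal bitmap is the 9 Mbps slot, as the surrounding comment implies; only the arithmetic is reproduced:

#include <stdio.h>

int main(void)
{
	unsigned int rx_mask0 = 0xff;	/* ht_cap->mcs.rx_mask[0]: HT MCS 0-7 */
	unsigned int active;

	active = rx_mask0 << 1;		/* shift MCS 1-7 past the 9 Mbps slot */
	active |= rx_mask0 & 0x1;	/* keep MCS 0 in the first OFDM slot */
	active &= ~0x2u;		/* the 9 Mbps slot has no HT equivalent */
	active <<= 4;			/* assumed IWL_FIRST_OFDM_RATE: skip the 4 CCK slots */

	printf("0x%x\n", active);	/* 0x1fd0: bit 4 plus bits 6-12 */
	return 0;
}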
@@ -2358,6 +2368,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2358} 2368}
2359 2369
2360static void rs_fill_link_cmd(struct iwl_mvm *mvm, 2370static void rs_fill_link_cmd(struct iwl_mvm *mvm,
2371 struct ieee80211_sta *sta,
2361 struct iwl_lq_sta *lq_sta, u32 new_rate) 2372 struct iwl_lq_sta *lq_sta, u32 new_rate)
2362{ 2373{
2363 struct iwl_scale_tbl_info tbl_type; 2374 struct iwl_scale_tbl_info tbl_type;
@@ -2429,7 +2440,6 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
2429 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type, 2440 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
2430 &rate_idx); 2441 &rate_idx);
2431 2442
2432
2433 /* Indicate to uCode which entries might be MIMO. 2443 /* Indicate to uCode which entries might be MIMO.
2434 * If initial rate was MIMO, this will finally end up 2444 * If initial rate was MIMO, this will finally end up
2435 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ 2445 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
@@ -2455,7 +2465,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
2455 } 2465 }
2456 2466
2457 /* Don't allow HT rates after next pass. 2467 /* Don't allow HT rates after next pass.
2458 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */ 2468 * rs_get_lower_rate() will change type to LQ_LEGACY_A
2469 * or LQ_LEGACY_G.
2470 */
2459 use_ht_possible = 0; 2471 use_ht_possible = 0;
2460 2472
2461 /* Override next rate if needed for debug purposes */ 2473 /* Override next rate if needed for debug purposes */
@@ -2474,12 +2486,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
2474 lq_cmd->agg_time_limit = 2486 lq_cmd->agg_time_limit =
2475 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); 2487 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2476 2488
2477 /* 2489 if (sta)
2478 * overwrite if needed, pass aggregation time limit 2490 lq_cmd->agg_time_limit =
2479 * to uCode in uSec - This is racy - but heh, at least it helps... 2491 cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
2480 */
2481 if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2)
2482 lq_cmd->agg_time_limit = cpu_to_le16(1200);
2483} 2492}
2484 2493
2485static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 2494static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2586,16 +2595,18 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2586 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "", 2595 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
2587 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : ""); 2596 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
2588 desc += sprintf(buff+desc, "lq type %s\n", 2597 desc += sprintf(buff+desc, "lq type %s\n",
2589 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 2598 (is_legacy(tbl->lq_type)) ? "legacy" :
2590 if (is_Ht(tbl->lq_type)) { 2599 is_vht(tbl->lq_type) ? "VHT" : "HT");
2600 if (is_ht(tbl->lq_type)) {
2591 desc += sprintf(buff+desc, " %s", 2601 desc += sprintf(buff+desc, " %s",
2592 (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2"); 2602 (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
2593 desc += sprintf(buff+desc, " %s", 2603 desc += sprintf(buff+desc, " %s",
2594 (tbl->is_ht40) ? "40MHz" : "20MHz"); 2604 (is_ht20(tbl)) ? "20MHz" :
2595 desc += sprintf(buff+desc, " %s %s %s\n", 2605 (is_ht40(tbl)) ? "40MHz" :
 2606 (is_ht80(tbl)) ? "80MHz" : "BAD BW");
2607 desc += sprintf(buff+desc, " %s %s\n",
2596 (tbl->is_SGI) ? "SGI" : "", 2608 (tbl->is_SGI) ? "SGI" : "",
2597 (lq_sta->is_green) ? "GF enabled" : "", 2609 (lq_sta->is_agg) ? "AGG on" : "");
2598 (lq_sta->is_agg) ? "AGG on" : "");
2599 } 2610 }
2600 desc += sprintf(buff+desc, "last tx rate=0x%X\n", 2611 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
2601 lq_sta->last_rate_n_flags); 2612 lq_sta->last_rate_n_flags);
@@ -2653,7 +2664,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
2653 int desc = 0; 2664 int desc = 0;
2654 int i, j; 2665 int i, j;
2655 ssize_t ret; 2666 ssize_t ret;
2656 2667 struct iwl_scale_tbl_info *tbl;
2657 struct iwl_lq_sta *lq_sta = file->private_data; 2668 struct iwl_lq_sta *lq_sta = file->private_data;
2658 2669
2659 buff = kmalloc(1024, GFP_KERNEL); 2670 buff = kmalloc(1024, GFP_KERNEL);
@@ -2661,21 +2672,23 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
2661 return -ENOMEM; 2672 return -ENOMEM;
2662 2673
2663 for (i = 0; i < LQ_SIZE; i++) { 2674 for (i = 0; i < LQ_SIZE; i++) {
2675 tbl = &(lq_sta->lq_info[i]);
2664 desc += sprintf(buff+desc, 2676 desc += sprintf(buff+desc,
2665 "%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n" 2677 "%s type=%d SGI=%d BW=%s DUP=0\n"
2666 "rate=0x%X\n", 2678 "rate=0x%X\n",
2667 lq_sta->active_tbl == i ? "*" : "x", 2679 lq_sta->active_tbl == i ? "*" : "x",
2668 lq_sta->lq_info[i].lq_type, 2680 tbl->lq_type,
2669 lq_sta->lq_info[i].is_SGI, 2681 tbl->is_SGI,
2670 lq_sta->lq_info[i].is_ht40, 2682 is_ht20(tbl) ? "20Mhz" :
2671 lq_sta->is_green, 2683 is_ht40(tbl) ? "40Mhz" :
2672 lq_sta->lq_info[i].current_rate); 2684 is_ht80(tbl) ? "80Mhz" : "ERR",
2685 tbl->current_rate);
2673 for (j = 0; j < IWL_RATE_COUNT; j++) { 2686 for (j = 0; j < IWL_RATE_COUNT; j++) {
2674 desc += sprintf(buff+desc, 2687 desc += sprintf(buff+desc,
2675 "counter=%d success=%d %%=%d\n", 2688 "counter=%d success=%d %%=%d\n",
2676 lq_sta->lq_info[i].win[j].counter, 2689 tbl->win[j].counter,
2677 lq_sta->lq_info[i].win[j].success_counter, 2690 tbl->win[j].success_counter,
2678 lq_sta->lq_info[i].win[j].success_ratio); 2691 tbl->win[j].success_ratio);
2679 } 2692 }
2680 } 2693 }
2681 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); 2694 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 465d40ee176f..5d5344f7070b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -35,9 +35,11 @@
35#include "iwl-trans.h" 35#include "iwl-trans.h"
36 36
37struct iwl_rs_rate_info { 37struct iwl_rs_rate_info {
38 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ 38 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
39 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ 39 u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
40 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ 40 u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
41 u8 plcp_vht_siso;
42 u8 plcp_vht_mimo2;
41 u8 prev_rs; /* previous rate used in rs algo */ 43 u8 prev_rs; /* previous rate used in rs algo */
42 u8 next_rs; /* next rate used in rs algo */ 44 u8 next_rs; /* next rate used in rs algo */
43}; 45};
@@ -83,35 +85,52 @@ enum {
83#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) 85#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
84 86
85 87
86/* uCode API values for OFDM high-throughput (HT) bit rates */ 88/* uCode API values for HT/VHT bit rates */
87enum { 89enum {
88 IWL_RATE_SISO_6M_PLCP = 0, 90 IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
89 IWL_RATE_SISO_12M_PLCP = 1, 91 IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
90 IWL_RATE_SISO_18M_PLCP = 2, 92 IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
91 IWL_RATE_SISO_24M_PLCP = 3, 93 IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
92 IWL_RATE_SISO_36M_PLCP = 4, 94 IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
93 IWL_RATE_SISO_48M_PLCP = 5, 95 IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
94 IWL_RATE_SISO_54M_PLCP = 6, 96 IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
95 IWL_RATE_SISO_60M_PLCP = 7, 97 IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
96 IWL_RATE_MIMO2_6M_PLCP = 0x8, 98 IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
97 IWL_RATE_MIMO2_12M_PLCP = 0x9, 99 IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
98 IWL_RATE_MIMO2_18M_PLCP = 0xa, 100 IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
99 IWL_RATE_MIMO2_24M_PLCP = 0xb, 101 IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
100 IWL_RATE_MIMO2_36M_PLCP = 0xc, 102 IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
101 IWL_RATE_MIMO2_48M_PLCP = 0xd, 103 IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
102 IWL_RATE_MIMO2_54M_PLCP = 0xe, 104 IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
103 IWL_RATE_MIMO2_60M_PLCP = 0xf, 105 IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
104 IWL_RATE_MIMO3_6M_PLCP = 0x10, 106 IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
105 IWL_RATE_MIMO3_12M_PLCP = 0x11, 107 IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
106 IWL_RATE_MIMO3_18M_PLCP = 0x12, 108 IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
107 IWL_RATE_MIMO3_24M_PLCP = 0x13, 109 IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
108 IWL_RATE_MIMO3_36M_PLCP = 0x14, 110 IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
109 IWL_RATE_MIMO3_48M_PLCP = 0x15, 111 IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
110 IWL_RATE_MIMO3_54M_PLCP = 0x16, 112 IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
111 IWL_RATE_MIMO3_60M_PLCP = 0x17, 113 IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
112 IWL_RATE_SISO_INVM_PLCP, 114 IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
113 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, 115 IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
114 IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, 116 IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
117 IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
118 IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
119 IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
120 IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
121 IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
122 IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
123 IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
124 IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
125 IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
126 IWL_RATE_HT_SISO_MCS_INV_PLCP,
127 IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
128 IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
129 IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
130 IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
131 IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
132 IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
133 IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
115}; 134};
116 135
117#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) 136#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
@@ -139,25 +158,33 @@ enum {
139#define IWL_RATE_DECREASE_TH 1920 /* 15% */ 158#define IWL_RATE_DECREASE_TH 1920 /* 15% */
140 159
141/* possible actions when in legacy mode */ 160/* possible actions when in legacy mode */
142#define IWL_LEGACY_SWITCH_ANTENNA1 0 161enum {
143#define IWL_LEGACY_SWITCH_ANTENNA2 1 162 IWL_LEGACY_SWITCH_ANTENNA,
144#define IWL_LEGACY_SWITCH_SISO 2 163 IWL_LEGACY_SWITCH_SISO,
145#define IWL_LEGACY_SWITCH_MIMO2 3 164 IWL_LEGACY_SWITCH_MIMO2,
165 IWL_LEGACY_FIRST_ACTION = IWL_LEGACY_SWITCH_ANTENNA,
166 IWL_LEGACY_LAST_ACTION = IWL_LEGACY_SWITCH_MIMO2,
167};
146 168
147/* possible actions when in siso mode */ 169/* possible actions when in siso mode */
148#define IWL_SISO_SWITCH_ANTENNA1 0 170enum {
149#define IWL_SISO_SWITCH_ANTENNA2 1 171 IWL_SISO_SWITCH_ANTENNA,
150#define IWL_SISO_SWITCH_MIMO2 2 172 IWL_SISO_SWITCH_MIMO2,
151#define IWL_SISO_SWITCH_GI 3 173 IWL_SISO_SWITCH_GI,
174 IWL_SISO_FIRST_ACTION = IWL_SISO_SWITCH_ANTENNA,
175 IWL_SISO_LAST_ACTION = IWL_SISO_SWITCH_GI,
176};
152 177
153/* possible actions when in mimo mode */ 178/* possible actions when in mimo mode */
154#define IWL_MIMO2_SWITCH_ANTENNA1 0 179enum {
155#define IWL_MIMO2_SWITCH_ANTENNA2 1 180 IWL_MIMO2_SWITCH_SISO_A,
156#define IWL_MIMO2_SWITCH_SISO_A 2 181 IWL_MIMO2_SWITCH_SISO_B,
157#define IWL_MIMO2_SWITCH_SISO_B 3 182 IWL_MIMO2_SWITCH_GI,
158#define IWL_MIMO2_SWITCH_GI 4 183 IWL_MIMO2_FIRST_ACTION = IWL_MIMO2_SWITCH_SISO_A,
184 IWL_MIMO2_LAST_ACTION = IWL_MIMO2_SWITCH_GI,
185};
159 186
160#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI 187#define IWL_MAX_SEARCH IWL_MIMO2_LAST_ACTION
161 188
162#define IWL_ACTION_LIMIT 3 /* # possible actions */ 189#define IWL_ACTION_LIMIT 3 /* # possible actions */
163 190
@@ -188,20 +215,31 @@ enum {
188 215
189enum iwl_table_type { 216enum iwl_table_type {
190 LQ_NONE, 217 LQ_NONE,
191 LQ_G, /* legacy types */ 218 LQ_LEGACY_G, /* legacy types */
192 LQ_A, 219 LQ_LEGACY_A,
193 LQ_SISO, /* high-throughput types */ 220 LQ_HT_SISO, /* HT types */
194 LQ_MIMO2, 221 LQ_HT_MIMO2,
222 LQ_VHT_SISO, /* VHT types */
223 LQ_VHT_MIMO2,
195 LQ_MAX, 224 LQ_MAX,
196}; 225};
197 226
198#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) 227#define is_legacy(tbl) (((tbl) == LQ_LEGACY_G) || ((tbl) == LQ_LEGACY_A))
199#define is_siso(tbl) ((tbl) == LQ_SISO) 228#define is_ht_siso(tbl) ((tbl) == LQ_HT_SISO)
200#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) 229#define is_ht_mimo2(tbl) ((tbl) == LQ_HT_MIMO2)
201#define is_mimo(tbl) is_mimo2(tbl) 230#define is_vht_siso(tbl) ((tbl) == LQ_VHT_SISO)
202#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) 231#define is_vht_mimo2(tbl) ((tbl) == LQ_VHT_MIMO2)
203#define is_a_band(tbl) ((tbl) == LQ_A) 232#define is_siso(tbl) (is_ht_siso(tbl) || is_vht_siso(tbl))
204#define is_g_and(tbl) ((tbl) == LQ_G) 233#define is_mimo2(tbl) (is_ht_mimo2(tbl) || is_vht_mimo2(tbl))
234#define is_mimo(tbl) (is_mimo2(tbl))
235#define is_ht(tbl) (is_ht_siso(tbl) || is_ht_mimo2(tbl))
236#define is_vht(tbl) (is_vht_siso(tbl) || is_vht_mimo2(tbl))
237#define is_a_band(tbl) ((tbl) == LQ_LEGACY_A)
238#define is_g_band(tbl) ((tbl) == LQ_LEGACY_G)
239
240#define is_ht20(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_20)
241#define is_ht40(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_40)
242#define is_ht80(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_80)
205 243
206#define IWL_MAX_MCS_DISPLAY_SIZE 12 244#define IWL_MAX_MCS_DISPLAY_SIZE 12
207 245
@@ -232,7 +270,7 @@ struct iwl_scale_tbl_info {
232 enum iwl_table_type lq_type; 270 enum iwl_table_type lq_type;
233 u8 ant_type; 271 u8 ant_type;
234 u8 is_SGI; /* 1 = short guard interval */ 272 u8 is_SGI; /* 1 = short guard interval */
235 u8 is_ht40; /* 1 = 40 MHz channel width */ 273 u32 bw; /* channel bandwidth; RATE_MCS_CHAN_WIDTH_XX */
236 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ 274 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
 237 u8 max_search; /* maximum number of tables we can search */ 275 u8 max_search; /* maximum number of tables we can search */
238 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 276 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
@@ -262,7 +300,7 @@ struct iwl_lq_sta {
262 u64 flush_timer; /* time staying in mode before new search */ 300 u64 flush_timer; /* time staying in mode before new search */
263 301
264 u8 action_counter; /* # mode-switch actions tried */ 302 u8 action_counter; /* # mode-switch actions tried */
265 u8 is_green; 303 bool is_vht;
266 enum ieee80211_band band; 304 enum ieee80211_band band;
267 305
268 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 306 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 2a8cb5a60535..a4af5019a496 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -422,6 +422,27 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
422 422
423 mvmvif->bf_data.ave_beacon_signal = sig; 423 mvmvif->bf_data.ave_beacon_signal = sig;
424 424
425 /* BT Coex */
426 if (mvmvif->bf_data.bt_coex_min_thold !=
427 mvmvif->bf_data.bt_coex_max_thold) {
428 last_event = mvmvif->bf_data.last_bt_coex_event;
429 if (sig > mvmvif->bf_data.bt_coex_max_thold &&
430 (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
431 last_event == 0)) {
432 mvmvif->bf_data.last_bt_coex_event = sig;
433 IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
434 sig);
435 iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
436 } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
437 (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
438 last_event == 0)) {
439 mvmvif->bf_data.last_bt_coex_event = sig;
440 IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
441 sig);
442 iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
443 }
444 }
445
425 if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) 446 if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
426 return; 447 return;
427 448
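The new block implements a simple hysteresis on the averaged beacon signal: HIGH is only reported after the signal was last seen at or below the low threshold (or nothing was reported yet), and symmetrically for LOW. A self-contained sketch of that rule with made-up thresholds and samples:

#include <stdio.h>

enum rssi_event { RSSI_NONE, RSSI_HIGH, RSSI_LOW };

/* Same decision rule as the hunk above, with the thresholds passed in. */
static enum rssi_event bt_coex_check(int sig, int min_thold, int max_thold,
				     int *last_event)
{
	if (sig > max_thold &&
	    (*last_event <= min_thold || *last_event == 0)) {
		*last_event = sig;
		return RSSI_HIGH;
	}
	if (sig < min_thold &&
	    (*last_event >= max_thold || *last_event == 0)) {
		*last_event = sig;
		return RSSI_LOW;
	}
	return RSSI_NONE;
}

int main(void)
{
	int last = 0;			/* 0 means "no event reported yet" */
	int samples[] = { -80, -55, -60, -85, -50 };
	int i;

	for (i = 0; i < 5; i++)
		printf("sig %d -> event %d\n", samples[i],
		       (int)bt_coex_check(samples[i], -75, -65, &last));
	return 0;
}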
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 621fb71f282a..dff7592e1ff8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -74,8 +74,12 @@
74static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) 74static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
75{ 75{
76 u16 rx_chain; 76 u16 rx_chain;
77 u8 rx_ant = iwl_fw_valid_rx_ant(mvm->fw); 77 u8 rx_ant;
78 78
79 if (mvm->scan_rx_ant != ANT_NONE)
80 rx_ant = mvm->scan_rx_ant;
81 else
82 rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
79 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; 83 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
80 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; 84 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
81 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; 85 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -93,10 +97,10 @@ static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
93 97
94static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif) 98static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
95{ 99{
96 if (vif->bss_conf.assoc) 100 if (!vif->bss_conf.assoc)
97 return cpu_to_le32(vif->bss_conf.beacon_int);
98 else
99 return 0; 101 return 0;
102
103 return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
100} 104}
101 105
102static inline __le32 106static inline __le32
@@ -133,11 +137,12 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
133 * request. 137 * request.
134 */ 138 */
135static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd, 139static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
136 struct cfg80211_scan_request *req) 140 struct cfg80211_scan_request *req,
141 int first)
137{ 142{
138 int fw_idx, req_idx; 143 int fw_idx, req_idx;
139 144
140 for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0; 145 for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx >= first;
141 req_idx--, fw_idx++) { 146 req_idx--, fw_idx++) {
142 cmd->direct_scan[fw_idx].id = WLAN_EID_SSID; 147 cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
143 cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len; 148 cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
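The loop now starts at "first" instead of hard-coding 1: when the firmware expects the first SSID inside the probe template (the basic_ssid case), ssids[0] is skipped, otherwise every SSID gets a direct-scan slot. A small sketch of the reverse fill:

#include <stdio.h>

int main(void)
{
	const char *ssids[] = { "net0", "net1", "net2" };
	int n_ssids = 3, first = 1;	/* basic_ssid case: ssids[0] is in the template */
	int req_idx, fw_idx;

	for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
	     req_idx--, fw_idx++)
		printf("slot %d <- %s\n", fw_idx, ssids[req_idx]);
	/* slot 0 <- net2, slot 1 <- net1; net0 comes from the template */
	return 0;
}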
@@ -153,9 +158,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
153 * just to notify that this scan is active and not passive. 158 * just to notify that this scan is active and not passive.
154 * In order to notify the FW of the number of SSIDs we wish to scan (including 159 * In order to notify the FW of the number of SSIDs we wish to scan (including
155 * the zero-length one), we need to set the corresponding bits in chan->type, 160 * the zero-length one), we need to set the corresponding bits in chan->type,
156 * one for each SSID, and set the active bit (first). The first SSID is already 161 * one for each SSID, and set the active bit (first). If the first SSID is
 157 * included in the probe template, so we need to set only req->n_ssids - 1 bits 162 * already included in the probe template, we need to set only
158 * in addition to the first bit. 163 * req->n_ssids - 1 bits in addition to the first bit.
159 */ 164 */
160static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) 165static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
161{ 166{
@@ -170,7 +175,8 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
170} 175}
171 176
172static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd, 177static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
173 struct cfg80211_scan_request *req) 178 struct cfg80211_scan_request *req,
179 bool basic_ssid)
174{ 180{
175 u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band); 181 u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
176 u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band, 182 u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
@@ -178,10 +184,14 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
178 struct iwl_scan_channel *chan = (struct iwl_scan_channel *) 184 struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
179 (cmd->data + le16_to_cpu(cmd->tx_cmd.len)); 185 (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
180 int i; 186 int i;
187 int type = BIT(req->n_ssids) - 1;
188
189 if (!basic_ssid)
190 type |= BIT(req->n_ssids);
181 191
182 for (i = 0; i < cmd->channel_count; i++) { 192 for (i = 0; i < cmd->channel_count; i++) {
183 chan->channel = cpu_to_le16(req->channels[i]->hw_value); 193 chan->channel = cpu_to_le16(req->channels[i]->hw_value);
184 chan->type = cpu_to_le32(BIT(req->n_ssids) - 1); 194 chan->type = cpu_to_le32(type);
185 if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) 195 if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
186 chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE); 196 chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
187 chan->active_dwell = cpu_to_le16(active_dwell); 197 chan->active_dwell = cpu_to_le16(active_dwell);
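chan->type now carries one bit per direct-probe SSID plus, when basic_ssid is false (the IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID case), one extra bit for the broadcast probe. A sketch of how the bitmap comes out for a couple of inputs (pure arithmetic, names are not the driver's):

#include <stdbool.h>
#include <stdio.h>

static unsigned int scan_channel_type(int n_ssids, bool basic_ssid)
{
	unsigned int type = (1u << n_ssids) - 1;	/* one bit per direct SSID */

	if (!basic_ssid)
		type |= 1u << n_ssids;			/* extra bit: broadcast probe */
	return type;
}

int main(void)
{
	printf("%#x\n", scan_channel_type(2, true));	/* 0x3 */
	printf("%#x\n", scan_channel_type(2, false));	/* 0x7 */
	return 0;
}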
@@ -268,6 +278,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
268 u32 status; 278 u32 status;
269 int ssid_len = 0; 279 int ssid_len = 0;
270 u8 *ssid = NULL; 280 u8 *ssid = NULL;
281 bool basic_ssid = !(mvm->fw->ucode_capa.flags &
282 IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
271 283
272 lockdep_assert_held(&mvm->mutex); 284 lockdep_assert_held(&mvm->mutex);
273 BUG_ON(mvm->scan_cmd == NULL); 285 BUG_ON(mvm->scan_cmd == NULL);
@@ -302,14 +314,16 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
302 if (req->n_ssids > 0) { 314 if (req->n_ssids > 0) {
303 cmd->passive2active = cpu_to_le16(1); 315 cmd->passive2active = cpu_to_le16(1);
304 cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE; 316 cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
305 ssid = req->ssids[0].ssid; 317 if (basic_ssid) {
306 ssid_len = req->ssids[0].ssid_len; 318 ssid = req->ssids[0].ssid;
319 ssid_len = req->ssids[0].ssid_len;
320 }
307 } else { 321 } else {
308 cmd->passive2active = 0; 322 cmd->passive2active = 0;
309 cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE; 323 cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
310 } 324 }
311 325
312 iwl_mvm_scan_fill_ssids(cmd, req); 326 iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
313 327
314 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL); 328 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
315 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; 329 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
@@ -326,7 +340,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
326 req->ie, req->ie_len, 340 req->ie, req->ie_len,
327 mvm->fw->ucode_capa.max_probe_length)); 341 mvm->fw->ucode_capa.max_probe_length));
328 342
329 iwl_mvm_scan_fill_channels(cmd, req); 343 iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
330 344
331 cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) + 345 cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
332 le16_to_cpu(cmd->tx_cmd.len) + 346 le16_to_cpu(cmd->tx_cmd.len) +
@@ -377,6 +391,21 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
377 return 0; 391 return 0;
378} 392}
379 393
394int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
395 struct iwl_rx_cmd_buffer *rxb,
396 struct iwl_device_cmd *cmd)
397{
398 struct iwl_rx_packet *pkt = rxb_addr(rxb);
399 struct iwl_sched_scan_results *notif = (void *)pkt->data;
400
401 if (notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
402 IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
403 ieee80211_sched_scan_results(mvm->hw);
404 }
405
406 return 0;
407}
408
380static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait, 409static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
381 struct iwl_rx_packet *pkt, void *data) 410 struct iwl_rx_packet *pkt, void *data)
382{ 411{
@@ -447,3 +476,406 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
447out_remove_notif: 476out_remove_notif:
448 iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort); 477 iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
449} 478}
479
480int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
481 struct iwl_rx_cmd_buffer *rxb,
482 struct iwl_device_cmd *cmd)
483{
484 struct iwl_rx_packet *pkt = rxb_addr(rxb);
485 struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
486
487 IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
488 scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
489 "completed" : "aborted");
490
491 mvm->scan_status = IWL_MVM_SCAN_NONE;
492 ieee80211_sched_scan_stopped(mvm->hw);
493
494 return 0;
495}
496
497static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
498 struct ieee80211_vif *vif,
499 struct ieee80211_sched_scan_ies *ies,
500 enum ieee80211_band band,
501 struct iwl_tx_cmd *cmd,
502 u8 *data)
503{
504 u16 cmd_len;
505
506 cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
507 cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
508 cmd->sta_id = mvm->aux_sta.sta_id;
509
510 cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
511
512 cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
513 vif->addr,
514 1, NULL, 0,
515 ies->ie[band], ies->len[band],
516 SCAN_OFFLOAD_PROBE_REQ_SIZE);
517 cmd->len = cpu_to_le16(cmd_len);
518}
519
520static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
521 struct ieee80211_vif *vif,
522 struct cfg80211_sched_scan_request *req,
523 struct iwl_scan_offload_cmd *scan)
524{
525 scan->channel_count =
526 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
527 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
528 scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
529 scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
530 scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
531 scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
532 scan->max_out_time = cpu_to_le32(200 * 1024);
533 scan->suspend_time = iwl_mvm_scan_suspend_time(vif);
534 scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
535 MAC_FILTER_IN_BEACON);
536 scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
537 scan->rep_count = cpu_to_le32(1);
538}
539
540static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
541{
542 int i;
543
544 for (i = 0; i < PROBE_OPTION_MAX; i++) {
545 if (!ssid_list[i].len)
546 break;
547 if (ssid_list[i].len == ssid_len &&
 548 !memcmp(ssid_list[i].ssid, ssid, ssid_len))
549 return i;
550 }
551 return -1;
552}
553
554static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
555 struct iwl_scan_offload_cmd *scan,
556 u32 *ssid_bitmap)
557{
558 int i, j;
559 int index;
560
561 /*
562 * copy SSIDs from match list.
 563 * iwl_mvm_config_sched_scan_profiles() uses the order of these ssids to
 564 * configure the match list.
565 */
566 for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
567 scan->direct_scan[i].id = WLAN_EID_SSID;
568 scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
569 memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
570 scan->direct_scan[i].len);
571 }
572
573 /* add SSIDs from scan SSID list */
574 *ssid_bitmap = 0;
575 for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
576 index = iwl_ssid_exist(req->ssids[j].ssid,
577 req->ssids[j].ssid_len,
578 scan->direct_scan);
579 if (index < 0) {
580 if (!req->ssids[j].ssid_len)
581 continue;
582 scan->direct_scan[i].id = WLAN_EID_SSID;
583 scan->direct_scan[i].len = req->ssids[j].ssid_len;
584 memcpy(scan->direct_scan[i].ssid, req->ssids[j].ssid,
585 scan->direct_scan[i].len);
586 *ssid_bitmap |= BIT(i + 1);
587 i++;
588 } else {
589 *ssid_bitmap |= BIT(index + 1);
590 }
591 }
592}
593
594static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
595 struct cfg80211_sched_scan_request *req,
596 struct iwl_scan_channel_cfg *channels,
597 enum ieee80211_band band,
598 int *head, int *tail,
599 u32 ssid_bitmap)
600{
601 struct ieee80211_supported_band *s_band;
602 int n_probes = req->n_ssids;
603 int n_channels = req->n_channels;
604 u8 active_dwell, passive_dwell;
605 int i, j, index = 0;
606 bool partial;
607
608 /*
609 * We have to configure all supported channels, even if we don't want to
610 * scan on them, but we have to send channels in the order that we want
611 * to scan. So add requested channels to head of the list and others to
612 * the end.
613 */
614 active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
615 passive_dwell = iwl_mvm_get_passive_dwell(band);
616 s_band = &mvm->nvm_data->bands[band];
617
618 for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
619 partial = false;
620 for (j = 0; j < n_channels; j++)
621 if (s_band->channels[i].center_freq ==
622 req->channels[j]->center_freq) {
623 index = *head;
624 (*head)++;
625 /*
626 * Channels that came with the request will be
627 * in partial scan .
628 */
629 partial = true;
630 break;
631 }
632 if (!partial) {
633 index = *tail;
634 (*tail)--;
635 }
636 channels->channel_number[index] =
637 cpu_to_le16(ieee80211_frequency_to_channel(
638 s_band->channels[i].center_freq));
639 channels->dwell_time[index][0] = active_dwell;
640 channels->dwell_time[index][1] = passive_dwell;
641
642 channels->iter_count[index] = cpu_to_le16(1);
643 channels->iter_interval[index] = 0;
644
645 if (!(s_band->channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
646 channels->type[index] |=
647 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
648
649 channels->type[index] |=
650 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
651 if (partial)
652 channels->type[index] |=
653 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
654
655 if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
656 channels->type[index] |=
657 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
658
659 /* scan for all SSIDs from req->ssids */
660 channels->type[index] |= cpu_to_le32(ssid_bitmap);
661 }
662}
663
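The head/tail bookkeeping above packs requested channels at the front of the channel array and every other supported channel at the back, so the firmware still receives the full channel list but scans the requested ones first. A standalone sketch of that placement with arbitrary channel numbers:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int supported[] = { 1, 6, 11, 36, 40, 44 };	/* every supported channel */
	int requested[] = { 6, 36 };			/* channels in the scan request */
	int order[6];
	int head = 0, tail = 5;
	int i, j;

	for (i = 0; i < 6 && head <= tail; i++) {
		bool wanted = false;

		for (j = 0; j < 2; j++)
			if (supported[i] == requested[j]) {
				wanted = true;
				break;
			}
		order[wanted ? head++ : tail--] = supported[i];
	}

	for (i = 0; i < 6; i++)
		printf("%d ", order[i]);	/* 6 36 44 40 11 1 */
	printf("\n");
	return 0;
}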
664int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
665 struct ieee80211_vif *vif,
666 struct cfg80211_sched_scan_request *req,
667 struct ieee80211_sched_scan_ies *ies)
668{
669 int supported_bands = 0;
670 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
671 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
672 int head = 0;
673 int tail = band_2ghz + band_5ghz;
674 u32 ssid_bitmap;
675 int cmd_len;
676 int ret;
677
678 struct iwl_scan_offload_cfg *scan_cfg;
679 struct iwl_host_cmd cmd = {
680 .id = SCAN_OFFLOAD_CONFIG_CMD,
681 .flags = CMD_SYNC,
682 };
683
684 lockdep_assert_held(&mvm->mutex);
685
686 if (band_2ghz)
687 supported_bands++;
688 if (band_5ghz)
689 supported_bands++;
690
691 cmd_len = sizeof(struct iwl_scan_offload_cfg) +
692 supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
693
694 scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
695 if (!scan_cfg)
696 return -ENOMEM;
697
698 iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
699 scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
700
701 iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
702 /* build tx frames for supported bands */
703 if (band_2ghz) {
704 iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
705 IEEE80211_BAND_2GHZ,
706 &scan_cfg->scan_cmd.tx_cmd[0],
707 scan_cfg->data);
708 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
709 IEEE80211_BAND_2GHZ, &head, &tail,
710 ssid_bitmap);
711 }
712 if (band_5ghz) {
713 iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
714 IEEE80211_BAND_5GHZ,
715 &scan_cfg->scan_cmd.tx_cmd[1],
716 scan_cfg->data +
717 SCAN_OFFLOAD_PROBE_REQ_SIZE);
718 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
719 IEEE80211_BAND_5GHZ, &head, &tail,
720 ssid_bitmap);
721 }
722
723 cmd.data[0] = scan_cfg;
724 cmd.len[0] = cmd_len;
725 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
726
727 IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
728
729 ret = iwl_mvm_send_cmd(mvm, &cmd);
730 kfree(scan_cfg);
731 return ret;
732}
733
734int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
735 struct cfg80211_sched_scan_request *req)
736{
737 struct iwl_scan_offload_profile *profile;
738 struct iwl_scan_offload_profile_cfg *profile_cfg;
739 struct iwl_scan_offload_blacklist *blacklist;
740 struct iwl_host_cmd cmd = {
741 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
742 .flags = CMD_SYNC,
743 .len[1] = sizeof(*profile_cfg),
744 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
745 .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
746 };
747 int blacklist_len;
748 int i;
749 int ret;
750
751 if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
752 return -EIO;
753
754 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
755 blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
756 else
757 blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
758
759 blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
760 if (!blacklist)
761 return -ENOMEM;
762
763 profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
764 if (!profile_cfg) {
765 ret = -ENOMEM;
766 goto free_blacklist;
767 }
768
769 cmd.data[0] = blacklist;
770 cmd.len[0] = sizeof(*blacklist) * blacklist_len;
771 cmd.data[1] = profile_cfg;
772
773 /* No blacklist configuration */
774
775 profile_cfg->num_profiles = req->n_match_sets;
776 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
777 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
778 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
779
780 for (i = 0; i < req->n_match_sets; i++) {
781 profile = &profile_cfg->profiles[i];
782 profile->ssid_index = i;
783 /* Support any cipher and auth algorithm */
784 profile->unicast_cipher = 0xff;
785 profile->auth_alg = 0xff;
786 profile->network_type = IWL_NETWORK_TYPE_ANY;
787 profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
788 profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
789 }
790
791 IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
792
793 ret = iwl_mvm_send_cmd(mvm, &cmd);
794 kfree(profile_cfg);
795free_blacklist:
796 kfree(blacklist);
797
798 return ret;
799}
800
801int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
802 struct cfg80211_sched_scan_request *req)
803{
804 struct iwl_scan_offload_req scan_req = {
805 .watchdog = IWL_SCHED_SCAN_WATCHDOG,
806
807 .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
808 .schedule_line[0].delay = req->interval / 1000,
809 .schedule_line[0].full_scan_mul = 1,
810
811 .schedule_line[1].iterations = 0xff,
812 .schedule_line[1].delay = req->interval / 1000,
813 .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
814 };
815
816 if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
817 IWL_DEBUG_SCAN(mvm,
818 "Sending scheduled scan with filtering, filter len %d\n",
819 req->n_match_sets);
820 scan_req.flags |=
821 cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
822 } else {
823 IWL_DEBUG_SCAN(mvm,
824 "Sending Scheduled scan without filtering\n");
825 }
826
827 return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
828 sizeof(scan_req), &scan_req);
829}
830
831static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
832{
833 int ret;
834 struct iwl_host_cmd cmd = {
835 .id = SCAN_OFFLOAD_ABORT_CMD,
836 .flags = CMD_SYNC,
837 };
838 u32 status;
839
 840 /* Exit instantly with error when the device is not ready
 841 * to receive the scan abort command or is not currently
 842 * performing a scheduled scan */
843 if (mvm->scan_status != IWL_MVM_SCAN_SCHED)
844 return -EIO;
845
846 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
847 if (ret)
848 return ret;
849
850 if (status != CAN_ABORT_STATUS) {
851 /*
852 * The scan abort will return 1 for success or
853 * 2 for "failure". A failure condition can be
854 * due to simply not being in an active scan which
855 * can occur if we send the scan abort before the
856 * microcode has notified us that a scan is completed.
857 */
858 IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
859 ret = -EIO;
860 }
861
862 return ret;
863}
864
865void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
866{
867 int ret;
868
869 lockdep_assert_held(&mvm->mutex);
870
871 if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
872 IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
873 return;
874 }
875
876 ret = iwl_mvm_send_sched_scan_abort(mvm);
877 if (ret)
878 IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
879 else
880 IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
881}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 44add291531b..329952363a54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,6 +66,115 @@
66#include "sta.h" 66#include "sta.h"
67#include "rs.h" 67#include "rs.h"
68 68
69static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
70 struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
71{
72 memset(cmd_v5, 0, sizeof(*cmd_v5));
73
74 cmd_v5->add_modify = cmd_v6->add_modify;
75 cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
76 cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
77 memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
78 cmd_v5->sta_id = cmd_v6->sta_id;
79 cmd_v5->modify_mask = cmd_v6->modify_mask;
80 cmd_v5->station_flags = cmd_v6->station_flags;
81 cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
82 cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
83 cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
84 cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
85 cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
86 cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
87 cmd_v5->assoc_id = cmd_v6->assoc_id;
88 cmd_v5->beamform_flags = cmd_v6->beamform_flags;
89 cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
90}
91
92static void
93iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
94 struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
95 u32 mac_id_n_color)
96{
97 memset(sta_cmd, 0, sizeof(*sta_cmd));
98
99 sta_cmd->sta_id = key_cmd->sta_id;
100 sta_cmd->add_modify = STA_MODE_MODIFY;
101 sta_cmd->modify_mask = STA_MODIFY_KEY;
102 sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
103
104 sta_cmd->key.key_offset = key_cmd->key_offset;
105 sta_cmd->key.key_flags = key_cmd->key_flags;
106 memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
107 sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
108 memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
109 sizeof(sta_cmd->key.tkip_rx_ttak));
110}
111
112static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
113 struct iwl_mvm_add_sta_cmd_v6 *cmd,
114 int *status)
115{
116 struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
117
118 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
119 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
120 cmd, status);
121
122 iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
123
124 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
125 &cmd_v5, status);
126}
127
128static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
129 struct iwl_mvm_add_sta_cmd_v6 *cmd)
130{
131 struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
132
133 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
134 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
135 sizeof(*cmd), cmd);
136
137 iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
138
139 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
140 &cmd_v5);
141}
142
143static int
144iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
145 struct iwl_mvm_add_sta_key_cmd *cmd,
146 u32 mac_id_n_color,
147 int *status)
148{
149 struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
150
151 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
152 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
153 sizeof(*cmd), cmd, status);
154
155 iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
156
157 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
158 &sta_cmd, status);
159}
160
161static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
162 u32 flags,
163 struct iwl_mvm_add_sta_key_cmd *cmd,
164 u32 mac_id_n_color)
165{
166 struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
167
168 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
169 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
170 sizeof(*cmd), cmd);
171
172 iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
173
174 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
175 &sta_cmd);
176}
177
69static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm) 178static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
70{ 179{
71 int sta_id; 180 int sta_id;
@@ -87,7 +196,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
87 bool update) 196 bool update)
88{ 197{
89 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 198 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
90 struct iwl_mvm_add_sta_cmd add_sta_cmd; 199 struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
91 int ret; 200 int ret;
92 u32 status; 201 u32 status;
93 u32 agg_size = 0, mpdu_dens = 0; 202 u32 agg_size = 0, mpdu_dens = 0;
@@ -175,8 +284,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
175 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); 284 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
176 285
177 status = ADD_STA_SUCCESS; 286 status = ADD_STA_SUCCESS;
178 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd), 287 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
179 &add_sta_cmd, &status);
180 if (ret) 288 if (ret)
181 return ret; 289 return ret;
182 290
@@ -229,8 +337,12 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
229 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) 337 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
230 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); 338 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
231 339
232 /* for HW restart - need to reset the seq_number etc... */ 340 /* for HW restart - reset everything but the sequence number */
233 memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data)); 341 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
342 u16 seq = mvm_sta->tid_data[i].seq_number;
343 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
344 mvm_sta->tid_data[i].seq_number = seq;
345 }
234 346
235 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); 347 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
236 if (ret) 348 if (ret)
@@ -256,7 +368,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
256int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 368int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
257 bool drain) 369 bool drain)
258{ 370{
259 struct iwl_mvm_add_sta_cmd cmd = {}; 371 struct iwl_mvm_add_sta_cmd_v6 cmd = {};
260 int ret; 372 int ret;
261 u32 status; 373 u32 status;
262 374
@@ -269,8 +381,7 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
269 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); 381 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
270 382
271 status = ADD_STA_SUCCESS; 383 status = ADD_STA_SUCCESS;
272 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 384 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
273 &cmd, &status);
274 if (ret) 385 if (ret)
275 return ret; 386 return ret;
276 387
@@ -469,13 +580,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
469 const u8 *addr, 580 const u8 *addr,
470 u16 mac_id, u16 color) 581 u16 mac_id, u16 color)
471{ 582{
472 struct iwl_mvm_add_sta_cmd cmd; 583 struct iwl_mvm_add_sta_cmd_v6 cmd;
473 int ret; 584 int ret;
474 u32 status; 585 u32 status;
475 586
476 lockdep_assert_held(&mvm->mutex); 587 lockdep_assert_held(&mvm->mutex);
477 588
478 memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd)); 589 memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
479 cmd.sta_id = sta->sta_id; 590 cmd.sta_id = sta->sta_id;
480 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, 591 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
481 color)); 592 color));
@@ -485,8 +596,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
485 if (addr) 596 if (addr)
486 memcpy(cmd.addr, addr, ETH_ALEN); 597 memcpy(cmd.addr, addr, ETH_ALEN);
487 598
488 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 599 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
489 &cmd, &status);
490 if (ret) 600 if (ret)
491 return ret; 601 return ret;
492 602
@@ -534,10 +644,14 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
534 struct iwl_mvm_int_sta *bsta) 644 struct iwl_mvm_int_sta *bsta)
535{ 645{
536 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 646 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
537 static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 647 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
648 static const u8 *baddr = _baddr;
538 649
539 lockdep_assert_held(&mvm->mutex); 650 lockdep_assert_held(&mvm->mutex);
540 651
652 if (vif->type == NL80211_IFTYPE_ADHOC)
653 baddr = vif->bss_conf.bssid;
654
541 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT)) 655 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
542 return -ENOSPC; 656 return -ENOSPC;
543 657
@@ -614,7 +728,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
614 int tid, u16 ssn, bool start) 728 int tid, u16 ssn, bool start)
615{ 729{
616 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 730 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
617 struct iwl_mvm_add_sta_cmd cmd = {}; 731 struct iwl_mvm_add_sta_cmd_v6 cmd = {};
618 int ret; 732 int ret;
619 u32 status; 733 u32 status;
620 734
@@ -638,8 +752,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
638 STA_MODIFY_REMOVE_BA_TID; 752 STA_MODIFY_REMOVE_BA_TID;
639 753
640 status = ADD_STA_SUCCESS; 754 status = ADD_STA_SUCCESS;
641 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 755 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
642 &cmd, &status);
643 if (ret) 756 if (ret)
644 return ret; 757 return ret;
645 758
@@ -674,7 +787,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
674 int tid, u8 queue, bool start) 787 int tid, u8 queue, bool start)
675{ 788{
676 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 789 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
677 struct iwl_mvm_add_sta_cmd cmd = {}; 790 struct iwl_mvm_add_sta_cmd_v6 cmd = {};
678 int ret; 791 int ret;
679 u32 status; 792 u32 status;
680 793
@@ -696,8 +809,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
696 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 809 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
697 810
698 status = ADD_STA_SUCCESS; 811 status = ADD_STA_SUCCESS;
699 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 812 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
700 &cmd, &status);
701 if (ret) 813 if (ret)
702 return ret; 814 return ret;
703 815
@@ -743,13 +855,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
743 855
744 lockdep_assert_held(&mvm->mutex); 856 lockdep_assert_held(&mvm->mutex);
745 857
746 for (txq_id = IWL_MVM_FIRST_AGG_QUEUE; 858 for (txq_id = mvm->first_agg_queue;
747 txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++) 859 txq_id <= mvm->last_agg_queue; txq_id++)
748 if (mvm->queue_to_mac80211[txq_id] == 860 if (mvm->queue_to_mac80211[txq_id] ==
749 IWL_INVALID_MAC80211_QUEUE) 861 IWL_INVALID_MAC80211_QUEUE)
750 break; 862 break;
751 863
752 if (txq_id > IWL_MVM_LAST_AGG_QUEUE) { 864 if (txq_id > mvm->last_agg_queue) {
753 IWL_ERR(mvm, "Failed to allocate agg queue\n"); 865 IWL_ERR(mvm, "Failed to allocate agg queue\n");
754 return -EIO; 866 return -EIO;
755 } 867 }
@@ -987,10 +1099,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
987 u32 cmd_flags) 1099 u32 cmd_flags)
988{ 1100{
989 __le16 key_flags; 1101 __le16 key_flags;
990 struct iwl_mvm_add_sta_cmd cmd = {}; 1102 struct iwl_mvm_add_sta_key_cmd cmd = {};
991 int ret, status; 1103 int ret, status;
992 u16 keyidx; 1104 u16 keyidx;
993 int i; 1105 int i;
1106 u32 mac_id_n_color = mvm_sta->mac_id_n_color;
994 1107
995 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 1108 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
996 STA_KEY_FLG_KEYID_MSK; 1109 STA_KEY_FLG_KEYID_MSK;
@@ -1000,14 +1113,14 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1000 switch (keyconf->cipher) { 1113 switch (keyconf->cipher) {
1001 case WLAN_CIPHER_SUITE_TKIP: 1114 case WLAN_CIPHER_SUITE_TKIP:
1002 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); 1115 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
1003 cmd.key.tkip_rx_tsc_byte2 = tkip_iv32; 1116 cmd.tkip_rx_tsc_byte2 = tkip_iv32;
1004 for (i = 0; i < 5; i++) 1117 for (i = 0; i < 5; i++)
1005 cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); 1118 cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
1006 memcpy(cmd.key.key, keyconf->key, keyconf->keylen); 1119 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1007 break; 1120 break;
1008 case WLAN_CIPHER_SUITE_CCMP: 1121 case WLAN_CIPHER_SUITE_CCMP:
1009 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); 1122 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
1010 memcpy(cmd.key.key, keyconf->key, keyconf->keylen); 1123 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1011 break; 1124 break;
1012 default: 1125 default:
1013 WARN_ON(1); 1126 WARN_ON(1);
@@ -1017,20 +1130,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1017 if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1130 if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1018 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 1131 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1019 1132
1020 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 1133 cmd.key_offset = keyconf->hw_key_idx;
1021 cmd.key.key_offset = keyconf->hw_key_idx; 1134 cmd.key_flags = key_flags;
1022 cmd.key.key_flags = key_flags;
1023 cmd.add_modify = STA_MODE_MODIFY;
1024 cmd.modify_mask = STA_MODIFY_KEY;
1025 cmd.sta_id = sta_id; 1135 cmd.sta_id = sta_id;
1026 1136
1027 status = ADD_STA_SUCCESS; 1137 status = ADD_STA_SUCCESS;
1028 if (cmd_flags == CMD_SYNC) 1138 if (cmd_flags == CMD_SYNC)
1029 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 1139 ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
1030 &cmd, &status); 1140 mac_id_n_color,
1141 &status);
1031 else 1142 else
1032 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, 1143 ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
1033 sizeof(cmd), &cmd); 1144 mac_id_n_color);
1034 1145
1035 switch (status) { 1146 switch (status) {
1036 case ADD_STA_SUCCESS: 1147 case ADD_STA_SUCCESS:
@@ -1197,7 +1308,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1197 struct ieee80211_key_conf *keyconf) 1308 struct ieee80211_key_conf *keyconf)
1198{ 1309{
1199 struct iwl_mvm_sta *mvm_sta; 1310 struct iwl_mvm_sta *mvm_sta;
1200 struct iwl_mvm_add_sta_cmd cmd = {}; 1311 struct iwl_mvm_add_sta_key_cmd cmd = {};
1201 __le16 key_flags; 1312 __le16 key_flags;
1202 int ret, status; 1313 int ret, status;
1203 u8 sta_id; 1314 u8 sta_id;
@@ -1252,17 +1363,14 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1252 if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1363 if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1253 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 1364 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1254 1365
1255 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 1366 cmd.key_flags = key_flags;
1256 cmd.key.key_flags = key_flags; 1367 cmd.key_offset = keyconf->hw_key_idx;
1257 cmd.key.key_offset = keyconf->hw_key_idx;
1258 cmd.sta_id = sta_id; 1368 cmd.sta_id = sta_id;
1259 1369
1260 cmd.modify_mask = STA_MODIFY_KEY;
1261 cmd.add_modify = STA_MODE_MODIFY;
1262
1263 status = ADD_STA_SUCCESS; 1370 status = ADD_STA_SUCCESS;
1264 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 1371 ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
1265 &cmd, &status); 1372 mvm_sta->mac_id_n_color,
1373 &status);
1266 1374
1267 switch (status) { 1375 switch (status) {
1268 case ADD_STA_SUCCESS: 1376 case ADD_STA_SUCCESS:
@@ -1309,7 +1417,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1309 struct ieee80211_sta *sta) 1417 struct ieee80211_sta *sta)
1310{ 1418{
1311 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; 1419 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
1312 struct iwl_mvm_add_sta_cmd cmd = { 1420 struct iwl_mvm_add_sta_cmd_v6 cmd = {
1313 .add_modify = STA_MODE_MODIFY, 1421 .add_modify = STA_MODE_MODIFY,
1314 .sta_id = mvmsta->sta_id, 1422 .sta_id = mvmsta->sta_id,
1315 .station_flags_msk = cpu_to_le32(STA_FLG_PS), 1423 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1317,7 +1425,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1317 }; 1425 };
1318 int ret; 1426 int ret;
1319 1427
1320 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); 1428 ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
1321 if (ret) 1429 if (ret)
1322 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1430 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1323} 1431}
@@ -1331,7 +1439,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1331 (reason == IEEE80211_FRAME_RELEASE_UAPSD) ? 1439 (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
1332 STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL; 1440 STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
1333 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; 1441 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
1334 struct iwl_mvm_add_sta_cmd cmd = { 1442 struct iwl_mvm_add_sta_cmd_v6 cmd = {
1335 .add_modify = STA_MODE_MODIFY, 1443 .add_modify = STA_MODE_MODIFY,
1336 .sta_id = mvmsta->sta_id, 1444 .sta_id = mvmsta->sta_id,
1337 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, 1445 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1346,7 +1454,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1346 int ret; 1454 int ret;
1347 1455
1348 /* TODO: somehow the fw doesn't seem to take PS_POLL into account */ 1456 /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
1349 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); 1457 ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
1350 if (ret) 1458 if (ret)
1351 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1459 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1352} 1460}
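
The recurring pattern in the sta.c hunks above is to build the newest (v6) ADD_STA layout and downgrade it to v5 only when the firmware does not advertise the newer API. A condensed sketch of that dispatch, mirroring iwl_mvm_send_add_sta_cmd() from the hunk (not a new interface):

	/* Sketch: capability-gated command versioning. */
	static int send_add_sta_sketch(struct iwl_mvm *mvm, u32 flags,
				       struct iwl_mvm_add_sta_cmd_v6 *v6)
	{
		struct iwl_mvm_add_sta_cmd_v5 v5;

		/* newer firmware accepts the v6 layout directly */
		if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
			return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
						    sizeof(*v6), v6);

		/* older firmware: shrink to the v5 layout first */
		iwl_mvm_add_sta_cmd_v6_to_v5(v6, &v5);
		return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
					    sizeof(v5), &v5);
	}
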
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 94b265eb32b8..4dfc359a4bdd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -293,10 +293,6 @@ struct iwl_mvm_sta {
293 struct iwl_lq_sta lq_sta; 293 struct iwl_lq_sta lq_sta;
294 struct ieee80211_vif *vif; 294 struct ieee80211_vif *vif;
295 295
296#ifdef CONFIG_PM_SLEEP
297 u16 last_seq_ctl;
298#endif
299
300 /* Temporary, until the new TLC will control the Tx protection */ 296 /* Temporary, until the new TLC will control the Tx protection */
301 s8 tx_protection; 297 s8 tx_protection;
302 bool tt_tx_protection; 298 bool tt_tx_protection;
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
new file mode 100644
index 000000000000..eb74391d91ca
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h
@@ -0,0 +1,95 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_MVM_TESTMODE_H__
65#define __IWL_MVM_TESTMODE_H__
66
67/**
68 * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
69 * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
70 * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
71 * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
72 * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
73 */
74enum iwl_mvm_testmode_attrs {
75 IWL_MVM_TM_ATTR_UNSPEC,
76 IWL_MVM_TM_ATTR_CMD,
77 IWL_MVM_TM_ATTR_NOA_DURATION,
78 IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
79
80 /* keep last */
81 NUM_IWL_MVM_TM_ATTRS,
82 IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
83};
84
85/**
86 * enum iwl_mvm_testmode_commands - MVM testmode commands
87 * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
88 * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
89 */
90enum iwl_mvm_testmode_commands {
91 IWL_MVM_TM_CMD_SET_NOA,
92 IWL_MVM_TM_CMD_SET_BEACON_FILTER,
93};
94
95#endif /* __IWL_MVM_TESTMODE_H__ */
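
These attributes are parsed out of NL80211_ATTR_TESTDATA; a sketch of the matching netlink policy is below. The policy itself lives in the mvm C code rather than this header, and its exact name there is an assumption:

	/* Sketch: nla_policy covering the testmode attributes above. */
	static const struct nla_policy
	iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
		[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
		[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
		[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
	};
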
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 76a3c177e100..33cf56fdfc41 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -387,7 +387,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
387 387
388void iwl_mvm_protect_session(struct iwl_mvm *mvm, 388void iwl_mvm_protect_session(struct iwl_mvm *mvm,
389 struct ieee80211_vif *vif, 389 struct ieee80211_vif *vif,
390 u32 duration, u32 min_duration) 390 u32 duration, u32 min_duration,
391 u32 max_delay)
391{ 392{
392 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 393 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
393 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 394 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
@@ -426,7 +427,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
426 cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG)); 427 cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
427 428
428 time_cmd.max_frags = TE_V2_FRAG_NONE; 429 time_cmd.max_frags = TE_V2_FRAG_NONE;
429 time_cmd.max_delay = cpu_to_le32(500); 430 time_cmd.max_delay = cpu_to_le32(max_delay);
430 /* TODO: why do we need to interval = bi if it is not periodic? */ 431 /* TODO: why do we need to interval = bi if it is not periodic? */
431 time_cmd.interval = cpu_to_le32(1); 432 time_cmd.interval = cpu_to_le32(1);
432 time_cmd.duration = cpu_to_le32(duration); 433 time_cmd.duration = cpu_to_le32(duration);
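
Callers of iwl_mvm_protect_session() now pass the maximum start delay explicitly instead of relying on the previously hard-coded 500 TU. A hedged example call (the duration values are illustrative, not driver defaults):

	/* Sketch: requesting session protection with an explicit max delay. */
	iwl_mvm_protect_session(mvm, vif,
				200,	/* duration in TU (illustrative) */
				100,	/* min_duration in TU (illustrative) */
				500);	/* max_delay in TU (old hard-coded value) */
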
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index f86c51065ed3..d9c8d6cfa2db 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -123,6 +123,7 @@
123 * @duration: the duration of the session in TU. 123 * @duration: the duration of the session in TU.
124 * @min_duration: will start a new session if the current session will end 124 * @min_duration: will start a new session if the current session will end
125 * in less than min_duration. 125 * in less than min_duration.
126 * @max_delay: maximum delay before starting the time event (in TU)
126 * 127 *
127 * This function can be used to start a session protection which means that the 128 * This function can be used to start a session protection which means that the
128 * fw will stay on the channel for %duration_ms milliseconds. This function 129 * fw will stay on the channel for %duration_ms milliseconds. This function
@@ -133,7 +134,8 @@
133 */ 134 */
134void iwl_mvm_protect_session(struct iwl_mvm *mvm, 135void iwl_mvm_protect_session(struct iwl_mvm *mvm,
135 struct ieee80211_vif *vif, 136 struct ieee80211_vif *vif,
136 u32 duration, u32 min_duration); 137 u32 duration, u32 min_duration,
138 u32 max_delay);
137 139
138/** 140/**
139 * iwl_mvm_stop_session_protection - cancel the session protection. 141 * iwl_mvm_stop_session_protection - cancel the session protection.
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index e05440d90319..43d97c33a75a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -417,7 +417,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
417 417
418 spin_unlock(&mvmsta->lock); 418 spin_unlock(&mvmsta->lock);
419 419
420 if (txq_id < IWL_MVM_FIRST_AGG_QUEUE) 420 if (txq_id < mvm->first_agg_queue)
421 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); 421 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
422 422
423 return 0; 423 return 0;
@@ -511,16 +511,10 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
511} 511}
512#endif /* CONFIG_IWLWIFI_DEBUG */ 512#endif /* CONFIG_IWLWIFI_DEBUG */
513 513
514/** 514void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
515 * translate ucode response to mac80211 tx status control values 515 enum ieee80211_band band,
516 */ 516 struct ieee80211_tx_rate *r)
517static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
518 struct ieee80211_tx_info *info)
519{ 517{
520 struct ieee80211_tx_rate *r = &info->status.rates[0];
521
522 info->status.antenna =
523 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
524 if (rate_n_flags & RATE_HT_MCS_GF_MSK) 518 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
525 r->flags |= IEEE80211_TX_RC_GREEN_FIELD; 519 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
526 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { 520 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
@@ -549,10 +543,23 @@ static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
549 r->flags |= IEEE80211_TX_RC_VHT_MCS; 543 r->flags |= IEEE80211_TX_RC_VHT_MCS;
550 } else { 544 } else {
551 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, 545 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
552 info->band); 546 band);
553 } 547 }
554} 548}
555 549
550/**
551 * translate ucode response to mac80211 tx status control values
552 */
553static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
554 struct ieee80211_tx_info *info)
555{
556 struct ieee80211_tx_rate *r = &info->status.rates[0];
557
558 info->status.antenna =
559 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
560 iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
561}
562
556static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, 563static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
557 struct iwl_rx_packet *pkt) 564 struct iwl_rx_packet *pkt)
558{ 565{
@@ -602,11 +609,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
602 } 609 }
603 610
604 info->status.rates[0].count = tx_resp->failure_frame + 1; 611 info->status.rates[0].count = tx_resp->failure_frame + 1;
605 iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate), 612 iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
606 info); 613 info);
607 614
608 /* Single frame failure in an AMPDU queue => send BAR */ 615 /* Single frame failure in an AMPDU queue => send BAR */
609 if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE && 616 if (txq_id >= mvm->first_agg_queue &&
610 !(info->flags & IEEE80211_TX_STAT_ACK)) 617 !(info->flags & IEEE80211_TX_STAT_ACK))
611 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 618 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
612 619
@@ -619,7 +626,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
619 ieee80211_tx_status_ni(mvm->hw, skb); 626 ieee80211_tx_status_ni(mvm->hw, skb);
620 } 627 }
621 628
622 if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE) { 629 if (txq_id >= mvm->first_agg_queue) {
623 /* If this is an aggregation queue, we use the ssn since: 630 /* If this is an aggregation queue, we use the ssn since:
624 * ssn = wifi seq_num % 256. 631 * ssn = wifi seq_num % 256.
625 * The seq_ctl is the sequence control of the packet to which 632 * The seq_ctl is the sequence control of the packet to which
@@ -668,10 +675,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
668 iwl_mvm_check_ratid_empty(mvm, sta, tid); 675 iwl_mvm_check_ratid_empty(mvm, sta, tid);
669 spin_unlock_bh(&mvmsta->lock); 676 spin_unlock_bh(&mvmsta->lock);
670 } 677 }
671
672#ifdef CONFIG_PM_SLEEP
673 mvmsta->last_seq_ctl = seq_ctl;
674#endif
675 } else { 678 } else {
676 sta = NULL; 679 sta = NULL;
677 mvmsta = NULL; 680 mvmsta = NULL;
@@ -681,7 +684,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
681 * If the txq is not an AMPDU queue, there is no chance we freed 684 * If the txq is not an AMPDU queue, there is no chance we freed
682 * several skbs. Check that out... 685 * several skbs. Check that out...
683 */ 686 */
684 if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) && 687 if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
685 atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) { 688 atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
686 if (mvmsta) { 689 if (mvmsta) {
687 /* 690 /*
@@ -777,7 +780,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
777 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 780 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
778 struct ieee80211_sta *sta; 781 struct ieee80211_sta *sta;
779 782
780 if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_MVM_FIRST_AGG_QUEUE)) 783 if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
781 return; 784 return;
782 785
783 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) 786 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@ -904,8 +907,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
904 info->flags |= IEEE80211_TX_STAT_AMPDU; 907 info->flags |= IEEE80211_TX_STAT_AMPDU;
905 info->status.ampdu_ack_len = ba_notif->txed_2_done; 908 info->status.ampdu_ack_len = ba_notif->txed_2_done;
906 info->status.ampdu_len = ba_notif->txed; 909 info->status.ampdu_len = ba_notif->txed;
907 iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags, 910 iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
908 info); 911 info);
909 } 912 }
910 } 913 }
911 914
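
Splitting the rate translation out of the TX-status path means code that only has a firmware rate word and a band can reuse it without an ieee80211_tx_info. A sketch of such a caller; the surrounding context and the source of fw_rate are hypothetical:

	/* Sketch: stand-alone use of the exported rate translation helper. */
	struct ieee80211_tx_rate rate = {};
	u32 fw_rate = le32_to_cpu(tx_resp->initial_rate); /* hypothetical source */

	iwl_mvm_hwrate_to_tx_rate(fw_rate, IEEE80211_BAND_2GHZ, &rate);
	/* rate.idx and rate.flags now describe the rate in mac80211 terms */
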
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a9c357491434..ed69e9b78e82 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -466,7 +466,7 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
466 ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; 466 ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
467 len = img->sec[IWL_UCODE_SECTION_DATA].len; 467 len = img->sec[IWL_UCODE_SECTION_DATA].len;
468 468
469 buf = kzalloc(len, GFP_KERNEL); 469 buf = kzalloc(len, GFP_ATOMIC);
470 if (!buf) 470 if (!buf)
471 return; 471 return;
472 472
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 26108a1a29fa..941c0c88f982 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -268,7 +268,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
268#endif /* CONFIG_IWLDVM */ 268#endif /* CONFIG_IWLDVM */
269 269
270#if IS_ENABLED(CONFIG_IWLMVM) 270#if IS_ENABLED(CONFIG_IWLMVM)
271/* 7000 Series */ 271/* 7260 Series */
272 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, 272 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
273 {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, 273 {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
274 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, 274 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
@@ -350,6 +350,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
350 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, 350 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
351 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, 351 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
352 {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, 352 {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
353
354/* 7265 Series */
355 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
353#endif /* CONFIG_IWLMVM */ 356#endif /* CONFIG_IWLMVM */
354 357
355 {0} 358 {0}
@@ -391,7 +394,6 @@ out_free_drv:
391 iwl_drv_stop(trans_pcie->drv); 394 iwl_drv_stop(trans_pcie->drv);
392out_free_trans: 395out_free_trans:
393 iwl_trans_pcie_free(iwl_trans); 396 iwl_trans_pcie_free(iwl_trans);
394 pci_set_drvdata(pdev, NULL);
395 return ret; 397 return ret;
396} 398}
397 399
@@ -402,8 +404,6 @@ static void iwl_pci_remove(struct pci_dev *pdev)
402 404
403 iwl_drv_stop(trans_pcie->drv); 405 iwl_drv_stop(trans_pcie->drv);
404 iwl_trans_pcie_free(trans); 406 iwl_trans_pcie_free(trans);
405
406 pci_set_drvdata(pdev, NULL);
407} 407}
408 408
409#ifdef CONFIG_PM_SLEEP 409#ifdef CONFIG_PM_SLEEP
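
Clearing the PCI drvdata by hand is no longer needed here: the driver core clears it when probe fails or the device is unbound, so both paths can simply tear down the transport. As the code reads after this hunk, remove() is reduced to:

	static void iwl_pci_remove(struct pci_dev *pdev)
	{
		struct iwl_trans *trans = pci_get_drvdata(pdev);
		struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

		iwl_drv_stop(trans_pcie->drv);
		iwl_trans_pcie_free(trans);
	}
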
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index c3f904d422b0..5d9337bec67a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -220,6 +220,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
220 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 220 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
221 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 221 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
222 222
223 /* Clear the interrupt in APMG if the NIC is in RFKILL */
224 iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
225
223 set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); 226 set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
224 227
225out: 228out:
@@ -443,22 +446,138 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
443 return ret; 446 return ret;
444} 447}
445 448
449static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
450{
451 int shift_param;
452 u32 address;
453 int ret = 0;
454
455 if (cpu == 1) {
456 shift_param = 0;
457 address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
458 } else {
459 shift_param = 16;
460 address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
461 }
462
463 /* set CPU to started */
464 iwl_trans_set_bits_mask(trans,
465 CSR_UCODE_LOAD_STATUS_ADDR,
466 CSR_CPU_STATUS_LOADING_STARTED << shift_param,
467 1);
468
469 /* set last complete descriptor number */
470 iwl_trans_set_bits_mask(trans,
471 CSR_UCODE_LOAD_STATUS_ADDR,
472 CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
473 << shift_param,
474 1);
475
476 /* set last loaded block */
477 iwl_trans_set_bits_mask(trans,
478 CSR_UCODE_LOAD_STATUS_ADDR,
479 CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
480 << shift_param,
481 1);
482
483 /* image loading complete */
484 iwl_trans_set_bits_mask(trans,
485 CSR_UCODE_LOAD_STATUS_ADDR,
486 CSR_CPU_STATUS_LOADING_COMPLETED
487 << shift_param,
488 1);
489
490 /* set FH_TCSR_0_REG */
491 iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
492
493 /* verify image verification started */
494 ret = iwl_poll_bit(trans, address,
495 CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
496 CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
497 CSR_SECURE_TIME_OUT);
498 if (ret < 0) {
499 IWL_ERR(trans, "secure boot process didn't start\n");
500 return ret;
501 }
502
503 /* wait for image verification to complete */
504 ret = iwl_poll_bit(trans, address,
505 CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
506 CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
507 CSR_SECURE_TIME_OUT);
508
509 if (ret < 0) {
510 IWL_ERR(trans, "Time out on secure boot process\n");
511 return ret;
512 }
513
514 return 0;
515}
516
446static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, 517static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
447 const struct fw_img *image) 518 const struct fw_img *image)
448{ 519{
449 int i, ret = 0; 520 int i, ret = 0;
450 521
451 for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) { 522 IWL_DEBUG_FW(trans,
523 "working with %s image\n",
524 image->is_secure ? "Secured" : "Non Secured");
525 IWL_DEBUG_FW(trans,
526 "working with %s CPU\n",
527 image->is_dual_cpus ? "Dual" : "Single");
528
529 /* configure the ucode to be ready to get the secured image */
530 if (image->is_secure) {
531 /* set secure boot inspector addresses */
532 iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
533 iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
534
535 /* release CPU1 reset if secure inspector image burned in OTP */
536 iwl_write32(trans, CSR_RESET, 0);
537 }
538
539 /* load to FW the binary sections of CPU1 */
540 IWL_DEBUG_INFO(trans, "Loading CPU1\n");
541 for (i = 0;
542 i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
543 i++) {
452 if (!image->sec[i].data) 544 if (!image->sec[i].data)
453 break; 545 break;
454
455 ret = iwl_pcie_load_section(trans, i, &image->sec[i]); 546 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
456 if (ret) 547 if (ret)
457 return ret; 548 return ret;
458 } 549 }
459 550
460 /* Remove all resets to allow NIC to operate */ 551 /* configure the ucode to start secure process on CPU1 */
461 iwl_write32(trans, CSR_RESET, 0); 552 if (image->is_secure) {
553 /* config CPU1 to start secure protocol */
554 ret = iwl_pcie_secure_set(trans, 1);
555 if (ret)
556 return ret;
557 } else {
558 /* Remove all resets to allow NIC to operate */
559 iwl_write32(trans, CSR_RESET, 0);
560 }
561
562 if (image->is_dual_cpus) {
563 /* load to FW the binary sections of CPU2 */
564 IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
565 for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
566 i < IWL_UCODE_SECTION_MAX; i++) {
567 if (!image->sec[i].data)
568 break;
569 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
570 if (ret)
571 return ret;
572 }
573
574 if (image->is_secure) {
575 /* set CPU2 for secure protocol */
576 ret = iwl_pcie_secure_set(trans, 2);
577 if (ret)
578 return ret;
579 }
580 }
462 581
463 return 0; 582 return 0;
464} 583}
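
Condensed, the new ucode load sequence is: load the CPU1 sections, then either start secure verification on CPU1 or simply release the reset, and for dual-CPU images load the remaining sections and verify CPU2. A simplified sketch of that control flow using the helpers introduced above (the secure-inspector priming step is omitted):

	/* Sketch only: simplified control flow of the new load path. */
	static int load_given_ucode_sketch(struct iwl_trans *trans,
					   const struct fw_img *image)
	{
		int i, ret = 0;

		for (i = 0; i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU; i++) {
			if (!image->sec[i].data)
				break;
			ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
			if (ret)
				return ret;
		}

		if (image->is_secure)
			ret = iwl_pcie_secure_set(trans, 1); /* verify CPU1 */
		else
			iwl_write32(trans, CSR_RESET, 0);    /* release reset */
		if (ret)
			return ret;

		if (!image->is_dual_cpus)
			return 0;

		for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
		     i < IWL_UCODE_SECTION_MAX; i++) {
			if (!image->sec[i].data)
				break;
			ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
			if (ret)
				return ret;
		}

		return image->is_secure ? iwl_pcie_secure_set(trans, 2) : 0;
	}
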
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 1424335163b9..f644fcf861a8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1465,7 +1465,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1465 spin_unlock_bh(&txq->lock); 1465 spin_unlock_bh(&txq->lock);
1466} 1466}
1467 1467
1468#define HOST_COMPLETE_TIMEOUT (2 * HZ) 1468#define HOST_COMPLETE_TIMEOUT (2 * HZ)
1469#define COMMAND_POKE_TIMEOUT (HZ / 10)
1469 1470
1470static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, 1471static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
1471 struct iwl_host_cmd *cmd) 1472 struct iwl_host_cmd *cmd)
@@ -1493,6 +1494,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1493 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1494 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1494 int cmd_idx; 1495 int cmd_idx;
1495 int ret; 1496 int ret;
1497 int timeout = HOST_COMPLETE_TIMEOUT;
1496 1498
1497 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 1499 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
1498 get_cmd_string(trans_pcie, cmd->id)); 1500 get_cmd_string(trans_pcie, cmd->id));
@@ -1517,10 +1519,29 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1517 return ret; 1519 return ret;
1518 } 1520 }
1519 1521
1520 ret = wait_event_timeout(trans_pcie->wait_command_queue, 1522 while (timeout > 0) {
1521 !test_bit(STATUS_HCMD_ACTIVE, 1523 unsigned long flags;
1522 &trans_pcie->status), 1524
1523 HOST_COMPLETE_TIMEOUT); 1525 timeout -= COMMAND_POKE_TIMEOUT;
1526 ret = wait_event_timeout(trans_pcie->wait_command_queue,
1527 !test_bit(STATUS_HCMD_ACTIVE,
1528 &trans_pcie->status),
1529 COMMAND_POKE_TIMEOUT);
1530 if (ret)
1531 break;
1532 /* poke the device - it may have lost the command */
1533 if (iwl_trans_grab_nic_access(trans, true, &flags)) {
1534 iwl_trans_release_nic_access(trans, &flags);
1535 IWL_DEBUG_INFO(trans,
1536 "Tried to wake NIC for command %s\n",
1537 get_cmd_string(trans_pcie, cmd->id));
1538 } else {
1539 IWL_ERR(trans, "Failed to poke NIC for command %s\n",
1540 get_cmd_string(trans_pcie, cmd->id));
1541 break;
1542 }
1543 }
1544
1524 if (!ret) { 1545 if (!ret) {
1525 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { 1546 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
1526 struct iwl_txq *txq = 1547 struct iwl_txq *txq =
@@ -1541,6 +1562,9 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1541 "Clearing HCMD_ACTIVE for command %s\n", 1562 "Clearing HCMD_ACTIVE for command %s\n",
1542 get_cmd_string(trans_pcie, cmd->id)); 1563 get_cmd_string(trans_pcie, cmd->id));
1543 ret = -ETIMEDOUT; 1564 ret = -ETIMEDOUT;
1565
1566 iwl_op_mode_nic_error(trans->op_mode);
1567
1544 goto cancel; 1568 goto cancel;
1545 } 1569 }
1546 } 1570 }
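
Instead of one long wait, the sync command path now waits in short slices and, after each slice that times out, briefly grabs NIC access to nudge a device that may have dropped the command. The core of that loop, reduced to its shape (error reporting trimmed):

	/* Sketch: wait in COMMAND_POKE_TIMEOUT slices, poking the NIC
	 * between slices, until HOST_COMPLETE_TIMEOUT is used up. */
	int timeout = HOST_COMPLETE_TIMEOUT;

	while (timeout > 0) {
		unsigned long flags;

		timeout -= COMMAND_POKE_TIMEOUT;
		if (wait_event_timeout(trans_pcie->wait_command_queue,
				       !test_bit(STATUS_HCMD_ACTIVE,
						 &trans_pcie->status),
				       COMMAND_POKE_TIMEOUT))
			break;	/* command completed */

		/* still pending: wake the NIC in case it lost the command */
		if (!iwl_trans_grab_nic_access(trans, true, &flags))
			break;	/* cannot poke the device; give up early */
		iwl_trans_release_nic_access(trans, &flags);
	}
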
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
index c0f9e7e862f6..51b92b5df119 100644
--- a/drivers/net/wireless/libertas/firmware.c
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -53,6 +53,11 @@ static void main_firmware_cb(const struct firmware *firmware, void *context)
53 53
54 /* Firmware found! */ 54 /* Firmware found! */
55 lbs_fw_loaded(priv, 0, priv->helper_fw, firmware); 55 lbs_fw_loaded(priv, 0, priv->helper_fw, firmware);
56 if (priv->helper_fw) {
57 release_firmware (priv->helper_fw);
58 priv->helper_fw = NULL;
59 }
60 release_firmware (firmware);
56} 61}
57 62
58static void helper_firmware_cb(const struct firmware *firmware, void *context) 63static void helper_firmware_cb(const struct firmware *firmware, void *context)
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index c94dd6802672..ef8c98e21098 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -754,14 +754,14 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
754 if (ret == 0 && (card->model != MODEL_8305)) 754 if (ret == 0 && (card->model != MODEL_8305))
755 ret = if_cs_prog_real(card, mainfw); 755 ret = if_cs_prog_real(card, mainfw);
756 if (ret) 756 if (ret)
757 goto out; 757 return;
758 758
759 /* Now actually get the IRQ */ 759 /* Now actually get the IRQ */
760 ret = request_irq(card->p_dev->irq, if_cs_interrupt, 760 ret = request_irq(card->p_dev->irq, if_cs_interrupt,
761 IRQF_SHARED, DRV_NAME, card); 761 IRQF_SHARED, DRV_NAME, card);
762 if (ret) { 762 if (ret) {
763 pr_err("error in request_irq\n"); 763 pr_err("error in request_irq\n");
764 goto out; 764 return;
765 } 765 }
766 766
767 /* 767 /*
@@ -777,10 +777,6 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
777 pr_err("could not activate card\n"); 777 pr_err("could not activate card\n");
778 free_irq(card->p_dev->irq, card); 778 free_irq(card->p_dev->irq, card);
779 } 779 }
780
781out:
782 release_firmware(helper);
783 release_firmware(mainfw);
784} 780}
785 781
786 782
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 45578335e420..991238afd1b6 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -708,20 +708,16 @@ static void if_sdio_do_prog_firmware(struct lbs_private *priv, int ret,
708 708
709 ret = if_sdio_prog_helper(card, helper); 709 ret = if_sdio_prog_helper(card, helper);
710 if (ret) 710 if (ret)
711 goto out; 711 return;
712 712
713 lbs_deb_sdio("Helper firmware loaded\n"); 713 lbs_deb_sdio("Helper firmware loaded\n");
714 714
715 ret = if_sdio_prog_real(card, mainfw); 715 ret = if_sdio_prog_real(card, mainfw);
716 if (ret) 716 if (ret)
717 goto out; 717 return;
718 718
719 lbs_deb_sdio("Firmware loaded\n"); 719 lbs_deb_sdio("Firmware loaded\n");
720 if_sdio_finish_power_on(card); 720 if_sdio_finish_power_on(card);
721
722out:
723 release_firmware(helper);
724 release_firmware(mainfw);
725} 721}
726 722
727static int if_sdio_prog_firmware(struct if_sdio_card *card) 723static int if_sdio_prog_firmware(struct if_sdio_card *card)
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 5d39ec880d84..83669151bb82 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1094,11 +1094,7 @@ static int if_spi_init_card(struct if_spi_card *card)
1094 goto out; 1094 goto out;
1095 1095
1096out: 1096out:
1097 release_firmware(helper);
1098 release_firmware(mainfw);
1099
1100 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); 1097 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1101
1102 return err; 1098 return err;
1103} 1099}
1104 1100
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 27980778d992..dff08a2896a3 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -844,7 +844,7 @@ static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
844 cardp->fw = fw; 844 cardp->fw = fw;
845 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) { 845 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
846 ret = -EINVAL; 846 ret = -EINVAL;
847 goto release_fw; 847 goto done;
848 } 848 }
849 849
850 /* Cancel any pending usb business */ 850 /* Cancel any pending usb business */
@@ -861,7 +861,7 @@ restart:
861 if (if_usb_submit_rx_urb_fwload(cardp) < 0) { 861 if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
862 lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n"); 862 lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
863 ret = -EIO; 863 ret = -EIO;
864 goto release_fw; 864 goto done;
865 } 865 }
866 866
867 cardp->bootcmdresp = 0; 867 cardp->bootcmdresp = 0;
@@ -883,14 +883,14 @@ restart:
883 usb_kill_urb(cardp->tx_urb); 883 usb_kill_urb(cardp->tx_urb);
884 if (if_usb_submit_rx_urb(cardp) < 0) 884 if (if_usb_submit_rx_urb(cardp) < 0)
885 ret = -EIO; 885 ret = -EIO;
886 goto release_fw; 886 goto done;
887 } else if (cardp->bootcmdresp <= 0) { 887 } else if (cardp->bootcmdresp <= 0) {
888 if (--reset_count >= 0) { 888 if (--reset_count >= 0) {
889 if_usb_reset_device(cardp); 889 if_usb_reset_device(cardp);
890 goto restart; 890 goto restart;
891 } 891 }
892 ret = -EIO; 892 ret = -EIO;
893 goto release_fw; 893 goto done;
894 } 894 }
895 895
896 i = 0; 896 i = 0;
@@ -921,14 +921,14 @@ restart:
921 921
922 pr_info("FW download failure, time = %d ms\n", i * 100); 922 pr_info("FW download failure, time = %d ms\n", i * 100);
923 ret = -EIO; 923 ret = -EIO;
924 goto release_fw; 924 goto done;
925 } 925 }
926 926
927 cardp->priv->fw_ready = 1; 927 cardp->priv->fw_ready = 1;
928 if_usb_submit_rx_urb(cardp); 928 if_usb_submit_rx_urb(cardp);
929 929
930 if (lbs_start_card(priv)) 930 if (lbs_start_card(priv))
931 goto release_fw; 931 goto done;
932 932
933 if_usb_setup_firmware(priv); 933 if_usb_setup_firmware(priv);
934 934
@@ -939,11 +939,8 @@ restart:
939 if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL)) 939 if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
940 priv->ehs_remove_supported = false; 940 priv->ehs_remove_supported = false;
941 941
942 release_fw:
943 release_firmware(cardp->fw);
944 cardp->fw = NULL;
945
946 done: 942 done:
943 cardp->fw = NULL;
947 lbs_deb_leave(LBS_DEB_USB); 944 lbs_deb_leave(LBS_DEB_USB);
948} 945}
949 946
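
Taken together, the libertas hunks above move ownership of the firmware images into the common firmware.c callback: it now releases both the helper and the main image after lbs_fw_loaded(), so the per-bus prog_firmware callbacks must not call release_firmware() themselves. A minimal shape for such a callback under the new rule (the foo_* helpers are hypothetical, bus details elided):

	/* Sketch: a bus prog_firmware callback after this change; the
	 * firmware pointers are borrowed and freed by the generic code. */
	static void foo_prog_firmware(struct lbs_private *priv, int ret,
				      const struct firmware *helper,
				      const struct firmware *mainfw)
	{
		if (ret)
			return;			/* nothing was loaded */

		if (foo_prog_helper(priv, helper))
			return;
		if (foo_prog_real(priv, mainfw))
			return;
		/* note: no release_firmware() here any more */
	}
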
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index a18b0051a745..006b8bcb2e31 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -58,11 +58,11 @@ config RT61PCI
58 58
59config RT2800PCI 59config RT2800PCI
60 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" 60 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
61 depends on PCI || SOC_RT288X || SOC_RT305X 61 depends on PCI
62 select RT2800_LIB 62 select RT2800_LIB
63 select RT2800_LIB_MMIO
63 select RT2X00_LIB_MMIO 64 select RT2X00_LIB_MMIO
64 select RT2X00_LIB_PCI if PCI 65 select RT2X00_LIB_PCI
65 select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
66 select RT2X00_LIB_FIRMWARE 66 select RT2X00_LIB_FIRMWARE
67 select RT2X00_LIB_CRYPTO 67 select RT2X00_LIB_CRYPTO
68 select CRC_CCITT 68 select CRC_CCITT
@@ -199,9 +199,30 @@ config RT2800USB_UNKNOWN
199 199
200endif 200endif
201 201
202config RT2800SOC
203 tristate "Ralink WiSoC support"
204 depends on SOC_RT288X || SOC_RT305X
205 select RT2X00_LIB_SOC
206 select RT2X00_LIB_MMIO
207 select RT2X00_LIB_CRYPTO
208 select RT2X00_LIB_FIRMWARE
209 select RT2800_LIB
210 select RT2800_LIB_MMIO
211 ---help---
212 This adds support for Ralink WiSoC devices.
213 Supported chips: RT2880, RT3050, RT3052, RT3350, RT3352.
214
215 When compiled as a module, this driver will be called rt2800soc.
216
217
202config RT2800_LIB 218config RT2800_LIB
203 tristate 219 tristate
204 220
221config RT2800_LIB_MMIO
222 tristate
223 select RT2X00_LIB_MMIO
224 select RT2800_LIB
225
205config RT2X00_LIB_MMIO 226config RT2X00_LIB_MMIO
206 tristate 227 tristate
207 228
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index f069d8bc5b67..24a66015a495 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
14obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o 14obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
15obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o 15obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
16obj-$(CONFIG_RT2800_LIB) += rt2800lib.o 16obj-$(CONFIG_RT2800_LIB) += rt2800lib.o
17obj-$(CONFIG_RT2800_LIB_MMIO) += rt2800mmio.o
17obj-$(CONFIG_RT2400PCI) += rt2400pci.o 18obj-$(CONFIG_RT2400PCI) += rt2400pci.o
18obj-$(CONFIG_RT2500PCI) += rt2500pci.o 19obj-$(CONFIG_RT2500PCI) += rt2500pci.o
19obj-$(CONFIG_RT61PCI) += rt61pci.o 20obj-$(CONFIG_RT61PCI) += rt61pci.o
@@ -21,3 +22,4 @@ obj-$(CONFIG_RT2800PCI) += rt2800pci.o
21obj-$(CONFIG_RT2500USB) += rt2500usb.o 22obj-$(CONFIG_RT2500USB) += rt2500usb.o
22obj-$(CONFIG_RT73USB) += rt73usb.o 23obj-$(CONFIG_RT73USB) += rt73usb.o
23obj-$(CONFIG_RT2800USB) += rt2800usb.o 24obj-$(CONFIG_RT2800USB) += rt2800usb.o
25obj-$(CONFIG_RT2800SOC) += rt2800soc.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 3d53a09da5a1..38ed9a3e44c8 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1261 */ 1261 */
1262 rxdesc->timestamp = ((u64)rx_high << 32) | rx_low; 1262 rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
1263 rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08; 1263 rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
1264 rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) - 1264 rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
1265 entry->queue->rt2x00dev->rssi_offset; 1265 entry->queue->rt2x00dev->rssi_offset;
1266 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1266 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1267 1267
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index aa8789423937..c5738f14c4ba 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -7450,7 +7450,6 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
7450 char *default_power2; 7450 char *default_power2;
7451 char *default_power3; 7451 char *default_power3;
7452 unsigned int i; 7452 unsigned int i;
7453 u16 eeprom;
7454 u32 reg; 7453 u32 reg;
7455 7454
7456 /* 7455 /*
@@ -7499,46 +7498,48 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
7499 rt2x00dev->hw->max_report_rates = 7; 7498 rt2x00dev->hw->max_report_rates = 7;
7500 rt2x00dev->hw->max_rate_tries = 1; 7499 rt2x00dev->hw->max_rate_tries = 1;
7501 7500
7502 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
7503
7504 /* 7501 /*
7505 * Initialize hw_mode information. 7502 * Initialize hw_mode information.
7506 */ 7503 */
7507 spec->supported_bands = SUPPORT_BAND_2GHZ;
 	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
 
-	if (rt2x00_rf(rt2x00dev, RF2820) ||
-	    rt2x00_rf(rt2x00dev, RF2720)) {
+	switch (rt2x00dev->chip.rf) {
+	case RF2720:
+	case RF2820:
 		spec->num_channels = 14;
 		spec->channels = rf_vals;
-	} else if (rt2x00_rf(rt2x00dev, RF2850) ||
-		   rt2x00_rf(rt2x00dev, RF2750)) {
-		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+		break;
+
+	case RF2750:
+	case RF2850:
 		spec->num_channels = ARRAY_SIZE(rf_vals);
 		spec->channels = rf_vals;
-	} else if (rt2x00_rf(rt2x00dev, RF3020) ||
-		   rt2x00_rf(rt2x00dev, RF2020) ||
-		   rt2x00_rf(rt2x00dev, RF3021) ||
-		   rt2x00_rf(rt2x00dev, RF3022) ||
-		   rt2x00_rf(rt2x00dev, RF3070) ||
-		   rt2x00_rf(rt2x00dev, RF3290) ||
-		   rt2x00_rf(rt2x00dev, RF3320) ||
-		   rt2x00_rf(rt2x00dev, RF3322) ||
-		   rt2x00_rf(rt2x00dev, RF5360) ||
-		   rt2x00_rf(rt2x00dev, RF5370) ||
-		   rt2x00_rf(rt2x00dev, RF5372) ||
-		   rt2x00_rf(rt2x00dev, RF5390) ||
-		   rt2x00_rf(rt2x00dev, RF5392)) {
+		break;
+
+	case RF2020:
+	case RF3020:
+	case RF3021:
+	case RF3022:
+	case RF3070:
+	case RF3290:
+	case RF3320:
+	case RF3322:
+	case RF5360:
+	case RF5370:
+	case RF5372:
+	case RF5390:
+	case RF5392:
 		spec->num_channels = 14;
 		spec->channels = rf_vals_3x;
-	} else if (rt2x00_rf(rt2x00dev, RF3052) ||
-		   rt2x00_rf(rt2x00dev, RF3053)) {
-		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+		break;
+
+	case RF3052:
+	case RF3053:
 		spec->num_channels = ARRAY_SIZE(rf_vals_3x);
 		spec->channels = rf_vals_3x;
-	} else if (rt2x00_rf(rt2x00dev, RF5592)) {
-		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+		break;
 
+	case RF5592:
 		rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
 		if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
 			spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
@@ -7547,11 +7548,16 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 			spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
 			spec->channels = rf_vals_5592_xtal20;
 		}
+		break;
 	}
 
 	if (WARN_ON_ONCE(!spec->channels))
 		return -ENODEV;
 
+	spec->supported_bands = SUPPORT_BAND_2GHZ;
+	if (spec->num_channels > 14)
+		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+
 	/*
 	 * Initialize HT information.
 	 */
@@ -7566,22 +7572,21 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 		IEEE80211_HT_CAP_SGI_20 |
 		IEEE80211_HT_CAP_SGI_40;
 
-	if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) >= 2)
+	if (rt2x00dev->default_ant.tx_chain_num >= 2)
 		spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
 
-	spec->ht.cap |=
-		rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) <<
-		IEEE80211_HT_CAP_RX_STBC_SHIFT;
+	spec->ht.cap |= rt2x00dev->default_ant.rx_chain_num <<
+			IEEE80211_HT_CAP_RX_STBC_SHIFT;
 
 	spec->ht.ampdu_factor = 3;
 	spec->ht.ampdu_density = 4;
 	spec->ht.mcs.tx_params =
 		IEEE80211_HT_MCS_TX_DEFINED |
 		IEEE80211_HT_MCS_TX_RX_DIFF |
-		((rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) - 1) <<
+		((rt2x00dev->default_ant.tx_chain_num - 1) <<
 		 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
 
-	switch (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH)) {
+	switch (rt2x00dev->default_ant.rx_chain_num) {
 	case 3:
 		spec->ht.mcs.rx_mask[2] = 0xff;
 	case 2:
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
new file mode 100644
index 000000000000..ae152280e071
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.c
@@ -0,0 +1,873 @@
1/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
2 * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
3 * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
4 * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
5 * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
6 * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
7 * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
8 * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
9 * <http://rt2x00.serialmonkey.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the
23 * Free Software Foundation, Inc.,
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 */
26
27/* Module: rt2800mmio
28 * Abstract: rt2800 MMIO device routines.
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/export.h>
34
35#include "rt2x00.h"
36#include "rt2x00mmio.h"
37#include "rt2800.h"
38#include "rt2800lib.h"
39#include "rt2800mmio.h"
40
41/*
42 * TX descriptor initialization
43 */
44__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
45{
46 return (__le32 *) entry->skb->data;
47}
48EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
49
50void rt2800mmio_write_tx_desc(struct queue_entry *entry,
51 struct txentry_desc *txdesc)
52{
53 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
54 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
55 __le32 *txd = entry_priv->desc;
56 u32 word;
57 const unsigned int txwi_size = entry->queue->winfo_size;
58
59 /*
 60 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
 61 * must contain a TXWI structure + 802.11 header + padding + 802.11
 62 * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI and
 63 * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
 64 * data. This means that LAST_SEC0 is always 0.
65 */
66
67 /*
68 * Initialize TX descriptor
69 */
70 word = 0;
71 rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
72 rt2x00_desc_write(txd, 0, word);
73
74 word = 0;
75 rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
76 rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
77 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
78 rt2x00_set_field32(&word, TXD_W1_BURST,
79 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
80 rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
81 rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
82 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
83 rt2x00_desc_write(txd, 1, word);
84
85 word = 0;
86 rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
87 skbdesc->skb_dma + txwi_size);
88 rt2x00_desc_write(txd, 2, word);
89
90 word = 0;
91 rt2x00_set_field32(&word, TXD_W3_WIV,
92 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
93 rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
94 rt2x00_desc_write(txd, 3, word);
95
96 /*
97 * Register descriptor details in skb frame descriptor.
98 */
99 skbdesc->desc = txd;
100 skbdesc->desc_len = TXD_DESC_SIZE;
101}
102EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
103
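The comment above pins the scatter/gather split: segment 0 (SD_PTR0/SD_LEN0) carries only the TXWI, while segment 1 (SD_PTR1/SD_LEN1) carries the 802.11 header, padding and payload, so LAST_SEC0 can stay 0. A minimal sketch of that layout, assuming a hypothetical helper and struct (dma and len stand in for skbdesc->skb_dma and entry->skb->len above):

#include <linux/types.h>

struct txd_sg_layout {
	dma_addr_t ptr0;	/* SD_PTR0: start of the TXWI              */
	u32 len0;		/* SD_LEN0: always the TXWI size           */
	dma_addr_t ptr1;	/* SD_PTR1: 802.11 header + padding + data */
	u32 len1;		/* SD_LEN1: full frame length              */
};

static inline struct txd_sg_layout txd_layout(dma_addr_t dma, u32 len,
					      u32 txwi_size)
{
	/* Segment 0 never terminates the frame, hence LAST_SEC0 == 0. */
	struct txd_sg_layout l = {
		.ptr0 = dma,			.len0 = txwi_size,
		.ptr1 = dma + txwi_size,	.len1 = len,
	};
	return l;
}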
104/*
105 * RX control handlers
106 */
107void rt2800mmio_fill_rxdone(struct queue_entry *entry,
108 struct rxdone_entry_desc *rxdesc)
109{
110 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
111 __le32 *rxd = entry_priv->desc;
112 u32 word;
113
114 rt2x00_desc_read(rxd, 3, &word);
115
116 if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
117 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
118
119 /*
120 * Unfortunately we don't know the cipher type used during
 121 * decryption. This prevents us from providing
122 * correct statistics through debugfs.
123 */
124 rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
125
126 if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
127 /*
128 * Hardware has stripped IV/EIV data from 802.11 frame during
129 * decryption. Unfortunately the descriptor doesn't contain
130 * any fields with the EIV/IV data either, so they can't
131 * be restored by rt2x00lib.
132 */
133 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
134
135 /*
136 * The hardware has already checked the Michael Mic and has
137 * stripped it from the frame. Signal this to mac80211.
138 */
139 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
140
141 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
142 rxdesc->flags |= RX_FLAG_DECRYPTED;
143 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
144 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
145 }
146
147 if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
148 rxdesc->dev_flags |= RXDONE_MY_BSS;
149
150 if (rt2x00_get_field32(word, RXD_W3_L2PAD))
151 rxdesc->dev_flags |= RXDONE_L2PAD;
152
153 /*
154 * Process the RXWI structure that is at the start of the buffer.
155 */
156 rt2800_process_rxwi(entry, rxdesc);
157}
158EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
159
160/*
161 * Interrupt functions.
162 */
163static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
164{
165 struct ieee80211_conf conf = { .flags = 0 };
166 struct rt2x00lib_conf libconf = { .conf = &conf };
167
168 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
169}
170
171static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
172{
173 __le32 *txwi;
174 u32 word;
175 int wcid, tx_wcid;
176
177 wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
178
179 txwi = rt2800_drv_get_txwi(entry);
180 rt2x00_desc_read(txwi, 1, &word);
181 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
182
183 return (tx_wcid == wcid);
184}
185
186static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
187{
188 u32 status = *(u32 *)data;
189
190 /*
191 * rt2800pci hardware might reorder frames when exchanging traffic
192 * with multiple BA enabled STAs.
193 *
194 * For example, a tx queue
195 * [ STA1 | STA2 | STA1 | STA2 ]
196 * can result in tx status reports
197 * [ STA1 | STA1 | STA2 | STA2 ]
198 * when the hw decides to aggregate the frames for STA1 into one AMPDU.
199 *
200 * To mitigate this effect, associate the tx status to the first frame
201 * in the tx queue with a matching wcid.
202 */
203 if (rt2800mmio_txdone_entry_check(entry, status) &&
204 !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
205 /*
206 * Got a matching frame, associate the tx status with
207 * the frame
208 */
209 entry->status = status;
210 set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
211 return true;
212 }
213
214 /* Check the next frame */
215 return false;
216}
217
218static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
219{
220 u32 status = *(u32 *)data;
221
222 /*
223 * Find the first frame without tx status and assign this status to it
224 * regardless if it matches or not.
225 */
226 if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
227 /*
 228 * Got the first frame without a tx status, associate the
 229 * tx status with it
230 */
231 entry->status = status;
232 set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
233 return true;
234 }
235
236 /* Check the next frame */
237 return false;
238}
239static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
240 void *data)
241{
242 if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
243 rt2800_txdone_entry(entry, entry->status,
244 rt2800mmio_get_txwi(entry));
245 return false;
246 }
247
248 /* No more frames to release */
249 return true;
250}
251
252static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
253{
254 struct data_queue *queue;
255 u32 status;
256 u8 qid;
257 int max_tx_done = 16;
258
259 while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
260 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
261 if (unlikely(qid >= QID_RX)) {
262 /*
263 * Unknown queue, this shouldn't happen. Just drop
264 * this tx status.
265 */
266 rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
267 qid);
268 break;
269 }
270
271 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
272 if (unlikely(queue == NULL)) {
273 /*
274 * The queue is NULL, this shouldn't happen. Stop
275 * processing here and drop the tx status
276 */
277 rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
278 qid);
279 break;
280 }
281
282 if (unlikely(rt2x00queue_empty(queue))) {
283 /*
284 * The queue is empty. Stop processing here
285 * and drop the tx status.
286 */
287 rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
288 qid);
289 break;
290 }
291
292 /*
293 * Let's associate this tx status with the first
294 * matching frame.
295 */
296 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
297 Q_INDEX, &status,
298 rt2800mmio_txdone_find_entry)) {
299 /*
300 * We cannot match the tx status to any frame, so just
301 * use the first one.
302 */
303 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
304 Q_INDEX, &status,
305 rt2800mmio_txdone_match_first)) {
306 rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
307 qid);
308 break;
309 }
310 }
311
312 /*
313 * Release all frames with a valid tx status.
314 */
315 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
316 Q_INDEX, NULL,
317 rt2800mmio_txdone_release_entries);
318
319 if (--max_tx_done == 0)
320 break;
321 }
322
323 return !max_tx_done;
324}
325
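Taken together, the callbacks above implement a two-pass association before release: pass one attaches the status to the oldest pending frame with the same WCID, pass two falls back to the oldest pending frame of any WCID, and the release pass then frees everything from the head of the queue that has a status. A rough standalone model of the two matching passes, using a hypothetical array of pending frames ordered oldest-first rather than the rt2x00 queue iterators:

#include <linux/types.h>

struct pending_frame {
	int wcid;
	bool has_status;
	u32 status;
};

/* Returns the index that received the status, or -1 if none accepted it. */
static int assign_tx_status(struct pending_frame *q, int n, int wcid,
			    u32 status)
{
	int i;

	for (i = 0; i < n; i++)		/* pass 1: same WCID, still pending */
		if (!q[i].has_status && q[i].wcid == wcid)
			goto found;
	for (i = 0; i < n; i++)		/* pass 2: first pending frame      */
		if (!q[i].has_status)
			goto found;
	return -1;			/* nothing pending, drop the report */
found:
	q[i].status = status;
	q[i].has_status = true;
	return i;
}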
326static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
327 struct rt2x00_field32 irq_field)
328{
329 u32 reg;
330
331 /*
332 * Enable a single interrupt. The interrupt mask register
333 * access needs locking.
334 */
335 spin_lock_irq(&rt2x00dev->irqmask_lock);
336 rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
337 rt2x00_set_field32(&reg, irq_field, 1);
338 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
339 spin_unlock_irq(&rt2x00dev->irqmask_lock);
340}
341
342void rt2800mmio_txstatus_tasklet(unsigned long data)
343{
344 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
345 if (rt2800mmio_txdone(rt2x00dev))
346 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
347
348 /*
349 * No need to enable the tx status interrupt here as we always
350 * leave it enabled to minimize the possibility of a tx status
351 * register overflow. See comment in interrupt handler.
352 */
353}
354EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
355
356void rt2800mmio_pretbtt_tasklet(unsigned long data)
357{
358 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
359 rt2x00lib_pretbtt(rt2x00dev);
360 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
361 rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
362}
363EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
364
365void rt2800mmio_tbtt_tasklet(unsigned long data)
366{
367 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
368 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
369 u32 reg;
370
371 rt2x00lib_beacondone(rt2x00dev);
372
373 if (rt2x00dev->intf_ap_count) {
374 /*
375 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
376 * causing beacon skew and as a result causing problems with
377 * some powersaving clients over time. Shorten the beacon
378 * interval every 64 beacons by 64us to mitigate this effect.
379 */
380 if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
381 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
382 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
383 (rt2x00dev->beacon_int * 16) - 1);
384 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
385 } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
386 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
387 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
388 (rt2x00dev->beacon_int * 16));
389 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
390 }
391 drv_data->tbtt_tick++;
392 drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
393 }
394
395 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
396 rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
397}
398EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
399
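The 64 beacons / 64us compensation above works out because BCN_TIME_CFG_BEACON_INTERVAL is programmed in 1/16 TU steps (rt2x00dev->beacon_int * 16 above), and 1/16 of a 1024us TU is 64us; a 1us error per TBTT therefore accumulates to exactly one register step over the 64-beacon window, so one interval is programmed a single step short and then restored. A small check of that arithmetic, with constants mirroring the comment rather than driver defines:

#define TU_US			1024		/* one Time Unit in us        */
#define BEACON_FIELD_UNIT_US	(TU_US / 16)	/* register resolution: 64 us */
#define DRIFT_US_PER_TBTT	1		/* per the comment above      */
#define BEACONS_PER_CORRECTION	64		/* the BCN_TBTT_OFFSET window */

/* Accumulated drift per correction window, in register units (== 1). */
static inline int tbtt_correction_units(void)
{
	return (DRIFT_US_PER_TBTT * BEACONS_PER_CORRECTION) /
	       BEACON_FIELD_UNIT_US;
}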
400void rt2800mmio_rxdone_tasklet(unsigned long data)
401{
402 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
403 if (rt2x00mmio_rxdone(rt2x00dev))
404 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
405 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
406 rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
407}
408EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
409
410void rt2800mmio_autowake_tasklet(unsigned long data)
411{
412 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
413 rt2800mmio_wakeup(rt2x00dev);
414 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
415 rt2800mmio_enable_interrupt(rt2x00dev,
416 INT_MASK_CSR_AUTO_WAKEUP);
417}
418EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
419
420static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
421{
422 u32 status;
423 int i;
424
425 /*
 426 * The TX_FIFO_STATUS interrupt needs special care. We have to
 427 * read TX_STA_FIFO immediately, as otherwise
428 * the register can overflow and we would lose status reports.
429 *
430 * Hence, read the TX_STA_FIFO register and copy all tx status
431 * reports into a kernel FIFO which is handled in the txstatus
432 * tasklet. We use a tasklet to process the tx status reports
433 * because we can schedule the tasklet multiple times (when the
434 * interrupt fires again during tx status processing).
435 *
436 * Furthermore we don't disable the TX_FIFO_STATUS
437 * interrupt here but leave it enabled so that the TX_STA_FIFO
438 * can also be read while the tx status tasklet gets executed.
439 *
440 * Since we have only one producer and one consumer we don't
441 * need to lock the kfifo.
442 */
443 for (i = 0; i < rt2x00dev->tx->limit; i++) {
444 rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
445
446 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
447 break;
448
449 if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
450 rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
451 break;
452 }
453 }
454
455 /* Schedule the tasklet for processing the tx status. */
456 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
457}
458
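The no-locking claim above holds because the fifo has exactly one producer (this interrupt handler) and one consumer (the txstatus tasklet). A bare-bones single-producer/single-consumer ring showing the same idea; this is only a sketch with the memory barriers omitted, the driver relies on the kernel's kfifo for the actual ordering guarantees:

#include <linux/types.h>

#define STA_RING_SIZE 64		/* power of two */

struct sta_ring {
	u32 buf[STA_RING_SIZE];
	unsigned int head;	/* written only by the producer (IRQ)     */
	unsigned int tail;	/* written only by the consumer (tasklet) */
};

static bool sta_ring_put(struct sta_ring *r, u32 status)	/* IRQ side */
{
	if (r->head - r->tail == STA_RING_SIZE)
		return false;					/* overrun */
	r->buf[r->head % STA_RING_SIZE] = status;
	r->head++;
	return true;
}

static bool sta_ring_get(struct sta_ring *r, u32 *status)	/* tasklet side */
{
	if (r->head == r->tail)
		return false;					/* empty */
	*status = r->buf[r->tail % STA_RING_SIZE];
	r->tail++;
	return true;
}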
459irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
460{
461 struct rt2x00_dev *rt2x00dev = dev_instance;
462 u32 reg, mask;
463
464 /* Read status and ACK all interrupts */
465 rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
466 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
467
468 if (!reg)
469 return IRQ_NONE;
470
471 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
472 return IRQ_HANDLED;
473
474 /*
475 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
476 * for interrupts and interrupt masks we can just use the value of
477 * INT_SOURCE_CSR to create the interrupt mask.
478 */
479 mask = ~reg;
480
481 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
482 rt2800mmio_txstatus_interrupt(rt2x00dev);
483 /*
484 * Never disable the TX_FIFO_STATUS interrupt.
485 */
486 rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
487 }
488
489 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
490 tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
491
492 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
493 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
494
495 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
496 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
497
498 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
499 tasklet_schedule(&rt2x00dev->autowake_tasklet);
500
501 /*
502 * Disable all interrupts for which a tasklet was scheduled right now,
503 * the tasklet will reenable the appropriate interrupts.
504 */
505 spin_lock(&rt2x00dev->irqmask_lock);
506 rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
507 reg &= mask;
508 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
509 spin_unlock(&rt2x00dev->irqmask_lock);
510
511 return IRQ_HANDLED;
512}
513EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
514
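Since INT_SOURCE_CSR and INT_MASK_CSR share one bit layout, the handler above can derive the next mask straight from the fired sources: every source that fired gets masked until its tasklet re-enables it, except the TX status FIFO bit, which is never masked. A compact sketch of that step as a standalone helper (not a driver function; tx_fifo_bit stands for the INT_MASK_CSR_TX_FIFO_STATUS bit):

#include <linux/types.h>

static inline u32 next_irq_mask(u32 old_mask, u32 fired, u32 tx_fifo_bit)
{
	u32 mask = old_mask & ~fired;	/* mask whatever just fired          */

	if (fired & tx_fifo_bit)
		mask |= tx_fifo_bit;	/* but keep TX status always enabled */
	return mask;
}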
515void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
516 enum dev_state state)
517{
518 u32 reg;
519 unsigned long flags;
520
521 /*
 522 * When interrupts are being enabled, first clear the interrupt
 523 * source register to ensure a clean state.
524 */
525 if (state == STATE_RADIO_IRQ_ON) {
526 rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
527 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
528 }
529
530 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
531 reg = 0;
532 if (state == STATE_RADIO_IRQ_ON) {
533 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
534 rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
535 rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
536 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
537 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
538 }
539 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
540 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
541
542 if (state == STATE_RADIO_IRQ_OFF) {
543 /*
544 * Wait for possibly running tasklets to finish.
545 */
546 tasklet_kill(&rt2x00dev->txstatus_tasklet);
547 tasklet_kill(&rt2x00dev->rxdone_tasklet);
548 tasklet_kill(&rt2x00dev->autowake_tasklet);
549 tasklet_kill(&rt2x00dev->tbtt_tasklet);
550 tasklet_kill(&rt2x00dev->pretbtt_tasklet);
551 }
552}
553EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
554
555/*
556 * Queue handlers.
557 */
558void rt2800mmio_start_queue(struct data_queue *queue)
559{
560 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
561 u32 reg;
562
563 switch (queue->qid) {
564 case QID_RX:
565 rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
566 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
567 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
568 break;
569 case QID_BEACON:
570 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
571 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
572 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
573 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
574 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
575
576 rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
577 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
578 rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
579 break;
580 default:
581 break;
582 }
583}
584EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
585
586void rt2800mmio_kick_queue(struct data_queue *queue)
587{
588 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
589 struct queue_entry *entry;
590
591 switch (queue->qid) {
592 case QID_AC_VO:
593 case QID_AC_VI:
594 case QID_AC_BE:
595 case QID_AC_BK:
596 entry = rt2x00queue_get_entry(queue, Q_INDEX);
597 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
598 entry->entry_idx);
599 break;
600 case QID_MGMT:
601 entry = rt2x00queue_get_entry(queue, Q_INDEX);
602 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
603 entry->entry_idx);
604 break;
605 default:
606 break;
607 }
608}
609EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
610
611void rt2800mmio_stop_queue(struct data_queue *queue)
612{
613 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
614 u32 reg;
615
616 switch (queue->qid) {
617 case QID_RX:
618 rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
619 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
620 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
621 break;
622 case QID_BEACON:
623 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
624 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
625 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
626 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
627 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
628
629 rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
630 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
631 rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
632
633 /*
634 * Wait for current invocation to finish. The tasklet
635 * won't be scheduled anymore afterwards since we disabled
636 * the TBTT and PRE TBTT timer.
637 */
638 tasklet_kill(&rt2x00dev->tbtt_tasklet);
639 tasklet_kill(&rt2x00dev->pretbtt_tasklet);
640
641 break;
642 default:
643 break;
644 }
645}
646EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
647
648void rt2800mmio_queue_init(struct data_queue *queue)
649{
650 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
651 unsigned short txwi_size, rxwi_size;
652
653 rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
654
655 switch (queue->qid) {
656 case QID_RX:
657 queue->limit = 128;
658 queue->data_size = AGGREGATION_SIZE;
659 queue->desc_size = RXD_DESC_SIZE;
660 queue->winfo_size = rxwi_size;
661 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
662 break;
663
664 case QID_AC_VO:
665 case QID_AC_VI:
666 case QID_AC_BE:
667 case QID_AC_BK:
668 queue->limit = 64;
669 queue->data_size = AGGREGATION_SIZE;
670 queue->desc_size = TXD_DESC_SIZE;
671 queue->winfo_size = txwi_size;
672 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
673 break;
674
675 case QID_BEACON:
676 queue->limit = 8;
677 queue->data_size = 0; /* No DMA required for beacons */
678 queue->desc_size = TXD_DESC_SIZE;
679 queue->winfo_size = txwi_size;
680 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
681 break;
682
683 case QID_ATIM:
684 /* fallthrough */
685 default:
686 BUG();
687 break;
688 }
689}
690EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
691
692/*
693 * Initialization functions.
694 */
695bool rt2800mmio_get_entry_state(struct queue_entry *entry)
696{
697 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
698 u32 word;
699
700 if (entry->queue->qid == QID_RX) {
701 rt2x00_desc_read(entry_priv->desc, 1, &word);
702
703 return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
704 } else {
705 rt2x00_desc_read(entry_priv->desc, 1, &word);
706
707 return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
708 }
709}
710EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
711
712void rt2800mmio_clear_entry(struct queue_entry *entry)
713{
714 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
715 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
716 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
717 u32 word;
718
719 if (entry->queue->qid == QID_RX) {
720 rt2x00_desc_read(entry_priv->desc, 0, &word);
721 rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
722 rt2x00_desc_write(entry_priv->desc, 0, word);
723
724 rt2x00_desc_read(entry_priv->desc, 1, &word);
725 rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
726 rt2x00_desc_write(entry_priv->desc, 1, word);
727
728 /*
729 * Set RX IDX in register to inform hardware that we have
730 * handled this entry and it is available for reuse again.
731 */
732 rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
733 entry->entry_idx);
734 } else {
735 rt2x00_desc_read(entry_priv->desc, 1, &word);
736 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
737 rt2x00_desc_write(entry_priv->desc, 1, word);
738 }
739}
740EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
741
742int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
743{
744 struct queue_entry_priv_mmio *entry_priv;
745
746 /*
747 * Initialize registers.
748 */
749 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
750 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
751 entry_priv->desc_dma);
752 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
753 rt2x00dev->tx[0].limit);
754 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
755 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
756
757 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
758 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
759 entry_priv->desc_dma);
760 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
761 rt2x00dev->tx[1].limit);
762 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
763 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
764
765 entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
766 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
767 entry_priv->desc_dma);
768 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
769 rt2x00dev->tx[2].limit);
770 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
771 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
772
773 entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
774 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
775 entry_priv->desc_dma);
776 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
777 rt2x00dev->tx[3].limit);
778 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
779 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
780
781 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
782 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
783 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
784 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
785
786 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
787 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
788 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
789 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
790
791 entry_priv = rt2x00dev->rx->entries[0].priv_data;
792 rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
793 entry_priv->desc_dma);
794 rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
795 rt2x00dev->rx[0].limit);
796 rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
797 rt2x00dev->rx[0].limit - 1);
798 rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
799
800 rt2800_disable_wpdma(rt2x00dev);
801
802 rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
803
804 return 0;
805}
806EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
807
808int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
809{
810 u32 reg;
811
812 /*
813 * Reset DMA indexes
814 */
815 rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
816 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
817 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
818 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
819 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
820 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
821 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
822 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
823 rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
824
825 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
826 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
827
828 if (rt2x00_is_pcie(rt2x00dev) &&
829 (rt2x00_rt(rt2x00dev, RT3090) ||
830 rt2x00_rt(rt2x00dev, RT3390) ||
831 rt2x00_rt(rt2x00dev, RT3572) ||
832 rt2x00_rt(rt2x00dev, RT3593) ||
833 rt2x00_rt(rt2x00dev, RT5390) ||
834 rt2x00_rt(rt2x00dev, RT5392) ||
835 rt2x00_rt(rt2x00dev, RT5592))) {
836 rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
837 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
838 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
839 rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
840 }
841
842 rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
843
844 reg = 0;
845 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
846 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
847 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
848
849 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
850
851 return 0;
852}
853EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
854
855/*
856 * Device state switch handlers.
857 */
858int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
859{
860 /* Wait for DMA, ignore error until we initialize queues. */
861 rt2800_wait_wpdma_ready(rt2x00dev);
862
863 if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
864 return -EIO;
865
866 return rt2800_enable_radio(rt2x00dev);
867}
868EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
869
870MODULE_AUTHOR(DRV_PROJECT);
871MODULE_VERSION(DRV_VERSION);
872MODULE_DESCRIPTION("rt2800 MMIO library");
873MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/rt2x00/rt2800mmio.h
new file mode 100644
index 000000000000..6a10de3eee3e
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.h
@@ -0,0 +1,165 @@
1/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
2 * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
3 * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
4 * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
5 * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
6 * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
7 * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
8 * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
9 * <http://rt2x00.serialmonkey.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the
23 * Free Software Foundation, Inc.,
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 */
26
27/* Module: rt2800mmio
28 * Abstract: forward declarations for the rt2800mmio module.
29 */
30
31#ifndef RT2800MMIO_H
32#define RT2800MMIO_H
33
34/*
35 * Queue register offset macros
36 */
37#define TX_QUEUE_REG_OFFSET 0x10
38#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
39#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
40#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
41#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
42
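The per-ring registers sit TX_QUEUE_REG_OFFSET (0x10) apart, so the macros above select a ring's registers by queue index, which is how rt2800mmio_kick_queue() addresses the data rings by qid and the management ring as index 5. An illustrative wrapper built on the same macro (the helper name is made up here; only rt2x00mmio_register_write() and TX_CTX_IDX() are real):

static inline void kick_mgmt_ring(struct rt2x00_dev *rt2x00dev,
				  unsigned int entry_idx)
{
	/* Publish the producer index of ring 5, the management ring. */
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5), entry_idx);
}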
43/*
44 * DMA descriptor defines.
45 */
46#define TXD_DESC_SIZE (4 * sizeof(__le32))
47#define RXD_DESC_SIZE (4 * sizeof(__le32))
48
49/*
50 * TX descriptor format for TX, PRIO and Beacon Ring.
51 */
52
53/*
54 * Word0
55 */
56#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
57
58/*
59 * Word1
60 */
61#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
62#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
63#define TXD_W1_BURST FIELD32(0x00008000)
64#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
65#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
66#define TXD_W1_DMA_DONE FIELD32(0x80000000)
67
68/*
69 * Word2
70 */
71#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
72
73/*
74 * Word3
75 * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
76 * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
77 * 0:MGMT, 1:HCCA 2:EDCA
78 */
79#define TXD_W3_WIV FIELD32(0x01000000)
80#define TXD_W3_QSEL FIELD32(0x06000000)
81#define TXD_W3_TCO FIELD32(0x20000000)
82#define TXD_W3_UCO FIELD32(0x40000000)
83#define TXD_W3_ICO FIELD32(0x80000000)
84
85/*
86 * RX descriptor format for RX Ring.
87 */
88
89/*
90 * Word0
91 */
92#define RXD_W0_SDP0 FIELD32(0xffffffff)
93
94/*
95 * Word1
96 */
97#define RXD_W1_SDL1 FIELD32(0x00003fff)
98#define RXD_W1_SDL0 FIELD32(0x3fff0000)
99#define RXD_W1_LS0 FIELD32(0x40000000)
100#define RXD_W1_DMA_DONE FIELD32(0x80000000)
101
102/*
103 * Word2
104 */
105#define RXD_W2_SDP1 FIELD32(0xffffffff)
106
107/*
108 * Word3
109 * AMSDU: RX with 802.3 header, not 802.11 header.
110 * DECRYPTED: This frame is being decrypted.
111 */
112#define RXD_W3_BA FIELD32(0x00000001)
113#define RXD_W3_DATA FIELD32(0x00000002)
114#define RXD_W3_NULLDATA FIELD32(0x00000004)
115#define RXD_W3_FRAG FIELD32(0x00000008)
116#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
117#define RXD_W3_MULTICAST FIELD32(0x00000020)
118#define RXD_W3_BROADCAST FIELD32(0x00000040)
119#define RXD_W3_MY_BSS FIELD32(0x00000080)
120#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
121#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
122#define RXD_W3_AMSDU FIELD32(0x00000800)
123#define RXD_W3_HTC FIELD32(0x00001000)
124#define RXD_W3_RSSI FIELD32(0x00002000)
125#define RXD_W3_L2PAD FIELD32(0x00004000)
126#define RXD_W3_AMPDU FIELD32(0x00008000)
127#define RXD_W3_DECRYPTED FIELD32(0x00010000)
128#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
129#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
130
131/* TX descriptor initialization */
132__le32 *rt2800mmio_get_txwi(struct queue_entry *entry);
133void rt2800mmio_write_tx_desc(struct queue_entry *entry,
134 struct txentry_desc *txdesc);
135
136/* RX control handlers */
137void rt2800mmio_fill_rxdone(struct queue_entry *entry,
138 struct rxdone_entry_desc *rxdesc);
139
140/* Interrupt functions */
141void rt2800mmio_txstatus_tasklet(unsigned long data);
142void rt2800mmio_pretbtt_tasklet(unsigned long data);
143void rt2800mmio_tbtt_tasklet(unsigned long data);
144void rt2800mmio_rxdone_tasklet(unsigned long data);
145void rt2800mmio_autowake_tasklet(unsigned long data);
146irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance);
147void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
148 enum dev_state state);
149
150/* Queue handlers */
151void rt2800mmio_start_queue(struct data_queue *queue);
152void rt2800mmio_kick_queue(struct data_queue *queue);
153void rt2800mmio_stop_queue(struct data_queue *queue);
154void rt2800mmio_queue_init(struct data_queue *queue);
155
156/* Initialization functions */
157bool rt2800mmio_get_entry_state(struct queue_entry *entry);
158void rt2800mmio_clear_entry(struct queue_entry *entry);
159int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev);
160int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev);
161
162/* Device state switch handlers. */
163int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev);
164
165#endif /* RT2800MMIO_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index f8f2abbfbb65..b504455b4fec 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -37,14 +37,13 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/platform_device.h>
 #include <linux/eeprom_93cx6.h>
 
 #include "rt2x00.h"
 #include "rt2x00mmio.h"
 #include "rt2x00pci.h"
-#include "rt2x00soc.h"
 #include "rt2800lib.h"
+#include "rt2800mmio.h"
 #include "rt2800.h"
 #include "rt2800pci.h"
 
@@ -90,27 +89,6 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
90 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 89 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
91} 90}
92 91
93#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
94static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
95{
96 void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
97
98 if (!base_addr)
99 return -ENOMEM;
100
101 memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
102
103 iounmap(base_addr);
104 return 0;
105}
106#else
107static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
108{
109 return -ENOMEM;
110}
111#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
112
113#ifdef CONFIG_PCI
114static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 92static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
115{ 93{
116 struct rt2x00_dev *rt2x00dev = eeprom->data; 94 struct rt2x00_dev *rt2x00dev = eeprom->data;
@@ -183,112 +161,6 @@ static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
183{ 161{
184 return rt2800_read_eeprom_efuse(rt2x00dev); 162 return rt2800_read_eeprom_efuse(rt2x00dev);
185} 163}
186#else
187static inline int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
188{
189 return -EOPNOTSUPP;
190}
191
192static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
193{
194 return 0;
195}
196
197static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
198{
199 return -EOPNOTSUPP;
200}
201#endif /* CONFIG_PCI */
202
203/*
204 * Queue handlers.
205 */
206static void rt2800pci_start_queue(struct data_queue *queue)
207{
208 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
209 u32 reg;
210
211 switch (queue->qid) {
212 case QID_RX:
213 rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
214 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
215 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
216 break;
217 case QID_BEACON:
218 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
219 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
220 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
221 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
222 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
223
224 rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
225 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
226 rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
227 break;
228 default:
229 break;
230 }
231}
232
233static void rt2800pci_kick_queue(struct data_queue *queue)
234{
235 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
236 struct queue_entry *entry;
237
238 switch (queue->qid) {
239 case QID_AC_VO:
240 case QID_AC_VI:
241 case QID_AC_BE:
242 case QID_AC_BK:
243 entry = rt2x00queue_get_entry(queue, Q_INDEX);
244 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
245 entry->entry_idx);
246 break;
247 case QID_MGMT:
248 entry = rt2x00queue_get_entry(queue, Q_INDEX);
249 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
250 entry->entry_idx);
251 break;
252 default:
253 break;
254 }
255}
256
257static void rt2800pci_stop_queue(struct data_queue *queue)
258{
259 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
260 u32 reg;
261
262 switch (queue->qid) {
263 case QID_RX:
264 rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
265 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
266 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
267 break;
268 case QID_BEACON:
269 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
270 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
271 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
272 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
273 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
274
275 rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
276 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
277 rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
278
279 /*
280 * Wait for current invocation to finish. The tasklet
281 * won't be scheduled anymore afterwards since we disabled
282 * the TBTT and PRE TBTT timer.
283 */
284 tasklet_kill(&rt2x00dev->tbtt_tasklet);
285 tasklet_kill(&rt2x00dev->pretbtt_tasklet);
286
287 break;
288 default:
289 break;
290 }
291}
292 164
293/* 165/*
294 * Firmware functions 166 * Firmware functions
@@ -332,217 +204,13 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
332} 204}
333 205
334/* 206/*
335 * Initialization functions.
336 */
337static bool rt2800pci_get_entry_state(struct queue_entry *entry)
338{
339 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
340 u32 word;
341
342 if (entry->queue->qid == QID_RX) {
343 rt2x00_desc_read(entry_priv->desc, 1, &word);
344
345 return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
346 } else {
347 rt2x00_desc_read(entry_priv->desc, 1, &word);
348
349 return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
350 }
351}
352
353static void rt2800pci_clear_entry(struct queue_entry *entry)
354{
355 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
356 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
357 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
358 u32 word;
359
360 if (entry->queue->qid == QID_RX) {
361 rt2x00_desc_read(entry_priv->desc, 0, &word);
362 rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
363 rt2x00_desc_write(entry_priv->desc, 0, word);
364
365 rt2x00_desc_read(entry_priv->desc, 1, &word);
366 rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
367 rt2x00_desc_write(entry_priv->desc, 1, word);
368
369 /*
370 * Set RX IDX in register to inform hardware that we have
371 * handled this entry and it is available for reuse again.
372 */
373 rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
374 entry->entry_idx);
375 } else {
376 rt2x00_desc_read(entry_priv->desc, 1, &word);
377 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
378 rt2x00_desc_write(entry_priv->desc, 1, word);
379 }
380}
381
382static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
383{
384 struct queue_entry_priv_mmio *entry_priv;
385
386 /*
387 * Initialize registers.
388 */
389 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
390 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
391 entry_priv->desc_dma);
392 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
393 rt2x00dev->tx[0].limit);
394 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
395 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
396
397 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
398 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
399 entry_priv->desc_dma);
400 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
401 rt2x00dev->tx[1].limit);
402 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
403 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
404
405 entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
406 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
407 entry_priv->desc_dma);
408 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
409 rt2x00dev->tx[2].limit);
410 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
411 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
412
413 entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
414 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
415 entry_priv->desc_dma);
416 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
417 rt2x00dev->tx[3].limit);
418 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
419 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
420
421 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
422 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
423 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
424 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
425
426 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
427 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
428 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
429 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
430
431 entry_priv = rt2x00dev->rx->entries[0].priv_data;
432 rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
433 entry_priv->desc_dma);
434 rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
435 rt2x00dev->rx[0].limit);
436 rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
437 rt2x00dev->rx[0].limit - 1);
438 rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
439
440 rt2800_disable_wpdma(rt2x00dev);
441
442 rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
443
444 return 0;
445}
446
447/*
448 * Device state switch handlers. 207 * Device state switch handlers.
449 */ 208 */
450static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
451 enum dev_state state)
452{
453 u32 reg;
454 unsigned long flags;
455
456 /*
 457 * When interrupts are being enabled, first clear the interrupt
 458 * source register to ensure a clean state.
459 */
460 if (state == STATE_RADIO_IRQ_ON) {
461 rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
462 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
463 }
464
465 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
466 reg = 0;
467 if (state == STATE_RADIO_IRQ_ON) {
468 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
469 rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
470 rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
471 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
472 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
473 }
474 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
475 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
476
477 if (state == STATE_RADIO_IRQ_OFF) {
478 /*
479 * Wait for possibly running tasklets to finish.
480 */
481 tasklet_kill(&rt2x00dev->txstatus_tasklet);
482 tasklet_kill(&rt2x00dev->rxdone_tasklet);
483 tasklet_kill(&rt2x00dev->autowake_tasklet);
484 tasklet_kill(&rt2x00dev->tbtt_tasklet);
485 tasklet_kill(&rt2x00dev->pretbtt_tasklet);
486 }
487}
488
489static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
490{
491 u32 reg;
492
493 /*
494 * Reset DMA indexes
495 */
496 rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
497 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
498 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
499 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
500 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
501 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
502 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
503 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
504 rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
505
506 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
507 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
508
509 if (rt2x00_is_pcie(rt2x00dev) &&
510 (rt2x00_rt(rt2x00dev, RT3090) ||
511 rt2x00_rt(rt2x00dev, RT3390) ||
512 rt2x00_rt(rt2x00dev, RT3572) ||
513 rt2x00_rt(rt2x00dev, RT3593) ||
514 rt2x00_rt(rt2x00dev, RT5390) ||
515 rt2x00_rt(rt2x00dev, RT5392) ||
516 rt2x00_rt(rt2x00dev, RT5592))) {
517 rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
518 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
519 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
520 rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
521 }
522
523 rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
524
525 reg = 0;
526 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
527 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
528 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
529
530 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
531
532 return 0;
533}
534
 static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
 {
 	int retval;
 
-	/* Wait for DMA, ignore error until we initialize queues. */
-	rt2800_wait_wpdma_ready(rt2x00dev);
-
-	if (unlikely(rt2800pci_init_queues(rt2x00dev)))
-		return -EIO;
-
-	retval = rt2800_enable_radio(rt2x00dev);
+	retval = rt2800mmio_enable_radio(rt2x00dev);
 	if (retval)
 		return retval;
 
@@ -559,15 +227,6 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
559 return retval; 227 return retval;
560} 228}
561 229
562static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
563{
564 if (rt2x00_is_soc(rt2x00dev)) {
565 rt2800_disable_radio(rt2x00dev);
566 rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
567 rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
568 }
569}
570
571static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, 230static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
572 enum dev_state state) 231 enum dev_state state)
573{ 232{
@@ -601,12 +260,11 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
 	 * After the radio has been disabled, the device should
 	 * be put to sleep for powersaving.
 	 */
-		rt2800pci_disable_radio(rt2x00dev);
 		rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
 		break;
 	case STATE_RADIO_IRQ_ON:
 	case STATE_RADIO_IRQ_OFF:
-		rt2800pci_toggle_irq(rt2x00dev, state);
+		rt2800mmio_toggle_irq(rt2x00dev, state);
 		break;
 	case STATE_DEEP_SLEEP:
 	case STATE_SLEEP:
@@ -627,479 +285,13 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
627} 285}
628 286
629/* 287/*
630 * TX descriptor initialization
631 */
632static __le32 *rt2800pci_get_txwi(struct queue_entry *entry)
633{
634 return (__le32 *) entry->skb->data;
635}
636
637static void rt2800pci_write_tx_desc(struct queue_entry *entry,
638 struct txentry_desc *txdesc)
639{
640 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
641 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
642 __le32 *txd = entry_priv->desc;
643 u32 word;
644 const unsigned int txwi_size = entry->queue->winfo_size;
645
646 /*
 647 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
 648 * must contain a TXWI structure + 802.11 header + padding + 802.11
 649 * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI and
 650 * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
 651 * data. This means that LAST_SEC0 is always 0.
652 */
653
654 /*
655 * Initialize TX descriptor
656 */
657 word = 0;
658 rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
659 rt2x00_desc_write(txd, 0, word);
660
661 word = 0;
662 rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
663 rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
664 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
665 rt2x00_set_field32(&word, TXD_W1_BURST,
666 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
667 rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
668 rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
669 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
670 rt2x00_desc_write(txd, 1, word);
671
672 word = 0;
673 rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
674 skbdesc->skb_dma + txwi_size);
675 rt2x00_desc_write(txd, 2, word);
676
677 word = 0;
678 rt2x00_set_field32(&word, TXD_W3_WIV,
679 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
680 rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
681 rt2x00_desc_write(txd, 3, word);
682
683 /*
684 * Register descriptor details in skb frame descriptor.
685 */
686 skbdesc->desc = txd;
687 skbdesc->desc_len = TXD_DESC_SIZE;
688}
689
690/*
691 * RX control handlers
692 */
693static void rt2800pci_fill_rxdone(struct queue_entry *entry,
694 struct rxdone_entry_desc *rxdesc)
695{
696 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
697 __le32 *rxd = entry_priv->desc;
698 u32 word;
699
700 rt2x00_desc_read(rxd, 3, &word);
701
702 if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
703 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
704
705 /*
706 * Unfortunately we don't know the cipher type used during
 707 * decryption. This prevents us from providing
708 * correct statistics through debugfs.
709 */
710 rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
711
712 if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
713 /*
714 * Hardware has stripped IV/EIV data from 802.11 frame during
715 * decryption. Unfortunately the descriptor doesn't contain
716 * any fields with the EIV/IV data either, so they can't
717 * be restored by rt2x00lib.
718 */
719 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
720
721 /*
722 * The hardware has already checked the Michael Mic and has
723 * stripped it from the frame. Signal this to mac80211.
724 */
725 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
726
727 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
728 rxdesc->flags |= RX_FLAG_DECRYPTED;
729 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
730 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
731 }
732
733 if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
734 rxdesc->dev_flags |= RXDONE_MY_BSS;
735
736 if (rt2x00_get_field32(word, RXD_W3_L2PAD))
737 rxdesc->dev_flags |= RXDONE_L2PAD;
738
739 /*
740 * Process the RXWI structure that is at the start of the buffer.
741 */
742 rt2800_process_rxwi(entry, rxdesc);
743}
744
745/*
746 * Interrupt functions.
747 */
748static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
749{
750 struct ieee80211_conf conf = { .flags = 0 };
751 struct rt2x00lib_conf libconf = { .conf = &conf };
752
753 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
754}
755
756static bool rt2800pci_txdone_entry_check(struct queue_entry *entry, u32 status)
757{
758 __le32 *txwi;
759 u32 word;
760 int wcid, tx_wcid;
761
762 wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
763
764 txwi = rt2800_drv_get_txwi(entry);
765 rt2x00_desc_read(txwi, 1, &word);
766 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
767
768 return (tx_wcid == wcid);
769}
770
771static bool rt2800pci_txdone_find_entry(struct queue_entry *entry, void *data)
772{
773 u32 status = *(u32 *)data;
774
775 /*
776 * rt2800pci hardware might reorder frames when exchanging traffic
777 * with multiple BA enabled STAs.
778 *
779 * For example, a tx queue
780 * [ STA1 | STA2 | STA1 | STA2 ]
781 * can result in tx status reports
782 * [ STA1 | STA1 | STA2 | STA2 ]
783 * when the hw decides to aggregate the frames for STA1 into one AMPDU.
784 *
785 * To mitigate this effect, associate the tx status to the first frame
786 * in the tx queue with a matching wcid.
787 */
788 if (rt2800pci_txdone_entry_check(entry, status) &&
789 !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
790 /*
791 * Got a matching frame, associate the tx status with
792 * the frame
793 */
794 entry->status = status;
795 set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
796 return true;
797 }
798
799 /* Check the next frame */
800 return false;
801}
802
803static bool rt2800pci_txdone_match_first(struct queue_entry *entry, void *data)
804{
805 u32 status = *(u32 *)data;
806
807 /*
808 * Find the first frame without tx status and assign this status to it
809 * regardless if it matches or not.
810 */
811 if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
812 /*
 813 * Got the first frame without a tx status, associate the
 814 * tx status with it
815 */
816 entry->status = status;
817 set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
818 return true;
819 }
820
821 /* Check the next frame */
822 return false;
823}
824static bool rt2800pci_txdone_release_entries(struct queue_entry *entry,
825 void *data)
826{
827 if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
828 rt2800_txdone_entry(entry, entry->status,
829 rt2800pci_get_txwi(entry));
830 return false;
831 }
832
833 /* No more frames to release */
834 return true;
835}
836
837static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
838{
839 struct data_queue *queue;
840 u32 status;
841 u8 qid;
842 int max_tx_done = 16;
843
844 while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
845 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
846 if (unlikely(qid >= QID_RX)) {
847 /*
848 * Unknown queue, this shouldn't happen. Just drop
849 * this tx status.
850 */
851 rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
852 qid);
853 break;
854 }
855
856 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
857 if (unlikely(queue == NULL)) {
858 /*
859 * The queue is NULL, this shouldn't happen. Stop
860 * processing here and drop the tx status
861 */
862 rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
863 qid);
864 break;
865 }
866
867 if (unlikely(rt2x00queue_empty(queue))) {
868 /*
869 * The queue is empty. Stop processing here
870 * and drop the tx status.
871 */
872 rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
873 qid);
874 break;
875 }
876
877 /*
878 * Let's associate this tx status with the first
879 * matching frame.
880 */
881 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
882 Q_INDEX, &status,
883 rt2800pci_txdone_find_entry)) {
884 /*
885 * We cannot match the tx status to any frame, so just
886 * use the first one.
887 */
888 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
889 Q_INDEX, &status,
890 rt2800pci_txdone_match_first)) {
891 rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
892 qid);
893 break;
894 }
895 }
896
897 /*
898 * Release all frames with a valid tx status.
899 */
900 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
901 Q_INDEX, NULL,
902 rt2800pci_txdone_release_entries);
903
904 if (--max_tx_done == 0)
905 break;
906 }
907
908 return !max_tx_done;
909}
910
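The matching logic above (rt2800pci_txdone_find_entry, rt2800pci_txdone_match_first and the loop in rt2800pci_txdone) boils down to a two-pass search over the pending entries of a queue: first look for an entry whose wcid matches the report, then fall back to the first entry that has no status at all. Below is a minimal, self-contained userspace sketch of that strategy; the entry layout, the wcid values and the report order are made up for illustration and are not the real rt2x00 structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_LEN 4

struct entry {
	int wcid;         /* wireless client the frame was sent to */
	bool status_set;  /* a tx status has already been associated */
	uint32_t status;
};

/* First pass: first entry for the reported wcid that has no status yet. */
static struct entry *find_matching(struct entry *q, int n, int wcid)
{
	for (int i = 0; i < n; i++)
		if (!q[i].status_set && q[i].wcid == wcid)
			return &q[i];
	return NULL;
}

/* Fallback: first entry without a status, regardless of its wcid. */
static struct entry *find_first_unset(struct entry *q, int n)
{
	for (int i = 0; i < n; i++)
		if (!q[i].status_set)
			return &q[i];
	return NULL;
}

int main(void)
{
	/* tx queue [STA1 STA2 STA1 STA2], hw reports [STA1 STA1 STA2 STA2] */
	struct entry q[QUEUE_LEN] = {
		{ .wcid = 1 }, { .wcid = 2 }, { .wcid = 1 }, { .wcid = 2 },
	};
	int reported_wcid[QUEUE_LEN] = { 1, 1, 2, 2 };

	for (int i = 0; i < QUEUE_LEN; i++) {
		struct entry *e = find_matching(q, QUEUE_LEN, reported_wcid[i]);

		if (!e)
			e = find_first_unset(q, QUEUE_LEN);
		if (!e)
			break;
		e->status_set = true;
		printf("report for wcid %d -> queue slot %ld\n",
		       reported_wcid[i], (long)(e - q));
	}
	return 0;
}

Run as-is, the two reordered STA1 reports land on queue slots 0 and 2 before the STA2 reports are matched to slots 1 and 3, which is exactly the reordering scenario the comment in rt2800pci_txdone_find_entry describes.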
911static inline void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
912 struct rt2x00_field32 irq_field)
913{
914 u32 reg;
915
916 /*
917 * Enable a single interrupt. The interrupt mask register
918 * access needs locking.
919 */
920 spin_lock_irq(&rt2x00dev->irqmask_lock);
921 rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
922 rt2x00_set_field32(&reg, irq_field, 1);
923 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
924 spin_unlock_irq(&rt2x00dev->irqmask_lock);
925}
926
927static void rt2800pci_txstatus_tasklet(unsigned long data)
928{
929 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
930 if (rt2800pci_txdone(rt2x00dev))
931 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
932
933 /*
934 * No need to enable the tx status interrupt here as we always
935 * leave it enabled to minimize the possibility of a tx status
936 * register overflow. See comment in interrupt handler.
937 */
938}
939
940static void rt2800pci_pretbtt_tasklet(unsigned long data)
941{
942 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
943 rt2x00lib_pretbtt(rt2x00dev);
944 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
945 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
946}
947
948static void rt2800pci_tbtt_tasklet(unsigned long data)
949{
950 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
951 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
952 u32 reg;
953
954 rt2x00lib_beacondone(rt2x00dev);
955
956 if (rt2x00dev->intf_ap_count) {
957 /*
958 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
959 * causing beacon skew and as a result causing problems with
960 * some powersaving clients over time. Shorten the beacon
961 * interval every 64 beacons by 64us to mitigate this effect.
962 */
963 if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
964 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
965 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
966 (rt2x00dev->beacon_int * 16) - 1);
967 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
968 } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
969 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
970 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
971 (rt2x00dev->beacon_int * 16));
972 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
973 }
974 drv_data->tbtt_tick++;
975 drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
976 }
977
978 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
979 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
980}
981
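The compensation above works because the BEACON_INTERVAL field is programmed as beacon_int * 16, i.e. in units of 1/16 TU (1024 us / 16 = 64 us), so lowering the value by one for a single beacon period absorbs roughly the 64 us of drift that accumulates when the timer runs about 1 us late per TBTT. A small sketch of the arithmetic; the BCN_TBTT_OFFSET value of 64 and the 100 TU beacon interval are assumptions for illustration, not values taken from this patch.

#include <stdio.h>

int main(void)
{
	const int tbtt_offset = 64;          /* assumed BCN_TBTT_OFFSET */
	const int drift_per_tbtt_us = 1;     /* timer is ~1 us late per TBTT */
	const int reg_unit_us = 1024 / 16;   /* register unit: TU/16 = 64 us */
	const int beacon_int_tu = 100;       /* assumed beacon interval in TU */

	printf("drift after %d beacons: %d us\n",
	       tbtt_offset, tbtt_offset * drift_per_tbtt_us);
	printf("normal register value:    %d\n", beacon_int_tu * 16);
	printf("shortened register value: %d (one %d us unit less)\n",
	       beacon_int_tu * 16 - 1, reg_unit_us);
	return 0;
}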
982static void rt2800pci_rxdone_tasklet(unsigned long data)
983{
984 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
985 if (rt2x00mmio_rxdone(rt2x00dev))
986 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
987 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
988 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
989}
990
991static void rt2800pci_autowake_tasklet(unsigned long data)
992{
993 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
994 rt2800pci_wakeup(rt2x00dev);
995 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
996 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
997}
998
999static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
1000{
1001 u32 status;
1002 int i;
1003
1004 /*
1005 * The TX_FIFO_STATUS interrupt needs special care. We should
1006 * read TX_STA_FIFO but we should do it immediately as otherwise
1007 * the register can overflow and we would lose status reports.
1008 *
1009 * Hence, read the TX_STA_FIFO register and copy all tx status
1010 * reports into a kernel FIFO which is handled in the txstatus
1011 * tasklet. We use a tasklet to process the tx status reports
1012 * because we can schedule the tasklet multiple times (when the
1013 * interrupt fires again during tx status processing).
1014 *
1015 * Furthermore we don't disable the TX_FIFO_STATUS
1016 * interrupt here but leave it enabled so that the TX_STA_FIFO
1017 * can also be read while the tx status tasklet gets executed.
1018 *
1019 * Since we have only one producer and one consumer we don't
1020 * need to lock the kfifo.
1021 */
1022 for (i = 0; i < rt2x00dev->tx->limit; i++) {
1023 rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
1024
1025 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
1026 break;
1027
1028 if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
1029 rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
1030 break;
1031 }
1032 }
1033
1034 /* Schedule the tasklet for processing the tx status. */
1035 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
1036}
1037
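The comment above describes a single-producer/single-consumer split: the interrupt handler only puts status words into the FIFO and the tasklet only takes them out, so no lock is needed. The sketch below models the put/get semantics with a plain userspace ring buffer; it is not the kernel kfifo API, and the real kfifo additionally provides the memory ordering that makes the lockless split safe across CPUs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FIFO_SIZE 16 /* power of two, like the kernel kfifo */

struct status_fifo {
	uint32_t buf[FIFO_SIZE];
	unsigned int head; /* advanced only by the producer (irq handler) */
	unsigned int tail; /* advanced only by the consumer (tasklet) */
};

/* Producer side, mirrors kfifo_put(): fails when the FIFO is full. */
static bool fifo_put(struct status_fifo *f, uint32_t val)
{
	if (f->head - f->tail == FIFO_SIZE)
		return false;
	f->buf[f->head % FIFO_SIZE] = val;
	f->head++;
	return true;
}

/* Consumer side, mirrors kfifo_get(): fails when the FIFO is empty. */
static bool fifo_get(struct status_fifo *f, uint32_t *val)
{
	if (f->head == f->tail)
		return false;
	*val = f->buf[f->tail % FIFO_SIZE];
	f->tail++;
	return true;
}

int main(void)
{
	struct status_fifo f = { .head = 0, .tail = 0 };
	uint32_t v;

	/* "interrupt": drain the hardware register into the software FIFO */
	for (uint32_t status = 1; status <= 3; status++)
		if (!fifo_put(&f, status))
			printf("FIFO overrun, status %u dropped\n", status);

	/* "tasklet": process the queued reports later */
	while (fifo_get(&f, &v))
		printf("processing tx status %u\n", v);
	return 0;
}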
1038static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
1039{
1040 struct rt2x00_dev *rt2x00dev = dev_instance;
1041 u32 reg, mask;
1042
1043 /* Read status and ACK all interrupts */
1044 rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
1045 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
1046
1047 if (!reg)
1048 return IRQ_NONE;
1049
1050 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1051 return IRQ_HANDLED;
1052
1053 /*
1054 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
1055 * for interrupts and interrupt masks we can just use the value of
1056 * INT_SOURCE_CSR to create the interrupt mask.
1057 */
1058 mask = ~reg;
1059
1060 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
1061 rt2800pci_txstatus_interrupt(rt2x00dev);
1062 /*
1063 * Never disable the TX_FIFO_STATUS interrupt.
1064 */
1065 rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
1066 }
1067
1068 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
1069 tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
1070
1071 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
1072 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
1073
1074 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
1075 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
1076
1077 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
1078 tasklet_schedule(&rt2x00dev->autowake_tasklet);
1079
1080 /*
1081 * Disable all interrupts for which a tasklet was scheduled right now,
1082 * the tasklet will reenable the appropriate interrupts.
1083 */
1084 spin_lock(&rt2x00dev->irqmask_lock);
1085 rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
1086 reg &= mask;
1087 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
1088 spin_unlock(&rt2x00dev->irqmask_lock);
1089
1090 return IRQ_HANDLED;
1091}
1092
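The mask handling at the end of the handler above is plain bit arithmetic: every source that fired is cleared from the running interrupt mask, except TX_FIFO_STATUS, which is forced back on before the mask is applied. A standalone sketch; the bit positions are invented for illustration, the real field definitions live in rt2800.h.

#include <stdint.h>
#include <stdio.h>

#define INT_TX_FIFO_STATUS (1u << 1) /* invented bit positions */
#define INT_RX_DONE        (1u << 2)
#define INT_TBTT           (1u << 3)

int main(void)
{
	uint32_t source   = INT_TX_FIFO_STATUS | INT_RX_DONE;            /* what fired */
	uint32_t cur_mask = INT_TX_FIFO_STATUS | INT_RX_DONE | INT_TBTT; /* currently enabled */

	uint32_t mask = ~source;          /* disable everything that fired ... */
	mask |= INT_TX_FIFO_STATUS;       /* ... but never the tx status irq   */

	uint32_t new_mask = cur_mask & mask;

	printf("INT_MASK_CSR: 0x%08x -> 0x%08x\n", cur_mask, new_mask);
	/* RX_DONE is now masked until its tasklet re-enables it;
	 * TX_FIFO_STATUS and TBTT stay enabled. */
	return 0;
}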
1093/*
1094 * Device probe functions. 288 * Device probe functions.
1095 */ 289 */
1096static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev) 290static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
1097{ 291{
1098 int retval; 292 int retval;
1099 293
1100 if (rt2x00_is_soc(rt2x00dev)) 294 if (rt2800pci_efuse_detect(rt2x00dev))
1101 retval = rt2800pci_read_eeprom_soc(rt2x00dev);
1102 else if (rt2800pci_efuse_detect(rt2x00dev))
1103 retval = rt2800pci_read_eeprom_efuse(rt2x00dev); 295 retval = rt2800pci_read_eeprom_efuse(rt2x00dev);
1104 else 296 else
1105 retval = rt2800pci_read_eeprom_pci(rt2x00dev); 297 retval = rt2800pci_read_eeprom_pci(rt2x00dev);
@@ -1145,25 +337,25 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
1145 .read_eeprom = rt2800pci_read_eeprom, 337 .read_eeprom = rt2800pci_read_eeprom,
1146 .hwcrypt_disabled = rt2800pci_hwcrypt_disabled, 338 .hwcrypt_disabled = rt2800pci_hwcrypt_disabled,
1147 .drv_write_firmware = rt2800pci_write_firmware, 339 .drv_write_firmware = rt2800pci_write_firmware,
1148 .drv_init_registers = rt2800pci_init_registers, 340 .drv_init_registers = rt2800mmio_init_registers,
1149 .drv_get_txwi = rt2800pci_get_txwi, 341 .drv_get_txwi = rt2800mmio_get_txwi,
1150}; 342};
1151 343
1152static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { 344static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1153 .irq_handler = rt2800pci_interrupt, 345 .irq_handler = rt2800mmio_interrupt,
1154 .txstatus_tasklet = rt2800pci_txstatus_tasklet, 346 .txstatus_tasklet = rt2800mmio_txstatus_tasklet,
1155 .pretbtt_tasklet = rt2800pci_pretbtt_tasklet, 347 .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet,
1156 .tbtt_tasklet = rt2800pci_tbtt_tasklet, 348 .tbtt_tasklet = rt2800mmio_tbtt_tasklet,
1157 .rxdone_tasklet = rt2800pci_rxdone_tasklet, 349 .rxdone_tasklet = rt2800mmio_rxdone_tasklet,
1158 .autowake_tasklet = rt2800pci_autowake_tasklet, 350 .autowake_tasklet = rt2800mmio_autowake_tasklet,
1159 .probe_hw = rt2800_probe_hw, 351 .probe_hw = rt2800_probe_hw,
1160 .get_firmware_name = rt2800pci_get_firmware_name, 352 .get_firmware_name = rt2800pci_get_firmware_name,
1161 .check_firmware = rt2800_check_firmware, 353 .check_firmware = rt2800_check_firmware,
1162 .load_firmware = rt2800_load_firmware, 354 .load_firmware = rt2800_load_firmware,
1163 .initialize = rt2x00mmio_initialize, 355 .initialize = rt2x00mmio_initialize,
1164 .uninitialize = rt2x00mmio_uninitialize, 356 .uninitialize = rt2x00mmio_uninitialize,
1165 .get_entry_state = rt2800pci_get_entry_state, 357 .get_entry_state = rt2800mmio_get_entry_state,
1166 .clear_entry = rt2800pci_clear_entry, 358 .clear_entry = rt2800mmio_clear_entry,
1167 .set_device_state = rt2800pci_set_device_state, 359 .set_device_state = rt2800pci_set_device_state,
1168 .rfkill_poll = rt2800_rfkill_poll, 360 .rfkill_poll = rt2800_rfkill_poll,
1169 .link_stats = rt2800_link_stats, 361 .link_stats = rt2800_link_stats,
@@ -1171,15 +363,15 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1171 .link_tuner = rt2800_link_tuner, 363 .link_tuner = rt2800_link_tuner,
1172 .gain_calibration = rt2800_gain_calibration, 364 .gain_calibration = rt2800_gain_calibration,
1173 .vco_calibration = rt2800_vco_calibration, 365 .vco_calibration = rt2800_vco_calibration,
1174 .start_queue = rt2800pci_start_queue, 366 .start_queue = rt2800mmio_start_queue,
1175 .kick_queue = rt2800pci_kick_queue, 367 .kick_queue = rt2800mmio_kick_queue,
1176 .stop_queue = rt2800pci_stop_queue, 368 .stop_queue = rt2800mmio_stop_queue,
1177 .flush_queue = rt2x00mmio_flush_queue, 369 .flush_queue = rt2x00mmio_flush_queue,
1178 .write_tx_desc = rt2800pci_write_tx_desc, 370 .write_tx_desc = rt2800mmio_write_tx_desc,
1179 .write_tx_data = rt2800_write_tx_data, 371 .write_tx_data = rt2800_write_tx_data,
1180 .write_beacon = rt2800_write_beacon, 372 .write_beacon = rt2800_write_beacon,
1181 .clear_beacon = rt2800_clear_beacon, 373 .clear_beacon = rt2800_clear_beacon,
1182 .fill_rxdone = rt2800pci_fill_rxdone, 374 .fill_rxdone = rt2800mmio_fill_rxdone,
1183 .config_shared_key = rt2800_config_shared_key, 375 .config_shared_key = rt2800_config_shared_key,
1184 .config_pairwise_key = rt2800_config_pairwise_key, 376 .config_pairwise_key = rt2800_config_pairwise_key,
1185 .config_filter = rt2800_config_filter, 377 .config_filter = rt2800_config_filter,
@@ -1191,49 +383,6 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1191 .sta_remove = rt2800_sta_remove, 383 .sta_remove = rt2800_sta_remove,
1192}; 384};
1193 385
1194static void rt2800pci_queue_init(struct data_queue *queue)
1195{
1196 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1197 unsigned short txwi_size, rxwi_size;
1198
1199 rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
1200
1201 switch (queue->qid) {
1202 case QID_RX:
1203 queue->limit = 128;
1204 queue->data_size = AGGREGATION_SIZE;
1205 queue->desc_size = RXD_DESC_SIZE;
1206 queue->winfo_size = rxwi_size;
1207 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1208 break;
1209
1210 case QID_AC_VO:
1211 case QID_AC_VI:
1212 case QID_AC_BE:
1213 case QID_AC_BK:
1214 queue->limit = 64;
1215 queue->data_size = AGGREGATION_SIZE;
1216 queue->desc_size = TXD_DESC_SIZE;
1217 queue->winfo_size = txwi_size;
1218 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1219 break;
1220
1221 case QID_BEACON:
1222 queue->limit = 8;
1223 queue->data_size = 0; /* No DMA required for beacons */
1224 queue->desc_size = TXD_DESC_SIZE;
1225 queue->winfo_size = txwi_size;
1226 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1227 break;
1228
1229 case QID_ATIM:
1230 /* fallthrough */
1231 default:
1232 BUG();
1233 break;
1234 }
1235}
1236
1237static const struct rt2x00_ops rt2800pci_ops = { 386static const struct rt2x00_ops rt2800pci_ops = {
1238 .name = KBUILD_MODNAME, 387 .name = KBUILD_MODNAME,
1239 .drv_data_size = sizeof(struct rt2800_drv_data), 388 .drv_data_size = sizeof(struct rt2800_drv_data),
@@ -1241,7 +390,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
1241 .eeprom_size = EEPROM_SIZE, 390 .eeprom_size = EEPROM_SIZE,
1242 .rf_size = RF_SIZE, 391 .rf_size = RF_SIZE,
1243 .tx_queues = NUM_TX_QUEUES, 392 .tx_queues = NUM_TX_QUEUES,
1244 .queue_init = rt2800pci_queue_init, 393 .queue_init = rt2800mmio_queue_init,
1245 .lib = &rt2800pci_rt2x00_ops, 394 .lib = &rt2800pci_rt2x00_ops,
1246 .drv = &rt2800pci_rt2800_ops, 395 .drv = &rt2800pci_rt2800_ops,
1247 .hw = &rt2800pci_mac80211_ops, 396 .hw = &rt2800pci_mac80211_ops,
@@ -1253,7 +402,6 @@ static const struct rt2x00_ops rt2800pci_ops = {
1253/* 402/*
1254 * RT2800pci module information. 403 * RT2800pci module information.
1255 */ 404 */
1256#ifdef CONFIG_PCI
1257static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { 405static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1258 { PCI_DEVICE(0x1814, 0x0601) }, 406 { PCI_DEVICE(0x1814, 0x0601) },
1259 { PCI_DEVICE(0x1814, 0x0681) }, 407 { PCI_DEVICE(0x1814, 0x0681) },
@@ -1298,38 +446,15 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1298#endif 446#endif
1299 { 0, } 447 { 0, }
1300}; 448};
1301#endif /* CONFIG_PCI */
1302 449
1303MODULE_AUTHOR(DRV_PROJECT); 450MODULE_AUTHOR(DRV_PROJECT);
1304MODULE_VERSION(DRV_VERSION); 451MODULE_VERSION(DRV_VERSION);
1305MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver."); 452MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
1306MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards"); 453MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
1307#ifdef CONFIG_PCI
1308MODULE_FIRMWARE(FIRMWARE_RT2860); 454MODULE_FIRMWARE(FIRMWARE_RT2860);
1309MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); 455MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
1310#endif /* CONFIG_PCI */
1311MODULE_LICENSE("GPL"); 456MODULE_LICENSE("GPL");
1312 457
1313#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1314static int rt2800soc_probe(struct platform_device *pdev)
1315{
1316 return rt2x00soc_probe(pdev, &rt2800pci_ops);
1317}
1318
1319static struct platform_driver rt2800soc_driver = {
1320 .driver = {
1321 .name = "rt2800_wmac",
1322 .owner = THIS_MODULE,
1323 .mod_name = KBUILD_MODNAME,
1324 },
1325 .probe = rt2800soc_probe,
1326 .remove = rt2x00soc_remove,
1327 .suspend = rt2x00soc_suspend,
1328 .resume = rt2x00soc_resume,
1329};
1330#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
1331
1332#ifdef CONFIG_PCI
1333static int rt2800pci_probe(struct pci_dev *pci_dev, 458static int rt2800pci_probe(struct pci_dev *pci_dev,
1334 const struct pci_device_id *id) 459 const struct pci_device_id *id)
1335{ 460{
@@ -1344,39 +469,5 @@ static struct pci_driver rt2800pci_driver = {
1344 .suspend = rt2x00pci_suspend, 469 .suspend = rt2x00pci_suspend,
1345 .resume = rt2x00pci_resume, 470 .resume = rt2x00pci_resume,
1346}; 471};
1347#endif /* CONFIG_PCI */
1348
1349static int __init rt2800pci_init(void)
1350{
1351 int ret = 0;
1352
1353#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1354 ret = platform_driver_register(&rt2800soc_driver);
1355 if (ret)
1356 return ret;
1357#endif
1358#ifdef CONFIG_PCI
1359 ret = pci_register_driver(&rt2800pci_driver);
1360 if (ret) {
1361#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1362 platform_driver_unregister(&rt2800soc_driver);
1363#endif
1364 return ret;
1365 }
1366#endif
1367
1368 return ret;
1369}
1370
1371static void __exit rt2800pci_exit(void)
1372{
1373#ifdef CONFIG_PCI
1374 pci_unregister_driver(&rt2800pci_driver);
1375#endif
1376#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1377 platform_driver_unregister(&rt2800soc_driver);
1378#endif
1379}
1380 472
1381module_init(rt2800pci_init); 473module_pci_driver(rt2800pci_driver);
1382module_exit(rt2800pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index ab22a087c50d..a81c9ee281c0 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -35,107 +35,10 @@
35#define RT2800PCI_H 35#define RT2800PCI_H
36 36
37/* 37/*
38 * Queue register offset macros
39 */
40#define TX_QUEUE_REG_OFFSET 0x10
41#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
42#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
43#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
44#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
45
46/*
47 * 8051 firmware image. 38 * 8051 firmware image.
48 */ 39 */
49#define FIRMWARE_RT2860 "rt2860.bin" 40#define FIRMWARE_RT2860 "rt2860.bin"
50#define FIRMWARE_RT3290 "rt3290.bin" 41#define FIRMWARE_RT3290 "rt3290.bin"
51#define FIRMWARE_IMAGE_BASE 0x2000 42#define FIRMWARE_IMAGE_BASE 0x2000
52 43
53/*
54 * DMA descriptor defines.
55 */
56#define TXD_DESC_SIZE (4 * sizeof(__le32))
57#define RXD_DESC_SIZE (4 * sizeof(__le32))
58
59/*
60 * TX descriptor format for TX, PRIO and Beacon Ring.
61 */
62
63/*
64 * Word0
65 */
66#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
67
68/*
69 * Word1
70 */
71#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
72#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
73#define TXD_W1_BURST FIELD32(0x00008000)
74#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
75#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
76#define TXD_W1_DMA_DONE FIELD32(0x80000000)
77
78/*
79 * Word2
80 */
81#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
82
83/*
84 * Word3
85 * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
86 * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
87 * 0:MGMT, 1:HCCA 2:EDCA
88 */
89#define TXD_W3_WIV FIELD32(0x01000000)
90#define TXD_W3_QSEL FIELD32(0x06000000)
91#define TXD_W3_TCO FIELD32(0x20000000)
92#define TXD_W3_UCO FIELD32(0x40000000)
93#define TXD_W3_ICO FIELD32(0x80000000)
94
95/*
96 * RX descriptor format for RX Ring.
97 */
98
99/*
100 * Word0
101 */
102#define RXD_W0_SDP0 FIELD32(0xffffffff)
103
104/*
105 * Word1
106 */
107#define RXD_W1_SDL1 FIELD32(0x00003fff)
108#define RXD_W1_SDL0 FIELD32(0x3fff0000)
109#define RXD_W1_LS0 FIELD32(0x40000000)
110#define RXD_W1_DMA_DONE FIELD32(0x80000000)
111
112/*
113 * Word2
114 */
115#define RXD_W2_SDP1 FIELD32(0xffffffff)
116
117/*
118 * Word3
119 * AMSDU: RX with 802.3 header, not 802.11 header.
120 * DECRYPTED: This frame is being decrypted.
121 */
122#define RXD_W3_BA FIELD32(0x00000001)
123#define RXD_W3_DATA FIELD32(0x00000002)
124#define RXD_W3_NULLDATA FIELD32(0x00000004)
125#define RXD_W3_FRAG FIELD32(0x00000008)
126#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
127#define RXD_W3_MULTICAST FIELD32(0x00000020)
128#define RXD_W3_BROADCAST FIELD32(0x00000040)
129#define RXD_W3_MY_BSS FIELD32(0x00000080)
130#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
131#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
132#define RXD_W3_AMSDU FIELD32(0x00000800)
133#define RXD_W3_HTC FIELD32(0x00001000)
134#define RXD_W3_RSSI FIELD32(0x00002000)
135#define RXD_W3_L2PAD FIELD32(0x00004000)
136#define RXD_W3_AMPDU FIELD32(0x00008000)
137#define RXD_W3_DECRYPTED FIELD32(0x00010000)
138#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
139#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
140
141#endif /* RT2800PCI_H */ 44#endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
new file mode 100644
index 000000000000..1359227ca411
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -0,0 +1,263 @@
1/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
2 * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
3 * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
4 * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
5 * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
6 * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
7 * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
8 * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
9 * <http://rt2x00.serialmonkey.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the
23 * Free Software Foundation, Inc.,
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 */
26
27/* Module: rt2800soc
28 * Abstract: rt2800 WiSoC specific routines.
29 */
30
31#include <linux/etherdevice.h>
32#include <linux/init.h>
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/platform_device.h>
36
37#include "rt2x00.h"
38#include "rt2x00mmio.h"
39#include "rt2x00soc.h"
40#include "rt2800.h"
41#include "rt2800lib.h"
42#include "rt2800mmio.h"
43
44/* Allow hardware encryption to be disabled. */
45static bool modparam_nohwcrypt;
46module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
47MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
48
49static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
50{
51 return modparam_nohwcrypt;
52}
53
54static void rt2800soc_disable_radio(struct rt2x00_dev *rt2x00dev)
55{
56 rt2800_disable_radio(rt2x00dev);
57 rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
58 rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
59}
60
61static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev,
62 enum dev_state state)
63{
64 int retval = 0;
65
66 switch (state) {
67 case STATE_RADIO_ON:
68 retval = rt2800mmio_enable_radio(rt2x00dev);
69 break;
70
71 case STATE_RADIO_OFF:
72 rt2800soc_disable_radio(rt2x00dev);
73 break;
74
75 case STATE_RADIO_IRQ_ON:
76 case STATE_RADIO_IRQ_OFF:
77 rt2800mmio_toggle_irq(rt2x00dev, state);
78 break;
79
80 case STATE_DEEP_SLEEP:
81 case STATE_SLEEP:
82 case STATE_STANDBY:
83 case STATE_AWAKE:
84 /* These states are not supported, but don't report an error */
85 retval = 0;
86 break;
87
88 default:
89 retval = -ENOTSUPP;
90 break;
91 }
92
93 if (unlikely(retval))
94 rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
95 state, retval);
96
97 return retval;
98}
99
100static int rt2800soc_read_eeprom(struct rt2x00_dev *rt2x00dev)
101{
102 void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
103
104 if (!base_addr)
105 return -ENOMEM;
106
107 memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
108
109 iounmap(base_addr);
110 return 0;
111}
112
113/* Firmware functions */
114static char *rt2800soc_get_firmware_name(struct rt2x00_dev *rt2x00dev)
115{
116 WARN_ON_ONCE(1);
117 return NULL;
118}
119
120static int rt2800soc_load_firmware(struct rt2x00_dev *rt2x00dev,
121 const u8 *data, const size_t len)
122{
123 WARN_ON_ONCE(1);
124 return 0;
125}
126
127static int rt2800soc_check_firmware(struct rt2x00_dev *rt2x00dev,
128 const u8 *data, const size_t len)
129{
130 WARN_ON_ONCE(1);
131 return 0;
132}
133
134static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
135 const u8 *data, const size_t len)
136{
137 WARN_ON_ONCE(1);
138 return 0;
139}
140
141static const struct ieee80211_ops rt2800soc_mac80211_ops = {
142 .tx = rt2x00mac_tx,
143 .start = rt2x00mac_start,
144 .stop = rt2x00mac_stop,
145 .add_interface = rt2x00mac_add_interface,
146 .remove_interface = rt2x00mac_remove_interface,
147 .config = rt2x00mac_config,
148 .configure_filter = rt2x00mac_configure_filter,
149 .set_key = rt2x00mac_set_key,
150 .sw_scan_start = rt2x00mac_sw_scan_start,
151 .sw_scan_complete = rt2x00mac_sw_scan_complete,
152 .get_stats = rt2x00mac_get_stats,
153 .get_tkip_seq = rt2800_get_tkip_seq,
154 .set_rts_threshold = rt2800_set_rts_threshold,
155 .sta_add = rt2x00mac_sta_add,
156 .sta_remove = rt2x00mac_sta_remove,
157 .bss_info_changed = rt2x00mac_bss_info_changed,
158 .conf_tx = rt2800_conf_tx,
159 .get_tsf = rt2800_get_tsf,
160 .rfkill_poll = rt2x00mac_rfkill_poll,
161 .ampdu_action = rt2800_ampdu_action,
162 .flush = rt2x00mac_flush,
163 .get_survey = rt2800_get_survey,
164 .get_ringparam = rt2x00mac_get_ringparam,
165 .tx_frames_pending = rt2x00mac_tx_frames_pending,
166};
167
168static const struct rt2800_ops rt2800soc_rt2800_ops = {
169 .register_read = rt2x00mmio_register_read,
170 .register_read_lock = rt2x00mmio_register_read, /* same for SoCs */
171 .register_write = rt2x00mmio_register_write,
172 .register_write_lock = rt2x00mmio_register_write, /* same for SoCs */
173 .register_multiread = rt2x00mmio_register_multiread,
174 .register_multiwrite = rt2x00mmio_register_multiwrite,
175 .regbusy_read = rt2x00mmio_regbusy_read,
176 .read_eeprom = rt2800soc_read_eeprom,
177 .hwcrypt_disabled = rt2800soc_hwcrypt_disabled,
178 .drv_write_firmware = rt2800soc_write_firmware,
179 .drv_init_registers = rt2800mmio_init_registers,
180 .drv_get_txwi = rt2800mmio_get_txwi,
181};
182
183static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
184 .irq_handler = rt2800mmio_interrupt,
185 .txstatus_tasklet = rt2800mmio_txstatus_tasklet,
186 .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet,
187 .tbtt_tasklet = rt2800mmio_tbtt_tasklet,
188 .rxdone_tasklet = rt2800mmio_rxdone_tasklet,
189 .autowake_tasklet = rt2800mmio_autowake_tasklet,
190 .probe_hw = rt2800_probe_hw,
191 .get_firmware_name = rt2800soc_get_firmware_name,
192 .check_firmware = rt2800soc_check_firmware,
193 .load_firmware = rt2800soc_load_firmware,
194 .initialize = rt2x00mmio_initialize,
195 .uninitialize = rt2x00mmio_uninitialize,
196 .get_entry_state = rt2800mmio_get_entry_state,
197 .clear_entry = rt2800mmio_clear_entry,
198 .set_device_state = rt2800soc_set_device_state,
199 .rfkill_poll = rt2800_rfkill_poll,
200 .link_stats = rt2800_link_stats,
201 .reset_tuner = rt2800_reset_tuner,
202 .link_tuner = rt2800_link_tuner,
203 .gain_calibration = rt2800_gain_calibration,
204 .vco_calibration = rt2800_vco_calibration,
205 .start_queue = rt2800mmio_start_queue,
206 .kick_queue = rt2800mmio_kick_queue,
207 .stop_queue = rt2800mmio_stop_queue,
208 .flush_queue = rt2x00mmio_flush_queue,
209 .write_tx_desc = rt2800mmio_write_tx_desc,
210 .write_tx_data = rt2800_write_tx_data,
211 .write_beacon = rt2800_write_beacon,
212 .clear_beacon = rt2800_clear_beacon,
213 .fill_rxdone = rt2800mmio_fill_rxdone,
214 .config_shared_key = rt2800_config_shared_key,
215 .config_pairwise_key = rt2800_config_pairwise_key,
216 .config_filter = rt2800_config_filter,
217 .config_intf = rt2800_config_intf,
218 .config_erp = rt2800_config_erp,
219 .config_ant = rt2800_config_ant,
220 .config = rt2800_config,
221 .sta_add = rt2800_sta_add,
222 .sta_remove = rt2800_sta_remove,
223};
224
225static const struct rt2x00_ops rt2800soc_ops = {
226 .name = KBUILD_MODNAME,
227 .drv_data_size = sizeof(struct rt2800_drv_data),
228 .max_ap_intf = 8,
229 .eeprom_size = EEPROM_SIZE,
230 .rf_size = RF_SIZE,
231 .tx_queues = NUM_TX_QUEUES,
232 .queue_init = rt2800mmio_queue_init,
233 .lib = &rt2800soc_rt2x00_ops,
234 .drv = &rt2800soc_rt2800_ops,
235 .hw = &rt2800soc_mac80211_ops,
236#ifdef CONFIG_RT2X00_LIB_DEBUGFS
237 .debugfs = &rt2800_rt2x00debug,
238#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
239};
240
241static int rt2800soc_probe(struct platform_device *pdev)
242{
243 return rt2x00soc_probe(pdev, &rt2800soc_ops);
244}
245
246static struct platform_driver rt2800soc_driver = {
247 .driver = {
248 .name = "rt2800_wmac",
249 .owner = THIS_MODULE,
250 .mod_name = KBUILD_MODNAME,
251 },
252 .probe = rt2800soc_probe,
253 .remove = rt2x00soc_remove,
254 .suspend = rt2x00soc_suspend,
255 .resume = rt2x00soc_resume,
256};
257
258module_platform_driver(rt2800soc_driver);
259
260MODULE_AUTHOR(DRV_PROJECT);
261MODULE_VERSION(DRV_VERSION);
262MODULE_DESCRIPTION("Ralink WiSoC Wireless LAN driver.");
263MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 96677ce55da4..997df03a0c2e 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -148,6 +148,8 @@ static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
148 return false; 148 return false;
149} 149}
150 150
151#define TXSTATUS_READ_INTERVAL 1000000
152
151static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev, 153static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
152 int urb_status, u32 tx_status) 154 int urb_status, u32 tx_status)
153{ 155{
@@ -176,8 +178,9 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
176 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); 178 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
177 179
178 if (rt2800usb_txstatus_pending(rt2x00dev)) { 180 if (rt2800usb_txstatus_pending(rt2x00dev)) {
179 /* Read register after 250 us */ 181 /* Read register after 1 ms */
180 hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000), 182 hrtimer_start(&rt2x00dev->txstatus_timer,
183 ktime_set(0, TXSTATUS_READ_INTERVAL),
181 HRTIMER_MODE_REL); 184 HRTIMER_MODE_REL);
182 return false; 185 return false;
183 } 186 }
@@ -202,8 +205,9 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
202 if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags)) 205 if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
203 return; 206 return;
204 207
205 /* Read TX_STA_FIFO register after 500 us */ 208 /* Read TX_STA_FIFO register after 2 ms */
206 hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000), 209 hrtimer_start(&rt2x00dev->txstatus_timer,
210 ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
207 HRTIMER_MODE_REL); 211 HRTIMER_MODE_REL);
208} 212}
209 213
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 51f17cfb93f9..7c157857f5ce 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -754,6 +754,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
754 struct rt2x00_dev *rt2x00dev = hw->priv; 754 struct rt2x00_dev *rt2x00dev = hw->priv;
755 struct data_queue *queue; 755 struct data_queue *queue;
756 756
757 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
758 return;
759
757 tx_queue_for_each(rt2x00dev, queue) 760 tx_queue_for_each(rt2x00dev, queue)
758 rt2x00queue_flush_queue(queue, drop); 761 rt2x00queue_flush_queue(queue, drop);
759} 762}