author	David S. Miller <davem@davemloft.net>	2018-10-14 16:04:54 -0400
committer	David S. Miller <davem@davemloft.net>	2018-10-14 16:04:54 -0400
commit	921060ccdae9756f645bcf42ca27fd3ad3a352ff (patch)
tree	a72f30d36765b5871d66dc70e3f6f17149da7382
parent	d864991b220b7c62e81d21209e1fd978fd67352c (diff)
parent	f95cd52476dee761a1a8ebe617dd01793e0eb39c (diff)
Merge tag 'wireless-drivers-next-for-davem-2018-10-14' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next
Kalle Valo says:

====================
wireless-drivers-next patches for 4.20

Third set of patches for 4.20. Most notable is finalising ath10k
wcn3990 support; all components should be implemented now.

Major changes:

ath10k

* support NET_DETECT WoWLAN feature

* wcn3990 basic functionality now working after we got QMI support

mt76

* mt76x0e improvements (should be usable now)

* more mt76x0/mt76x2 unification work

brcmsmac

* fix a problem on AP mode with clients using power save mode

iwlwifi

* support for a new scan type: fast balance
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt	| 6
-rw-r--r--	drivers/net/wireless/ath/ath10k/Kconfig	| 1
-rw-r--r--	drivers/net/wireless/ath/ath10k/Makefile	| 4
-rw-r--r--	drivers/net/wireless/ath/ath10k/core.c	| 14
-rw-r--r--	drivers/net/wireless/ath/ath10k/core.h	| 5
-rw-r--r--	drivers/net/wireless/ath/ath10k/debug.c	| 2
-rw-r--r--	drivers/net/wireless/ath/ath10k/debug.h	| 1
-rw-r--r--	drivers/net/wireless/ath/ath10k/htt_rx.c	| 5
-rw-r--r--	drivers/net/wireless/ath/ath10k/mac.c	| 76
-rw-r--r--	drivers/net/wireless/ath/ath10k/pci.c	| 23
-rw-r--r--	drivers/net/wireless/ath/ath10k/qmi.c	| 1019
-rw-r--r--	drivers/net/wireless/ath/ath10k/qmi.h	| 129
-rw-r--r--	drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c	| 2072
-rw-r--r--	drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h	| 677
-rw-r--r--	drivers/net/wireless/ath/ath10k/snoc.c	| 267
-rw-r--r--	drivers/net/wireless/ath/ath10k/snoc.h	| 4
-rw-r--r--	drivers/net/wireless/ath/ath10k/wmi-ops.h	| 21
-rw-r--r--	drivers/net/wireless/ath/ath10k/wmi-tlv.c	| 187
-rw-r--r--	drivers/net/wireless/ath/ath10k/wmi-tlv.h	| 254
-rw-r--r--	drivers/net/wireless/ath/ath10k/wmi.h	| 57
-rw-r--r--	drivers/net/wireless/ath/ath10k/wow.c	| 168
-rw-r--r--	drivers/net/wireless/ath/ath9k/antenna.c	| 8
-rw-r--r--	drivers/net/wireless/ath/ath9k/common-spectral.c	| 8
-rw-r--r--	drivers/net/wireless/ath/ath9k/debug.c	| 24
-rw-r--r--	drivers/net/wireless/ath/ath9k/debug.h	| 20
-rw-r--r--	drivers/net/wireless/ath/ath9k/main.c	| 2
-rw-r--r--	drivers/net/wireless/ath/ath9k/recv.c	| 18
-rw-r--r--	drivers/net/wireless/ath/ath9k/xmit.c	| 18
-rw-r--r--	drivers/net/wireless/ath/wil6210/debugfs.c	| 14
-rw-r--r--	drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c	| 26
-rw-r--r--	drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h	| 1
-rw-r--r--	drivers/net/wireless/intel/iwlegacy/4965.c	| 2
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/fw/dbg.c	| 27
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/fw/dbg.h	| 1
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h	| 30
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/d3.c	| 64
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/fw.c	| 9
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/mvm.h	| 54
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/rs.c	| 24
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/scan.c	| 115
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/sta.c	| 837
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/sta.h	| 8
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/tx.c	| 34
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/utils.c	| 420
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c	| 28
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/tx.c	| 29
-rw-r--r--	drivers/net/wireless/marvell/libertas/if_cs.c	| 4
-rw-r--r--	drivers/net/wireless/marvell/libertas/if_sdio.c	| 4
-rw-r--r--	drivers/net/wireless/marvell/libertas/if_spi.c	| 4
-rw-r--r--	drivers/net/wireless/marvell/libertas/if_usb.c	| 7
-rw-r--r--	drivers/net/wireless/marvell/libertas/main.c	| 17
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mmio.c	| 1
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76.h	| 9
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/dma.h	| 126
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c	| 55
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h	| 6
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/init.c	| 9
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/main.c	| 22
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h	| 3
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h	| 3
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/pci.c	| 49
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c	| 1
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/phy.c	| 311
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c	| 7
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02.h	| 25
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c	| 33
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h	| 37
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_mac.c	| 206
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_mac.h	| 31
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c	| 74
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h	| 14
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c	| 2
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_phy.c	| 167
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_phy.h	| 39
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_regs.h	| 4
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c	| 29
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_usb.h	| 8
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c	| 20
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c	| 27
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x02_util.c	| 120
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c	| 80
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h	| 23
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/init.c	| 3
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c	| 5
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h	| 2
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c	| 18
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c	| 2
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c	| 2
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c	| 6
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c	| 100
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/phy.c	| 61
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c	| 11
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c	| 6
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c	| 4
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c	| 18
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c	| 32
-rw-r--r--	drivers/net/wireless/mediatek/mt76/tx.c	| 3
-rw-r--r--	drivers/net/wireless/mediatek/mt76/usb.c	| 1
-rw-r--r--	drivers/net/wireless/quantenna/Kconfig	| 2
-rw-r--r--	drivers/net/wireless/quantenna/qtnfmac/Kconfig	| 2
-rw-r--r--	drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c	| 17
-rw-r--r--	drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h	| 22
-rw-r--r--	drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h	| 245
-rw-r--r--	drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c	| 5
-rw-r--r--	drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c	| 71
-rw-r--r--	include/linux/qcom_scm.h	| 4
106 files changed, 6783 insertions, 2249 deletions
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 7fd4e8ce4149..2196d1ab3c8c 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -56,6 +56,11 @@ Optional properties:
 	   the length can vary between hw versions.
 - <supply-name>-supply: handle to the regulator device tree node
 	   optional "supply-name" is "vdd-0.8-cx-mx".
+- memory-region:
+	Usage: optional
+	Value type: <phandle>
+	Definition: reference to the reserved-memory for the msa region
+		    used by the wifi firmware running in Q6.
 
 Example (to supply the calibration data alone):
 
@@ -149,4 +154,5 @@ wifi@18000000 {
 		<0 140 0 /* CE10 */ >,
 		<0 141 0 /* CE11 */ >;
 	vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+	memory-region = <&wifi_msa_mem>;
 };
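
For context, the way a driver consumes such a property is sketched below. This is a minimal sketch modelled on ath10k_qmi_setup_msa_resources() added later in this merge; the helper name is illustrative and not a symbol in the driver.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>

static int resolve_msa_region(struct device *dev, phys_addr_t *pa, u32 *size)
{
	struct device_node *node;
	struct resource r;
	int ret;

	/* Look up the optional memory-region phandle from the binding. */
	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node)
		return -ENOENT;	/* absent: caller falls back to dynamic alloc */

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret)
		return ret;

	*pa = r.start;
	*size = resource_size(&r);
	return 0;
}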
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 6572a43590a8..e1ad6b9166a6 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -44,6 +44,7 @@ config ATH10K_SNOC
 	tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
 	depends on ATH10K
 	depends on ARCH_QCOM || COMPILE_TEST
+	select QCOM_QMI_HELPERS
 	---help---
 	  This module adds support for integrated WCN3990 chip connected
 	  to system NOC(SNOC). Currently work in progress and will not
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 44d60a61b242..66326b949ab1 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -36,7 +36,9 @@ obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
 ath10k_usb-y += usb.o
 
 obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
-ath10k_snoc-y += snoc.o
+ath10k_snoc-y += qmi.o \
+		 qmi_wlfw_v01.o \
+		 snoc.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 203f30992c26..da607febfd82 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -989,7 +989,7 @@ static int ath10k_download_fw(struct ath10k *ar)
 					  data, data_len);
 }
 
-static void ath10k_core_free_board_files(struct ath10k *ar)
+void ath10k_core_free_board_files(struct ath10k *ar)
 {
 	if (!IS_ERR(ar->normal_mode_fw.board))
 		release_firmware(ar->normal_mode_fw.board);
@@ -1004,6 +1004,7 @@ static void ath10k_core_free_board_files(struct ath10k *ar)
 	ar->normal_mode_fw.ext_board_data = NULL;
 	ar->normal_mode_fw.ext_board_len = 0;
 }
+EXPORT_SYMBOL(ath10k_core_free_board_files);
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
 {
@@ -1331,6 +1332,14 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
 		goto out;
 	}
 
+	if (ar->id.qmi_ids_valid) {
+		scnprintf(name, name_len,
+			  "bus=%s,qmi-board-id=%x",
+			  ath10k_bus_str(ar->hif.bus),
+			  ar->id.qmi_board_id);
+		goto out;
+	}
+
 	scnprintf(name, name_len,
 		  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
 		  ath10k_bus_str(ar->hif.bus),
@@ -1359,7 +1368,7 @@ static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name,
 	return -1;
 }
 
-static int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
+int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
 {
 	char boardname[100], fallback_boardname[100];
 	int ret;
@@ -1407,6 +1416,7 @@ success:
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
 	return 0;
 }
+EXPORT_SYMBOL(ath10k_core_fetch_board_file);
 
 static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar)
 {
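
The practical effect of the new qmi_ids_valid branch is a second boardname form used when matching entries in board-2.bin. A standalone sketch of the two string shapes; the bus names and IDs below are illustrative values, not taken from a real device:

#include <stdio.h>

int main(void)
{
	char name[100];

	/* New QMI form used by WCN3990/SNOC: */
	snprintf(name, sizeof(name), "bus=%s,qmi-board-id=%x", "snoc", 0xff);
	printf("%s\n", name);	/* -> bus=snoc,qmi-board-id=ff */

	/* Existing PCI form, kept as the fallback: */
	snprintf(name, sizeof(name),
		 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x",
		 "pci", 0x168c, 0x003e, 0, 0);
	printf("%s\n", name);
	return 0;
}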
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index c76af343db3d..042418097cf9 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -951,6 +951,7 @@ struct ath10k {
 	/* protected by conf_mutex */
 	u8 ps_state_enable;
 
+	bool nlo_enabled;
 	bool p2p;
 
 	struct {
@@ -988,6 +989,8 @@ struct ath10k {
 		u32 subsystem_device;
 
 		bool bmi_ids_valid;
+		bool qmi_ids_valid;
+		u32 qmi_board_id;
 		u8 bmi_board_id;
 		u8 bmi_eboard_id;
 		u8 bmi_chip_id;
@@ -1215,5 +1218,7 @@ void ath10k_core_stop(struct ath10k *ar);
 int ath10k_core_register(struct ath10k *ar,
 			 const struct ath10k_bus_params *bus_params);
 void ath10k_core_unregister(struct ath10k *ar);
+int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type);
+void ath10k_core_free_board_files(struct ath10k *ar);
 
 #endif /* _CORE_H_ */
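
A sketch of the intended pairing for the two newly exported calls, roughly as the SNOC/QMI path added in this series uses them (the wrapper names here are illustrative; the real callers live in qmi.c):

static int example_load_board(struct ath10k *ar)
{
	int ret;

	ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD);
	if (ret) {
		ath10k_err(ar, "failed to fetch board file: %d\n", ret);
		return ret;
	}
	/* ... push ar->normal_mode_fw.board_data to firmware ... */
	return 0;
}

static void example_unload_board(struct ath10k *ar)
{
	/* Release the firmware blobs requested by the fetch above. */
	ath10k_core_free_board_files(ar);
}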
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 2c0cb6757fc6..15964b374f68 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -2421,7 +2421,7 @@ static ssize_t ath10k_write_ps_state_enable(struct file *file,
 	if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
 		return -EINVAL;
 
-	if (ps_state_enable > 1 || ps_state_enable < 0)
+	if (ps_state_enable > 1)
 		return -EINVAL;
 
 	mutex_lock(&ar->conf_mutex);
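
The dropped ps_state_enable < 0 arm could never fire: the variable is a u8, so the comparison is always false (and provokes compiler warnings such as -Wtype-limits). A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned char ps_state_enable = 200;	/* stand-in for the u8 */

	if (ps_state_enable < 0)	/* always false for an unsigned type */
		printf("unreachable\n");
	else
		printf("u8 is never negative; '> 1' alone is sufficient\n");
	return 0;
}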
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 3a6191cff2f9..5cf16d690724 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -44,6 +44,7 @@ enum ath10k_debug_mask {
 	ATH10K_DBG_USB		= 0x00040000,
 	ATH10K_DBG_USB_BULK	= 0x00080000,
 	ATH10K_DBG_SNOC		= 0x00100000,
+	ATH10K_DBG_QMI		= 0x00200000,
 	ATH10K_DBG_ANY		= 0xffffffff,
 };
 
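
The new level slots into the driver's debug bitmask, which is normally selected at module load through ath10k_core's debug_mask parameter. A standalone sketch of the mask arithmetic (the modprobe value in the comment is an assumption about how a user would enable it):

#include <stdio.h>

enum dbg_mask {
	DBG_SNOC = 0x00100000,
	DBG_QMI  = 0x00200000,	/* the bit added above */
};

int main(void)
{
	/* e.g. modprobe ath10k_core debug_mask=0x300000 sets both bits */
	unsigned int debug_mask = DBG_SNOC | DBG_QMI;

	if (debug_mask & DBG_QMI)
		printf("qmi debug enabled, mask 0x%08x\n", debug_mask);
	return 0;
}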
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index f2405258a6d3..ffec98f7be50 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2680,8 +2680,6 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
 		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
 	} else {
 		mcs = legacy_rate_idx;
-		if (mcs < 0)
-			return;
 
 		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
 		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
@@ -2753,7 +2751,8 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
 			       struct ath10k_per_peer_tx_stats *peer_stats)
 {
 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
-	u8 rate = 0, rate_idx = 0, sgi;
+	u8 rate = 0, sgi;
+	s8 rate_idx = 0;
 	struct rate_info txrate;
 
 	lockdep_assert_held(&ar->data_lock);
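
Both hunks are one fix: the legacy rate lookup can return a negative error value, but storing it in a u8 wraps it to a large positive index, so the callee's mcs < 0 test was dead and the caller's variable had to become s8. A standalone illustration of the wrap:

#include <stdio.h>

int main(void)
{
	signed char s8_idx = -1;	/* error sentinel from a rate lookup */
	unsigned char u8_idx = s8_idx;	/* old declaration: u8 rate_idx */

	/* -1 becomes 255 in a u8, so 'u8_idx < 0' can never catch it and
	 * it would index far past the legacy rate table:
	 */
	printf("s8: %d, u8: %u\n", s8_idx, (unsigned)u8_idx);	/* -1 vs 255 */
	return 0;
}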
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 3933dd96da55..a1c2801ded10 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -164,7 +164,7 @@ static int ath10k_mac_get_rate_hw_value(int bitrate)
 	if (ath10k_mac_bitrate_is_cck(bitrate))
 		hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
 
-	for (i = 0; i < sizeof(ath10k_rates); i++) {
+	for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) {
 		if (ath10k_rates[i].bitrate == bitrate)
 			return hw_value_prefix | ath10k_rates[i].hw_value;
 	}
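
The old bound iterated to sizeof(ath10k_rates) — the table's size in bytes — reading well past its end; ARRAY_SIZE() yields the element count. A standalone illustration of the difference (the struct layout is a stand-in, not the driver's):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct rate { int bitrate; unsigned char hw_value; };

int main(void)
{
	struct rate rates[12];

	/* byte count vs element count: looping to sizeof() overruns */
	printf("sizeof: %zu bytes, ARRAY_SIZE: %zu entries\n",
	       sizeof(rates), ARRAY_SIZE(rates));
	return 0;
}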
@@ -4697,6 +4697,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
 		goto err_core_stop;
 	}
 
+	if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
+		ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+		if (ret) {
+			ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
+			goto err_core_stop;
+		}
+	}
+
 	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
 		ret = ath10k_wmi_adaptive_qcs(ar, true);
 		if (ret) {
@@ -5682,22 +5690,22 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
 		return;
 	}
 
 	sband = ar->hw->wiphy->bands[def.chan->band];
 	basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
 	bitrate = sband->bitrates[basic_rate_idx].bitrate;
 
 	hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
 	if (hw_rate_code < 0) {
 		ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
 		mutex_unlock(&ar->conf_mutex);
 		return;
 	}
 
 	vdev_param = ar->wmi.vdev_param->mgmt_rate;
 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
 					hw_rate_code);
 	if (ret)
 		ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
 	}
 
 	mutex_unlock(&ar->conf_mutex);
@@ -6855,9 +6863,20 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			 u32 queues, bool drop)
 {
 	struct ath10k *ar = hw->priv;
-
-	if (drop)
+	struct ath10k_vif *arvif;
+	u32 bitmap;
+
+	if (drop) {
+		if (vif->type == NL80211_IFTYPE_STATION) {
+			bitmap = ~(1 << WMI_MGMT_TID);
+			list_for_each_entry(arvif, &ar->arvifs, list) {
+				if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+					ath10k_wmi_peer_flush(ar, arvif->vdev_id,
+							      arvif->bssid, bitmap);
+			}
+		}
 		return;
+	}
 
 	mutex_lock(&ar->conf_mutex);
 	ath10k_mac_wait_tx_complete(ar);
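
On a drop request for a station interface, the new code now asks firmware to flush every peer TID except the management TID instead of silently returning. A standalone sketch of the bitmap (WMI_MGMT_TID is taken as 17, the value I believe ath10k's wmi.h uses; treat it as an assumption):

#include <stdio.h>

#define WMI_MGMT_TID 17	/* assumed from ath10k wmi.h */

int main(void)
{
	unsigned int bitmap = ~(1u << WMI_MGMT_TID);

	/* every TID bit set except bit 17 -> 0xfffdffff */
	printf("peer flush bitmap: 0x%08x\n", bitmap);
	return 0;
}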
@@ -8493,6 +8512,18 @@ int ath10k_mac_register(struct ath10k *ar)
 	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
 	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
 
+	if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
+		ar->hw->wiphy->max_sched_scan_reqs = 1;
+		ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+		ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+		ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+		ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+		ar->hw->wiphy->max_sched_scan_plan_interval =
+			WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+		ar->hw->wiphy->max_sched_scan_plan_iterations =
+			WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+	}
+
 	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
 	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
 	ar->hw->txq_data_size = sizeof(struct ath10k_txq);
@@ -8542,9 +8573,10 @@ int ath10k_mac_register(struct ath10k *ar)
 		wiphy_ext_feature_set(ar->hw->wiphy,
 				      NL80211_EXT_FEATURE_SET_SCAN_DWELL);
 
-	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
+	    test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
 		wiphy_ext_feature_set(ar->hw->wiphy,
-				      NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT);
+				      NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
 
 	/*
 	 * on LL hardware queues are managed entirely by the FW
@@ -8635,12 +8667,6 @@ int ath10k_mac_register(struct ath10k *ar)
 	}
 
 	if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
-		ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
-		if (ret) {
-			ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
-			goto err_dfs_detector_exit;
-		}
-
 		ar->hw->wiphy->features |=
 			NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
 	}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 873dbb65439f..01b4edb00e9e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1071,10 +1071,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 	int ret = 0;
 	u32 *buf;
-	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
 	struct ath10k_ce_pipe *ce_diag;
 	void *data_buf = NULL;
-	u32 ce_data;	/* Host buffer address in CE space */
 	dma_addr_t ce_data_base = 0;
 	int i;
 
@@ -1088,9 +1087,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 	 *   1) 4-byte alignment
 	 *   2) Buffer in DMA-able space
 	 */
-	orig_nbytes = nbytes;
+	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
 	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
-						       orig_nbytes,
+						       alloc_nbytes,
 						       &ce_data_base,
 						       GFP_ATOMIC);
 	if (!data_buf) {
@@ -1098,9 +1098,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 		goto done;
 	}
 
-	/* Copy caller's data to allocated DMA buf */
-	memcpy(data_buf, data, orig_nbytes);
-
 	/*
 	 * The address supplied by the caller is in the
 	 * Target CPU virtual address space.
@@ -1113,12 +1110,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 	 */
 	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 
-	remaining_bytes = orig_nbytes;
-	ce_data = ce_data_base;
+	remaining_bytes = nbytes;
 	while (remaining_bytes) {
 		/* FIXME: check cast */
 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
 
+		/* Copy caller's data to allocated DMA buf */
+		memcpy(data_buf, data, nbytes);
+
 		/* Set up to receive directly into Target(!) address */
 		ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
 		if (ret != 0)
@@ -1128,7 +1127,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 		 * Request CE to send caller-supplied data that
 		 * was copied to bounce buffer to Target(!) address.
 		 */
-		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
+		ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
 					    nbytes, 0, 0);
 		if (ret != 0)
 			goto done;
@@ -1171,12 +1170,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 
 		remaining_bytes -= nbytes;
 		address += nbytes;
-		ce_data += nbytes;
+		data += nbytes;
 	}
 
 done:
 	if (data_buf) {
-		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
 				  ce_data_base);
 	}
 
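
The fix caps the coherent allocation at one DIAG_TRANSFER_LIMIT-sized bounce buffer and stages each chunk into it just before sending, instead of allocating (and pre-copying) a DMA buffer as large as the whole request. A condensed userspace sketch of the new loop shape, assuming the driver's 2048-byte limit:

#include <stdio.h>
#include <string.h>

#define DIAG_TRANSFER_LIMIT 2048	/* assumed per-CE-transfer cap */

static void diag_write_sketch(unsigned char *bounce, const unsigned char *data,
			      unsigned int nbytes)
{
	unsigned int remaining = nbytes, chunk;

	while (remaining) {
		chunk = remaining < DIAG_TRANSFER_LIMIT ?
				remaining : DIAG_TRANSFER_LIMIT;
		memcpy(bounce, data, chunk);	/* stage chunk in bounce buf */
		/* ...post a CE transfer of 'chunk' bytes from the single
		 * bounce buffer (ce_data_base in the driver)...
		 */
		data += chunk;
		remaining -= chunk;
	}
}

int main(void)
{
	unsigned char bounce[DIAG_TRANSFER_LIMIT], src[5000] = { 0 };

	diag_write_sketch(bounce, src, sizeof(src));
	printf("copied %zu bytes in %u-byte chunks\n",
	       sizeof(src), (unsigned)DIAG_TRANSFER_LIMIT);
	return 0;
}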
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
new file mode 100644
index 000000000000..56cb1831dcdf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -0,0 +1,1019 @@
1/*
2 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/completion.h>
18#include <linux/device.h>
19#include <linux/debugfs.h>
20#include <linux/idr.h>
21#include <linux/kernel.h>
22#include <linux/of.h>
23#include <linux/of_address.h>
24#include <linux/module.h>
25#include <linux/net.h>
26#include <linux/platform_device.h>
27#include <linux/qcom_scm.h>
28#include <linux/string.h>
29#include <net/sock.h>
30
31#include "debug.h"
32#include "snoc.h"
33
34#define ATH10K_QMI_CLIENT_ID 0x4b4e454c
35#define ATH10K_QMI_TIMEOUT 30
36
37static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
38 struct ath10k_msa_mem_info *mem_info)
39{
40 struct qcom_scm_vmperm dst_perms[3];
41 struct ath10k *ar = qmi->ar;
42 unsigned int src_perms;
43 u32 perm_count;
44 int ret;
45
46 src_perms = BIT(QCOM_SCM_VMID_HLOS);
47
48 dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
49 dst_perms[0].perm = QCOM_SCM_PERM_RW;
50 dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
51 dst_perms[1].perm = QCOM_SCM_PERM_RW;
52
53 if (mem_info->secure) {
54 perm_count = 2;
55 } else {
56 dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
57 dst_perms[2].perm = QCOM_SCM_PERM_RW;
58 perm_count = 3;
59 }
60
61 ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
62 &src_perms, dst_perms, perm_count);
63 if (ret < 0)
64 ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);
65
66 return ret;
67}
68
69static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
70 struct ath10k_msa_mem_info *mem_info)
71{
72 struct qcom_scm_vmperm dst_perms;
73 struct ath10k *ar = qmi->ar;
74 unsigned int src_perms;
75 int ret;
76
77 src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);
78
79 if (!mem_info->secure)
80 src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);
81
82 dst_perms.vmid = QCOM_SCM_VMID_HLOS;
83 dst_perms.perm = QCOM_SCM_PERM_RW;
84
85 ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
86 &src_perms, &dst_perms, 1);
87 if (ret < 0)
88 ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);
89
90 return ret;
91}
92
93static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
94{
95 int ret;
96 int i;
97
98 for (i = 0; i < qmi->nr_mem_region; i++) {
99 ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
100 if (ret)
101 goto err_unmap;
102 }
103
104 return 0;
105
106err_unmap:
107 for (i--; i >= 0; i--)
108 ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
109 return ret;
110}
111
112static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
113{
114 int i;
115
116 for (i = 0; i < qmi->nr_mem_region; i++)
117 ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
118}
119
120static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
121{
122 struct wlfw_msa_info_resp_msg_v01 resp = {};
123 struct wlfw_msa_info_req_msg_v01 req = {};
124 struct ath10k *ar = qmi->ar;
125 struct qmi_txn txn;
126 int ret;
127 int i;
128
129 req.msa_addr = qmi->msa_pa;
130 req.size = qmi->msa_mem_size;
131
132 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
133 wlfw_msa_info_resp_msg_v01_ei, &resp);
134 if (ret < 0)
135 goto out;
136
137 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
138 QMI_WLFW_MSA_INFO_REQ_V01,
139 WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
140 wlfw_msa_info_req_msg_v01_ei, &req);
141 if (ret < 0) {
142 qmi_txn_cancel(&txn);
143 ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
144 goto out;
145 }
146
147 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
148 if (ret < 0)
149 goto out;
150
151 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
152 ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
153 ret = -EINVAL;
154 goto out;
155 }
156
157 if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
158 ath10k_err(ar, "invalid memory region length received: %d\n",
159 resp.mem_region_info_len);
160 ret = -EINVAL;
161 goto out;
162 }
163
164 qmi->nr_mem_region = resp.mem_region_info_len;
165 for (i = 0; i < resp.mem_region_info_len; i++) {
166 qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
167 qmi->mem_region[i].size = resp.mem_region_info[i].size;
168 qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
169 ath10k_dbg(ar, ATH10K_DBG_QMI,
170 "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
171 i, &qmi->mem_region[i].addr,
172 qmi->mem_region[i].size,
173 qmi->mem_region[i].secure);
174 }
175
176 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
177 return 0;
178
179out:
180 return ret;
181}
182
183static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
184{
185 struct wlfw_msa_ready_resp_msg_v01 resp = {};
186 struct wlfw_msa_ready_req_msg_v01 req = {};
187 struct ath10k *ar = qmi->ar;
188 struct qmi_txn txn;
189 int ret;
190
191 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
192 wlfw_msa_ready_resp_msg_v01_ei, &resp);
193 if (ret < 0)
194 goto out;
195
196 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
197 QMI_WLFW_MSA_READY_REQ_V01,
198 WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
199 wlfw_msa_ready_req_msg_v01_ei, &req);
200 if (ret < 0) {
201 qmi_txn_cancel(&txn);
202 ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
203 goto out;
204 }
205
206 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
207 if (ret < 0)
208 goto out;
209
210 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
211 ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
212 ret = -EINVAL;
213 }
214
215 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
216 return 0;
217
218out:
219 return ret;
220}
221
222static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
223{
224 struct wlfw_bdf_download_resp_msg_v01 resp = {};
225 struct wlfw_bdf_download_req_msg_v01 *req;
226 struct ath10k *ar = qmi->ar;
227 unsigned int remaining;
228 struct qmi_txn txn;
229 const u8 *temp;
230 int ret;
231
232 req = kzalloc(sizeof(*req), GFP_KERNEL);
233 if (!req)
234 return -ENOMEM;
235
236 temp = ar->normal_mode_fw.board_data;
237 remaining = ar->normal_mode_fw.board_len;
238
239 while (remaining) {
240 req->valid = 1;
241 req->file_id_valid = 1;
242 req->file_id = 0;
243 req->total_size_valid = 1;
244 req->total_size = ar->normal_mode_fw.board_len;
245 req->seg_id_valid = 1;
246 req->data_valid = 1;
247 req->end_valid = 1;
248
249 if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
250 req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
251 } else {
252 req->data_len = remaining;
253 req->end = 1;
254 }
255
256 memcpy(req->data, temp, req->data_len);
257
258 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
259 wlfw_bdf_download_resp_msg_v01_ei,
260 &resp);
261 if (ret < 0)
262 goto out;
263
264 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
265 QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
266 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
267 wlfw_bdf_download_req_msg_v01_ei, req);
268 if (ret < 0) {
269 qmi_txn_cancel(&txn);
270 goto out;
271 }
272
273 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
274
275 if (ret < 0)
276 goto out;
277
278 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
279 ath10k_err(ar, "failed to download board data file: %d\n",
280 resp.resp.error);
281 ret = -EINVAL;
282 goto out;
283 }
284
285 remaining -= req->data_len;
286 temp += req->data_len;
287 req->seg_id++;
288 }
289
290 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");
291
292 kfree(req);
293 return 0;
294
295out:
296 kfree(req);
297 return ret;
298}
299
300static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
301{
302 struct wlfw_cal_report_resp_msg_v01 resp = {};
303 struct wlfw_cal_report_req_msg_v01 req = {};
304 struct ath10k *ar = qmi->ar;
305 struct qmi_txn txn;
306 int i, j = 0;
307 int ret;
308
309 ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
310 &resp);
311 if (ret < 0)
312 goto out;
313
314 for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
315 if (qmi->cal_data[i].total_size &&
316 qmi->cal_data[i].data) {
317 req.meta_data[j] = qmi->cal_data[i].cal_id;
318 j++;
319 }
320 }
321 req.meta_data_len = j;
322
323 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
324 QMI_WLFW_CAL_REPORT_REQ_V01,
325 WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
326 wlfw_cal_report_req_msg_v01_ei, &req);
327 if (ret < 0) {
328 qmi_txn_cancel(&txn);
329 ath10k_err(ar, "failed to send calibration request: %d\n", ret);
330 goto out;
331 }
332
333 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
334 if (ret < 0)
335 goto out;
336
337 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
338 ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
339 ret = -EINVAL;
340 goto out;
341 }
342
343 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
344 return 0;
345
346out:
347 return ret;
348}
349
350static int
351ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
352{
353 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
354 struct ath10k_qmi *qmi = ar_snoc->qmi;
355 struct wlfw_wlan_mode_resp_msg_v01 resp = {};
356 struct wlfw_wlan_mode_req_msg_v01 req = {};
357 struct qmi_txn txn;
358 int ret;
359
360 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
361 wlfw_wlan_mode_resp_msg_v01_ei,
362 &resp);
363 if (ret < 0)
364 goto out;
365
366 req.mode = mode;
367 req.hw_debug_valid = 1;
368 req.hw_debug = 0;
369
370 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
371 QMI_WLFW_WLAN_MODE_REQ_V01,
372 WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
373 wlfw_wlan_mode_req_msg_v01_ei, &req);
374 if (ret < 0) {
375 qmi_txn_cancel(&txn);
376 ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
377 goto out;
378 }
379
380 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
381 if (ret < 0)
382 goto out;
383
384 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
385 ath10k_err(ar, "mode request rejected: %d\n", resp.resp.error);
386 ret = -EINVAL;
387 goto out;
388 }
389
390 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
391 return 0;
392
393out:
394 return ret;
395}
396
397static int
398ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
399 struct ath10k_qmi_wlan_enable_cfg *config,
400 const char *version)
401{
402 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
403 struct ath10k_qmi *qmi = ar_snoc->qmi;
404 struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
405 struct wlfw_wlan_cfg_req_msg_v01 *req;
406 struct qmi_txn txn;
407 int ret;
408 u32 i;
409
410 req = kzalloc(sizeof(*req), GFP_KERNEL);
411 if (!req)
412 return -ENOMEM;
413
414 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
415 wlfw_wlan_cfg_resp_msg_v01_ei,
416 &resp);
417 if (ret < 0)
418 goto out;
419
420 req->host_version_valid = 0;
421
422 req->tgt_cfg_valid = 1;
423 if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
424 req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
425 else
426 req->tgt_cfg_len = config->num_ce_tgt_cfg;
427 for (i = 0; i < req->tgt_cfg_len; i++) {
428 req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
429 req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
430 req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
431 req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
432 req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
433 }
434
435 req->svc_cfg_valid = 1;
436 if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
437 req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
438 else
439 req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
440 for (i = 0; i < req->svc_cfg_len; i++) {
441 req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
442 req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
443 req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
444 }
445
446 req->shadow_reg_valid = 1;
447 if (config->num_shadow_reg_cfg >
448 QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
449 req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
450 else
451 req->shadow_reg_len = config->num_shadow_reg_cfg;
452
453 memcpy(req->shadow_reg, config->shadow_reg_cfg,
454 sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);
455
456 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
457 QMI_WLFW_WLAN_CFG_REQ_V01,
458 WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
459 wlfw_wlan_cfg_req_msg_v01_ei, req);
460 if (ret < 0) {
461 qmi_txn_cancel(&txn);
462 ath10k_err(ar, "failed to send config request: %d\n", ret);
463 goto out;
464 }
465
466 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
467 if (ret < 0)
468 goto out;
469
470 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
471 ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
472 ret = -EINVAL;
473 goto out;
474 }
475
476 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
477 kfree(req);
478 return 0;
479
480out:
481 kfree(req);
482 return ret;
483}
484
485int ath10k_qmi_wlan_enable(struct ath10k *ar,
486 struct ath10k_qmi_wlan_enable_cfg *config,
487 enum wlfw_driver_mode_enum_v01 mode,
488 const char *version)
489{
490 int ret;
491
492 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
493 mode, config);
494
495 ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
496 if (ret) {
497 ath10k_err(ar, "failed to send qmi config: %d\n", ret);
498 return ret;
499 }
500
501 ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
502 if (ret) {
503 ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
504 return ret;
505 }
506
507 return 0;
508}
509
510int ath10k_qmi_wlan_disable(struct ath10k *ar)
511{
512 return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
513}
514
515static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
516{
517 struct wlfw_cap_resp_msg_v01 *resp;
518 struct wlfw_cap_req_msg_v01 req = {};
519 struct ath10k *ar = qmi->ar;
520 struct qmi_txn txn;
521 int ret;
522
523 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
524 if (!resp)
525 return -ENOMEM;
526
527 ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
528 if (ret < 0)
529 goto out;
530
531 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
532 QMI_WLFW_CAP_REQ_V01,
533 WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
534 wlfw_cap_req_msg_v01_ei, &req);
535 if (ret < 0) {
536 qmi_txn_cancel(&txn);
537 ath10k_err(ar, "failed to send capability request: %d\n", ret);
538 goto out;
539 }
540
541 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
542 if (ret < 0)
543 goto out;
544
545 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
546 ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
547 ret = -EINVAL;
548 goto out;
549 }
550
551 if (resp->chip_info_valid) {
552 qmi->chip_info.chip_id = resp->chip_info.chip_id;
553 qmi->chip_info.chip_family = resp->chip_info.chip_family;
554 }
555
556 if (resp->board_info_valid)
557 qmi->board_info.board_id = resp->board_info.board_id;
558 else
559 qmi->board_info.board_id = 0xFF;
560
561 if (resp->soc_info_valid)
562 qmi->soc_info.soc_id = resp->soc_info.soc_id;
563
564 if (resp->fw_version_info_valid) {
565 qmi->fw_version = resp->fw_version_info.fw_version;
566 strlcpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
567 sizeof(qmi->fw_build_timestamp));
568 }
569
570 if (resp->fw_build_id_valid)
571 strlcpy(qmi->fw_build_id, resp->fw_build_id,
572 MAX_BUILD_ID_LEN + 1);
573
574 ath10k_dbg(ar, ATH10K_DBG_QMI,
575 "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
576 qmi->chip_info.chip_id, qmi->chip_info.chip_family,
577 qmi->board_info.board_id, qmi->soc_info.soc_id);
578 ath10k_dbg(ar, ATH10K_DBG_QMI,
579 "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
580 qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
581
582 kfree(resp);
583 return 0;
584
585out:
586 kfree(resp);
587 return ret;
588}
589
590static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
591{
592 struct wlfw_host_cap_resp_msg_v01 resp = {};
593 struct wlfw_host_cap_req_msg_v01 req = {};
594 struct ath10k *ar = qmi->ar;
595 struct qmi_txn txn;
596 int ret;
597
598 req.daemon_support_valid = 1;
599 req.daemon_support = 0;
600
601 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
602 wlfw_host_cap_resp_msg_v01_ei, &resp);
603 if (ret < 0)
604 goto out;
605
606 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
607 QMI_WLFW_HOST_CAP_REQ_V01,
608 WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
609 wlfw_host_cap_req_msg_v01_ei, &req);
610 if (ret < 0) {
611 qmi_txn_cancel(&txn);
612 ath10k_err(ar, "failed to send host capability request: %d\n", ret);
613 goto out;
614 }
615
616 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
617 if (ret < 0)
618 goto out;
619
620 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
621 ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
622 ret = -EINVAL;
623 goto out;
624 }
625
626 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
627 return 0;
628
629out:
630 return ret;
631}
632
633static int
634ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
635{
636 struct wlfw_ind_register_resp_msg_v01 resp = {};
637 struct wlfw_ind_register_req_msg_v01 req = {};
638 struct ath10k *ar = qmi->ar;
639 struct qmi_txn txn;
640 int ret;
641
642 req.client_id_valid = 1;
643 req.client_id = ATH10K_QMI_CLIENT_ID;
644 req.fw_ready_enable_valid = 1;
645 req.fw_ready_enable = 1;
646 req.msa_ready_enable_valid = 1;
647 req.msa_ready_enable = 1;
648
649 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
650 wlfw_ind_register_resp_msg_v01_ei, &resp);
651 if (ret < 0)
652 goto out;
653
654 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
655 QMI_WLFW_IND_REGISTER_REQ_V01,
656 WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
657 wlfw_ind_register_req_msg_v01_ei, &req);
658 if (ret < 0) {
659 qmi_txn_cancel(&txn);
660 ath10k_err(ar, "failed to send indication register request: %d\n", ret);
661 goto out;
662 }
663
664 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
665 if (ret < 0)
666 goto out;
667
668 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
669 ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
670 ret = -EINVAL;
671 goto out;
672 }
673
674 if (resp.fw_status_valid) {
675 if (resp.fw_status & QMI_WLFW_FW_READY_V01)
676 qmi->fw_ready = true;
677 }
678 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
679 return 0;
680
681out:
682 return ret;
683}
684
685static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
686{
687 struct ath10k *ar = qmi->ar;
688 int ret;
689
690 ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
691 if (ret)
692 return;
693
694 if (qmi->fw_ready) {
695 ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
696 return;
697 }
698
699 ret = ath10k_qmi_host_cap_send_sync(qmi);
700 if (ret)
701 return;
702
703 ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
704 if (ret)
705 return;
706
707 ret = ath10k_qmi_setup_msa_permissions(qmi);
708 if (ret)
709 return;
710
711 ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
712 if (ret)
713 goto err_setup_msa;
714
715 ret = ath10k_qmi_cap_send_sync_msg(qmi);
716 if (ret)
717 goto err_setup_msa;
718
719 return;
720
721err_setup_msa:
722 ath10k_qmi_remove_msa_permission(qmi);
723}
724
725static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
726{
727 struct ath10k *ar = qmi->ar;
728
729 ar->hif.bus = ATH10K_BUS_SNOC;
730 ar->id.qmi_ids_valid = true;
731 ar->id.qmi_board_id = qmi->board_info.board_id;
732 ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;
733
734 return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
735}
736
737static int
738ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
739 enum ath10k_qmi_driver_event_type type,
740 void *data)
741{
742 struct ath10k_qmi_driver_event *event;
743
744 event = kzalloc(sizeof(*event), GFP_ATOMIC);
745 if (!event)
746 return -ENOMEM;
747
748 event->type = type;
749 event->data = data;
750
751 spin_lock(&qmi->event_lock);
752 list_add_tail(&event->list, &qmi->event_list);
753 spin_unlock(&qmi->event_lock);
754
755 queue_work(qmi->event_wq, &qmi->event_work);
756
757 return 0;
758}
759
760static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
761{
762 struct ath10k *ar = qmi->ar;
763
764 ath10k_qmi_remove_msa_permission(qmi);
765 ath10k_core_free_board_files(ar);
766 ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
767 ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
768}
769
770static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
771{
772 int ret;
773
774 ret = ath10k_qmi_fetch_board_file(qmi);
775 if (ret)
776 goto out;
777
778 ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
779 if (ret)
780 goto out;
781
782 ret = ath10k_qmi_send_cal_report_req(qmi);
783
784out:
785 return;
786}
787
788static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
789{
790 struct ath10k *ar = qmi->ar;
791
792 ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
793 ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
794
795 return 0;
796}
797
798static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
799 struct sockaddr_qrtr *sq,
800 struct qmi_txn *txn, const void *data)
801{
802 struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
803
804 ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
805}
806
807static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
808 struct sockaddr_qrtr *sq,
809 struct qmi_txn *txn, const void *data)
810{
811 struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
812
813 ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
814}
815
816static struct qmi_msg_handler qmi_msg_handler[] = {
817 {
818 .type = QMI_INDICATION,
819 .msg_id = QMI_WLFW_FW_READY_IND_V01,
820 .ei = wlfw_fw_ready_ind_msg_v01_ei,
821 .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
822 .fn = ath10k_qmi_fw_ready_ind,
823 },
824 {
825 .type = QMI_INDICATION,
826 .msg_id = QMI_WLFW_MSA_READY_IND_V01,
827 .ei = wlfw_msa_ready_ind_msg_v01_ei,
828 .decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
829 .fn = ath10k_qmi_msa_ready_ind,
830 },
831 {}
832};
833
834static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
835 struct qmi_service *service)
836{
837 struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
838 struct sockaddr_qrtr *sq = &qmi->sq;
839 struct ath10k *ar = qmi->ar;
840 int ret;
841
842 sq->sq_family = AF_QIPCRTR;
843 sq->sq_node = service->node;
844 sq->sq_port = service->port;
845
846 ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");
847
848 ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
849 sizeof(qmi->sq), 0);
850 if (ret) {
851 ath10k_err(ar, "failed to connect to a remote QMI service port\n");
852 return ret;
853 }
854
855 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
856 ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);
857
858 return ret;
859}
860
861static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
862 struct qmi_service *service)
863{
864 struct ath10k_qmi *qmi =
865 container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
866
867 qmi->fw_ready = false;
868 ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, NULL);
869}
870
871static struct qmi_ops ath10k_qmi_ops = {
872 .new_server = ath10k_qmi_new_server,
873 .del_server = ath10k_qmi_del_server,
874};
875
876static void ath10k_qmi_driver_event_work(struct work_struct *work)
877{
878 struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
879 event_work);
880 struct ath10k_qmi_driver_event *event;
881 struct ath10k *ar = qmi->ar;
882
883 spin_lock(&qmi->event_lock);
884 while (!list_empty(&qmi->event_list)) {
885 event = list_first_entry(&qmi->event_list,
886 struct ath10k_qmi_driver_event, list);
887 list_del(&event->list);
888 spin_unlock(&qmi->event_lock);
889
890 switch (event->type) {
891 case ATH10K_QMI_EVENT_SERVER_ARRIVE:
892 ath10k_qmi_event_server_arrive(qmi);
893 break;
894 case ATH10K_QMI_EVENT_SERVER_EXIT:
895 ath10k_qmi_event_server_exit(qmi);
896 break;
897 case ATH10K_QMI_EVENT_FW_READY_IND:
898 ath10k_qmi_event_fw_ready_ind(qmi);
899 break;
900 case ATH10K_QMI_EVENT_MSA_READY_IND:
901 ath10k_qmi_event_msa_ready(qmi);
902 break;
903 default:
904 ath10k_warn(ar, "invalid event type: %d", event->type);
905 break;
906 }
907 kfree(event);
908 spin_lock(&qmi->event_lock);
909 }
910 spin_unlock(&qmi->event_lock);
911}
912
913static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
914{
915 struct ath10k *ar = qmi->ar;
916 struct device *dev = ar->dev;
917 struct device_node *node;
918 struct resource r;
919 int ret;
920
921 node = of_parse_phandle(dev->of_node, "memory-region", 0);
922 if (node) {
923 ret = of_address_to_resource(node, 0, &r);
924 if (ret) {
925 dev_err(dev, "failed to resolve msa fixed region\n");
926 return ret;
927 }
928 of_node_put(node);
929
930 qmi->msa_pa = r.start;
931 qmi->msa_mem_size = resource_size(&r);
932 qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
933 MEMREMAP_WT);
934 if (!qmi->msa_va) {
935 dev_err(dev, "failed to map memory region: %pa\n", &r.start);
936 return -EBUSY;
937 }
938 } else {
939 qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
940 &qmi->msa_pa, GFP_KERNEL);
941 if (!qmi->msa_va) {
942 ath10k_err(ar, "failed to allocate dma memory for msa region\n");
943 return -ENOMEM;
944 }
945 qmi->msa_mem_size = msa_size;
946 }
947
948 ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad, msa va: 0x%p\n",
949 &qmi->msa_pa,
950 qmi->msa_va);
951
952 return 0;
953}
954
955int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
956{
957 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
958 struct ath10k_qmi *qmi;
959 int ret;
960
961 qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
962 if (!qmi)
963 return -ENOMEM;
964
965 qmi->ar = ar;
966 ar_snoc->qmi = qmi;
967
968 ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
969 if (ret)
970 goto err;
971
972 ret = qmi_handle_init(&qmi->qmi_hdl,
973 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
974 &ath10k_qmi_ops, qmi_msg_handler);
975 if (ret)
976 goto err;
977
978 qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event",
979 WQ_UNBOUND, 1);
980 if (!qmi->event_wq) {
981 ath10k_err(ar, "failed to allocate workqueue\n");
982 ret = -EFAULT;
983 goto err_release_qmi_handle;
984 }
985
986 INIT_LIST_HEAD(&qmi->event_list);
987 spin_lock_init(&qmi->event_lock);
988 INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);
989
990 ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
991 WLFW_SERVICE_VERS_V01, 0);
992 if (ret)
993 goto err_qmi_lookup;
994
995 return 0;
996
997err_qmi_lookup:
998 destroy_workqueue(qmi->event_wq);
999
1000err_release_qmi_handle:
1001 qmi_handle_release(&qmi->qmi_hdl);
1002
1003err:
1004 kfree(qmi);
1005 return ret;
1006}
1007
1008int ath10k_qmi_deinit(struct ath10k *ar)
1009{
1010 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1011 struct ath10k_qmi *qmi = ar_snoc->qmi;
1012
1013 qmi_handle_release(&qmi->qmi_hdl);
1014 cancel_work_sync(&qmi->event_work);
1015 destroy_workqueue(qmi->event_wq);
1016 ar_snoc->qmi = NULL;
1017
1018 return 0;
1019}
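
All of the *_send_sync_msg() helpers in this file share one transaction shape. A condensed sketch of that pattern is below — not a new helper in the driver; the MSA-ready message is reused purely as an example:

static int qmi_sync_req_sketch(struct qmi_handle *hdl)
{
	struct wlfw_msa_ready_resp_msg_v01 resp = {};
	struct wlfw_msa_ready_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	/* 1. Prepare a transaction that knows how to decode the response. */
	ret = qmi_txn_init(hdl, &txn, wlfw_msa_ready_resp_msg_v01_ei, &resp);
	if (ret < 0)
		return ret;

	/* 2. Encode and send the request; cancel the txn on send failure. */
	ret = qmi_send_request(hdl, NULL, &txn, QMI_WLFW_MSA_READY_REQ_V01,
			       WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_ready_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	/* 3. Block until the firmware answers or the timeout expires. */
	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		return ret;

	/* 4. Transport success still carries a service-level result code. */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01)
		return -EINVAL;

	return 0;
}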
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
new file mode 100644
index 000000000000..1efe1d22fc2f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -0,0 +1,129 @@
1/*
2 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#ifndef _ATH10K_QMI_H_
17#define _ATH10K_QMI_H_
18
19#include <linux/soc/qcom/qmi.h>
20#include <linux/qrtr.h>
21#include "qmi_wlfw_v01.h"
22
23#define MAX_NUM_MEMORY_REGIONS 2
24#define MAX_TIMESTAMP_LEN 32
25#define MAX_BUILD_ID_LEN 128
26#define MAX_NUM_CAL_V01 5
27
28enum ath10k_qmi_driver_event_type {
29 ATH10K_QMI_EVENT_SERVER_ARRIVE,
30 ATH10K_QMI_EVENT_SERVER_EXIT,
31 ATH10K_QMI_EVENT_FW_READY_IND,
32 ATH10K_QMI_EVENT_FW_DOWN_IND,
33 ATH10K_QMI_EVENT_MSA_READY_IND,
34 ATH10K_QMI_EVENT_MAX,
35};
36
37struct ath10k_msa_mem_info {
38 phys_addr_t addr;
39 u32 size;
40 bool secure;
41};
42
43struct ath10k_qmi_chip_info {
44 u32 chip_id;
45 u32 chip_family;
46};
47
48struct ath10k_qmi_board_info {
49 u32 board_id;
50};
51
52struct ath10k_qmi_soc_info {
53 u32 soc_id;
54};
55
56struct ath10k_qmi_cal_data {
57 u32 cal_id;
58 u32 total_size;
59 u8 *data;
60};
61
62struct ath10k_tgt_pipe_cfg {
63 __le32 pipe_num;
64 __le32 pipe_dir;
65 __le32 nentries;
66 __le32 nbytes_max;
67 __le32 flags;
68 __le32 reserved;
69};
70
71struct ath10k_svc_pipe_cfg {
72 __le32 service_id;
73 __le32 pipe_dir;
74 __le32 pipe_num;
75};
76
77struct ath10k_shadow_reg_cfg {
78 __le16 ce_id;
79 __le16 reg_offset;
80};
81
82struct ath10k_qmi_wlan_enable_cfg {
83 u32 num_ce_tgt_cfg;
84 struct ath10k_tgt_pipe_cfg *ce_tgt_cfg;
85 u32 num_ce_svc_pipe_cfg;
86 struct ath10k_svc_pipe_cfg *ce_svc_cfg;
87 u32 num_shadow_reg_cfg;
88 struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
89};
90
91struct ath10k_qmi_driver_event {
92 struct list_head list;
93 enum ath10k_qmi_driver_event_type type;
94 void *data;
95};
96
97struct ath10k_qmi {
98 struct ath10k *ar;
99 struct qmi_handle qmi_hdl;
100 struct sockaddr_qrtr sq;
101 struct work_struct event_work;
102 struct workqueue_struct *event_wq;
103 struct list_head event_list;
104 spinlock_t event_lock; /* spinlock for qmi event list */
105 u32 nr_mem_region;
106 struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
107 dma_addr_t msa_pa;
108 u32 msa_mem_size;
109 void *msa_va;
110 struct ath10k_qmi_chip_info chip_info;
111 struct ath10k_qmi_board_info board_info;
112 struct ath10k_qmi_soc_info soc_info;
113 char fw_build_id[MAX_BUILD_ID_LEN + 1];
114 u32 fw_version;
115 bool fw_ready;
116 char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
117 struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
118};
119
120int ath10k_qmi_wlan_enable(struct ath10k *ar,
121 struct ath10k_qmi_wlan_enable_cfg *config,
122 enum wlfw_driver_mode_enum_v01 mode,
123 const char *version);
124int ath10k_qmi_wlan_disable(struct ath10k *ar);
125int ath10k_qmi_register_service_notifier(struct notifier_block *nb);
126int ath10k_qmi_init(struct ath10k *ar, u32 msa_size);
127int ath10k_qmi_deinit(struct ath10k *ar);
128
129#endif /* _ATH10K_QMI_H_ */
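/*
 * A minimal sketch of how a caller might fill the structures above for
 * ath10k_qmi_wlan_enable(); the pipe values are illustrative
 * placeholders, not real WCN3990 copy-engine assignments:
 *
 *	static struct ath10k_tgt_pipe_cfg example_tgt_cfg[] = {
 *		{
 *			.pipe_num   = __cpu_to_le32(0),
 *			.pipe_dir   = __cpu_to_le32(1),
 *			.nentries   = __cpu_to_le32(32),
 *			.nbytes_max = __cpu_to_le32(2048),
 *			.flags      = __cpu_to_le32(0),
 *		},
 *	};
 *
 *	static int example_wlan_enable(struct ath10k *ar)
 *	{
 *		struct ath10k_qmi_wlan_enable_cfg cfg = {
 *			.num_ce_tgt_cfg = ARRAY_SIZE(example_tgt_cfg),
 *			.ce_tgt_cfg     = example_tgt_cfg,
 *		};
 *
 *		return ath10k_qmi_wlan_enable(ar, &cfg,
 *					      QMI_WLFW_MISSION_V01,
 *					      "example-host");
 *	}
 */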
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
new file mode 100644
index 000000000000..ba79c2e4aed6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -0,0 +1,2072 @@
1/*
2 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/soc/qcom/qmi.h>
18#include <linux/types.h>
19#include "qmi_wlfw_v01.h"
20
21static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
22 {
23 .data_type = QMI_UNSIGNED_4_BYTE,
24 .elem_len = 1,
25 .elem_size = sizeof(u32),
26 .array_type = NO_ARRAY,
27 .tlv_type = 0,
28 .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
29 pipe_num),
30 },
31 {
32 .data_type = QMI_SIGNED_4_BYTE_ENUM,
33 .elem_len = 1,
34 .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
35 .array_type = NO_ARRAY,
36 .tlv_type = 0,
37 .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
38 pipe_dir),
39 },
40 {
41 .data_type = QMI_UNSIGNED_4_BYTE,
42 .elem_len = 1,
43 .elem_size = sizeof(u32),
44 .array_type = NO_ARRAY,
45 .tlv_type = 0,
46 .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
47 nentries),
48 },
49 {
50 .data_type = QMI_UNSIGNED_4_BYTE,
51 .elem_len = 1,
52 .elem_size = sizeof(u32),
53 .array_type = NO_ARRAY,
54 .tlv_type = 0,
55 .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
56 nbytes_max),
57 },
58 {
59 .data_type = QMI_UNSIGNED_4_BYTE,
60 .elem_len = 1,
61 .elem_size = sizeof(u32),
62 .array_type = NO_ARRAY,
63 .tlv_type = 0,
64 .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
65 flags),
66 },
67 {}
68};
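/*
 * Each entry in a qmi_elem_info table drives the generic TLV
 * marshalling code behind qmi_handle: .data_type selects the wire
 * encoding, .elem_size/.elem_len describe the C field, .tlv_type is
 * the tag emitted on the wire, and .offset locates the field inside
 * the containing struct via offsetof(). The empty terminating entry
 * ({}) marks the end of each table.
 */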
69
70static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
71 {
72 .data_type = QMI_UNSIGNED_4_BYTE,
73 .elem_len = 1,
74 .elem_size = sizeof(u32),
75 .array_type = NO_ARRAY,
76 .tlv_type = 0,
77 .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
78 service_id),
79 },
80 {
81 .data_type = QMI_SIGNED_4_BYTE_ENUM,
82 .elem_len = 1,
83 .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
84 .array_type = NO_ARRAY,
85 .tlv_type = 0,
86 .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
87 pipe_dir),
88 },
89 {
90 .data_type = QMI_UNSIGNED_4_BYTE,
91 .elem_len = 1,
92 .elem_size = sizeof(u32),
93 .array_type = NO_ARRAY,
94 .tlv_type = 0,
95 .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
96 pipe_num),
97 },
98 {}
99};
100
101static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
102 {
103 .data_type = QMI_UNSIGNED_2_BYTE,
104 .elem_len = 1,
105 .elem_size = sizeof(u16),
106 .array_type = NO_ARRAY,
107 .tlv_type = 0,
108 .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
109 id),
110 },
111 {
112 .data_type = QMI_UNSIGNED_2_BYTE,
113 .elem_len = 1,
114 .elem_size = sizeof(u16),
115 .array_type = NO_ARRAY,
116 .tlv_type = 0,
117 .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
118 offset),
119 },
120 {}
121};
122
123static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
124 {
125 .data_type = QMI_UNSIGNED_4_BYTE,
126 .elem_len = 1,
127 .elem_size = sizeof(u32),
128 .array_type = NO_ARRAY,
129 .tlv_type = 0,
130 .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
131 addr),
132 },
133 {}
134};
135
136static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
137 {
138 .data_type = QMI_UNSIGNED_8_BYTE,
139 .elem_len = 1,
140 .elem_size = sizeof(u64),
141 .array_type = NO_ARRAY,
142 .tlv_type = 0,
143 .offset = offsetof(struct wlfw_memory_region_info_s_v01,
144 region_addr),
145 },
146 {
147 .data_type = QMI_UNSIGNED_4_BYTE,
148 .elem_len = 1,
149 .elem_size = sizeof(u32),
150 .array_type = NO_ARRAY,
151 .tlv_type = 0,
152 .offset = offsetof(struct wlfw_memory_region_info_s_v01,
153 size),
154 },
155 {
156 .data_type = QMI_UNSIGNED_1_BYTE,
157 .elem_len = 1,
158 .elem_size = sizeof(u8),
159 .array_type = NO_ARRAY,
160 .tlv_type = 0,
161 .offset = offsetof(struct wlfw_memory_region_info_s_v01,
162 secure_flag),
163 },
164 {}
165};
166
167static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
168 {
169 .data_type = QMI_UNSIGNED_8_BYTE,
170 .elem_len = 1,
171 .elem_size = sizeof(u64),
172 .array_type = NO_ARRAY,
173 .tlv_type = 0,
174 .offset = offsetof(struct wlfw_mem_cfg_s_v01,
175 offset),
176 },
177 {
178 .data_type = QMI_UNSIGNED_4_BYTE,
179 .elem_len = 1,
180 .elem_size = sizeof(u32),
181 .array_type = NO_ARRAY,
182 .tlv_type = 0,
183 .offset = offsetof(struct wlfw_mem_cfg_s_v01,
184 size),
185 },
186 {
187 .data_type = QMI_UNSIGNED_1_BYTE,
188 .elem_len = 1,
189 .elem_size = sizeof(u8),
190 .array_type = NO_ARRAY,
191 .tlv_type = 0,
192 .offset = offsetof(struct wlfw_mem_cfg_s_v01,
193 secure_flag),
194 },
195 {}
196};
197
198static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
199 {
200 .data_type = QMI_UNSIGNED_4_BYTE,
201 .elem_len = 1,
202 .elem_size = sizeof(u32),
203 .array_type = NO_ARRAY,
204 .tlv_type = 0,
205 .offset = offsetof(struct wlfw_mem_seg_s_v01,
206 size),
207 },
208 {
209 .data_type = QMI_SIGNED_4_BYTE_ENUM,
210 .elem_len = 1,
211 .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
212 .array_type = NO_ARRAY,
213 .tlv_type = 0,
214 .offset = offsetof(struct wlfw_mem_seg_s_v01,
215 type),
216 },
217 {
218 .data_type = QMI_DATA_LEN,
219 .elem_len = 1,
220 .elem_size = sizeof(u8),
221 .array_type = NO_ARRAY,
222 .tlv_type = 0,
223 .offset = offsetof(struct wlfw_mem_seg_s_v01,
224 mem_cfg_len),
225 },
226 {
227 .data_type = QMI_STRUCT,
228 .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01,
229 .elem_size = sizeof(struct wlfw_mem_cfg_s_v01),
230 .array_type = VAR_LEN_ARRAY,
231 .tlv_type = 0,
232 .offset = offsetof(struct wlfw_mem_seg_s_v01,
233 mem_cfg),
234 .ei_array = wlfw_mem_cfg_s_v01_ei,
235 },
236 {}
237};
238
239static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
240 {
241 .data_type = QMI_UNSIGNED_8_BYTE,
242 .elem_len = 1,
243 .elem_size = sizeof(u64),
244 .array_type = NO_ARRAY,
245 .tlv_type = 0,
246 .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
247 addr),
248 },
249 {
250 .data_type = QMI_UNSIGNED_4_BYTE,
251 .elem_len = 1,
252 .elem_size = sizeof(u32),
253 .array_type = NO_ARRAY,
254 .tlv_type = 0,
255 .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
256 size),
257 },
258 {
259 .data_type = QMI_SIGNED_4_BYTE_ENUM,
260 .elem_len = 1,
261 .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
262 .array_type = NO_ARRAY,
263 .tlv_type = 0,
264 .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
265 type),
266 },
267 {}
268};
269
270static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
271 {
272 .data_type = QMI_UNSIGNED_4_BYTE,
273 .elem_len = 1,
274 .elem_size = sizeof(u32),
275 .array_type = NO_ARRAY,
276 .tlv_type = 0,
277 .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
278 chip_id),
279 },
280 {
281 .data_type = QMI_UNSIGNED_4_BYTE,
282 .elem_len = 1,
283 .elem_size = sizeof(u32),
284 .array_type = NO_ARRAY,
285 .tlv_type = 0,
286 .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
287 chip_family),
288 },
289 {}
290};
291
292static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
293 {
294 .data_type = QMI_UNSIGNED_4_BYTE,
295 .elem_len = 1,
296 .elem_size = sizeof(u32),
297 .array_type = NO_ARRAY,
298 .tlv_type = 0,
299 .offset = offsetof(struct wlfw_rf_board_info_s_v01,
300 board_id),
301 },
302 {}
303};
304
305static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
306 {
307 .data_type = QMI_UNSIGNED_4_BYTE,
308 .elem_len = 1,
309 .elem_size = sizeof(u32),
310 .array_type = NO_ARRAY,
311 .tlv_type = 0,
312 .offset = offsetof(struct wlfw_soc_info_s_v01,
313 soc_id),
314 },
315 {}
316};
317
318static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
319 {
320 .data_type = QMI_UNSIGNED_4_BYTE,
321 .elem_len = 1,
322 .elem_size = sizeof(u32),
323 .array_type = NO_ARRAY,
324 .tlv_type = 0,
325 .offset = offsetof(struct wlfw_fw_version_info_s_v01,
326 fw_version),
327 },
328 {
329 .data_type = QMI_STRING,
330 .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
331 .elem_size = sizeof(char),
332 .array_type = NO_ARRAY,
333 .tlv_type = 0,
334 .offset = offsetof(struct wlfw_fw_version_info_s_v01,
335 fw_build_timestamp),
336 },
337 {}
338};
339
340struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
341 {
342 .data_type = QMI_OPT_FLAG,
343 .elem_len = 1,
344 .elem_size = sizeof(u8),
345 .array_type = NO_ARRAY,
346 .tlv_type = 0x10,
347 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
348 fw_ready_enable_valid),
349 },
350 {
351 .data_type = QMI_UNSIGNED_1_BYTE,
352 .elem_len = 1,
353 .elem_size = sizeof(u8),
354 .array_type = NO_ARRAY,
355 .tlv_type = 0x10,
356 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
357 fw_ready_enable),
358 },
359 {
360 .data_type = QMI_OPT_FLAG,
361 .elem_len = 1,
362 .elem_size = sizeof(u8),
363 .array_type = NO_ARRAY,
364 .tlv_type = 0x11,
365 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
366 initiate_cal_download_enable_valid),
367 },
368 {
369 .data_type = QMI_UNSIGNED_1_BYTE,
370 .elem_len = 1,
371 .elem_size = sizeof(u8),
372 .array_type = NO_ARRAY,
373 .tlv_type = 0x11,
374 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
375 initiate_cal_download_enable),
376 },
377 {
378 .data_type = QMI_OPT_FLAG,
379 .elem_len = 1,
380 .elem_size = sizeof(u8),
381 .array_type = NO_ARRAY,
382 .tlv_type = 0x12,
383 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
384 initiate_cal_update_enable_valid),
385 },
386 {
387 .data_type = QMI_UNSIGNED_1_BYTE,
388 .elem_len = 1,
389 .elem_size = sizeof(u8),
390 .array_type = NO_ARRAY,
391 .tlv_type = 0x12,
392 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
393 initiate_cal_update_enable),
394 },
395 {
396 .data_type = QMI_OPT_FLAG,
397 .elem_len = 1,
398 .elem_size = sizeof(u8),
399 .array_type = NO_ARRAY,
400 .tlv_type = 0x13,
401 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
402 msa_ready_enable_valid),
403 },
404 {
405 .data_type = QMI_UNSIGNED_1_BYTE,
406 .elem_len = 1,
407 .elem_size = sizeof(u8),
408 .array_type = NO_ARRAY,
409 .tlv_type = 0x13,
410 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
411 msa_ready_enable),
412 },
413 {
414 .data_type = QMI_OPT_FLAG,
415 .elem_len = 1,
416 .elem_size = sizeof(u8),
417 .array_type = NO_ARRAY,
418 .tlv_type = 0x14,
419 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
420 pin_connect_result_enable_valid),
421 },
422 {
423 .data_type = QMI_UNSIGNED_1_BYTE,
424 .elem_len = 1,
425 .elem_size = sizeof(u8),
426 .array_type = NO_ARRAY,
427 .tlv_type = 0x14,
428 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
429 pin_connect_result_enable),
430 },
431 {
432 .data_type = QMI_OPT_FLAG,
433 .elem_len = 1,
434 .elem_size = sizeof(u8),
435 .array_type = NO_ARRAY,
436 .tlv_type = 0x15,
437 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
438 client_id_valid),
439 },
440 {
441 .data_type = QMI_UNSIGNED_4_BYTE,
442 .elem_len = 1,
443 .elem_size = sizeof(u32),
444 .array_type = NO_ARRAY,
445 .tlv_type = 0x15,
446 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
447 client_id),
448 },
449 {
450 .data_type = QMI_OPT_FLAG,
451 .elem_len = 1,
452 .elem_size = sizeof(u8),
453 .array_type = NO_ARRAY,
454 .tlv_type = 0x16,
455 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
456 request_mem_enable_valid),
457 },
458 {
459 .data_type = QMI_UNSIGNED_1_BYTE,
460 .elem_len = 1,
461 .elem_size = sizeof(u8),
462 .array_type = NO_ARRAY,
463 .tlv_type = 0x16,
464 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
465 request_mem_enable),
466 },
467 {
468 .data_type = QMI_OPT_FLAG,
469 .elem_len = 1,
470 .elem_size = sizeof(u8),
471 .array_type = NO_ARRAY,
472 .tlv_type = 0x17,
473 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
474 mem_ready_enable_valid),
475 },
476 {
477 .data_type = QMI_UNSIGNED_1_BYTE,
478 .elem_len = 1,
479 .elem_size = sizeof(u8),
480 .array_type = NO_ARRAY,
481 .tlv_type = 0x17,
482 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
483 mem_ready_enable),
484 },
485 {
486 .data_type = QMI_OPT_FLAG,
487 .elem_len = 1,
488 .elem_size = sizeof(u8),
489 .array_type = NO_ARRAY,
490 .tlv_type = 0x18,
491 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
492 fw_init_done_enable_valid),
493 },
494 {
495 .data_type = QMI_UNSIGNED_1_BYTE,
496 .elem_len = 1,
497 .elem_size = sizeof(u8),
498 .array_type = NO_ARRAY,
499 .tlv_type = 0x18,
500 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
501 fw_init_done_enable),
502 },
503 {
504 .data_type = QMI_OPT_FLAG,
505 .elem_len = 1,
506 .elem_size = sizeof(u8),
507 .array_type = NO_ARRAY,
508 .tlv_type = 0x19,
509 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
510 rejuvenate_enable_valid),
511 },
512 {
513 .data_type = QMI_UNSIGNED_4_BYTE,
514 .elem_len = 1,
515 .elem_size = sizeof(u32),
516 .array_type = NO_ARRAY,
517 .tlv_type = 0x19,
518 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
519 rejuvenate_enable),
520 },
521 {
522 .data_type = QMI_OPT_FLAG,
523 .elem_len = 1,
524 .elem_size = sizeof(u8),
525 .array_type = NO_ARRAY,
526 .tlv_type = 0x1A,
527 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
528 xo_cal_enable_valid),
529 },
530 {
531 .data_type = QMI_UNSIGNED_1_BYTE,
532 .elem_len = 1,
533 .elem_size = sizeof(u8),
534 .array_type = NO_ARRAY,
535 .tlv_type = 0x1A,
536 .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
537 xo_cal_enable),
538 },
539 {}
540};
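/*
 * Every optional field above follows the same two-element pattern: a
 * QMI_OPT_FLAG entry for the *_valid flag shares its .tlv_type with
 * the value entry that follows, so the TLV is only encoded when the
 * flag is set, and the flag is set on decode when the TLV is present.
 */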
541
542struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
543 {
544 .data_type = QMI_STRUCT,
545 .elem_len = 1,
546 .elem_size = sizeof(struct qmi_response_type_v01),
547 .array_type = NO_ARRAY,
548 .tlv_type = 0x02,
549 .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
550 resp),
551 .ei_array = qmi_response_type_v01_ei,
552 },
553 {
554 .data_type = QMI_OPT_FLAG,
555 .elem_len = 1,
556 .elem_size = sizeof(u8),
557 .array_type = NO_ARRAY,
558 .tlv_type = 0x10,
559 .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
560 fw_status_valid),
561 },
562 {
563 .data_type = QMI_UNSIGNED_8_BYTE,
564 .elem_len = 1,
565 .elem_size = sizeof(u64),
566 .array_type = NO_ARRAY,
567 .tlv_type = 0x10,
568 .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
569 fw_status),
570 },
571 {}
572};
573
574struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
575 {}
576};
577
578struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
579 {}
580};
581
582struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
583 {
584 .data_type = QMI_OPT_FLAG,
585 .elem_len = 1,
586 .elem_size = sizeof(u8),
587 .array_type = NO_ARRAY,
588 .tlv_type = 0x10,
589 .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
590 pwr_pin_result_valid),
591 },
592 {
593 .data_type = QMI_UNSIGNED_4_BYTE,
594 .elem_len = 1,
595 .elem_size = sizeof(u32),
596 .array_type = NO_ARRAY,
597 .tlv_type = 0x10,
598 .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
599 pwr_pin_result),
600 },
601 {
602 .data_type = QMI_OPT_FLAG,
603 .elem_len = 1,
604 .elem_size = sizeof(u8),
605 .array_type = NO_ARRAY,
606 .tlv_type = 0x11,
607 .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
608 phy_io_pin_result_valid),
609 },
610 {
611 .data_type = QMI_UNSIGNED_4_BYTE,
612 .elem_len = 1,
613 .elem_size = sizeof(u32),
614 .array_type = NO_ARRAY,
615 .tlv_type = 0x11,
616 .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
617 phy_io_pin_result),
618 },
619 {
620 .data_type = QMI_OPT_FLAG,
621 .elem_len = 1,
622 .elem_size = sizeof(u8),
623 .array_type = NO_ARRAY,
624 .tlv_type = 0x12,
625 .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
626 rf_pin_result_valid),
627 },
628 {
629 .data_type = QMI_UNSIGNED_4_BYTE,
630 .elem_len = 1,
631 .elem_size = sizeof(u32),
632 .array_type = NO_ARRAY,
633 .tlv_type = 0x12,
634 .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
635 rf_pin_result),
636 },
637 {}
638};
639
640struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
641 {
642 .data_type = QMI_SIGNED_4_BYTE_ENUM,
643 .elem_len = 1,
644 .elem_size = sizeof(enum wlfw_driver_mode_enum_v01),
645 .array_type = NO_ARRAY,
646 .tlv_type = 0x01,
647 .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
648 mode),
649 },
650 {
651 .data_type = QMI_OPT_FLAG,
652 .elem_len = 1,
653 .elem_size = sizeof(u8),
654 .array_type = NO_ARRAY,
655 .tlv_type = 0x10,
656 .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
657 hw_debug_valid),
658 },
659 {
660 .data_type = QMI_UNSIGNED_1_BYTE,
661 .elem_len = 1,
662 .elem_size = sizeof(u8),
663 .array_type = NO_ARRAY,
664 .tlv_type = 0x10,
665 .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
666 hw_debug),
667 },
668 {}
669};
670
671struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
672 {
673 .data_type = QMI_STRUCT,
674 .elem_len = 1,
675 .elem_size = sizeof(struct qmi_response_type_v01),
676 .array_type = NO_ARRAY,
677 .tlv_type = 0x02,
678 .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
679 resp),
680 .ei_array = qmi_response_type_v01_ei,
681 },
682 {}
683};
684
685struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
686 {
687 .data_type = QMI_OPT_FLAG,
688 .elem_len = 1,
689 .elem_size = sizeof(u8),
690 .array_type = NO_ARRAY,
691 .tlv_type = 0x10,
692 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
693 host_version_valid),
694 },
695 {
696 .data_type = QMI_STRING,
697 .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
698 .elem_size = sizeof(char),
699 .array_type = NO_ARRAY,
700 .tlv_type = 0x10,
701 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
702 host_version),
703 },
704 {
705 .data_type = QMI_OPT_FLAG,
706 .elem_len = 1,
707 .elem_size = sizeof(u8),
708 .array_type = NO_ARRAY,
709 .tlv_type = 0x11,
710 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
711 tgt_cfg_valid),
712 },
713 {
714 .data_type = QMI_DATA_LEN,
715 .elem_len = 1,
716 .elem_size = sizeof(u8),
717 .array_type = NO_ARRAY,
718 .tlv_type = 0x11,
719 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
720 tgt_cfg_len),
721 },
722 {
723 .data_type = QMI_STRUCT,
724 .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
725 .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
726 .array_type = VAR_LEN_ARRAY,
727 .tlv_type = 0x11,
728 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
729 tgt_cfg),
730 .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
731 },
732 {
733 .data_type = QMI_OPT_FLAG,
734 .elem_len = 1,
735 .elem_size = sizeof(u8),
736 .array_type = NO_ARRAY,
737 .tlv_type = 0x12,
738 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
739 svc_cfg_valid),
740 },
741 {
742 .data_type = QMI_DATA_LEN,
743 .elem_len = 1,
744 .elem_size = sizeof(u8),
745 .array_type = NO_ARRAY,
746 .tlv_type = 0x12,
747 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
748 svc_cfg_len),
749 },
750 {
751 .data_type = QMI_STRUCT,
752 .elem_len = QMI_WLFW_MAX_NUM_SVC_V01,
753 .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
754 .array_type = VAR_LEN_ARRAY,
755 .tlv_type = 0x12,
756 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
757 svc_cfg),
758 .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei,
759 },
760 {
761 .data_type = QMI_OPT_FLAG,
762 .elem_len = 1,
763 .elem_size = sizeof(u8),
764 .array_type = NO_ARRAY,
765 .tlv_type = 0x13,
766 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
767 shadow_reg_valid),
768 },
769 {
770 .data_type = QMI_DATA_LEN,
771 .elem_len = 1,
772 .elem_size = sizeof(u8),
773 .array_type = NO_ARRAY,
774 .tlv_type = 0x13,
775 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
776 shadow_reg_len),
777 },
778 {
779 .data_type = QMI_STRUCT,
780 .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
781 .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
782 .array_type = VAR_LEN_ARRAY,
783 .tlv_type = 0x13,
784 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
785 shadow_reg),
786 .ei_array = wlfw_shadow_reg_cfg_s_v01_ei,
787 },
788 {
789 .data_type = QMI_OPT_FLAG,
790 .elem_len = 1,
791 .elem_size = sizeof(u8),
792 .array_type = NO_ARRAY,
793 .tlv_type = 0x14,
794 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
795 shadow_reg_v2_valid),
796 },
797 {
798 .data_type = QMI_DATA_LEN,
799 .elem_len = 1,
800 .elem_size = sizeof(u8),
801 .array_type = NO_ARRAY,
802 .tlv_type = 0x14,
803 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
804 shadow_reg_v2_len),
805 },
806 {
807 .data_type = QMI_STRUCT,
808 .elem_len = QMI_WLFW_MAX_SHADOW_REG_V2,
809 .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
810 .array_type = VAR_LEN_ARRAY,
811 .tlv_type = 0x14,
812 .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
813 shadow_reg_v2),
814 .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
815 },
816 {}
817};
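/*
 * The variable-length arrays above (tgt_cfg, svc_cfg, shadow_reg,
 * shadow_reg_v2) each use a three-element pattern: an optional flag,
 * a QMI_DATA_LEN entry carrying the element count, and a
 * VAR_LEN_ARRAY entry whose .elem_len caps the count and whose
 * .ei_array points at the per-element table.
 */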
818
819struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
820 {
821 .data_type = QMI_STRUCT,
822 .elem_len = 1,
823 .elem_size = sizeof(struct qmi_response_type_v01),
824 .array_type = NO_ARRAY,
825 .tlv_type = 0x02,
826 .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
827 resp),
828 .ei_array = qmi_response_type_v01_ei,
829 },
830 {}
831};
832
833struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
834 {}
835};
836
837struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
838 {
839 .data_type = QMI_STRUCT,
840 .elem_len = 1,
841 .elem_size = sizeof(struct qmi_response_type_v01),
842 .array_type = NO_ARRAY,
843 .tlv_type = 0x02,
844 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
845 resp),
846 .ei_array = qmi_response_type_v01_ei,
847 },
848 {
849 .data_type = QMI_OPT_FLAG,
850 .elem_len = 1,
851 .elem_size = sizeof(u8),
852 .array_type = NO_ARRAY,
853 .tlv_type = 0x10,
854 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
855 chip_info_valid),
856 },
857 {
858 .data_type = QMI_STRUCT,
859 .elem_len = 1,
860 .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01),
861 .array_type = NO_ARRAY,
862 .tlv_type = 0x10,
863 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
864 chip_info),
865 .ei_array = wlfw_rf_chip_info_s_v01_ei,
866 },
867 {
868 .data_type = QMI_OPT_FLAG,
869 .elem_len = 1,
870 .elem_size = sizeof(u8),
871 .array_type = NO_ARRAY,
872 .tlv_type = 0x11,
873 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
874 board_info_valid),
875 },
876 {
877 .data_type = QMI_STRUCT,
878 .elem_len = 1,
879 .elem_size = sizeof(struct wlfw_rf_board_info_s_v01),
880 .array_type = NO_ARRAY,
881 .tlv_type = 0x11,
882 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
883 board_info),
884 .ei_array = wlfw_rf_board_info_s_v01_ei,
885 },
886 {
887 .data_type = QMI_OPT_FLAG,
888 .elem_len = 1,
889 .elem_size = sizeof(u8),
890 .array_type = NO_ARRAY,
891 .tlv_type = 0x12,
892 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
893 soc_info_valid),
894 },
895 {
896 .data_type = QMI_STRUCT,
897 .elem_len = 1,
898 .elem_size = sizeof(struct wlfw_soc_info_s_v01),
899 .array_type = NO_ARRAY,
900 .tlv_type = 0x12,
901 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
902 soc_info),
903 .ei_array = wlfw_soc_info_s_v01_ei,
904 },
905 {
906 .data_type = QMI_OPT_FLAG,
907 .elem_len = 1,
908 .elem_size = sizeof(u8),
909 .array_type = NO_ARRAY,
910 .tlv_type = 0x13,
911 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
912 fw_version_info_valid),
913 },
914 {
915 .data_type = QMI_STRUCT,
916 .elem_len = 1,
917 .elem_size = sizeof(struct wlfw_fw_version_info_s_v01),
918 .array_type = NO_ARRAY,
919 .tlv_type = 0x13,
920 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
921 fw_version_info),
922 .ei_array = wlfw_fw_version_info_s_v01_ei,
923 },
924 {
925 .data_type = QMI_OPT_FLAG,
926 .elem_len = 1,
927 .elem_size = sizeof(u8),
928 .array_type = NO_ARRAY,
929 .tlv_type = 0x14,
930 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
931 fw_build_id_valid),
932 },
933 {
934 .data_type = QMI_STRING,
935 .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
936 .elem_size = sizeof(char),
937 .array_type = NO_ARRAY,
938 .tlv_type = 0x14,
939 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
940 fw_build_id),
941 },
942 {
943 .data_type = QMI_OPT_FLAG,
944 .elem_len = 1,
945 .elem_size = sizeof(u8),
946 .array_type = NO_ARRAY,
947 .tlv_type = 0x15,
948 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
949 num_macs_valid),
950 },
951 {
952 .data_type = QMI_UNSIGNED_1_BYTE,
953 .elem_len = 1,
954 .elem_size = sizeof(u8),
955 .array_type = NO_ARRAY,
956 .tlv_type = 0x15,
957 .offset = offsetof(struct wlfw_cap_resp_msg_v01,
958 num_macs),
959 },
960 {}
961};
962
963struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
964 {
965 .data_type = QMI_UNSIGNED_1_BYTE,
966 .elem_len = 1,
967 .elem_size = sizeof(u8),
968 .array_type = NO_ARRAY,
969 .tlv_type = 0x01,
970 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
971 valid),
972 },
973 {
974 .data_type = QMI_OPT_FLAG,
975 .elem_len = 1,
976 .elem_size = sizeof(u8),
977 .array_type = NO_ARRAY,
978 .tlv_type = 0x10,
979 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
980 file_id_valid),
981 },
982 {
983 .data_type = QMI_SIGNED_4_BYTE_ENUM,
984 .elem_len = 1,
985 .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
986 .array_type = NO_ARRAY,
987 .tlv_type = 0x10,
988 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
989 file_id),
990 },
991 {
992 .data_type = QMI_OPT_FLAG,
993 .elem_len = 1,
994 .elem_size = sizeof(u8),
995 .array_type = NO_ARRAY,
996 .tlv_type = 0x11,
997 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
998 total_size_valid),
999 },
1000 {
1001 .data_type = QMI_UNSIGNED_4_BYTE,
1002 .elem_len = 1,
1003 .elem_size = sizeof(u32),
1004 .array_type = NO_ARRAY,
1005 .tlv_type = 0x11,
1006 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1007 total_size),
1008 },
1009 {
1010 .data_type = QMI_OPT_FLAG,
1011 .elem_len = 1,
1012 .elem_size = sizeof(u8),
1013 .array_type = NO_ARRAY,
1014 .tlv_type = 0x12,
1015 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1016 seg_id_valid),
1017 },
1018 {
1019 .data_type = QMI_UNSIGNED_4_BYTE,
1020 .elem_len = 1,
1021 .elem_size = sizeof(u32),
1022 .array_type = NO_ARRAY,
1023 .tlv_type = 0x12,
1024 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1025 seg_id),
1026 },
1027 {
1028 .data_type = QMI_OPT_FLAG,
1029 .elem_len = 1,
1030 .elem_size = sizeof(u8),
1031 .array_type = NO_ARRAY,
1032 .tlv_type = 0x13,
1033 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1034 data_valid),
1035 },
1036 {
1037 .data_type = QMI_DATA_LEN,
1038 .elem_len = 1,
1039 .elem_size = sizeof(u16),
1040 .array_type = NO_ARRAY,
1041 .tlv_type = 0x13,
1042 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1043 data_len),
1044 },
1045 {
1046 .data_type = QMI_UNSIGNED_1_BYTE,
1047 .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
1048 .elem_size = sizeof(u8),
1049 .array_type = VAR_LEN_ARRAY,
1050 .tlv_type = 0x13,
1051 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1052 data),
1053 },
1054 {
1055 .data_type = QMI_OPT_FLAG,
1056 .elem_len = 1,
1057 .elem_size = sizeof(u8),
1058 .array_type = NO_ARRAY,
1059 .tlv_type = 0x14,
1060 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1061 end_valid),
1062 },
1063 {
1064 .data_type = QMI_UNSIGNED_1_BYTE,
1065 .elem_len = 1,
1066 .elem_size = sizeof(u8),
1067 .array_type = NO_ARRAY,
1068 .tlv_type = 0x14,
1069 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1070 end),
1071 },
1072 {
1073 .data_type = QMI_OPT_FLAG,
1074 .elem_len = 1,
1075 .elem_size = sizeof(u8),
1076 .array_type = NO_ARRAY,
1077 .tlv_type = 0x15,
1078 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1079 bdf_type_valid),
1080 },
1081 {
1082 .data_type = QMI_UNSIGNED_1_BYTE,
1083 .elem_len = 1,
1084 .elem_size = sizeof(u8),
1085 .array_type = NO_ARRAY,
1086 .tlv_type = 0x15,
1087 .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
1088 bdf_type),
1089 },
1090 {}
1091};
1092
1093struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
1094 {
1095 .data_type = QMI_STRUCT,
1096 .elem_len = 1,
1097 .elem_size = sizeof(struct qmi_response_type_v01),
1098 .array_type = NO_ARRAY,
1099 .tlv_type = 0x02,
1100 .offset = offsetof(struct wlfw_bdf_download_resp_msg_v01,
1101 resp),
1102 .ei_array = qmi_response_type_v01_ei,
1103 },
1104 {}
1105};
1106
1107struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
1108 {
1109 .data_type = QMI_DATA_LEN,
1110 .elem_len = 1,
1111 .elem_size = sizeof(u8),
1112 .array_type = NO_ARRAY,
1113 .tlv_type = 0x01,
1114 .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
1115 meta_data_len),
1116 },
1117 {
1118 .data_type = QMI_SIGNED_4_BYTE_ENUM,
1119 .elem_len = QMI_WLFW_MAX_NUM_CAL_V01,
1120 .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
1121 .array_type = VAR_LEN_ARRAY,
1122 .tlv_type = 0x01,
1123 .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
1124 meta_data),
1125 },
1126 {
1127 .data_type = QMI_OPT_FLAG,
1128 .elem_len = 1,
1129 .elem_size = sizeof(u8),
1130 .array_type = NO_ARRAY,
1131 .tlv_type = 0x10,
1132 .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
1133 xo_cal_data_valid),
1134 },
1135 {
1136 .data_type = QMI_UNSIGNED_1_BYTE,
1137 .elem_len = 1,
1138 .elem_size = sizeof(u8),
1139 .array_type = NO_ARRAY,
1140 .tlv_type = 0x10,
1141 .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
1142 xo_cal_data),
1143 },
1144 {}
1145};
1146
1147struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
1148 {
1149 .data_type = QMI_STRUCT,
1150 .elem_len = 1,
1151 .elem_size = sizeof(struct qmi_response_type_v01),
1152 .array_type = NO_ARRAY,
1153 .tlv_type = 0x02,
1154 .offset = offsetof(struct wlfw_cal_report_resp_msg_v01,
1155 resp),
1156 .ei_array = qmi_response_type_v01_ei,
1157 },
1158 {}
1159};
1160
1161struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
1162 {
1163 .data_type = QMI_SIGNED_4_BYTE_ENUM,
1164 .elem_len = 1,
1165 .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
1166 .array_type = NO_ARRAY,
1167 .tlv_type = 0x01,
1168 .offset = offsetof(struct wlfw_initiate_cal_download_ind_msg_v01,
1169 cal_id),
1170 },
1171 {}
1172};
1173
1174struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
1175 {
1176 .data_type = QMI_UNSIGNED_1_BYTE,
1177 .elem_len = 1,
1178 .elem_size = sizeof(u8),
1179 .array_type = NO_ARRAY,
1180 .tlv_type = 0x01,
1181 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1182 valid),
1183 },
1184 {
1185 .data_type = QMI_OPT_FLAG,
1186 .elem_len = 1,
1187 .elem_size = sizeof(u8),
1188 .array_type = NO_ARRAY,
1189 .tlv_type = 0x10,
1190 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1191 file_id_valid),
1192 },
1193 {
1194 .data_type = QMI_SIGNED_4_BYTE_ENUM,
1195 .elem_len = 1,
1196 .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
1197 .array_type = NO_ARRAY,
1198 .tlv_type = 0x10,
1199 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1200 file_id),
1201 },
1202 {
1203 .data_type = QMI_OPT_FLAG,
1204 .elem_len = 1,
1205 .elem_size = sizeof(u8),
1206 .array_type = NO_ARRAY,
1207 .tlv_type = 0x11,
1208 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1209 total_size_valid),
1210 },
1211 {
1212 .data_type = QMI_UNSIGNED_4_BYTE,
1213 .elem_len = 1,
1214 .elem_size = sizeof(u32),
1215 .array_type = NO_ARRAY,
1216 .tlv_type = 0x11,
1217 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1218 total_size),
1219 },
1220 {
1221 .data_type = QMI_OPT_FLAG,
1222 .elem_len = 1,
1223 .elem_size = sizeof(u8),
1224 .array_type = NO_ARRAY,
1225 .tlv_type = 0x12,
1226 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1227 seg_id_valid),
1228 },
1229 {
1230 .data_type = QMI_UNSIGNED_4_BYTE,
1231 .elem_len = 1,
1232 .elem_size = sizeof(u32),
1233 .array_type = NO_ARRAY,
1234 .tlv_type = 0x12,
1235 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1236 seg_id),
1237 },
1238 {
1239 .data_type = QMI_OPT_FLAG,
1240 .elem_len = 1,
1241 .elem_size = sizeof(u8),
1242 .array_type = NO_ARRAY,
1243 .tlv_type = 0x13,
1244 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1245 data_valid),
1246 },
1247 {
1248 .data_type = QMI_DATA_LEN,
1249 .elem_len = 1,
1250 .elem_size = sizeof(u16),
1251 .array_type = NO_ARRAY,
1252 .tlv_type = 0x13,
1253 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1254 data_len),
1255 },
1256 {
1257 .data_type = QMI_UNSIGNED_1_BYTE,
1258 .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
1259 .elem_size = sizeof(u8),
1260 .array_type = VAR_LEN_ARRAY,
1261 .tlv_type = 0x13,
1262 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1263 data),
1264 },
1265 {
1266 .data_type = QMI_OPT_FLAG,
1267 .elem_len = 1,
1268 .elem_size = sizeof(u8),
1269 .array_type = NO_ARRAY,
1270 .tlv_type = 0x14,
1271 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1272 end_valid),
1273 },
1274 {
1275 .data_type = QMI_UNSIGNED_1_BYTE,
1276 .elem_len = 1,
1277 .elem_size = sizeof(u8),
1278 .array_type = NO_ARRAY,
1279 .tlv_type = 0x14,
1280 .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
1281 end),
1282 },
1283 {}
1284};
1285
1286struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
1287 {
1288 .data_type = QMI_STRUCT,
1289 .elem_len = 1,
1290 .elem_size = sizeof(struct qmi_response_type_v01),
1291 .array_type = NO_ARRAY,
1292 .tlv_type = 0x02,
1293 .offset = offsetof(struct wlfw_cal_download_resp_msg_v01,
1294 resp),
1295 .ei_array = qmi_response_type_v01_ei,
1296 },
1297 {}
1298};
1299
1300struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
1301 {
1302 .data_type = QMI_SIGNED_4_BYTE_ENUM,
1303 .elem_len = 1,
1304 .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
1305 .array_type = NO_ARRAY,
1306 .tlv_type = 0x01,
1307 .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
1308 cal_id),
1309 },
1310 {
1311 .data_type = QMI_UNSIGNED_4_BYTE,
1312 .elem_len = 1,
1313 .elem_size = sizeof(u32),
1314 .array_type = NO_ARRAY,
1315 .tlv_type = 0x02,
1316 .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
1317 total_size),
1318 },
1319 {}
1320};
1321
1322struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
1323 {
1324 .data_type = QMI_SIGNED_4_BYTE_ENUM,
1325 .elem_len = 1,
1326 .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
1327 .array_type = NO_ARRAY,
1328 .tlv_type = 0x01,
1329 .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
1330 cal_id),
1331 },
1332 {
1333 .data_type = QMI_UNSIGNED_4_BYTE,
1334 .elem_len = 1,
1335 .elem_size = sizeof(u32),
1336 .array_type = NO_ARRAY,
1337 .tlv_type = 0x02,
1338 .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
1339 seg_id),
1340 },
1341 {}
1342};
1343
1344struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
1345 {
1346 .data_type = QMI_STRUCT,
1347 .elem_len = 1,
1348 .elem_size = sizeof(struct qmi_response_type_v01),
1349 .array_type = NO_ARRAY,
1350 .tlv_type = 0x02,
1351 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1352 resp),
1353 .ei_array = qmi_response_type_v01_ei,
1354 },
1355 {
1356 .data_type = QMI_OPT_FLAG,
1357 .elem_len = 1,
1358 .elem_size = sizeof(u8),
1359 .array_type = NO_ARRAY,
1360 .tlv_type = 0x10,
1361 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1362 file_id_valid),
1363 },
1364 {
1365 .data_type = QMI_SIGNED_4_BYTE_ENUM,
1366 .elem_len = 1,
1367 .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
1368 .array_type = NO_ARRAY,
1369 .tlv_type = 0x10,
1370 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1371 file_id),
1372 },
1373 {
1374 .data_type = QMI_OPT_FLAG,
1375 .elem_len = 1,
1376 .elem_size = sizeof(u8),
1377 .array_type = NO_ARRAY,
1378 .tlv_type = 0x11,
1379 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1380 total_size_valid),
1381 },
1382 {
1383 .data_type = QMI_UNSIGNED_4_BYTE,
1384 .elem_len = 1,
1385 .elem_size = sizeof(u32),
1386 .array_type = NO_ARRAY,
1387 .tlv_type = 0x11,
1388 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1389 total_size),
1390 },
1391 {
1392 .data_type = QMI_OPT_FLAG,
1393 .elem_len = 1,
1394 .elem_size = sizeof(u8),
1395 .array_type = NO_ARRAY,
1396 .tlv_type = 0x12,
1397 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1398 seg_id_valid),
1399 },
1400 {
1401 .data_type = QMI_UNSIGNED_4_BYTE,
1402 .elem_len = 1,
1403 .elem_size = sizeof(u32),
1404 .array_type = NO_ARRAY,
1405 .tlv_type = 0x12,
1406 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1407 seg_id),
1408 },
1409 {
1410 .data_type = QMI_OPT_FLAG,
1411 .elem_len = 1,
1412 .elem_size = sizeof(u8),
1413 .array_type = NO_ARRAY,
1414 .tlv_type = 0x13,
1415 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1416 data_valid),
1417 },
1418 {
1419 .data_type = QMI_DATA_LEN,
1420 .elem_len = 1,
1421 .elem_size = sizeof(u16),
1422 .array_type = NO_ARRAY,
1423 .tlv_type = 0x13,
1424 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1425 data_len),
1426 },
1427 {
1428 .data_type = QMI_UNSIGNED_1_BYTE,
1429 .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
1430 .elem_size = sizeof(u8),
1431 .array_type = VAR_LEN_ARRAY,
1432 .tlv_type = 0x13,
1433 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1434 data),
1435 },
1436 {
1437 .data_type = QMI_OPT_FLAG,
1438 .elem_len = 1,
1439 .elem_size = sizeof(u8),
1440 .array_type = NO_ARRAY,
1441 .tlv_type = 0x14,
1442 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1443 end_valid),
1444 },
1445 {
1446 .data_type = QMI_UNSIGNED_1_BYTE,
1447 .elem_len = 1,
1448 .elem_size = sizeof(u8),
1449 .array_type = NO_ARRAY,
1450 .tlv_type = 0x14,
1451 .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
1452 end),
1453 },
1454 {}
1455};
1456
1457struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
1458 {
1459 .data_type = QMI_UNSIGNED_8_BYTE,
1460 .elem_len = 1,
1461 .elem_size = sizeof(u64),
1462 .array_type = NO_ARRAY,
1463 .tlv_type = 0x01,
1464 .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
1465 msa_addr),
1466 },
1467 {
1468 .data_type = QMI_UNSIGNED_4_BYTE,
1469 .elem_len = 1,
1470 .elem_size = sizeof(u32),
1471 .array_type = NO_ARRAY,
1472 .tlv_type = 0x02,
1473 .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
1474 size),
1475 },
1476 {}
1477};
1478
1479struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
1480 {
1481 .data_type = QMI_STRUCT,
1482 .elem_len = 1,
1483 .elem_size = sizeof(struct qmi_response_type_v01),
1484 .array_type = NO_ARRAY,
1485 .tlv_type = 0x02,
1486 .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
1487 resp),
1488 .ei_array = qmi_response_type_v01_ei,
1489 },
1490 {
1491 .data_type = QMI_DATA_LEN,
1492 .elem_len = 1,
1493 .elem_size = sizeof(u8),
1494 .array_type = NO_ARRAY,
1495 .tlv_type = 0x03,
1496 .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
1497 mem_region_info_len),
1498 },
1499 {
1500 .data_type = QMI_STRUCT,
1501 .elem_len = QMI_WLFW_MAX_MEM_REG_V01,
1502 .elem_size = sizeof(struct wlfw_memory_region_info_s_v01),
1503 .array_type = VAR_LEN_ARRAY,
1504 .tlv_type = 0x03,
1505 .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
1506 mem_region_info),
1507 .ei_array = wlfw_memory_region_info_s_v01_ei,
1508 },
1509 {}
1510};
1511
1512struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
1513 {}
1514};
1515
1516struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
1517 {
1518 .data_type = QMI_STRUCT,
1519 .elem_len = 1,
1520 .elem_size = sizeof(struct qmi_response_type_v01),
1521 .array_type = NO_ARRAY,
1522 .tlv_type = 0x02,
1523 .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01,
1524 resp),
1525 .ei_array = qmi_response_type_v01_ei,
1526 },
1527 {}
1528};
1529
1530struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
1531 {
1532 .data_type = QMI_OPT_FLAG,
1533 .elem_len = 1,
1534 .elem_size = sizeof(u8),
1535 .array_type = NO_ARRAY,
1536 .tlv_type = 0x10,
1537 .offset = offsetof(struct wlfw_ini_req_msg_v01,
1538 enablefwlog_valid),
1539 },
1540 {
1541 .data_type = QMI_UNSIGNED_1_BYTE,
1542 .elem_len = 1,
1543 .elem_size = sizeof(u8),
1544 .array_type = NO_ARRAY,
1545 .tlv_type = 0x10,
1546 .offset = offsetof(struct wlfw_ini_req_msg_v01,
1547 enablefwlog),
1548 },
1549 {}
1550};
1551
1552struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
1553 {
1554 .data_type = QMI_STRUCT,
1555 .elem_len = 1,
1556 .elem_size = sizeof(struct qmi_response_type_v01),
1557 .array_type = NO_ARRAY,
1558 .tlv_type = 0x02,
1559 .offset = offsetof(struct wlfw_ini_resp_msg_v01,
1560 resp),
1561 .ei_array = qmi_response_type_v01_ei,
1562 },
1563 {}
1564};
1565
1566struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
1567 {
1568 .data_type = QMI_UNSIGNED_4_BYTE,
1569 .elem_len = 1,
1570 .elem_size = sizeof(u32),
1571 .array_type = NO_ARRAY,
1572 .tlv_type = 0x01,
1573 .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
1574 offset),
1575 },
1576 {
1577 .data_type = QMI_UNSIGNED_4_BYTE,
1578 .elem_len = 1,
1579 .elem_size = sizeof(u32),
1580 .array_type = NO_ARRAY,
1581 .tlv_type = 0x02,
1582 .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
1583 mem_type),
1584 },
1585 {
1586 .data_type = QMI_UNSIGNED_4_BYTE,
1587 .elem_len = 1,
1588 .elem_size = sizeof(u32),
1589 .array_type = NO_ARRAY,
1590 .tlv_type = 0x03,
1591 .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
1592 data_len),
1593 },
1594 {}
1595};
1596
1597struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
1598 {
1599 .data_type = QMI_STRUCT,
1600 .elem_len = 1,
1601 .elem_size = sizeof(struct qmi_response_type_v01),
1602 .array_type = NO_ARRAY,
1603 .tlv_type = 0x02,
1604 .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
1605 resp),
1606 .ei_array = qmi_response_type_v01_ei,
1607 },
1608 {
1609 .data_type = QMI_OPT_FLAG,
1610 .elem_len = 1,
1611 .elem_size = sizeof(u8),
1612 .array_type = NO_ARRAY,
1613 .tlv_type = 0x10,
1614 .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
1615 data_valid),
1616 },
1617 {
1618 .data_type = QMI_DATA_LEN,
1619 .elem_len = 1,
1620 .elem_size = sizeof(u16),
1621 .array_type = NO_ARRAY,
1622 .tlv_type = 0x10,
1623 .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
1624 data_len),
1625 },
1626 {
1627 .data_type = QMI_UNSIGNED_1_BYTE,
1628 .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
1629 .elem_size = sizeof(u8),
1630 .array_type = VAR_LEN_ARRAY,
1631 .tlv_type = 0x10,
1632 .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
1633 data),
1634 },
1635 {}
1636};
1637
1638struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
1639 {
1640 .data_type = QMI_UNSIGNED_4_BYTE,
1641 .elem_len = 1,
1642 .elem_size = sizeof(u32),
1643 .array_type = NO_ARRAY,
1644 .tlv_type = 0x01,
1645 .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
1646 offset),
1647 },
1648 {
1649 .data_type = QMI_UNSIGNED_4_BYTE,
1650 .elem_len = 1,
1651 .elem_size = sizeof(u32),
1652 .array_type = NO_ARRAY,
1653 .tlv_type = 0x02,
1654 .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
1655 mem_type),
1656 },
1657 {
1658 .data_type = QMI_DATA_LEN,
1659 .elem_len = 1,
1660 .elem_size = sizeof(u16),
1661 .array_type = NO_ARRAY,
1662 .tlv_type = 0x03,
1663 .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
1664 data_len),
1665 },
1666 {
1667 .data_type = QMI_UNSIGNED_1_BYTE,
1668 .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
1669 .elem_size = sizeof(u8),
1670 .array_type = VAR_LEN_ARRAY,
1671 .tlv_type = 0x03,
1672 .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
1673 data),
1674 },
1675 {}
1676};
1677
1678struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
1679 {
1680 .data_type = QMI_STRUCT,
1681 .elem_len = 1,
1682 .elem_size = sizeof(struct qmi_response_type_v01),
1683 .array_type = NO_ARRAY,
1684 .tlv_type = 0x02,
1685 .offset = offsetof(struct wlfw_athdiag_write_resp_msg_v01,
1686 resp),
1687 .ei_array = qmi_response_type_v01_ei,
1688 },
1689 {}
1690};
1691
1692struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
1693 {
1694 .data_type = QMI_UNSIGNED_8_BYTE,
1695 .elem_len = 1,
1696 .elem_size = sizeof(u64),
1697 .array_type = NO_ARRAY,
1698 .tlv_type = 0x01,
1699 .offset = offsetof(struct wlfw_vbatt_req_msg_v01,
1700 voltage_uv),
1701 },
1702 {}
1703};
1704
1705struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
1706 {
1707 .data_type = QMI_STRUCT,
1708 .elem_len = 1,
1709 .elem_size = sizeof(struct qmi_response_type_v01),
1710 .array_type = NO_ARRAY,
1711 .tlv_type = 0x02,
1712 .offset = offsetof(struct wlfw_vbatt_resp_msg_v01,
1713 resp),
1714 .ei_array = qmi_response_type_v01_ei,
1715 },
1716 {}
1717};
1718
1719struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
1720 {
1721 .data_type = QMI_OPT_FLAG,
1722 .elem_len = 1,
1723 .elem_size = sizeof(u8),
1724 .array_type = NO_ARRAY,
1725 .tlv_type = 0x10,
1726 .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
1727 mac_addr_valid),
1728 },
1729 {
1730 .data_type = QMI_UNSIGNED_1_BYTE,
1731 .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01,
1732 .elem_size = sizeof(u8),
1733 .array_type = STATIC_ARRAY,
1734 .tlv_type = 0x10,
1735 .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
1736 mac_addr),
1737 },
1738 {}
1739};
1740
1741struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
1742 {
1743 .data_type = QMI_STRUCT,
1744 .elem_len = 1,
1745 .elem_size = sizeof(struct qmi_response_type_v01),
1746 .array_type = NO_ARRAY,
1747 .tlv_type = 0x02,
1748 .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01,
1749 resp),
1750 .ei_array = qmi_response_type_v01_ei,
1751 },
1752 {}
1753};
1754
1755struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
1756 {
1757 .data_type = QMI_OPT_FLAG,
1758 .elem_len = 1,
1759 .elem_size = sizeof(u8),
1760 .array_type = NO_ARRAY,
1761 .tlv_type = 0x10,
1762 .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
1763 daemon_support_valid),
1764 },
1765 {
1766 .data_type = QMI_UNSIGNED_1_BYTE,
1767 .elem_len = 1,
1768 .elem_size = sizeof(u8),
1769 .array_type = NO_ARRAY,
1770 .tlv_type = 0x10,
1771 .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
1772 daemon_support),
1773 },
1774 {}
1775};
1776
1777struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
1778 {
1779 .data_type = QMI_STRUCT,
1780 .elem_len = 1,
1781 .elem_size = sizeof(struct qmi_response_type_v01),
1782 .array_type = NO_ARRAY,
1783 .tlv_type = 0x02,
1784 .offset = offsetof(struct wlfw_host_cap_resp_msg_v01,
1785 resp),
1786 .ei_array = qmi_response_type_v01_ei,
1787 },
1788 {}
1789};
1790
1791struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
1792 {
1793 .data_type = QMI_DATA_LEN,
1794 .elem_len = 1,
1795 .elem_size = sizeof(u8),
1796 .array_type = NO_ARRAY,
1797 .tlv_type = 0x01,
1798 .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
1799 mem_seg_len),
1800 },
1801 {
1802 .data_type = QMI_STRUCT,
1803 .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
1804 .elem_size = sizeof(struct wlfw_mem_seg_s_v01),
1805 .array_type = VAR_LEN_ARRAY,
1806 .tlv_type = 0x01,
1807 .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
1808 mem_seg),
1809 .ei_array = wlfw_mem_seg_s_v01_ei,
1810 },
1811 {}
1812};
1813
1814struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
1815 {
1816 .data_type = QMI_DATA_LEN,
1817 .elem_len = 1,
1818 .elem_size = sizeof(u8),
1819 .array_type = NO_ARRAY,
1820 .tlv_type = 0x01,
1821 .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
1822 mem_seg_len),
1823 },
1824 {
1825 .data_type = QMI_STRUCT,
1826 .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
1827 .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
1828 .array_type = VAR_LEN_ARRAY,
1829 .tlv_type = 0x01,
1830 .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
1831 mem_seg),
1832 .ei_array = wlfw_mem_seg_resp_s_v01_ei,
1833 },
1834 {}
1835};
1836
1837struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
1838 {
1839 .data_type = QMI_STRUCT,
1840 .elem_len = 1,
1841 .elem_size = sizeof(struct qmi_response_type_v01),
1842 .array_type = NO_ARRAY,
1843 .tlv_type = 0x02,
1844 .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01,
1845 resp),
1846 .ei_array = qmi_response_type_v01_ei,
1847 },
1848 {}
1849};
1850
1851struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = {
1852 {}
1853};
1854
1855struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
1856 {}
1857};
1858
1859struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
1860 {
1861 .data_type = QMI_OPT_FLAG,
1862 .elem_len = 1,
1863 .elem_size = sizeof(u8),
1864 .array_type = NO_ARRAY,
1865 .tlv_type = 0x10,
1866 .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
1867 cause_for_rejuvenation_valid),
1868 },
1869 {
1870 .data_type = QMI_UNSIGNED_1_BYTE,
1871 .elem_len = 1,
1872 .elem_size = sizeof(u8),
1873 .array_type = NO_ARRAY,
1874 .tlv_type = 0x10,
1875 .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
1876 cause_for_rejuvenation),
1877 },
1878 {
1879 .data_type = QMI_OPT_FLAG,
1880 .elem_len = 1,
1881 .elem_size = sizeof(u8),
1882 .array_type = NO_ARRAY,
1883 .tlv_type = 0x11,
1884 .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
1885 requesting_sub_system_valid),
1886 },
1887 {
1888 .data_type = QMI_UNSIGNED_1_BYTE,
1889 .elem_len = 1,
1890 .elem_size = sizeof(u8),
1891 .array_type = NO_ARRAY,
1892 .tlv_type = 0x11,
1893 .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
1894 requesting_sub_system),
1895 },
1896 {
1897 .data_type = QMI_OPT_FLAG,
1898 .elem_len = 1,
1899 .elem_size = sizeof(u8),
1900 .array_type = NO_ARRAY,
1901 .tlv_type = 0x12,
1902 .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
1903 line_number_valid),
1904 },
1905 {
1906 .data_type = QMI_UNSIGNED_2_BYTE,
1907 .elem_len = 1,
1908 .elem_size = sizeof(u16),
1909 .array_type = NO_ARRAY,
1910 .tlv_type = 0x12,
1911 .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
1912 line_number),
1913 },
1914 {
1915 .data_type = QMI_OPT_FLAG,
1916 .elem_len = 1,
1917 .elem_size = sizeof(u8),
1918 .array_type = NO_ARRAY,
1919 .tlv_type = 0x13,
1920 .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
1921 function_name_valid),
1922 },
1923 {
1924 .data_type = QMI_STRING,
1925 .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
1926 .elem_size = sizeof(char),
1927 .array_type = NO_ARRAY,
1928 .tlv_type = 0x13,
1929 .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
1930 function_name),
1931 },
1932 {}
1933};
1934
1935struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
1936 {}
1937};
1938
1939struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
1940 {
1941 .data_type = QMI_STRUCT,
1942 .elem_len = 1,
1943 .elem_size = sizeof(struct qmi_response_type_v01),
1944 .array_type = NO_ARRAY,
1945 .tlv_type = 0x02,
1946 .offset = offsetof(struct wlfw_rejuvenate_ack_resp_msg_v01,
1947 resp),
1948 .ei_array = qmi_response_type_v01_ei,
1949 },
1950 {}
1951};
1952
1953struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
1954 {
1955 .data_type = QMI_OPT_FLAG,
1956 .elem_len = 1,
1957 .elem_size = sizeof(u8),
1958 .array_type = NO_ARRAY,
1959 .tlv_type = 0x10,
1960 .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
1961 mask_valid),
1962 },
1963 {
1964 .data_type = QMI_UNSIGNED_8_BYTE,
1965 .elem_len = 1,
1966 .elem_size = sizeof(u64),
1967 .array_type = NO_ARRAY,
1968 .tlv_type = 0x10,
1969 .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
1970 mask),
1971 },
1972 {}
1973};
1974
1975struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
1976 {
1977 .data_type = QMI_STRUCT,
1978 .elem_len = 1,
1979 .elem_size = sizeof(struct qmi_response_type_v01),
1980 .array_type = NO_ARRAY,
1981 .tlv_type = 0x02,
1982 .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
1983 resp),
1984 .ei_array = qmi_response_type_v01_ei,
1985 },
1986 {
1987 .data_type = QMI_OPT_FLAG,
1988 .elem_len = 1,
1989 .elem_size = sizeof(u8),
1990 .array_type = NO_ARRAY,
1991 .tlv_type = 0x10,
1992 .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
1993 prev_mask_valid),
1994 },
1995 {
1996 .data_type = QMI_UNSIGNED_8_BYTE,
1997 .elem_len = 1,
1998 .elem_size = sizeof(u64),
1999 .array_type = NO_ARRAY,
2000 .tlv_type = 0x10,
2001 .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
2002 prev_mask),
2003 },
2004 {
2005 .data_type = QMI_OPT_FLAG,
2006 .elem_len = 1,
2007 .elem_size = sizeof(u8),
2008 .array_type = NO_ARRAY,
2009 .tlv_type = 0x11,
2010 .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
2011 curr_mask_valid),
2012 },
2013 {
2014 .data_type = QMI_UNSIGNED_8_BYTE,
2015 .elem_len = 1,
2016 .elem_size = sizeof(u64),
2017 .array_type = NO_ARRAY,
2018 .tlv_type = 0x11,
2019 .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
2020 curr_mask),
2021 },
2022 {}
2023};
2024
2025struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
2026 {
2027 .data_type = QMI_UNSIGNED_8_BYTE,
2028 .elem_len = 1,
2029 .elem_size = sizeof(u64),
2030 .array_type = NO_ARRAY,
2031 .tlv_type = 0x01,
2032 .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
2033 addr),
2034 },
2035 {
2036 .data_type = QMI_UNSIGNED_4_BYTE,
2037 .elem_len = 1,
2038 .elem_size = sizeof(u32),
2039 .array_type = NO_ARRAY,
2040 .tlv_type = 0x02,
2041 .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
2042 size),
2043 },
2044 {}
2045};
2046
2047struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
2048 {
2049 .data_type = QMI_STRUCT,
2050 .elem_len = 1,
2051 .elem_size = sizeof(struct qmi_response_type_v01),
2052 .array_type = NO_ARRAY,
2053 .tlv_type = 0x02,
2054 .offset = offsetof(struct wlfw_m3_info_resp_msg_v01,
2055 resp),
2056 .ei_array = qmi_response_type_v01_ei,
2057 },
2058 {}
2059};
2060
2061struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
2062 {
2063 .data_type = QMI_UNSIGNED_1_BYTE,
2064 .elem_len = 1,
2065 .elem_size = sizeof(u8),
2066 .array_type = NO_ARRAY,
2067 .tlv_type = 0x01,
2068 .offset = offsetof(struct wlfw_xo_cal_ind_msg_v01,
2069 xo_cal_data),
2070 },
2071 {}
2072};
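/*
 * A minimal sketch of how qmi.c is expected to consume one of these
 * tables via the kernel QMI transaction API (qmi_txn_init(),
 * qmi_send_request(), qmi_txn_wait()); error handling is trimmed, and
 * the WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN macro name is assumed to follow
 * the max-len pattern generated alongside these tables:
 */
static int example_send_cap_req(struct qmi_handle *hdl,
				struct sockaddr_qrtr *sq)
{
	struct wlfw_cap_resp_msg_v01 resp = {};
	struct wlfw_cap_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	/* Bind the response decode table to the transaction. */
	ret = qmi_txn_init(hdl, &txn, wlfw_cap_resp_msg_v01_ei, &resp);
	if (ret < 0)
		return ret;

	/* Encode the (empty) request using its element-info table. */
	ret = qmi_send_request(hdl, sq, &txn, QMI_WLFW_CAP_REQ_V01,
			       WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cap_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	/* Wait for the decoded response to land in &resp. */
	return qmi_txn_wait(&txn, 5 * HZ);
}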
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
new file mode 100644
index 000000000000..c5e3870b8871
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -0,0 +1,677 @@
1/*
2 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef WCN3990_QMI_SVC_V01_H
18#define WCN3990_QMI_SVC_V01_H
19
20#define WLFW_SERVICE_ID_V01 0x45
21#define WLFW_SERVICE_VERS_V01 0x01
22
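/* QMI message ids; a request and its matching response share one id
 * (e.g. QMI_WLFW_M3_INFO_REQ_V01 and QMI_WLFW_M3_INFO_RESP_V01 are both
 * 0x003C), so the values below are not unique across the list.
 */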
23#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
24#define QMI_WLFW_MEM_READY_IND_V01 0x0037
25#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
26#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
27#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
28#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
29#define QMI_WLFW_CAP_REQ_V01 0x0024
30#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
31#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
32#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
33#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
34#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
35#define QMI_WLFW_XO_CAL_IND_V01 0x003D
36#define QMI_WLFW_INI_RESP_V01 0x002F
37#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
38#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
39#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
40#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
41#define QMI_WLFW_MSA_READY_IND_V01 0x002B
42#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
43#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
44#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
45#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
46#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
47#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
48#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
49#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
50#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
51#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
52#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
53#define QMI_WLFW_FW_READY_IND_V01 0x0021
54#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
55#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
56#define QMI_WLFW_INI_REQ_V01 0x002F
57#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
58#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
59#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
60#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
61#define QMI_WLFW_CAP_RESP_V01 0x0024
62#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
63#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
64#define QMI_WLFW_VBATT_REQ_V01 0x0032
65#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
66#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
67#define QMI_WLFW_VBATT_RESP_V01 0x0032
68#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
69#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
70#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
71#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
72#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
73
74#define QMI_WLFW_MAX_MEM_REG_V01 2
75#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 16
76#define QMI_WLFW_MAX_NUM_CAL_V01 5
77#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
78#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
79#define QMI_WLFW_MAX_NUM_CE_V01 12
80#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
81#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
82#define QMI_WLFW_MAX_NUM_GPIO_V01 32
83#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
84#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
85#define QMI_WLFW_MAX_STR_LEN_V01 16
86#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
87#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
88#define QMI_WLFW_MAX_SHADOW_REG_V2 36
89#define QMI_WLFW_MAX_NUM_SVC_V01 24
90
91enum wlfw_driver_mode_enum_v01 {
92 QMI_WLFW_MISSION_V01 = 0,
93 QMI_WLFW_FTM_V01 = 1,
94 QMI_WLFW_EPPING_V01 = 2,
95 QMI_WLFW_WALTEST_V01 = 3,
96 QMI_WLFW_OFF_V01 = 4,
97 QMI_WLFW_CCPM_V01 = 5,
98 QMI_WLFW_QVIT_V01 = 6,
99 QMI_WLFW_CALIBRATION_V01 = 7,
100};
101
102enum wlfw_cal_temp_id_enum_v01 {
103 QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
104 QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
105 QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
106 QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
107 QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
108};
109
110enum wlfw_pipedir_enum_v01 {
111 QMI_WLFW_PIPEDIR_NONE_V01 = 0,
112 QMI_WLFW_PIPEDIR_IN_V01 = 1,
113 QMI_WLFW_PIPEDIR_OUT_V01 = 2,
114 QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
115};
116
117enum wlfw_mem_type_enum_v01 {
118 QMI_WLFW_MEM_TYPE_MSA_V01 = 0,
119 QMI_WLFW_MEM_TYPE_DDR_V01 = 1,
120};
121
122#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
123#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
124#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
125#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04)
126#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08)
127#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10)
128
129#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL)
130#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
131#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
132#define QMI_WLFW_MEM_READY_V01 ((u64)0x08ULL)
133#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL)
134
135#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
136
137struct wlfw_ce_tgt_pipe_cfg_s_v01 {
138 __le32 pipe_num;
139 __le32 pipe_dir;
140 __le32 nentries;
141 __le32 nbytes_max;
142 __le32 flags;
143};
144
145struct wlfw_ce_svc_pipe_cfg_s_v01 {
146 __le32 service_id;
147 __le32 pipe_dir;
148 __le32 pipe_num;
149};
150
151struct wlfw_shadow_reg_cfg_s_v01 {
152 u16 id;
153 u16 offset;
154};
155
156struct wlfw_shadow_reg_v2_cfg_s_v01 {
157 u32 addr;
158};
159
160struct wlfw_memory_region_info_s_v01 {
161 u64 region_addr;
162 u32 size;
163 u8 secure_flag;
164};
165
166struct wlfw_mem_cfg_s_v01 {
167 u64 offset;
168 u32 size;
169 u8 secure_flag;
170};
171
172struct wlfw_mem_seg_s_v01 {
173 u32 size;
174 enum wlfw_mem_type_enum_v01 type;
175 u32 mem_cfg_len;
176 struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01];
177};
178
179struct wlfw_mem_seg_resp_s_v01 {
180 u64 addr;
181 u32 size;
182 enum wlfw_mem_type_enum_v01 type;
183};
184
185struct wlfw_rf_chip_info_s_v01 {
186 u32 chip_id;
187 u32 chip_family;
188};
189
190struct wlfw_rf_board_info_s_v01 {
191 u32 board_id;
192};
193
194struct wlfw_soc_info_s_v01 {
195 u32 soc_id;
196};
197
198struct wlfw_fw_version_info_s_v01 {
199 u32 fw_version;
200 char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
201};
202
203struct wlfw_ind_register_req_msg_v01 {
204 u8 fw_ready_enable_valid;
205 u8 fw_ready_enable;
206 u8 initiate_cal_download_enable_valid;
207 u8 initiate_cal_download_enable;
208 u8 initiate_cal_update_enable_valid;
209 u8 initiate_cal_update_enable;
210 u8 msa_ready_enable_valid;
211 u8 msa_ready_enable;
212 u8 pin_connect_result_enable_valid;
213 u8 pin_connect_result_enable;
214 u8 client_id_valid;
215 u32 client_id;
216 u8 request_mem_enable_valid;
217 u8 request_mem_enable;
218 u8 mem_ready_enable_valid;
219 u8 mem_ready_enable;
220 u8 fw_init_done_enable_valid;
221 u8 fw_init_done_enable;
222 u8 rejuvenate_enable_valid;
223 u32 rejuvenate_enable;
224 u8 xo_cal_enable_valid;
225 u8 xo_cal_enable;
226};
227
228#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 50
229extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
230
231struct wlfw_ind_register_resp_msg_v01 {
232 struct qmi_response_type_v01 resp;
233 u8 fw_status_valid;
234 u64 fw_status;
235};
236
237#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
238extern struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
239
240struct wlfw_fw_ready_ind_msg_v01 {
241 char placeholder;
242};
243
244#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
245extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
246
247struct wlfw_msa_ready_ind_msg_v01 {
248 char placeholder;
249};
250
251#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
252extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
253
254struct wlfw_pin_connect_result_ind_msg_v01 {
255 u8 pwr_pin_result_valid;
256 u32 pwr_pin_result;
257 u8 phy_io_pin_result_valid;
258 u32 phy_io_pin_result;
259 u8 rf_pin_result_valid;
260 u32 rf_pin_result;
261};
262
263#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
264extern struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
265
266struct wlfw_wlan_mode_req_msg_v01 {
267 enum wlfw_driver_mode_enum_v01 mode;
268 u8 hw_debug_valid;
269 u8 hw_debug;
270};
271
272#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
273extern struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
274
275struct wlfw_wlan_mode_resp_msg_v01 {
276 struct qmi_response_type_v01 resp;
277};
278
279#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
280extern struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
281
282struct wlfw_wlan_cfg_req_msg_v01 {
283 u8 host_version_valid;
284 char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
285 u8 tgt_cfg_valid;
286 u32 tgt_cfg_len;
287 struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
288 u8 svc_cfg_valid;
289 u32 svc_cfg_len;
290 struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
291 u8 shadow_reg_valid;
292 u32 shadow_reg_len;
293 struct wlfw_shadow_reg_cfg_s_v01 shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
294 u8 shadow_reg_v2_valid;
295 u32 shadow_reg_v2_len;
296 struct wlfw_shadow_reg_v2_cfg_s_v01 shadow_reg_v2[QMI_WLFW_MAX_SHADOW_REG_V2];
297};
298
299#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
300extern struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
301
302struct wlfw_wlan_cfg_resp_msg_v01 {
303 struct qmi_response_type_v01 resp;
304};
305
306#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
307extern struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
308
309struct wlfw_cap_req_msg_v01 {
310 char placeholder;
311};
312
313#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
314extern struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
315
316struct wlfw_cap_resp_msg_v01 {
317 struct qmi_response_type_v01 resp;
318 u8 chip_info_valid;
319 struct wlfw_rf_chip_info_s_v01 chip_info;
320 u8 board_info_valid;
321 struct wlfw_rf_board_info_s_v01 board_info;
322 u8 soc_info_valid;
323 struct wlfw_soc_info_s_v01 soc_info;
324 u8 fw_version_info_valid;
325 struct wlfw_fw_version_info_s_v01 fw_version_info;
326 u8 fw_build_id_valid;
327 char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
328 u8 num_macs_valid;
329 u8 num_macs;
330};
331
332#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
333extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
334
335struct wlfw_bdf_download_req_msg_v01 {
336 u8 valid;
337 u8 file_id_valid;
338 enum wlfw_cal_temp_id_enum_v01 file_id;
339 u8 total_size_valid;
340 u32 total_size;
341 u8 seg_id_valid;
342 u32 seg_id;
343 u8 data_valid;
344 u32 data_len;
345 u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
346 u8 end_valid;
347 u8 end;
348 u8 bdf_type_valid;
349 u8 bdf_type;
350};
351
352#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
353extern struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
354
355struct wlfw_bdf_download_resp_msg_v01 {
356 struct qmi_response_type_v01 resp;
357};
358
359#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
360extern struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
361
362struct wlfw_cal_report_req_msg_v01 {
363 u32 meta_data_len;
364 enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
365 u8 xo_cal_data_valid;
366 u8 xo_cal_data;
367};
368
369#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
370extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
371
372struct wlfw_cal_report_resp_msg_v01 {
373 struct qmi_response_type_v01 resp;
374};
375
376#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
377extern struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
378
379struct wlfw_initiate_cal_download_ind_msg_v01 {
380 enum wlfw_cal_temp_id_enum_v01 cal_id;
381};
382
383#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
384extern struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
385
386struct wlfw_cal_download_req_msg_v01 {
387 u8 valid;
388 u8 file_id_valid;
389 enum wlfw_cal_temp_id_enum_v01 file_id;
390 u8 total_size_valid;
391 u32 total_size;
392 u8 seg_id_valid;
393 u32 seg_id;
394 u8 data_valid;
395 u32 data_len;
396 u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
397 u8 end_valid;
398 u8 end;
399};
400
401#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
402extern struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
403
404struct wlfw_cal_download_resp_msg_v01 {
405 struct qmi_response_type_v01 resp;
406};
407
408#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
409extern struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
410
411struct wlfw_initiate_cal_update_ind_msg_v01 {
412 enum wlfw_cal_temp_id_enum_v01 cal_id;
413 u32 total_size;
414};
415
416#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
417extern struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
418
419struct wlfw_cal_update_req_msg_v01 {
420 enum wlfw_cal_temp_id_enum_v01 cal_id;
421 u32 seg_id;
422};
423
424#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
425extern struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
426
427struct wlfw_cal_update_resp_msg_v01 {
428 struct qmi_response_type_v01 resp;
429 u8 file_id_valid;
430 enum wlfw_cal_temp_id_enum_v01 file_id;
431 u8 total_size_valid;
432 u32 total_size;
433 u8 seg_id_valid;
434 u32 seg_id;
435 u8 data_valid;
436 u32 data_len;
437 u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
438 u8 end_valid;
439 u8 end;
440};
441
442#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
443extern struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
444
445struct wlfw_msa_info_req_msg_v01 {
446 u64 msa_addr;
447 u32 size;
448};
449
450#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
451extern struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
452
453struct wlfw_msa_info_resp_msg_v01 {
454 struct qmi_response_type_v01 resp;
455 u32 mem_region_info_len;
456 struct wlfw_memory_region_info_s_v01 mem_region_info[QMI_WLFW_MAX_MEM_REG_V01];
457};
458
459#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
460extern struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
461
462struct wlfw_msa_ready_req_msg_v01 {
463 char placeholder;
464};
465
466#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
467extern struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
468
469struct wlfw_msa_ready_resp_msg_v01 {
470 struct qmi_response_type_v01 resp;
471};
472
473#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
474extern struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
475
476struct wlfw_ini_req_msg_v01 {
477 u8 enablefwlog_valid;
478 u8 enablefwlog;
479};
480
481#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
482extern struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
483
484struct wlfw_ini_resp_msg_v01 {
485 struct qmi_response_type_v01 resp;
486};
487
488#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
489extern struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
490
491struct wlfw_athdiag_read_req_msg_v01 {
492 u32 offset;
493 u32 mem_type;
494 u32 data_len;
495};
496
497#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
498extern struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
499
500struct wlfw_athdiag_read_resp_msg_v01 {
501 struct qmi_response_type_v01 resp;
502 u8 data_valid;
503 u32 data_len;
504 u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
505};
506
507#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
508extern struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
509
510struct wlfw_athdiag_write_req_msg_v01 {
511 u32 offset;
512 u32 mem_type;
513 u32 data_len;
514 u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
515};
516
517#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
518extern struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
519
520struct wlfw_athdiag_write_resp_msg_v01 {
521 struct qmi_response_type_v01 resp;
522};
523
524#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
525extern struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
526
527struct wlfw_vbatt_req_msg_v01 {
528 u64 voltage_uv;
529};
530
531#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
532extern struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
533
534struct wlfw_vbatt_resp_msg_v01 {
535 struct qmi_response_type_v01 resp;
536};
537
538#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
539extern struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
540
541struct wlfw_mac_addr_req_msg_v01 {
542 u8 mac_addr_valid;
543 u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
544};
545
546#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
547extern struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
548
549struct wlfw_mac_addr_resp_msg_v01 {
550 struct qmi_response_type_v01 resp;
551};
552
553#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
554extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
555
556struct wlfw_host_cap_req_msg_v01 {
557 u8 daemon_support_valid;
558 u8 daemon_support;
559};
560
561#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
562extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
563
564struct wlfw_host_cap_resp_msg_v01 {
565 struct qmi_response_type_v01 resp;
566};
567
568#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
569extern struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
570
571struct wlfw_request_mem_ind_msg_v01 {
572 u32 mem_seg_len;
573 struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
574};
575
576#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 564
577extern struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
578
579struct wlfw_respond_mem_req_msg_v01 {
580 u32 mem_seg_len;
581 struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
582};
583
584#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 260
585extern struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
586
587struct wlfw_respond_mem_resp_msg_v01 {
588 struct qmi_response_type_v01 resp;
589};
590
591#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
592extern struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
593
594struct wlfw_mem_ready_ind_msg_v01 {
595 char placeholder;
596};
597
598#define WLFW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
599extern struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
600
601struct wlfw_fw_init_done_ind_msg_v01 {
602 char placeholder;
603};
604
605#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
606extern struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
607
608struct wlfw_rejuvenate_ind_msg_v01 {
609 u8 cause_for_rejuvenation_valid;
610 u8 cause_for_rejuvenation;
611 u8 requesting_sub_system_valid;
612 u8 requesting_sub_system;
613 u8 line_number_valid;
614 u16 line_number;
615 u8 function_name_valid;
616 char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
617};
618
619#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
620extern struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
621
622struct wlfw_rejuvenate_ack_req_msg_v01 {
623 char placeholder;
624};
625
626#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
627extern struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
628
629struct wlfw_rejuvenate_ack_resp_msg_v01 {
630 struct qmi_response_type_v01 resp;
631};
632
633#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
634extern struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
635
636struct wlfw_dynamic_feature_mask_req_msg_v01 {
637 u8 mask_valid;
638 u64 mask;
639};
640
641#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
642extern struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
643
644struct wlfw_dynamic_feature_mask_resp_msg_v01 {
645 struct qmi_response_type_v01 resp;
646 u8 prev_mask_valid;
647 u64 prev_mask;
648 u8 curr_mask_valid;
649 u64 curr_mask;
650};
651
652#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
653extern struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
654
655struct wlfw_m3_info_req_msg_v01 {
656 u64 addr;
657 u32 size;
658};
659
660#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
661extern struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
662
663struct wlfw_m3_info_resp_msg_v01 {
664 struct qmi_response_type_v01 resp;
665};
666
667#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
668extern struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
669
670struct wlfw_xo_cal_ind_msg_v01 {
671 u8 xo_cal_data;
672};
673
674#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
675extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
676
677#endif
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index f7b5b855aab2..8d3d9bca410f 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -67,6 +67,72 @@ static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
67static const struct ath10k_snoc_drv_priv drv_priv = {
68 .hw_rev = ATH10K_HW_WCN3990,
69 .dma_mask = DMA_BIT_MASK(37),
70 .msa_size = 0x100000,
71};
72
73#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
74#define WCN3990_DST_WR_IDX_OFFSET 0x40
75
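/* Per-CE shadow register map handed to the firmware through
 * ath10k_qmi_wlan_enable() below; source (host->target) rings use the
 * SRC write-index offset and destination rings the DST one.
 */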
76static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
77 {
78 .ce_id = __cpu_to_le16(0),
79 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
80 },
81
82 {
83 .ce_id = __cpu_to_le16(3),
84 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
85 },
86
87 {
88 .ce_id = __cpu_to_le16(4),
89 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
90 },
91
92 {
93 .ce_id = __cpu_to_le16(5),
94 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
95 },
96
97 {
98 .ce_id = __cpu_to_le16(7),
99 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
100 },
101
102 {
103 .ce_id = __cpu_to_le16(1),
104 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
105 },
106
107 {
108 .ce_id = __cpu_to_le16(2),
109 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
110 },
111
112 {
113 .ce_id = __cpu_to_le16(7),
114 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
115 },
116
117 {
118 .ce_id = __cpu_to_le16(8),
119 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
120 },
121
122 {
123 .ce_id = __cpu_to_le16(9),
124 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
125 },
126
127 {
128 .ce_id = __cpu_to_le16(10),
129 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
130 },
131
132 {
133 .ce_id = __cpu_to_le16(11),
134 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
135 },
136};
137
138static struct ce_attr host_ce_config_wlan[] = {
@@ -176,6 +242,128 @@ static struct ce_attr host_ce_config_wlan[] = {
242 },
243};
244
245static struct ce_pipe_config target_ce_config_wlan[] = {
246 /* CE0: host->target HTC control and raw streams */
247 {
248 .pipenum = __cpu_to_le32(0),
249 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
250 .nentries = __cpu_to_le32(32),
251 .nbytes_max = __cpu_to_le32(2048),
252 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
253 .reserved = __cpu_to_le32(0),
254 },
255
256 /* CE1: target->host HTT + HTC control */
257 {
258 .pipenum = __cpu_to_le32(1),
259 .pipedir = __cpu_to_le32(PIPEDIR_IN),
260 .nentries = __cpu_to_le32(32),
261 .nbytes_max = __cpu_to_le32(2048),
262 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
263 .reserved = __cpu_to_le32(0),
264 },
265
266 /* CE2: target->host WMI */
267 {
268 .pipenum = __cpu_to_le32(2),
269 .pipedir = __cpu_to_le32(PIPEDIR_IN),
270 .nentries = __cpu_to_le32(64),
271 .nbytes_max = __cpu_to_le32(2048),
272 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
273 .reserved = __cpu_to_le32(0),
274 },
275
276 /* CE3: host->target WMI */
277 {
278 .pipenum = __cpu_to_le32(3),
279 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
280 .nentries = __cpu_to_le32(32),
281 .nbytes_max = __cpu_to_le32(2048),
282 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
283 .reserved = __cpu_to_le32(0),
284 },
285
286 /* CE4: host->target HTT */
287 {
288 .pipenum = __cpu_to_le32(4),
289 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
290 .nentries = __cpu_to_le32(256),
291 .nbytes_max = __cpu_to_le32(256),
292 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
293 .reserved = __cpu_to_le32(0),
294 },
295
296 /* CE5: target->host HTT (HIF->HTT) */
297 {
298 .pipenum = __cpu_to_le32(5),
299 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
300 .nentries = __cpu_to_le32(1024),
301 .nbytes_max = __cpu_to_le32(64),
302 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
303 .reserved = __cpu_to_le32(0),
304 },
305
306 /* CE6: Reserved for target autonomous hif_memcpy */
307 {
308 .pipenum = __cpu_to_le32(6),
309 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
310 .nentries = __cpu_to_le32(32),
311 .nbytes_max = __cpu_to_le32(16384),
312 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
313 .reserved = __cpu_to_le32(0),
314 },
315
316 /* CE7 used only by Host */
317 {
318 .pipenum = __cpu_to_le32(7),
319 .pipedir = __cpu_to_le32(4),
320 .nentries = __cpu_to_le32(0),
321 .nbytes_max = __cpu_to_le32(0),
322 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
323 .reserved = __cpu_to_le32(0),
324 },
325
326 /* CE8 Target to uMC */
327 {
328 .pipenum = __cpu_to_le32(8),
329 .pipedir = __cpu_to_le32(PIPEDIR_IN),
330 .nentries = __cpu_to_le32(32),
331 .nbytes_max = __cpu_to_le32(2048),
332 .flags = __cpu_to_le32(0),
333 .reserved = __cpu_to_le32(0),
334 },
335
336 /* CE9 target->host HTT */
337 {
338 .pipenum = __cpu_to_le32(9),
339 .pipedir = __cpu_to_le32(PIPEDIR_IN),
340 .nentries = __cpu_to_le32(32),
341 .nbytes_max = __cpu_to_le32(2048),
342 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
343 .reserved = __cpu_to_le32(0),
344 },
345
346 /* CE10 target->host HTT */
347 {
348 .pipenum = __cpu_to_le32(10),
349 .pipedir = __cpu_to_le32(PIPEDIR_IN),
350 .nentries = __cpu_to_le32(32),
351 .nbytes_max = __cpu_to_le32(2048),
352 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
353 .reserved = __cpu_to_le32(0),
354 },
355
356 /* CE11 target autonomous qcache memcpy */
357 {
358 .pipenum = __cpu_to_le32(11),
359 .pipedir = __cpu_to_le32(PIPEDIR_IN),
360 .nentries = __cpu_to_le32(32),
361 .nbytes_max = __cpu_to_le32(2048),
362 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
363 .reserved = __cpu_to_le32(0),
364 },
365};
366
367static struct service_to_pipe target_service_to_ce_map_wlan[] = {
368 {
369 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
@@ -766,11 +954,47 @@ static int ath10k_snoc_init_pipes(struct ath10k *ar)
954
955static int ath10k_snoc_wlan_enable(struct ath10k *ar)
956{
769 return 0;
957 struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
958 struct ath10k_qmi_wlan_enable_cfg cfg;
959 enum wlfw_driver_mode_enum_v01 mode;
960 int pipe_num;
961
962 for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
963 tgt_cfg[pipe_num].pipe_num =
964 target_ce_config_wlan[pipe_num].pipenum;
965 tgt_cfg[pipe_num].pipe_dir =
966 target_ce_config_wlan[pipe_num].pipedir;
967 tgt_cfg[pipe_num].nentries =
968 target_ce_config_wlan[pipe_num].nentries;
969 tgt_cfg[pipe_num].nbytes_max =
970 target_ce_config_wlan[pipe_num].nbytes_max;
971 tgt_cfg[pipe_num].flags =
972 target_ce_config_wlan[pipe_num].flags;
973 tgt_cfg[pipe_num].reserved = 0;
974 }
975
976 cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
977 sizeof(struct ath10k_tgt_pipe_cfg);
978 cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
979 &tgt_cfg;
980 cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
981 sizeof(struct ath10k_svc_pipe_cfg);
982 cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
983 &target_service_to_ce_map_wlan;
984 cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
985 sizeof(struct ath10k_shadow_reg_cfg);
986 cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
987 &target_shadow_reg_cfg_map;
988
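	/* Normal operation runs the firmware in mission mode; the other
	 * modes (FTM, calibration, ...) are listed in
	 * wlfw_driver_mode_enum_v01.
	 */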
989 mode = QMI_WLFW_MISSION_V01;
990
991 return ath10k_qmi_wlan_enable(ar, &cfg, mode,
992 NULL);
993}
994
995static void ath10k_snoc_wlan_disable(struct ath10k *ar)
996{
997 ath10k_qmi_wlan_disable(ar);
998}
999
1000static void ath10k_snoc_hif_power_down(struct ath10k *ar)
@@ -957,6 +1181,32 @@ out:
1181 return ret;
1182}
1183
1184int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
1185{
1186 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1187 struct ath10k_bus_params bus_params;
1188 int ret;
1189
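	/* FW_READY from the QMI layer is the point where the core is
	 * registered, with the soc version reported by the firmware used
	 * as the chip id; FW_DOWN is accepted but needs no action here.
	 */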
1190 switch (type) {
1191 case ATH10K_QMI_EVENT_FW_READY_IND:
1192 bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1193 bus_params.chip_id = ar_snoc->target_info.soc_version;
1194 ret = ath10k_core_register(ar, &bus_params);
1195 if (ret) {
1196 ath10k_err(ar, "failed to register driver core: %d\n",
1197 ret);
1198 }
1199 break;
1200 case ATH10K_QMI_EVENT_FW_DOWN_IND:
1201 break;
1202 default:
1203 ath10k_err(ar, "invalid fw indication: %llx\n", type);
1204 return -EINVAL;
1205 }
1206
1207 return 0;
1208}
1209
1210static int ath10k_snoc_setup_resource(struct ath10k *ar)
1211{
1212 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1281,9 +1531,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
1531 struct ath10k_snoc *ar_snoc;
1532 struct device *dev;
1533 struct ath10k *ar;
1534 u32 msa_size;
1535 int ret;
1536 u32 i;
1286 struct ath10k_bus_params bus_params;
1537
1538 of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
1539 if (!of_id) {
@@ -1313,6 +1563,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
1563 ar_snoc->ar = ar;
1564 ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1565 ar->ce_priv = &ar_snoc->ce;
1566 msa_size = drv_data->msa_size;
1567
1568 ret = ath10k_snoc_resource_init(ar);
1569 if (ret) {
@@ -1351,12 +1602,10 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
1602 goto err_free_irq;
1603 }
1604
1354 bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1355 bus_params.chip_id = drv_data->hw_rev;
1356 ret = ath10k_core_register(ar, &bus_params);
1605 ret = ath10k_qmi_init(ar, msa_size);
1606 if (ret) {
1358 ath10k_err(ar, "failed to register driver core: %d\n", ret);
1359 goto err_hw_power_off;
1607 ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
1608 goto err_core_destroy;
1609 }
1610
1611 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
@@ -1364,9 +1613,6 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
1613
1614 return 0;
1615
1367err_hw_power_off:
1368 ath10k_hw_power_off(ar);
1369
1616err_free_irq:
1617 ath10k_snoc_free_irq(ar);
1618
@@ -1388,6 +1634,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
1634 ath10k_hw_power_off(ar);
1635 ath10k_snoc_free_irq(ar);
1636 ath10k_snoc_release_resource(ar);
1637 ath10k_qmi_deinit(ar);
1638 ath10k_core_destroy(ar);
1639
1640 return 0;
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index f9e530189d48..e1d2d6675556 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -19,10 +19,12 @@
19
20#include "hw.h"
21#include "ce.h"
22#include "qmi.h"
23
24struct ath10k_snoc_drv_priv {
25 enum ath10k_hw_rev hw_rev;
26 u64 dma_mask;
27 u32 msa_size;
28};
29
30struct snoc_state {
@@ -81,6 +83,7 @@ struct ath10k_snoc {
83 struct timer_list rx_post_retry;
84 struct ath10k_wcn3990_vreg_info *vreg;
85 struct ath10k_wcn3990_clk_info *clk;
86 struct ath10k_qmi *qmi;
87};
88
89static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
@@ -90,5 +93,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
93
94void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
95u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
96int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
97
98#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 7fd63bbf8e24..7978a7783f90 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -210,6 +210,9 @@ struct wmi_ops {
210 u32 fw_feature_bitmap);
211 int (*get_vdev_subtype)(struct ath10k *ar,
212 enum wmi_vdev_subtype subtype);
213 struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
214 u32 vdev_id,
215 struct wmi_pno_scan_req *pno_scan);
216 struct sk_buff *(*gen_pdev_bss_chan_info_req)
217 (struct ath10k *ar,
218 enum wmi_bss_survey_req_type type);
@@ -1361,6 +1364,24 @@ ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1364}
1365
1366static inline int
1367ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
1368 struct wmi_pno_scan_req *pno_scan)
1369{
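	/* Usual wmi-ops dispatch: -EOPNOTSUPP when the backend lacks the
	 * op, otherwise build the command skb and send it under the NLO
	 * config command id.
	 */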
1370 struct sk_buff *skb;
1371 u32 cmd_id;
1372
1373 if (!ar->wmi.ops->gen_wow_config_pno)
1374 return -EOPNOTSUPP;
1375
1376 skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
1377 if (IS_ERR(skb))
1378 return PTR_ERR(skb);
1379
1380 cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
1381 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1382}
1383
1384static inline int
1385ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1386 enum wmi_tdls_state state)
1387{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 731ceaed4d5a..bab8b2527fb8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3441,6 +3441,192 @@ ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
3441 return skb;
3442}
3443
3444/* Request FW to start PNO operation */
3445static struct sk_buff *
3446ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
3447 u32 vdev_id,
3448 struct wmi_pno_scan_req *pno)
3449{
3450 struct nlo_configured_parameters *nlo_list;
3451 struct wmi_tlv_wow_nlo_config_cmd *cmd;
3452 struct wmi_tlv *tlv;
3453 struct sk_buff *skb;
3454 __le32 *channel_list;
3455 u16 tlv_len;
3456 size_t len;
3457 void *ptr;
3458 u32 i;
3459
3460 len = sizeof(*tlv) + sizeof(*cmd) +
3461 sizeof(*tlv) +
3462 /* TLV place holder for array of structures
3463 * nlo_configured_parameters(nlo_list)
3464 */
3465 sizeof(*tlv);
3466 /* TLV place holder for array of uint32 channel_list */
3467
3468 len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
3469 WMI_NLO_MAX_CHAN);
3470 len += sizeof(struct nlo_configured_parameters) *
3471 min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
3472
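	/* The buffer built below is the fixed nlo_config cmd TLV, then an
	 * ARRAY_STRUCT TLV carrying nlo_list[], then an ARRAY_UINT32 TLV
	 * carrying channel_list[]; the length above accounts for all three.
	 */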
3473 skb = ath10k_wmi_alloc_skb(ar, len);
3474 if (!skb)
3475 return ERR_PTR(-ENOMEM);
3476
3477 ptr = (void *)skb->data;
3478 tlv = ptr;
3479 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
3480 tlv->len = __cpu_to_le16(sizeof(*cmd));
3481 cmd = (void *)tlv->value;
3482
3483 /* wmi_tlv_wow_nlo_config_cmd parameters */
3484 cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
3485 cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
3486
3487 /* current FW does not support min-max range for dwell time */
3488 cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
3489 cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
3490
3491 if (pno->do_passive_scan)
3492 cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
3493
3494 /* copy scan interval */
3495 cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
3496 cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
3497 cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
3498 cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
3499
3500 if (pno->enable_pno_scan_randomization) {
3501 cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
3502 WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
3503 ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
3504 ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
3505 }
3506
3507 ptr += sizeof(*tlv);
3508 ptr += sizeof(*cmd);
3509
3510 /* nlo_configured_parameters(nlo_list) */
3511 cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
3512 WMI_NLO_MAX_SSIDS));
3513 tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
3514 sizeof(struct nlo_configured_parameters);
3515
3516 tlv = ptr;
3517 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3518 tlv->len = __cpu_to_le16(tlv_len);
3519
3520 ptr += sizeof(*tlv);
3521 nlo_list = ptr;
3522 for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
3523 tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
3524 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3525 tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
3526 sizeof(*tlv));
3527
3528 /* copy the SSID and its length */
3529 nlo_list[i].ssid.valid = __cpu_to_le32(true);
3530 nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
3531 memcpy(nlo_list[i].ssid.ssid.ssid,
3532 pno->a_networks[i].ssid.ssid,
3533 __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
3534
3535 /* copy rssi threshold */
3536 if (pno->a_networks[i].rssi_threshold &&
3537 pno->a_networks[i].rssi_threshold > -300) {
3538 nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
3539 nlo_list[i].rssi_cond.rssi =
3540 __cpu_to_le32(pno->a_networks[i].rssi_threshold);
3541 }
3542
3543 nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
3544 nlo_list[i].bcast_nw_type.bcast_nw_type =
3545 __cpu_to_le32(pno->a_networks[i].bcast_nw_type);
3546 }
3547
3548 ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);
3549
3550 /* copy channel info */
3551 cmd->num_of_channels = __cpu_to_le32(min_t(u8,
3552 pno->a_networks[0].channel_count,
3553 WMI_NLO_MAX_CHAN));
3554
3555 tlv = ptr;
3556 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3557 tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
3558 sizeof(u_int32_t));
3559 ptr += sizeof(*tlv);
3560
3561 channel_list = (__le32 *)ptr;
3562 for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
3563 channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);
3564
3565 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
3566 vdev_id);
3567
3568 return skb;
3569}
3570
3571/* Request FW to stop ongoing PNO operation */
3572static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
3573 u32 vdev_id)
3574{
3575 struct wmi_tlv_wow_nlo_config_cmd *cmd;
3576 struct wmi_tlv *tlv;
3577 struct sk_buff *skb;
3578 void *ptr;
3579 size_t len;
3580
3581 len = sizeof(*tlv) + sizeof(*cmd) +
3582 sizeof(*tlv) +
3583 /* TLV place holder for array of structures
3584 * nlo_configured_parameters(nlo_list)
3585 */
3586 sizeof(*tlv);
3587 /* TLV place holder for array of uint32 channel_list */
3588 skb = ath10k_wmi_alloc_skb(ar, len);
3589 if (!skb)
3590 return ERR_PTR(-ENOMEM);
3591
3592 ptr = (void *)skb->data;
3593 tlv = ptr;
3594 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
3595 tlv->len = __cpu_to_le16(sizeof(*cmd));
3596 cmd = (void *)tlv->value;
3597
3598 cmd->vdev_id = __cpu_to_le32(vdev_id);
3599 cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
3600
3601 ptr += sizeof(*tlv);
3602 ptr += sizeof(*cmd);
3603
3604 /* nlo_configured_parameters(nlo_list) */
3605 tlv = ptr;
3606 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3607 tlv->len = __cpu_to_le16(0);
3608
3609 ptr += sizeof(*tlv);
3610
3611 /* channel list */
3612 tlv = ptr;
3613 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3614 tlv->len = __cpu_to_le16(0);
3615
3616 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
3617 return skb;
3618}
3619
3620static struct sk_buff *
3621ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
3622 struct wmi_pno_scan_req *pno_scan)
3623{
3624 if (pno_scan->enable)
3625 return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
3626 else
3627 return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
3628}
3629
3630static struct sk_buff *
3631ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
3632{
@@ -3973,6 +4159,7 @@ static const struct wmi_ops wmi_tlv_ops = {
4159 .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
4160 .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
4161 .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
4162 .gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
4163 .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
4164 .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
4165 .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 4f0c20c90642..92c25f51bf86 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -2146,6 +2146,260 @@ struct wmi_tlv_tdls_peer_event {
2146
2147void ath10k_wmi_tlv_attach(struct ath10k *ar);
2148
2149enum wmi_nlo_auth_algorithm {
2150 WMI_NLO_AUTH_ALGO_80211_OPEN = 1,
2151 WMI_NLO_AUTH_ALGO_80211_SHARED_KEY = 2,
2152 WMI_NLO_AUTH_ALGO_WPA = 3,
2153 WMI_NLO_AUTH_ALGO_WPA_PSK = 4,
2154 WMI_NLO_AUTH_ALGO_WPA_NONE = 5,
2155 WMI_NLO_AUTH_ALGO_RSNA = 6,
2156 WMI_NLO_AUTH_ALGO_RSNA_PSK = 7,
2157};
2158
2159enum wmi_nlo_cipher_algorithm {
2160 WMI_NLO_CIPHER_ALGO_NONE = 0x00,
2161 WMI_NLO_CIPHER_ALGO_WEP40 = 0x01,
2162 WMI_NLO_CIPHER_ALGO_TKIP = 0x02,
2163 WMI_NLO_CIPHER_ALGO_CCMP = 0x04,
2164 WMI_NLO_CIPHER_ALGO_WEP104 = 0x05,
2165 WMI_NLO_CIPHER_ALGO_BIP = 0x06,
2166 WMI_NLO_CIPHER_ALGO_RSN_USE_GROUP = 0x100,
2167 WMI_NLO_CIPHER_ALGO_WEP = 0x101,
2168};
2169
2170/* SSID broadcast type passed in NLO params */
2171enum wmi_nlo_ssid_bcastnwtype {
2172 WMI_NLO_BCAST_UNKNOWN = 0,
2173 WMI_NLO_BCAST_NORMAL = 1,
2174 WMI_NLO_BCAST_HIDDEN = 2,
2175};
2176
2177#define WMI_NLO_MAX_SSIDS 16
2178#define WMI_NLO_MAX_CHAN 48
2179
2180#define WMI_NLO_CONFIG_STOP (0x1 << 0)
2181#define WMI_NLO_CONFIG_START (0x1 << 1)
2182#define WMI_NLO_CONFIG_RESET (0x1 << 2)
2183#define WMI_NLO_CONFIG_SLOW_SCAN (0x1 << 4)
2184#define WMI_NLO_CONFIG_FAST_SCAN (0x1 << 5)
2185#define WMI_NLO_CONFIG_SSID_HIDE_EN (0x1 << 6)
2186
2187/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
2188 * Only one of them can be enabled at a given time.
2189 */
2190#define WMI_NLO_CONFIG_ENLO (0x1 << 7)
2191#define WMI_NLO_CONFIG_SCAN_PASSIVE (0x1 << 8)
2192#define WMI_NLO_CONFIG_ENLO_RESET (0x1 << 9)
2193#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ (0x1 << 10)
2194#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ (0x1 << 11)
2195#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ (0x1 << 12)
2196#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG (0x1 << 13)
2197
2198/* Whether directed scan needs to be performed (for hidden SSIDs) */
2199#define WMI_ENLO_FLAG_DIRECTED_SCAN 1
2200
2201/* Whether PNO event shall be triggered if the network is found on A band */
2202#define WMI_ENLO_FLAG_A_BAND 2
2203
2204/* Whether PNO event shall be triggered if the network is found on G band */
2205#define WMI_ENLO_FLAG_G_BAND 4
2206
2207/* Whether strict matching is required (i.e. firmware shall only
2208 * match on the entire SSID)
2209 */
2210#define WMI_ENLO_FLAG_STRICT_MATCH 8
2211
2212/* Code for matching the beacon AUTH IE - additional codes TBD */
2213/* open */
2214#define WMI_ENLO_AUTH_CODE_OPEN 1
2215
2216/* WPA_PSK or WPA2PSK */
2217#define WMI_ENLO_AUTH_CODE_PSK 2
2218
2219/* any EAPOL */
2220#define WMI_ENLO_AUTH_CODE_EAPOL 4
2221
2222struct wmi_nlo_ssid_param {
2223 __le32 valid;
2224 struct wmi_ssid ssid;
2225} __packed;
2226
2227struct wmi_nlo_enc_param {
2228 __le32 valid;
2229 __le32 enc_type;
2230} __packed;
2231
2232struct wmi_nlo_auth_param {
2233 __le32 valid;
2234 __le32 auth_type;
2235} __packed;
2236
2237struct wmi_nlo_bcast_nw_param {
2238 __le32 valid;
2239
2240 /* If WMI_NLO_CONFIG_ENLO is not set, supplicant PNO is enabled
2241 * and this value is a plain true/false. Otherwise EPNO is enabled
2242 * and bcast_nw_type is used as a bit flag holding WMI_ENLO_FLAG_XXX
2243 */
2244 __le32 bcast_nw_type;
2245} __packed;
2246
2247struct wmi_nlo_rssi_param {
2248 __le32 valid;
2249 __le32 rssi;
2250} __packed;
2251
2252struct nlo_configured_parameters {
2253 /* TLV tag and len;*/
2254 __le32 tlv_header;
2255 struct wmi_nlo_ssid_param ssid;
2256 struct wmi_nlo_enc_param enc_type;
2257 struct wmi_nlo_auth_param auth_type;
2258 struct wmi_nlo_rssi_param rssi_cond;
2259
2260 /* indicates if the SSID is hidden or not */
2261 struct wmi_nlo_bcast_nw_param bcast_nw_type;
2262} __packed;
2263
2264/* Support channel prediction for PNO scan after scanning top_k_num channels
2265 * if stationary_threshold is met.
2266 */
2267struct nlo_channel_prediction_cfg {
2268 __le32 tlv_header;
2269
2270 /* Enable or disable this feature. */
2271 __le32 enable;
2272
2273 /* Top K channels will be scanned before deciding whether to further scan
2274 * or stop. Minimum value is 3 and maximum is 5.
2275 */
2276 __le32 top_k_num;
2277
2278 /* Preconfigured stationary threshold.
2279 * A lower value is more conservative, a higher value more aggressive.
2280 * Maximum is 100 and minimum is 0.
2281 */
2282 __le32 stationary_threshold;
2283
2284 /* Periodic full channel scan period, in milliseconds.
2285 * Once full_scan_period_ms has elapsed since the last full scan,
2286 * channel-prediction scanning is suppressed and a full scan is done.
2287 * This helps detect sudden AP power-on or power-off. Value 0 means no
2288 * full scan at all (not recommended).
2289 */
2290 __le32 full_scan_period_ms;
2291} __packed;
2292
2293struct enlo_candidate_score_params_t {
2294 __le32 tlv_header; /* TLV tag and len; */
2295
2296 /* minimum 5GHz RSSI for a BSSID to be considered (units = dBm) */
2297 __le32 min_5ghz_rssi;
2298
2299 /* minimum 2.4GHz RSSI for a BSSID to be considered (units = dBm) */
2300 __le32 min_24ghz_rssi;
2301
2302 /* the maximum score that a network can have before bonuses */
2303 __le32 initial_score_max;
2304
2305 /* current_connection_bonus:
2306 * only report when there is a network's score this much higher
2307 * than the current connection
2308 */
2309 __le32 current_connection_bonus;
2310
2311 /* score bonus for all networks with the same network flag */
2312 __le32 same_network_bonus;
2313
2314 /* score bonus for networks that are not open */
2315 __le32 secure_bonus;
2316
2317 /* 5GHz RSSI score bonus (applied to all 5GHz networks) */
2318 __le32 band_5ghz_bonus;
2319} __packed;
2320
2321struct connected_nlo_bss_band_rssi_pref_t {
2322 __le32 tlv_header; /* TLV tag and len;*/
2323
2324 /* band which needs to get preference over other band
2325 * - see wmi_set_vdev_ie_band enum
2326 */
2327 __le32 band;
2328
2329 /* Amount of RSSI preference (in dB) that can be given to a band */
2330 __le32 rssi_pref;
2331} __packed;
2332
2333struct connected_nlo_rssi_params_t {
2334 __le32 tlv_header; /* TLV tag and len;*/
2335
2336 /* Relative rssi threshold (in dB) by which new BSS should have
2337 * better rssi than the current connected BSS.
2338 */
2339 __le32 relative_rssi;
2340
2341 /* The amount of rssi preference (in dB) that can be given
2342 * to a 5G BSS over 2.4G BSS.
2343 */
2344 __le32 relative_rssi_5g_pref;
2345} __packed;
2346
2347struct wmi_tlv_wow_nlo_config_cmd {
2348 __le32 flags;
2349 __le32 vdev_id;
2350 __le32 fast_scan_max_cycles;
2351 __le32 active_dwell_time;
2352 __le32 passive_dwell_time; /* PDT in msecs */
2353 __le32 probe_bundle_size;
2354
2355 /* ART = IRT */
2356 __le32 rest_time;
2357
2358 /* Max value that can be reached after SBM */
2359 __le32 max_rest_time;
2360
2361 /* SBM */
2362 __le32 scan_backoff_multiplier;
2363
2364 /* SCBM */
2365 __le32 fast_scan_period;
2366
2367 /* specific to windows */
2368 __le32 slow_scan_period;
2369
2370 __le32 no_of_ssids;
2371
2372 __le32 num_of_channels;
2373
2374 /* NLO scan start delay time in milliseconds */
2375 __le32 delay_start_time;
2376
2377 /** MAC Address to use in Probe Req as SA **/
2378 struct wmi_mac_addr mac_addr;
2379
2380 /** Mask on which MAC has to be randomized **/
2381 struct wmi_mac_addr mac_mask;
2382
2383 /** IE bitmap to use in Probe Req **/
2384 __le32 ie_bitmap[8];
2385
2386 /** Number of vendor OUIs. In the TLV vendor_oui[] **/
2387 __le32 num_vendor_oui;
2388
2389 /** Number of connected NLO band preferences **/
2390 __le32 num_cnlo_band_pref;
2391
2392 /* The TLVs will follow.
2393 * nlo_configured_parameters nlo_list[];
2394 * A_UINT32 channel_list[num_of_channels];
2395 * nlo_channel_prediction_cfg ch_prediction_cfg;
2396 * enlo_candidate_score_params candidate_score_params;
2397 * wmi_vendor_oui vendor_oui[num_vendor_oui];
2398 * connected_nlo_rssi_params cnlo_rssi_params;
2399 * connected_nlo_bss_band_rssi_pref cnlo_bss_band_rssi_pref[num_cnlo_band_pref];
2400 */
2401} __packed;
2402
2403struct wmi_tlv_mgmt_tx_cmd {
2404 __le32 vdev_id;
2405 __le32 desc_id;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index f67c52757ea6..f7badd079051 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -7068,6 +7068,63 @@ struct wmi_pdev_set_adaptive_cca_params {
7068 __le32 cca_detect_margin;
7069} __packed;
7070
7071#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
7072#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
7073#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
7074#define WMI_PNO_MAX_NETW_CHANNELS 26
7075#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
7076#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
7077#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
7078
7079/* Size based on the dot11 declaration without extra IEs, as we will not carry those for PNO */
7080#define WMI_PNO_MAX_PB_REQ_SIZE 450
7081
7082#define WMI_PNO_24G_DEFAULT_CH 1
7083#define WMI_PNO_5G_DEFAULT_CH 36
7084
7085#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
7086#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
7087
7088/* SSID broadcast type */
7089enum wmi_SSID_bcast_type {
7090 BCAST_UNKNOWN = 0,
7091 BCAST_NORMAL = 1,
7092 BCAST_HIDDEN = 2,
7093};
7094
7095struct wmi_network_type {
7096 struct wmi_ssid ssid;
7097 u32 authentication;
7098 u32 encryption;
7099 u32 bcast_nw_type;
7100 u8 channel_count;
7101 u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
7102 s32 rssi_threshold;
7103} __packed;
7104
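/* Host-side aggregate filled by ath10k_wmi_pno_check() from a
 * cfg80211_sched_scan_request and serialized by the TLV backend into a
 * wmi_tlv_wow_nlo_config_cmd.
 */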
7105struct wmi_pno_scan_req {
7106 u8 enable;
7107 u8 vdev_id;
7108 u8 uc_networks_count;
7109 struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
7110 u32 fast_scan_period;
7111 u32 slow_scan_period;
7112 u8 fast_scan_max_cycles;
7113
7114 bool do_passive_scan;
7115
7116 u32 delay_start_time;
7117 u32 active_min_time;
7118 u32 active_max_time;
7119 u32 passive_min_time;
7120 u32 passive_max_time;
7121
7122 /* mac address randomization attributes */
7123 u32 enable_pno_scan_randomization;
7124 u8 mac_addr[ETH_ALEN];
7125 u8 mac_addr_mask[ETH_ALEN];
7126} __packed;
7127
7128enum wmi_host_platform_type {
7129 WMI_HOST_PLATFORM_HIGH_PERF,
7130 WMI_HOST_PLATFORM_LOW_PERF,
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index af444dfecae9..51b26b305885 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -180,6 +180,100 @@ static void ath10k_wow_convert_8023_to_80211
180 }
181}
182
183static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id,
184 struct cfg80211_sched_scan_request *nd_config,
185 struct wmi_pno_scan_req *pno)
186{
187 int i, j, ret = 0;
188 u8 ssid_len;
189
190 pno->enable = 1;
191 pno->vdev_id = vdev_id;
192 pno->uc_networks_count = nd_config->n_match_sets;
193
194 if (!pno->uc_networks_count ||
195 pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
196 return -EINVAL;
197
198 if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
199 return -EINVAL;
200
201 /* Filling per profile params */
202 for (i = 0; i < pno->uc_networks_count; i++) {
203 ssid_len = nd_config->match_sets[i].ssid.ssid_len;
204
205 if (ssid_len == 0 || ssid_len > 32)
206 return -EINVAL;
207
208 pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len);
209
210 memcpy(pno->a_networks[i].ssid.ssid,
211 nd_config->match_sets[i].ssid.ssid,
212 nd_config->match_sets[i].ssid.ssid_len);
213 pno->a_networks[i].authentication = 0;
214 pno->a_networks[i].encryption = 0;
215 pno->a_networks[i].bcast_nw_type = 0;
216
217 /* Copy the list of valid channels into the request */
218 pno->a_networks[i].channel_count = nd_config->n_channels;
219 pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
220
221 for (j = 0; j < nd_config->n_channels; j++) {
222 pno->a_networks[i].channels[j] =
223 nd_config->channels[j]->center_freq;
224 }
225 }
226
227 /* set scan to passive if no SSIDs are specified in the request */
228 if (nd_config->n_ssids == 0)
229 pno->do_passive_scan = true;
230 else
231 pno->do_passive_scan = false;
232
233 for (i = 0; i < nd_config->n_ssids; i++) {
234 j = 0;
235 while (j < pno->uc_networks_count) {
236 if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) ==
237 nd_config->ssids[i].ssid_len &&
238 (memcmp(pno->a_networks[j].ssid.ssid,
239 nd_config->ssids[i].ssid,
240 __le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) {
241 pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
242 break;
243 }
244 j++;
245 }
246 }
247
248 if (nd_config->n_scan_plans == 2) {
249 pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
250 pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
251 pno->slow_scan_period =
252 nd_config->scan_plans[1].interval * MSEC_PER_SEC;
253 } else if (nd_config->n_scan_plans == 1) {
254 pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
255 pno->fast_scan_max_cycles = 1;
256 pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
257 } else {
258 ath10k_warn(ar, "invalid number of scan plans %d\n",
259 nd_config->n_scan_plans);
260 }
261
262 if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
263 /* enable mac randomization */
264 pno->enable_pno_scan_randomization = 1;
265 memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
266 memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
267 }
268
269 pno->delay_start_time = nd_config->delay;
270
271 /* Current FW does not support min-max range for dwell time */
272 pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
273 pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
274 return ret;
275}
276
277static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
278 struct cfg80211_wowlan *wowlan)
279{
@@ -213,6 +307,26 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
307
308 if (wowlan->magic_pkt)
309 __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
310
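	/* net-detect: translate the sched scan request into a PNO request
	 * and, only if that validation succeeds, program the firmware and
	 * arm the NLO wakeup event.
	 */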
311 if (wowlan->nd_config) {
312 struct wmi_pno_scan_req *pno;
313 int ret;
314
315 pno = kzalloc(sizeof(*pno), GFP_KERNEL);
316 if (!pno)
317 return -ENOMEM;
318
319 ar->nlo_enabled = true;
320
321 ret = ath10k_wmi_pno_check(ar, arvif->vdev_id,
322 wowlan->nd_config, pno);
323 if (!ret) {
324 ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
325 __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
326 }
327
328 kfree(pno);
329 }
216 break; 330 break;
217 default: 331 default:
218 break; 332 break;
@@ -299,6 +413,51 @@ static int ath10k_wow_set_wakeups(struct ath10k *ar,
299 return 0; 413 return 0;
300} 414}
301 415
416static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif)
417{
418 int ret = 0;
419 struct ath10k *ar = arvif->ar;
420
421 switch (arvif->vdev_type) {
422 case WMI_VDEV_TYPE_STA:
423 if (ar->nlo_enabled) {
424 struct wmi_pno_scan_req *pno;
425
426 pno = kzalloc(sizeof(*pno), GFP_KERNEL);
427 if (!pno)
428 return -ENOMEM;
429
430 pno->enable = 0;
431 ar->nlo_enabled = false;
432 ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
433 kfree(pno);
434 }
435 break;
436 default:
437 break;
438 }
439 return ret;
440}
441
442static int ath10k_wow_nlo_cleanup(struct ath10k *ar)
443{
444 struct ath10k_vif *arvif;
445 int ret = 0;
446
447 lockdep_assert_held(&ar->conf_mutex);
448
449 list_for_each_entry(arvif, &ar->arvifs, list) {
450 ret = ath10k_vif_wow_clean_nlo(arvif);
451 if (ret) {
452 ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n",
453 arvif->vdev_id, ret);
454 return ret;
455 }
456 }
457
458 return 0;
459}
460
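The disable path above relies on a convention worth spelling out: tearing NLO down reuses the same request structure, zero-initialized with enable = 0, rather than a distinct stop command. A hedged userspace reduction of that shape (struct pno_req and send_pno are illustrative, not the WMI symbols):

#include <stdio.h>
#include <stdlib.h>

struct pno_req { int enable; /* remaining fields stay zero */ };

static int send_pno(const struct pno_req *req)
{
	printf("pno enable=%d\n", req->enable);	/* stand-in for the WMI call */
	return 0;
}

int main(void)
{
	struct pno_req *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->enable = 0;	/* explicit for clarity; calloc already zeroed it */
	send_pno(req);		/* firmware sees "configure to off" */
	free(req);
	return 0;
}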
302static int ath10k_wow_enable(struct ath10k *ar) 461static int ath10k_wow_enable(struct ath10k *ar)
303{ 462{
304 int ret; 463 int ret;
@@ -436,6 +595,10 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
436 if (ret) 595 if (ret)
437 ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret); 596 ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
438 597
598 ret = ath10k_wow_nlo_cleanup(ar);
599 if (ret)
600 ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret);
601
439exit: 602exit:
440 if (ret) { 603 if (ret) {
441 switch (ar->state) { 604 switch (ar->state) {
@@ -475,6 +638,11 @@ int ath10k_wow_init(struct ath10k *ar)
475 ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; 638 ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
476 } 639 }
477 640
641 if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
642 ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
643 ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
644 }
645
478 ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; 646 ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
479 ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; 647 ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
480 648
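The NET_DETECT advertisement is gated on the firmware service map, so the wiphy only exposes the feature when the firmware can honor it. A small userspace sketch of the same gating pattern, with illustrative names (the 16-network limit is a stand-in for WMI_PNO_MAX_SUPP_NETWORKS):

#include <stdio.h>

#define SERVICE_NLO		5
#define WOWLAN_NET_DETECT	(1u << 0)

static int test_bit_sketch(int nr, const unsigned long *map)
{
	return (map[nr / (8 * sizeof(long))] >> (nr % (8 * sizeof(long)))) & 1;
}

int main(void)
{
	unsigned long svc_map[1] = { 1UL << SERVICE_NLO };
	unsigned int wowlan_flags = 0;
	unsigned int max_nd_match_sets = 0;

	if (test_bit_sketch(SERVICE_NLO, svc_map)) {
		wowlan_flags |= WOWLAN_NET_DETECT;
		max_nd_match_sets = 16;	/* illustrative limit */
	}
	printf("flags=%#x nd=%u\n", wowlan_flags, max_nd_match_sets);
	return 0;
}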
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index a3668433dc02..988222cea9df 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -755,11 +755,11 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
755 } 755 }
756 756
757 if (main_ant_conf == rx_ant_conf) { 757 if (main_ant_conf == rx_ant_conf) {
758 ANT_STAT_INC(ANT_MAIN, recv_cnt); 758 ANT_STAT_INC(sc, ANT_MAIN, recv_cnt);
759 ANT_LNA_INC(ANT_MAIN, rx_ant_conf); 759 ANT_LNA_INC(sc, ANT_MAIN, rx_ant_conf);
760 } else { 760 } else {
761 ANT_STAT_INC(ANT_ALT, recv_cnt); 761 ANT_STAT_INC(sc, ANT_ALT, recv_cnt);
762 ANT_LNA_INC(ANT_ALT, rx_ant_conf); 762 ANT_LNA_INC(sc, ANT_ALT, rx_ant_conf);
763 } 763 }
764 764
765 /* Short scan check */ 765 /* Short scan check */
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 6a43d26276e5..6aa3ec024ffa 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -624,9 +624,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
624 tsf, freq, chan_type); 624 tsf, freq, chan_type);
625 625
626 if (ret == 0) 626 if (ret == 0)
627 RX_STAT_INC(rx_spectral_sample_good); 627 RX_STAT_INC(sc, rx_spectral_sample_good);
628 else 628 else
629 RX_STAT_INC(rx_spectral_sample_err); 629 RX_STAT_INC(sc, rx_spectral_sample_err);
630 630
631 memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN); 631 memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
632 632
@@ -642,9 +642,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
642 tsf, freq, chan_type); 642 tsf, freq, chan_type);
643 643
644 if (ret == 0) 644 if (ret == 0)
645 RX_STAT_INC(rx_spectral_sample_good); 645 RX_STAT_INC(sc, rx_spectral_sample_good);
646 else 646 else
647 RX_STAT_INC(rx_spectral_sample_err); 647 RX_STAT_INC(sc, rx_spectral_sample_err);
648 648
649 /* Mix the received bins to the /dev/random 649 /* Mix the received bins to the /dev/random
650 * pool 650 * pool
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c871b7ec5011..4399e9ad058f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -785,35 +785,35 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
785{ 785{
786 int qnum = txq->axq_qnum; 786 int qnum = txq->axq_qnum;
787 787
788 TX_STAT_INC(qnum, tx_pkts_all); 788 TX_STAT_INC(sc, qnum, tx_pkts_all);
789 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len; 789 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
790 790
791 if (bf_isampdu(bf)) { 791 if (bf_isampdu(bf)) {
792 if (flags & ATH_TX_ERROR) 792 if (flags & ATH_TX_ERROR)
793 TX_STAT_INC(qnum, a_xretries); 793 TX_STAT_INC(sc, qnum, a_xretries);
794 else 794 else
795 TX_STAT_INC(qnum, a_completed); 795 TX_STAT_INC(sc, qnum, a_completed);
796 } else { 796 } else {
797 if (ts->ts_status & ATH9K_TXERR_XRETRY) 797 if (ts->ts_status & ATH9K_TXERR_XRETRY)
798 TX_STAT_INC(qnum, xretries); 798 TX_STAT_INC(sc, qnum, xretries);
799 else 799 else
800 TX_STAT_INC(qnum, completed); 800 TX_STAT_INC(sc, qnum, completed);
801 } 801 }
802 802
803 if (ts->ts_status & ATH9K_TXERR_FILT) 803 if (ts->ts_status & ATH9K_TXERR_FILT)
804 TX_STAT_INC(qnum, txerr_filtered); 804 TX_STAT_INC(sc, qnum, txerr_filtered);
805 if (ts->ts_status & ATH9K_TXERR_FIFO) 805 if (ts->ts_status & ATH9K_TXERR_FIFO)
806 TX_STAT_INC(qnum, fifo_underrun); 806 TX_STAT_INC(sc, qnum, fifo_underrun);
807 if (ts->ts_status & ATH9K_TXERR_XTXOP) 807 if (ts->ts_status & ATH9K_TXERR_XTXOP)
808 TX_STAT_INC(qnum, xtxop); 808 TX_STAT_INC(sc, qnum, xtxop);
809 if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED) 809 if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
810 TX_STAT_INC(qnum, timer_exp); 810 TX_STAT_INC(sc, qnum, timer_exp);
811 if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR) 811 if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
812 TX_STAT_INC(qnum, desc_cfg_err); 812 TX_STAT_INC(sc, qnum, desc_cfg_err);
813 if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN) 813 if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
814 TX_STAT_INC(qnum, data_underrun); 814 TX_STAT_INC(sc, qnum, data_underrun);
815 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN) 815 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
816 TX_STAT_INC(qnum, delim_underrun); 816 TX_STAT_INC(sc, qnum, delim_underrun);
817} 817}
818 818
819void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) 819void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 249f8141cd00..79607db14387 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -25,17 +25,17 @@ struct ath_buf;
25struct fft_sample_tlv; 25struct fft_sample_tlv;
26 26
27#ifdef CONFIG_ATH9K_DEBUGFS 27#ifdef CONFIG_ATH9K_DEBUGFS
28#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++ 28#define TX_STAT_INC(sc, q, c) do { (sc)->debug.stats.txstats[q].c++; } while (0)
29#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++) 29#define RX_STAT_INC(sc, c) do { (sc)->debug.stats.rxstats.c++; } while (0)
30#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++ 30#define RESET_STAT_INC(sc, type) do { (sc)->debug.stats.reset[type]++; } while (0)
31#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++ 31#define ANT_STAT_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].c++; } while (0)
32#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++; 32#define ANT_LNA_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].lna_recv_cnt[c]++; } while (0)
33#else 33#else
34#define TX_STAT_INC(q, c) do { } while (0) 34#define TX_STAT_INC(sc, q, c) do { (void)(sc); } while (0)
35#define RX_STAT_INC(c) 35#define RX_STAT_INC(sc, c) do { (void)(sc); } while (0)
36#define RESET_STAT_INC(sc, type) do { } while (0) 36#define RESET_STAT_INC(sc, type) do { (void)(sc); } while (0)
37#define ANT_STAT_INC(i, c) do { } while (0) 37#define ANT_STAT_INC(sc, i, c) do { (void)(sc); } while (0)
38#define ANT_LNA_INC(i, c) do { } while (0) 38#define ANT_LNA_INC(sc, i, c) do { (void)(sc); } while (0)
39#endif 39#endif
40 40
41enum ath_reset_type { 41enum ath_reset_type {
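Two details of the new macro shape are worth noting: the do { } while (0) wrapper keeps each macro safe in un-braced if/else arms, and the (void)(sc) in the stub variants keeps the argument nominally used so builds without CONFIG_ATH9K_DEBUGFS do not trip unused-variable warnings. A self-contained reduction of the pattern (struct names are illustrative):

#include <stdio.h>

struct stats { int tx_pkts_all; };
struct softc { struct stats st; };

#ifdef DEBUG_STATS
#define TX_STAT_INC(sc, c) do { (sc)->st.c++; } while (0)
#else
#define TX_STAT_INC(sc, c) do { (void)(sc); } while (0)	/* arg stays "used" */
#endif

int main(void)
{
	struct softc sc = { { 0 } };

	if (1)
		TX_STAT_INC(&sc, tx_pkts_all);	/* safe without braces */
	else
		printf("never\n");

	printf("tx_pkts_all=%d\n", sc.st.tx_pkts_all);
	return 0;
}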
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c85f613e8ceb..1e3b5f4a4cf9 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -809,7 +809,7 @@ static void ath9k_tx(struct ieee80211_hw *hw,
809 809
810 if (ath_tx_start(hw, skb, &txctl) != 0) { 810 if (ath_tx_start(hw, skb, &txctl) != 0) {
811 ath_dbg(common, XMIT, "TX failed\n"); 811 ath_dbg(common, XMIT, "TX failed\n");
812 TX_STAT_INC(txctl.txq->axq_qnum, txfailed); 812 TX_STAT_INC(sc, txctl.txq->axq_qnum, txfailed);
813 goto exit; 813 goto exit;
814 } 814 }
815 815
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a8ac42c96d71..30d1bd832d90 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -829,7 +829,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
829 * Discard zero-length packets and packets smaller than an ACK 829 * Discard zero-length packets and packets smaller than an ACK
830 */ 830 */
831 if (rx_stats->rs_datalen < 10) { 831 if (rx_stats->rs_datalen < 10) {
832 RX_STAT_INC(rx_len_err); 832 RX_STAT_INC(sc, rx_len_err);
833 goto corrupt; 833 goto corrupt;
834 } 834 }
835 835
@@ -839,7 +839,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
839 * those frames. 839 * those frames.
840 */ 840 */
841 if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { 841 if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
842 RX_STAT_INC(rx_len_err); 842 RX_STAT_INC(sc, rx_len_err);
843 goto corrupt; 843 goto corrupt;
844 } 844 }
845 845
@@ -880,7 +880,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
880 } else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED && 880 } else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
881 ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, 881 ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
882 rx_status->mactime)) { 882 rx_status->mactime)) {
883 RX_STAT_INC(rx_spectral); 883 RX_STAT_INC(sc, rx_spectral);
884 } 884 }
885 return -EINVAL; 885 return -EINVAL;
886 } 886 }
@@ -898,7 +898,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
898 spin_unlock_bh(&sc->chan_lock); 898 spin_unlock_bh(&sc->chan_lock);
899 899
900 if (ath_is_mybeacon(common, hdr)) { 900 if (ath_is_mybeacon(common, hdr)) {
901 RX_STAT_INC(rx_beacons); 901 RX_STAT_INC(sc, rx_beacons);
902 rx_stats->is_mybeacon = true; 902 rx_stats->is_mybeacon = true;
903 } 903 }
904 904
@@ -915,7 +915,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
915 */ 915 */
916 ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", 916 ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
917 rx_stats->rs_rate); 917 rx_stats->rs_rate);
918 RX_STAT_INC(rx_rate_err); 918 RX_STAT_INC(sc, rx_rate_err);
919 return -EINVAL; 919 return -EINVAL;
920 } 920 }
921 921
@@ -1136,7 +1136,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1136 * skb and put it at the tail of the sc->rx.rxbuf list for 1136 * skb and put it at the tail of the sc->rx.rxbuf list for
1137 * processing. */ 1137 * processing. */
1138 if (!requeue_skb) { 1138 if (!requeue_skb) {
1139 RX_STAT_INC(rx_oom_err); 1139 RX_STAT_INC(sc, rx_oom_err);
1140 goto requeue_drop_frag; 1140 goto requeue_drop_frag;
1141 } 1141 }
1142 1142
@@ -1164,7 +1164,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1164 rxs, decrypt_error); 1164 rxs, decrypt_error);
1165 1165
1166 if (rs.rs_more) { 1166 if (rs.rs_more) {
1167 RX_STAT_INC(rx_frags); 1167 RX_STAT_INC(sc, rx_frags);
1168 /* 1168 /*
1169 * rs_more indicates chained descriptors which can be 1169 * rs_more indicates chained descriptors which can be
1170 * used to link buffers together for a sort of 1170 * used to link buffers together for a sort of
@@ -1174,7 +1174,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1174 /* too many fragments - cannot handle frame */ 1174 /* too many fragments - cannot handle frame */
1175 dev_kfree_skb_any(sc->rx.frag); 1175 dev_kfree_skb_any(sc->rx.frag);
1176 dev_kfree_skb_any(skb); 1176 dev_kfree_skb_any(skb);
1177 RX_STAT_INC(rx_too_many_frags_err); 1177 RX_STAT_INC(sc, rx_too_many_frags_err);
1178 skb = NULL; 1178 skb = NULL;
1179 } 1179 }
1180 sc->rx.frag = skb; 1180 sc->rx.frag = skb;
@@ -1186,7 +1186,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1186 1186
1187 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { 1187 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
1188 dev_kfree_skb(skb); 1188 dev_kfree_skb(skb);
1189 RX_STAT_INC(rx_oom_err); 1189 RX_STAT_INC(sc, rx_oom_err);
1190 goto requeue_drop_frag; 1190 goto requeue_drop_frag;
1191 } 1191 }
1192 1192
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 43b6c8508e49..25b3fc82d4ac 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -391,7 +391,7 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
391 struct ieee80211_hdr *hdr; 391 struct ieee80211_hdr *hdr;
392 int prev = fi->retries; 392 int prev = fi->retries;
393 393
394 TX_STAT_INC(txq->axq_qnum, a_retries); 394 TX_STAT_INC(sc, txq->axq_qnum, a_retries);
395 fi->retries += count; 395 fi->retries += count;
396 396
397 if (prev > 0) 397 if (prev > 0)
@@ -1105,7 +1105,7 @@ finish:
1105 al = get_frame_info(bf->bf_mpdu)->framelen; 1105 al = get_frame_info(bf->bf_mpdu)->framelen;
1106 bf->bf_state.bf_type = BUF_AMPDU; 1106 bf->bf_state.bf_type = BUF_AMPDU;
1107 } else { 1107 } else {
1108 TX_STAT_INC(txq->axq_qnum, a_aggr); 1108 TX_STAT_INC(sc, txq->axq_qnum, a_aggr);
1109 } 1109 }
1110 1110
1111 return al; 1111 return al;
@@ -1727,7 +1727,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1727 bf_tail = bf; 1727 bf_tail = bf;
1728 nframes--; 1728 nframes--;
1729 sent++; 1729 sent++;
1730 TX_STAT_INC(txq->axq_qnum, a_queued_hw); 1730 TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw);
1731 1731
1732 if (an->sta && skb_queue_empty(&tid->retry_q)) 1732 if (an->sta && skb_queue_empty(&tid->retry_q))
1733 ieee80211_sta_set_buffered(an->sta, i, false); 1733 ieee80211_sta_set_buffered(an->sta, i, false);
@@ -2110,14 +2110,14 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
2110 } 2110 }
2111 2111
2112 if (puttxbuf) { 2112 if (puttxbuf) {
2113 TX_STAT_INC(txq->axq_qnum, puttxbuf); 2113 TX_STAT_INC(sc, txq->axq_qnum, puttxbuf);
2114 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 2114 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
2115 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n", 2115 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
2116 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 2116 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
2117 } 2117 }
2118 2118
2119 if (!edma || sc->tx99_state) { 2119 if (!edma || sc->tx99_state) {
2120 TX_STAT_INC(txq->axq_qnum, txstart); 2120 TX_STAT_INC(sc, txq->axq_qnum, txstart);
2121 ath9k_hw_txstart(ah, txq->axq_qnum); 2121 ath9k_hw_txstart(ah, txq->axq_qnum);
2122 } 2122 }
2123 2123
@@ -2154,7 +2154,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
2154 bf->bf_lastbf = bf; 2154 bf->bf_lastbf = bf;
2155 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 2155 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
2156 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 2156 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
2157 TX_STAT_INC(txq->axq_qnum, queued); 2157 TX_STAT_INC(sc, txq->axq_qnum, queued);
2158} 2158}
2159 2159
2160static void setup_frame_info(struct ieee80211_hw *hw, 2160static void setup_frame_info(struct ieee80211_hw *hw,
@@ -2486,7 +2486,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2486 ath_txq_lock(sc, txctl.txq); 2486 ath_txq_lock(sc, txctl.txq);
2487 ath_tx_fill_desc(sc, bf, txctl.txq, 0); 2487 ath_tx_fill_desc(sc, bf, txctl.txq, 0);
2488 ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false); 2488 ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
2489 TX_STAT_INC(txctl.txq->axq_qnum, queued); 2489 TX_STAT_INC(sc, txctl.txq->axq_qnum, queued);
2490 ath_txq_unlock(sc, txctl.txq); 2490 ath_txq_unlock(sc, txctl.txq);
2491} 2491}
2492 2492
@@ -2699,7 +2699,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2699 if (status == -EINPROGRESS) 2699 if (status == -EINPROGRESS)
2700 break; 2700 break;
2701 2701
2702 TX_STAT_INC(txq->axq_qnum, txprocdesc); 2702 TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
2703 2703
2704 /* 2704 /*
2705 * Remove ath_buf's of the same transmit unit from txq, 2705 * Remove ath_buf's of the same transmit unit from txq,
@@ -2778,7 +2778,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2778 2778
2779 ath_txq_lock(sc, txq); 2779 ath_txq_lock(sc, txq);
2780 2780
2781 TX_STAT_INC(txq->axq_qnum, txprocdesc); 2781 TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
2782 2782
2783 fifo_list = &txq->txq_fifo[txq->txq_tailidx]; 2783 fifo_list = &txq->txq_fifo[txq->txq_tailidx];
2784 if (list_empty(fifo_list)) { 2784 if (list_empty(fifo_list)) {
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 66ffae2de86e..aa50813a0595 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -416,8 +416,8 @@ static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
416 return 0; 416 return 0;
417} 417}
418 418
419DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get, 419DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
420 wil_debugfs_iomem_x32_set, "0x%08llx\n"); 420 wil_debugfs_iomem_x32_set, "0x%08llx\n");
421 421
422static struct dentry *wil_debugfs_create_iomem_x32(const char *name, 422static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
423 umode_t mode, 423 umode_t mode,
@@ -432,7 +432,8 @@ static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
432 data->wil = wil; 432 data->wil = wil;
433 data->offset = value; 433 data->offset = value;
434 434
435 file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32); 435 file = debugfs_create_file_unsafe(name, mode, parent, data,
436 &fops_iomem_x32);
436 if (!IS_ERR_OR_NULL(file)) 437 if (!IS_ERR_OR_NULL(file))
437 wil->dbg_data.iomem_data_count++; 438 wil->dbg_data.iomem_data_count++;
438 439
@@ -451,14 +452,15 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
451 return 0; 452 return 0;
452} 453}
453 454
454DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get, 455DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
455 wil_debugfs_ulong_set, "0x%llx\n"); 456 wil_debugfs_ulong_set, "0x%llx\n");
456 457
457static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode, 458static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
458 struct dentry *parent, 459 struct dentry *parent,
459 ulong *value) 460 ulong *value)
460{ 461{
461 return debugfs_create_file(name, mode, parent, value, &wil_fops_ulong); 462 return debugfs_create_file_unsafe(name, mode, parent, value,
463 &wil_fops_ulong);
462} 464}
463 465
464/** 466/**
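For context on the conversion: DEFINE_DEBUGFS_ATTRIBUTE() generates fops whose handlers take debugfs_file_get()/debugfs_file_put() references themselves, which is what makes it safe to create the file with debugfs_create_file_unsafe() and skip the heavier full-proxy fops that debugfs_create_file() installs. A minimal kernel-style sketch of the pairing, with illustrative names (not the wil6210 code):

#include <linux/debugfs.h>

static u32 example_value;

static int example_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

static int example_set(void *data, u64 val)
{
	*(u32 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "0x%08llx\n");

/* ... debugfs_create_file_unsafe("example", 0600, parent,
 *				   &example_value, &example_fops); */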
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 6255fb6d97a7..81ff558046a8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
502 } 502 }
503 503
504 spin_lock_bh(&wl->lock); 504 spin_lock_bh(&wl->lock);
505 wl->wlc->vif = vif;
505 wl->mute_tx = false; 506 wl->mute_tx = false;
506 brcms_c_mute(wl->wlc, false); 507 brcms_c_mute(wl->wlc, false);
507 if (vif->type == NL80211_IFTYPE_STATION) 508 if (vif->type == NL80211_IFTYPE_STATION)
@@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
519static void 520static void
520brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 521brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
521{ 522{
523 struct brcms_info *wl = hw->priv;
524
525 spin_lock_bh(&wl->lock);
526 wl->wlc->vif = NULL;
527 spin_unlock_bh(&wl->lock);
522} 528}
523 529
524static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) 530static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
937 spin_unlock_bh(&wl->lock); 943 spin_unlock_bh(&wl->lock);
938} 944}
939 945
946static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
947 struct ieee80211_sta *sta, bool set)
948{
949 struct brcms_info *wl = hw->priv;
950 struct sk_buff *beacon = NULL;
951 u16 tim_offset = 0;
952
953 spin_lock_bh(&wl->lock);
954 if (wl->wlc->vif)
955 beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
956 &tim_offset, NULL);
957 if (beacon)
958 brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
959 wl->wlc->vif->bss_conf.dtim_period);
960 spin_unlock_bh(&wl->lock);
961
962 return 0;
963}
964
940static const struct ieee80211_ops brcms_ops = { 965static const struct ieee80211_ops brcms_ops = {
941 .tx = brcms_ops_tx, 966 .tx = brcms_ops_tx,
942 .start = brcms_ops_start, 967 .start = brcms_ops_start,
@@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = {
955 .flush = brcms_ops_flush, 980 .flush = brcms_ops_flush,
956 .get_tsf = brcms_ops_get_tsf, 981 .get_tsf = brcms_ops_get_tsf,
957 .set_tsf = brcms_ops_set_tsf, 982 .set_tsf = brcms_ops_set_tsf,
983 .set_tim = brcms_ops_beacon_set_tim,
958}; 984};
959 985
960void brcms_dpc(unsigned long data) 986void brcms_dpc(unsigned long data)
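The new vif bookkeeping follows a publish/retire-under-lock discipline: add_interface stores the pointer and remove_interface clears it under wl->lock, the same lock the set_tim path takes before dereferencing it, so the TIM rebuild never uses a stale vif. A userspace reduction of the rule (a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static const char *vif;	/* stand-in for wl->wlc->vif */

static void add_interface(const char *v)
{
	pthread_mutex_lock(&lock);
	vif = v;		/* publish */
	pthread_mutex_unlock(&lock);
}

static void remove_interface(void)
{
	pthread_mutex_lock(&lock);
	vif = NULL;		/* retire */
	pthread_mutex_unlock(&lock);
}

static void set_tim(void)
{
	pthread_mutex_lock(&lock);
	if (vif)		/* only rebuild the beacon while the vif exists */
		printf("rebuild TIM for %s\n", vif);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	add_interface("sta0");
	set_tim();
	remove_interface();
	set_tim();		/* safely skipped */
	return 0;
}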
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
index c4d135cff04a..9f76b880814e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
@@ -563,6 +563,7 @@ struct brcms_c_info {
563 563
564 struct wiphy *wiphy; 564 struct wiphy *wiphy;
565 struct scb pri_scb; 565 struct scb pri_scb;
566 struct ieee80211_vif *vif;
566 567
567 struct sk_buff *beacon; 568 struct sk_buff *beacon;
568 u16 beacon_tim_offset; 569 u16 beacon_tim_offset;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index c3c638ed0ed7..ce4144a89217 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1297,6 +1297,8 @@ il4965_send_rxon_assoc(struct il_priv *il)
1297 const struct il_rxon_cmd *rxon1 = &il->staging; 1297 const struct il_rxon_cmd *rxon1 = &il->staging;
1298 const struct il_rxon_cmd *rxon2 = &il->active; 1298 const struct il_rxon_cmd *rxon2 = &il->active;
1299 1299
1300 lockdep_assert_held(&il->mutex);
1301
1300 if (rxon1->flags == rxon2->flags && 1302 if (rxon1->flags == rxon2->flags &&
1301 rxon1->filter_flags == rxon2->filter_flags && 1303 rxon1->filter_flags == rxon2->filter_flags &&
1302 rxon1->cck_basic_rates == rxon2->cck_basic_rates && 1304 rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
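lockdep_assert_held() turns the implicit locking contract into a checkable one: with CONFIG_PROVE_LOCKING it splats at runtime if a caller reaches this point without il->mutex, and it compiles away otherwise. A minimal sketch of the annotation in an illustrative context:

#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_mutex);
static int cfg_value;

/* Callers must hold cfg_mutex; lockdep verifies that at runtime. */
static void cfg_update(int v)
{
	lockdep_assert_held(&cfg_mutex);
	cfg_value = v;
}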
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index f44c716b1130..c16757051f16 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1154,14 +1154,14 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
1154} 1154}
1155IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); 1155IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
1156 1156
1157void iwl_fw_error_dump_wk(struct work_struct *work) 1157/* this function assumes dump_start was called beforehand and dump_end will be
1158 * called afterwards
1159 */
1160void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
1158{ 1161{
1159 struct iwl_fw_runtime *fwrt =
1160 container_of(work, struct iwl_fw_runtime, dump.wk.work);
1161 struct iwl_fw_dbg_params params = {0}; 1162 struct iwl_fw_dbg_params params = {0};
1162 1163
1163 if (fwrt->ops && fwrt->ops->dump_start && 1164 if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
1164 fwrt->ops->dump_start(fwrt->ops_ctx))
1165 return; 1165 return;
1166 1166
1167 if (fwrt->ops && fwrt->ops->fw_running && 1167 if (fwrt->ops && fwrt->ops->fw_running &&
@@ -1169,7 +1169,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1169 IWL_ERR(fwrt, "Firmware not running - cannot dump error\n"); 1169 IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
1170 iwl_fw_free_dump_desc(fwrt); 1170 iwl_fw_free_dump_desc(fwrt);
1171 clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); 1171 clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
1172 goto out; 1172 return;
1173 } 1173 }
1174 1174
1175 iwl_fw_dbg_stop_recording(fwrt, &params); 1175 iwl_fw_dbg_stop_recording(fwrt, &params);
@@ -1183,7 +1183,20 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1183 udelay(500); 1183 udelay(500);
1184 iwl_fw_dbg_restart_recording(fwrt, &params); 1184 iwl_fw_dbg_restart_recording(fwrt, &params);
1185 } 1185 }
1186out: 1186}
1187IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
1188
1189void iwl_fw_error_dump_wk(struct work_struct *work)
1190{
1191 struct iwl_fw_runtime *fwrt =
1192 container_of(work, struct iwl_fw_runtime, dump.wk.work);
1193
1194 if (fwrt->ops && fwrt->ops->dump_start &&
1195 fwrt->ops->dump_start(fwrt->ops_ctx))
1196 return;
1197
1198 iwl_fw_dbg_collect_sync(fwrt);
1199
1187 if (fwrt->ops && fwrt->ops->dump_end) 1200 if (fwrt->ops && fwrt->ops->dump_end)
1188 fwrt->ops->dump_end(fwrt->ops_ctx); 1201 fwrt->ops->dump_end(fwrt->ops_ctx);
1189} 1202}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index d9578dcec24c..6f8d3256f7b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -367,4 +367,5 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
367#endif /* CONFIG_IWLWIFI_DEBUGFS */ 367#endif /* CONFIG_IWLWIFI_DEBUGFS */
368 368
369void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt); 369void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
370void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
370#endif /* __iwl_fw_dbg_h__ */ 371#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index 2cc6c019d0e1..420e6d745f77 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -30,38 +30,20 @@
30#undef TRACE_SYSTEM 30#undef TRACE_SYSTEM
31#define TRACE_SYSTEM iwlwifi_data 31#define TRACE_SYSTEM iwlwifi_data
32 32
33TRACE_EVENT(iwlwifi_dev_tx_data, 33TRACE_EVENT(iwlwifi_dev_tx_tb,
34 TP_PROTO(const struct device *dev, 34 TP_PROTO(const struct device *dev, struct sk_buff *skb,
35 struct sk_buff *skb, u8 hdr_len), 35 u8 *data_src, size_t data_len),
36 TP_ARGS(dev, skb, hdr_len), 36 TP_ARGS(dev, skb, data_src, data_len),
37 TP_STRUCT__entry( 37 TP_STRUCT__entry(
38 DEV_ENTRY 38 DEV_ENTRY
39 39
40 __dynamic_array(u8, data, 40 __dynamic_array(u8, data,
41 iwl_trace_data(skb) ? skb->len - hdr_len : 0) 41 iwl_trace_data(skb) ? data_len : 0)
42 ), 42 ),
43 TP_fast_assign( 43 TP_fast_assign(
44 DEV_ASSIGN; 44 DEV_ASSIGN;
45 if (iwl_trace_data(skb)) 45 if (iwl_trace_data(skb))
46 skb_copy_bits(skb, hdr_len, 46 memcpy(__get_dynamic_array(data), data_src, data_len);
47 __get_dynamic_array(data),
48 skb->len - hdr_len);
49 ),
50 TP_printk("[%s] TX frame data", __get_str(dev))
51);
52
53TRACE_EVENT(iwlwifi_dev_tx_tso_chunk,
54 TP_PROTO(const struct device *dev,
55 u8 *data_src, size_t data_len),
56 TP_ARGS(dev, data_src, data_len),
57 TP_STRUCT__entry(
58 DEV_ENTRY
59
60 __dynamic_array(u8, data, data_len)
61 ),
62 TP_fast_assign(
63 DEV_ASSIGN;
64 memcpy(__get_dynamic_array(data), data_src, data_len);
65 ), 47 ),
66 TP_printk("[%s] TX frame data", __get_str(dev)) 48 TP_printk("[%s] TX frame data", __get_str(dev))
67); 49);

diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 210be26aadaa..843f3b41b72e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -722,8 +722,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
722{ 722{
723 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; 723 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
724 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; 724 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
725 bool unified = fw_has_capa(&mvm->fw->ucode_capa,
726 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
725 struct wowlan_key_data key_data = { 727 struct wowlan_key_data key_data = {
726 .configure_keys = !d0i3, 728 .configure_keys = !d0i3 && !unified,
727 .use_rsc_tsc = false, 729 .use_rsc_tsc = false,
728 .tkip = &tkip_cmd, 730 .tkip = &tkip_cmd,
729 .use_tkip = false, 731 .use_tkip = false,
@@ -1636,32 +1638,10 @@ out_free_resp:
1636} 1638}
1637 1639
1638static struct iwl_wowlan_status * 1640static struct iwl_wowlan_status *
1639iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1641iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
1640{ 1642{
1641 u32 base = mvm->error_event_table[0];
1642 struct error_table_start {
1643 /* cf. struct iwl_error_event_table */
1644 u32 valid;
1645 u32 error_id;
1646 } err_info;
1647 int ret; 1643 int ret;
1648 1644
1649 iwl_trans_read_mem_bytes(mvm->trans, base,
1650 &err_info, sizeof(err_info));
1651
1652 if (err_info.valid) {
1653 IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
1654 err_info.valid, err_info.error_id);
1655 if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1656 struct cfg80211_wowlan_wakeup wakeup = {
1657 .rfkill_release = true,
1658 };
1659 ieee80211_report_wowlan_wakeup(vif, &wakeup,
1660 GFP_KERNEL);
1661 }
1662 return ERR_PTR(-EIO);
1663 }
1664
1665 /* only for tracing for now */ 1645 /* only for tracing for now */
1666 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL); 1646 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
1667 if (ret) 1647 if (ret)
@@ -1680,7 +1660,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1680 bool keep; 1660 bool keep;
1681 struct iwl_mvm_sta *mvm_ap_sta; 1661 struct iwl_mvm_sta *mvm_ap_sta;
1682 1662
1683 fw_status = iwl_mvm_get_wakeup_status(mvm, vif); 1663 fw_status = iwl_mvm_get_wakeup_status(mvm);
1684 if (IS_ERR_OR_NULL(fw_status)) 1664 if (IS_ERR_OR_NULL(fw_status))
1685 goto out_unlock; 1665 goto out_unlock;
1686 1666
@@ -1805,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
1805 u32 reasons = 0; 1785 u32 reasons = 0;
1806 int i, j, n_matches, ret; 1786 int i, j, n_matches, ret;
1807 1787
1808 fw_status = iwl_mvm_get_wakeup_status(mvm, vif); 1788 fw_status = iwl_mvm_get_wakeup_status(mvm);
1809 if (!IS_ERR_OR_NULL(fw_status)) { 1789 if (!IS_ERR_OR_NULL(fw_status)) {
1810 reasons = le32_to_cpu(fw_status->wakeup_reasons); 1790 reasons = le32_to_cpu(fw_status->wakeup_reasons);
1811 kfree(fw_status); 1791 kfree(fw_status);
@@ -1918,6 +1898,29 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
1918 ieee80211_resume_disconnect(vif); 1898 ieee80211_resume_disconnect(vif);
1919} 1899}
1920 1900
1901static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
1902 struct ieee80211_vif *vif)
1903{
1904 u32 base = mvm->error_event_table[0];
1905 struct error_table_start {
1906 /* cf. struct iwl_error_event_table */
1907 u32 valid;
1908 u32 error_id;
1909 } err_info;
1910
1911 iwl_trans_read_mem_bytes(mvm->trans, base,
1912 &err_info, sizeof(err_info));
1913
1914 if (err_info.valid &&
1915 err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1916 struct cfg80211_wowlan_wakeup wakeup = {
1917 .rfkill_release = true,
1918 };
1919 ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
1920 }
1921 return err_info.valid;
1922}
1923
1921static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) 1924static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1922{ 1925{
1923 struct ieee80211_vif *vif = NULL; 1926 struct ieee80211_vif *vif = NULL;
@@ -1949,6 +1952,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1949 /* query SRAM first in case we want event logging */ 1952 /* query SRAM first in case we want event logging */
1950 iwl_mvm_read_d3_sram(mvm); 1953 iwl_mvm_read_d3_sram(mvm);
1951 1954
1955 if (iwl_mvm_check_rt_status(mvm, vif)) {
1956 set_bit(STATUS_FW_ERROR, &mvm->trans->status);
1957 iwl_mvm_dump_nic_error_log(mvm);
1958 iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
1959 NULL, 0);
1960 ret = 1;
1961 goto err;
1962 }
1963
1952 if (d0i3_first) { 1964 if (d0i3_first) {
1953 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); 1965 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
1954 if (ret < 0) { 1966 if (ret < 0) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index c5df73231ba3..dade206d5511 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -364,7 +364,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
364 */ 364 */
365 365
366 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); 366 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
367 mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; 367 /*
368 * Set a 'fake' TID for the command queue, since we use the
369 * hweight() of the tid_bitmap as a refcount now. Not that
370 * we ever even consider the command queue as one we might
371 * want to reuse, but be safe nevertheless.
372 */
373 mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
374 BIT(IWL_MAX_TID_COUNT + 2);
368 375
369 for (i = 0; i < IEEE80211_MAX_QUEUES; i++) 376 for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
370 atomic_set(&mvm->mac80211_queue_stop_count[i], 0); 377 atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
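The comment above is the crux of the refactor: with hw_queue_refcount gone, "in use" is now derived from the popcount of tid_bitmap, and the command queue gets a fake TID bit purely so that its popcount never reads zero. A userspace reduction (MAX_TID_COUNT is a stand-in for IWL_MAX_TID_COUNT):

#include <stdio.h>

#define MAX_TID_COUNT 8	/* illustrative */

static int hweight_sketch(unsigned int x)
{
	int n = 0;

	while (x) {
		n += x & 1;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int cmd_queue_tids = 1u << (MAX_TID_COUNT + 2);	/* fake TID */
	unsigned int data_queue_tids = (1u << 0) | (1u << 5);	/* TIDs 0 and 5 */

	printf("cmd queue refcount=%d (never free)\n",
	       hweight_sketch(cmd_queue_tids));
	printf("data queue refcount=%d\n", hweight_sketch(data_queue_tids));
	return 0;
}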
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 8f71eeed50d9..7ba5bc2ed1c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -512,6 +512,7 @@ enum iwl_mvm_scan_type {
512 IWL_SCAN_TYPE_WILD, 512 IWL_SCAN_TYPE_WILD,
513 IWL_SCAN_TYPE_MILD, 513 IWL_SCAN_TYPE_MILD,
514 IWL_SCAN_TYPE_FRAGMENTED, 514 IWL_SCAN_TYPE_FRAGMENTED,
515 IWL_SCAN_TYPE_FAST_BALANCE,
515}; 516};
516 517
517enum iwl_mvm_sched_scan_pass_all_states { 518enum iwl_mvm_sched_scan_pass_all_states {
@@ -753,24 +754,12 @@ iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
753 * This is a state in which a single queue serves more than one TID, all of 754 * This is a state in which a single queue serves more than one TID, all of
754 * which are not aggregated. Note that the queue is only associated to one 755 * which are not aggregated. Note that the queue is only associated to one
755 * RA. 756 * RA.
756 * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
757 * This is a state of a queue that has had traffic on it, but during the
758 * last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
759 * it. In this state, when a new queue is needed to be allocated but no
760 * such free queue exists, an inactive queue might be freed and given to
761 * the new RA/TID.
762 * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
763 * This is the state of a queue that has had traffic pass through it, but
764 * needs to be reconfigured for some reason, e.g. the queue needs to
765 * become unshared and aggregations re-enabled on.
766 */ 757 */
767enum iwl_mvm_queue_status { 758enum iwl_mvm_queue_status {
768 IWL_MVM_QUEUE_FREE, 759 IWL_MVM_QUEUE_FREE,
769 IWL_MVM_QUEUE_RESERVED, 760 IWL_MVM_QUEUE_RESERVED,
770 IWL_MVM_QUEUE_READY, 761 IWL_MVM_QUEUE_READY,
771 IWL_MVM_QUEUE_SHARED, 762 IWL_MVM_QUEUE_SHARED,
772 IWL_MVM_QUEUE_INACTIVE,
773 IWL_MVM_QUEUE_RECONFIGURING,
774}; 763};
775 764
776#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) 765#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
@@ -787,6 +776,17 @@ struct iwl_mvm_geo_profile {
787 u8 values[ACPI_GEO_TABLE_SIZE]; 776 u8 values[ACPI_GEO_TABLE_SIZE];
788}; 777};
789 778
779struct iwl_mvm_dqa_txq_info {
780 u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
781 bool reserved; /* Is this the TXQ reserved for a STA */
782 u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
783	u8 txq_tid; /* The TID "owner" of this queue */
784 u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
785 /* Timestamp for inactivation per TID of this queue */
786 unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
787 enum iwl_mvm_queue_status status;
788};
789
790struct iwl_mvm { 790struct iwl_mvm {
791 /* for logger access */ 791 /* for logger access */
792 struct device *dev; 792 struct device *dev;
@@ -843,17 +843,7 @@ struct iwl_mvm {
843 843
844 u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES]; 844 u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
845 845
846 struct { 846 struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
847 u8 hw_queue_refcount;
848 u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
849 bool reserved; /* Is this the TXQ reserved for a STA */
850 u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
851 u8 txq_tid; /* The TID "owner" of this queue*/
852 u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
853 /* Timestamp for inactivation per TID of this queue */
854 unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
855 enum iwl_mvm_queue_status status;
856 } queue_info[IWL_MAX_HW_QUEUES];
857 spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ 847 spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
858 struct work_struct add_stream_wk; /* To add streams to queues */ 848 struct work_struct add_stream_wk; /* To add streams to queues */
859 849
@@ -1883,17 +1873,6 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
1883 mvmvif->low_latency &= ~cause; 1873 mvmvif->low_latency &= ~cause;
1884} 1874}
1885 1875
1886/* hw scheduler queue config */
1887bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
1888 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
1889 unsigned int wdg_timeout);
1890int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
1891 u8 sta_id, u8 tid, unsigned int timeout);
1892
1893int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
1894 u8 tid, u8 flags);
1895int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
1896
1897/* Return a bitmask with all the hw supported queues, except for the 1876/* Return a bitmask with all the hw supported queues, except for the
1898 * command queue, which can't be flushed. 1877 * command queue, which can't be flushed.
1899 */ 1878 */
@@ -1905,6 +1884,11 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
1905 1884
1906static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) 1885static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1907{ 1886{
1887 lockdep_assert_held(&mvm->mutex);
1888 /* calling this function without using dump_start/end since at this
1889 * point we already hold the op mode mutex
1890 */
1891 iwl_fw_dbg_collect_sync(&mvm->fwrt);
1908 iwl_fw_cancel_timestamp(&mvm->fwrt); 1892 iwl_fw_cancel_timestamp(&mvm->fwrt);
1909 iwl_free_fw_paging(&mvm->fwrt); 1893 iwl_free_fw_paging(&mvm->fwrt);
1910 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 1894 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
@@ -1990,8 +1974,6 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t);
1990struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); 1974struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
1991bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); 1975bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
1992 1976
1993void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
1994
1995#define MVM_TCM_PERIOD_MSEC 500 1977#define MVM_TCM_PERIOD_MSEC 500
1996#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000) 1978#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
1997#define MVM_LL_PERIOD (10 * HZ) 1979#define MVM_LL_PERIOD (10 * HZ)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 2c75f51a04e4..089972280daa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1239 !(info->flags & IEEE80211_TX_STAT_AMPDU)) 1239 !(info->flags & IEEE80211_TX_STAT_AMPDU))
1240 return; 1240 return;
1241 1241
1242 rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); 1242 if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
1243 &tx_resp_rate)) {
1244 WARN_ON_ONCE(1);
1245 return;
1246 }
1243 1247
1244#ifdef CONFIG_MAC80211_DEBUGFS 1248#ifdef CONFIG_MAC80211_DEBUGFS
1245 /* Disable last tx check if we are debugging with fixed rate but 1249 /* Disable last tx check if we are debugging with fixed rate but
@@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1290 */ 1294 */
1291 table = &lq_sta->lq; 1295 table = &lq_sta->lq;
1292 lq_hwrate = le32_to_cpu(table->rs_table[0]); 1296 lq_hwrate = le32_to_cpu(table->rs_table[0]);
1293 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); 1297 if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
1298 WARN_ON_ONCE(1);
1299 return;
1300 }
1294 1301
1295 /* Here we actually compare this rate to the latest LQ command */ 1302 /* Here we actually compare this rate to the latest LQ command */
1296 if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { 1303 if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
@@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1392 /* Collect data for each rate used during failed TX attempts */ 1399 /* Collect data for each rate used during failed TX attempts */
1393 for (i = 0; i <= retries; ++i) { 1400 for (i = 0; i <= retries; ++i) {
1394 lq_hwrate = le32_to_cpu(table->rs_table[i]); 1401 lq_hwrate = le32_to_cpu(table->rs_table[i]);
1395 rs_rate_from_ucode_rate(lq_hwrate, info->band, 1402 if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
1396 &lq_rate); 1403 &lq_rate)) {
1404 WARN_ON_ONCE(1);
1405 return;
1406 }
1407
1397 /* 1408 /*
1398 * Only collect stats if retried rate is in the same RS 1409 * Only collect stats if retried rate is in the same RS
1399 * table as active/search. 1410 * table as active/search.
@@ -3260,7 +3271,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
3260 for (i = 0; i < num_rates; i++) 3271 for (i = 0; i < num_rates; i++)
3261 lq_cmd->rs_table[i] = ucode_rate_le32; 3272 lq_cmd->rs_table[i] = ucode_rate_le32;
3262 3273
3263 rs_rate_from_ucode_rate(ucode_rate, band, &rate); 3274 if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
3275 WARN_ON_ONCE(1);
3276 return;
3277 }
3264 3278
3265 if (is_mimo(&rate)) 3279 if (is_mimo(&rate))
3266 lq_cmd->mimo_delim = num_rates - 1; 3280 lq_cmd->mimo_delim = num_rates - 1;
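All four call sites now follow the same defensive shape: rs_rate_from_ucode_rate() reports failure instead of leaving the output rate half-filled, and callers WARN_ON_ONCE() and bail rather than feed garbage into the rate-scaling state. A generic userspace reduction of the pattern (the validity check is illustrative):

#include <stdio.h>

struct rate { int mcs; };

static int decode_rate(unsigned int hwrate, struct rate *r)
{
	if (hwrate > 11)	/* illustrative validity check */
		return -1;	/* fail loudly instead of half-filling r */
	r->mcs = (int)hwrate;
	return 0;
}

int main(void)
{
	struct rate r;

	if (decode_rate(99, &r)) {
		fprintf(stderr, "bad ucode rate, dropping status\n");
		return 1;
	}
	printf("mcs=%d\n", r.mcs);
	return 0;
}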
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index ffcd0ca86041..cfb784fea77b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -110,6 +110,10 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
110 .suspend_time = 95, 110 .suspend_time = 95,
111 .max_out_time = 44, 111 .max_out_time = 44,
112 }, 112 },
113 [IWL_SCAN_TYPE_FAST_BALANCE] = {
114 .suspend_time = 30,
115 .max_out_time = 37,
116 },
113}; 117};
114 118
115struct iwl_mvm_scan_params { 119struct iwl_mvm_scan_params {
@@ -235,8 +239,32 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
235 return mvm->tcm.result.band_load[band]; 239 return mvm->tcm.result.band_load[band];
236} 240}
237 241
242struct iwl_is_dcm_with_go_iterator_data {
243 struct ieee80211_vif *current_vif;
244 bool is_dcm_with_p2p_go;
245};
246
247static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
248 struct ieee80211_vif *vif)
249{
250 struct iwl_is_dcm_with_go_iterator_data *data = _data;
251 struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
252 struct iwl_mvm_vif *curr_mvmvif =
253 iwl_mvm_vif_from_mac80211(data->current_vif);
254
255 /* exclude the given vif */
256 if (vif == data->current_vif)
257 return;
258
259 if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
260 other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
261 other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
262 data->is_dcm_with_p2p_go = true;
263}
264
238static enum 265static enum
239iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device, 266iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
267 struct ieee80211_vif *vif,
240 enum iwl_mvm_traffic_load load, 268 enum iwl_mvm_traffic_load load,
241 bool low_latency) 269 bool low_latency)
242{ 270{
@@ -249,9 +277,30 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
249 if (!global_cnt) 277 if (!global_cnt)
250 return IWL_SCAN_TYPE_UNASSOC; 278 return IWL_SCAN_TYPE_UNASSOC;
251 279
252 if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device && 280 if (fw_has_api(&mvm->fw->ucode_capa,
253 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) 281 IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
254 return IWL_SCAN_TYPE_FRAGMENTED; 282 if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
283 (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
284 return IWL_SCAN_TYPE_FRAGMENTED;
285
286		/* in case of DCM with a P2P GO where the BSS DTIM interval
287		 * is < 220 msec, set all scan requests as fast-balance scans
288		 */
289 if (vif && vif->type == NL80211_IFTYPE_STATION &&
290 vif->bss_conf.dtim_period < 220) {
291 struct iwl_is_dcm_with_go_iterator_data data = {
292 .current_vif = vif,
293 .is_dcm_with_p2p_go = false,
294 };
295
296 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
297 IEEE80211_IFACE_ITER_NORMAL,
298 iwl_mvm_is_dcm_with_go_iterator,
299 &data);
300 if (data.is_dcm_with_p2p_go)
301 return IWL_SCAN_TYPE_FAST_BALANCE;
302 }
303 }
255 304
256 if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) 305 if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
257 return IWL_SCAN_TYPE_MILD; 306 return IWL_SCAN_TYPE_MILD;
@@ -260,7 +309,8 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
260} 309}
261 310
262static enum 311static enum
263iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) 312iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
313 struct ieee80211_vif *vif)
264{ 314{
265 enum iwl_mvm_traffic_load load; 315 enum iwl_mvm_traffic_load load;
266 bool low_latency; 316 bool low_latency;
@@ -268,12 +318,12 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
268 load = iwl_mvm_get_traffic_load(mvm); 318 load = iwl_mvm_get_traffic_load(mvm);
269 low_latency = iwl_mvm_low_latency(mvm); 319 low_latency = iwl_mvm_low_latency(mvm);
270 320
271 return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); 321 return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
272} 322}
273 323
274static enum 324static enum
275iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, 325iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
276 bool p2p_device, 326 struct ieee80211_vif *vif,
277 enum nl80211_band band) 327 enum nl80211_band band)
278{ 328{
279 enum iwl_mvm_traffic_load load; 329 enum iwl_mvm_traffic_load load;
@@ -282,7 +332,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
282 load = iwl_mvm_get_traffic_load_band(mvm, band); 332 load = iwl_mvm_get_traffic_load_band(mvm, band);
283 low_latency = iwl_mvm_low_latency_band(mvm, band); 333 low_latency = iwl_mvm_low_latency_band(mvm, band);
284 334
285 return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); 335 return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
286} 336}
287 337
288static int 338static int
@@ -860,6 +910,12 @@ static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
860 params->scan_plans[0].iterations == 1; 910 params->scan_plans[0].iterations == 1;
861} 911}
862 912
913static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
914{
915 return (type == IWL_SCAN_TYPE_FRAGMENTED ||
916 type == IWL_SCAN_TYPE_FAST_BALANCE);
917}
918
863static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, 919static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
864 struct iwl_mvm_scan_params *params, 920 struct iwl_mvm_scan_params *params,
865 struct ieee80211_vif *vif) 921 struct ieee80211_vif *vif)
@@ -872,7 +928,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
872 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) 928 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
873 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; 929 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
874 930
875 if (params->type == IWL_SCAN_TYPE_FRAGMENTED) 931 if (iwl_mvm_is_scan_fragmented(params->type))
876 flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; 932 flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
877 933
878 if (iwl_mvm_rrm_scan_needed(mvm) && 934 if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -895,7 +951,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
895 951
896 if (iwl_mvm_is_regular_scan(params) && 952 if (iwl_mvm_is_regular_scan(params) &&
897 vif->type != NL80211_IFTYPE_P2P_DEVICE && 953 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
898 params->type != IWL_SCAN_TYPE_FRAGMENTED) 954 !iwl_mvm_is_scan_fragmented(params->type))
899 flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL; 955 flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
900 956
901 return flags; 957 return flags;
@@ -1044,7 +1100,7 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
1044static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, 1100static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
1045 u32 flags, u8 channel_flags) 1101 u32 flags, u8 channel_flags)
1046{ 1102{
1047 enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); 1103 enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
1048 struct iwl_scan_config_v1 *cfg = config; 1104 struct iwl_scan_config_v1 *cfg = config;
1049 1105
1050 cfg->flags = cpu_to_le32(flags); 1106 cfg->flags = cpu_to_le32(flags);
@@ -1077,9 +1133,9 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
1077 if (iwl_mvm_is_cdb_supported(mvm)) { 1133 if (iwl_mvm_is_cdb_supported(mvm)) {
1078 enum iwl_mvm_scan_type lb_type, hb_type; 1134 enum iwl_mvm_scan_type lb_type, hb_type;
1079 1135
1080 lb_type = iwl_mvm_get_scan_type_band(mvm, false, 1136 lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1081 NL80211_BAND_2GHZ); 1137 NL80211_BAND_2GHZ);
1082 hb_type = iwl_mvm_get_scan_type_band(mvm, false, 1138 hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1083 NL80211_BAND_5GHZ); 1139 NL80211_BAND_5GHZ);
1084 1140
1085 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = 1141 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
@@ -1093,7 +1149,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
1093 cpu_to_le32(scan_timing[hb_type].suspend_time); 1149 cpu_to_le32(scan_timing[hb_type].suspend_time);
1094 } else { 1150 } else {
1095 enum iwl_mvm_scan_type type = 1151 enum iwl_mvm_scan_type type =
1096 iwl_mvm_get_scan_type(mvm, false); 1152 iwl_mvm_get_scan_type(mvm, NULL);
1097 1153
1098 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = 1154 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
1099 cpu_to_le32(scan_timing[type].max_out_time); 1155 cpu_to_le32(scan_timing[type].max_out_time);
@@ -1130,14 +1186,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1130 return -ENOBUFS; 1186 return -ENOBUFS;
1131 1187
1132 if (iwl_mvm_is_cdb_supported(mvm)) { 1188 if (iwl_mvm_is_cdb_supported(mvm)) {
1133 type = iwl_mvm_get_scan_type_band(mvm, false, 1189 type = iwl_mvm_get_scan_type_band(mvm, NULL,
1134 NL80211_BAND_2GHZ); 1190 NL80211_BAND_2GHZ);
1135 hb_type = iwl_mvm_get_scan_type_band(mvm, false, 1191 hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1136 NL80211_BAND_5GHZ); 1192 NL80211_BAND_5GHZ);
1137 if (type == mvm->scan_type && hb_type == mvm->hb_scan_type) 1193 if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
1138 return 0; 1194 return 0;
1139 } else { 1195 } else {
1140 type = iwl_mvm_get_scan_type(mvm, false); 1196 type = iwl_mvm_get_scan_type(mvm, NULL);
1141 if (type == mvm->scan_type) 1197 if (type == mvm->scan_type)
1142 return 0; 1198 return 0;
1143 } 1199 }
@@ -1162,7 +1218,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1162 SCAN_CONFIG_FLAG_SET_MAC_ADDR | 1218 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
1163 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS | 1219 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
1164 SCAN_CONFIG_N_CHANNELS(num_channels) | 1220 SCAN_CONFIG_N_CHANNELS(num_channels) |
1165 (type == IWL_SCAN_TYPE_FRAGMENTED ? 1221 (iwl_mvm_is_scan_fragmented(type) ?
1166 SCAN_CONFIG_FLAG_SET_FRAGMENTED : 1222 SCAN_CONFIG_FLAG_SET_FRAGMENTED :
1167 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED); 1223 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
1168 1224
@@ -1177,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1177 */ 1233 */
1178 if (iwl_mvm_cdb_scan_api(mvm)) { 1234 if (iwl_mvm_cdb_scan_api(mvm)) {
1179 if (iwl_mvm_is_cdb_supported(mvm)) 1235 if (iwl_mvm_is_cdb_supported(mvm))
1180 flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ? 1236 flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
1181 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : 1237 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
1182 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; 1238 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
1183 iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); 1239 iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
@@ -1338,11 +1394,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
1338 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) 1394 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
1339 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; 1395 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
1340 1396
1341 if (params->type == IWL_SCAN_TYPE_FRAGMENTED) 1397 if (iwl_mvm_is_scan_fragmented(params->type))
1342 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; 1398 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
1343 1399
1344 if (iwl_mvm_is_cdb_supported(mvm) && 1400 if (iwl_mvm_is_cdb_supported(mvm) &&
1345 params->hb_type == IWL_SCAN_TYPE_FRAGMENTED) 1401 iwl_mvm_is_scan_fragmented(params->hb_type))
1346 flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; 1402 flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
1347 1403
1348 if (iwl_mvm_rrm_scan_needed(mvm) && 1404 if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -1380,7 +1436,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
1380 */ 1436 */
1381 if (iwl_mvm_is_regular_scan(params) && 1437 if (iwl_mvm_is_regular_scan(params) &&
1382 vif->type != NL80211_IFTYPE_P2P_DEVICE && 1438 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
1383 params->type != IWL_SCAN_TYPE_FRAGMENTED && 1439 !iwl_mvm_is_scan_fragmented(params->type) &&
1384 !iwl_mvm_is_adaptive_dwell_supported(mvm) && 1440 !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
1385 !iwl_mvm_is_oce_supported(mvm)) 1441 !iwl_mvm_is_oce_supported(mvm))
1386 flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL; 1442 flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
@@ -1589,19 +1645,20 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
1589 1645
1590static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm, 1646static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
1591 struct iwl_mvm_scan_params *params, 1647 struct iwl_mvm_scan_params *params,
1592 bool p2p) 1648 struct ieee80211_vif *vif)
1593{ 1649{
1594 if (iwl_mvm_is_cdb_supported(mvm)) { 1650 if (iwl_mvm_is_cdb_supported(mvm)) {
1595 params->type = 1651 params->type =
1596 iwl_mvm_get_scan_type_band(mvm, p2p, 1652 iwl_mvm_get_scan_type_band(mvm, vif,
1597 NL80211_BAND_2GHZ); 1653 NL80211_BAND_2GHZ);
1598 params->hb_type = 1654 params->hb_type =
1599 iwl_mvm_get_scan_type_band(mvm, p2p, 1655 iwl_mvm_get_scan_type_band(mvm, vif,
1600 NL80211_BAND_5GHZ); 1656 NL80211_BAND_5GHZ);
1601 } else { 1657 } else {
1602 params->type = iwl_mvm_get_scan_type(mvm, p2p); 1658 params->type = iwl_mvm_get_scan_type(mvm, vif);
1603 } 1659 }
1604} 1660}
1661
1605int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1662int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1606 struct cfg80211_scan_request *req, 1663 struct cfg80211_scan_request *req,
1607 struct ieee80211_scan_ies *ies) 1664 struct ieee80211_scan_ies *ies)
@@ -1649,8 +1706,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1649 params.scan_plans = &scan_plan; 1706 params.scan_plans = &scan_plan;
1650 params.n_scan_plans = 1; 1707 params.n_scan_plans = 1;
1651 1708
1652 iwl_mvm_fill_scan_type(mvm, &params, 1709 iwl_mvm_fill_scan_type(mvm, &params, vif);
1653 vif->type == NL80211_IFTYPE_P2P_DEVICE);
1654 1710
1655 ret = iwl_mvm_get_measurement_dwell(mvm, req, &params); 1711 ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
1656 if (ret < 0) 1712 if (ret < 0)
@@ -1745,8 +1801,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1745 params.n_scan_plans = req->n_scan_plans; 1801 params.n_scan_plans = req->n_scan_plans;
1746 params.scan_plans = req->scan_plans; 1802 params.scan_plans = req->scan_plans;
1747 1803
1748 iwl_mvm_fill_scan_type(mvm, &params, 1804 iwl_mvm_fill_scan_type(mvm, &params, vif);
1749 vif->type == NL80211_IFTYPE_P2P_DEVICE);
1750 1805
1751 /* In theory, LMAC scans can handle a 32-bit delay, but since 1806 /* In theory, LMAC scans can handle a 32-bit delay, but since
1752 * waiting for over 18 hours to start the scan is a bit silly 1807 * waiting for over 18 hours to start the scan is a bit silly
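
The scan.c hunks above all make the same substitution: every open-coded "type == IWL_SCAN_TYPE_FRAGMENTED" comparison is funneled through an iwl_mvm_is_scan_fragmented() predicate, and the scan-type helpers now take the vif rather than a precomputed p2p flag. A minimal sketch of the predicate these call sites imply follows; the IWL_SCAN_TYPE_FAST_BALANCE member is an assumption, the point being that any additional fragmented-like scan type is then handled in one spot instead of at every call site.

static inline bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
{
	/* sketch: any scan type that fragments its dwell time goes here */
	return type == IWL_SCAN_TYPE_FRAGMENTED ||
	       type == IWL_SCAN_TYPE_FAST_BALANCE;
}
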
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 8f929c774e70..1887d2b9f185 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -358,6 +358,108 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
358 return ret; 358 return ret;
359} 359}
360 360
361static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
362 int mac80211_queue, u8 tid, u8 flags)
363{
364 struct iwl_scd_txq_cfg_cmd cmd = {
365 .scd_queue = queue,
366 .action = SCD_CFG_DISABLE_QUEUE,
367 };
368 bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
369 int ret;
370
371 if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
372 return -EINVAL;
373
374 if (iwl_mvm_has_new_tx_api(mvm)) {
375 spin_lock_bh(&mvm->queue_info_lock);
376
377 if (remove_mac_queue)
378 mvm->hw_queue_to_mac80211[queue] &=
379 ~BIT(mac80211_queue);
380
381 spin_unlock_bh(&mvm->queue_info_lock);
382
383 iwl_trans_txq_free(mvm->trans, queue);
384
385 return 0;
386 }
387
388 spin_lock_bh(&mvm->queue_info_lock);
389
390 if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
391 spin_unlock_bh(&mvm->queue_info_lock);
392 return 0;
393 }
394
395 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
396
397 /*
398 * If there is another TID with the same AC - don't remove the MAC queue
399 * from the mapping
400 */
401 if (tid < IWL_MAX_TID_COUNT) {
402 unsigned long tid_bitmap =
403 mvm->queue_info[queue].tid_bitmap;
404 int ac = tid_to_mac80211_ac[tid];
405 int i;
406
407 for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
408 if (tid_to_mac80211_ac[i] == ac)
409 remove_mac_queue = false;
410 }
411 }
412
413 if (remove_mac_queue)
414 mvm->hw_queue_to_mac80211[queue] &=
415 ~BIT(mac80211_queue);
416
417 cmd.action = mvm->queue_info[queue].tid_bitmap ?
418 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
419 if (cmd.action == SCD_CFG_DISABLE_QUEUE)
420 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
421
422 IWL_DEBUG_TX_QUEUES(mvm,
423 "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
424 queue,
425 mvm->queue_info[queue].tid_bitmap,
426 mvm->hw_queue_to_mac80211[queue]);
427
428 /* If the queue is still enabled - nothing left to do in this func */
429 if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
430 spin_unlock_bh(&mvm->queue_info_lock);
431 return 0;
432 }
433
434 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
435 cmd.tid = mvm->queue_info[queue].txq_tid;
436
437 /* Make sure queue info is correct even though we overwrite it */
438 WARN(mvm->queue_info[queue].tid_bitmap ||
439 mvm->hw_queue_to_mac80211[queue],
440 "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
441 queue, mvm->hw_queue_to_mac80211[queue],
442 mvm->queue_info[queue].tid_bitmap);
443
444 /* If we are here - the queue is freed and we can zero out these vals */
445 mvm->queue_info[queue].tid_bitmap = 0;
446 mvm->hw_queue_to_mac80211[queue] = 0;
447
 448	/* Regardless of whether this is a reserved TXQ for a STA - mark it as false */
449 mvm->queue_info[queue].reserved = false;
450
451 spin_unlock_bh(&mvm->queue_info_lock);
452
453 iwl_trans_txq_disable(mvm->trans, queue, false);
454 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
455 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
456
457 if (ret)
458 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
459 queue, ret);
460 return ret;
461}
462
361static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) 463static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
362{ 464{
363 struct ieee80211_sta *sta; 465 struct ieee80211_sta *sta;
@@ -447,11 +549,12 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
447} 549}
448 550
449static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, 551static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
450 bool same_sta) 552 u8 new_sta_id)
451{ 553{
452 struct iwl_mvm_sta *mvmsta; 554 struct iwl_mvm_sta *mvmsta;
453 u8 txq_curr_ac, sta_id, tid; 555 u8 txq_curr_ac, sta_id, tid;
454 unsigned long disable_agg_tids = 0; 556 unsigned long disable_agg_tids = 0;
557 bool same_sta;
455 int ret; 558 int ret;
456 559
457 lockdep_assert_held(&mvm->mutex); 560 lockdep_assert_held(&mvm->mutex);
@@ -465,6 +568,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
465 tid = mvm->queue_info[queue].txq_tid; 568 tid = mvm->queue_info[queue].txq_tid;
466 spin_unlock_bh(&mvm->queue_info_lock); 569 spin_unlock_bh(&mvm->queue_info_lock);
467 570
571 same_sta = sta_id == new_sta_id;
572
468 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); 573 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
469 if (WARN_ON(!mvmsta)) 574 if (WARN_ON(!mvmsta))
470 return -EINVAL; 575 return -EINVAL;
@@ -479,10 +584,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
479 mvmsta->vif->hw_queue[txq_curr_ac], 584 mvmsta->vif->hw_queue[txq_curr_ac],
480 tid, 0); 585 tid, 0);
481 if (ret) { 586 if (ret) {
482 /* Re-mark the inactive queue as inactive */
483 spin_lock_bh(&mvm->queue_info_lock);
484 mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
485 spin_unlock_bh(&mvm->queue_info_lock);
486 IWL_ERR(mvm, 587 IWL_ERR(mvm,
487 "Failed to free inactive queue %d (ret=%d)\n", 588 "Failed to free inactive queue %d (ret=%d)\n",
488 queue, ret); 589 queue, ret);
@@ -504,7 +605,13 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
504 u8 ac_to_queue[IEEE80211_NUM_ACS]; 605 u8 ac_to_queue[IEEE80211_NUM_ACS];
505 int i; 606 int i;
506 607
608 /*
609 * This protects us against grabbing a queue that's being reconfigured
610 * by the inactivity checker.
611 */
612 lockdep_assert_held(&mvm->mutex);
507 lockdep_assert_held(&mvm->queue_info_lock); 613 lockdep_assert_held(&mvm->queue_info_lock);
614
508 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 615 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
509 return -EINVAL; 616 return -EINVAL;
510 617
@@ -517,11 +624,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
517 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) 624 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
518 continue; 625 continue;
519 626
520 /* Don't try and take queues being reconfigured */
521 if (mvm->queue_info[queue].status ==
522 IWL_MVM_QUEUE_RECONFIGURING)
523 continue;
524
525 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; 627 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
526 } 628 }
527 629
@@ -562,14 +664,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
562 return -ENOSPC; 664 return -ENOSPC;
563 } 665 }
564 666
565 /* Make sure the queue isn't in the middle of being reconfigured */
566 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
567 IWL_ERR(mvm,
568 "TXQ %d is in the middle of re-config - try again\n",
569 queue);
570 return -EBUSY;
571 }
572
573 return queue; 667 return queue;
574} 668}
575 669
@@ -579,9 +673,9 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
579 * in such a case, otherwise - if no redirection required - it does nothing, 673 * in such a case, otherwise - if no redirection required - it does nothing,
580 * unless the %force param is true. 674 * unless the %force param is true.
581 */ 675 */
582int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, 676static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
583 int ac, int ssn, unsigned int wdg_timeout, 677 int ac, int ssn, unsigned int wdg_timeout,
584 bool force) 678 bool force)
585{ 679{
586 struct iwl_scd_txq_cfg_cmd cmd = { 680 struct iwl_scd_txq_cfg_cmd cmd = {
587 .scd_queue = queue, 681 .scd_queue = queue,
@@ -616,7 +710,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
616 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; 710 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
617 cmd.tid = mvm->queue_info[queue].txq_tid; 711 cmd.tid = mvm->queue_info[queue].txq_tid;
618 mq = mvm->hw_queue_to_mac80211[queue]; 712 mq = mvm->hw_queue_to_mac80211[queue];
619 shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); 713 shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
620 spin_unlock_bh(&mvm->queue_info_lock); 714 spin_unlock_bh(&mvm->queue_info_lock);
621 715
622 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", 716 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
@@ -674,6 +768,57 @@ out:
674 return ret; 768 return ret;
675} 769}
676 770
771static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
772 u8 minq, u8 maxq)
773{
774 int i;
775
776 lockdep_assert_held(&mvm->queue_info_lock);
777
778 /* This should not be hit with new TX path */
779 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
780 return -ENOSPC;
781
782 /* Start by looking for a free queue */
783 for (i = minq; i <= maxq; i++)
784 if (mvm->queue_info[i].tid_bitmap == 0 &&
785 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
786 return i;
787
788 return -ENOSPC;
789}
790
791static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
792 u8 sta_id, u8 tid, unsigned int timeout)
793{
794 int queue, size = IWL_DEFAULT_QUEUE_SIZE;
795
796 if (tid == IWL_MAX_TID_COUNT) {
797 tid = IWL_MGMT_TID;
798 size = IWL_MGMT_QUEUE_SIZE;
799 }
800 queue = iwl_trans_txq_alloc(mvm->trans,
801 cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
802 sta_id, tid, SCD_QUEUE_CFG, size, timeout);
803
804 if (queue < 0) {
805 IWL_DEBUG_TX_QUEUES(mvm,
806 "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
807 sta_id, tid, queue);
808 return queue;
809 }
810
811 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
812 queue, sta_id, tid);
813
814 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
815 IWL_DEBUG_TX_QUEUES(mvm,
816 "Enabling TXQ #%d (mac80211 map:0x%x)\n",
817 queue, mvm->hw_queue_to_mac80211[queue]);
818
819 return queue;
820}
821
677static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, 822static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
678 struct ieee80211_sta *sta, u8 ac, 823 struct ieee80211_sta *sta, u8 ac,
679 int tid) 824 int tid)
@@ -698,12 +843,428 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
698 843
699 spin_lock_bh(&mvmsta->lock); 844 spin_lock_bh(&mvmsta->lock);
700 mvmsta->tid_data[tid].txq_id = queue; 845 mvmsta->tid_data[tid].txq_id = queue;
701 mvmsta->tid_data[tid].is_tid_active = true;
702 spin_unlock_bh(&mvmsta->lock); 846 spin_unlock_bh(&mvmsta->lock);
703 847
704 return 0; 848 return 0;
705} 849}
706 850
851static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
852 int mac80211_queue, u8 sta_id, u8 tid)
853{
854 bool enable_queue = true;
855
856 spin_lock_bh(&mvm->queue_info_lock);
857
858 /* Make sure this TID isn't already enabled */
859 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
860 spin_unlock_bh(&mvm->queue_info_lock);
861 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
862 queue, tid);
863 return false;
864 }
865
866 /* Update mappings and refcounts */
867 if (mvm->queue_info[queue].tid_bitmap)
868 enable_queue = false;
869
870 if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
871 WARN(mac80211_queue >=
872 BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
873 "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
874 mac80211_queue, queue, sta_id, tid);
875 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
876 }
877
878 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
879 mvm->queue_info[queue].ra_sta_id = sta_id;
880
881 if (enable_queue) {
882 if (tid != IWL_MAX_TID_COUNT)
883 mvm->queue_info[queue].mac80211_ac =
884 tid_to_mac80211_ac[tid];
885 else
886 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
887
888 mvm->queue_info[queue].txq_tid = tid;
889 }
890
891 IWL_DEBUG_TX_QUEUES(mvm,
892 "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
893 queue, mvm->queue_info[queue].tid_bitmap,
894 mvm->hw_queue_to_mac80211[queue]);
895
896 spin_unlock_bh(&mvm->queue_info_lock);
897
898 return enable_queue;
899}
900
901static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
902 int mac80211_queue, u16 ssn,
903 const struct iwl_trans_txq_scd_cfg *cfg,
904 unsigned int wdg_timeout)
905{
906 struct iwl_scd_txq_cfg_cmd cmd = {
907 .scd_queue = queue,
908 .action = SCD_CFG_ENABLE_QUEUE,
909 .window = cfg->frame_limit,
910 .sta_id = cfg->sta_id,
911 .ssn = cpu_to_le16(ssn),
912 .tx_fifo = cfg->fifo,
913 .aggregate = cfg->aggregate,
914 .tid = cfg->tid,
915 };
916 bool inc_ssn;
917
918 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
919 return false;
920
921 /* Send the enabling command if we need to */
922 if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
923 cfg->sta_id, cfg->tid))
924 return false;
925
926 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
927 NULL, wdg_timeout);
928 if (inc_ssn)
929 le16_add_cpu(&cmd.ssn, 1);
930
931 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
932 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
933
934 return inc_ssn;
935}
936
937static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
938{
939 struct iwl_scd_txq_cfg_cmd cmd = {
940 .scd_queue = queue,
941 .action = SCD_CFG_UPDATE_QUEUE_TID,
942 };
943 int tid;
944 unsigned long tid_bitmap;
945 int ret;
946
947 lockdep_assert_held(&mvm->mutex);
948
949 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
950 return;
951
952 spin_lock_bh(&mvm->queue_info_lock);
953 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
954 spin_unlock_bh(&mvm->queue_info_lock);
955
956 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
957 return;
958
959 /* Find any TID for queue */
960 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
961 cmd.tid = tid;
962 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
963
964 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
965 if (ret) {
966 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
967 queue, ret);
968 return;
969 }
970
971 spin_lock_bh(&mvm->queue_info_lock);
972 mvm->queue_info[queue].txq_tid = tid;
973 spin_unlock_bh(&mvm->queue_info_lock);
974 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
975 queue, tid);
976}
977
978static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
979{
980 struct ieee80211_sta *sta;
981 struct iwl_mvm_sta *mvmsta;
982 u8 sta_id;
983 int tid = -1;
984 unsigned long tid_bitmap;
985 unsigned int wdg_timeout;
986 int ssn;
987 int ret = true;
988
989 /* queue sharing is disabled on new TX path */
990 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
991 return;
992
993 lockdep_assert_held(&mvm->mutex);
994
995 spin_lock_bh(&mvm->queue_info_lock);
996 sta_id = mvm->queue_info[queue].ra_sta_id;
997 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
998 spin_unlock_bh(&mvm->queue_info_lock);
999
1000 /* Find TID for queue, and make sure it is the only one on the queue */
1001 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
1002 if (tid_bitmap != BIT(tid)) {
1003 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
1004 queue, tid_bitmap);
1005 return;
1006 }
1007
1008 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
1009 tid);
1010
1011 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1012 lockdep_is_held(&mvm->mutex));
1013
1014 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
1015 return;
1016
1017 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1018 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1019
1020 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1021
1022 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
1023 tid_to_mac80211_ac[tid], ssn,
1024 wdg_timeout, true);
1025 if (ret) {
1026 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
1027 return;
1028 }
1029
1030 /* If aggs should be turned back on - do it */
1031 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1032 struct iwl_mvm_add_sta_cmd cmd = {0};
1033
1034 mvmsta->tid_disable_agg &= ~BIT(tid);
1035
1036 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1037 cmd.sta_id = mvmsta->sta_id;
1038 cmd.add_modify = STA_MODE_MODIFY;
1039 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1040 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1041 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1042
1043 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1044 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1045 if (!ret) {
1046 IWL_DEBUG_TX_QUEUES(mvm,
1047 "TXQ #%d is now aggregated again\n",
1048 queue);
1049
1050			/* Mark queue internally as aggregating again */
1051 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1052 }
1053 }
1054
1055 spin_lock_bh(&mvm->queue_info_lock);
1056 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1057 spin_unlock_bh(&mvm->queue_info_lock);
1058}
1059
1060/*
1061 * Remove inactive TIDs of a given queue.
1062 * If all queue TIDs are inactive - mark the queue as inactive
1063 * If only some of the queue TIDs are inactive - unmap them from the queue
1064 *
1065 * Returns %true if all TIDs were removed and the queue could be reused.
1066 */
1067static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1068 struct iwl_mvm_sta *mvmsta, int queue,
1069 unsigned long tid_bitmap,
1070 unsigned long *unshare_queues,
1071 unsigned long *changetid_queues)
1072{
1073 int tid;
1074
1075 lockdep_assert_held(&mvmsta->lock);
1076 lockdep_assert_held(&mvm->queue_info_lock);
1077
1078 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1079 return false;
1080
1081 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1082 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1083 /* If some TFDs are still queued - don't mark TID as inactive */
1084 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1085 tid_bitmap &= ~BIT(tid);
1086
1087 /* Don't mark as inactive any TID that has an active BA */
1088 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1089 tid_bitmap &= ~BIT(tid);
1090 }
1091
1092	/* If all TIDs in the queue are inactive - return that the queue can be reused */
1093 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1094 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1095 return true;
1096 }
1097
1098 /*
1099 * If we are here, this is a shared queue and not all TIDs timed-out.
1100 * Remove the ones that did.
1101 */
1102 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1103 int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
1104 u16 tid_bitmap;
1105
1106 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1107 mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
1108 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1109
1110 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1111
1112 /*
1113 * We need to take into account a situation in which a TXQ was
1114 * allocated to TID x, and then turned shared by adding TIDs y
1115 * and z. If TID x becomes inactive and is removed from the TXQ,
1116 * ownership must be given to one of the remaining TIDs.
1117 * This is mainly because if TID x continues - a new queue can't
1118 * be allocated for it as long as it is an owner of another TXQ.
1119 *
1120 * Mark this queue in the right bitmap, we'll send the command
1121 * to the firmware later.
1122 */
1123 if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1124 set_bit(queue, changetid_queues);
1125
1126 IWL_DEBUG_TX_QUEUES(mvm,
1127 "Removing inactive TID %d from shared Q:%d\n",
1128 tid, queue);
1129 }
1130
1131 IWL_DEBUG_TX_QUEUES(mvm,
1132 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1133 mvm->queue_info[queue].tid_bitmap);
1134
1135 /*
1136 * There may be different TIDs with the same mac queues, so make
1137 * sure all TIDs have existing corresponding mac queues enabled
1138 */
1139 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1140 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1141 mvm->hw_queue_to_mac80211[queue] |=
1142 BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
1143 }
1144
1145 /* If the queue is marked as shared - "unshare" it */
1146 if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1147 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1148 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1149 queue);
1150 set_bit(queue, unshare_queues);
1151 }
1152
1153 return false;
1154}
1155
1156/*
1157 * Check for inactivity - this includes checking if any queue
1158 * can be unshared and finding one (and only one) that can be
1159 * reused.
1160 * This function is also invoked as a sort of clean-up task,
1161 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
1162 *
1163 * Returns the queue number, or -ENOSPC.
1164 */
1165static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1166{
1167 unsigned long now = jiffies;
1168 unsigned long unshare_queues = 0;
1169 unsigned long changetid_queues = 0;
1170 int i, ret, free_queue = -ENOSPC;
1171
1172 lockdep_assert_held(&mvm->mutex);
1173
1174 if (iwl_mvm_has_new_tx_api(mvm))
1175 return -ENOSPC;
1176
1177 spin_lock_bh(&mvm->queue_info_lock);
1178
1179 rcu_read_lock();
1180
1181 /* we skip the CMD queue below by starting at 1 */
1182 BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1183
1184 for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1185 struct ieee80211_sta *sta;
1186 struct iwl_mvm_sta *mvmsta;
1187 u8 sta_id;
1188 int tid;
1189 unsigned long inactive_tid_bitmap = 0;
1190 unsigned long queue_tid_bitmap;
1191
1192 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1193 if (!queue_tid_bitmap)
1194 continue;
1195
1196 /* If TXQ isn't in active use anyway - nothing to do here... */
1197 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1198 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1199 continue;
1200
1201 /* Check to see if there are inactive TIDs on this queue */
1202 for_each_set_bit(tid, &queue_tid_bitmap,
1203 IWL_MAX_TID_COUNT + 1) {
1204 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1205 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1206 continue;
1207
1208 inactive_tid_bitmap |= BIT(tid);
1209 }
1210
1211 /* If all TIDs are active - finish check on this queue */
1212 if (!inactive_tid_bitmap)
1213 continue;
1214
1215 /*
1216 * If we are here - the queue hadn't been served recently and is
1217 * in use
1218 */
1219
1220 sta_id = mvm->queue_info[i].ra_sta_id;
1221 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1222
1223 /*
1224 * If the STA doesn't exist anymore, it isn't an error. It could
1225 * be that it was removed since getting the queues, and in this
1226 * case it should've inactivated its queues anyway.
1227 */
1228 if (IS_ERR_OR_NULL(sta))
1229 continue;
1230
1231 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1232
1233 /* this isn't so nice, but works OK due to the way we loop */
1234 spin_unlock(&mvm->queue_info_lock);
1235
1236 /* and we need this locking order */
1237 spin_lock(&mvmsta->lock);
1238 spin_lock(&mvm->queue_info_lock);
1239 ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1240 inactive_tid_bitmap,
1241 &unshare_queues,
1242 &changetid_queues);
1243 if (ret >= 0 && free_queue < 0)
1244 free_queue = ret;
1245 /* only unlock sta lock - we still need the queue info lock */
1246 spin_unlock(&mvmsta->lock);
1247 }
1248
1249 rcu_read_unlock();
1250 spin_unlock_bh(&mvm->queue_info_lock);
1251
1252	/* Reconfigure queues requiring reconfiguration */
1253 for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1254 iwl_mvm_unshare_queue(mvm, i);
1255 for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1256 iwl_mvm_change_queue_tid(mvm, i);
1257
1258 if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1259 ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
1260 alloc_for_sta);
1261 if (ret)
1262 return ret;
1263 }
1264
1265 return free_queue;
1266}
1267
707static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, 1268static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
708 struct ieee80211_sta *sta, u8 ac, int tid, 1269 struct ieee80211_sta *sta, u8 ac, int tid,
709 struct ieee80211_hdr *hdr) 1270 struct ieee80211_hdr *hdr)
@@ -719,7 +1280,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
719 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); 1280 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
720 u8 mac_queue = mvmsta->vif->hw_queue[ac]; 1281 u8 mac_queue = mvmsta->vif->hw_queue[ac];
721 int queue = -1; 1282 int queue = -1;
722 bool using_inactive_queue = false, same_sta = false;
723 unsigned long disable_agg_tids = 0; 1283 unsigned long disable_agg_tids = 0;
724 enum iwl_mvm_agg_state queue_state; 1284 enum iwl_mvm_agg_state queue_state;
725 bool shared_queue = false, inc_ssn; 1285 bool shared_queue = false, inc_ssn;
@@ -756,9 +1316,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
756 1316
757 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && 1317 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
758 (mvm->queue_info[mvmsta->reserved_queue].status == 1318 (mvm->queue_info[mvmsta->reserved_queue].status ==
759 IWL_MVM_QUEUE_RESERVED || 1319 IWL_MVM_QUEUE_RESERVED)) {
760 mvm->queue_info[mvmsta->reserved_queue].status ==
761 IWL_MVM_QUEUE_INACTIVE)) {
762 queue = mvmsta->reserved_queue; 1320 queue = mvmsta->reserved_queue;
763 mvm->queue_info[queue].reserved = true; 1321 mvm->queue_info[queue].reserved = true;
764 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); 1322 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
@@ -768,21 +1326,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
768 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 1326 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
769 IWL_MVM_DQA_MIN_DATA_QUEUE, 1327 IWL_MVM_DQA_MIN_DATA_QUEUE,
770 IWL_MVM_DQA_MAX_DATA_QUEUE); 1328 IWL_MVM_DQA_MAX_DATA_QUEUE);
1329 if (queue < 0) {
1330 spin_unlock_bh(&mvm->queue_info_lock);
771 1331
772 /* 1332 /* try harder - perhaps kill an inactive queue */
773 * Check if this queue is already allocated but inactive. 1333 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
774 * In such a case, we'll need to first free this queue before enabling 1334
775 * it again, so we'll mark it as reserved to make sure no new traffic 1335 spin_lock_bh(&mvm->queue_info_lock);
776 * arrives on it
777 */
778 if (queue > 0 &&
779 mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
780 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
781 using_inactive_queue = true;
782 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
783 IWL_DEBUG_TX_QUEUES(mvm,
784 "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
785 queue, mvmsta->sta_id, tid);
786 } 1336 }
787 1337
788 /* No free queue - we'll have to share */ 1338 /* No free queue - we'll have to share */
@@ -800,7 +1350,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
800 * This will allow avoiding re-acquiring the lock at the end of the 1350 * This will allow avoiding re-acquiring the lock at the end of the
801 * configuration. On error we'll mark it back as free. 1351 * configuration. On error we'll mark it back as free.
802 */ 1352 */
803 if ((queue > 0) && !shared_queue) 1353 if (queue > 0 && !shared_queue)
804 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; 1354 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
805 1355
806 spin_unlock_bh(&mvm->queue_info_lock); 1356 spin_unlock_bh(&mvm->queue_info_lock);
@@ -821,16 +1371,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
821 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || 1371 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
822 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); 1372 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
823 1373
824 /*
825 * If this queue was previously inactive (idle) - we need to free it
826 * first
827 */
828 if (using_inactive_queue) {
829 ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
830 if (ret)
831 return ret;
832 }
833
834 IWL_DEBUG_TX_QUEUES(mvm, 1374 IWL_DEBUG_TX_QUEUES(mvm,
835 "Allocating %squeue #%d to sta %d on tid %d\n", 1375 "Allocating %squeue #%d to sta %d on tid %d\n",
836 shared_queue ? "shared " : "", queue, 1376 shared_queue ? "shared " : "", queue,
@@ -874,7 +1414,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
874 if (inc_ssn) 1414 if (inc_ssn)
875 mvmsta->tid_data[tid].seq_number += 0x10; 1415 mvmsta->tid_data[tid].seq_number += 0x10;
876 mvmsta->tid_data[tid].txq_id = queue; 1416 mvmsta->tid_data[tid].txq_id = queue;
877 mvmsta->tid_data[tid].is_tid_active = true;
878 mvmsta->tfd_queue_msk |= BIT(queue); 1417 mvmsta->tfd_queue_msk |= BIT(queue);
879 queue_state = mvmsta->tid_data[tid].state; 1418 queue_state = mvmsta->tid_data[tid].state;
880 1419
@@ -909,129 +1448,6 @@ out_err:
909 return ret; 1448 return ret;
910} 1449}
911 1450
912static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
913{
914 struct iwl_scd_txq_cfg_cmd cmd = {
915 .scd_queue = queue,
916 .action = SCD_CFG_UPDATE_QUEUE_TID,
917 };
918 int tid;
919 unsigned long tid_bitmap;
920 int ret;
921
922 lockdep_assert_held(&mvm->mutex);
923
924 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
925 return;
926
927 spin_lock_bh(&mvm->queue_info_lock);
928 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
929 spin_unlock_bh(&mvm->queue_info_lock);
930
931 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
932 return;
933
934 /* Find any TID for queue */
935 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
936 cmd.tid = tid;
937 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
938
939 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
940 if (ret) {
941 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
942 queue, ret);
943 return;
944 }
945
946 spin_lock_bh(&mvm->queue_info_lock);
947 mvm->queue_info[queue].txq_tid = tid;
948 spin_unlock_bh(&mvm->queue_info_lock);
949 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
950 queue, tid);
951}
952
953static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
954{
955 struct ieee80211_sta *sta;
956 struct iwl_mvm_sta *mvmsta;
957 u8 sta_id;
958 int tid = -1;
959 unsigned long tid_bitmap;
960 unsigned int wdg_timeout;
961 int ssn;
962 int ret = true;
963
964 /* queue sharing is disabled on new TX path */
965 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
966 return;
967
968 lockdep_assert_held(&mvm->mutex);
969
970 spin_lock_bh(&mvm->queue_info_lock);
971 sta_id = mvm->queue_info[queue].ra_sta_id;
972 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
973 spin_unlock_bh(&mvm->queue_info_lock);
974
975 /* Find TID for queue, and make sure it is the only one on the queue */
976 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
977 if (tid_bitmap != BIT(tid)) {
978 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
979 queue, tid_bitmap);
980 return;
981 }
982
983 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
984 tid);
985
986 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
987 lockdep_is_held(&mvm->mutex));
988
989 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
990 return;
991
992 mvmsta = iwl_mvm_sta_from_mac80211(sta);
993 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
994
995 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
996
997 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
998 tid_to_mac80211_ac[tid], ssn,
999 wdg_timeout, true);
1000 if (ret) {
1001 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
1002 return;
1003 }
1004
1005 /* If aggs should be turned back on - do it */
1006 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1007 struct iwl_mvm_add_sta_cmd cmd = {0};
1008
1009 mvmsta->tid_disable_agg &= ~BIT(tid);
1010
1011 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1012 cmd.sta_id = mvmsta->sta_id;
1013 cmd.add_modify = STA_MODE_MODIFY;
1014 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1015 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1016 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1017
1018 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1019 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1020 if (!ret) {
1021 IWL_DEBUG_TX_QUEUES(mvm,
1022 "TXQ #%d is now aggregated again\n",
1023 queue);
1024
1025			/* Mark queue internally as aggregating again */
1026 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1027 }
1028 }
1029
1030 spin_lock_bh(&mvm->queue_info_lock);
1031 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1032 spin_unlock_bh(&mvm->queue_info_lock);
1033}
1034
1035static inline u8 iwl_mvm_tid_to_ac_queue(int tid) 1451static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1036{ 1452{
1037 if (tid == IWL_MAX_TID_COUNT) 1453 if (tid == IWL_MAX_TID_COUNT)
@@ -1100,47 +1516,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1100 struct ieee80211_sta *sta; 1516 struct ieee80211_sta *sta;
1101 struct iwl_mvm_sta *mvmsta; 1517 struct iwl_mvm_sta *mvmsta;
1102 unsigned long deferred_tid_traffic; 1518 unsigned long deferred_tid_traffic;
1103 int queue, sta_id, tid; 1519 int sta_id, tid;
1104
1105 /* Check inactivity of queues */
1106 iwl_mvm_inactivity_check(mvm);
1107 1520
1108 mutex_lock(&mvm->mutex); 1521 mutex_lock(&mvm->mutex);
1109 1522
1110 /* No queue reconfiguration in TVQM mode */ 1523 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1111 if (iwl_mvm_has_new_tx_api(mvm))
1112 goto alloc_queues;
1113
1114	/* Reconfigure queues requiring reconfiguration */
1115 for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
1116 bool reconfig;
1117 bool change_owner;
1118
1119 spin_lock_bh(&mvm->queue_info_lock);
1120 reconfig = (mvm->queue_info[queue].status ==
1121 IWL_MVM_QUEUE_RECONFIGURING);
1122 1524
1123 /*
1124 * We need to take into account a situation in which a TXQ was
1125 * allocated to TID x, and then turned shared by adding TIDs y
1126 * and z. If TID x becomes inactive and is removed from the TXQ,
1127 * ownership must be given to one of the remaining TIDs.
1128 * This is mainly because if TID x continues - a new queue can't
1129 * be allocated for it as long as it is an owner of another TXQ.
1130 */
1131 change_owner = !(mvm->queue_info[queue].tid_bitmap &
1132 BIT(mvm->queue_info[queue].txq_tid)) &&
1133 (mvm->queue_info[queue].status ==
1134 IWL_MVM_QUEUE_SHARED);
1135 spin_unlock_bh(&mvm->queue_info_lock);
1136
1137 if (reconfig)
1138 iwl_mvm_unshare_queue(mvm, queue);
1139 else if (change_owner)
1140 iwl_mvm_change_queue_owner(mvm, queue);
1141 }
1142
1143alloc_queues:
1144 /* Go over all stations with deferred traffic */ 1525 /* Go over all stations with deferred traffic */
1145 for_each_set_bit(sta_id, mvm->sta_deferred_frames, 1526 for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1146 IWL_MVM_STATION_COUNT) { 1527 IWL_MVM_STATION_COUNT) {
@@ -1167,23 +1548,19 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1167{ 1548{
1168 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1549 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1169 int queue; 1550 int queue;
1170 bool using_inactive_queue = false, same_sta = false;
1171 1551
1172 /* queue reserving is disabled on new TX path */ 1552 /* queue reserving is disabled on new TX path */
1173 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 1553 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1174 return 0; 1554 return 0;
1175 1555
1176 /* 1556 /* run the general cleanup/unsharing of queues */
1177 * Check for inactive queues, so we don't reach a situation where we 1557 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1178 * can't add a STA due to a shortage in queues that doesn't really exist
1179 */
1180 iwl_mvm_inactivity_check(mvm);
1181 1558
1182 spin_lock_bh(&mvm->queue_info_lock); 1559 spin_lock_bh(&mvm->queue_info_lock);
1183 1560
1184 /* Make sure we have free resources for this STA */ 1561 /* Make sure we have free resources for this STA */
1185 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && 1562 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1186 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && 1563 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1187 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == 1564 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1188 IWL_MVM_QUEUE_FREE)) 1565 IWL_MVM_QUEUE_FREE))
1189 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; 1566 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
@@ -1193,16 +1570,13 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1193 IWL_MVM_DQA_MAX_DATA_QUEUE); 1570 IWL_MVM_DQA_MAX_DATA_QUEUE);
1194 if (queue < 0) { 1571 if (queue < 0) {
1195 spin_unlock_bh(&mvm->queue_info_lock); 1572 spin_unlock_bh(&mvm->queue_info_lock);
1196 IWL_ERR(mvm, "No available queues for new station\n"); 1573 /* try again - this time kick out a queue if needed */
1197 return -ENOSPC; 1574 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1198 } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { 1575 if (queue < 0) {
1199 /* 1576 IWL_ERR(mvm, "No available queues for new station\n");
1200 * If this queue is already allocated but inactive we'll need to 1577 return -ENOSPC;
1201 * first free this queue before enabling it again, we'll mark 1578 }
1202 * it as reserved to make sure no new traffic arrives on it 1579 spin_lock_bh(&mvm->queue_info_lock);
1203 */
1204 using_inactive_queue = true;
1205 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
1206 } 1580 }
1207 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; 1581 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1208 1582
@@ -1210,9 +1584,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1210 1584
1211 mvmsta->reserved_queue = queue; 1585 mvmsta->reserved_queue = queue;
1212 1586
1213 if (using_inactive_queue)
1214 iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
1215
1216 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", 1587 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1217 queue, mvmsta->sta_id); 1588 queue, mvmsta->sta_id);
1218 1589
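
Both allocation paths in the sta.c hunks above now share one fallback pattern: look for a completely free queue first, and only if that fails let the reworked inactivity check reclaim one for the requesting station. Condensed from iwl_mvm_sta_alloc_queue() above - a sketch with error handling trimmed, not the verbatim patch:

	queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					IWL_MVM_DQA_MIN_DATA_QUEUE,
					IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* the inactivity check takes queue_info_lock itself */
		spin_unlock_bh(&mvm->queue_info_lock);

		/* try harder - perhaps kick out an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);

		spin_lock_bh(&mvm->queue_info_lock);
	}
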
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 0fc211108149..de1a0a2d8723 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -312,9 +312,6 @@ enum iwl_mvm_agg_state {
312 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that 312 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
313 * we are ready to finish the Tx AGG stop / start flow. 313 * we are ready to finish the Tx AGG stop / start flow.
314 * @tx_time: medium time consumed by this A-MPDU 314 * @tx_time: medium time consumed by this A-MPDU
315 * @is_tid_active: has this TID sent traffic in the last
316 * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
317 * field should be ignored.
318 * @tpt_meas_start: time of the throughput measurements start, is reset every HZ 315 * @tpt_meas_start: time of the throughput measurements start, is reset every HZ
319 * @tx_count_last: number of frames transmitted during the last second 316 * @tx_count_last: number of frames transmitted during the last second
320 * @tx_count: counts the number of frames transmitted since the last reset of 317 * @tx_count: counts the number of frames transmitted since the last reset of
@@ -332,7 +329,6 @@ struct iwl_mvm_tid_data {
332 u16 txq_id; 329 u16 txq_id;
333 u16 ssn; 330 u16 ssn;
334 u16 tx_time; 331 u16 tx_time;
335 bool is_tid_active;
336 unsigned long tpt_meas_start; 332 unsigned long tpt_meas_start;
337 u32 tx_count_last; 333 u32 tx_count_last;
338 u32 tx_count; 334 u32 tx_count;
@@ -572,8 +568,4 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
572void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 568void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
573void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); 569void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
574 570
575int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
576 int ac, int ssn, unsigned int wdg_timeout,
577 bool force);
578
579#endif /* __sta_h__ */ 571#endif /* __sta_h__ */
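
With is_tid_active gone from struct iwl_mvm_tid_data and hw_queue_refcount removed in the utils.c hunks below, a queue's occupancy is now derived from its TID bitmap alone. The helper below is hypothetical - the open-coded form appears in iwl_mvm_scd_queue_redirect() above - but it captures the idiom:

static inline bool iwl_mvm_txq_is_shared(struct iwl_mvm *mvm, int queue)
{
	lockdep_assert_held(&mvm->queue_info_lock);

	/*
	 * hweight16() counts the TIDs mapped to the queue: zero bits set
	 * means unused, more than one means shared - no refcount needed.
	 */
	return hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
}
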
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 99c64ea2619b..ec57682efe54 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1140,32 +1140,16 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
1140 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); 1140 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1141 1141
1142 /* Check if TXQ needs to be allocated or re-activated */ 1142 /* Check if TXQ needs to be allocated or re-activated */
1143 if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || 1143 if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
1144 !mvmsta->tid_data[tid].is_tid_active)) { 1144 iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
1145 /* If TXQ needs to be allocated... */
1146 if (txq_id == IWL_MVM_INVALID_QUEUE) {
1147 iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
1148 1145
1149 /* 1146 /*
1150 * The frame is now deferred, and the worker scheduled 1147 * The frame is now deferred, and the worker scheduled
1151 * will re-allocate it, so we can free it for now. 1148 * will re-allocate it, so we can free it for now.
1152 */ 1149 */
1153 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); 1150 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1154 spin_unlock(&mvmsta->lock); 1151 spin_unlock(&mvmsta->lock);
1155 return 0; 1152 return 0;
1156 }
1157
1158 /* queue should always be active in new TX path */
1159 WARN_ON(iwl_mvm_has_new_tx_api(mvm));
1160
1161 /* If we are here - TXQ exists and needs to be re-activated */
1162 spin_lock(&mvm->queue_info_lock);
1163 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1164 mvmsta->tid_data[tid].is_tid_active = true;
1165 spin_unlock(&mvm->queue_info_lock);
1166
1167 IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
1168 txq_id);
1169 } 1153 }
1170 1154
1171 if (!iwl_mvm_has_new_tx_api(mvm)) { 1155 if (!iwl_mvm_has_new_tx_api(mvm)) {
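
After this hunk the data path has two cases instead of three: a frame either already has a valid queue, or it is deferred to the allocation worker. The "allocated but inactive" middle state disappears because queues are no longer parked as INACTIVE; the inactivity check now reclaims them directly under mvm->mutex. The surviving branch, condensed from the hunk above:

	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
		iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);

		/* the frame is deferred; the worker will re-allocate it */
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		spin_unlock(&mvmsta->lock);
		return 0;
	}
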
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 6c14d3413bdc..818e1180bbdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -599,36 +599,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
599 iwl_mvm_dump_umac_error_log(mvm); 599 iwl_mvm_dump_umac_error_log(mvm);
600} 600}
601 601
602int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
603{
604 int i;
605
606 lockdep_assert_held(&mvm->queue_info_lock);
607
608 /* This should not be hit with new TX path */
609 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
610 return -ENOSPC;
611
612 /* Start by looking for a free queue */
613 for (i = minq; i <= maxq; i++)
614 if (mvm->queue_info[i].hw_queue_refcount == 0 &&
615 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
616 return i;
617
618 /*
619 * If no free queue found - settle for an inactive one to reconfigure
620 * Make sure that the inactive queue either already belongs to this STA,
621 * or that if it belongs to another one - it isn't the reserved queue
622 */
623 for (i = minq; i <= maxq; i++)
624 if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
625 (sta_id == mvm->queue_info[i].ra_sta_id ||
626 !mvm->queue_info[i].reserved))
627 return i;
628
629 return -ENOSPC;
630}
631
632int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, 602int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
633 int tid, int frame_limit, u16 ssn) 603 int tid, int frame_limit, u16 ssn)
634{ 604{
@@ -649,7 +619,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
649 return -EINVAL; 619 return -EINVAL;
650 620
651 spin_lock_bh(&mvm->queue_info_lock); 621 spin_lock_bh(&mvm->queue_info_lock);
652 if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0, 622 if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
653 "Trying to reconfig unallocated queue %d\n", queue)) { 623 "Trying to reconfig unallocated queue %d\n", queue)) {
654 spin_unlock_bh(&mvm->queue_info_lock); 624 spin_unlock_bh(&mvm->queue_info_lock);
655 return -ENXIO; 625 return -ENXIO;
@@ -665,229 +635,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
665 return ret; 635 return ret;
666} 636}
667 637
668static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
669 int mac80211_queue, u8 sta_id, u8 tid)
670{
671 bool enable_queue = true;
672
673 spin_lock_bh(&mvm->queue_info_lock);
674
675 /* Make sure this TID isn't already enabled */
676 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
677 spin_unlock_bh(&mvm->queue_info_lock);
678 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
679 queue, tid);
680 return false;
681 }
682
683 /* Update mappings and refcounts */
684 if (mvm->queue_info[queue].hw_queue_refcount > 0)
685 enable_queue = false;
686
687 if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
688 WARN(mac80211_queue >=
689 BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
690 "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
691 mac80211_queue, queue, sta_id, tid);
692 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
693 }
694
695 mvm->queue_info[queue].hw_queue_refcount++;
696 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
697 mvm->queue_info[queue].ra_sta_id = sta_id;
698
699 if (enable_queue) {
700 if (tid != IWL_MAX_TID_COUNT)
701 mvm->queue_info[queue].mac80211_ac =
702 tid_to_mac80211_ac[tid];
703 else
704 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
705
706 mvm->queue_info[queue].txq_tid = tid;
707 }
708
709 IWL_DEBUG_TX_QUEUES(mvm,
710 "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
711 queue, mvm->queue_info[queue].hw_queue_refcount,
712 mvm->hw_queue_to_mac80211[queue]);
713
714 spin_unlock_bh(&mvm->queue_info_lock);
715
716 return enable_queue;
717}
718
719int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
720 u8 sta_id, u8 tid, unsigned int timeout)
721{
722 int queue, size = IWL_DEFAULT_QUEUE_SIZE;
723
724 if (tid == IWL_MAX_TID_COUNT) {
725 tid = IWL_MGMT_TID;
726 size = IWL_MGMT_QUEUE_SIZE;
727 }
728 queue = iwl_trans_txq_alloc(mvm->trans,
729 cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
730 sta_id, tid, SCD_QUEUE_CFG, size, timeout);
731
732 if (queue < 0) {
733 IWL_DEBUG_TX_QUEUES(mvm,
734 "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
735 sta_id, tid, queue);
736 return queue;
737 }
738
739 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
740 queue, sta_id, tid);
741
742 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
743 IWL_DEBUG_TX_QUEUES(mvm,
744 "Enabling TXQ #%d (mac80211 map:0x%x)\n",
745 queue, mvm->hw_queue_to_mac80211[queue]);
746
747 return queue;
748}
749
750bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
751 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
752 unsigned int wdg_timeout)
753{
754 struct iwl_scd_txq_cfg_cmd cmd = {
755 .scd_queue = queue,
756 .action = SCD_CFG_ENABLE_QUEUE,
757 .window = cfg->frame_limit,
758 .sta_id = cfg->sta_id,
759 .ssn = cpu_to_le16(ssn),
760 .tx_fifo = cfg->fifo,
761 .aggregate = cfg->aggregate,
762 .tid = cfg->tid,
763 };
764 bool inc_ssn;
765
766 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
767 return false;
768
769 /* Send the enabling command if we need to */
770 if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
771 cfg->sta_id, cfg->tid))
772 return false;
773
774 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
775 NULL, wdg_timeout);
776 if (inc_ssn)
777 le16_add_cpu(&cmd.ssn, 1);
778
779 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
780 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
781
782 return inc_ssn;
783}
784
785int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
786 u8 tid, u8 flags)
787{
788 struct iwl_scd_txq_cfg_cmd cmd = {
789 .scd_queue = queue,
790 .action = SCD_CFG_DISABLE_QUEUE,
791 };
792 bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
793 int ret;
794
795 if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
796 return -EINVAL;
797
798 if (iwl_mvm_has_new_tx_api(mvm)) {
799 spin_lock_bh(&mvm->queue_info_lock);
800
801 if (remove_mac_queue)
802 mvm->hw_queue_to_mac80211[queue] &=
803 ~BIT(mac80211_queue);
804
805 spin_unlock_bh(&mvm->queue_info_lock);
806
807 iwl_trans_txq_free(mvm->trans, queue);
808
809 return 0;
810 }
811
812 spin_lock_bh(&mvm->queue_info_lock);
813
814 if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
815 spin_unlock_bh(&mvm->queue_info_lock);
816 return 0;
817 }
818
819 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
820
821 /*
822 * If there is another TID with the same AC - don't remove the MAC queue
823 * from the mapping
824 */
825 if (tid < IWL_MAX_TID_COUNT) {
826 unsigned long tid_bitmap =
827 mvm->queue_info[queue].tid_bitmap;
828 int ac = tid_to_mac80211_ac[tid];
829 int i;
830
831 for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
832 if (tid_to_mac80211_ac[i] == ac)
833 remove_mac_queue = false;
834 }
835 }
836
837 if (remove_mac_queue)
838 mvm->hw_queue_to_mac80211[queue] &=
839 ~BIT(mac80211_queue);
840 mvm->queue_info[queue].hw_queue_refcount--;
841
842 cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
843 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
844 if (cmd.action == SCD_CFG_DISABLE_QUEUE)
845 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
846
847 IWL_DEBUG_TX_QUEUES(mvm,
848 "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
849 queue,
850 mvm->queue_info[queue].hw_queue_refcount,
851 mvm->hw_queue_to_mac80211[queue]);
852
853 /* If the queue is still enabled - nothing left to do in this func */
854 if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
855 spin_unlock_bh(&mvm->queue_info_lock);
856 return 0;
857 }
858
859 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
860 cmd.tid = mvm->queue_info[queue].txq_tid;
861
862 /* Make sure queue info is correct even though we overwrite it */
863 WARN(mvm->queue_info[queue].hw_queue_refcount ||
864 mvm->queue_info[queue].tid_bitmap ||
865 mvm->hw_queue_to_mac80211[queue],
866 "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
867 queue, mvm->queue_info[queue].hw_queue_refcount,
868 mvm->hw_queue_to_mac80211[queue],
869 mvm->queue_info[queue].tid_bitmap);
870
871 /* If we are here - the queue is freed and we can zero out these vals */
872 mvm->queue_info[queue].hw_queue_refcount = 0;
873 mvm->queue_info[queue].tid_bitmap = 0;
874 mvm->hw_queue_to_mac80211[queue] = 0;
875
 876	/* Regardless of whether this is a reserved TXQ for a STA - mark it as false */
877 mvm->queue_info[queue].reserved = false;
878
879 spin_unlock_bh(&mvm->queue_info_lock);
880
881 iwl_trans_txq_disable(mvm->trans, queue, false);
882 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
883 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
884
885 if (ret)
886 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
887 queue, ret);
888 return ret;
889}
890
891/** 638/**
892 * iwl_mvm_send_lq_cmd() - Send link quality command 639 * iwl_mvm_send_lq_cmd() - Send link quality command
893 * @sync: This command can be sent synchronously. 640 * @sync: This command can be sent synchronously.
@@ -1255,171 +1002,6 @@ out:
1255 ieee80211_connection_loss(vif); 1002 ieee80211_connection_loss(vif);
1256} 1003}
1257 1004
1258/*
1259 * Remove inactive TIDs of a given queue.
1260 * If all queue TIDs are inactive - mark the queue as inactive
1261 * If only some of the queue TIDs are inactive - unmap them from the queue
1262 */
1263static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1264 struct iwl_mvm_sta *mvmsta, int queue,
1265 unsigned long tid_bitmap)
1266{
1267 int tid;
1268
1269 lockdep_assert_held(&mvmsta->lock);
1270 lockdep_assert_held(&mvm->queue_info_lock);
1271
1272 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1273 return;
1274
1275 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1276 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1277 /* If some TFDs are still queued - don't mark TID as inactive */
1278 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1279 tid_bitmap &= ~BIT(tid);
1280
1281 /* Don't mark as inactive any TID that has an active BA */
1282 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1283 tid_bitmap &= ~BIT(tid);
1284 }
1285
1286 /* If all TIDs in the queue are inactive - mark queue as inactive. */
1287 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1288 mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
1289
1290 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
1291 mvmsta->tid_data[tid].is_tid_active = false;
1292
1293 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
1294 queue);
1295 return;
1296 }
1297
1298 /*
1299 * If we are here, this is a shared queue and not all TIDs timed-out.
1300 * Remove the ones that did.
1301 */
1302 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1303 int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
1304
1305 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1306 mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
1307 mvm->queue_info[queue].hw_queue_refcount--;
1308 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1309 mvmsta->tid_data[tid].is_tid_active = false;
1310
1311 IWL_DEBUG_TX_QUEUES(mvm,
1312 "Removing inactive TID %d from shared Q:%d\n",
1313 tid, queue);
1314 }
1315
1316 IWL_DEBUG_TX_QUEUES(mvm,
1317 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1318 mvm->queue_info[queue].tid_bitmap);
1319
1320 /*
1321 * There may be different TIDs with the same mac queues, so make
1322 * sure all TIDs have existing corresponding mac queues enabled
1323 */
1324 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1325 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1326 mvm->hw_queue_to_mac80211[queue] |=
1327 BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
1328 }
1329
1330 /* If the queue is marked as shared - "unshare" it */
1331 if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
1332 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1333 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
1334 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1335 queue);
1336 }
1337}
1338
1339void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
1340{
1341 unsigned long timeout_queues_map = 0;
1342 unsigned long now = jiffies;
1343 int i;
1344
1345 if (iwl_mvm_has_new_tx_api(mvm))
1346 return;
1347
1348 spin_lock_bh(&mvm->queue_info_lock);
1349 for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
1350 if (mvm->queue_info[i].hw_queue_refcount > 0)
1351 timeout_queues_map |= BIT(i);
1352 spin_unlock_bh(&mvm->queue_info_lock);
1353
1354 rcu_read_lock();
1355
1356 /*
 1357 * If a queue times out - mark it as INACTIVE (don't remove it right
 1358 * away if we don't have to). This is an optimization in case traffic
 1359 * comes later and we don't HAVE to use a currently inactive queue
1360 */
1361 for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
1362 struct ieee80211_sta *sta;
1363 struct iwl_mvm_sta *mvmsta;
1364 u8 sta_id;
1365 int tid;
1366 unsigned long inactive_tid_bitmap = 0;
1367 unsigned long queue_tid_bitmap;
1368
1369 spin_lock_bh(&mvm->queue_info_lock);
1370 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1371
1372 /* If TXQ isn't in active use anyway - nothing to do here... */
1373 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1374 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
1375 spin_unlock_bh(&mvm->queue_info_lock);
1376 continue;
1377 }
1378
1379 /* Check to see if there are inactive TIDs on this queue */
1380 for_each_set_bit(tid, &queue_tid_bitmap,
1381 IWL_MAX_TID_COUNT + 1) {
1382 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1383 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1384 continue;
1385
1386 inactive_tid_bitmap |= BIT(tid);
1387 }
1388 spin_unlock_bh(&mvm->queue_info_lock);
1389
1390 /* If all TIDs are active - finish check on this queue */
1391 if (!inactive_tid_bitmap)
1392 continue;
1393
1394 /*
 1395 * If we are here - the queue hasn't been served recently but is
 1396 * still in use
1397 */
1398
1399 sta_id = mvm->queue_info[i].ra_sta_id;
1400 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1401
1402 /*
1403 * If the STA doesn't exist anymore, it isn't an error. It could
1404 * be that it was removed since getting the queues, and in this
1405 * case it should've inactivated its queues anyway.
1406 */
1407 if (IS_ERR_OR_NULL(sta))
1408 continue;
1409
1410 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1411
1412 spin_lock_bh(&mvmsta->lock);
1413 spin_lock(&mvm->queue_info_lock);
1414 iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1415 inactive_tid_bitmap);
1416 spin_unlock(&mvm->queue_info_lock);
1417 spin_unlock_bh(&mvmsta->lock);
1418 }
1419
1420 rcu_read_unlock();
1421}
1422
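
The helpers deleted above implement a two-phase inactivity sweep: snapshot which queues are in use under the spinlock, then compare each TID's last-use time against a deadline and unmap or deactivate what expired. A minimal, self-contained sketch of that timeout-bitmap idiom (hypothetical names, not the driver's structures):

#include <linux/bitops.h>
#include <linux/jiffies.h>

#define MAX_QUEUES 32

struct queue_state {
        unsigned long last_use[MAX_QUEUES];     /* jiffies at last frame */
        unsigned long in_use_map;               /* bit i set: queue i active */
};

static unsigned long find_expired(struct queue_state *qs, unsigned long timeout)
{
        unsigned long expired = 0, now = jiffies;
        int i;

        /* same shape as above: sweep only in-use queues, expire stale ones */
        for_each_set_bit(i, &qs->in_use_map, MAX_QUEUES)
                if (time_after(now, qs->last_use[i] + timeout))
                        expired |= BIT(i);

        return expired;
}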
1423void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, 1005void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
1424 struct ieee80211_vif *vif, 1006 struct ieee80211_vif *vif,
1425 const struct ieee80211_sta *sta, 1007 const struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index b71cf55480fc..e880f69eac26 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -330,7 +330,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
330 goto out_err; 330 goto out_err;
331 } 331 }
332 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); 332 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
333 trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len); 333 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
334 /* add this subframe's headers' length to the tx_cmd */ 334 /* add this subframe's headers' length to the tx_cmd */
335 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 335 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
336 336
@@ -347,8 +347,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
347 goto out_err; 347 goto out_err;
348 } 348 }
349 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); 349 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
350 trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, 350 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
351 tb_len); 351 tb_len);
352 352
353 data_left -= tb_len; 353 data_left -= tb_len;
354 tso_build_data(skb, &tso, tb_len); 354 tso_build_data(skb, &tso, tb_len);
@@ -438,6 +438,9 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
438 return -ENOMEM; 438 return -ENOMEM;
439 tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, 439 tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
440 skb_frag_size(frag)); 440 skb_frag_size(frag));
441 trace_iwlwifi_dev_tx_tb(trans->dev, skb,
442 skb_frag_address(frag),
443 skb_frag_size(frag));
441 if (tb_idx < 0) 444 if (tb_idx < 0)
442 return tb_idx; 445 return tb_idx;
443 446
@@ -454,7 +457,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
454 struct sk_buff *skb, 457 struct sk_buff *skb,
455 struct iwl_cmd_meta *out_meta, 458 struct iwl_cmd_meta *out_meta,
456 int hdr_len, 459 int hdr_len,
457 int tx_cmd_len) 460 int tx_cmd_len,
461 bool pad)
458{ 462{
459 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 463 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
460 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); 464 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
@@ -478,7 +482,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
478 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - 482 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
479 IWL_FIRST_TB_SIZE; 483 IWL_FIRST_TB_SIZE;
480 484
481 tb1_len = ALIGN(len, 4); 485 if (pad)
486 tb1_len = ALIGN(len, 4);
487 else
488 tb1_len = len;
482 489
483 /* map the data for TB1 */ 490 /* map the data for TB1 */
484 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 491 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -486,6 +493,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
486 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 493 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
487 goto out_err; 494 goto out_err;
488 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); 495 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
496 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
497 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
489 498
490 /* set up TFD's third entry to point to remainder of skb's head */ 499 /* set up TFD's third entry to point to remainder of skb's head */
491 tb2_len = skb_headlen(skb) - hdr_len; 500 tb2_len = skb_headlen(skb) - hdr_len;
@@ -496,15 +505,14 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
496 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 505 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
497 goto out_err; 506 goto out_err;
498 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); 507 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
508 trace_iwlwifi_dev_tx_tb(trans->dev, skb,
509 skb->data + hdr_len,
510 tb2_len);
499 } 511 }
500 512
501 if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) 513 if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
502 goto out_err; 514 goto out_err;
503 515
504 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
505 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
506 trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
507
508 return tfd; 516 return tfd;
509 517
510out_err: 518out_err:
@@ -551,7 +559,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
551 out_meta, hdr_len, len); 559 out_meta, hdr_len, len);
552 560
553 return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, 561 return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
554 hdr_len, len); 562 hdr_len, len, !amsdu);
555} 563}
556 564
557int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, 565int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
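
The new pad flag decides whether TB1 is rounded up to a 4-byte boundary; the build_tfd caller passes !amsdu, so A-MSDU frames skip the padding, presumably because it would disturb the subframe layout. ALIGN() rounds up to the next multiple of its power-of-two second argument - a one-line illustration, assuming a local len:

        /* ALIGN(x, a) == ((x + a - 1) & ~(a - 1)) for power-of-two a */
        tb1_len = pad ? ALIGN(len, 4) : len;    /* ALIGN(13, 4) == 16 */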
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index f227b91098c9..87b7225fe289 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1994,6 +1994,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
1994 head_tb_len, DMA_TO_DEVICE); 1994 head_tb_len, DMA_TO_DEVICE);
1995 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 1995 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1996 return -EINVAL; 1996 return -EINVAL;
1997 trace_iwlwifi_dev_tx_tb(trans->dev, skb,
1998 skb->data + hdr_len,
1999 head_tb_len);
1997 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); 2000 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
1998 } 2001 }
1999 2002
@@ -2011,6 +2014,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
2011 2014
2012 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 2015 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
2013 return -EINVAL; 2016 return -EINVAL;
2017 trace_iwlwifi_dev_tx_tb(trans->dev, skb,
2018 skb_frag_address(frag),
2019 skb_frag_size(frag));
2014 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 2020 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
2015 skb_frag_size(frag), false); 2021 skb_frag_size(frag), false);
2016 if (tb_idx < 0) 2022 if (tb_idx < 0)
@@ -2190,8 +2196,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2190 } 2196 }
2191 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, 2197 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
2192 hdr_tb_len, false); 2198 hdr_tb_len, false);
2193 trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, 2199 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
2194 hdr_tb_len); 2200 hdr_tb_len);
2195 /* add this subframe's headers' length to the tx_cmd */ 2201 /* add this subframe's headers' length to the tx_cmd */
2196 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 2202 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
2197 2203
@@ -2216,8 +2222,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2216 2222
2217 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 2223 iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
2218 size, false); 2224 size, false);
2219 trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, 2225 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
2220 size); 2226 size);
2221 2227
2222 data_left -= size; 2228 data_left -= size;
2223 tso_build_data(skb, &tso, size); 2229 tso_build_data(skb, &tso, size);
@@ -2398,6 +2404,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2398 goto out_err; 2404 goto out_err;
2399 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); 2405 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
2400 2406
2407 trace_iwlwifi_dev_tx(trans->dev, skb,
2408 iwl_pcie_get_tfd(trans, txq,
2409 txq->write_ptr),
2410 trans_pcie->tfd_size,
2411 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2412 hdr_len);
2413
2401 /* 2414 /*
2402 * If gso_size wasn't set, don't give the frame "amsdu treatment" 2415 * If gso_size wasn't set, don't give the frame "amsdu treatment"
2403 * (adding subframes, etc.). 2416 * (adding subframes, etc.).
@@ -2421,14 +2434,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2421 out_meta))) 2434 out_meta)))
2422 goto out_err; 2435 goto out_err;
2423 } 2436 }
2424
2425 trace_iwlwifi_dev_tx(trans->dev, skb,
2426 iwl_pcie_get_tfd(trans, txq,
2427 txq->write_ptr),
2428 trans_pcie->tfd_size,
2429 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2430 hdr_len);
2431 trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
2432 } 2437 }
2433 2438
2434 /* building the A-MSDU might have changed this data, so memcpy it now */ 2439 /* building the A-MSDU might have changed this data, so memcpy it now */
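
Across the tx.c and tx-gen2.c hunks, tracing converges on one per-buffer tracepoint: trace_iwlwifi_dev_tx_tb() now takes the owning skb, replacing the TSO-specific trace_iwlwifi_dev_tx_tso_chunk() and the separate trace_iwlwifi_dev_tx_data() call. A sketch of the resulting shape of each mapping site (error paths elided; calls as they appear in the hunks above):

        tb_phys = dma_map_single(trans->dev, addr, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                return -EINVAL;
        /* report the buffer against its skb before attaching it */
        trace_iwlwifi_dev_tx_tb(trans->dev, skb, addr, len);
        iwl_pcie_txq_build_tfd(trans, txq, tb_phys, len, false);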
diff --git a/drivers/net/wireless/marvell/libertas/if_cs.c b/drivers/net/wireless/marvell/libertas/if_cs.c
index 7d88223f890b..cebf03c6a622 100644
--- a/drivers/net/wireless/marvell/libertas/if_cs.c
+++ b/drivers/net/wireless/marvell/libertas/if_cs.c
@@ -900,8 +900,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
900 900
901 /* Make this card known to the libertas driver */ 901 /* Make this card known to the libertas driver */
902 priv = lbs_add_card(card, &p_dev->dev); 902 priv = lbs_add_card(card, &p_dev->dev);
903 if (!priv) { 903 if (IS_ERR(priv)) {
904 ret = -ENOMEM; 904 ret = PTR_ERR(priv);
905 goto out2; 905 goto out2;
906 } 906 }
907 907
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 39bf85d0ade0..8d98e7fdd27c 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1206,8 +1206,8 @@ static int if_sdio_probe(struct sdio_func *func,
1206 1206
1207 1207
1208 priv = lbs_add_card(card, &func->dev); 1208 priv = lbs_add_card(card, &func->dev);
1209 if (!priv) { 1209 if (IS_ERR(priv)) {
1210 ret = -ENOMEM; 1210 ret = PTR_ERR(priv);
1211 goto free; 1211 goto free;
1212 } 1212 }
1213 1213
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index e9aec6cb1105..504d6e096476 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1146,8 +1146,8 @@ static int if_spi_probe(struct spi_device *spi)
1146 * This will call alloc_etherdev. 1146 * This will call alloc_etherdev.
1147 */ 1147 */
1148 priv = lbs_add_card(card, &spi->dev); 1148 priv = lbs_add_card(card, &spi->dev);
1149 if (!priv) { 1149 if (IS_ERR(priv)) {
1150 err = -ENOMEM; 1150 err = PTR_ERR(priv);
1151 goto free_card; 1151 goto free_card;
1152 } 1152 }
1153 card->priv = priv; 1153 card->priv = priv;
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index c67a8e7be310..220dcdee8d2b 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -254,8 +254,11 @@ static int if_usb_probe(struct usb_interface *intf,
254 goto dealloc; 254 goto dealloc;
255 } 255 }
256 256
257 if (!(priv = lbs_add_card(cardp, &intf->dev))) 257 priv = lbs_add_card(cardp, &intf->dev);
258 if (IS_ERR(priv)) {
259 r = PTR_ERR(priv);
258 goto err_add_card; 260 goto err_add_card;
261 }
259 262
260 cardp->priv = priv; 263 cardp->priv = priv;
261 264
@@ -456,8 +459,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
456 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, 459 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
457 cardp); 460 cardp);
458 461
459 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
460
461 lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); 462 lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
462 if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) { 463 if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) {
463 lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret); 464 lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index f22e1c220cba..f7db60bc7c7f 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -907,25 +907,29 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
907 struct net_device *dev; 907 struct net_device *dev;
908 struct wireless_dev *wdev; 908 struct wireless_dev *wdev;
909 struct lbs_private *priv = NULL; 909 struct lbs_private *priv = NULL;
910 int err;
910 911
911 /* Allocate an Ethernet device and register it */ 912 /* Allocate an Ethernet device and register it */
912 wdev = lbs_cfg_alloc(dmdev); 913 wdev = lbs_cfg_alloc(dmdev);
913 if (IS_ERR(wdev)) { 914 if (IS_ERR(wdev)) {
915 err = PTR_ERR(wdev);
914 pr_err("cfg80211 init failed\n"); 916 pr_err("cfg80211 init failed\n");
915 goto done; 917 goto err_cfg;
916 } 918 }
917 919
918 wdev->iftype = NL80211_IFTYPE_STATION; 920 wdev->iftype = NL80211_IFTYPE_STATION;
919 priv = wdev_priv(wdev); 921 priv = wdev_priv(wdev);
920 priv->wdev = wdev; 922 priv->wdev = wdev;
921 923
922 if (lbs_init_adapter(priv)) { 924 err = lbs_init_adapter(priv);
925 if (err) {
923 pr_err("failed to initialize adapter structure\n"); 926 pr_err("failed to initialize adapter structure\n");
924 goto err_wdev; 927 goto err_wdev;
925 } 928 }
926 929
927 dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup); 930 dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
928 if (!dev) { 931 if (!dev) {
932 err = -ENOMEM;
929 dev_err(dmdev, "no memory for network device instance\n"); 933 dev_err(dmdev, "no memory for network device instance\n");
930 goto err_adapter; 934 goto err_adapter;
931 } 935 }
@@ -949,6 +953,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
949 init_waitqueue_head(&priv->waitq); 953 init_waitqueue_head(&priv->waitq);
950 priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main"); 954 priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
951 if (IS_ERR(priv->main_thread)) { 955 if (IS_ERR(priv->main_thread)) {
956 err = PTR_ERR(priv->main_thread);
952 lbs_deb_thread("Error creating main thread.\n"); 957 lbs_deb_thread("Error creating main thread.\n");
953 goto err_ndev; 958 goto err_ndev;
954 } 959 }
@@ -961,7 +966,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
961 priv->wol_gap = 20; 966 priv->wol_gap = 20;
962 priv->ehs_remove_supported = true; 967 priv->ehs_remove_supported = true;
963 968
964 goto done; 969 return priv;
965 970
966 err_ndev: 971 err_ndev:
967 free_netdev(dev); 972 free_netdev(dev);
@@ -972,10 +977,8 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
972 err_wdev: 977 err_wdev:
973 lbs_cfg_free(priv); 978 lbs_cfg_free(priv);
974 979
975 priv = NULL; 980 err_cfg:
976 981 return ERR_PTR(err);
977done:
978 return priv;
979} 982}
980EXPORT_SYMBOL_GPL(lbs_add_card); 983EXPORT_SYMBOL_GPL(lbs_add_card);
981 984
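
The libertas hunks above are one conversion: lbs_add_card() now returns ERR_PTR(err) instead of NULL, so callers propagate the real errno rather than guessing -ENOMEM. A minimal sketch of the kernel's ERR_PTR convention (alloc_thing()/struct thing are hypothetical, not libertas API):

#include <linux/err.h>
#include <linux/slab.h>

struct thing { int x; };

static struct thing *alloc_thing(void)
{
        struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return ERR_PTR(-ENOMEM);        /* errno encoded in pointer */
        return t;
}

static int use_thing(void)
{
        struct thing *t = alloc_thing();

        if (IS_ERR(t))
                return PTR_ERR(t);      /* recover and propagate the errno */
        /* ... */
        return 0;
}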
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 30a5d928e655..1d6bbce76041 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -79,6 +79,7 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
79 .copy = mt76_mmio_copy, 79 .copy = mt76_mmio_copy,
80 .wr_rp = mt76_mmio_wr_rp, 80 .wr_rp = mt76_mmio_wr_rp,
81 .rd_rp = mt76_mmio_rd_rp, 81 .rd_rp = mt76_mmio_rd_rp,
82 .type = MT76_BUS_MMIO,
82 }; 83 };
83 84
84 dev->bus = &mt76_mmio_ops; 85 dev->bus = &mt76_mmio_ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index f723a07cab29..3bfa7f5e3513 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -38,6 +38,11 @@ struct mt76_reg_pair {
38 u32 value; 38 u32 value;
39}; 39};
40 40
41enum mt76_bus_type {
42 MT76_BUS_MMIO,
43 MT76_BUS_USB,
44};
45
41struct mt76_bus_ops { 46struct mt76_bus_ops {
42 u32 (*rr)(struct mt76_dev *dev, u32 offset); 47 u32 (*rr)(struct mt76_dev *dev, u32 offset);
43 void (*wr)(struct mt76_dev *dev, u32 offset, u32 val); 48 void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@@ -48,8 +53,12 @@ struct mt76_bus_ops {
48 const struct mt76_reg_pair *rp, int len); 53 const struct mt76_reg_pair *rp, int len);
49 int (*rd_rp)(struct mt76_dev *dev, u32 base, 54 int (*rd_rp)(struct mt76_dev *dev, u32 base,
50 struct mt76_reg_pair *rp, int len); 55 struct mt76_reg_pair *rp, int len);
56 enum mt76_bus_type type;
51}; 57};
52 58
59#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
60#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
61
53enum mt76_txq_id { 62enum mt76_txq_id {
54 MT_TXQ_VO = IEEE80211_AC_VO, 63 MT_TXQ_VO = IEEE80211_AC_VO,
55 MT_TXQ_VI = IEEE80211_AC_VI, 64 MT_TXQ_VI = IEEE80211_AC_VI,
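
The new enum mt76_bus_type plus the mt76_is_usb()/mt76_is_mmio() helpers let code shared between the USB and PCIe mt76x0 variants branch on the transport at run time instead of inferring it from MCU state; the phy.c hunks later in this diff use exactly this. A sketch of the dispatch pattern (via_mcu()/via_csr() are hypothetical):

static int rf_write(struct mt76x02_dev *dev, u32 offset, u8 val)
{
        if (mt76_is_usb(dev))             /* register access goes via MCU */
                return via_mcu(dev, offset, val);
        return via_csr(dev, offset, val); /* direct MMIO CSR access */
}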
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
deleted file mode 100644
index 891ce1c3461f..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
+++ /dev/null
@@ -1,126 +0,0 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __MT76X0U_DMA_H
16#define __MT76X0U_DMA_H
17
18#include <asm/unaligned.h>
19#include <linux/skbuff.h>
20
21#define MT_DMA_HDR_LEN 4
22#define MT_RX_INFO_LEN 4
23#define MT_FCE_INFO_LEN 4
24#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
25
26/* Common Tx DMA descriptor fields */
27#define MT_TXD_INFO_LEN GENMASK(15, 0)
28#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
29#define MT_TXD_INFO_TYPE GENMASK(31, 30)
30
31/* Tx DMA MCU command specific flags */
32#define MT_TXD_CMD_SEQ GENMASK(19, 16)
33#define MT_TXD_CMD_TYPE GENMASK(26, 20)
34
35enum mt76_msg_port {
36 WLAN_PORT,
37 CPU_RX_PORT,
38 CPU_TX_PORT,
39 HOST_PORT,
40 VIRTUAL_CPU_RX_PORT,
41 VIRTUAL_CPU_TX_PORT,
42 DISCARD,
43};
44
45enum mt76_info_type {
46 DMA_PACKET,
47 DMA_COMMAND,
48};
49
50/* Tx DMA packet specific flags */
51#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
52#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
53#define MT_TXD_PKT_INFO_80211 BIT(19)
54#define MT_TXD_PKT_INFO_TSO BIT(20)
55#define MT_TXD_PKT_INFO_CSO BIT(21)
56#define MT_TXD_PKT_INFO_WIV BIT(24)
57#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
58
59enum mt76_qsel {
60 MT_QSEL_MGMT,
61 MT_QSEL_HCCA,
62 MT_QSEL_EDCA,
63 MT_QSEL_EDCA_2,
64};
65
66
67static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
68 enum mt76_msg_port d_port,
69 enum mt76_info_type type, u32 flags)
70{
71 u32 info;
72
73 /* Buffer layout:
74 * | 4B | xfer len | pad | 4B |
75 * | TXINFO | pkt/cmd | zero pad to 4B | zero |
76 *
77 * length field of TXINFO should be set to 'xfer len'.
78 */
79
80 info = flags |
81 FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
82 FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
83 FIELD_PREP(MT_TXD_INFO_TYPE, type);
84
85 put_unaligned_le32(info, skb_push(skb, sizeof(info)));
86 return skb_put_padto(skb, round_up(skb->len, 4) + 4);
87}
88
89static inline int
90mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
91{
92 flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
93 return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
94}
95
96/* Common Rx DMA descriptor fields */
97#define MT_RXD_INFO_LEN GENMASK(13, 0)
98#define MT_RXD_INFO_PCIE_INTR BIT(24)
99#define MT_RXD_INFO_QSEL GENMASK(26, 25)
100#define MT_RXD_INFO_PORT GENMASK(29, 27)
101#define MT_RXD_INFO_TYPE GENMASK(31, 30)
102
103/* Rx DMA packet specific flags */
104#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
105#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
106#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
107#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
108#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
109#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
110
111/* Rx DMA MCU command specific flags */
112#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
113#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
114#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
115
116enum mt76_evt_type {
117 CMD_DONE,
118 CMD_ERROR,
119 CMD_RETRY,
120 EVENT_PWR_RSP,
121 EVENT_WOW_RSP,
122 EVENT_CARRIER_DETECT_RSP,
123 EVENT_DFS_DETECT_RSP,
124};
125
126#endif
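
The deleted header above packed the 4-byte TXINFO word with FIELD_PREP() over GENMASK()-defined fields before pushing it in front of the frame. A minimal sketch of that bitfield-packing idiom, assuming local field definitions:

#include <linux/bitfield.h>
#include <linux/kernel.h>

#define INFO_LEN        GENMASK(15, 0)
#define INFO_D_PORT     GENMASK(29, 27)
#define INFO_TYPE       GENMASK(31, 30)

static u32 pack_txinfo(u32 len, u32 port, u32 type)
{
        /* each FIELD_PREP() shifts the value into its mask's position */
        return FIELD_PREP(INFO_LEN, round_up(len, 4)) |
               FIELD_PREP(INFO_D_PORT, port) |
               FIELD_PREP(INFO_TYPE, type);
}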
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 5735038c0e2d..ab4fd6e0f23a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -31,8 +31,8 @@ mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
31 int ret, i; 31 int ret, i;
32 u32 start = 0, end = 0, cnt_free; 32 u32 start = 0, end = 0, cnt_free;
33 33
34 ret = mt76x02_get_efuse_data(&dev->mt76, MT_EE_USAGE_MAP_START, 34 ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data,
35 data, sizeof(data), MT_EE_PHYSICAL_READ); 35 sizeof(data), MT_EE_PHYSICAL_READ);
36 if (ret) 36 if (ret)
37 return ret; 37 return ret;
38 38
@@ -55,10 +55,10 @@ mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
55 55
56static void mt76x0_set_chip_cap(struct mt76x02_dev *dev) 56static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
57{ 57{
58 u16 nic_conf0 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0); 58 u16 nic_conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
59 u16 nic_conf1 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1); 59 u16 nic_conf1 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
60 60
61 mt76x02_eeprom_parse_hw_cap(&dev->mt76); 61 mt76x02_eeprom_parse_hw_cap(dev);
62 dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n", 62 dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
63 dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz); 63 dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
64 64
@@ -86,7 +86,7 @@ static void mt76x0_set_temp_offset(struct mt76x02_dev *dev)
86{ 86{
87 u8 val; 87 u8 val;
88 88
89 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_2G_TARGET_POWER) >> 8; 89 val = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER) >> 8;
90 if (mt76x02_field_valid(val)) 90 if (mt76x02_field_valid(val))
91 dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8); 91 dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8);
92 else 92 else
@@ -98,12 +98,12 @@ static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
98 struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx; 98 struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
99 u8 val; 99 u8 val;
100 100
101 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_FREQ_OFFSET); 101 val = mt76x02_eeprom_get(dev, MT_EE_FREQ_OFFSET);
102 if (!mt76x02_field_valid(val)) 102 if (!mt76x02_field_valid(val))
103 val = 0; 103 val = 0;
104 caldata->freq_offset = val; 104 caldata->freq_offset = val;
105 105
106 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TSSI_BOUND4) >> 8; 106 val = mt76x02_eeprom_get(dev, MT_EE_TSSI_BOUND4) >> 8;
107 if (!mt76x02_field_valid(val)) 107 if (!mt76x02_field_valid(val))
108 val = 0; 108 val = 0;
109 109
@@ -118,10 +118,8 @@ void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
118 u16 rssi_offset; 118 u16 rssi_offset;
119 int i; 119 int i;
120 120
121 mt76x02_get_rx_gain(&dev->mt76, chan->band, &rssi_offset, 121 mt76x02_get_rx_gain(dev, chan->band, &rssi_offset, &lna_2g, lna_5g);
122 &lna_2g, lna_5g); 122 caldata->lna_gain = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
123 caldata->lna_gain = mt76x02_get_lna_gain(&dev->mt76, &lna_2g,
124 lna_5g, chan);
125 123
126 for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) { 124 for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) {
127 val = rssi_offset >> (8 * i); 125 val = rssi_offset >> (8 * i);
@@ -132,12 +130,12 @@ void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
132 } 130 }
133} 131}
134 132
135static s8 mt76x0_get_delta(struct mt76_dev *dev) 133static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
136{ 134{
137 struct cfg80211_chan_def *chandef = &dev->chandef; 135 struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
138 u8 val; 136 u8 val;
139 137
140 if (mt76x02_tssi_enabled(dev)) 138 if (mt76x0_tssi_enabled(dev))
141 return 0; 139 return 0;
142 140
143 if (chandef->width == NL80211_CHAN_WIDTH_80) { 141 if (chandef->width == NL80211_CHAN_WIDTH_80) {
@@ -162,54 +160,54 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
162 struct ieee80211_channel *chan = dev->mt76.chandef.chan; 160 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
163 bool is_2ghz = chan->band == NL80211_BAND_2GHZ; 161 bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
164 struct mt76_rate_power *t = &dev->mt76.rate_power; 162 struct mt76_rate_power *t = &dev->mt76.rate_power;
165 s8 delta = mt76x0_get_delta(&dev->mt76); 163 s8 delta = mt76x0_get_delta(dev);
166 u16 val, addr; 164 u16 val, addr;
167 165
168 memset(t, 0, sizeof(*t)); 166 memset(t, 0, sizeof(*t));
169 167
170 /* cck 1M, 2M, 5.5M, 11M */ 168 /* cck 1M, 2M, 5.5M, 11M */
171 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_BYRATE_BASE); 169 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_BYRATE_BASE);
172 t->cck[0] = t->cck[1] = s6_to_s8(val); 170 t->cck[0] = t->cck[1] = s6_to_s8(val);
173 t->cck[2] = t->cck[3] = s6_to_s8(val >> 8); 171 t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
174 172
175 /* ofdm 6M, 9M, 12M, 18M */ 173 /* ofdm 6M, 9M, 12M, 18M */
176 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120; 174 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120;
177 val = mt76x02_eeprom_get(&dev->mt76, addr); 175 val = mt76x02_eeprom_get(dev, addr);
178 t->ofdm[0] = t->ofdm[1] = s6_to_s8(val); 176 t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
179 t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8); 177 t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
180 178
181 /* ofdm 24M, 36M, 48M, 54M */ 179 /* ofdm 24M, 36M, 48M, 54M */
182 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122; 180 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122;
183 val = mt76x02_eeprom_get(&dev->mt76, addr); 181 val = mt76x02_eeprom_get(dev, addr);
184 t->ofdm[4] = t->ofdm[5] = s6_to_s8(val); 182 t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
185 t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8); 183 t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
186 184
187 /* ht-vht mcs 1ss 0, 1, 2, 3 */ 185 /* ht-vht mcs 1ss 0, 1, 2, 3 */
188 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124; 186 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
189 val = mt76x02_eeprom_get(&dev->mt76, addr); 187 val = mt76x02_eeprom_get(dev, addr);
190 t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val); 188 t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
191 t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8); 189 t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
192 190
193 /* ht-vht mcs 1ss 4, 5, 6 */ 191 /* ht-vht mcs 1ss 4, 5, 6 */
194 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126; 192 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
195 val = mt76x02_eeprom_get(&dev->mt76, addr); 193 val = mt76x02_eeprom_get(dev, addr);
196 t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val); 194 t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
197 t->ht[6] = t->vht[6] = s6_to_s8(val >> 8); 195 t->ht[6] = t->vht[6] = s6_to_s8(val >> 8);
198 196
199 /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */ 197 /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
200 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec; 198 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
201 val = mt76x02_eeprom_get(&dev->mt76, addr); 199 val = mt76x02_eeprom_get(dev, addr);
202 t->stbc[0] = t->stbc[1] = s6_to_s8(val); 200 t->stbc[0] = t->stbc[1] = s6_to_s8(val);
203 t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8); 201 t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
204 202
205 /* ht-vht mcs 1ss 4, 5, 6 stbc */ 203 /* ht-vht mcs 1ss 4, 5, 6 stbc */
206 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee; 204 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
207 val = mt76x02_eeprom_get(&dev->mt76, addr); 205 val = mt76x02_eeprom_get(dev, addr);
208 t->stbc[4] = t->stbc[5] = s6_to_s8(val); 206 t->stbc[4] = t->stbc[5] = s6_to_s8(val);
209 t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8); 207 t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
210 208
211 /* vht mcs 8, 9 5GHz */ 209 /* vht mcs 8, 9 5GHz */
212 val = mt76x02_eeprom_get(&dev->mt76, 0x132); 210 val = mt76x02_eeprom_get(dev, 0x132);
213 t->vht[7] = s6_to_s8(val); 211 t->vht[7] = s6_to_s8(val);
214 t->vht[8] = s6_to_s8(val >> 8); 212 t->vht[8] = s6_to_s8(val >> 8);
215 213
@@ -266,7 +264,7 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
266 addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset; 264 addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset;
267 } 265 }
268 266
269 data = mt76x02_eeprom_get(&dev->mt76, addr); 267 data = mt76x02_eeprom_get(dev, addr);
270 268
271 info[0] = data; 269 info[0] = data;
272 if (!info[0] || info[0] > 0x3f) 270 if (!info[0] || info[0] > 0x3f)
@@ -312,7 +310,7 @@ static int mt76x0_load_eeprom(struct mt76x02_dev *dev)
312 if (found < 0) 310 if (found < 0)
313 return found; 311 return found;
314 312
315 return mt76x02_get_efuse_data(&dev->mt76, 0, dev->mt76.eeprom.data, 313 return mt76x02_get_efuse_data(dev, 0, dev->mt76.eeprom.data,
316 MT76X0_EEPROM_SIZE, MT_EE_READ); 314 MT76X0_EEPROM_SIZE, MT_EE_READ);
317} 315}
318 316
@@ -326,7 +324,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
326 if (err < 0) 324 if (err < 0)
327 return err; 325 return err;
328 326
329 data = mt76x02_eeprom_get(&dev->mt76, MT_EE_VERSION); 327 data = mt76x02_eeprom_get(dev, MT_EE_VERSION);
330 version = data >> 8; 328 version = data >> 8;
331 fae = data; 329 fae = data;
332 330
@@ -337,8 +335,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
337 dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n", 335 dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
338 version, fae); 336 version, fae);
339 337
340 mt76x02_mac_setaddr(&dev->mt76, 338 mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
341 dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
342 mt76x0_set_chip_cap(dev); 339 mt76x0_set_chip_cap(dev);
343 mt76x0_set_freq_offset(dev); 340 mt76x0_set_freq_offset(dev);
344 mt76x0_set_temp_offset(dev); 341 mt76x0_set_temp_offset(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
index 40fd4e61769b..ee9ade9f3c8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -37,4 +37,10 @@ static inline s8 s6_to_s8(u32 val)
37 return ret; 37 return ret;
38} 38}
39 39
40static inline bool mt76x0_tssi_enabled(struct mt76x02_dev *dev)
41{
42 return (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
43 MT_EE_NIC_CONF_1_TX_ALC_EN);
44}
45
40#endif 46#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index ee2b8e885608..4a9408801260 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -138,7 +138,7 @@ static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
138 138
139 RANDOM_WRITE(dev, common_mac_reg_table); 139 RANDOM_WRITE(dev, common_mac_reg_table);
140 140
141 mt76x02_set_beacon_offsets(&dev->mt76); 141 mt76x02_set_beacon_offsets(dev);
142 142
143 /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */ 143 /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
144 RANDOM_WRITE(dev, mt76x0_mac_reg_table); 144 RANDOM_WRITE(dev, mt76x0_mac_reg_table);
@@ -280,7 +280,7 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
280 return -ETIMEDOUT; 280 return -ETIMEDOUT;
281 281
282 mt76x0_reset_csr_bbp(dev); 282 mt76x0_reset_csr_bbp(dev);
283 ret = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false); 283 ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
284 if (ret) 284 if (ret)
285 return ret; 285 return ret;
286 286
@@ -368,7 +368,10 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
368 hw->max_rates = 1; 368 hw->max_rates = 1;
369 hw->max_report_rates = 7; 369 hw->max_report_rates = 7;
370 hw->max_rate_tries = 1; 370 hw->max_rate_tries = 1;
371 hw->extra_tx_headroom = sizeof(struct mt76x02_txwi) + 4 + 2; 371 hw->extra_tx_headroom = 2;
372 if (mt76_is_usb(dev))
373 hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
374 MT_DMA_HDR_LEN;
372 375
373 hw->sta_data_size = sizeof(struct mt76x02_sta); 376 hw->sta_data_size = sizeof(struct mt76x02_sta);
374 hw->vif_data_size = sizeof(struct mt76x02_vif); 377 hw->vif_data_size = sizeof(struct mt76x02_vif);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index c9cd0254a979..9273d2d2764a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -16,6 +16,20 @@
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include "mt76x0.h" 17#include "mt76x0.h"
18 18
19static int
20mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
21{
22 int ret;
23
24 cancel_delayed_work_sync(&dev->cal_work);
25
26 mt76_set_channel(&dev->mt76);
27 ret = mt76x0_phy_set_channel(dev, chandef);
28 mt76_txq_schedule_all(&dev->mt76);
29
30 return ret;
31}
32
19int mt76x0_config(struct ieee80211_hw *hw, u32 changed) 33int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
20{ 34{
21 struct mt76x02_dev *dev = hw->priv; 35 struct mt76x02_dev *dev = hw->priv;
@@ -25,7 +39,7 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
25 39
26 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 40 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
27 ieee80211_stop_queues(hw); 41 ieee80211_stop_queues(hw);
28 ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef); 42 ret = mt76x0_set_channel(dev, &hw->conf.chandef);
29 ieee80211_wake_queues(hw); 43 ieee80211_wake_queues(hw);
30 } 44 }
31 45
@@ -114,8 +128,6 @@ void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
114{ 128{
115 struct mt76x02_dev *dev = hw->priv; 129 struct mt76x02_dev *dev = hw->priv;
116 130
117 cancel_delayed_work_sync(&dev->cal_work);
118 mt76x0_agc_save(dev);
119 set_bit(MT76_SCANNING, &dev->mt76.state); 131 set_bit(MT76_SCANNING, &dev->mt76.state);
120} 132}
121EXPORT_SYMBOL_GPL(mt76x0_sw_scan); 133EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
@@ -125,11 +137,7 @@ void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
125{ 137{
126 struct mt76x02_dev *dev = hw->priv; 138 struct mt76x02_dev *dev = hw->priv;
127 139
128 mt76x0_agc_restore(dev);
129 clear_bit(MT76_SCANNING, &dev->mt76.state); 140 clear_bit(MT76_SCANNING, &dev->mt76.state);
130
131 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
132 MT_CALIBRATE_INTERVAL);
133} 141}
134EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete); 142EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
135 143
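
mt76x0_config() now funnels channel changes through mt76x0_set_channel(), which quiesces the calibration worker before touching the PHY and kicks the TX queues afterwards; the AGC save/restore around scans can then go away, since mt76x0_phy_set_channel() requeues the calibration work itself (see the phy.c hunk below). The generic quiesce/reconfigure/resume shape, as a sketch (do_reconfig() is hypothetical):

        cancel_delayed_work_sync(&dev->cal_work); /* no cal mid-switch */
        ret = do_reconfig(dev);                   /* hypothetical step */
        mt76_txq_schedule_all(&dev->mt76);        /* resume pending TX */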
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
index b66e70f6cd89..3b34e1d2769f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -39,6 +39,9 @@ enum mcu_calibrate {
39 MCU_CAL_TXDCOC, 39 MCU_CAL_TXDCOC,
40 MCU_CAL_RX_GROUP_DELAY, 40 MCU_CAL_RX_GROUP_DELAY,
41 MCU_CAL_TX_GROUP_DELAY, 41 MCU_CAL_TX_GROUP_DELAY,
42 MCU_CAL_VCO,
43 MCU_CAL_NO_SIGNAL = 0xfe,
44 MCU_CAL_FULL = 0xff,
42}; 45};
43 46
44int mt76x0e_mcu_init(struct mt76x02_dev *dev); 47int mt76x0e_mcu_init(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 1bff2be45a13..2187bafaf2e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -66,12 +66,11 @@ int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
66/* PHY */ 66/* PHY */
67void mt76x0_phy_init(struct mt76x02_dev *dev); 67void mt76x0_phy_init(struct mt76x02_dev *dev);
68int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev); 68int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev);
69void mt76x0_agc_save(struct mt76x02_dev *dev);
70void mt76x0_agc_restore(struct mt76x02_dev *dev);
71int mt76x0_phy_set_channel(struct mt76x02_dev *dev, 69int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
72 struct cfg80211_chan_def *chandef); 70 struct cfg80211_chan_def *chandef);
73void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev); 71void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev);
74void mt76x0_phy_set_txpower(struct mt76x02_dev *dev); 72void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
73void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on);
75 74
76/* MAC */ 75/* MAC */
77void mt76x0_mac_work(struct work_struct *work); 76void mt76x0_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 87997cddf0d6..522c86059bcb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -28,6 +28,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
28 mutex_lock(&dev->mt76.mutex); 28 mutex_lock(&dev->mt76.mutex);
29 29
30 mt76x02_mac_start(dev); 30 mt76x02_mac_start(dev);
31 mt76x0_phy_calibrate(dev, true);
31 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 32 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
32 MT_CALIBRATE_INTERVAL); 33 MT_CALIBRATE_INTERVAL);
33 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, 34 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
@@ -71,10 +72,19 @@ static const struct ieee80211_ops mt76x0e_ops = {
71 .tx = mt76x02_tx, 72 .tx = mt76x02_tx,
72 .start = mt76x0e_start, 73 .start = mt76x0e_start,
73 .stop = mt76x0e_stop, 74 .stop = mt76x0e_stop,
74 .config = mt76x0_config,
75 .add_interface = mt76x02_add_interface, 75 .add_interface = mt76x02_add_interface,
76 .remove_interface = mt76x02_remove_interface, 76 .remove_interface = mt76x02_remove_interface,
77 .config = mt76x0_config,
77 .configure_filter = mt76x02_configure_filter, 78 .configure_filter = mt76x02_configure_filter,
79 .sta_add = mt76x02_sta_add,
80 .sta_remove = mt76x02_sta_remove,
81 .set_key = mt76x02_set_key,
82 .conf_tx = mt76x02_conf_tx,
83 .sw_scan_start = mt76x0_sw_scan,
84 .sw_scan_complete = mt76x0_sw_scan_complete,
85 .ampdu_action = mt76x02_ampdu_action,
86 .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
87 .wake_tx_queue = mt76_wake_tx_queue,
78}; 88};
79 89
80static int mt76x0e_register_device(struct mt76x02_dev *dev) 90static int mt76x0e_register_device(struct mt76x02_dev *dev)
@@ -102,28 +112,34 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
102 u16 val; 112 u16 val;
103 113
104 mt76_clear(dev, MT_COEXCFG0, BIT(0)); 114 mt76_clear(dev, MT_COEXCFG0, BIT(0));
105 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0); 115
106 if (val & MT_EE_NIC_CONF_0_PA_IO_CURRENT) { 116 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
107 u32 data; 117 if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
108 118 mt76_set(dev, MT_XO_CTRL7, 0xc03);
109 /* set external PA I/O
110 * current to 16mA
111 */
112 data = mt76_rr(dev, 0x11c);
113 val |= 0xc03;
114 mt76_wr(dev, 0x11c, val);
115 }
116 } 119 }
117 120
118 mt76_clear(dev, 0x110, BIT(9)); 121 mt76_clear(dev, 0x110, BIT(9));
119 mt76_set(dev, MT_MAX_LEN_CFG, BIT(13)); 122 mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
120 123
124 err = mt76x0_register_device(dev);
125 if (err < 0)
126 return err;
127
128 set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
129
121 return 0; 130 return 0;
122} 131}
123 132
124static int 133static int
125mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) 134mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
126{ 135{
136 static const struct mt76_driver_ops drv_ops = {
137 .txwi_size = sizeof(struct mt76x02_txwi),
138 .tx_prepare_skb = mt76x02_tx_prepare_skb,
139 .tx_complete_skb = mt76x02_tx_complete_skb,
140 .rx_skb = mt76x02_queue_rx_skb,
141 .rx_poll_complete = mt76x02_rx_poll_complete,
142 };
127 struct mt76x02_dev *dev; 143 struct mt76x02_dev *dev;
128 int ret; 144 int ret;
129 145
@@ -141,7 +157,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
141 if (ret) 157 if (ret)
142 return ret; 158 return ret;
143 159
144 dev = mt76x0_alloc_device(&pdev->dev, NULL, &mt76x0e_ops); 160 dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops);
145 if (!dev) 161 if (!dev)
146 return -ENOMEM; 162 return -ENOMEM;
147 163
@@ -150,6 +166,11 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
150 dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); 166 dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
151 dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); 167 dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
152 168
169 ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
170 IRQF_SHARED, KBUILD_MODNAME, dev);
171 if (ret)
172 goto error;
173
153 ret = mt76x0e_register_device(dev); 174 ret = mt76x0e_register_device(dev);
154 if (ret < 0) 175 if (ret < 0)
155 goto error; 176 goto error;
@@ -167,7 +188,7 @@ static void mt76x0e_cleanup(struct mt76x02_dev *dev)
167 mt76x0_chip_onoff(dev, false, false); 188 mt76x0_chip_onoff(dev, false, false);
168 mt76x0e_stop_hw(dev); 189 mt76x0e_stop_hw(dev);
169 mt76x02_dma_cleanup(dev); 190 mt76x02_dma_cleanup(dev);
170 mt76x02_mcu_cleanup(&dev->mt76); 191 mt76x02_mcu_cleanup(dev);
171} 192}
172 193
173static void 194static void
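
Two probe-path additions above are worth noting: the interrupt is requested with devm_request_irq(), so it is released automatically when the device is unbound, and IRQF_SHARED is used since legacy PCI interrupt lines can be shared; registration only proceeds once the handler is in place. Sketch of the pattern (handler name as in the hunk):

        ret = devm_request_irq(dev->mt76.dev, pdev->irq,
                               mt76x02_irq_handler, IRQF_SHARED,
                               KBUILD_MODNAME, dev);
        if (ret)
                goto error;     /* no explicit free_irq() needed later */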
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
index 6c66656c21f4..569861289aa5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -116,6 +116,7 @@ static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
116 goto out; 116 goto out;
117 } 117 }
118 118
119 mt76x02_set_ethtool_fwver(dev, hdr);
119 dev_dbg(dev->mt76.dev, "Firmware running!\n"); 120 dev_dbg(dev->mt76.dev, "Firmware running!\n");
120 121
121out: 122out:
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 4850a2db18d7..cf024950e0ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -14,6 +14,9 @@
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/etherdevice.h>
19
17#include "mt76x0.h" 20#include "mt76x0.h"
18#include "mcu.h" 21#include "mcu.h"
19#include "eeprom.h" 22#include "eeprom.h"
@@ -23,8 +26,6 @@
23#include "initvals_phy.h" 26#include "initvals_phy.h"
24#include "../mt76x02_phy.h" 27#include "../mt76x02_phy.h"
25 28
26#include <linux/etherdevice.h>
27
28static int 29static int
29mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value) 30mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
30{ 31{
@@ -37,7 +38,7 @@ mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
37 bank = MT_RF_BANK(offset); 38 bank = MT_RF_BANK(offset);
38 reg = MT_RF_REG(offset); 39 reg = MT_RF_REG(offset);
39 40
40 if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) 41 if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
41 return -EINVAL; 42 return -EINVAL;
42 43
43 mutex_lock(&dev->phy_mutex); 44 mutex_lock(&dev->phy_mutex);
@@ -76,7 +77,7 @@ static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
76 bank = MT_RF_BANK(offset); 77 bank = MT_RF_BANK(offset);
77 reg = MT_RF_REG(offset); 78 reg = MT_RF_REG(offset);
78 79
79 if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) 80 if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
80 return -EINVAL; 81 return -EINVAL;
81 82
82 mutex_lock(&dev->phy_mutex); 83 mutex_lock(&dev->phy_mutex);
@@ -111,15 +112,16 @@ out:
111static int 112static int
112rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val) 113rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
113{ 114{
114 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) { 115 if (mt76_is_usb(dev)) {
115 struct mt76_reg_pair pair = { 116 struct mt76_reg_pair pair = {
116 .reg = offset, 117 .reg = offset,
117 .value = val, 118 .value = val,
118 }; 119 };
119 120
121 WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
122 &dev->mt76.state));
120 return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); 123 return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
121 } else { 124 } else {
122 WARN_ON_ONCE(1);
123 return mt76x0_rf_csr_wr(dev, offset, val); 125 return mt76x0_rf_csr_wr(dev, offset, val);
124 } 126 }
125} 127}
@@ -130,15 +132,16 @@ rf_rr(struct mt76x02_dev *dev, u32 offset)
130 int ret; 132 int ret;
131 u32 val; 133 u32 val;
132 134
133 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) { 135 if (mt76_is_usb(dev)) {
134 struct mt76_reg_pair pair = { 136 struct mt76_reg_pair pair = {
135 .reg = offset, 137 .reg = offset,
136 }; 138 };
137 139
140 WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
141 &dev->mt76.state));
138 ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); 142 ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
139 val = pair.value; 143 val = pair.value;
140 } else { 144 } else {
141 WARN_ON_ONCE(1);
142 ret = val = mt76x0_rf_csr_rr(dev, offset); 145 ret = val = mt76x0_rf_csr_rr(dev, offset);
143 } 146 }
144 147
@@ -175,9 +178,22 @@ rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
175} 178}
176#endif 179#endif
177 180
178#define RF_RANDOM_WRITE(dev, tab) \ 181static void
179 mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, \ 182mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data,
180 tab, ARRAY_SIZE(tab)) 183 int n)
184{
185 while (n-- > 0) {
186 mt76x0_rf_csr_wr(dev, data->reg, data->value);
187 data++;
188 }
189}
190
191#define RF_RANDOM_WRITE(dev, tab) do { \
192 if (mt76_is_mmio(dev)) \
193 mt76x0_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
194 else \
195 mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
196} while (0)
181 197
182int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev) 198int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
183{ 199{
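
RF_RANDOM_WRITE() now dispatches on the bus type, and its body is wrapped in do { ... } while (0) - the standard idiom for making a multi-statement macro behave as a single statement; without it, an unbraced if/else caller would mispair. Illustration with hypothetical step_one()/step_two():

#define TWO_STEPS(x) do { step_one(x); step_two(x); } while (0)

        if (cond)
                TWO_STEPS(v);   /* one statement; the trailing ';' fits */
        else
                other();        /* no dangling-else breakage */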
@@ -186,7 +202,6 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
186 202
187 do { 203 do {
188 val = mt76_rr(dev, MT_BBP(CORE, 0)); 204 val = mt76_rr(dev, MT_BBP(CORE, 0));
189 printk("BBP version %08x\n", val);
190 if (val && ~val) 205 if (val && ~val)
191 break; 206 break;
192 } while (--i); 207 } while (--i);
@@ -196,36 +211,10 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
196 return -EIO; 211 return -EIO;
197 } 212 }
198 213
214 dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
199 return 0; 215 return 0;
200} 216}
201 217
202static void
203mt76x0_bbp_set_ctrlch(struct mt76x02_dev *dev, enum nl80211_chan_width width,
204 u8 ctrl)
205{
206 int core_val, agc_val;
207
208 switch (width) {
209 case NL80211_CHAN_WIDTH_80:
210 core_val = 3;
211 agc_val = 7;
212 break;
213 case NL80211_CHAN_WIDTH_40:
214 core_val = 2;
215 agc_val = 3;
216 break;
217 default:
218 core_val = 0;
219 agc_val = 1;
220 break;
221 }
222
223 mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
224 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
225 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
226 mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
227}
228
229static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel) 218static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
230{ 219{
231 u8 val; 220 u8 val;
@@ -283,13 +272,6 @@ static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
283} 272}
284 273
285static void 274static void
286mt76x0_mac_set_ctrlch(struct mt76x02_dev *dev, bool primary_upper)
287{
288 mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
289 primary_upper);
290}
291
292static void
293mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) 275mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
294{ 276{
295 switch (band) { 277 switch (band) {
@@ -299,9 +281,6 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
299 rf_wr(dev, MT_RF(5, 0), 0x45); 281 rf_wr(dev, MT_RF(5, 0), 0x45);
300 rf_wr(dev, MT_RF(6, 0), 0x44); 282 rf_wr(dev, MT_RF(6, 0), 0x44);
301 283
302 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
303 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
304
305 mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007); 284 mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
306 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002); 285 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
307 break; 286 break;
@@ -311,9 +290,6 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
311 rf_wr(dev, MT_RF(5, 0), 0x44); 290 rf_wr(dev, MT_RF(5, 0), 0x44);
312 rf_wr(dev, MT_RF(6, 0), 0x45); 291 rf_wr(dev, MT_RF(6, 0), 0x45);
313 292
314 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
315 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
316
317 mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005); 293 mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
318 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102); 294 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
319 break; 295 break;
@@ -475,7 +451,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban
475 mt76_wr(dev, MT_RF_MISC, mac_reg); 451 mt76_wr(dev, MT_RF_MISC, mac_reg);
476 452
477 band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; 453 band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
478 if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { 454 if (mt76x02_ext_pa_enabled(dev, band)) {
479 /* 455 /*
480 MT_RF_MISC (offset: 0x0518) 456 MT_RF_MISC (offset: 0x0518)
481 [2]1'b1: enable external A band PA, 1'b0: disable external A band PA 457 [2]1'b1: enable external A band PA, 1'b0: disable external A band PA
@@ -514,7 +490,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban
514} 490}
515 491
516static void 492static void
517mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band) 493mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
518{ 494{
519 int i; 495 int i;
520 496
@@ -587,7 +563,7 @@ mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
587 return ; 563 return ;
588 } 564 }
589 565
590 mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false); 566 mt76x02_mcu_function_select(dev, BW_SETTING, bw, false);
591} 567}
592 568
593void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) 569void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
@@ -603,8 +579,50 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
603 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); 579 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
604 mt76x02_add_rate_power_offset(t, -info[0]); 580 mt76x02_add_rate_power_offset(t, -info[0]);
605 581
606 mt76x02_phy_set_txpower(&dev->mt76, info[0], info[1]); 582 mt76x02_phy_set_txpower(dev, info[0], info[1]);
583}
584
585void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
586{
587 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
588 u32 val, tx_alc, reg_val;
589
590 if (power_on) {
591 mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
592 mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
593 false);
594 usleep_range(10, 20);
595 /* XXX: tssi */
596 }
597
598 tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
599 mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
600 usleep_range(500, 700);
601
602 reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
603 mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
604
605 if (chan->band == NL80211_BAND_5GHZ) {
606 if (chan->hw_value < 100)
607 val = 0x701;
608 else if (chan->hw_value < 140)
609 val = 0x801;
610 else
611 val = 0x901;
612 } else {
613 val = 0x600;
614 }
615
616 mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
617 msleep(350);
618 mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
619 usleep_range(15000, 20000);
620
621 mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
622 mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
623 mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
607} 624}
625EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate);
608 626
609int mt76x0_phy_set_channel(struct mt76x02_dev *dev, 627int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
610 struct cfg80211_chan_def *chandef) 628 struct cfg80211_chan_def *chandef)
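
mt76x0_phy_calibrate() above brackets the MCU calibration requests with a save/override/restore of TX ALC and BBP(IBI, 9), so calibration runs against a known configuration and the operating values come back afterwards. The generic shape of that idiom (REG/OVERRIDE are placeholders, not driver registers):

        u32 saved = mt76_rr(dev, REG);          /* remember live value */

        mt76_wr(dev, REG, OVERRIDE);            /* force cal-time state */
        /* ... issue mt76x02_mcu_calibrate() requests here ... */
        mt76_wr(dev, REG, saved);               /* restore for operation */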
@@ -665,9 +683,19 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
665 break; 683 break;
666 } 684 }
667 685
668 mt76x0_bbp_set_bw(dev, chandef->width); 686 if (mt76_is_usb(dev)) {
669 mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index); 687 mt76x0_bbp_set_bw(dev, chandef->width);
670 mt76x0_mac_set_ctrlch(dev, ch_group_index & 1); 688 } else {
689 if (chandef->width == NL80211_CHAN_WIDTH_80 ||
690 chandef->width == NL80211_CHAN_WIDTH_40)
691 val = 0x201;
692 else
693 val = 0x601;
694 mt76_wr(dev, MT_TX_SW_CFG0, val);
695 }
696 mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
697 mt76x02_phy_set_band(dev, chandef->chan->band,
698 ch_group_index & 1);
671 mt76x0_ant_select(dev); 699 mt76x0_ant_select(dev);
672 700
673 mt76_rmw(dev, MT_EXT_CCA_CFG, 701 mt76_rmw(dev, MT_EXT_CCA_CFG,
@@ -680,7 +708,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
680 708
681 mt76x0_phy_set_band(dev, chandef->chan->band); 709 mt76x0_phy_set_band(dev, chandef->chan->band);
682 mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band); 710 mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
683 mt76x0_read_rx_gain(dev);
684 711
685 /* set Japan Tx filter at channel 14 */ 712 /* set Japan Tx filter at channel 14 */
686 val = mt76_rr(dev, MT_BBP(CORE, 1)); 713 val = mt76_rr(dev, MT_BBP(CORE, 1));
@@ -690,17 +717,27 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
690 val &= ~0x20; 717 val &= ~0x20;
691 mt76_wr(dev, MT_BBP(CORE, 1), val); 718 mt76_wr(dev, MT_BBP(CORE, 1), val);
692 719
693 mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band); 720 mt76x0_read_rx_gain(dev);
721 mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
722 mt76x02_init_agc_gain(dev);
694 723
695 /* Vendor driver don't do it */ 724 if (mt76_is_usb(dev)) {
696 /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */ 725 mt76x0_vco_cal(dev, channel);
726 } else {
727 /* enable vco */
728 rf_set(dev, MT_RF(0, 4), BIT(7));
729 }
697 730
698 mt76x0_vco_cal(dev, channel);
699 if (scan) 731 if (scan)
700 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false); 732 return 0;
701 733
734 if (mt76_is_mmio(dev))
735 mt76x0_phy_calibrate(dev, false);
702 mt76x0_phy_set_txpower(dev); 736 mt76x0_phy_set_txpower(dev);
703 737
738 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
739 MT_CALIBRATE_INTERVAL);
740
704 return 0; 741 return 0;
705} 742}
706 743
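
The tail of mt76x0_phy_set_channel() also changes shape: Rx gain is read
and the AGC baseline initialised (mt76x02_init_agc_gain()) together with
the per-channel BBP parameters, USB parts run the VCO calibration here
while MMIO parts only set the VCO enable bit (RF bank 0, register 4,
bit 7), and a scan now returns before any calibration (the old per-scan
MCU_CAL_RXDCOC call is gone). In outline, the new exit path is:

        if (scan)
                return 0;

        if (mt76_is_mmio(dev))
                mt76x0_phy_calibrate(dev, false);
        mt76x0_phy_set_txpower(dev);

        ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
                                     MT_CALIBRATE_INTERVAL);

        return 0;

Queueing dev->cal_work from here keeps the periodic calibration running
once a channel is actually configured.
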
@@ -710,7 +747,7 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
710 u8 channel = dev->mt76.chandef.chan->hw_value; 747 u8 channel = dev->mt76.chandef.chan->hw_value;
711 int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0; 748 int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
712 749
713 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, false); 750 mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
714 751
715 mt76x0_vco_cal(dev, channel); 752 mt76x0_vco_cal(dev, channel);
716 753
@@ -718,109 +755,113 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
718 mt76_wr(dev, MT_TX_ALC_CFG_0, 0); 755 mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
719 usleep_range(500, 700); 756 usleep_range(500, 700);
720 757
721 reg_val = mt76_rr(dev, 0x2124); 758 reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
722 reg_val &= 0xffffff7e; 759 mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
723 mt76_wr(dev, 0x2124, reg_val);
724 760
725 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 0, false); 761 mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false);
726 762
727 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, is_5ghz, false); 763 mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
728 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LOFT, is_5ghz, false); 764 mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false);
729 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false); 765 mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
730 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_GROUP_DELAY, 766 mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false);
731 is_5ghz, false); 767 mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false);
732 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQ, is_5ghz, false); 768 mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false);
733 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RX_GROUP_DELAY,
734 is_5ghz, false);
735 769
736 mt76_wr(dev, 0x2124, reg_val); 770 mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
737 mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); 771 mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
738 msleep(100); 772 msleep(100);
739 773
740 mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false); 774 mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
741}
742
743void mt76x0_agc_save(struct mt76x02_dev *dev)
744{
745 /* Only one RX path */
746 dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
747}
748
749void mt76x0_agc_restore(struct mt76x02_dev *dev)
750{
751 mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
752} 775}
753 776
754static void mt76x0_temp_sensor(struct mt76x02_dev *dev) 777static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
755{ 778{
756 u8 rf_b7_73, rf_b0_66, rf_b0_67; 779 u8 rf_b7_73, rf_b0_66, rf_b0_67;
757 int cycle, temp; 780 s8 val;
758 u32 val;
759 s32 sval;
760 781
761 rf_b7_73 = rf_rr(dev, MT_RF(7, 73)); 782 rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
762 rf_b0_66 = rf_rr(dev, MT_RF(0, 66)); 783 rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
763 rf_b0_67 = rf_rr(dev, MT_RF(0, 73)); 784 rf_b0_67 = rf_rr(dev, MT_RF(0, 67));
764 785
765 rf_wr(dev, MT_RF(7, 73), 0x02); 786 rf_wr(dev, MT_RF(7, 73), 0x02);
766 rf_wr(dev, MT_RF(0, 66), 0x23); 787 rf_wr(dev, MT_RF(0, 66), 0x23);
767 rf_wr(dev, MT_RF(0, 73), 0x01); 788 rf_wr(dev, MT_RF(0, 67), 0x01);
768 789
769 mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055); 790 mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
770 791
771 for (cycle = 0; cycle < 2000; cycle++) { 792 if (!mt76_poll(dev, MT_BBP(CORE, 34), BIT(4), 0, 2000)) {
772 val = mt76_rr(dev, MT_BBP(CORE, 34)); 793 mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
773 if (!(val & 0x10))
774 break;
775 udelay(3);
776 }
777
778 if (cycle >= 2000) {
779 val &= 0x10;
780 mt76_wr(dev, MT_BBP(CORE, 34), val);
781 goto done; 794 goto done;
782 } 795 }
783 796
784 sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff; 797 val = mt76_rr(dev, MT_BBP(CORE, 35));
785 if (!(sval & 0x80)) 798 val = (35 * (val - dev->cal.rx.temp_offset)) / 10 + 25;
786 sval &= 0x7f; /* Positive */
787 else
788 sval |= 0xffffff00; /* Negative */
789 799
790 temp = (35 * (sval - dev->cal.rx.temp_offset)) / 10 + 25; 800 if (abs(val - dev->cal.temp_vco) > 20) {
801 mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
802 dev->mt76.chandef.chan->hw_value,
803 false);
804 dev->cal.temp_vco = val;
805 }
806 if (abs(val - dev->cal.temp) > 30) {
807 mt76x0_phy_calibrate(dev, false);
808 dev->cal.temp = val;
809 }
791 810
792done: 811done:
793 rf_wr(dev, MT_RF(7, 73), rf_b7_73); 812 rf_wr(dev, MT_RF(7, 73), rf_b7_73);
794 rf_wr(dev, MT_RF(0, 66), rf_b0_66); 813 rf_wr(dev, MT_RF(0, 66), rf_b0_66);
795 rf_wr(dev, MT_RF(0, 73), rf_b0_67); 814 rf_wr(dev, MT_RF(0, 67), rf_b0_67);
796} 815}
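
The rewritten temperature path deserves a closer look: mt76_poll()
replaces the open-coded 2000-iteration busy-wait on BIT(4) of BBP CORE 34,
the readout is handled as a signed 8-bit value (dropping the manual sign
extension), and the hunk also fixes rf_b0_67 to actually use RF register
67 instead of 73. The conversion as a standalone helper (name ours):

        /* 3.5 degC per ADC step relative to the per-device offset,
         * biased to 25 degC */
        static inline int temp_degc(s8 raw, s8 temp_offset)
        {
                return (35 * (raw - temp_offset)) / 10 + 25;
        }

A drift of more than 20 degC since the last VCO calibration re-runs
MCU_CAL_VCO, and more than 30 degC since the last full pass re-runs
mt76x0_phy_calibrate(); both thresholds act as hysteresis so the periodic
work does not recalibrate on every tick.
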
797 816
798static void mt76x0_dynamic_vga_tuning(struct mt76x02_dev *dev) 817static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
799{ 818{
800 struct cfg80211_chan_def *chandef = &dev->mt76.chandef; 819 u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
801 u32 val, init_vga; 820 u32 val = 0x122c << 16 | 0xf2;
802 int avg_rssi; 821
803 822 mt76_wr(dev, MT_BBP(AGC, 8),
804 init_vga = chandef->chan->band == NL80211_BAND_5GHZ ? 0x54 : 0x4E; 823 val | FIELD_PREP(MT_BBP_AGC_GAIN, gain));
805 avg_rssi = mt76x02_phy_get_min_avg_rssi(&dev->mt76); 824}
806 if (avg_rssi > -60) 825
807 init_vga -= 0x20; 826static void
808 else if (avg_rssi > -70) 827mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
809 init_vga -= 0x10; 828{
810 829 bool gain_change;
811 val = mt76_rr(dev, MT_BBP(AGC, 8)); 830 u8 gain_delta;
812 val &= 0xFFFF80FF; 831 int low_gain;
813 val |= init_vga << 8; 832
814 mt76_wr(dev, MT_BBP(AGC,8), val); 833 dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
834
835 low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
836 (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
837
838 gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
839 dev->cal.low_gain = low_gain;
840
841 if (!gain_change) {
842 if (mt76x02_phy_adjust_vga_gain(dev))
843 mt76x0_phy_set_gain_val(dev);
844 return;
845 }
846
847 dev->cal.agc_gain_adjust = (low_gain == 2) ? 0 : 10;
848 gain_delta = (low_gain == 2) ? 10 : 0;
849
850 dev->cal.agc_gain_cur[0] = dev->cal.agc_gain_init[0] - gain_delta;
851 mt76x0_phy_set_gain_val(dev);
852
853 /* clear false CCA counters */
854 mt76_rr(dev, MT_RX_STAT_1);
815} 855}
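
mt76x0_phy_update_channel_gain() replaces the old ad-hoc VGA tuning with
the shared mt76x02 scheme: the minimum averaged RSSI is bucketed against
two thresholds, and only a transition in or out of the strong-signal
bucket (low_gain == 2) forces a full gain reprogram. The bucketing,
isolated into a helper of our own naming:

        /* 0 = weak signal, 1 = above the low threshold, 2 = above both */
        static int low_gain_bucket(int rssi, int thresh, int low_thresh)
        {
                return (rssi > thresh) + (rssi > low_thresh);
        }

While the bucket is stable, mt76x02_phy_adjust_vga_gain() makes
incremental corrections and mt76x0_phy_set_gain_val() applies
agc_gain_cur[0] - agc_gain_adjust to MT_BBP(AGC, 8). On a bucket flip,
agc_gain_cur is re-seeded from agc_gain_init and the 10-unit adjustment
budget moves between agc_gain_adjust and the current gain. The trailing
MT_RX_STAT_1 read clears the false-CCA counters, which are evidently
read-to-clear.
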
816 856
817static void mt76x0_phy_calibrate(struct work_struct *work) 857static void mt76x0_phy_calibration_work(struct work_struct *work)
818{ 858{
819 struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, 859 struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
820 cal_work.work); 860 cal_work.work);
821 861
822 mt76x0_dynamic_vga_tuning(dev); 862 mt76x0_phy_update_channel_gain(dev);
823 mt76x0_temp_sensor(dev); 863 if (!mt76x0_tssi_enabled(dev))
864 mt76x0_temp_sensor(dev);
824 865
825 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, 866 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
826 MT_CALIBRATE_INTERVAL); 867 MT_CALIBRATE_INTERVAL);
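
The periodic work itself is renamed to mt76x0_phy_calibration_work(),
freeing the mt76x0_phy_calibrate name for the exported one-shot pass. The
new body, reconstructed:

        static void mt76x0_phy_calibration_work(struct work_struct *work)
        {
                struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
                                                       cal_work.work);

                mt76x0_phy_update_channel_gain(dev);
                if (!mt76x0_tssi_enabled(dev))
                        mt76x0_temp_sensor(dev);

                ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
                                             MT_CALIBRATE_INTERVAL);
        }

Skipping the temperature sensor when TSSI is enabled presumably reflects
that TSSI-based power compensation already tracks thermal drift.
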
@@ -881,9 +922,9 @@ static void mt76x0_rf_init(struct mt76x02_dev *dev)
881 922
882void mt76x0_phy_init(struct mt76x02_dev *dev) 923void mt76x0_phy_init(struct mt76x02_dev *dev)
883{ 924{
884 INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate); 925 INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
885 926
886 mt76x0_rf_init(dev); 927 mt76x0_rf_init(dev);
887 mt76x02_phy_set_rxpath(&dev->mt76); 928 mt76x02_phy_set_rxpath(dev);
888 mt76x02_phy_set_txdac(&dev->mt76); 929 mt76x02_phy_set_txdac(dev);
889} 930}
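
From here on, the series' recurring transformation shows up file after
file: helpers that took a struct mt76_dev pointer (and were called with
&dev->mt76) now take struct mt76x02_dev directly, with register accessors
switching from the __mt76_* forms to the plain mt76_rr()/mt76_wr() macros.
The two views convert cheaply because the core device is embedded; only
the relevant fields are sketched here:

        struct mt76x02_dev {
                struct mt76_dev mt76;   /* embedded core device */
                /* ... mt76x02-specific state ... */
        };

        /* wrapper -> core: &dev->mt76
         * core -> wrapper, as in mt76x02_mcu_msg_send() below: */
        struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev,
                                               mt76);
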
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index fb6fa1fa5548..a9f14d5149d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -40,8 +40,7 @@ mt76x0u_upload_firmware(struct mt76x02_dev *dev,
40 ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE; 40 ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE;
41 dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n", 41 dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n",
42 ilm_len, MT_MCU_IVB_SIZE); 42 ilm_len, MT_MCU_IVB_SIZE);
43 err = mt76x02u_mcu_fw_send_data(&dev->mt76, 43 err = mt76x02u_mcu_fw_send_data(dev, fw_payload + MT_MCU_IVB_SIZE,
44 fw_payload + MT_MCU_IVB_SIZE,
45 ilm_len, MCU_FW_URB_MAX_PAYLOAD, 44 ilm_len, MCU_FW_URB_MAX_PAYLOAD,
46 MT_MCU_IVB_SIZE); 45 MT_MCU_IVB_SIZE);
47 if (err) 46 if (err)
@@ -49,7 +48,7 @@ mt76x0u_upload_firmware(struct mt76x02_dev *dev,
49 48
50 dlm_len = le32_to_cpu(hdr->dlm_len); 49 dlm_len = le32_to_cpu(hdr->dlm_len);
51 dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len); 50 dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
52 err = mt76x02u_mcu_fw_send_data(&dev->mt76, 51 err = mt76x02u_mcu_fw_send_data(dev,
53 fw_payload + le32_to_cpu(hdr->ilm_len), 52 fw_payload + le32_to_cpu(hdr->ilm_len),
54 dlm_len, MCU_FW_URB_MAX_PAYLOAD, 53 dlm_len, MCU_FW_URB_MAX_PAYLOAD,
55 MT_MCU_DLM_OFFSET); 54 MT_MCU_DLM_OFFSET);
@@ -121,7 +120,7 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
121 mt76_set(dev, MT_USB_DMA_CFG, 120 mt76_set(dev, MT_USB_DMA_CFG,
122 (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) | 121 (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) |
123 FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20)); 122 FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
124 mt76x02u_mcu_fw_reset(&dev->mt76); 123 mt76x02u_mcu_fw_reset(dev);
125 usleep_range(5000, 6000); 124 usleep_range(5000, 6000);
126/* 125/*
127 mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN | 126 mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 65174817ebc4..47c42c607964 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -55,7 +55,8 @@ struct mt76x02_calibration {
55 s8 agc_gain_adjust; 55 s8 agc_gain_adjust;
56 s8 low_gain; 56 s8 low_gain;
57 57
58 u8 temp; 58 s8 temp_vco;
59 s8 temp;
59 60
60 bool init_cal_done; 61 bool init_cal_done;
61 bool tssi_cal_done; 62 bool tssi_cal_done;
@@ -101,8 +102,6 @@ struct mt76x02_dev {
101 102
102 bool no_2ghz; 103 bool no_2ghz;
103 104
104 u8 agc_save;
105
106 u8 coverage_class; 105 u8 coverage_class;
107 u8 slottime; 106 u8 slottime;
108 107
@@ -119,8 +118,8 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
119int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 118int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
120 struct ieee80211_sta *sta); 119 struct ieee80211_sta *sta);
121 120
122void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif, 121void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
123 unsigned int idx); 122 unsigned int idx);
124int mt76x02_add_interface(struct ieee80211_hw *hw, 123int mt76x02_add_interface(struct ieee80211_hw *hw,
125 struct ieee80211_vif *vif); 124 struct ieee80211_vif *vif);
126void mt76x02_remove_interface(struct ieee80211_hw *hw, 125void mt76x02_remove_interface(struct ieee80211_hw *hw,
@@ -136,14 +135,15 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
136void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, 135void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
137 struct ieee80211_vif *vif, 136 struct ieee80211_vif *vif,
138 struct ieee80211_sta *sta); 137 struct ieee80211_sta *sta);
139s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev, 138s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
140 const struct ieee80211_tx_rate *rate); 139 const struct ieee80211_tx_rate *rate);
141s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj); 140s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
141 s8 max_txpwr_adj);
142void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr); 142void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
143int mt76x02_insert_hdr_pad(struct sk_buff *skb); 143int mt76x02_insert_hdr_pad(struct sk_buff *skb);
144void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len); 144void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
145void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb); 145void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
146bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update); 146bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
147void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 147void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
148 struct sk_buff *skb); 148 struct sk_buff *skb);
149void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); 149void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
@@ -156,10 +156,17 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
156 u32 *tx_info); 156 u32 *tx_info);
157 157
158extern const u16 mt76x02_beacon_offsets[16]; 158extern const u16 mt76x02_beacon_offsets[16];
159void mt76x02_set_beacon_offsets(struct mt76_dev *dev); 159void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev);
160void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set); 160void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
161void mt76x02_mac_start(struct mt76x02_dev *dev); 161void mt76x02_mac_start(struct mt76x02_dev *dev);
162 162
163static inline bool is_mt76x2(struct mt76x02_dev *dev)
164{
165 return mt76_chip(&dev->mt76) == 0x7612 ||
166 mt76_chip(&dev->mt76) == 0x7662 ||
167 mt76_chip(&dev->mt76) == 0x7602;
168}
169
163static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask) 170static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
164{ 171{
165 mt76x02_set_irq_mask(dev, 0, mask); 172 mt76x02_set_irq_mask(dev, 0, mask);
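
The new is_mt76x2() inline matches the three mt76x2 chip IDs (0x7612,
0x7662, 0x7602) so that shared mt76x02 code can gate mt76x2-only behaviour
at runtime. One user appears later in this same series, in
mt76x02_mac_write_txwi():

        /* only mt76x2 parts set the LDPC bit in the TXWI rate word */
        if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
                txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
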
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index d3efeb8a72b7..9390de2a323e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -17,46 +17,43 @@
17 17
18#include <asm/unaligned.h> 18#include <asm/unaligned.h>
19 19
20#include "mt76.h"
21#include "mt76x02_eeprom.h" 20#include "mt76x02_eeprom.h"
22#include "mt76x02_regs.h"
23 21
24static int 22static int
25mt76x02_efuse_read(struct mt76_dev *dev, u16 addr, u8 *data, 23mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data,
26 enum mt76x02_eeprom_modes mode) 24 enum mt76x02_eeprom_modes mode)
27{ 25{
28 u32 val; 26 u32 val;
29 int i; 27 int i;
30 28
31 val = __mt76_rr(dev, MT_EFUSE_CTRL); 29 val = mt76_rr(dev, MT_EFUSE_CTRL);
32 val &= ~(MT_EFUSE_CTRL_AIN | 30 val &= ~(MT_EFUSE_CTRL_AIN |
33 MT_EFUSE_CTRL_MODE); 31 MT_EFUSE_CTRL_MODE);
34 val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf); 32 val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
35 val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode); 33 val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode);
36 val |= MT_EFUSE_CTRL_KICK; 34 val |= MT_EFUSE_CTRL_KICK;
37 __mt76_wr(dev, MT_EFUSE_CTRL, val); 35 mt76_wr(dev, MT_EFUSE_CTRL, val);
38 36
39 if (!__mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 37 if (!mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
40 0, 1000))
41 return -ETIMEDOUT; 38 return -ETIMEDOUT;
42 39
43 udelay(2); 40 udelay(2);
44 41
45 val = __mt76_rr(dev, MT_EFUSE_CTRL); 42 val = mt76_rr(dev, MT_EFUSE_CTRL);
46 if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { 43 if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
47 memset(data, 0xff, 16); 44 memset(data, 0xff, 16);
48 return 0; 45 return 0;
49 } 46 }
50 47
51 for (i = 0; i < 4; i++) { 48 for (i = 0; i < 4; i++) {
52 val = __mt76_rr(dev, MT_EFUSE_DATA(i)); 49 val = mt76_rr(dev, MT_EFUSE_DATA(i));
53 put_unaligned_le32(val, data + 4 * i); 50 put_unaligned_le32(val, data + 4 * i);
54 } 51 }
55 52
56 return 0; 53 return 0;
57} 54}
58 55
59int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf, 56int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
60 int len, enum mt76x02_eeprom_modes mode) 57 int len, enum mt76x02_eeprom_modes mode)
61{ 58{
62 int ret, i; 59 int ret, i;
@@ -71,26 +68,26 @@ int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
71} 68}
72EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data); 69EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data);
73 70
74void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev) 71void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev)
75{ 72{
76 u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); 73 u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
77 74
78 switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) { 75 switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
79 case BOARD_TYPE_5GHZ: 76 case BOARD_TYPE_5GHZ:
80 dev->cap.has_5ghz = true; 77 dev->mt76.cap.has_5ghz = true;
81 break; 78 break;
82 case BOARD_TYPE_2GHZ: 79 case BOARD_TYPE_2GHZ:
83 dev->cap.has_2ghz = true; 80 dev->mt76.cap.has_2ghz = true;
84 break; 81 break;
85 default: 82 default:
86 dev->cap.has_2ghz = true; 83 dev->mt76.cap.has_2ghz = true;
87 dev->cap.has_5ghz = true; 84 dev->mt76.cap.has_5ghz = true;
88 break; 85 break;
89 } 86 }
90} 87}
91EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap); 88EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap);
92 89
93bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band) 90bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band)
94{ 91{
95 u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); 92 u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
96 93
@@ -101,7 +98,7 @@ bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band)
101} 98}
102EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled); 99EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled);
103 100
104void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band, 101void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
105 u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g) 102 u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g)
106{ 103{
107 u16 val; 104 u16 val;
@@ -129,7 +126,7 @@ void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
129} 126}
130EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain); 127EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain);
131 128
132u8 mt76x02_get_lna_gain(struct mt76_dev *dev, 129u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
133 s8 *lna_2g, s8 *lna_5g, 130 s8 *lna_2g, s8 *lna_5g,
134 struct ieee80211_channel *chan) 131 struct ieee80211_channel *chan)
135{ 132{
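
mt76x02_efuse_read() works in 16-byte blocks: the address is block-aligned
into MT_EFUSE_CTRL_AIN, the KICK bit starts the read, and the four
MT_EFUSE_DATA words are stored little-endian via put_unaligned_le32() (an
all-ones AOUT value apparently means the block is not programmed and
yields 0xff-filled data). mt76x02_get_efuse_data() loops over that
primitive; a caller sketch under the same assumptions (wrapper name ours):

        int read_region(struct mt76x02_dev *dev, u16 base, u8 *buf,
                        int len, enum mt76x02_eeprom_modes mode)
        {
                int i, ret;

                for (i = 0; i + 16 <= len; i += 16) {
                        ret = mt76x02_efuse_read(dev, base + i, buf + i,
                                                 mode);
                        if (ret)  /* -ETIMEDOUT if KICK never clears */
                                return ret;
                }
                return 0;
        }
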
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
index bcd05f7c5f45..b3ec74835d10 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
@@ -18,6 +18,8 @@
18#ifndef __MT76x02_EEPROM_H 18#ifndef __MT76x02_EEPROM_H
19#define __MT76x02_EEPROM_H 19#define __MT76x02_EEPROM_H
20 20
21#include "mt76x02.h"
22
21enum mt76x02_eeprom_field { 23enum mt76x02_eeprom_field {
22 MT_EE_CHIP_ID = 0x000, 24 MT_EE_CHIP_ID = 0x000,
23 MT_EE_VERSION = 0x002, 25 MT_EE_VERSION = 0x002,
@@ -168,44 +170,23 @@ static inline s8 mt76x02_rate_power_val(u8 val)
168} 170}
169 171
170static inline int 172static inline int
171mt76x02_eeprom_get(struct mt76_dev *dev, 173mt76x02_eeprom_get(struct mt76x02_dev *dev,
172 enum mt76x02_eeprom_field field) 174 enum mt76x02_eeprom_field field)
173{ 175{
174 if ((field & 1) || field >= __MT_EE_MAX) 176 if ((field & 1) || field >= __MT_EE_MAX)
175 return -1; 177 return -1;
176 178
177 return get_unaligned_le16(dev->eeprom.data + field); 179 return get_unaligned_le16(dev->mt76.eeprom.data + field);
178}
179
180static inline bool
181mt76x02_temp_tx_alc_enabled(struct mt76_dev *dev)
182{
183 u16 val;
184
185 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
186 if (!(val & BIT(15)))
187 return false;
188
189 return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
190 MT_EE_NIC_CONF_1_TEMP_TX_ALC;
191}
192
193static inline bool
194mt76x02_tssi_enabled(struct mt76_dev *dev)
195{
196 return !mt76x02_temp_tx_alc_enabled(dev) &&
197 (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
198 MT_EE_NIC_CONF_1_TX_ALC_EN);
199} 180}
200 181
201bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band); 182bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band);
202int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf, 183int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
203 int len, enum mt76x02_eeprom_modes mode); 184 int len, enum mt76x02_eeprom_modes mode);
204void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band, 185void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
205 u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g); 186 u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g);
206u8 mt76x02_get_lna_gain(struct mt76_dev *dev, 187u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
207 s8 *lna_2g, s8 *lna_5g, 188 s8 *lna_2g, s8 *lna_5g,
208 struct ieee80211_channel *chan); 189 struct ieee80211_channel *chan);
209void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev); 190void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev);
210 191
211#endif /* __MT76x02_EEPROM_H */ 192#endif /* __MT76x02_EEPROM_H */
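
mt76x02_temp_tx_alc_enabled() and mt76x02_tssi_enabled() drop out of the
shared header; the mt76x0 calibration work above calls a chip-local
mt76x0_tssi_enabled() instead, which presumably preserves the same EEPROM
test. For reference, the relocated logic as removed here:

        /* TSSI counts as enabled only when temperature-compensated
         * Tx ALC is off and NIC_CONF_1 advertises TX_ALC_EN */
        return !mt76x02_temp_tx_alc_enabled(dev) &&
               (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
                MT_EE_NIC_CONF_1_TX_ALC_EN);
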
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 244245418ebb..10578e4cb269 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -45,8 +45,8 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
45} 45}
46EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info); 46EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info);
47 47
48int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx, 48int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
49 struct ieee80211_key_conf *key) 49 u8 key_idx, struct ieee80211_key_conf *key)
50{ 50{
51 enum mt76x02_cipher_type cipher; 51 enum mt76x02_cipher_type cipher;
52 u8 key_data[32]; 52 u8 key_data[32];
@@ -56,20 +56,20 @@ int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
56 if (cipher == MT_CIPHER_NONE && key) 56 if (cipher == MT_CIPHER_NONE && key)
57 return -EOPNOTSUPP; 57 return -EOPNOTSUPP;
58 58
59 val = __mt76_rr(dev, MT_SKEY_MODE(vif_idx)); 59 val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
60 val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); 60 val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
61 val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); 61 val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
62 __mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); 62 mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
63 63
64 __mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, 64 mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
65 sizeof(key_data)); 65 sizeof(key_data));
66 66
67 return 0; 67 return 0;
68} 68}
69EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); 69EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
70 70
71int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx, 71int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
72 struct ieee80211_key_conf *key) 72 struct ieee80211_key_conf *key)
73{ 73{
74 enum mt76x02_cipher_type cipher; 74 enum mt76x02_cipher_type cipher;
75 u8 key_data[32]; 75 u8 key_data[32];
@@ -79,25 +79,26 @@ int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
79 if (cipher == MT_CIPHER_NONE && key) 79 if (cipher == MT_CIPHER_NONE && key)
80 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
81 81
82 __mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); 82 mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
83 __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); 83 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
84 84
85 memset(iv_data, 0, sizeof(iv_data)); 85 memset(iv_data, 0, sizeof(iv_data));
86 if (key) { 86 if (key) {
87 __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, 87 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
88 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); 88 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
89 iv_data[3] = key->keyidx << 6; 89 iv_data[3] = key->keyidx << 6;
90 if (cipher >= MT_CIPHER_TKIP) 90 if (cipher >= MT_CIPHER_TKIP)
91 iv_data[3] |= 0x20; 91 iv_data[3] |= 0x20;
92 } 92 }
93 93
94 __mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); 94 mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
95 95
96 return 0; 96 return 0;
97} 97}
98EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key); 98EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key);
99 99
100void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac) 100void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
101 u8 vif_idx, u8 *mac)
101{ 102{
102 struct mt76_wcid_addr addr = {}; 103 struct mt76_wcid_addr addr = {};
103 u32 attr; 104 u32 attr;
@@ -105,10 +106,10 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
105 attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | 106 attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
106 FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); 107 FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
107 108
108 __mt76_wr(dev, MT_WCID_ATTR(idx), attr); 109 mt76_wr(dev, MT_WCID_ATTR(idx), attr);
109 110
110 __mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); 111 mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
111 __mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); 112 mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
112 113
113 if (idx >= 128) 114 if (idx >= 128)
114 return; 115 return;
@@ -116,22 +117,22 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
116 if (mac) 117 if (mac)
117 memcpy(addr.macaddr, mac, ETH_ALEN); 118 memcpy(addr.macaddr, mac, ETH_ALEN);
118 119
119 __mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); 120 mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
120} 121}
121EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup); 122EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
122 123
123void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop) 124void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
124{ 125{
125 u32 val = __mt76_rr(dev, MT_WCID_DROP(idx)); 126 u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
126 u32 bit = MT_WCID_DROP_MASK(idx); 127 u32 bit = MT_WCID_DROP_MASK(idx);
127 128
128 /* prevent unnecessary writes */ 129 /* prevent unnecessary writes */
129 if ((val & bit) != (bit * drop)) 130 if ((val & bit) != (bit * drop))
130 __mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); 131 mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
131} 132}
132EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop); 133EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop);
133 134
134void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) 135void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq)
135{ 136{
136 struct mt76_txq *mtxq; 137 struct mt76_txq *mtxq;
137 138
@@ -151,55 +152,13 @@ void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
151 mtxq->wcid = &mvif->group_wcid; 152 mtxq->wcid = &mvif->group_wcid;
152 } 153 }
153 154
154 mt76_txq_init(dev, txq); 155 mt76_txq_init(&dev->mt76, txq);
155} 156}
156EXPORT_SYMBOL_GPL(mt76x02_txq_init); 157EXPORT_SYMBOL_GPL(mt76x02_txq_init);
157 158
158static void
159mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb,
160 struct ieee80211_sta *sta, int len, u8 nss)
161{
162 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
163 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
164 u16 txwi_flags = 0;
165
166 if (info->flags & IEEE80211_TX_CTL_LDPC)
167 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
168 if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
169 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
170 if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
171 txwi_flags |= MT_TXWI_FLAGS_MMPS;
172 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
173 txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
174 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
175 txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
176 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
177 txwi->pktid |= MT_TXWI_PKTID_PROBE;
178 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
179 u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
180
181 ba_size <<= sta->ht_cap.ampdu_factor;
182 ba_size = min_t(int, 63, ba_size - 1);
183 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
184 ba_size = 0;
185 txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
186
187 txwi_flags |= MT_TXWI_FLAGS_AMPDU |
188 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
189 sta->ht_cap.ampdu_density);
190 }
191
192 if (ieee80211_is_probe_resp(hdr->frame_control) ||
193 ieee80211_is_beacon(hdr->frame_control))
194 txwi_flags |= MT_TXWI_FLAGS_TS;
195
196 txwi->flags |= cpu_to_le16(txwi_flags);
197 txwi->len_ctl = cpu_to_le16(len);
198}
199
200static __le16 159static __le16
201mt76x02_mac_tx_rate_val(struct mt76_dev *dev, 160mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
202 const struct ieee80211_tx_rate *rate, u8 *nss_val) 161 const struct ieee80211_tx_rate *rate, u8 *nss_val)
203{ 162{
204 u16 rateval; 163 u16 rateval;
205 u8 phy, rate_idx; 164 u8 phy, rate_idx;
@@ -224,10 +183,10 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
224 bw = 1; 183 bw = 1;
225 } else { 184 } else {
226 const struct ieee80211_rate *r; 185 const struct ieee80211_rate *r;
227 int band = dev->chandef.chan->band; 186 int band = dev->mt76.chandef.chan->band;
228 u16 val; 187 u16 val;
229 188
230 r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx]; 189 r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
231 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 190 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
232 val = r->hw_value_short; 191 val = r->hw_value_short;
233 else 192 else
@@ -248,22 +207,22 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
248 return cpu_to_le16(rateval); 207 return cpu_to_le16(rateval);
249} 208}
250 209
251void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid, 210void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
252 const struct ieee80211_tx_rate *rate) 211 const struct ieee80211_tx_rate *rate)
253{ 212{
254 spin_lock_bh(&dev->lock); 213 spin_lock_bh(&dev->mt76.lock);
255 wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); 214 wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
256 wcid->tx_rate_set = true; 215 wcid->tx_rate_set = true;
257 spin_unlock_bh(&dev->lock); 216 spin_unlock_bh(&dev->mt76.lock);
258} 217}
259 218
260bool mt76x02_mac_load_tx_status(struct mt76_dev *dev, 219bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
261 struct mt76x02_tx_status *stat) 220 struct mt76x02_tx_status *stat)
262{ 221{
263 u32 stat1, stat2; 222 u32 stat1, stat2;
264 223
265 stat2 = __mt76_rr(dev, MT_TX_STAT_FIFO_EXT); 224 stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
266 stat1 = __mt76_rr(dev, MT_TX_STAT_FIFO); 225 stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
267 226
268 stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); 227 stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
269 if (!stat->valid) 228 if (!stat->valid)
@@ -339,17 +298,19 @@ mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
339 return 0; 298 return 0;
340} 299}
341 300
342void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi, 301void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
343 struct sk_buff *skb, struct mt76_wcid *wcid, 302 struct sk_buff *skb, struct mt76_wcid *wcid,
344 struct ieee80211_sta *sta, int len) 303 struct ieee80211_sta *sta, int len)
345{ 304{
305 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
346 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 306 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
347 struct ieee80211_tx_rate *rate = &info->control.rates[0]; 307 struct ieee80211_tx_rate *rate = &info->control.rates[0];
348 struct ieee80211_key_conf *key = info->control.hw_key; 308 struct ieee80211_key_conf *key = info->control.hw_key;
349 u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2)); 309 u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
310 u16 txwi_flags = 0;
350 u8 nss; 311 u8 nss;
351 s8 txpwr_adj, max_txpwr_adj; 312 s8 txpwr_adj, max_txpwr_adj;
352 u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf; 313 u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
353 314
354 memset(txwi, 0, sizeof(*txwi)); 315 memset(txwi, 0, sizeof(*txwi));
355 316
@@ -374,7 +335,7 @@ void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
374 txwi->eiv = *((__le32 *)&ccmp_pn[1]); 335 txwi->eiv = *((__le32 *)&ccmp_pn[1]);
375 } 336 }
376 337
377 spin_lock_bh(&dev->lock); 338 spin_lock_bh(&dev->mt76.lock);
378 if (wcid && (rate->idx < 0 || !rate->count)) { 339 if (wcid && (rate->idx < 0 || !rate->count)) {
379 txwi->rate = wcid->tx_rate; 340 txwi->rate = wcid->tx_rate;
380 max_txpwr_adj = wcid->max_txpwr_adj; 341 max_txpwr_adj = wcid->max_txpwr_adj;
@@ -383,26 +344,57 @@ void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
383 txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss); 344 txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
384 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate); 345 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
385 } 346 }
386 spin_unlock_bh(&dev->lock); 347 spin_unlock_bh(&dev->mt76.lock);
387 348
388 txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf, 349 txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
389 max_txpwr_adj); 350 max_txpwr_adj);
390 txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj); 351 txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
391 352
392 if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E4) 353 if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
393 txwi->txstream = 0x13; 354 txwi->txstream = 0x13;
394 else if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E3 && 355 else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
395 !(txwi->rate & cpu_to_le16(rate_ht_mask))) 356 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
396 txwi->txstream = 0x93; 357 txwi->txstream = 0x93;
397 358
398 mt76x02_mac_fill_txwi(txwi, skb, sta, len, nss); 359 if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
360 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
361 if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
362 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
363 if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
364 txwi_flags |= MT_TXWI_FLAGS_MMPS;
365 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
366 txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
367 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
368 txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
369 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
370 txwi->pktid |= MT_TXWI_PKTID_PROBE;
371 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
372 u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
373
374 ba_size <<= sta->ht_cap.ampdu_factor;
375 ba_size = min_t(int, 63, ba_size - 1);
376 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
377 ba_size = 0;
378 txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
379
380 txwi_flags |= MT_TXWI_FLAGS_AMPDU |
381 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
382 sta->ht_cap.ampdu_density);
383 }
384
385 if (ieee80211_is_probe_resp(hdr->frame_control) ||
386 ieee80211_is_beacon(hdr->frame_control))
387 txwi_flags |= MT_TXWI_FLAGS_TS;
388
389 txwi->flags |= cpu_to_le16(txwi_flags);
390 txwi->len_ctl = cpu_to_le16(len);
399} 391}
400EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi); 392EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
401 393
402static void 394static void
403mt76x02_mac_fill_tx_status(struct mt76_dev *dev, 395mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
404 struct ieee80211_tx_info *info, 396 struct ieee80211_tx_info *info,
405 struct mt76x02_tx_status *st, int n_frames) 397 struct mt76x02_tx_status *st, int n_frames)
406{ 398{
407 struct ieee80211_tx_rate *rate = info->status.rates; 399 struct ieee80211_tx_rate *rate = info->status.rates;
408 int cur_idx, last_rate; 400 int cur_idx, last_rate;
@@ -413,7 +405,7 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
413 405
414 last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); 406 last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
415 mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate, 407 mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate,
416 dev->chandef.chan->band); 408 dev->mt76.chandef.chan->band);
417 if (last_rate < IEEE80211_TX_MAX_RATES - 1) 409 if (last_rate < IEEE80211_TX_MAX_RATES - 1)
418 rate[last_rate + 1].idx = -1; 410 rate[last_rate + 1].idx = -1;
419 411
@@ -441,8 +433,8 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
441 info->flags |= IEEE80211_TX_STAT_ACK; 433 info->flags |= IEEE80211_TX_STAT_ACK;
442} 434}
443 435
444void mt76x02_send_tx_status(struct mt76_dev *dev, 436void mt76x02_send_tx_status(struct mt76x02_dev *dev,
445 struct mt76x02_tx_status *stat, u8 *update) 437 struct mt76x02_tx_status *stat, u8 *update)
446{ 438{
447 struct ieee80211_tx_info info = {}; 439 struct ieee80211_tx_info info = {};
448 struct ieee80211_sta *sta = NULL; 440 struct ieee80211_sta *sta = NULL;
@@ -450,8 +442,8 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
450 struct mt76x02_sta *msta = NULL; 442 struct mt76x02_sta *msta = NULL;
451 443
452 rcu_read_lock(); 444 rcu_read_lock();
453 if (stat->wcid < ARRAY_SIZE(dev->wcid)) 445 if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
454 wcid = rcu_dereference(dev->wcid[stat->wcid]); 446 wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
455 447
456 if (wcid) { 448 if (wcid) {
457 void *priv; 449 void *priv;
@@ -476,7 +468,7 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
476 } 468 }
477 469
478 mt76x02_mac_fill_tx_status(dev, &info, &msta->status, 470 mt76x02_mac_fill_tx_status(dev, &info, &msta->status,
479 msta->n_frames); 471 msta->n_frames);
480 472
481 msta->status = *stat; 473 msta->status = *stat;
482 msta->n_frames = 1; 474 msta->n_frames = 1;
@@ -486,7 +478,7 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
486 *update = 1; 478 *update = 1;
487 } 479 }
488 480
489 ieee80211_tx_status_noskb(dev->hw, sta, &info); 481 ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
490 482
491out: 483out:
492 rcu_read_unlock(); 484 rcu_read_unlock();
@@ -561,21 +553,21 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
561} 553}
562EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate); 554EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate);
563 555
564void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr) 556void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
565{ 557{
566 ether_addr_copy(dev->macaddr, addr); 558 ether_addr_copy(dev->mt76.macaddr, addr);
567 559
568 if (!is_valid_ether_addr(dev->macaddr)) { 560 if (!is_valid_ether_addr(dev->mt76.macaddr)) {
569 eth_random_addr(dev->macaddr); 561 eth_random_addr(dev->mt76.macaddr);
570 dev_info(dev->dev, 562 dev_info(dev->mt76.dev,
571 "Invalid MAC address, using random address %pM\n", 563 "Invalid MAC address, using random address %pM\n",
572 dev->macaddr); 564 dev->mt76.macaddr);
573 } 565 }
574 566
575 __mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); 567 mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
576 __mt76_wr(dev, MT_MAC_ADDR_DW1, 568 mt76_wr(dev, MT_MAC_ADDR_DW1,
577 get_unaligned_le16(dev->macaddr + 4) | 569 get_unaligned_le16(dev->mt76.macaddr + 4) |
578 FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); 570 FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
579} 571}
580EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr); 572EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
581 573
@@ -697,7 +689,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
697 689
698 while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) { 690 while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
699 spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); 691 spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
700 ret = mt76x02_mac_load_tx_status(&dev->mt76, &stat); 692 ret = mt76x02_mac_load_tx_status(dev, &stat);
701 spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); 693 spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
702 694
703 if (!ret) 695 if (!ret)
@@ -706,7 +698,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
706 trace_mac_txstat_fetch(dev, &stat); 698 trace_mac_txstat_fetch(dev, &stat);
707 699
708 if (!irq) { 700 if (!irq) {
709 mt76x02_send_tx_status(&dev->mt76, &stat, &update); 701 mt76x02_send_tx_status(dev, &stat, &update);
710 continue; 702 continue;
711 } 703 }
712 704
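
With mt76x02_mac_fill_txwi() folded into mt76x02_mac_write_txwi(), the
A-MPDU block-ack window calculation is easier to follow in context. A
worked example (our arithmetic; IEEE80211_MIN_AMPDU_BUF is 8):

        u8 ba_size = IEEE80211_MIN_AMPDU_BUF;   /* 8 */

        ba_size <<= sta->ht_cap.ampdu_factor;   /* factor 2 -> 32 */
        ba_size = min_t(int, 63, ba_size - 1);  /* field stores size-1 */
        /* -> 31 for factor 2; rate-probing frames force ba_size = 0 */

Note also that the LDPC rate bit is now gated on is_mt76x2(), presumably
because the mt76x0 parts that share this path do not support LDPC.
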
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 4f7ee4620ab5..d99c18743969 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -198,28 +198,29 @@ mt76x02_skb_tx_info(struct sk_buff *skb)
198 return (void *)info->status.status_driver_data; 198 return (void *)info->status.status_driver_data;
199} 199}
200 200
201void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq); 201void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq);
202enum mt76x02_cipher_type 202enum mt76x02_cipher_type
203mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data); 203mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data);
204 204
205int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx, 205int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
206 struct ieee80211_key_conf *key); 206 u8 key_idx, struct ieee80211_key_conf *key);
207int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx, 207int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
208 struct ieee80211_key_conf *key); 208 struct ieee80211_key_conf *key);
209void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac); 209void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
210void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop); 210 u8 *mac);
211void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid, 211void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
212 const struct ieee80211_tx_rate *rate); 212void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
213bool mt76x02_mac_load_tx_status(struct mt76_dev *dev, 213 const struct ieee80211_tx_rate *rate);
214 struct mt76x02_tx_status *stat); 214bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
215void mt76x02_send_tx_status(struct mt76_dev *dev, 215 struct mt76x02_tx_status *stat);
216 struct mt76x02_tx_status *stat, u8 *update); 216void mt76x02_send_tx_status(struct mt76x02_dev *dev,
217 struct mt76x02_tx_status *stat, u8 *update);
217int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, 218int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
218 void *rxi); 219 void *rxi);
219int 220int
220mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate); 221mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate);
221void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr); 222void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
222void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi, 223void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
223 struct sk_buff *skb, struct mt76_wcid *wcid, 224 struct sk_buff *skb, struct mt76_wcid *wcid,
224 struct ieee80211_sta *sta, int len); 225 struct ieee80211_sta *sta, int len);
225void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq); 226void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 6d565133b7af..1b853bb723fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -19,9 +19,7 @@
19#include <linux/firmware.h> 19#include <linux/firmware.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21 21
22#include "mt76.h"
23#include "mt76x02_mcu.h" 22#include "mt76x02_mcu.h"
24#include "mt76x02_dma.h"
25 23
26struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) 24struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
27{ 25{
@@ -37,7 +35,7 @@ struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
37EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc); 35EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
38 36
39static struct sk_buff * 37static struct sk_buff *
40mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires) 38mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
41{ 39{
42 unsigned long timeout; 40 unsigned long timeout;
43 41
@@ -45,17 +43,17 @@ mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
45 return NULL; 43 return NULL;
46 44
47 timeout = expires - jiffies; 45 timeout = expires - jiffies;
48 wait_event_timeout(dev->mmio.mcu.wait, 46 wait_event_timeout(dev->mt76.mmio.mcu.wait,
49 !skb_queue_empty(&dev->mmio.mcu.res_q), 47 !skb_queue_empty(&dev->mt76.mmio.mcu.res_q),
50 timeout); 48 timeout);
51 return skb_dequeue(&dev->mmio.mcu.res_q); 49 return skb_dequeue(&dev->mt76.mmio.mcu.res_q);
52} 50}
53 51
54static int 52static int
55mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid, 53mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
56 struct sk_buff *skb, int cmd, int seq) 54 struct sk_buff *skb, int cmd, int seq)
57{ 55{
58 struct mt76_queue *q = &dev->q_tx[qid]; 56 struct mt76_queue *q = &dev->mt76.q_tx[qid];
59 struct mt76_queue_buf buf; 57 struct mt76_queue_buf buf;
60 dma_addr_t addr; 58 dma_addr_t addr;
61 u32 tx_info; 59 u32 tx_info;
@@ -66,24 +64,26 @@ mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
66 FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | 64 FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
67 FIELD_PREP(MT_MCU_MSG_LEN, skb->len); 65 FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
68 66
69 addr = dma_map_single(dev->dev, skb->data, skb->len, 67 addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
70 DMA_TO_DEVICE); 68 DMA_TO_DEVICE);
71 if (dma_mapping_error(dev->dev, addr)) 69 if (dma_mapping_error(dev->mt76.dev, addr))
72 return -ENOMEM; 70 return -ENOMEM;
73 71
74 buf.addr = addr; 72 buf.addr = addr;
75 buf.len = skb->len; 73 buf.len = skb->len;
74
76 spin_lock_bh(&q->lock); 75 spin_lock_bh(&q->lock);
77 dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL); 76 mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
78 dev->queue_ops->kick(dev, q); 77 mt76_queue_kick(dev, q);
79 spin_unlock_bh(&q->lock); 78 spin_unlock_bh(&q->lock);
80 79
81 return 0; 80 return 0;
82} 81}
83 82
84int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, 83int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
85 int cmd, bool wait_resp) 84 int cmd, bool wait_resp)
86{ 85{
86 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
87 unsigned long expires = jiffies + HZ; 87 unsigned long expires = jiffies + HZ;
88 int ret; 88 int ret;
89 u8 seq; 89 u8 seq;
@@ -91,11 +91,11 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
91 if (!skb) 91 if (!skb)
92 return -EINVAL; 92 return -EINVAL;
93 93
94 mutex_lock(&dev->mmio.mcu.mutex); 94 mutex_lock(&mdev->mmio.mcu.mutex);
95 95
96 seq = ++dev->mmio.mcu.msg_seq & 0xf; 96 seq = ++mdev->mmio.mcu.msg_seq & 0xf;
97 if (!seq) 97 if (!seq)
98 seq = ++dev->mmio.mcu.msg_seq & 0xf; 98 seq = ++mdev->mmio.mcu.msg_seq & 0xf;
99 99
100 ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq); 100 ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
101 if (ret) 101 if (ret)
@@ -107,7 +107,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
107 107
108 skb = mt76x02_mcu_get_response(dev, expires); 108 skb = mt76x02_mcu_get_response(dev, expires);
109 if (!skb) { 109 if (!skb) {
110 dev_err(dev->dev, 110 dev_err(mdev->dev,
111 "MCU message %d (seq %d) timed out\n", cmd, 111 "MCU message %d (seq %d) timed out\n", cmd,
112 seq); 112 seq);
113 ret = -ETIMEDOUT; 113 ret = -ETIMEDOUT;
@@ -125,13 +125,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
125 } 125 }
126 126
127out: 127out:
128 mutex_unlock(&dev->mmio.mcu.mutex); 128 mutex_unlock(&mdev->mmio.mcu.mutex);
129 129
130 return ret; 130 return ret;
131} 131}
132EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send); 132EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
133 133
134int mt76x02_mcu_function_select(struct mt76_dev *dev, 134int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
135 enum mcu_function func, 135 enum mcu_function func,
136 u32 val, bool wait_resp) 136 u32 val, bool wait_resp)
137{ 137{
@@ -144,13 +144,12 @@ int mt76x02_mcu_function_select(struct mt76_dev *dev,
144 .value = cpu_to_le32(val), 144 .value = cpu_to_le32(val),
145 }; 145 };
146 146
147 skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); 147 skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
148 return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP, 148 return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp);
149 wait_resp);
150} 149}
151EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select); 150EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
152 151
153int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on, 152int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
154 bool wait_resp) 153 bool wait_resp)
155{ 154{
156 struct sk_buff *skb; 155 struct sk_buff *skb;
@@ -162,13 +161,12 @@ int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
162 .level = cpu_to_le32(0), 161 .level = cpu_to_le32(0),
163 }; 162 };
164 163
165 skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); 164 skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
166 return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, 165 return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp);
167 wait_resp);
168} 166}
169EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state); 167EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
170 168
171int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type, 169int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
172 u32 param, bool wait) 170 u32 param, bool wait)
173{ 171{
174 struct sk_buff *skb; 172 struct sk_buff *skb;
@@ -182,44 +180,44 @@ int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
182 int ret; 180 int ret;
183 181
184 if (wait) 182 if (wait)
185 dev->bus->rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); 183 mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
186 184
187 skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); 185 skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
188 ret = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); 186 ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
189 if (ret) 187 if (ret)
190 return ret; 188 return ret;
191 189
192 if (wait && 190 if (wait &&
193 WARN_ON(!__mt76_poll_msec(dev, MT_MCU_COM_REG0, 191 WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
194 BIT(31), BIT(31), 100))) 192 BIT(31), BIT(31), 100)))
195 return -ETIMEDOUT; 193 return -ETIMEDOUT;
196 194
197 return 0; 195 return 0;
198} 196}
199EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate); 197EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);
200 198
201int mt76x02_mcu_cleanup(struct mt76_dev *dev) 199int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
202{ 200{
203 struct sk_buff *skb; 201 struct sk_buff *skb;
204 202
205 dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1); 203 mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
206 usleep_range(20000, 30000); 204 usleep_range(20000, 30000);
207 205
208 while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL) 206 while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
209 dev_kfree_skb(skb); 207 dev_kfree_skb(skb);
210 208
211 return 0; 209 return 0;
212} 210}
213EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup); 211EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
214 212
215void mt76x02_set_ethtool_fwver(struct mt76_dev *dev, 213void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
216 const struct mt76x02_fw_header *h) 214 const struct mt76x02_fw_header *h)
217{ 215{
218 u16 bld = le16_to_cpu(h->build_ver); 216 u16 bld = le16_to_cpu(h->build_ver);
219 u16 ver = le16_to_cpu(h->fw_ver); 217 u16 ver = le16_to_cpu(h->fw_ver);
220 218
221 snprintf(dev->hw->wiphy->fw_version, 219 snprintf(dev->mt76.hw->wiphy->fw_version,
222 sizeof(dev->hw->wiphy->fw_version), 220 sizeof(dev->mt76.hw->wiphy->fw_version),
223 "%d.%d.%02d-b%x", 221 "%d.%d.%02d-b%x",
224 (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld); 222 (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
225} 223}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
index ce664f8b1c94..2d8fd2514570 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -17,6 +17,8 @@
17#ifndef __MT76x02_MCU_H 17#ifndef __MT76x02_MCU_H
18#define __MT76x02_MCU_H 18#define __MT76x02_MCU_H
19 19
20#include "mt76x02.h"
21
20#define MT_MCU_RESET_CTL 0x070C 22#define MT_MCU_RESET_CTL 0x070C
21#define MT_MCU_INT_LEVEL 0x0718 23#define MT_MCU_INT_LEVEL 0x0718
22#define MT_MCU_COM_REG0 0x0730 24#define MT_MCU_COM_REG0 0x0730
@@ -94,18 +96,18 @@ struct mt76x02_patch_header {
94 u8 pad[2]; 96 u8 pad[2];
95}; 97};
96 98
97int mt76x02_mcu_cleanup(struct mt76_dev *dev); 99int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
98int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type, 100int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
99 u32 param, bool wait); 101 u32 param, bool wait);
100struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len); 102struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len);
101int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, 103int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
102 int cmd, bool wait_resp); 104 int cmd, bool wait_resp);
103int mt76x02_mcu_function_select(struct mt76_dev *dev, 105int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
104 enum mcu_function func, 106 enum mcu_function func,
105 u32 val, bool wait_resp); 107 u32 val, bool wait_resp);
106int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on, 108int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
107 bool wait_resp); 109 bool wait_resp);
108void mt76x02_set_ethtool_fwver(struct mt76_dev *dev, 110void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
109 const struct mt76x02_fw_header *h); 111 const struct mt76x02_fw_header *h);
110 112
111#endif /* __MT76x02_MCU_H */ 113#endif /* __MT76x02_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 1b945079c802..39f092034240 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -65,7 +65,7 @@ static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
65 u8 update = 1; 65 u8 update = 1;
66 66
67 while (kfifo_get(&dev->txstatus_fifo, &stat)) 67 while (kfifo_get(&dev->txstatus_fifo, &stat))
68 mt76x02_send_tx_status(&dev->mt76, &stat, &update); 68 mt76x02_send_tx_status(dev, &stat, &update);
69} 69}
70 70
71static void mt76x02_tx_tasklet(unsigned long data) 71static void mt76x02_tx_tasklet(unsigned long data)
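
mt76x02_send_tx_status now takes the mt76x02 device directly; the surrounding loop drains TX status words that the interrupt path queued into a kfifo, deferring the heavier processing to tasklet context. A minimal stand-in for that drain loop, using a plain ring buffer instead of the kernel kfifo API:

#include <stdio.h>

struct fifo { unsigned int buf[8]; unsigned int head, tail; };

/* pop one queued status word; returns 0 when the fifo is empty */
static int fifo_get(struct fifo *f, unsigned int *val)
{
	if (f->head == f->tail)
		return 0;
	*val = f->buf[f->tail++ % 8];
	return 1;
}

int main(void)
{
	/* two status words already queued by the (simulated) IRQ path */
	struct fifo f = { .buf = { 0x11, 0x22 }, .head = 2 };
	unsigned int stat;

	while (fifo_get(&f, &stat))	/* mirrors: while (kfifo_get(...)) */
		printf("tx status 0x%x\n", stat);
	return 0;
}
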
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index d31ce1d7b689..0f1d7b5c9f68 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -17,18 +17,17 @@
17 17
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19 19
20#include "mt76.h" 20#include "mt76x02.h"
21#include "mt76x02_phy.h" 21#include "mt76x02_phy.h"
22#include "mt76x02_mac.h"
23 22
24void mt76x02_phy_set_rxpath(struct mt76_dev *dev) 23void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
25{ 24{
26 u32 val; 25 u32 val;
27 26
28 val = __mt76_rr(dev, MT_BBP(AGC, 0)); 27 val = mt76_rr(dev, MT_BBP(AGC, 0));
29 val &= ~BIT(4); 28 val &= ~BIT(4);
30 29
31 switch (dev->chainmask & 0xf) { 30 switch (dev->mt76.chainmask & 0xf) {
32 case 2: 31 case 2:
33 val |= BIT(3); 32 val |= BIT(3);
34 break; 33 break;
@@ -37,23 +36,23 @@ void mt76x02_phy_set_rxpath(struct mt76_dev *dev)
37 break; 36 break;
38 } 37 }
39 38
40 __mt76_wr(dev, MT_BBP(AGC, 0), val); 39 mt76_wr(dev, MT_BBP(AGC, 0), val);
41 mb(); 40 mb();
42 val = __mt76_rr(dev, MT_BBP(AGC, 0)); 41 val = mt76_rr(dev, MT_BBP(AGC, 0));
43} 42}
44EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath); 43EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
45 44
46void mt76x02_phy_set_txdac(struct mt76_dev *dev) 45void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
47{ 46{
48 int txpath; 47 int txpath;
49 48
50 txpath = (dev->chainmask >> 8) & 0xf; 49 txpath = (dev->mt76.chainmask >> 8) & 0xf;
51 switch (txpath) { 50 switch (txpath) {
52 case 2: 51 case 2:
53 __mt76_set(dev, MT_BBP(TXBE, 5), 0x3); 52 mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
54 break; 53 break;
55 default: 54 default:
56 __mt76_clear(dev, MT_BBP(TXBE, 5), 0x3); 55 mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
57 break; 56 break;
58 } 57 }
59} 58}
@@ -102,40 +101,38 @@ void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
102} 101}
103EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset); 102EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
104 103
105void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1) 104void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
106{ 105{
107 struct mt76_rate_power *t = &dev->rate_power; 106 struct mt76_rate_power *t = &dev->mt76.rate_power;
108 107
109 __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, 108 mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
110 txp_0); 109 mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
111 __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, 110
112 txp_1); 111 mt76_wr(dev, MT_TX_PWR_CFG_0,
113 112 mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
114 __mt76_wr(dev, MT_TX_PWR_CFG_0, 113 t->ofdm[2]));
115 mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0], 114 mt76_wr(dev, MT_TX_PWR_CFG_1,
116 t->ofdm[2])); 115 mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
117 __mt76_wr(dev, MT_TX_PWR_CFG_1, 116 t->ht[2]));
118 mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0], 117 mt76_wr(dev, MT_TX_PWR_CFG_2,
119 t->ht[2])); 118 mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
120 __mt76_wr(dev, MT_TX_PWR_CFG_2, 119 t->ht[10]));
121 mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8], 120 mt76_wr(dev, MT_TX_PWR_CFG_3,
122 t->ht[10])); 121 mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
123 __mt76_wr(dev, MT_TX_PWR_CFG_3, 122 t->stbc[2]));
124 mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0], 123 mt76_wr(dev, MT_TX_PWR_CFG_4,
125 t->stbc[2])); 124 mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
126 __mt76_wr(dev, MT_TX_PWR_CFG_4, 125 mt76_wr(dev, MT_TX_PWR_CFG_7,
127 mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0)); 126 mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
128 __mt76_wr(dev, MT_TX_PWR_CFG_7, 127 t->vht[9]));
129 mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7], 128 mt76_wr(dev, MT_TX_PWR_CFG_8,
130 t->vht[9])); 129 mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
131 __mt76_wr(dev, MT_TX_PWR_CFG_8, 130 mt76_wr(dev, MT_TX_PWR_CFG_9,
132 mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9])); 131 mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
133 __mt76_wr(dev, MT_TX_PWR_CFG_9,
134 mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
135} 132}
136EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower); 133EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
137 134
138int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev) 135int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev)
139{ 136{
140 struct mt76x02_sta *sta; 137 struct mt76x02_sta *sta;
141 struct mt76_wcid *wcid; 138 struct mt76_wcid *wcid;
@@ -145,8 +142,8 @@ int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
145 local_bh_disable(); 142 local_bh_disable();
146 rcu_read_lock(); 143 rcu_read_lock();
147 144
148 for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { 145 for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) {
149 unsigned long mask = dev->wcid_mask[i]; 146 unsigned long mask = dev->mt76.wcid_mask[i];
150 147
151 if (!mask) 148 if (!mask)
152 continue; 149 continue;
@@ -155,17 +152,17 @@ int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
155 if (!(mask & 1)) 152 if (!(mask & 1))
156 continue; 153 continue;
157 154
158 wcid = rcu_dereference(dev->wcid[j]); 155 wcid = rcu_dereference(dev->mt76.wcid[j]);
159 if (!wcid) 156 if (!wcid)
160 continue; 157 continue;
161 158
162 sta = container_of(wcid, struct mt76x02_sta, wcid); 159 sta = container_of(wcid, struct mt76x02_sta, wcid);
163 spin_lock(&dev->rx_lock); 160 spin_lock(&dev->mt76.rx_lock);
164 if (sta->inactive_count++ < 5) 161 if (sta->inactive_count++ < 5)
165 cur_rssi = ewma_signal_read(&sta->rssi); 162 cur_rssi = ewma_signal_read(&sta->rssi);
166 else 163 else
167 cur_rssi = 0; 164 cur_rssi = 0;
168 spin_unlock(&dev->rx_lock); 165 spin_unlock(&dev->mt76.rx_lock);
169 166
170 if (cur_rssi < min_rssi) 167 if (cur_rssi < min_rssi)
171 min_rssi = cur_rssi; 168 min_rssi = cur_rssi;
@@ -181,3 +178,81 @@ int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
181 return min_rssi; 178 return min_rssi;
182} 179}
183EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi); 180EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);
181
182void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
183{
184 int core_val, agc_val;
185
186 switch (width) {
187 case NL80211_CHAN_WIDTH_80:
188 core_val = 3;
189 agc_val = 7;
190 break;
191 case NL80211_CHAN_WIDTH_40:
192 core_val = 2;
193 agc_val = 3;
194 break;
195 default:
196 core_val = 0;
197 agc_val = 1;
198 break;
199 }
200
201 mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
202 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
203 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
204 mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
205}
206EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw);
207
208void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
209 bool primary_upper)
210{
211 switch (band) {
212 case NL80211_BAND_2GHZ:
213 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
214 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
215 break;
216 case NL80211_BAND_5GHZ:
217 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
218 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
219 break;
220 }
221
222 mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
223 primary_upper);
224}
225EXPORT_SYMBOL_GPL(mt76x02_phy_set_band);
226
227bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
228{
229 u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
230 bool ret = false;
231 u32 false_cca;
232
233 false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
234 dev->cal.false_cca = false_cca;
235 if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) {
236 dev->cal.agc_gain_adjust += 2;
237 ret = true;
238 } else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
239 (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) {
240 dev->cal.agc_gain_adjust -= 2;
241 ret = true;
242 }
243
244 return ret;
245}
246EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
247
248void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
249{
250 dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8),
251 MT_BBP_AGC_GAIN);
252 dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9),
253 MT_BBP_AGC_GAIN);
254 memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
255 sizeof(dev->cal.agc_gain_cur));
256 dev->cal.low_gain = -1;
257}
258EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
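
mt76x02_phy_adjust_vga_gain above is a hysteresis loop: when false CCA events pile up it raises the gain adjustment, reducing sensitivity, and when the channel looks quiet it backs the adjustment off again, returning whether anything changed so the caller can reprogram the AGC. A standalone model of the same control loop, thresholds copied from the code above:

#include <stdbool.h>
#include <stdio.h>

struct cal { int low_gain; int agc_gain_adjust; unsigned int false_cca; };

static bool adjust_vga_gain(struct cal *c, unsigned int false_cca)
{
	int limit = c->low_gain > 0 ? 16 : 4;
	bool changed = false;

	c->false_cca = false_cca;
	if (false_cca > 800 && c->agc_gain_adjust < limit) {
		c->agc_gain_adjust += 2;	/* too many false detects */
		changed = true;
	} else if ((false_cca < 10 && c->agc_gain_adjust > 0) ||
		   (c->agc_gain_adjust >= limit && false_cca < 500)) {
		c->agc_gain_adjust -= 2;	/* quiet again, recover gain */
		changed = true;
	}
	return changed;
}

int main(void)
{
	struct cal c = { .low_gain = 1 };
	unsigned int samples[] = { 900, 900, 400, 5 };

	for (int i = 0; i < 4; i++) {
		bool changed = adjust_vga_gain(&c, samples[i]);

		printf("cca=%u adjust=%d changed=%d\n",
		       samples[i], c.agc_gain_adjust, changed);
	}
	return 0;
}
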
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
index e70ea6eeb077..2b316cf7c70c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -19,12 +19,43 @@
19 19
20#include "mt76x02_regs.h" 20#include "mt76x02_regs.h"
21 21
22static inline int
23mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
24{
25 switch (dev->mt76.chandef.width) {
26 case NL80211_CHAN_WIDTH_80:
27 return -62;
28 case NL80211_CHAN_WIDTH_40:
29 return -65;
30 default:
31 return -68;
32 }
33}
34
35static inline int
36mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
37{
38 switch (dev->mt76.chandef.width) {
39 case NL80211_CHAN_WIDTH_80:
40 return -76;
41 case NL80211_CHAN_WIDTH_40:
42 return -79;
43 default:
44 return -82;
45 }
46}
47
22void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset); 48void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
23void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_2); 49void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2);
24void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit); 50void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
25int mt76x02_get_max_rate_power(struct mt76_rate_power *r); 51int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
26void mt76x02_phy_set_rxpath(struct mt76_dev *dev); 52void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
27void mt76x02_phy_set_txdac(struct mt76_dev *dev); 53void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
28int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev); 54int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev);
55void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
56void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
57 bool primary_upper);
58bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev);
59void mt76x02_init_agc_gain(struct mt76x02_dev *dev);
29 60
30#endif /* __MT76x02_PHY_H */ 61#endif /* __MT76x02_PHY_H */
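
The two inline helpers added above encode the same rule at two operating points: wider channels integrate more noise, so the RSSI threshold for gain switching rises by roughly 3 dB per bandwidth doubling. A trivial standalone sketch of the mapping, values copied from the header:

#include <stdio.h>

enum chan_width { W20, W40, W80 };

static int rssi_gain_thresh(enum chan_width w)
{
	switch (w) {
	case W80: return -62;
	case W40: return -65;
	default:  return -68;
	}
}

static int low_rssi_gain_thresh(enum chan_width w)
{
	switch (w) {
	case W80: return -76;
	case W40: return -79;
	default:  return -82;
	}
}

int main(void)
{
	printf("80 MHz: high %d dBm, low %d dBm\n",
	       rssi_gain_thresh(W80), low_rssi_gain_thresh(W80));
	return 0;
}
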
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 24d1e6d747dd..f7de77d09d28 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -205,8 +205,8 @@
205#define MT_TXQ_STA 0x0434 205#define MT_TXQ_STA 0x0434
206#define MT_RF_CSR_CFG 0x0500 206#define MT_RF_CSR_CFG 0x0500
207#define MT_RF_CSR_CFG_DATA GENMASK(7, 0) 207#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
208#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8) 208#define MT_RF_CSR_CFG_REG_ID GENMASK(14, 8)
209#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14) 209#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 15)
210#define MT_RF_CSR_CFG_WR BIT(30) 210#define MT_RF_CSR_CFG_WR BIT(30)
211#define MT_RF_CSR_CFG_KICK BIT(31) 211#define MT_RF_CSR_CFG_KICK BIT(31)
212 212
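
The regs.h hunk is a bug fix rather than refactoring: REG_ID grows to bits 14:8 (seven bits) and REG_BANK shrinks to bits 17:15, so RF register indices above 63 no longer spill into the bank field. A standalone sketch of the corrected packing, with local reimplementations of the kernel's GENMASK/FIELD_PREP:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v)	(((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define MT_RF_CSR_CFG_DATA	GENMASK(7, 0)
#define MT_RF_CSR_CFG_REG_ID	GENMASK(14, 8)	/* was GENMASK(13, 8) */
#define MT_RF_CSR_CFG_REG_BANK	GENMASK(17, 15)	/* was GENMASK(17, 14) */

int main(void)
{
	/* register index 0x40 (> 63) now fits without corrupting the bank */
	uint32_t cfg = FIELD_PREP(MT_RF_CSR_CFG_DATA, 0xaa) |
		       FIELD_PREP(MT_RF_CSR_CFG_REG_ID, 0x40) |
		       FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, 5);

	printf("cfg=0x%08x\n", cfg);	/* prints cfg=0x0002c0aa */
	return 0;
}
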
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 830377221739..d3de08872d6e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -71,7 +71,7 @@ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
71} 71}
72EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb); 72EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);
73 73
74s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev, 74s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
75 const struct ieee80211_tx_rate *rate) 75 const struct ieee80211_tx_rate *rate)
76{ 76{
77 s8 max_txpwr; 77 s8 max_txpwr;
@@ -80,23 +80,23 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
80 u8 mcs = ieee80211_rate_get_vht_mcs(rate); 80 u8 mcs = ieee80211_rate_get_vht_mcs(rate);
81 81
82 if (mcs == 8 || mcs == 9) { 82 if (mcs == 8 || mcs == 9) {
83 max_txpwr = dev->rate_power.vht[8]; 83 max_txpwr = dev->mt76.rate_power.vht[8];
84 } else { 84 } else {
85 u8 nss, idx; 85 u8 nss, idx;
86 86
87 nss = ieee80211_rate_get_vht_nss(rate); 87 nss = ieee80211_rate_get_vht_nss(rate);
88 idx = ((nss - 1) << 3) + mcs; 88 idx = ((nss - 1) << 3) + mcs;
89 max_txpwr = dev->rate_power.ht[idx & 0xf]; 89 max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
90 } 90 }
91 } else if (rate->flags & IEEE80211_TX_RC_MCS) { 91 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
92 max_txpwr = dev->rate_power.ht[rate->idx & 0xf]; 92 max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
93 } else { 93 } else {
94 enum nl80211_band band = dev->chandef.chan->band; 94 enum nl80211_band band = dev->mt76.chandef.chan->band;
95 95
96 if (band == NL80211_BAND_2GHZ) { 96 if (band == NL80211_BAND_2GHZ) {
97 const struct ieee80211_rate *r; 97 const struct ieee80211_rate *r;
98 struct wiphy *wiphy = dev->hw->wiphy; 98 struct wiphy *wiphy = dev->mt76.hw->wiphy;
99 struct mt76_rate_power *rp = &dev->rate_power; 99 struct mt76_rate_power *rp = &dev->mt76.rate_power;
100 100
101 r = &wiphy->bands[band]->bitrates[rate->idx]; 101 r = &wiphy->bands[band]->bitrates[rate->idx];
102 if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE) 102 if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
@@ -104,7 +104,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
104 else 104 else
105 max_txpwr = rp->ofdm[r->hw_value & 0x7]; 105 max_txpwr = rp->ofdm[r->hw_value & 0x7];
106 } else { 106 } else {
107 max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7]; 107 max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
108 } 108 }
109 } 109 }
110 110
@@ -112,10 +112,8 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
112} 112}
113EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj); 113EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj);
114 114
115s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj) 115s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
116{ 116{
117 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
118
119 txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf); 117 txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
120 txpwr -= (dev->target_power + dev->target_power_delta[0]); 118 txpwr -= (dev->target_power + dev->target_power_delta[0]);
121 txpwr = min_t(s8, txpwr, max_txpwr_adj); 119 txpwr = min_t(s8, txpwr, max_txpwr_adj);
@@ -133,7 +131,7 @@ void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
133{ 131{
134 s8 txpwr_adj; 132 s8 txpwr_adj;
135 133
136 txpwr_adj = mt76x02_tx_get_txpwr_adj(&dev->mt76, txpwr, 134 txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
137 dev->mt76.rate_power.ofdm[4]); 135 dev->mt76.rate_power.ofdm[4]);
138 mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, 136 mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
139 MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj); 137 MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
@@ -157,8 +155,9 @@ void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb)
157} 155}
158EXPORT_SYMBOL_GPL(mt76x02_tx_complete); 156EXPORT_SYMBOL_GPL(mt76x02_tx_complete);
159 157
160bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update) 158bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
161{ 159{
160 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
162 struct mt76x02_tx_status stat; 161 struct mt76x02_tx_status stat;
163 162
164 if (!mt76x02_mac_load_tx_status(dev, &stat)) 163 if (!mt76x02_mac_load_tx_status(dev, &stat))
@@ -181,9 +180,9 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
181 int ret; 180 int ret;
182 181
183 if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) 182 if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
184 mt76x02_mac_wcid_set_drop(&dev->mt76, wcid->idx, false); 183 mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
185 184
186 mt76x02_mac_write_txwi(mdev, txwi, skb, wcid, sta, skb->len); 185 mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
187 186
188 ret = mt76x02_insert_hdr_pad(skb); 187 ret = mt76x02_insert_hdr_pad(skb);
189 if (ret < 0) 188 if (ret < 0)
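
mt76x02_tx_get_txpwr_adj, simplified above by the type change, clamps the requested power twice and converts it into a delta against the EEPROM target power. A standalone model of the visible part of that clamping chain (any rounding or range limiting that follows in the full function is omitted here):

#include <stdio.h>

static signed char min_s8(signed char a, signed char b)
{
	return a < b ? a : b;
}

/* the result is a signed adjustment relative to the EEPROM target
 * power, bounded by the configured power and the per-rate maximum
 */
static signed char txpwr_adj(signed char txpwr, signed char txpower_conf,
			     signed char target_power, signed char delta,
			     signed char max_txpwr_adj)
{
	txpwr = min_s8(txpwr, txpower_conf);
	txpwr -= target_power + delta;
	return min_s8(txpwr, max_txpwr_adj);
}

int main(void)
{
	/* request 20, config caps at 17, target 14 + delta 1, rate cap 4 */
	printf("adj=%d\n", txpwr_adj(20, 17, 14, 1, 4));	/* adj=2 */
	return 0;
}
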
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
index 6b2138328eb2..0126e51d77ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -17,15 +17,15 @@
17#ifndef __MT76x02_USB_H 17#ifndef __MT76x02_USB_H
18#define __MT76x02_USB_H 18#define __MT76x02_USB_H
19 19
20#include "mt76.h" 20#include "mt76x02.h"
21 21
22void mt76x02u_init_mcu(struct mt76_dev *dev); 22void mt76x02u_init_mcu(struct mt76_dev *dev);
23void mt76x02u_mcu_fw_reset(struct mt76_dev *dev); 23void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev);
24int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, 24int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
25 int data_len, u32 max_payload, u32 offset); 25 int data_len, u32 max_payload, u32 offset);
26 26
27int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags); 27int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
28int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data, 28int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
29 struct sk_buff *skb, struct mt76_queue *q, 29 struct sk_buff *skb, struct mt76_queue *q,
30 struct mt76_wcid *wcid, struct ieee80211_sta *sta, 30 struct mt76_wcid *wcid, struct ieee80211_sta *sta,
31 u32 *tx_info); 31 u32 *tx_info);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 7c6c973af386..dc2226c722dd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -34,17 +34,6 @@ void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
34} 34}
35EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb); 35EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
36 36
37static int mt76x02u_check_skb_rooms(struct sk_buff *skb)
38{
39 int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
40 u32 need_head;
41
42 need_head = sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN;
43 if (hdr_len % 4)
44 need_head += 2;
45 return skb_cow(skb, need_head);
46}
47
48int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) 37int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
49{ 38{
50 struct sk_buff *iter, *last = skb; 39 struct sk_buff *iter, *last = skb;
@@ -99,17 +88,14 @@ mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
99 return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags); 88 return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
100} 89}
101 90
102int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data, 91int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
103 struct sk_buff *skb, struct mt76_queue *q, 92 struct sk_buff *skb, struct mt76_queue *q,
104 struct mt76_wcid *wcid, struct ieee80211_sta *sta, 93 struct mt76_wcid *wcid, struct ieee80211_sta *sta,
105 u32 *tx_info) 94 u32 *tx_info)
106{ 95{
96 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
107 struct mt76x02_txwi *txwi; 97 struct mt76x02_txwi *txwi;
108 int err, len = skb->len; 98 int len = skb->len;
109
110 err = mt76x02u_check_skb_rooms(skb);
111 if (err < 0)
112 return -ENOMEM;
113 99
114 mt76x02_insert_hdr_pad(skb); 100 mt76x02_insert_hdr_pad(skb);
115 101
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index cb5f073f08af..da299b8a1334 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -17,8 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/firmware.h> 18#include <linux/firmware.h>
19 19
20#include "mt76.h" 20#include "mt76x02.h"
21#include "mt76x02_dma.h"
22#include "mt76x02_mcu.h" 21#include "mt76x02_mcu.h"
23#include "mt76x02_usb.h" 22#include "mt76x02_usb.h"
24 23
@@ -255,16 +254,16 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
255 return ret; 254 return ret;
256} 255}
257 256
258void mt76x02u_mcu_fw_reset(struct mt76_dev *dev) 257void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
259{ 258{
260 mt76u_vendor_request(dev, MT_VEND_DEV_MODE, 259 mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
261 USB_DIR_OUT | USB_TYPE_VENDOR, 260 USB_DIR_OUT | USB_TYPE_VENDOR,
262 0x1, 0, NULL, 0); 261 0x1, 0, NULL, 0);
263} 262}
264EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset); 263EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
265 264
266static int 265static int
267__mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf, 266__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
268 const void *fw_data, int len, u32 dst_addr) 267 const void *fw_data, int len, u32 dst_addr)
269{ 268{
270 u8 *data = sg_virt(&buf->urb->sg[0]); 269 u8 *data = sg_virt(&buf->urb->sg[0]);
@@ -281,14 +280,14 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
281 memcpy(data + sizeof(info), fw_data, len); 280 memcpy(data + sizeof(info), fw_data, len);
282 memset(data + sizeof(info) + len, 0, 4); 281 memset(data + sizeof(info) + len, 0, 4);
283 282
284 mt76u_single_wr(dev, MT_VEND_WRITE_FCE, 283 mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
285 MT_FCE_DMA_ADDR, dst_addr); 284 MT_FCE_DMA_ADDR, dst_addr);
286 len = roundup(len, 4); 285 len = roundup(len, 4);
287 mt76u_single_wr(dev, MT_VEND_WRITE_FCE, 286 mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
288 MT_FCE_DMA_LEN, len << 16); 287 MT_FCE_DMA_LEN, len << 16);
289 288
290 buf->len = MT_CMD_HDR_LEN + len + sizeof(info); 289 buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
291 err = mt76u_submit_buf(dev, USB_DIR_OUT, 290 err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
292 MT_EP_OUT_INBAND_CMD, 291 MT_EP_OUT_INBAND_CMD,
293 buf, GFP_KERNEL, 292 buf, GFP_KERNEL,
294 mt76u_mcu_complete_urb, &cmpl); 293 mt76u_mcu_complete_urb, &cmpl);
@@ -297,31 +296,31 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
297 296
298 if (!wait_for_completion_timeout(&cmpl, 297 if (!wait_for_completion_timeout(&cmpl,
299 msecs_to_jiffies(1000))) { 298 msecs_to_jiffies(1000))) {
300 dev_err(dev->dev, "firmware upload timed out\n"); 299 dev_err(dev->mt76.dev, "firmware upload timed out\n");
301 usb_kill_urb(buf->urb); 300 usb_kill_urb(buf->urb);
302 return -ETIMEDOUT; 301 return -ETIMEDOUT;
303 } 302 }
304 303
305 if (mt76u_urb_error(buf->urb)) { 304 if (mt76u_urb_error(buf->urb)) {
306 dev_err(dev->dev, "firmware upload failed: %d\n", 305 dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
307 buf->urb->status); 306 buf->urb->status);
308 return buf->urb->status; 307 return buf->urb->status;
309 } 308 }
310 309
311 val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); 310 val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
312 val++; 311 val++;
313 mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); 312 mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
314 313
315 return 0; 314 return 0;
316} 315}
317 316
318int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, 317int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
319 int data_len, u32 max_payload, u32 offset) 318 int data_len, u32 max_payload, u32 offset)
320{ 319{
321 int err, len, pos = 0, max_len = max_payload - 8; 320 int err, len, pos = 0, max_len = max_payload - 8;
322 struct mt76u_buf buf; 321 struct mt76u_buf buf;
323 322
324 err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload, 323 err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
325 GFP_KERNEL); 324 GFP_KERNEL);
326 if (err < 0) 325 if (err < 0)
327 return err; 326 return err;
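
mt76x02u_mcu_fw_send_data walks the firmware image in chunks of max_payload minus 8 bytes of framing (the 4-byte TX info header plus the 4 zero bytes appended after the payload), and __mt76x02u_mcu_fw_send_data bumps the FCE CPU descriptor index after each transfer. A plausible standalone model of the outer chunking loop, reconstructed from the declarations visible above:

#include <stdio.h>

static void fw_send(int data_len, int max_payload, unsigned int offset)
{
	int len, pos = 0, max_len = max_payload - 8;

	while (data_len > 0) {
		len = data_len < max_len ? data_len : max_len;
		printf("chunk @0x%x len=%d\n", offset + pos, len);
		data_len -= len;
		pos += len;
	}
}

int main(void)
{
	fw_send(100000, 14336, 0x80000);	/* sizes are illustrative */
	return 0;
}
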
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 5851ab6b7e26..ca05332f81fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -48,21 +48,21 @@ struct ieee80211_rate mt76x02_rates[] = {
48EXPORT_SYMBOL_GPL(mt76x02_rates); 48EXPORT_SYMBOL_GPL(mt76x02_rates);
49 49
50void mt76x02_configure_filter(struct ieee80211_hw *hw, 50void mt76x02_configure_filter(struct ieee80211_hw *hw,
51 unsigned int changed_flags, 51 unsigned int changed_flags,
52 unsigned int *total_flags, u64 multicast) 52 unsigned int *total_flags, u64 multicast)
53{ 53{
54 struct mt76_dev *dev = hw->priv; 54 struct mt76x02_dev *dev = hw->priv;
55 u32 flags = 0; 55 u32 flags = 0;
56 56
57#define MT76_FILTER(_flag, _hw) do { \ 57#define MT76_FILTER(_flag, _hw) do { \
58 flags |= *total_flags & FIF_##_flag; \ 58 flags |= *total_flags & FIF_##_flag; \
59 dev->rxfilter &= ~(_hw); \ 59 dev->mt76.rxfilter &= ~(_hw); \
60 dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ 60 dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
61 } while (0) 61 } while (0)
62 62
63 mutex_lock(&dev->mutex); 63 mutex_lock(&dev->mt76.mutex);
64 64
65 dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; 65 dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
66 66
67 MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); 67 MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
68 MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); 68 MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
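
The MT76_FILTER macro above is branch-free: it records whether mac80211 requested FIF_<flag>, clears the matching hardware bits, then ORs them back multiplied by !(flags & FIF_<flag>). The hardware bits mean "drop", so they end up set precisely when the flag was not requested. A standalone expansion of one instance:

#include <stdio.h>
#include <stdint.h>

#define FIF_FCSFAIL		0x1	/* mac80211 flag value, illustrative */
#define MT_RX_FILTR_CFG_CRC_ERR	0x4	/* hardware drop bit, illustrative */

int main(void)
{
	uint32_t total_flags = 0;	/* FCS failures not requested */
	uint32_t flags = 0, rxfilter = 0;

	/* expansion of MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR) */
	flags |= total_flags & FIF_FCSFAIL;
	rxfilter &= ~MT_RX_FILTR_CFG_CRC_ERR;
	rxfilter |= !(flags & FIF_FCSFAIL) * MT_RX_FILTR_CFG_CRC_ERR;

	printf("rxfilter=0x%x\n", rxfilter);	/* 0x4: drop bit set */
	return 0;
}
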
@@ -75,25 +75,25 @@ void mt76x02_configure_filter(struct ieee80211_hw *hw,
75 MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); 75 MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
76 76
77 *total_flags = flags; 77 *total_flags = flags;
78 dev->bus->wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); 78 mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
79 79
80 mutex_unlock(&dev->mutex); 80 mutex_unlock(&dev->mt76.mutex);
81} 81}
82EXPORT_SYMBOL_GPL(mt76x02_configure_filter); 82EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
83 83
84int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 84int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
85 struct ieee80211_sta *sta) 85 struct ieee80211_sta *sta)
86{ 86{
87 struct mt76_dev *dev = hw->priv; 87 struct mt76x02_dev *dev = hw->priv;
88 struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; 88 struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
89 struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; 89 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
90 int ret = 0; 90 int ret = 0;
91 int idx = 0; 91 int idx = 0;
92 int i; 92 int i;
93 93
94 mutex_lock(&dev->mutex); 94 mutex_lock(&dev->mt76.mutex);
95 95
96 idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid)); 96 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
97 if (idx < 0) { 97 if (idx < 0) {
98 ret = -ENOSPC; 98 ret = -ENOSPC;
99 goto out; 99 goto out;
@@ -113,40 +113,40 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
113 113
114 ewma_signal_init(&msta->rssi); 114 ewma_signal_init(&msta->rssi);
115 115
116 rcu_assign_pointer(dev->wcid[idx], &msta->wcid); 116 rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid);
117 117
118out: 118out:
119 mutex_unlock(&dev->mutex); 119 mutex_unlock(&dev->mt76.mutex);
120 120
121 return ret; 121 return ret;
122} 122}
123EXPORT_SYMBOL_GPL(mt76x02_sta_add); 123EXPORT_SYMBOL_GPL(mt76x02_sta_add);
124 124
125int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 125int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
126 struct ieee80211_sta *sta) 126 struct ieee80211_sta *sta)
127{ 127{
128 struct mt76_dev *dev = hw->priv; 128 struct mt76x02_dev *dev = hw->priv;
129 struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; 129 struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
130 int idx = msta->wcid.idx; 130 int idx = msta->wcid.idx;
131 int i; 131 int i;
132 132
133 mutex_lock(&dev->mutex); 133 mutex_lock(&dev->mt76.mutex);
134 rcu_assign_pointer(dev->wcid[idx], NULL); 134 rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
135 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 135 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
136 mt76_txq_remove(dev, sta->txq[i]); 136 mt76_txq_remove(&dev->mt76, sta->txq[i]);
137 mt76x02_mac_wcid_set_drop(dev, idx, true); 137 mt76x02_mac_wcid_set_drop(dev, idx, true);
138 mt76_wcid_free(dev->wcid_mask, idx); 138 mt76_wcid_free(dev->mt76.wcid_mask, idx);
139 mt76x02_mac_wcid_setup(dev, idx, 0, NULL); 139 mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
140 mutex_unlock(&dev->mutex); 140 mutex_unlock(&dev->mt76.mutex);
141 141
142 return 0; 142 return 0;
143} 143}
144EXPORT_SYMBOL_GPL(mt76x02_sta_remove); 144EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
145 145
146void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif, 146void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
147 unsigned int idx) 147 unsigned int idx)
148{ 148{
149 struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; 149 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
150 150
151 mvif->idx = idx; 151 mvif->idx = idx;
152 mvif->group_wcid.idx = MT_VIF_WCID(idx); 152 mvif->group_wcid.idx = MT_VIF_WCID(idx);
@@ -158,11 +158,11 @@ EXPORT_SYMBOL_GPL(mt76x02_vif_init);
158int 158int
159mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 159mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
160{ 160{
161 struct mt76_dev *dev = hw->priv; 161 struct mt76x02_dev *dev = hw->priv;
162 unsigned int idx = 0; 162 unsigned int idx = 0;
163 163
164 if (vif->addr[0] & BIT(1)) 164 if (vif->addr[0] & BIT(1))
165 idx = 1 + (((dev->macaddr[0] ^ vif->addr[0]) >> 2) & 7); 165 idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
166 166
167 /* 167 /*
168 * Client mode typically only has one configurable BSSID register, 168 * Client mode typically only has one configurable BSSID register,
@@ -186,20 +186,20 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
186EXPORT_SYMBOL_GPL(mt76x02_add_interface); 186EXPORT_SYMBOL_GPL(mt76x02_add_interface);
187 187
188void mt76x02_remove_interface(struct ieee80211_hw *hw, 188void mt76x02_remove_interface(struct ieee80211_hw *hw,
189 struct ieee80211_vif *vif) 189 struct ieee80211_vif *vif)
190{ 190{
191 struct mt76_dev *dev = hw->priv; 191 struct mt76x02_dev *dev = hw->priv;
192 192
193 mt76_txq_remove(dev, vif->txq); 193 mt76_txq_remove(&dev->mt76, vif->txq);
194} 194}
195EXPORT_SYMBOL_GPL(mt76x02_remove_interface); 195EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
196 196
197int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 197int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
198 struct ieee80211_ampdu_params *params) 198 struct ieee80211_ampdu_params *params)
199{ 199{
200 enum ieee80211_ampdu_mlme_action action = params->action; 200 enum ieee80211_ampdu_mlme_action action = params->action;
201 struct ieee80211_sta *sta = params->sta; 201 struct ieee80211_sta *sta = params->sta;
202 struct mt76_dev *dev = hw->priv; 202 struct mt76x02_dev *dev = hw->priv;
203 struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; 203 struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
204 struct ieee80211_txq *txq = sta->txq[params->tid]; 204 struct ieee80211_txq *txq = sta->txq[params->tid];
205 u16 tid = params->tid; 205 u16 tid = params->tid;
@@ -213,12 +213,14 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
213 213
214 switch (action) { 214 switch (action) {
215 case IEEE80211_AMPDU_RX_START: 215 case IEEE80211_AMPDU_RX_START:
216 mt76_rx_aggr_start(dev, &msta->wcid, tid, *ssn, params->buf_size); 216 mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
217 __mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); 217 *ssn, params->buf_size);
218 mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
218 break; 219 break;
219 case IEEE80211_AMPDU_RX_STOP: 220 case IEEE80211_AMPDU_RX_STOP:
220 mt76_rx_aggr_stop(dev, &msta->wcid, tid); 221 mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
221 __mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); 222 mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
223 BIT(16 + tid));
222 break; 224 break;
223 case IEEE80211_AMPDU_TX_OPERATIONAL: 225 case IEEE80211_AMPDU_TX_OPERATIONAL:
224 mtxq->aggr = true; 226 mtxq->aggr = true;
@@ -245,11 +247,11 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
245EXPORT_SYMBOL_GPL(mt76x02_ampdu_action); 247EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
246 248
247int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 249int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
248 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 250 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
249 struct ieee80211_key_conf *key) 251 struct ieee80211_key_conf *key)
250{ 252{
251 struct mt76_dev *dev = hw->priv; 253 struct mt76x02_dev *dev = hw->priv;
252 struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; 254 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
253 struct mt76x02_sta *msta; 255 struct mt76x02_sta *msta;
254 struct mt76_wcid *wcid; 256 struct mt76_wcid *wcid;
255 int idx = key->keyidx; 257 int idx = key->keyidx;
@@ -295,7 +297,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
295 297
296 key = NULL; 298 key = NULL;
297 } 299 }
298 mt76_wcid_key_setup(dev, wcid, key); 300 mt76_wcid_key_setup(&dev->mt76, wcid, key);
299 301
300 if (!msta) { 302 if (!msta) {
301 if (key || wcid->hw_key_idx == idx) { 303 if (key || wcid->hw_key_idx == idx) {
@@ -312,13 +314,13 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
312EXPORT_SYMBOL_GPL(mt76x02_set_key); 314EXPORT_SYMBOL_GPL(mt76x02_set_key);
313 315
314int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 316int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
315 u16 queue, const struct ieee80211_tx_queue_params *params) 317 u16 queue, const struct ieee80211_tx_queue_params *params)
316{ 318{
317 struct mt76_dev *dev = hw->priv; 319 struct mt76x02_dev *dev = hw->priv;
318 u8 cw_min = 5, cw_max = 10, qid; 320 u8 cw_min = 5, cw_max = 10, qid;
319 u32 val; 321 u32 val;
320 322
321 qid = dev->q_tx[queue].hw_idx; 323 qid = dev->mt76.q_tx[queue].hw_idx;
322 324
323 if (params->cw_min) 325 if (params->cw_min)
324 cw_min = fls(params->cw_min); 326 cw_min = fls(params->cw_min);
@@ -329,27 +331,27 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
329 FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | 331 FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
330 FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | 332 FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
331 FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); 333 FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
332 __mt76_wr(dev, MT_EDCA_CFG_AC(qid), val); 334 mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
333 335
334 val = __mt76_rr(dev, MT_WMM_TXOP(qid)); 336 val = mt76_rr(dev, MT_WMM_TXOP(qid));
335 val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid)); 337 val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
336 val |= params->txop << MT_WMM_TXOP_SHIFT(qid); 338 val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
337 __mt76_wr(dev, MT_WMM_TXOP(qid), val); 339 mt76_wr(dev, MT_WMM_TXOP(qid), val);
338 340
339 val = __mt76_rr(dev, MT_WMM_AIFSN); 341 val = mt76_rr(dev, MT_WMM_AIFSN);
340 val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid)); 342 val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
341 val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid); 343 val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
342 __mt76_wr(dev, MT_WMM_AIFSN, val); 344 mt76_wr(dev, MT_WMM_AIFSN, val);
343 345
344 val = __mt76_rr(dev, MT_WMM_CWMIN); 346 val = mt76_rr(dev, MT_WMM_CWMIN);
345 val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid)); 347 val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
346 val |= cw_min << MT_WMM_CWMIN_SHIFT(qid); 348 val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
347 __mt76_wr(dev, MT_WMM_CWMIN, val); 349 mt76_wr(dev, MT_WMM_CWMIN, val);
348 350
349 val = __mt76_rr(dev, MT_WMM_CWMAX); 351 val = mt76_rr(dev, MT_WMM_CWMAX);
350 val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid)); 352 val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
351 val |= cw_max << MT_WMM_CWMAX_SHIFT(qid); 353 val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
352 __mt76_wr(dev, MT_WMM_CWMAX, val); 354 mt76_wr(dev, MT_WMM_CWMAX, val);
353 355
354 return 0; 356 return 0;
355} 357}
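
mt76x02_conf_tx stores contention windows as exponents: mac80211 passes cw_min/cw_max as 2^n - 1 values and the EDCA registers want n, hence the fls() calls, with 5 and 10 as fallback exponents. A standalone sketch of the conversion (fls reimplemented locally, since it is a kernel/BSD helper):

#include <stdio.h>

/* find-last-set: index of the highest set bit, 1-based; fls(0) == 0 */
static int fls_local(unsigned int v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int cw_min = 15, cw_max = 1023;	/* 2^4-1, 2^10-1 */

	printf("cw_min exp=%d cw_max exp=%d\n",
	       fls_local(cw_min), fls_local(cw_max));	/* 4 and 10 */
	return 0;
}
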
@@ -359,7 +361,7 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
359 struct ieee80211_vif *vif, 361 struct ieee80211_vif *vif,
360 struct ieee80211_sta *sta) 362 struct ieee80211_sta *sta)
361{ 363{
362 struct mt76_dev *dev = hw->priv; 364 struct mt76x02_dev *dev = hw->priv;
363 struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; 365 struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
364 struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates); 366 struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
365 struct ieee80211_tx_rate rate = {}; 367 struct ieee80211_tx_rate rate = {};
@@ -425,7 +427,7 @@ const u16 mt76x02_beacon_offsets[16] = {
425}; 427};
426EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets); 428EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets);
427 429
428void mt76x02_set_beacon_offsets(struct mt76_dev *dev) 430void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
429{ 431{
430 u16 val, base = MT_BEACON_BASE; 432 u16 val, base = MT_BEACON_BASE;
431 u32 regs[4] = {}; 433 u32 regs[4] = {};
@@ -437,7 +439,7 @@ void mt76x02_set_beacon_offsets(struct mt76_dev *dev)
437 } 439 }
438 440
439 for (i = 0; i < 4; i++) 441 for (i = 0; i < 4; i++)
440 __mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); 442 mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
441} 443}
442EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets); 444EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets);
443 445
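
mt76x02_set_beacon_offsets packs the 16 beacon slot offsets into four 32-bit registers, one byte per slot. The packing loop sits in the elided context between the two hunks above; the sketch below reconstructs it from the visible declarations (base, regs[4], the four register writes) and should be read as an assumption about the layout, with offsets expressed in 64-byte units:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t base = 0xc000, offsets[16];	/* illustrative addresses */
	uint32_t regs[4] = { 0 };
	int i;

	for (i = 0; i < 16; i++)
		offsets[i] = base + i * 512;

	/* one byte per slot, offset counted in 64-byte units (assumed) */
	for (i = 0; i < 16; i++)
		regs[i / 4] |= ((offsets[i] - base) / 64) << (8 * (i % 4));

	for (i = 0; i < 4; i++)
		printf("MT_BCN_OFFSET(%d) = 0x%08x\n", i, regs[i]);
	return 0;
}
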
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index bbab021b5f1a..f39b622d03f4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -177,8 +177,8 @@ mt76x2_eeprom_load(struct mt76x02_dev *dev)
177 177
178 efuse = dev->mt76.otp.data; 178 efuse = dev->mt76.otp.data;
179 179
180 if (mt76x02_get_efuse_data(&dev->mt76, 0, efuse, 180 if (mt76x02_get_efuse_data(dev, 0, efuse, MT7662_EEPROM_SIZE,
181 MT7662_EEPROM_SIZE, MT_EE_READ)) 181 MT_EE_READ))
182 goto out; 182 goto out;
183 183
184 if (found) { 184 if (found) {
@@ -248,22 +248,22 @@ mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
248 group = mt76x2_get_cal_channel_group(channel); 248 group = mt76x2_get_cal_channel_group(channel);
249 switch (group) { 249 switch (group) {
250 case MT_CH_5G_JAPAN: 250 case MT_CH_5G_JAPAN:
251 return mt76x02_eeprom_get(&dev->mt76, 251 return mt76x02_eeprom_get(dev,
252 MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN); 252 MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
253 case MT_CH_5G_UNII_1: 253 case MT_CH_5G_UNII_1:
254 return mt76x02_eeprom_get(&dev->mt76, 254 return mt76x02_eeprom_get(dev,
255 MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8; 255 MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
256 case MT_CH_5G_UNII_2: 256 case MT_CH_5G_UNII_2:
257 return mt76x02_eeprom_get(&dev->mt76, 257 return mt76x02_eeprom_get(dev,
258 MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN); 258 MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
259 case MT_CH_5G_UNII_2E_1: 259 case MT_CH_5G_UNII_2E_1:
260 return mt76x02_eeprom_get(&dev->mt76, 260 return mt76x02_eeprom_get(dev,
261 MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8; 261 MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
262 case MT_CH_5G_UNII_2E_2: 262 case MT_CH_5G_UNII_2E_2:
263 return mt76x02_eeprom_get(&dev->mt76, 263 return mt76x02_eeprom_get(dev,
264 MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN); 264 MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
265 default: 265 default:
266 return mt76x02_eeprom_get(&dev->mt76, 266 return mt76x02_eeprom_get(dev,
267 MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8; 267 MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
268 } 268 }
269} 269}
@@ -277,14 +277,13 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
277 u16 val; 277 u16 val;
278 278
279 if (chan->band == NL80211_BAND_2GHZ) 279 if (chan->band == NL80211_BAND_2GHZ)
280 val = mt76x02_eeprom_get(&dev->mt76, 280 val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
281 MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
282 else 281 else
283 val = mt76x2_get_5g_rx_gain(dev, channel); 282 val = mt76x2_get_5g_rx_gain(dev, channel);
284 283
285 mt76x2_set_rx_gain_group(dev, val); 284 mt76x2_set_rx_gain_group(dev, val);
286 285
287 mt76x02_get_rx_gain(&dev->mt76, chan->band, &val, &lna_2g, lna_5g); 286 mt76x02_get_rx_gain(dev, chan->band, &val, &lna_2g, lna_5g);
288 mt76x2_set_rssi_offset(dev, 0, val); 287 mt76x2_set_rssi_offset(dev, 0, val);
289 mt76x2_set_rssi_offset(dev, 1, val >> 8); 288 mt76x2_set_rssi_offset(dev, 1, val >> 8);
290 289
@@ -293,7 +292,7 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
293 dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16; 292 dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
294 dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24; 293 dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
295 294
296 lna = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, lna_5g, chan); 295 lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
297 dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8); 296 dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
298} 297}
299EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain); 298EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
@@ -308,53 +307,49 @@ void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
308 307
309 memset(t, 0, sizeof(*t)); 308 memset(t, 0, sizeof(*t));
310 309
311 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_CCK); 310 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_CCK);
312 t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val); 311 t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val);
313 t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8); 312 t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8);
314 313
315 if (is_5ghz) 314 if (is_5ghz)
316 val = mt76x02_eeprom_get(&dev->mt76, 315 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
317 MT_EE_TX_POWER_OFDM_5G_6M);
318 else 316 else
319 val = mt76x02_eeprom_get(&dev->mt76, 317 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
320 MT_EE_TX_POWER_OFDM_2G_6M);
321 t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val); 318 t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val);
322 t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8); 319 t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8);
323 320
324 if (is_5ghz) 321 if (is_5ghz)
325 val = mt76x02_eeprom_get(&dev->mt76, 322 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
326 MT_EE_TX_POWER_OFDM_5G_24M);
327 else 323 else
328 val = mt76x02_eeprom_get(&dev->mt76, 324 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
329 MT_EE_TX_POWER_OFDM_2G_24M);
330 t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val); 325 t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val);
331 t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8); 326 t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8);
332 327
333 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS0); 328 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
334 t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val); 329 t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val);
335 t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8); 330 t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8);
336 331
337 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS4); 332 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
338 t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val); 333 t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val);
339 t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8); 334 t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8);
340 335
341 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS8); 336 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
342 t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val); 337 t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val);
343 t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8); 338 t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8);
344 339
345 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS12); 340 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
346 t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val); 341 t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
347 t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8); 342 t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
348 343
349 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS0); 344 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
350 t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val); 345 t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
351 t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8); 346 t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
352 347
353 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS4); 348 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
354 t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val); 349 t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
355 t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8); 350 t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
356 351
357 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS8); 352 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
358 if (!is_5ghz) 353 if (!is_5ghz)
359 val >>= 8; 354 val >>= 8;
360 t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8); 355 t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
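
mt76x2_get_rate_power reads one 16-bit EEPROM word per rate pair: the low byte covers the first two rates of a group, the high byte the next two, and mt76x02_rate_power_val (not shown in this hunk) turns each byte into a signed power value. A sketch of the byte-pair decode, with the conversion reduced to a plain signed cast:

#include <stdio.h>
#include <stdint.h>

/* stand-in for mt76x02_rate_power_val(); the real helper also
 * validates the byte before treating it as signed
 */
static int8_t rate_power_val(uint16_t v)
{
	return (int8_t)(v & 0xff);
}

int main(void)
{
	uint16_t val = 0x02fe;	/* hypothetical MT_EE_TX_POWER_CCK word */
	int8_t cck[4];

	cck[0] = cck[1] = rate_power_val(val);		/* low byte */
	cck[2] = cck[3] = rate_power_val(val >> 8);	/* high byte */
	printf("cck[0]=%d cck[2]=%d\n", cck[0], cck[2]);	/* -2 and 2 */
	return 0;
}
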
@@ -390,7 +385,7 @@ mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
390 t->chain[chain].target_power = data[2]; 385 t->chain[chain].target_power = data[2];
391 t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); 386 t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
392 387
393 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_TSSI_OFF_TXPOWER); 388 val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
394 t->target_power = val >> 8; 389 t->target_power = val >> 8;
395} 390}
396 391
@@ -441,7 +436,7 @@ mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
441 t->chain[chain].target_power = data[2]; 436 t->chain[chain].target_power = data[2];
442 t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); 437 t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
443 438
444 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_RX_HIGH_GAIN); 439 val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
445 t->target_power = val & 0xff; 440 t->target_power = val & 0xff;
446} 441}
447 442
@@ -453,8 +448,8 @@ void mt76x2_get_power_info(struct mt76x02_dev *dev,
453 448
454 memset(t, 0, sizeof(*t)); 449 memset(t, 0, sizeof(*t));
455 450
456 bw40 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW40); 451 bw40 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
457 bw80 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW80); 452 bw80 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
458 453
459 if (chan->band == NL80211_BAND_5GHZ) { 454 if (chan->band == NL80211_BAND_5GHZ) {
460 bw40 >>= 8; 455 bw40 >>= 8;
@@ -469,7 +464,7 @@ void mt76x2_get_power_info(struct mt76x02_dev *dev,
469 MT_EE_TX_POWER_1_START_2G); 464 MT_EE_TX_POWER_1_START_2G);
470 } 465 }
471 466
472 if (mt76x02_tssi_enabled(&dev->mt76) || 467 if (mt76x2_tssi_enabled(dev) ||
473 !mt76x02_field_valid(t->target_power)) 468 !mt76x02_field_valid(t->target_power))
474 t->target_power = t->chain[0].target_power; 469 t->target_power = t->chain[0].target_power;
475 470
@@ -486,23 +481,20 @@ int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
486 481
487 memset(t, 0, sizeof(*t)); 482 memset(t, 0, sizeof(*t));
488 483
489 if (!mt76x02_temp_tx_alc_enabled(&dev->mt76)) 484 if (!mt76x2_temp_tx_alc_enabled(dev))
490 return -EINVAL; 485 return -EINVAL;
491 486
492 if (!mt76x02_ext_pa_enabled(&dev->mt76, band)) 487 if (!mt76x02_ext_pa_enabled(dev, band))
493 return -EINVAL; 488 return -EINVAL;
494 489
495 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_EXT_PA_5G) >> 8; 490 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
496 t->temp_25_ref = val & 0x7f; 491 t->temp_25_ref = val & 0x7f;
497 if (band == NL80211_BAND_5GHZ) { 492 if (band == NL80211_BAND_5GHZ) {
498 slope = mt76x02_eeprom_get(&dev->mt76, 493 slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
499 MT_EE_RF_TEMP_COMP_SLOPE_5G); 494 bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
500 bounds = mt76x02_eeprom_get(&dev->mt76,
501 MT_EE_TX_POWER_EXT_PA_5G);
502 } else { 495 } else {
503 slope = mt76x02_eeprom_get(&dev->mt76, 496 slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
504 MT_EE_RF_TEMP_COMP_SLOPE_2G); 497 bounds = mt76x02_eeprom_get(dev,
505 bounds = mt76x02_eeprom_get(&dev->mt76,
506 MT_EE_TX_POWER_DELTA_BW80) >> 8; 498 MT_EE_TX_POWER_DELTA_BW80) >> 8;
507 } 499 }
508 500
@@ -523,7 +515,7 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
523 if (ret) 515 if (ret)
524 return ret; 516 return ret;
525 517
526 mt76x02_eeprom_parse_hw_cap(&dev->mt76); 518 mt76x02_eeprom_parse_hw_cap(dev);
527 mt76x2_eeprom_get_macaddr(dev); 519 mt76x2_eeprom_get_macaddr(dev);
528 mt76_eeprom_override(&dev->mt76); 520 mt76_eeprom_override(&dev->mt76);
529 dev->mt76.macaddr[0] &= ~BIT(1); 521 dev->mt76.macaddr[0] &= ~BIT(1);
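
The last line of mt76x2_eeprom_init clears BIT(1) of the first address byte, the locally administered bit, so the base MAC address is globally unique; mt76x02_add_interface (earlier in this patch) keys its multi-BSSID index off that same bit. A small sketch of the index derivation:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	uint8_t base = 0x06;	/* hypothetical first MAC address byte */
	uint8_t vif;
	int idx;

	base &= ~BIT(1);	/* base address: globally administered */

	/* a secondary vif sets BIT(1) and encodes its slot in the XOR
	 * with the base, matching mt76x02_add_interface:
	 * idx = 1 + (((base ^ vif) >> 2) & 7)
	 */
	vif = base | BIT(1) | (2 << 2);
	idx = 1 + (((base ^ vif) >> 2) & 7);
	printf("idx=%d\n", idx);	/* idx=3 */
	return 0;
}
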
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
index c97b31c77d83..9e735524d367 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -62,7 +62,7 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev);
62static inline bool 62static inline bool
63mt76x2_has_ext_lna(struct mt76x02_dev *dev) 63mt76x2_has_ext_lna(struct mt76x02_dev *dev)
64{ 64{
65 u32 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1); 65 u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
66 66
67 if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) 67 if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
68 return val & MT_EE_NIC_CONF_1_LNA_EXT_2G; 68 return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
@@ -70,4 +70,25 @@ mt76x2_has_ext_lna(struct mt76x02_dev *dev)
70 return val & MT_EE_NIC_CONF_1_LNA_EXT_5G; 70 return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
71} 71}
72 72
73static inline bool
74mt76x2_temp_tx_alc_enabled(struct mt76x02_dev *dev)
75{
76 u16 val;
77
78 val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
79 if (!(val & BIT(15)))
80 return false;
81
82 return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
83 MT_EE_NIC_CONF_1_TEMP_TX_ALC;
84}
85
86static inline bool
87mt76x2_tssi_enabled(struct mt76x02_dev *dev)
88{
89 return !mt76x2_temp_tx_alc_enabled(dev) &&
90 (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
91 MT_EE_NIC_CONF_1_TX_ALC_EN);
92}
93
73#endif 94#endif
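
The two helpers moved into this header are mutually exclusive by construction: temperature-compensated TX ALC needs both bit 15 of the ext-PA word and the CONF_1 enable bit, and TSSI only counts as enabled when temp ALC is not. A standalone restatement of the logic with illustrative bit values:

#include <stdbool.h>
#include <stdio.h>

#define CONF1_TEMP_TX_ALC	0x0001	/* illustrative bit positions */
#define CONF1_TX_ALC_EN		0x0002
#define EXT_PA_5G_BIT15		0x8000

static bool temp_tx_alc_enabled(unsigned int conf1, unsigned int ext_pa)
{
	if (!(ext_pa & EXT_PA_5G_BIT15))
		return false;
	return conf1 & CONF1_TEMP_TX_ALC;
}

static bool tssi_enabled(unsigned int conf1, unsigned int ext_pa)
{
	return !temp_tx_alc_enabled(conf1, ext_pa) &&
	       (conf1 & CONF1_TX_ALC_EN);
}

int main(void)
{
	/* ALC enabled, temp-compensation bit clear: TSSI mode */
	printf("tssi=%d\n", tssi_enabled(CONF1_TX_ALC_EN, EXT_PA_5G_BIT15));
	return 0;
}
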
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index ccd9bc9d3e1e..3c73fdeaf30f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -167,6 +167,9 @@ void mt76x2_init_device(struct mt76x02_dev *dev)
167 hw->max_report_rates = 7; 167 hw->max_report_rates = 7;
168 hw->max_rate_tries = 1; 168 hw->max_rate_tries = 1;
169 hw->extra_tx_headroom = 2; 169 hw->extra_tx_headroom = 2;
170 if (mt76_is_usb(dev))
171 hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
172 MT_DMA_HDR_LEN;
170 173
171 hw->sta_data_size = sizeof(struct mt76x02_sta); 174 hw->sta_data_size = sizeof(struct mt76x02_sta);
172 hw->vif_data_size = sizeof(struct mt76x02_vif); 175 hw->vif_data_size = sizeof(struct mt76x02_vif);
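
This is the counterpart to removing mt76x02u_check_skb_rooms from the USB TX path above: instead of a per-packet skb_cow(), the driver asks mac80211 up front for enough headroom to prepend the TXWI and the USB DMA header. A sketch of the accounting, with stand-in sizes since the real struct sizes are not given in this diff:

#include <stdio.h>

int main(void)
{
	unsigned int txwi_len = 20;	/* stand-in for sizeof(mt76x02_txwi) */
	unsigned int dma_hdr_len = 4;	/* stand-in for MT_DMA_HDR_LEN */
	unsigned int extra_tx_headroom = 2;

	/* USB: reserve descriptor space in every skb instead of
	 * reallocating per packet with skb_cow()
	 */
	extra_tx_headroom += txwi_len + dma_hdr_len;
	printf("extra_tx_headroom=%u\n", extra_tx_headroom);
	return 0;
}
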
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
index 134037a227d7..88bd62cfbdf9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -59,7 +59,6 @@ EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
59int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, 59int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
60 u8 channel) 60 u8 channel)
61{ 61{
62 struct mt76_dev *mdev = &dev->mt76;
63 struct sk_buff *skb; 62 struct sk_buff *skb;
64 struct { 63 struct {
65 u8 cr_mode; 64 u8 cr_mode;
@@ -76,8 +75,8 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
76 u32 val; 75 u32 val;
77 76
78 val = BIT(31); 77 val = BIT(31);
79 val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff; 78 val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
80 val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_1) << 8) & 0xff00; 79 val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
81 msg.cfg = cpu_to_le32(val); 80 msg.cfg = cpu_to_le32(val);
82 81
83 /* first set the channel without the extension channel info */ 82 /* first set the channel without the extension channel info */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index cbec8c6f1b2d..ab93125f46de 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -100,8 +100,6 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
100 enum nl80211_band band); 100 enum nl80211_band band);
101void mt76x2_configure_tx_delay(struct mt76x02_dev *dev, 101void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
102 enum nl80211_band band, u8 bw); 102 enum nl80211_band band, u8 bw);
103void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
104void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper);
105void mt76x2_apply_gain_adj(struct mt76x02_dev *dev); 103void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
106 104
107#endif 105#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index f229c6eb65dc..3824290b219d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -43,7 +43,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
43 u16 eep_val; 43 u16 eep_val;
44 s8 offset = 0; 44 s8 offset = 0;
45 45
46 eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2); 46 eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
47 47
48 offset = eep_val & 0x7f; 48 offset = eep_val & 0x7f;
49 if ((eep_val & 0xff) == 0xff) 49 if ((eep_val & 0xff) == 0xff)
@@ -53,7 +53,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
53 53
54 eep_val >>= 8; 54 eep_val >>= 8;
55 if (eep_val == 0x00 || eep_val == 0xff) { 55 if (eep_val == 0x00 || eep_val == 0xff) {
56 eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1); 56 eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
57 eep_val &= 0xff; 57 eep_val &= 0xff;
58 58
59 if (eep_val == 0x00 || eep_val == 0xff) 59 if (eep_val == 0x00 || eep_val == 0xff)
@@ -64,7 +64,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
64 mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset); 64 mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
65 mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL); 65 mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
66 66
67 eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); 67 eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
68 switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { 68 switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
69 case 0: 69 case 0:
70 mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80); 70 mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
@@ -143,14 +143,14 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
 		mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
 
 	for (i = 0; i < 256; i++)
-		mt76x02_mac_wcid_setup(&dev->mt76, i, 0, NULL);
+		mt76x02_mac_wcid_setup(dev, i, 0, NULL);
 
 	for (i = 0; i < MT_MAX_VIFS; i++)
-		mt76x02_mac_wcid_setup(&dev->mt76, MT_VIF_WCID(i), i, NULL);
+		mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
 
 	for (i = 0; i < 16; i++)
 		for (k = 0; k < 4; k++)
-			mt76x02_mac_shared_key_setup(&dev->mt76, i, k, NULL);
+			mt76x02_mac_shared_key_setup(dev, i, k, NULL);
 
 	for (i = 0; i < 8; i++) {
 		mt76x2_mac_set_bssid(dev, i, null_addr);
@@ -168,7 +168,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
 			 MT_CH_TIME_CFG_EIFS_AS_BUSY |
 			 FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
 
-	mt76x02_set_beacon_offsets(&dev->mt76);
+	mt76x02_set_beacon_offsets(dev);
 
 	mt76x2_set_tx_ackto(dev);
 
@@ -337,7 +337,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
 {
 	cancel_delayed_work_sync(&dev->cal_work);
 	cancel_delayed_work_sync(&dev->mac_work);
-	mt76x02_mcu_set_radio_state(&dev->mt76, false, true);
+	mt76x02_mcu_set_radio_state(dev, false, true);
 	mt76x2_mac_stop(dev, false);
 }
 
@@ -347,7 +347,7 @@ void mt76x2_cleanup(struct mt76x02_dev *dev)
 	tasklet_disable(&dev->pre_tbtt_tasklet);
 	mt76x2_stop_hardware(dev);
 	mt76x02_dma_cleanup(dev);
-	mt76x02_mcu_cleanup(&dev->mt76);
+	mt76x02_mcu_cleanup(dev);
 }
 
 struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
index 08366c5988ea..4b331ed14bb2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
@@ -36,7 +36,7 @@ mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
 	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
 		return -ENOSPC;
 
-	mt76x02_mac_write_txwi(&dev->mt76, &txwi, skb, NULL, NULL, skb->len);
+	mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
 
 	mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
 	offset += sizeof(txwi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 65fef082e7cc..034a06295668 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -172,7 +172,7 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
 	int idx = msta->wcid.idx;
 
 	mt76_stop_tx_queues(&dev->mt76, sta, true);
-	mt76x02_mac_wcid_set_drop(&dev->mt76, idx, ps);
+	mt76x02_mac_wcid_set_drop(dev, idx, ps);
 }
 
 static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
index 898aa229671c..d8fa9ba56437 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -140,7 +140,7 @@ mt76pci_load_firmware(struct mt76x02_dev *dev)
 
 	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
 
-	val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
 	if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
 		mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
 
@@ -152,8 +152,8 @@ mt76pci_load_firmware(struct mt76x02_dev *dev)
 		return -ETIMEDOUT;
 	}
 
+	mt76x02_set_ethtool_fwver(dev, hdr);
 	dev_info(dev->mt76.dev, "Firmware running!\n");
-	mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
 
 	release_firmware(fw);
 
@@ -183,6 +183,6 @@ int mt76x2_mcu_init(struct mt76x02_dev *dev)
 	if (ret)
 		return ret;
 
-	mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true);
+	mt76x02_mcu_function_select(dev, Q_SELECT, 1, true);
 	return 0;
 }
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index 40ea5f7480fb..5bda44540225 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -26,7 +26,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
 	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
 	u32 flag = 0;
 
-	if (!mt76x02_tssi_enabled(&dev->mt76))
+	if (!mt76x2_tssi_enabled(dev))
 		return false;
 
 	if (mt76x2_channel_silent(dev))
@@ -35,10 +35,10 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
 	if (chan->band == NL80211_BAND_5GHZ)
 		flag |= BIT(0);
 
-	if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+	if (mt76x02_ext_pa_enabled(dev, chan->band))
 		flag |= BIT(8);
 
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, flag, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true);
 	dev->cal.tssi_cal_done = true;
 	return true;
 }
@@ -62,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
 		mt76x2_mac_stop(dev, false);
 
 	if (is_5ghz)
-		mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, true);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true);
 
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, true);
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, true);
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, true);
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, true);
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_SHAPING, 0, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true);
 
 	if (!mac_stopped)
 		mt76x2_mac_resume(dev);
@@ -125,39 +125,6 @@ void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
 }
 
 static void
-mt76x2_get_agc_gain(struct mt76x02_dev *dev, u8 *dest)
-{
-	dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
-	dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
-}
-
-static int
-mt76x2_get_rssi_gain_thresh(struct mt76x02_dev *dev)
-{
-	switch (dev->mt76.chandef.width) {
-	case NL80211_CHAN_WIDTH_80:
-		return -62;
-	case NL80211_CHAN_WIDTH_40:
-		return -65;
-	default:
-		return -68;
-	}
-}
-
-static int
-mt76x2_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
-{
-	switch (dev->mt76.chandef.width) {
-	case NL80211_CHAN_WIDTH_80:
-		return -76;
-	case NL80211_CHAN_WIDTH_40:
-		return -79;
-	default:
-		return -82;
-	}
-}
-
-static void
 mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
 {
 	u32 val;
@@ -183,25 +150,6 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
 }
 
 static void
-mt76x2_phy_adjust_vga_gain(struct mt76x02_dev *dev)
-{
-	u32 false_cca;
-	u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
-
-	false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
-	dev->cal.false_cca = false_cca;
-	if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
-		dev->cal.agc_gain_adjust += 2;
-	else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
-		 (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
-		dev->cal.agc_gain_adjust -= 2;
-	else
-		return;
-
-	mt76x2_phy_set_gain_val(dev);
-}
-
-static void
 mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
 {
 	u8 *gain = dev->cal.agc_gain_init;
@@ -210,16 +158,17 @@ mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
 	int low_gain;
 	u32 val;
 
-	dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
+	dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
 
-	low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
-		   (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+	low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+		   (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
 
 	gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
 	dev->cal.low_gain = low_gain;
 
 	if (!gain_change) {
-		mt76x2_phy_adjust_vga_gain(dev);
+		if (mt76x02_phy_adjust_vga_gain(dev))
+			mt76x2_phy_set_gain_val(dev);
 		return;
 	}
 
@@ -337,8 +286,8 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
 	mt76x2_configure_tx_delay(dev, band, bw);
 	mt76x2_phy_set_txpower(dev);
 
-	mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
-	mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+	mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+	mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
 
 	mt76_rmw(dev, MT_EXT_CCA_CFG,
 		 (MT_EXT_CCA_CFG_CCA0 |
@@ -361,17 +310,17 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
 	mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
 
 	if (!dev->cal.init_cal_done) {
-		u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+		u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
 
 		if (val != 0xff)
-			mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, true);
+			mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true);
 	}
 
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, true);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true);
 
 	/* Rx LPF calibration */
 	if (!dev->cal.init_cal_done)
-		mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, true);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true);
 
 	dev->cal.init_cal_done = true;
 
@@ -384,14 +333,11 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
 	if (scan)
 		return 0;
 
-	dev->cal.low_gain = -1;
 	mt76x2_phy_channel_calibrate(dev, true);
-	mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
-	memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
-	       sizeof(dev->cal.agc_gain_cur));
+	mt76x02_init_agc_gain(dev);
 
 	/* init default values for temp compensation */
-	if (mt76x02_tssi_enabled(&dev->mt76)) {
+	if (mt76x2_tssi_enabled(dev)) {
 		mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
 			       0x38);
 		mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
@@ -449,7 +395,7 @@ int mt76x2_phy_start(struct mt76x02_dev *dev)
 {
 	int ret;
 
-	ret = mt76x02_mcu_set_radio_state(&dev->mt76, true, true);
+	ret = mt76x02_mcu_set_radio_state(dev, true, true);
 	if (ret)
 		return ret;
 
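Note: the gain-update path in this file now calls the shared mt76x02_phy_adjust_vga_gain(), which reports whether the AGC adjustment changed so that the caller applies mt76x2_phy_set_gain_val() only when needed. A standalone sketch of that false-CCA feedback loop, with thresholds taken from the mt76x2 copy removed earlier in this file (the struct is a stand-in for the driver's calibration state, and the function name is illustrative):

	#include <stdbool.h>

	struct cal_state {
		int low_gain;
		int agc_gain_adjust;
		unsigned int false_cca;
	};

	/* Returns true when agc_gain_adjust moved, i.e. the caller must
	 * reprogram the gain registers; false-CCA counts come from the
	 * RX statistics register in the real driver. */
	static bool adjust_vga_gain(struct cal_state *cal,
				    unsigned int false_cca)
	{
		int limit = cal->low_gain > 0 ? 16 : 4;

		cal->false_cca = false_cca;
		if (false_cca > 800 && cal->agc_gain_adjust < limit)
			cal->agc_gain_adjust += 2;	/* too many false CCAs: back off */
		else if ((false_cca < 10 && cal->agc_gain_adjust > 0) ||
			 (cal->agc_gain_adjust >= limit && false_cca < 500))
			cal->agc_gain_adjust -= 2;	/* quiet channel: restore gain */
		else
			return false;			/* steady state, skip the write */

		return true;
	}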
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index f00aed915ee8..e9fff5b7f125 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -65,7 +65,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
 		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
 		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
 
-		if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+		if (mt76x02_ext_pa_enabled(dev, band)) {
 			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
 			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
 		} else {
@@ -76,7 +76,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
 		pa_mode[0] = 0x0000ffff;
 		pa_mode[1] = 0x00ff00ff;
 
-		if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+		if (mt76x02_ext_pa_enabled(dev, band)) {
 			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
 			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
 		} else {
@@ -84,7 +84,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
 			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
 		}
 
-		if (mt76x02_ext_pa_enabled(&dev->mt76, band))
+		if (mt76x02_ext_pa_enabled(dev, band))
 			pa_mode_adj = 0x04000000;
 		else
 			pa_mode_adj = 0;
@@ -98,7 +98,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
 	mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
 	mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
 
-	if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+	if (mt76x02_ext_pa_enabled(dev, band)) {
 		u32 val;
 
 		if (band == NL80211_BAND_2GHZ)
@@ -187,7 +187,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
 	dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
 	dev->mt76.rate_power = t;
 
-	mt76x02_phy_set_txpower(&dev->mt76, txp_0, txp_1);
+	mt76x02_phy_set_txpower(dev, txp_0, txp_1);
 }
 EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
 
@@ -196,7 +196,7 @@ void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
 {
 	u32 cfg0, cfg1;
 
-	if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+	if (mt76x02_ext_pa_enabled(dev, band)) {
 		cfg0 = bw ? 0x000b0c01 : 0x00101101;
 		cfg1 = 0x00011414;
 	} else {
@@ -210,50 +210,6 @@ void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
 
-void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
-{
-	int core_val, agc_val;
-
-	switch (width) {
-	case NL80211_CHAN_WIDTH_80:
-		core_val = 3;
-		agc_val = 7;
-		break;
-	case NL80211_CHAN_WIDTH_40:
-		core_val = 2;
-		agc_val = 3;
-		break;
-	default:
-		core_val = 0;
-		agc_val = 1;
-		break;
-	}
-
-	mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
-	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
-	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
-	mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
-
-void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper)
-{
-	switch (band) {
-	case NL80211_BAND_2GHZ:
-		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
-		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-		break;
-	case NL80211_BAND_5GHZ:
-		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
-		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-		break;
-	}
-
-	mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
-		       primary_upper);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
-
 void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
 {
 	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
@@ -275,7 +231,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
 		dev->cal.tssi_comp_pending = false;
 		mt76x2_get_power_info(dev, &txp, chan);
 
-		if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+		if (mt76x02_ext_pa_enabled(dev, chan->band))
 			t.pa_mode = 1;
 
 		t.cal_mode = BIT(1);
@@ -289,8 +245,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
 			return;
 
 		usleep_range(10000, 20000);
-		mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_DPD,
-				      chan->hw_value, wait);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait);
 		dev->cal.dpd_cal_done = true;
 	}
 }
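Note: mt76x2_phy_set_bw() and mt76x2_phy_set_band() are deleted from this file because equivalent mt76x02_phy_set_bw()/mt76x02_phy_set_band() helpers now live in the shared mt76x02 code, as the call-site updates in pci_phy.c and usb_phy.c show. For reference, a compact restatement of the bandwidth mapping the removed function programmed into the CORE/AGC registers; the enum and function names here are illustrative:

	enum chan_width { WIDTH_20, WIDTH_40, WIDTH_80 };

	/* Register field values per channel width, as in the removed
	 * mt76x2_phy_set_bw(); the same table now lives in mt76x02 code. */
	static void bw_to_vals(enum chan_width width, int *core_val,
			       int *agc_val)
	{
		switch (width) {
		case WIDTH_80:
			*core_val = 3;
			*agc_val = 7;
			break;
		case WIDTH_40:
			*core_val = 2;
			*agc_val = 3;
			break;
		default:		/* 20 MHz */
			*core_val = 0;
			*agc_val = 1;
			break;
		}
	}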
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index c82f16efa327..13cce2937573 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -130,7 +130,7 @@ static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
 		put_unaligned_le32(val, dev->mt76.eeprom.data + i);
 	}
 
-	mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+	mt76x02_eeprom_parse_hw_cap(dev);
 	return 0;
 }
 
@@ -204,8 +204,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
 	if (err < 0)
 		return err;
 
-	mt76x02_mac_setaddr(&dev->mt76,
-			    dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+	mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
 	dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
 
 	mt76x2u_init_beacon_offsets(dev);
@@ -237,8 +236,8 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
 	if (err < 0)
 		return err;
 
-	mt76x02_phy_set_rxpath(&dev->mt76);
-	mt76x02_phy_set_txdac(&dev->mt76);
+	mt76x02_phy_set_rxpath(dev);
+	mt76x02_phy_set_txdac(dev);
 
 	return mt76x2u_mac_stop(dev);
 }
@@ -303,7 +302,7 @@ void mt76x2u_stop_hw(struct mt76x02_dev *dev)
 
 void mt76x2u_cleanup(struct mt76x02_dev *dev)
 {
-	mt76x02_mcu_set_radio_state(&dev->mt76, false, false);
+	mt76x02_mcu_set_radio_state(dev, false, false);
 	mt76x2u_stop_hw(dev);
 	mt76u_queues_deinit(&dev->mt76);
 	mt76u_mcu_deinit(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index dbd635aa763b..db2194a92e67 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -32,7 +32,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
 	s8 offset = 0;
 	u16 eep_val;
 
-	eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2);
+	eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
 
 	offset = eep_val & 0x7f;
 	if ((eep_val & 0xff) == 0xff)
@@ -42,7 +42,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
 
 	eep_val >>= 8;
 	if (eep_val == 0x00 || eep_val == 0xff) {
-		eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1);
+		eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
 		eep_val &= 0xff;
 
 		if (eep_val == 0x00 || eep_val == 0xff)
@@ -67,7 +67,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
 	/* init fce */
 	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
 
-	eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+	eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
 	switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
 	case 0:
 		mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 224609d6915f..1971a1b00038 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -50,9 +50,9 @@ static int mt76x2u_add_interface(struct ieee80211_hw *hw,
 	struct mt76x02_dev *dev = hw->priv;
 
 	if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
-		mt76x02_mac_setaddr(&dev->mt76, vif->addr);
+		mt76x02_mac_setaddr(dev, vif->addr);
 
-	mt76x02_vif_init(&dev->mt76, vif, 0);
+	mt76x02_vif_init(dev, vif, 0);
 	return 0;
 }
 
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
index 259ceae2a3a9..3f1e558e5e6d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
@@ -137,7 +137,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
 	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
 
 	/* vendor reset */
-	mt76x02u_mcu_fw_reset(&dev->mt76);
+	mt76x02u_mcu_fw_reset(dev);
 	usleep_range(5000, 10000);
 
 	/* enable FCE to send in-band cmd */
@@ -151,7 +151,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
 	/* FCE skip_fs_en */
 	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
 
-	err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+	err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
 					fw->size - sizeof(*hdr),
 					MCU_ROM_PATCH_MAX_PAYLOAD,
 					MT76U_MCU_ROM_PATCH_OFFSET);
@@ -210,7 +210,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
 	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
 
 	/* vendor reset */
-	mt76x02u_mcu_fw_reset(&dev->mt76);
+	mt76x02u_mcu_fw_reset(dev);
 	usleep_range(5000, 10000);
 
 	/* enable USB_DMA_CFG */
@@ -230,7 +230,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
 	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
 
 	/* load ILM */
-	err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+	err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
 					ilm_len, MCU_FW_URB_MAX_PAYLOAD,
 					MT76U_MCU_ILM_OFFSET);
 	if (err < 0) {
@@ -241,8 +241,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
 	/* load DLM */
 	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
 		dlm_offset += 0x800;
-	err = mt76x02u_mcu_fw_send_data(&dev->mt76,
-					fw->data + sizeof(*hdr) + ilm_len,
+	err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr) + ilm_len,
 					dlm_len, MCU_FW_URB_MAX_PAYLOAD,
 					dlm_offset);
 	if (err < 0) {
@@ -260,8 +259,8 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
 	mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
 	/* enable FCE to send in-band cmd */
 	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+	mt76x02_set_ethtool_fwver(dev, hdr);
 	dev_dbg(dev->mt76.dev, "firmware running\n");
-	mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
 
 out:
 	release_firmware(fw);
@@ -283,10 +282,9 @@ int mt76x2u_mcu_init(struct mt76x02_dev *dev)
 {
 	int err;
 
-	err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT,
-					  1, false);
+	err = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
 	if (err < 0)
 		return err;
 
-	return mt76x02_mcu_set_radio_state(&dev->mt76, true, false);
+	return mt76x02_mcu_set_radio_state(dev, true, false);
 }
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index b11f8a6a6254..ca96ba60510e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -29,12 +29,12 @@ void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev)
 	mt76x2u_mac_stop(dev);
 
 	if (is_5ghz)
-		mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, false);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false);
 
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, false);
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false);
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, false);
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, false);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false);
 
 	mt76x2u_mac_resume(dev);
 }
@@ -69,7 +69,7 @@ mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev)
 		break;
 	}
 
-	dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
+	dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
 	false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
 			      mt76_rr(dev, MT_RX_STAT_1));
 
@@ -155,8 +155,8 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
 	mt76x2_configure_tx_delay(dev, chan->band, bw);
 	mt76x2_phy_set_txpower(dev);
 
-	mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
-	mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+	mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+	mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
 
 	mt76_rmw(dev, MT_EXT_CCA_CFG,
 		 (MT_EXT_CCA_CFG_CCA0 |
@@ -177,18 +177,17 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
 	mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
 
 	if (!dev->cal.init_cal_done) {
-		u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+		u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
 
 		if (val != 0xff)
-			mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R,
-					      0, false);
+			mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
 	}
 
-	mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, false);
+	mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, false);
 
 	/* Rx LPF calibration */
 	if (!dev->cal.init_cal_done)
-		mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, false);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false);
 	dev->cal.init_cal_done = true;
 
 	mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
@@ -203,7 +202,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
 	if (scan)
 		return 0;
 
-	if (mt76x02_tssi_enabled(&dev->mt76)) {
+	if (mt76x2_tssi_enabled(dev)) {
 		/* init default values for temp compensation */
 		mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
 			       0x38);
@@ -218,10 +217,9 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
 		chan = dev->mt76.chandef.chan;
 		if (chan->band == NL80211_BAND_5GHZ)
 			flag |= BIT(0);
-		if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+		if (mt76x02_ext_pa_enabled(dev, chan->band))
 			flag |= BIT(8);
-		mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI,
-				      flag, false);
+		mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false);
 		dev->cal.tssi_cal_done = true;
 	}
 }
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index bf0e9e666bc4..7cbce03aa65b 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -96,7 +96,8 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
-	if (!ieee80211_is_data_qos(hdr->frame_control))
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    !ieee80211_is_data_present(hdr->frame_control))
 		return;
 
 	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
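Note: this tx.c fix narrows A-MPDU sequence-number tracking to QoS frames that actually carry data. QoS-NULL frames pass ieee80211_is_data_qos() but have no payload, so their sequence numbers must not advance agg_ssn. A standalone model of the combined check; the frame-control bit masks follow IEEE 802.11 and the helpers mirror mac80211's semantics, but in host byte order rather than the kernel's __le16:

	#include <stdbool.h>
	#include <stdint.h>

	#define FCTL_FTYPE	0x000c
	#define FTYPE_DATA	0x0008
	#define STYPE_QOS_BIT	0x0080	/* QoS data subtypes have bit 7 set */
	#define STYPE_NULL_BIT	0x0040	/* "no data" subtypes have bit 6 set */

	static bool is_data_qos(uint16_t fc)
	{
		return (fc & (FCTL_FTYPE | STYPE_QOS_BIT)) ==
		       (FTYPE_DATA | STYPE_QOS_BIT);
	}

	/* Data frame whose "no data" bit is clear, i.e. it has a payload. */
	static bool is_data_present(uint16_t fc)
	{
		return (fc & (FCTL_FTYPE | STYPE_NULL_BIT)) == FTYPE_DATA;
	}

	static bool should_track_agg_ssn(uint16_t fc)
	{
		return is_data_qos(fc) && is_data_present(fc);
	}

For a QoS-NULL frame (QoS bit and null bit both set) should_track_agg_ssn() is false, which is exactly the case the added condition filters out.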
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 6a255643c1f0..5f0faf07c346 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -862,6 +862,7 @@ int mt76u_init(struct mt76_dev *dev,
 		.copy = mt76u_copy,
 		.wr_rp = mt76u_wr_rp,
 		.rd_rp = mt76u_rd_rp,
+		.type = MT76_BUS_USB,
 	};
 	struct mt76_usb *usb = &dev->usb;
 
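Note: tagging the bus ops with .type = MT76_BUS_USB lets bus-agnostic mt76x02 code branch between MMIO and USB paths at runtime instead of being compiled per bus. A minimal sketch of the pattern; the enum and field names mirror this diff, while the helper is illustrative:

	enum mt76_bus_type {
		MT76_BUS_MMIO,
		MT76_BUS_USB,
	};

	struct mt76_bus_ops {
		/* rr/wr/copy/rd_rp/wr_rp callbacks elided */
		enum mt76_bus_type type;
	};

	/* Shared code can now ask which transport it is running on. */
	static inline int bus_is_usb(const struct mt76_bus_ops *bus)
	{
		return bus->type == MT76_BUS_USB;
	}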
diff --git a/drivers/net/wireless/quantenna/Kconfig b/drivers/net/wireless/quantenna/Kconfig
index de84ce125c26..7628d9c1ea6a 100644
--- a/drivers/net/wireless/quantenna/Kconfig
+++ b/drivers/net/wireless/quantenna/Kconfig
@@ -1,7 +1,7 @@
 config WLAN_VENDOR_QUANTENNA
 	bool "Quantenna wireless cards support"
 	default y
-	---help---
+	help
 	  If you have a wireless card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
index 8d1492a90bd1..b8c12a5f16b4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig
+++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
@@ -11,7 +11,7 @@ config QTNFMAC_PEARL_PCIE
 	select QTNFMAC
 	select FW_LOADER
 	select CRC32
-	---help---
+	help
 	  This option adds support for wireless adapters based on Quantenna
 	  802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe.
 
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 5aca12a51fe3..95c7b95c6f8a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Quantenna Communications */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
index f21e97ede090..634480fe6a64 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications */
 
 #ifndef _QTN_FMAC_PCIE_IPC_H_
 #define _QTN_FMAC_PCIE_IPC_H_
@@ -85,11 +72,6 @@
 
 #define QTN_EP_LHOST_TQE_PORT	4
 
-enum qtnf_pcie_bda_ipc_flags {
-	QTN_PCIE_IPC_FLAG_HBM_MAGIC	= BIT(0),
-	QTN_PCIE_IPC_FLAG_SHM_PIO	= BIT(1),
-};
-
 enum qtnf_fw_loadtype {
 	QTN_FW_DBEGIN,
 	QTN_FW_DSUB,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
index 0bfe285b6b48..6e9a5c61d46f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
@@ -1,28 +1,10 @@
-/*
- * Copyright (c) 2015 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015 Quantenna Communications */
 
 #ifndef __PEARL_PCIE_H
 #define __PEARL_PCIE_H
 
-#define PCIE_GEN2_BASE			(0xe9000000)
-#define PCIE_GEN3_BASE			(0xe7000000)
-
-#define PEARL_CUR_PCIE_BASE		(PCIE_GEN2_BASE)
-#define PCIE_HDP_OFFSET			(0x2000)
-
+/* Pearl PCIe HDP registers */
 #define PCIE_HDP_CTRL(base)		((base) + 0x2c00)
 #define PCIE_HDP_AXI_CTRL(base)		((base) + 0x2c04)
 #define PCIE_HDP_HOST_WR_DESC0(base)	((base) + 0x2c10)
@@ -86,7 +68,7 @@
 #define PCIE_HDP_TX_HOST_Q_RD_PTR(base)	((base) + 0x2d3c)
 #define PCIE_HDP_TX_HOST_Q_STS(base)	((base) + 0x2d40)
 
-/* Host HBM pool registers */
+/* Pearl PCIe HBM pool registers */
 #define PCIE_HHBM_CSR_REG(base)		((base) + 0x2e00)
 #define PCIE_HHBM_Q_BASE_REG(base)	((base) + 0x2e04)
 #define PCIE_HHBM_Q_LIMIT_REG(base)	((base) + 0x2e08)
@@ -104,230 +86,13 @@
 #define HBM_INT_STATUS(base)		((base) + 0x2f9c)
 #define PCIE_HHBM_POOL_CNFIG(base)	((base) + 0x2f9c)
 
-/* host HBM bit field definition */
+/* Pearl PCIe HBM bit field definitions */
 #define HHBM_CONFIG_SOFT_RESET		(BIT(8))
 #define HHBM_WR_REQ			(BIT(0))
 #define HHBM_RD_REQ			(BIT(1))
 #define HHBM_DONE			(BIT(31))
 #define HHBM_64BIT			(BIT(10))
 
-/* offsets for dual PCIE */
-#define PCIE_PORT_LINK_CTL(base)	((base) + 0x0710)
-#define PCIE_GEN2_CTL(base)		((base) + 0x080C)
-#define PCIE_GEN3_OFF(base)		((base) + 0x0890)
-#define PCIE_ATU_CTRL1(base)		((base) + 0x0904)
-#define PCIE_ATU_CTRL2(base)		((base) + 0x0908)
-#define PCIE_ATU_BASE_LOW(base)		((base) + 0x090C)
-#define PCIE_ATU_BASE_HIGH(base)	((base) + 0x0910)
-#define PCIE_ATU_BASE_LIMIT(base)	((base) + 0x0914)
-#define PCIE_ATU_TGT_LOW(base)		((base) + 0x0918)
-#define PCIE_ATU_TGT_HIGH(base)		((base) + 0x091C)
-#define PCIE_DMA_WR_ENABLE(base)	((base) + 0x097C)
-#define PCIE_DMA_WR_CHWTLOW(base)	((base) + 0x0988)
-#define PCIE_DMA_WR_CHWTHIG(base)	((base) + 0x098C)
-#define PCIE_DMA_WR_INTSTS(base)	((base) + 0x09BC)
-#define PCIE_DMA_WR_INTMASK(base)	((base) + 0x09C4)
-#define PCIE_DMA_WR_INTCLER(base)	((base) + 0x09C8)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_L(base)	((base) + 0x09D0)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_H(base)	((base) + 0x09D4)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_L(base)	((base) + 0x09D8)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_H(base)	((base) + 0x09DC)
-#define PCIE_DMA_WR_IMWR_DATA(base)	((base) + 0x09E0)
-#define PCIE_DMA_WR_LL_ERR_EN(base)	((base) + 0x0A00)
-#define PCIE_DMA_WR_DOORBELL(base)	((base) + 0x0980)
-#define PCIE_DMA_RD_ENABLE(base)	((base) + 0x099C)
-#define PCIE_DMA_RD_DOORBELL(base)	((base) + 0x09A0)
-#define PCIE_DMA_RD_CHWTLOW(base)	((base) + 0x09A8)
-#define PCIE_DMA_RD_CHWTHIG(base)	((base) + 0x09AC)
-#define PCIE_DMA_RD_INTSTS(base)	((base) + 0x0A10)
-#define PCIE_DMA_RD_INTMASK(base)	((base) + 0x0A18)
-#define PCIE_DMA_RD_INTCLER(base)	((base) + 0x0A1C)
-#define PCIE_DMA_RD_ERR_STS_L(base)	((base) + 0x0A24)
-#define PCIE_DMA_RD_ERR_STS_H(base)	((base) + 0x0A28)
-#define PCIE_DMA_RD_LL_ERR_EN(base)	((base) + 0x0A34)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_L(base)	((base) + 0x0A3C)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_H(base)	((base) + 0x0A40)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_L(base)	((base) + 0x0A44)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_H(base)	((base) + 0x0A48)
-#define PCIE_DMA_RD_IMWR_DATA(base)	((base) + 0x0A4C)
-#define PCIE_DMA_CHNL_CONTEXT(base)	((base) + 0x0A6C)
-#define PCIE_DMA_CHNL_CNTRL(base)	((base) + 0x0A70)
-#define PCIE_DMA_XFR_SIZE(base)		((base) + 0x0A78)
-#define PCIE_DMA_SAR_LOW(base)		((base) + 0x0A7C)
-#define PCIE_DMA_SAR_HIGH(base)		((base) + 0x0A80)
-#define PCIE_DMA_DAR_LOW(base)		((base) + 0x0A84)
-#define PCIE_DMA_DAR_HIGH(base)		((base) + 0x0A88)
-#define PCIE_DMA_LLPTR_LOW(base)	((base) + 0x0A8C)
-#define PCIE_DMA_LLPTR_HIGH(base)	((base) + 0x0A90)
-#define PCIE_DMA_WRLL_ERR_ENB(base)	((base) + 0x0A00)
-#define PCIE_DMA_RDLL_ERR_ENB(base)	((base) + 0x0A34)
-#define PCIE_DMABD_CHNL_CNTRL(base)	((base) + 0x8000)
-#define PCIE_DMABD_XFR_SIZE(base)	((base) + 0x8004)
-#define PCIE_DMABD_SAR_LOW(base)	((base) + 0x8008)
-#define PCIE_DMABD_SAR_HIGH(base)	((base) + 0x800c)
-#define PCIE_DMABD_DAR_LOW(base)	((base) + 0x8010)
-#define PCIE_DMABD_DAR_HIGH(base)	((base) + 0x8014)
-#define PCIE_DMABD_LLPTR_LOW(base)	((base) + 0x8018)
-#define PCIE_DMABD_LLPTR_HIGH(base)	((base) + 0x801c)
-#define PCIE_WRDMA0_CHNL_CNTRL(base)	((base) + 0x8000)
-#define PCIE_WRDMA0_XFR_SIZE(base)	((base) + 0x8004)
-#define PCIE_WRDMA0_SAR_LOW(base)	((base) + 0x8008)
-#define PCIE_WRDMA0_SAR_HIGH(base)	((base) + 0x800c)
-#define PCIE_WRDMA0_DAR_LOW(base)	((base) + 0x8010)
-#define PCIE_WRDMA0_DAR_HIGH(base)	((base) + 0x8014)
-#define PCIE_WRDMA0_LLPTR_LOW(base)	((base) + 0x8018)
-#define PCIE_WRDMA0_LLPTR_HIGH(base)	((base) + 0x801c)
-#define PCIE_WRDMA1_CHNL_CNTRL(base)	((base) + 0x8020)
-#define PCIE_WRDMA1_XFR_SIZE(base)	((base) + 0x8024)
-#define PCIE_WRDMA1_SAR_LOW(base)	((base) + 0x8028)
-#define PCIE_WRDMA1_SAR_HIGH(base)	((base) + 0x802c)
-#define PCIE_WRDMA1_DAR_LOW(base)	((base) + 0x8030)
-#define PCIE_WRDMA1_DAR_HIGH(base)	((base) + 0x8034)
-#define PCIE_WRDMA1_LLPTR_LOW(base)	((base) + 0x8038)
-#define PCIE_WRDMA1_LLPTR_HIGH(base)	((base) + 0x803c)
-#define PCIE_RDDMA0_CHNL_CNTRL(base)	((base) + 0x8040)
-#define PCIE_RDDMA0_XFR_SIZE(base)	((base) + 0x8044)
-#define PCIE_RDDMA0_SAR_LOW(base)	((base) + 0x8048)
-#define PCIE_RDDMA0_SAR_HIGH(base)	((base) + 0x804c)
-#define PCIE_RDDMA0_DAR_LOW(base)	((base) + 0x8050)
-#define PCIE_RDDMA0_DAR_HIGH(base)	((base) + 0x8054)
-#define PCIE_RDDMA0_LLPTR_LOW(base)	((base) + 0x8058)
-#define PCIE_RDDMA0_LLPTR_HIGH(base)	((base) + 0x805c)
-#define PCIE_RDDMA1_CHNL_CNTRL(base)	((base) + 0x8060)
-#define PCIE_RDDMA1_XFR_SIZE(base)	((base) + 0x8064)
-#define PCIE_RDDMA1_SAR_LOW(base)	((base) + 0x8068)
-#define PCIE_RDDMA1_SAR_HIGH(base)	((base) + 0x806c)
-#define PCIE_RDDMA1_DAR_LOW(base)	((base) + 0x8070)
-#define PCIE_RDDMA1_DAR_HIGH(base)	((base) + 0x8074)
-#define PCIE_RDDMA1_LLPTR_LOW(base)	((base) + 0x8078)
-#define PCIE_RDDMA1_LLPTR_HIGH(base)	((base) + 0x807c)
-
-#define PCIE_ID(base)			((base) + 0x0000)
-#define PCIE_CMD(base)			((base) + 0x0004)
-#define PCIE_BAR(base, n)		((base) + 0x0010 + ((n) << 2))
-#define PCIE_CAP_PTR(base)		((base) + 0x0034)
-#define PCIE_MSI_LBAR(base)		((base) + 0x0054)
-#define PCIE_MSI_CTRL(base)		((base) + 0x0050)
-#define PCIE_MSI_ADDR_L(base)		((base) + 0x0054)
-#define PCIE_MSI_ADDR_H(base)		((base) + 0x0058)
-#define PCIE_MSI_DATA(base)		((base) + 0x005C)
-#define PCIE_MSI_MASK_BIT(base)		((base) + 0x0060)
-#define PCIE_MSI_PEND_BIT(base)		((base) + 0x0064)
-#define PCIE_DEVCAP(base)		((base) + 0x0074)
-#define PCIE_DEVCTLSTS(base)		((base) + 0x0078)
-
-#define PCIE_CMDSTS(base)		((base) + 0x0004)
-#define PCIE_LINK_STAT(base)		((base) + 0x80)
-#define PCIE_LINK_CTL2(base)		((base) + 0xa0)
-#define PCIE_ASPM_L1_CTRL(base)		((base) + 0x70c)
-#define PCIE_ASPM_LINK_CTRL(base)	(PCIE_LINK_STAT)
-#define PCIE_ASPM_L1_SUBSTATE_TIMING(base)	((base) + 0xB44)
-#define PCIE_L1SUB_CTRL1(base)		((base) + 0x150)
-#define PCIE_PMCSR(base)		((base) + 0x44)
-#define PCIE_CFG_SPACE_LIMIT(base)	((base) + 0x100)
-
-/* PCIe link defines */
-#define PEARL_PCIE_LINKUP		(0x7)
-#define PEARL_PCIE_DATA_LINK		(BIT(0))
-#define PEARL_PCIE_PHY_LINK		(BIT(1))
-#define PEARL_PCIE_LINK_RST		(BIT(3))
-#define PEARL_PCIE_FATAL_ERR		(BIT(5))
-#define PEARL_PCIE_NONFATAL_ERR		(BIT(6))
-
-/* PCIe Lane defines */
-#define PCIE_G2_LANE_X1			((BIT(0)) << 16)
-#define PCIE_G2_LANE_X2			((BIT(0) | BIT(1)) << 16)
-
-/* PCIe DLL link enable */
-#define PCIE_DLL_LINK_EN		((BIT(0)) << 5)
-
-#define PCIE_LINK_GEN1			(BIT(0))
-#define PCIE_LINK_GEN2			(BIT(1))
-#define PCIE_LINK_GEN3			(BIT(2))
-#define PCIE_LINK_MODE(x)		(((x) >> 16) & 0x7)
-
-#define MSI_EN				(BIT(0))
-#define MSI_64_EN			(BIT(7))
-#define PCIE_MSI_ADDR_OFFSET(a)		((a) & 0xFFFF)
-#define PCIE_MSI_ADDR_ALIGN(a)		((a) & (~0xFFFF))
-
-#define PCIE_BAR_MASK(base, n)		((base) + 0x1010 + ((n) << 2))
-#define PCIE_MAX_BAR			(6)
-
-#define PCIE_ATU_VIEW(base)		((base) + 0x0900)
-#define PCIE_ATU_CTL1(base)		((base) + 0x0904)
-#define PCIE_ATU_CTL2(base)		((base) + 0x0908)
-#define PCIE_ATU_LBAR(base)		((base) + 0x090c)
-#define PCIE_ATU_UBAR(base)		((base) + 0x0910)
-#define PCIE_ATU_LAR(base)		((base) + 0x0914)
-#define PCIE_ATU_LTAR(base)		((base) + 0x0918)
-#define PCIE_ATU_UTAR(base)		((base) + 0x091c)
-
-#define PCIE_MSI_ADDR_LOWER(base)	((base) + 0x0820)
-#define PCIE_MSI_ADDR_UPPER(base)	((base) + 0x0824)
-#define PCIE_MSI_ENABLE(base)		((base) + 0x0828)
-#define PCIE_MSI_MASK_RC(base)		((base) + 0x082c)
-#define PCIE_MSI_STATUS(base)		((base) + 0x0830)
-#define PEARL_PCIE_MSI_REGION		(0xce000000)
-#define PEARL_PCIE_MSI_DATA		(0)
-#define PCIE_MSI_GPIO(base)		((base) + 0x0888)
-
-#define PCIE_HDP_HOST_QUEUE_FULL	(BIT(17))
-#define USE_BAR_MATCH_MODE
-#define PCIE_ATU_OB_REGION		(BIT(0))
-#define PCIE_ATU_EN_REGION		(BIT(31))
-#define PCIE_ATU_EN_MATCH		(BIT(30))
-#define PCIE_BASE_REGION		(0xb0000000)
-#define PCIE_MEM_MAP_SIZE		(512 * 1024)
-
-#define PCIE_OB_REG_REGION		(0xcf000000)
-#define PCIE_CONFIG_REGION		(0xcf000000)
-#define PCIE_CONFIG_SIZE		(4096)
-#define PCIE_CONFIG_CH			(1)
-
-/* inbound mapping */
-#define PCIE_IB_BAR0			(0x00000000)	/* ddr */
-#define PCIE_IB_BAR0_CH			(0)
-#define PCIE_IB_BAR3			(0xe0000000)	/* sys_reg */
-#define PCIE_IB_BAR3_CH			(1)
-
-/* outbound mapping */
-#define PCIE_MEM_CH			(0)
-#define PCIE_REG_CH			(1)
-#define PCIE_MEM_REGION			(0xc0000000)
-#define PCIE_MEM_SIZE			(0x000fffff)
-#define PCIE_MEM_TAR			(0x80000000)
-
-#define PCIE_MSI_REGION			(0xce000000)
-#define PCIE_MSI_SIZE			(KBYTE(4) - 1)
-#define PCIE_MSI_CH			(1)
-
-/* size of config region */
-#define PCIE_CFG_SIZE			(0x0000ffff)
-
-#define PCIE_ATU_DIR_IB			(BIT(31))
-#define PCIE_ATU_DIR_OB			(0)
-#define PCIE_ATU_DIR_CFG		(2)
-#define PCIE_ATU_DIR_MATCH_IB		(BIT(31) | BIT(30))
-
-#define PCIE_DMA_WR_0			(0)
-#define PCIE_DMA_WR_1			(1)
-#define PCIE_DMA_RD_0			(2)
-#define PCIE_DMA_RD_1			(3)
-
-#define PCIE_DMA_CHNL_CNTRL_CB		(BIT(0))
-#define PCIE_DMA_CHNL_CNTRL_TCB		(BIT(1))
-#define PCIE_DMA_CHNL_CNTRL_LLP		(BIT(2))
-#define PCIE_DMA_CHNL_CNTRL_LIE		(BIT(3))
-#define PCIE_DMA_CHNL_CNTRL_RIE		(BIT(4))
-#define PCIE_DMA_CHNL_CNTRL_CSS		(BIT(8))
-#define PCIE_DMA_CHNL_CNTRL_LLE		(BIT(9))
-#define PCIE_DMA_CHNL_CNTRL_TLP		(BIT(26))
-
-#define PCIE_DMA_CHNL_CONTEXT_RD	(BIT(31))
-#define PCIE_DMA_CHNL_CONTEXT_WR	(0)
-#define PCIE_MAX_BAR			(6)
-
 /* PCIe HDP interrupt status definition */
 #define PCIE_HDP_INT_EP_RXDMA		(BIT(0))
 #define PCIE_HDP_INT_HBM_UF		(BIT(1))
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 73f6fc0d4a01..56040b181cf5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4918,11 +4918,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
 	struct device *dev = &priv->udev->dev;
 	u32 queue, rts_rate;
 	u16 pktlen = skb->len;
-	u16 seq_number;
 	u16 rate_flag = tx_info->control.rates[0].flags;
 	int tx_desc_size = priv->fops->tx_desc_size;
 	int ret;
-	bool usedesc40, ampdu_enable, sgi = false, short_preamble = false;
+	bool ampdu_enable, sgi = false, short_preamble = false;
 
 	if (skb_headroom(skb) < tx_desc_size) {
 		dev_warn(dev,
@@ -4946,7 +4945,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
 	if (ieee80211_is_action(hdr->frame_control))
 		rtl8xxxu_dump_action(dev, hdr);
 
-	usedesc40 = (tx_desc_size == 40);
 	tx_info->rate_driver_data[0] = hw;
 
 	if (control && control->sta)
@@ -5013,7 +5011,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
 	else
 		rts_rate = 0;
 
-	seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
 
 	priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
 				ampdu_enable, rts_rate);
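Note: both rtl8xxxu hunks are dead-store cleanups: seq_number and usedesc40 were computed but never read, presumably left behind when the fill_txdesc callbacks took over that work. GCC's -Wunused-but-set-variable flags exactly this pattern; an illustrative reproduction:

	/* Compile with: gcc -c -Wunused-but-set-variable demo.c */
	static int demo(int x)
	{
		int seq_number;	/* warning: variable 'seq_number' set but not used */

		seq_number = x + 1;
		return x;
	}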
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 317c1b3101da..ba258318ee9f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -3404,75 +3404,6 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
3404 "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)); 3404 "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
3405} 3405}
3406 3406
3407static u8 _rtl8821ae_mrate_idx_to_arfr_id(
3408 struct ieee80211_hw *hw, u8 rate_index,
3409 enum wireless_mode wirelessmode)
3410{
3411 struct rtl_priv *rtlpriv = rtl_priv(hw);
3412 struct rtl_phy *rtlphy = &rtlpriv->phy;
3413 u8 ret = 0;
3414 switch (rate_index) {
3415 case RATR_INX_WIRELESS_NGB:
3416 if (rtlphy->rf_type == RF_1T1R)
3417 ret = 1;
3418 else
3419 ret = 0;
3420 ; break;
3421 case RATR_INX_WIRELESS_N:
3422 case RATR_INX_WIRELESS_NG:
3423 if (rtlphy->rf_type == RF_1T1R)
3424 ret = 5;
3425 else
3426 ret = 4;
3427 ; break;
3428 case RATR_INX_WIRELESS_NB:
3429 if (rtlphy->rf_type == RF_1T1R)
3430 ret = 3;
3431 else
3432 ret = 2;
3433 ; break;
3434 case RATR_INX_WIRELESS_GB:
3435 ret = 6;
3436 break;
3437 case RATR_INX_WIRELESS_G:
3438 ret = 7;
3439 break;
3440 case RATR_INX_WIRELESS_B:
3441 ret = 8;
3442 break;
3443 case RATR_INX_WIRELESS_MC:
3444 if ((wirelessmode == WIRELESS_MODE_B)
3445 || (wirelessmode == WIRELESS_MODE_G)
3446 || (wirelessmode == WIRELESS_MODE_N_24G)
3447 || (wirelessmode == WIRELESS_MODE_AC_24G))
3448 ret = 6;
3449 else
3450 ret = 7;
3451 case RATR_INX_WIRELESS_AC_5N:
3452 if (rtlphy->rf_type == RF_1T1R)
3453 ret = 10;
3454 else
3455 ret = 9;
3456 break;
3457 case RATR_INX_WIRELESS_AC_24N:
3458 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
3459 if (rtlphy->rf_type == RF_1T1R)
3460 ret = 10;
3461 else
3462 ret = 9;
3463 } else {
3464 if (rtlphy->rf_type == RF_1T1R)
3465 ret = 11;
3466 else
3467 ret = 12;
3468 }
3469 break;
3470 default:
3471 ret = 0; break;
3472 }
3473 return ret;
3474}
3475
3476static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate) 3407static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate)
3477{ 3408{
3478 u8 i, j, tmp_rate; 3409 u8 i, j, tmp_rate;
@@ -3761,7 +3692,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
 		break;
 	}
 
-	ratr_index = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
+	ratr_index = rtl_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
 	sta_entry->ratr_index = ratr_index;
 	ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode,
 							ratr_bitmap);
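Note: the per-driver _rtl8821ae_mrate_idx_to_arfr_id() is replaced by the shared rtl_mrate_idx_to_arfr_id() from the rtlwifi core. Worth flagging: the removed copy lacks a break at the end of its RATR_INX_WIRELESS_MC case, so ret falls straight through into RATR_INX_WIRELESS_AC_5N and is overwritten; whether the shared helper keeps or fixes that quirk is not visible in this diff. A compressed illustration of the hazard, with hypothetical case values:

	static int classify(int idx, int rf_1t1r)
	{
		int ret = 0;

		switch (idx) {
		case 7:			/* ..._WIRELESS_MC */
			ret = 6;
			/* missing break: control continues into the next case */
		case 8:			/* ..._WIRELESS_AC_5N */
			ret = rf_1t1r ? 10 : 9;
			break;
		}
		return ret;		/* idx == 7 can never return 6 here */
	}

Centralizing such tables is exactly the kind of cleanup that makes this class of bug visible once instead of once per chip driver.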
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 5d65521260b3..06996ad4f2bc 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2015 Linaro Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -33,6 +33,8 @@ struct qcom_scm_vmperm {
 
 #define QCOM_SCM_VMID_HLOS       0x3
 #define QCOM_SCM_VMID_MSS_MSA    0xF
+#define QCOM_SCM_VMID_WLAN       0x18
+#define QCOM_SCM_VMID_WLAN_CE    0x19
 #define QCOM_SCM_PERM_READ       0x4
 #define QCOM_SCM_PERM_WRITE      0x2
 #define QCOM_SCM_PERM_EXEC       0x1
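Note: the new QCOM_SCM_VMID_WLAN/QCOM_SCM_VMID_WLAN_CE IDs let a driver reassign firmware memory to the WLAN virtual machines through the existing qcom_scm_assign_mem() interface. A hypothetical kernel-context sketch of such a hand-off; the function name and call site are illustrative only, while the qcom_scm API, struct, and macros are the real ones from this header:

	#include <linux/kernel.h>
	#include <linux/bitops.h>
	#include <linux/qcom_scm.h>

	static int wlan_assign_fw_mem(phys_addr_t addr, size_t size)
	{
		/* Memory currently owned by the HLOS VM ... */
		unsigned int src_vm = BIT(QCOM_SCM_VMID_HLOS);
		/* ... is handed to the WLAN and WLAN-CE VMs, read/write. */
		struct qcom_scm_vmperm dst_vm[] = {
			{ QCOM_SCM_VMID_WLAN,
			  QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE },
			{ QCOM_SCM_VMID_WLAN_CE,
			  QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE },
		};

		return qcom_scm_assign_mem(addr, size, &src_vm, dst_vm,
					   ARRAY_SIZE(dst_vm));
	}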