aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKalle Valo <kvalo@codeaurora.org>2016-04-06 14:16:01 -0400
committerKalle Valo <kvalo@codeaurora.org>2016-04-06 14:16:01 -0400
commit97b9b84464086f97b7b5399ede45bdb64da7a017 (patch)
treed18f1cc8b94b981b8b2272b37a4ad12d44001e5e
parent4da46cebbd3b4dc445195a9672c99c1353af5695 (diff)
parent46167a8fd4248533ad15867e6988ff20e76de641 (diff)
Merge tag 'iwlwifi-next-for-kalle-2016-03-30' of https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
* Support for Link Quality measurement (Aviya) * Improvements in thermal (Chaya Rachel) * Various cleanups (many people) * Improvements in firmware error dump (Golan) * More work on 9000 devices and MSIx (Haim) * Continuation of the Dynamic Queue Allocation work (Liad) * Scan timeout to cope with buggy firmware (Luca) * D0i3 improvements (Luca) * Make the paging less memory hungry (Matti) * 9000 new Rx path (Sara)
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-1000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-2000.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-5000.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-6000.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-9000.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c1315
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h108
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c262
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c80
47 files changed, 1475 insertions, 1907 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 16c4f383488f..492035f406e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -88,16 +88,6 @@ config IWLWIFI_BCAST_FILTERING
88 If unsure, don't enable this option, as some programs might 88 If unsure, don't enable this option, as some programs might
89 expect incoming broadcasts for their normal operations. 89 expect incoming broadcasts for their normal operations.
90 90
91config IWLWIFI_UAPSD
92 bool "enable U-APSD by default"
93 depends on IWLMVM
94 help
95 Say Y here to enable U-APSD by default. This may cause
96 interoperability problems with some APs, manifesting in lower than
97 expected throughput due to those APs not enabling aggregation
98
99 If unsure, say N.
100
101config IWLWIFI_PCIE_RTPM 91config IWLWIFI_PCIE_RTPM
102 bool "Enable runtime power management mode for PCIe devices" 92 bool "Enable runtime power management mode for PCIe devices"
103 depends on IWLMVM && PM 93 depends on IWLMVM && PM
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 85628127947f..614716251c39 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1071,7 +1071,7 @@ static void iwl_bg_restart(struct work_struct *data)
1071 1071
1072static void iwl_setup_deferred_work(struct iwl_priv *priv) 1072static void iwl_setup_deferred_work(struct iwl_priv *priv)
1073{ 1073{
1074 priv->workqueue = create_singlethread_workqueue(DRV_NAME); 1074 priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
1075 1075
1076 INIT_WORK(&priv->restart, iwl_bg_restart); 1076 INIT_WORK(&priv->restart, iwl_bg_restart);
1077 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); 1077 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
index a90dbab6bbbe..ef22c3d168fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
@@ -34,10 +34,6 @@
34#define IWL1000_UCODE_API_MAX 5 34#define IWL1000_UCODE_API_MAX 5
35#define IWL100_UCODE_API_MAX 5 35#define IWL100_UCODE_API_MAX 5
36 36
37/* Oldest version we won't warn about */
38#define IWL1000_UCODE_API_OK 5
39#define IWL100_UCODE_API_OK 5
40
41/* Lowest firmware API version supported */ 37/* Lowest firmware API version supported */
42#define IWL1000_UCODE_API_MIN 1 38#define IWL1000_UCODE_API_MIN 1
43#define IWL100_UCODE_API_MIN 5 39#define IWL100_UCODE_API_MIN 5
@@ -86,7 +82,6 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
86#define IWL_DEVICE_1000 \ 82#define IWL_DEVICE_1000 \
87 .fw_name_pre = IWL1000_FW_PRE, \ 83 .fw_name_pre = IWL1000_FW_PRE, \
88 .ucode_api_max = IWL1000_UCODE_API_MAX, \ 84 .ucode_api_max = IWL1000_UCODE_API_MAX, \
89 .ucode_api_ok = IWL1000_UCODE_API_OK, \
90 .ucode_api_min = IWL1000_UCODE_API_MIN, \ 85 .ucode_api_min = IWL1000_UCODE_API_MIN, \
91 .device_family = IWL_DEVICE_FAMILY_1000, \ 86 .device_family = IWL_DEVICE_FAMILY_1000, \
92 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 87 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -112,7 +107,6 @@ const struct iwl_cfg iwl1000_bg_cfg = {
112#define IWL_DEVICE_100 \ 107#define IWL_DEVICE_100 \
113 .fw_name_pre = IWL100_FW_PRE, \ 108 .fw_name_pre = IWL100_FW_PRE, \
114 .ucode_api_max = IWL100_UCODE_API_MAX, \ 109 .ucode_api_max = IWL100_UCODE_API_MAX, \
115 .ucode_api_ok = IWL100_UCODE_API_OK, \
116 .ucode_api_min = IWL100_UCODE_API_MIN, \ 110 .ucode_api_min = IWL100_UCODE_API_MIN, \
117 .device_family = IWL_DEVICE_FAMILY_100, \ 111 .device_family = IWL_DEVICE_FAMILY_100, \
118 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 112 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -136,5 +130,5 @@ const struct iwl_cfg iwl100_bg_cfg = {
136 IWL_DEVICE_100, 130 IWL_DEVICE_100,
137}; 131};
138 132
139MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK)); 133MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
140MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK)); 134MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
index a6da9594c4a5..dc246c997084 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
@@ -36,12 +36,6 @@
36#define IWL105_UCODE_API_MAX 6 36#define IWL105_UCODE_API_MAX 6
37#define IWL135_UCODE_API_MAX 6 37#define IWL135_UCODE_API_MAX 6
38 38
39/* Oldest version we won't warn about */
40#define IWL2030_UCODE_API_OK 6
41#define IWL2000_UCODE_API_OK 6
42#define IWL105_UCODE_API_OK 6
43#define IWL135_UCODE_API_OK 6
44
45/* Lowest firmware API version supported */ 39/* Lowest firmware API version supported */
46#define IWL2030_UCODE_API_MIN 5 40#define IWL2030_UCODE_API_MIN 5
47#define IWL2000_UCODE_API_MIN 5 41#define IWL2000_UCODE_API_MIN 5
@@ -114,7 +108,6 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
114#define IWL_DEVICE_2000 \ 108#define IWL_DEVICE_2000 \
115 .fw_name_pre = IWL2000_FW_PRE, \ 109 .fw_name_pre = IWL2000_FW_PRE, \
116 .ucode_api_max = IWL2000_UCODE_API_MAX, \ 110 .ucode_api_max = IWL2000_UCODE_API_MAX, \
117 .ucode_api_ok = IWL2000_UCODE_API_OK, \
118 .ucode_api_min = IWL2000_UCODE_API_MIN, \ 111 .ucode_api_min = IWL2000_UCODE_API_MIN, \
119 .device_family = IWL_DEVICE_FAMILY_2000, \ 112 .device_family = IWL_DEVICE_FAMILY_2000, \
120 .max_inst_size = IWL60_RTC_INST_SIZE, \ 113 .max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -142,7 +135,6 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
142#define IWL_DEVICE_2030 \ 135#define IWL_DEVICE_2030 \
143 .fw_name_pre = IWL2030_FW_PRE, \ 136 .fw_name_pre = IWL2030_FW_PRE, \
144 .ucode_api_max = IWL2030_UCODE_API_MAX, \ 137 .ucode_api_max = IWL2030_UCODE_API_MAX, \
145 .ucode_api_ok = IWL2030_UCODE_API_OK, \
146 .ucode_api_min = IWL2030_UCODE_API_MIN, \ 138 .ucode_api_min = IWL2030_UCODE_API_MIN, \
147 .device_family = IWL_DEVICE_FAMILY_2030, \ 139 .device_family = IWL_DEVICE_FAMILY_2030, \
148 .max_inst_size = IWL60_RTC_INST_SIZE, \ 140 .max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -163,7 +155,6 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
163#define IWL_DEVICE_105 \ 155#define IWL_DEVICE_105 \
164 .fw_name_pre = IWL105_FW_PRE, \ 156 .fw_name_pre = IWL105_FW_PRE, \
165 .ucode_api_max = IWL105_UCODE_API_MAX, \ 157 .ucode_api_max = IWL105_UCODE_API_MAX, \
166 .ucode_api_ok = IWL105_UCODE_API_OK, \
167 .ucode_api_min = IWL105_UCODE_API_MIN, \ 158 .ucode_api_min = IWL105_UCODE_API_MIN, \
168 .device_family = IWL_DEVICE_FAMILY_105, \ 159 .device_family = IWL_DEVICE_FAMILY_105, \
169 .max_inst_size = IWL60_RTC_INST_SIZE, \ 160 .max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -191,7 +182,6 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
191#define IWL_DEVICE_135 \ 182#define IWL_DEVICE_135 \
192 .fw_name_pre = IWL135_FW_PRE, \ 183 .fw_name_pre = IWL135_FW_PRE, \
193 .ucode_api_max = IWL135_UCODE_API_MAX, \ 184 .ucode_api_max = IWL135_UCODE_API_MAX, \
194 .ucode_api_ok = IWL135_UCODE_API_OK, \
195 .ucode_api_min = IWL135_UCODE_API_MIN, \ 185 .ucode_api_min = IWL135_UCODE_API_MIN, \
196 .device_family = IWL_DEVICE_FAMILY_135, \ 186 .device_family = IWL_DEVICE_FAMILY_135, \
197 .max_inst_size = IWL60_RTC_INST_SIZE, \ 187 .max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -210,7 +200,7 @@ const struct iwl_cfg iwl135_bgn_cfg = {
210 .ht_params = &iwl2000_ht_params, 200 .ht_params = &iwl2000_ht_params,
211}; 201};
212 202
213MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK)); 203MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
214MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK)); 204MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
215MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK)); 205MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
216MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK)); 206MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
index 8b5afdef2d83..4dcdab6781cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
@@ -34,10 +34,6 @@
34#define IWL5000_UCODE_API_MAX 5 34#define IWL5000_UCODE_API_MAX 5
35#define IWL5150_UCODE_API_MAX 2 35#define IWL5150_UCODE_API_MAX 2
36 36
37/* Oldest version we won't warn about */
38#define IWL5000_UCODE_API_OK 5
39#define IWL5150_UCODE_API_OK 2
40
41/* Lowest firmware API version supported */ 37/* Lowest firmware API version supported */
42#define IWL5000_UCODE_API_MIN 1 38#define IWL5000_UCODE_API_MIN 1
43#define IWL5150_UCODE_API_MIN 1 39#define IWL5150_UCODE_API_MIN 1
@@ -84,7 +80,6 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
84#define IWL_DEVICE_5000 \ 80#define IWL_DEVICE_5000 \
85 .fw_name_pre = IWL5000_FW_PRE, \ 81 .fw_name_pre = IWL5000_FW_PRE, \
86 .ucode_api_max = IWL5000_UCODE_API_MAX, \ 82 .ucode_api_max = IWL5000_UCODE_API_MAX, \
87 .ucode_api_ok = IWL5000_UCODE_API_OK, \
88 .ucode_api_min = IWL5000_UCODE_API_MIN, \ 83 .ucode_api_min = IWL5000_UCODE_API_MIN, \
89 .device_family = IWL_DEVICE_FAMILY_5000, \ 84 .device_family = IWL_DEVICE_FAMILY_5000, \
90 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 85 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -132,7 +127,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
132 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", 127 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
133 .fw_name_pre = IWL5000_FW_PRE, 128 .fw_name_pre = IWL5000_FW_PRE,
134 .ucode_api_max = IWL5000_UCODE_API_MAX, 129 .ucode_api_max = IWL5000_UCODE_API_MAX,
135 .ucode_api_ok = IWL5000_UCODE_API_OK,
136 .ucode_api_min = IWL5000_UCODE_API_MIN, 130 .ucode_api_min = IWL5000_UCODE_API_MIN,
137 .device_family = IWL_DEVICE_FAMILY_5000, 131 .device_family = IWL_DEVICE_FAMILY_5000,
138 .max_inst_size = IWLAGN_RTC_INST_SIZE, 132 .max_inst_size = IWLAGN_RTC_INST_SIZE,
@@ -149,7 +143,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
149#define IWL_DEVICE_5150 \ 143#define IWL_DEVICE_5150 \
150 .fw_name_pre = IWL5150_FW_PRE, \ 144 .fw_name_pre = IWL5150_FW_PRE, \
151 .ucode_api_max = IWL5150_UCODE_API_MAX, \ 145 .ucode_api_max = IWL5150_UCODE_API_MAX, \
152 .ucode_api_ok = IWL5150_UCODE_API_OK, \
153 .ucode_api_min = IWL5150_UCODE_API_MIN, \ 146 .ucode_api_min = IWL5150_UCODE_API_MIN, \
154 .device_family = IWL_DEVICE_FAMILY_5150, \ 147 .device_family = IWL_DEVICE_FAMILY_5150, \
155 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 148 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -174,5 +167,5 @@ const struct iwl_cfg iwl5150_abg_cfg = {
174 IWL_DEVICE_5150, 167 IWL_DEVICE_5150,
175}; 168};
176 169
177MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK)); 170MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
178MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK)); 171MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
index 0b4ba781b631..9938f5340ac0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
@@ -36,13 +36,6 @@
36#define IWL6000G2_UCODE_API_MAX 6 36#define IWL6000G2_UCODE_API_MAX 6
37#define IWL6035_UCODE_API_MAX 6 37#define IWL6035_UCODE_API_MAX 6
38 38
39/* Oldest version we won't warn about */
40#define IWL6000_UCODE_API_OK 4
41#define IWL6000G2_UCODE_API_OK 5
42#define IWL6050_UCODE_API_OK 5
43#define IWL6000G2B_UCODE_API_OK 6
44#define IWL6035_UCODE_API_OK 6
45
46/* Lowest firmware API version supported */ 39/* Lowest firmware API version supported */
47#define IWL6000_UCODE_API_MIN 4 40#define IWL6000_UCODE_API_MIN 4
48#define IWL6050_UCODE_API_MIN 4 41#define IWL6050_UCODE_API_MIN 4
@@ -136,7 +129,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
136#define IWL_DEVICE_6005 \ 129#define IWL_DEVICE_6005 \
137 .fw_name_pre = IWL6005_FW_PRE, \ 130 .fw_name_pre = IWL6005_FW_PRE, \
138 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 131 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
139 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
140 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 132 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
141 .device_family = IWL_DEVICE_FAMILY_6005, \ 133 .device_family = IWL_DEVICE_FAMILY_6005, \
142 .max_inst_size = IWL60_RTC_INST_SIZE, \ 134 .max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -191,7 +183,6 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
191#define IWL_DEVICE_6030 \ 183#define IWL_DEVICE_6030 \
192 .fw_name_pre = IWL6030_FW_PRE, \ 184 .fw_name_pre = IWL6030_FW_PRE, \
193 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 185 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
194 .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
195 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 186 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
196 .device_family = IWL_DEVICE_FAMILY_6030, \ 187 .device_family = IWL_DEVICE_FAMILY_6030, \
197 .max_inst_size = IWL60_RTC_INST_SIZE, \ 188 .max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -228,7 +219,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
228#define IWL_DEVICE_6035 \ 219#define IWL_DEVICE_6035 \
229 .fw_name_pre = IWL6030_FW_PRE, \ 220 .fw_name_pre = IWL6030_FW_PRE, \
230 .ucode_api_max = IWL6035_UCODE_API_MAX, \ 221 .ucode_api_max = IWL6035_UCODE_API_MAX, \
231 .ucode_api_ok = IWL6035_UCODE_API_OK, \
232 .ucode_api_min = IWL6035_UCODE_API_MIN, \ 222 .ucode_api_min = IWL6035_UCODE_API_MIN, \
233 .device_family = IWL_DEVICE_FAMILY_6030, \ 223 .device_family = IWL_DEVICE_FAMILY_6030, \
234 .max_inst_size = IWL60_RTC_INST_SIZE, \ 224 .max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -282,7 +272,6 @@ const struct iwl_cfg iwl130_bg_cfg = {
282#define IWL_DEVICE_6000i \ 272#define IWL_DEVICE_6000i \
283 .fw_name_pre = IWL6000_FW_PRE, \ 273 .fw_name_pre = IWL6000_FW_PRE, \
284 .ucode_api_max = IWL6000_UCODE_API_MAX, \ 274 .ucode_api_max = IWL6000_UCODE_API_MAX, \
285 .ucode_api_ok = IWL6000_UCODE_API_OK, \
286 .ucode_api_min = IWL6000_UCODE_API_MIN, \ 275 .ucode_api_min = IWL6000_UCODE_API_MIN, \
287 .device_family = IWL_DEVICE_FAMILY_6000i, \ 276 .device_family = IWL_DEVICE_FAMILY_6000i, \
288 .max_inst_size = IWL60_RTC_INST_SIZE, \ 277 .max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -370,7 +359,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
370 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN", 359 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
371 .fw_name_pre = IWL6000_FW_PRE, 360 .fw_name_pre = IWL6000_FW_PRE,
372 .ucode_api_max = IWL6000_UCODE_API_MAX, 361 .ucode_api_max = IWL6000_UCODE_API_MAX,
373 .ucode_api_ok = IWL6000_UCODE_API_OK,
374 .ucode_api_min = IWL6000_UCODE_API_MIN, 362 .ucode_api_min = IWL6000_UCODE_API_MIN,
375 .device_family = IWL_DEVICE_FAMILY_6000, 363 .device_family = IWL_DEVICE_FAMILY_6000,
376 .max_inst_size = IWL60_RTC_INST_SIZE, 364 .max_inst_size = IWL60_RTC_INST_SIZE,
@@ -383,7 +371,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
383 .led_mode = IWL_LED_BLINK, 371 .led_mode = IWL_LED_BLINK,
384}; 372};
385 373
386MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK)); 374MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
387MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK)); 375MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
388MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK)); 376MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
389MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK)); 377MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index fc475ce59b47..b6283c881d42 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -76,16 +76,10 @@
76#define IWL7265D_UCODE_API_MAX 21 76#define IWL7265D_UCODE_API_MAX 21
77#define IWL3168_UCODE_API_MAX 21 77#define IWL3168_UCODE_API_MAX 21
78 78
79/* Oldest version we won't warn about */
80#define IWL7260_UCODE_API_OK 13
81#define IWL7265_UCODE_API_OK 13
82#define IWL7265D_UCODE_API_OK 13
83#define IWL3168_UCODE_API_OK 20
84
85/* Lowest firmware API version supported */ 79/* Lowest firmware API version supported */
86#define IWL7260_UCODE_API_MIN 13 80#define IWL7260_UCODE_API_MIN 16
87#define IWL7265_UCODE_API_MIN 13 81#define IWL7265_UCODE_API_MIN 16
88#define IWL7265D_UCODE_API_MIN 13 82#define IWL7265D_UCODE_API_MIN 16
89#define IWL3168_UCODE_API_MIN 20 83#define IWL3168_UCODE_API_MIN 20
90 84
91/* NVM versions */ 85/* NVM versions */
@@ -179,25 +173,21 @@ static const struct iwl_ht_params iwl7000_ht_params = {
179#define IWL_DEVICE_7000 \ 173#define IWL_DEVICE_7000 \
180 IWL_DEVICE_7000_COMMON, \ 174 IWL_DEVICE_7000_COMMON, \
181 .ucode_api_max = IWL7260_UCODE_API_MAX, \ 175 .ucode_api_max = IWL7260_UCODE_API_MAX, \
182 .ucode_api_ok = IWL7260_UCODE_API_OK, \
183 .ucode_api_min = IWL7260_UCODE_API_MIN 176 .ucode_api_min = IWL7260_UCODE_API_MIN
184 177
185#define IWL_DEVICE_7005 \ 178#define IWL_DEVICE_7005 \
186 IWL_DEVICE_7000_COMMON, \ 179 IWL_DEVICE_7000_COMMON, \
187 .ucode_api_max = IWL7265_UCODE_API_MAX, \ 180 .ucode_api_max = IWL7265_UCODE_API_MAX, \
188 .ucode_api_ok = IWL7265_UCODE_API_OK, \
189 .ucode_api_min = IWL7265_UCODE_API_MIN 181 .ucode_api_min = IWL7265_UCODE_API_MIN
190 182
191#define IWL_DEVICE_3008 \ 183#define IWL_DEVICE_3008 \
192 IWL_DEVICE_7000_COMMON, \ 184 IWL_DEVICE_7000_COMMON, \
193 .ucode_api_max = IWL3168_UCODE_API_MAX, \ 185 .ucode_api_max = IWL3168_UCODE_API_MAX, \
194 .ucode_api_ok = IWL3168_UCODE_API_OK, \
195 .ucode_api_min = IWL3168_UCODE_API_MIN 186 .ucode_api_min = IWL3168_UCODE_API_MIN
196 187
197#define IWL_DEVICE_7005D \ 188#define IWL_DEVICE_7005D \
198 IWL_DEVICE_7000_COMMON, \ 189 IWL_DEVICE_7000_COMMON, \
199 .ucode_api_max = IWL7265D_UCODE_API_MAX, \ 190 .ucode_api_max = IWL7265D_UCODE_API_MAX, \
200 .ucode_api_ok = IWL7265D_UCODE_API_OK, \
201 .ucode_api_min = IWL7265D_UCODE_API_MIN 191 .ucode_api_min = IWL7265D_UCODE_API_MIN
202 192
203const struct iwl_cfg iwl7260_2ac_cfg = { 193const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -388,8 +378,8 @@ const struct iwl_cfg iwl7265d_n_cfg = {
388 .dccm_len = IWL7265_DCCM_LEN, 378 .dccm_len = IWL7265_DCCM_LEN,
389}; 379};
390 380
391MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 381MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
392MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 382MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
393MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK)); 383MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_MAX));
394MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK)); 384MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_MAX));
395MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK)); 385MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 97be104d1203..0728a288aa3d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -73,12 +73,8 @@
73#define IWL8000_UCODE_API_MAX 21 73#define IWL8000_UCODE_API_MAX 21
74#define IWL8265_UCODE_API_MAX 21 74#define IWL8265_UCODE_API_MAX 21
75 75
76/* Oldest version we won't warn about */
77#define IWL8000_UCODE_API_OK 13
78#define IWL8265_UCODE_API_OK 20
79
80/* Lowest firmware API version supported */ 76/* Lowest firmware API version supported */
81#define IWL8000_UCODE_API_MIN 13 77#define IWL8000_UCODE_API_MIN 16
82#define IWL8265_UCODE_API_MIN 20 78#define IWL8265_UCODE_API_MIN 20
83 79
84/* NVM versions */ 80/* NVM versions */
@@ -175,19 +171,16 @@ static const struct iwl_tt_params iwl8000_tt_params = {
175#define IWL_DEVICE_8000 \ 171#define IWL_DEVICE_8000 \
176 IWL_DEVICE_8000_COMMON, \ 172 IWL_DEVICE_8000_COMMON, \
177 .ucode_api_max = IWL8000_UCODE_API_MAX, \ 173 .ucode_api_max = IWL8000_UCODE_API_MAX, \
178 .ucode_api_ok = IWL8000_UCODE_API_OK, \
179 .ucode_api_min = IWL8000_UCODE_API_MIN \ 174 .ucode_api_min = IWL8000_UCODE_API_MIN \
180 175
181#define IWL_DEVICE_8260 \ 176#define IWL_DEVICE_8260 \
182 IWL_DEVICE_8000_COMMON, \ 177 IWL_DEVICE_8000_COMMON, \
183 .ucode_api_max = IWL8000_UCODE_API_MAX, \ 178 .ucode_api_max = IWL8000_UCODE_API_MAX, \
184 .ucode_api_ok = IWL8000_UCODE_API_OK, \
185 .ucode_api_min = IWL8000_UCODE_API_MIN \ 179 .ucode_api_min = IWL8000_UCODE_API_MIN \
186 180
187#define IWL_DEVICE_8265 \ 181#define IWL_DEVICE_8265 \
188 IWL_DEVICE_8000_COMMON, \ 182 IWL_DEVICE_8000_COMMON, \
189 .ucode_api_max = IWL8265_UCODE_API_MAX, \ 183 .ucode_api_max = IWL8265_UCODE_API_MAX, \
190 .ucode_api_ok = IWL8265_UCODE_API_OK, \
191 .ucode_api_min = IWL8265_UCODE_API_MIN \ 184 .ucode_api_min = IWL8265_UCODE_API_MIN \
192 185
193const struct iwl_cfg iwl8260_2n_cfg = { 186const struct iwl_cfg iwl8260_2n_cfg = {
@@ -259,5 +252,5 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
259 .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, 252 .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
260}; 253};
261 254
262MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); 255MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
263MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK)); 256MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index 318b1dc171f2..a3d35aa291a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2015 Intel Deutschland GmbH 8 * Copyright(c) 2015-2016 Intel Deutschland GmbH
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
18 * 18 *
19 * BSD LICENSE 19 * BSD LICENSE
20 * 20 *
21 * Copyright(c) 2015 Intel Deutschland GmbH 21 * Copyright(c) 2015-2016 Intel Deutschland GmbH
22 * All rights reserved. 22 * All rights reserved.
23 * 23 *
24 * Redistribution and use in source and binary forms, with or without 24 * Redistribution and use in source and binary forms, with or without
@@ -57,11 +57,8 @@
57/* Highest firmware API version supported */ 57/* Highest firmware API version supported */
58#define IWL9000_UCODE_API_MAX 21 58#define IWL9000_UCODE_API_MAX 21
59 59
60/* Oldest version we won't warn about */
61#define IWL9000_UCODE_API_OK 13
62
63/* Lowest firmware API version supported */ 60/* Lowest firmware API version supported */
64#define IWL9000_UCODE_API_MIN 13 61#define IWL9000_UCODE_API_MIN 16
65 62
66/* NVM versions */ 63/* NVM versions */
67#define IWL9000_NVM_VERSION 0x0a1d 64#define IWL9000_NVM_VERSION 0x0a1d
@@ -122,7 +119,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
122 119
123#define IWL_DEVICE_9000 \ 120#define IWL_DEVICE_9000 \
124 .ucode_api_max = IWL9000_UCODE_API_MAX, \ 121 .ucode_api_max = IWL9000_UCODE_API_MAX, \
125 .ucode_api_ok = IWL9000_UCODE_API_OK, \
126 .ucode_api_min = IWL9000_UCODE_API_MIN, \ 122 .ucode_api_min = IWL9000_UCODE_API_MIN, \
127 .device_family = IWL_DEVICE_FAMILY_8000, \ 123 .device_family = IWL_DEVICE_FAMILY_8000, \
128 .max_inst_size = IWL60_RTC_INST_SIZE, \ 124 .max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -137,14 +133,15 @@ static const struct iwl_tt_params iwl9000_tt_params = {
137 .dccm2_len = IWL9000_DCCM2_LEN, \ 133 .dccm2_len = IWL9000_DCCM2_LEN, \
138 .smem_offset = IWL9000_SMEM_OFFSET, \ 134 .smem_offset = IWL9000_SMEM_OFFSET, \
139 .smem_len = IWL9000_SMEM_LEN, \ 135 .smem_len = IWL9000_SMEM_LEN, \
136 .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
140 .thermal_params = &iwl9000_tt_params, \ 137 .thermal_params = &iwl9000_tt_params, \
141 .apmg_not_supported = true, \ 138 .apmg_not_supported = true, \
142 .mq_rx_supported = true, \ 139 .mq_rx_supported = true, \
143 .vht_mu_mimo_supported = true, \ 140 .vht_mu_mimo_supported = true, \
144 .mac_addr_from_csr = true 141 .mac_addr_from_csr = true
145 142
146const struct iwl_cfg iwl9260_2ac_cfg = { 143const struct iwl_cfg iwl9560_2ac_cfg = {
147 .name = "Intel(R) Dual Band Wireless AC 9260", 144 .name = "Intel(R) Dual Band Wireless AC 9560",
148 .fw_name_pre = IWL9000_FW_PRE, 145 .fw_name_pre = IWL9000_FW_PRE,
149 IWL_DEVICE_9000, 146 IWL_DEVICE_9000,
150 .ht_params = &iwl9000_ht_params, 147 .ht_params = &iwl9000_ht_params,
@@ -163,4 +160,4 @@ const struct iwl_cfg iwl5165_2ac_cfg = {
163 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 160 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
164}; 161};
165 162
166MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_OK)); 163MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 3e4d346be350..08bb4f4e424a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -131,6 +131,8 @@ enum iwl_led_mode {
131#define IWL_MAX_WD_TIMEOUT 120000 131#define IWL_MAX_WD_TIMEOUT 120000
132 132
133#define IWL_DEFAULT_MAX_TX_POWER 22 133#define IWL_DEFAULT_MAX_TX_POWER 22
134#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
135 NETIF_F_TSO | NETIF_F_TSO6)
134 136
135/* Antenna presence definitions */ 137/* Antenna presence definitions */
136#define ANT_NONE 0x0 138#define ANT_NONE 0x0
@@ -277,8 +279,6 @@ struct iwl_pwr_tx_backoff {
277 * (.ucode) will be added to filename before loading from disk. The 279 * (.ucode) will be added to filename before loading from disk. The
278 * filename is constructed as fw_name_pre<api>.ucode. 280 * filename is constructed as fw_name_pre<api>.ucode.
279 * @ucode_api_max: Highest version of uCode API supported by driver. 281 * @ucode_api_max: Highest version of uCode API supported by driver.
280 * @ucode_api_ok: oldest version of the uCode API that is OK to load
281 * without a warning, for use in transitions
282 * @ucode_api_min: Lowest version of uCode API supported by driver. 282 * @ucode_api_min: Lowest version of uCode API supported by driver.
283 * @max_inst_size: The maximal length of the fw inst section 283 * @max_inst_size: The maximal length of the fw inst section
284 * @max_data_size: The maximal length of the fw data section 284 * @max_data_size: The maximal length of the fw data section
@@ -324,7 +324,6 @@ struct iwl_cfg {
324 const char *name; 324 const char *name;
325 const char *fw_name_pre; 325 const char *fw_name_pre;
326 const unsigned int ucode_api_max; 326 const unsigned int ucode_api_max;
327 const unsigned int ucode_api_ok;
328 const unsigned int ucode_api_min; 327 const unsigned int ucode_api_min;
329 const enum iwl_device_family device_family; 328 const enum iwl_device_family device_family;
330 const u32 max_data_size; 329 const u32 max_data_size;
@@ -439,7 +438,7 @@ extern const struct iwl_cfg iwl8265_2ac_cfg;
439extern const struct iwl_cfg iwl4165_2ac_cfg; 438extern const struct iwl_cfg iwl4165_2ac_cfg;
440extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; 439extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
441extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; 440extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
442extern const struct iwl_cfg iwl9260_2ac_cfg; 441extern const struct iwl_cfg iwl9560_2ac_cfg;
443extern const struct iwl_cfg iwl5165_2ac_cfg; 442extern const struct iwl_cfg iwl5165_2ac_cfg;
444#endif /* CONFIG_IWLMVM */ 443#endif /* CONFIG_IWLMVM */
445 444
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index f899666acb41..48e873732d4e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -179,6 +179,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
179 kfree(drv->fw.dbg_conf_tlv[i]); 179 kfree(drv->fw.dbg_conf_tlv[i]);
180 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) 180 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
181 kfree(drv->fw.dbg_trigger_tlv[i]); 181 kfree(drv->fw.dbg_trigger_tlv[i]);
182 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
183 kfree(drv->fw.dbg_mem_tlv[i]);
182 184
183 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) 185 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
184 iwl_free_fw_img(drv, drv->fw.img + i); 186 iwl_free_fw_img(drv, drv->fw.img + i);
@@ -297,6 +299,7 @@ struct iwl_firmware_pieces {
297 size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; 299 size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
298 struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; 300 struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
299 size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; 301 size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
302 struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
300}; 303};
301 304
302/* 305/*
@@ -1041,6 +1044,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
1041 iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len); 1044 iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
1042 gscan_capa = true; 1045 gscan_capa = true;
1043 break; 1046 break;
1047 case IWL_UCODE_TLV_FW_MEM_SEG: {
1048 struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
1049 (void *)tlv_data;
1050 u32 type;
1051
1052 if (tlv_len != (sizeof(*dbg_mem)))
1053 goto invalid_tlv_len;
1054
1055 type = le32_to_cpu(dbg_mem->data_type);
1056 drv->fw.dbg_dynamic_mem = true;
1057
1058 if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
1059 IWL_ERR(drv,
1060 "Skip unknown dbg mem segment: %u\n",
1061 dbg_mem->data_type);
1062 break;
1063 }
1064
1065 if (pieces->dbg_mem_tlv[type]) {
1066 IWL_ERR(drv,
1067 "Ignore duplicate mem segment: %u\n",
1068 dbg_mem->data_type);
1069 break;
1070 }
1071
1072 IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
1073 dbg_mem->data_type);
1074
1075 pieces->dbg_mem_tlv[type] = dbg_mem;
1076 break;
1077 }
1044 default: 1078 default:
1045 IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); 1079 IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
1046 break; 1080 break;
@@ -1060,11 +1094,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
1060 return -EINVAL; 1094 return -EINVAL;
1061 } 1095 }
1062 1096
1063 if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && 1097 /*
1064 !gscan_capa, 1098 * If ucode advertises that it supports GSCAN but GSCAN
1065 "GSCAN is supported but capabilities TLV is unavailable\n")) 1099 * capabilities TLV is not present, or if it has an old format,
1100 * warn and continue without GSCAN.
1101 */
1102 if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
1103 !gscan_capa) {
1104 IWL_DEBUG_INFO(drv,
1105 "GSCAN is supported but capabilities TLV is unavailable\n");
1066 __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, 1106 __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
1067 capa->_capa); 1107 capa->_capa);
1108 }
1068 1109
1069 return 0; 1110 return 0;
1070 1111
@@ -1199,7 +1240,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1199 int err; 1240 int err;
1200 struct iwl_firmware_pieces *pieces; 1241 struct iwl_firmware_pieces *pieces;
1201 const unsigned int api_max = drv->cfg->ucode_api_max; 1242 const unsigned int api_max = drv->cfg->ucode_api_max;
1202 unsigned int api_ok = drv->cfg->ucode_api_ok;
1203 const unsigned int api_min = drv->cfg->ucode_api_min; 1243 const unsigned int api_min = drv->cfg->ucode_api_min;
1204 size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; 1244 size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
1205 u32 api_ver; 1245 u32 api_ver;
@@ -1212,20 +1252,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1212 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; 1252 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1213 fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; 1253 fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
1214 1254
1215 if (!api_ok)
1216 api_ok = api_max;
1217
1218 pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); 1255 pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
1219 if (!pieces) 1256 if (!pieces)
1220 return; 1257 return;
1221 1258
1222 if (!ucode_raw) { 1259 if (!ucode_raw)
1223 if (drv->fw_index <= api_ok)
1224 IWL_ERR(drv,
1225 "request for firmware file '%s' failed.\n",
1226 drv->firmware_name);
1227 goto try_again; 1260 goto try_again;
1228 }
1229 1261
1230 IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", 1262 IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
1231 drv->firmware_name, ucode_raw->size); 1263 drv->firmware_name, ucode_raw->size);
@@ -1248,10 +1280,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1248 if (err) 1280 if (err)
1249 goto try_again; 1281 goto try_again;
1250 1282
1251 if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) 1283 api_ver = drv->fw.ucode_ver;
1252 api_ver = drv->fw.ucode_ver;
1253 else
1254 api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
1255 1284
1256 /* 1285 /*
1257 * api_ver should match the api version forming part of the 1286 * api_ver should match the api version forming part of the
@@ -1267,19 +1296,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1267 api_max, api_ver); 1296 api_max, api_ver);
1268 goto try_again; 1297 goto try_again;
1269 } 1298 }
1270
1271 if (api_ver < api_ok) {
1272 if (api_ok != api_max)
1273 IWL_ERR(drv, "Firmware has old API version, "
1274 "expected v%u through v%u, got v%u.\n",
1275 api_ok, api_max, api_ver);
1276 else
1277 IWL_ERR(drv, "Firmware has old API version, "
1278 "expected v%u, got v%u.\n",
1279 api_max, api_ver);
1280 IWL_ERR(drv, "New firmware can be obtained from "
1281 "http://www.intellinuxwireless.org/.\n");
1282 }
1283 } 1299 }
1284 1300
1285 /* 1301 /*
@@ -1368,6 +1384,17 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1368 } 1384 }
1369 } 1385 }
1370 1386
1387 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
1388 if (pieces->dbg_mem_tlv[i]) {
1389 drv->fw.dbg_mem_tlv[i] =
1390 kmemdup(pieces->dbg_mem_tlv[i],
1391 sizeof(*drv->fw.dbg_mem_tlv[i]),
1392 GFP_KERNEL);
1393 if (!drv->fw.dbg_mem_tlv[i])
1394 goto out_free_fw;
1395 }
1396 }
1397
1371 /* Now that we can no longer fail, copy information */ 1398 /* Now that we can no longer fail, copy information */
1372 1399
1373 /* 1400 /*
@@ -1560,9 +1587,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
1560 .power_level = IWL_POWER_INDEX_1, 1587 .power_level = IWL_POWER_INDEX_1,
1561 .d0i3_disable = true, 1588 .d0i3_disable = true,
1562 .d0i3_entry_delay = 1000, 1589 .d0i3_entry_delay = 1000,
1563#ifndef CONFIG_IWLWIFI_UAPSD 1590 .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
1564 .uapsd_disable = true,
1565#endif /* CONFIG_IWLWIFI_UAPSD */
1566 /* the rest are 0 by default */ 1591 /* the rest are 0 by default */
1567}; 1592};
1568IWL_EXPORT_SYMBOL(iwlwifi_mod_params); 1593IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1681,12 +1706,9 @@ module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
1681MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); 1706MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
1682 1707
1683module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, 1708module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
1684 bool, S_IRUGO | S_IWUSR); 1709 uint, S_IRUGO | S_IWUSR);
1685#ifdef CONFIG_IWLWIFI_UAPSD 1710MODULE_PARM_DESC(uapsd_disable,
1686MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)"); 1711 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
1687#else
1688MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)");
1689#endif
1690 1712
1691/* 1713/*
1692 * set bt_coex_active to true, uCode will do kill/defer 1714 * set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
index 8425e1a587d9..09b7ea28f4a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
@@ -105,6 +105,7 @@ enum iwl_fw_error_dump_type {
105 IWL_FW_ERROR_DUMP_RB = 11, 105 IWL_FW_ERROR_DUMP_RB = 11,
106 IWL_FW_ERROR_DUMP_PAGING = 12, 106 IWL_FW_ERROR_DUMP_PAGING = 12,
107 IWL_FW_ERROR_DUMP_RADIO_REG = 13, 107 IWL_FW_ERROR_DUMP_RADIO_REG = 13,
108 IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
108 109
109 IWL_FW_ERROR_DUMP_MAX, 110 IWL_FW_ERROR_DUMP_MAX,
110}; 111};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 15ec4e2907d8..843232bd8bbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -142,6 +142,7 @@ enum iwl_ucode_tlv_type {
142 IWL_UCODE_TLV_FW_DBG_CONF = 39, 142 IWL_UCODE_TLV_FW_DBG_CONF = 39,
143 IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, 143 IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
144 IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, 144 IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
145 IWL_UCODE_TLV_FW_MEM_SEG = 51,
145}; 146};
146 147
147struct iwl_ucode_tlv { 148struct iwl_ucode_tlv {
@@ -245,13 +246,11 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
245 246
246/** 247/**
247 * enum iwl_ucode_tlv_api - ucode api 248 * enum iwl_ucode_tlv_api - ucode api
248 * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
249 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time 249 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
250 * longer than the passive one, which is essential for fragmented scan. 250 * longer than the passive one, which is essential for fragmented scan.
251 * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. 251 * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
252 * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header 252 * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
253 * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params 253 * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
254 * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
255 * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority 254 * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
256 * instead of 3. 255 * instead of 3.
257 * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size 256 * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
@@ -260,12 +259,10 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
260 * @NUM_IWL_UCODE_TLV_API: number of bits used 259 * @NUM_IWL_UCODE_TLV_API: number of bits used
261 */ 260 */
262enum iwl_ucode_tlv_api { 261enum iwl_ucode_tlv_api {
263 IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3,
264 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, 262 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
265 IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, 263 IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
266 IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14, 264 IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
267 IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, 265 IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
268 IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
269 IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, 266 IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
270 IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27, 267 IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27,
271 268
@@ -324,6 +321,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
324 * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command 321 * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
325 * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in 322 * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
326 * regular image. 323 * regular image.
324 * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
325 * memory addresses from the firmware.
326 * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
327 * 327 *
328 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used 328 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
329 */ 329 */
@@ -361,6 +361,8 @@ enum iwl_ucode_tlv_capa {
361 IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, 361 IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
362 IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, 362 IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
363 IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, 363 IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
364 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
365 IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
364 366
365 NUM_IWL_UCODE_TLV_CAPA 367 NUM_IWL_UCODE_TLV_CAPA
366#ifdef __CHECKER__ 368#ifdef __CHECKER__
@@ -491,6 +493,37 @@ enum iwl_fw_dbg_monitor_mode {
491}; 493};
492 494
493/** 495/**
496 * enum iwl_fw_mem_seg_type - data types for dumping on error
497 *
498 * @FW_DBG_MEM_SMEM: the data type is SMEM
499 * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC
500 * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC
501 */
502enum iwl_fw_dbg_mem_seg_type {
503 FW_DBG_MEM_DCCM_LMAC = 0,
504 FW_DBG_MEM_DCCM_UMAC,
505 FW_DBG_MEM_SMEM,
506
507 /* Must be last */
508 FW_DBG_MEM_MAX,
509};
510
511/**
512 * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
513 *
514 * @data_type: enum %iwl_fw_mem_seg_type
515 * @ofs: the memory segment offset
516 * @len: the memory segment length, in bytes
517 *
518 * This parses IWL_UCODE_TLV_FW_MEM_SEG
519 */
520struct iwl_fw_dbg_mem_seg_tlv {
521 __le32 data_type;
522 __le32 ofs;
523 __le32 len;
524} __packed;
525
526/**
494 * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data 527 * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
495 * 528 *
496 * @version: version of the TLV - currently 0 529 * @version: version of the TLV - currently 0
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index 2942571c613f..e461d631893a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -286,6 +286,8 @@ struct iwl_fw {
286 struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; 286 struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
287 size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; 287 size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
288 struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; 288 struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
289 struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
290 bool dbg_dynamic_mem;
289 size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; 291 size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
290 u8 dbg_dest_reg_num; 292 u8 dbg_dest_reg_num;
291 struct iwl_gscan_capabilities gscan_capa; 293 struct iwl_gscan_capabilities gscan_capa;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index d1a5dd1602f5..6c5c2f9f73a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -92,6 +92,11 @@ enum iwl_amsdu_size {
92 IWL_AMSDU_12K = 2, 92 IWL_AMSDU_12K = 2,
93}; 93};
94 94
95enum iwl_uapsd_disable {
96 IWL_DISABLE_UAPSD_BSS = BIT(0),
97 IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1),
98};
99
95/** 100/**
96 * struct iwl_mod_params 101 * struct iwl_mod_params
97 * 102 *
@@ -109,7 +114,8 @@ enum iwl_amsdu_size {
109 * @debug_level: levels are IWL_DL_* 114 * @debug_level: levels are IWL_DL_*
110 * @ant_coupling: antenna coupling in dB, default = 0 115 * @ant_coupling: antenna coupling in dB, default = 0
111 * @nvm_file: specifies a external NVM file 116 * @nvm_file: specifies a external NVM file
112 * @uapsd_disable: disable U-APSD, default = 1 117 * @uapsd_disable: disable U-APSD, see %enum iwl_uapsd_disable, default =
118 * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
113 * @d0i3_disable: disable d0i3, default = 1, 119 * @d0i3_disable: disable d0i3, default = 1,
114 * @d0i3_entry_delay: time to wait after no refs are taken before 120 * @d0i3_entry_delay: time to wait after no refs are taken before
115 * entering D0i3 (in msecs) 121 * entering D0i3 (in msecs)
@@ -131,7 +137,7 @@ struct iwl_mod_params {
131#endif 137#endif
132 int ant_coupling; 138 int ant_coupling;
133 char *nvm_file; 139 char *nvm_file;
134 bool uapsd_disable; 140 u32 uapsd_disable;
135 bool d0i3_disable; 141 bool d0i3_disable;
136 unsigned int d0i3_entry_delay; 142 unsigned int d0i3_entry_delay;
137 bool lar_disable; 143 bool lar_disable;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index c46e596e12b1..6c1d20ded04b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
33 * 34 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 35 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 Intel Deutschland GmbH
36 * All rights reserved. 38 * All rights reserved.
37 * 39 *
38 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -345,6 +347,16 @@ enum secure_load_status_reg {
345#define TXF_READ_MODIFY_DATA (0xa00448) 347#define TXF_READ_MODIFY_DATA (0xa00448)
346#define TXF_READ_MODIFY_ADDR (0xa0044c) 348#define TXF_READ_MODIFY_ADDR (0xa0044c)
347 349
350/* UMAC Internal Tx Fifo */
351#define TXF_CPU2_FIFO_ITEM_CNT (0xA00538)
352#define TXF_CPU2_WR_PTR (0xA00514)
353#define TXF_CPU2_RD_PTR (0xA00510)
354#define TXF_CPU2_FENCE_PTR (0xA00518)
355#define TXF_CPU2_LOCK_FENCE (0xA00524)
356#define TXF_CPU2_NUM (0xA0053C)
357#define TXF_CPU2_READ_MODIFY_DATA (0xA00548)
358#define TXF_CPU2_READ_MODIFY_ADDR (0xA0054C)
359
348/* Radio registers access */ 360/* Radio registers access */
349#define RSP_RADIO_CMD (0xa02804) 361#define RSP_RADIO_CMD (0xa02804)
350#define RSP_RADIO_RDDAT (0xa02814) 362#define RSP_RADIO_RDDAT (0xa02814)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 91d74b3f666b..fa4ab4b9436f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
33 * 34 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 35 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 Intel Deutschland GmbH
36 * All rights reserved. 38 * All rights reserved.
37 * 39 *
38 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -519,7 +521,7 @@ struct iwl_trans;
519 521
520struct iwl_trans_txq_scd_cfg { 522struct iwl_trans_txq_scd_cfg {
521 u8 fifo; 523 u8 fifo;
522 s8 sta_id; 524 u8 sta_id;
523 u8 tid; 525 u8 tid;
524 bool aggregate; 526 bool aggregate;
525 int frame_limit; 527 int frame_limit;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 23e7e2937566..2e06dfc1c477 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o 2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
3iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o 3iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
4iwlmvm-y += scan.o time-event.o rs.o 4iwlmvm-y += scan.o time-event.o rs.o
5iwlmvm-y += power.o coex.o coex_legacy.o 5iwlmvm-y += power.o coex.o
6iwlmvm-y += tt.o offloading.o tdls.o 6iwlmvm-y += tt.o offloading.o tdls.o
7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o 7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
8iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o 8iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 2e098f8e0f83..35cdeca3d61e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -411,9 +411,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
411 struct iwl_bt_coex_cmd bt_cmd = {}; 411 struct iwl_bt_coex_cmd bt_cmd = {};
412 u32 mode; 412 u32 mode;
413 413
414 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
415 return iwl_send_bt_init_conf_old(mvm);
416
417 lockdep_assert_held(&mvm->mutex); 414 lockdep_assert_held(&mvm->mutex);
418 415
419 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { 416 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -728,12 +725,6 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
728 struct iwl_rx_packet *pkt = rxb_addr(rxb); 725 struct iwl_rx_packet *pkt = rxb_addr(rxb);
729 struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; 726 struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
730 727
731 if (!fw_has_api(&mvm->fw->ucode_capa,
732 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
733 iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
734 return;
735 }
736
737 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); 728 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
738 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); 729 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
739 IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", 730 IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
@@ -755,12 +746,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
755 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 746 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
756 int ret; 747 int ret;
757 748
758 if (!fw_has_api(&mvm->fw->ucode_capa,
759 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
760 iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
761 return;
762 }
763
764 lockdep_assert_held(&mvm->mutex); 749 lockdep_assert_held(&mvm->mutex);
765 750
766 /* Ignore updates if we are in force mode */ 751 /* Ignore updates if we are in force mode */
@@ -807,9 +792,6 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
807 struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; 792 struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
808 enum iwl_bt_coex_lut_type lut_type; 793 enum iwl_bt_coex_lut_type lut_type;
809 794
810 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
811 return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
812
813 if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) 795 if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
814 return LINK_QUAL_AGG_TIME_LIMIT_DEF; 796 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
815 797
@@ -834,9 +816,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
834 struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; 816 struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
835 enum iwl_bt_coex_lut_type lut_type; 817 enum iwl_bt_coex_lut_type lut_type;
836 818
837 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
838 return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
839
840 if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) 819 if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
841 return true; 820 return true;
842 821
@@ -864,9 +843,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
864 if (ant & mvm->cfg->non_shared_ant) 843 if (ant & mvm->cfg->non_shared_ant)
865 return true; 844 return true;
866 845
867 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
868 return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
869
870 return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < 846 return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
871 BT_HIGH_TRAFFIC; 847 BT_HIGH_TRAFFIC;
872} 848}
@@ -877,9 +853,6 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
877 if (mvm->cfg->bt_shared_single_ant) 853 if (mvm->cfg->bt_shared_single_ant)
878 return true; 854 return true;
879 855
880 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
881 return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
882
883 return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; 856 return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
884} 857}
885 858
@@ -888,9 +861,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
888{ 861{
889 u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); 862 u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
890 863
891 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
892 return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
893
894 if (band != IEEE80211_BAND_2GHZ) 864 if (band != IEEE80211_BAND_2GHZ)
895 return false; 865 return false;
896 866
@@ -937,12 +907,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
937 907
938void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) 908void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
939{ 909{
940 if (!fw_has_api(&mvm->fw->ucode_capa,
941 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
942 iwl_mvm_bt_coex_vif_change_old(mvm);
943 return;
944 }
945
946 iwl_mvm_bt_coex_notif_handle(mvm); 910 iwl_mvm_bt_coex_notif_handle(mvm);
947} 911}
948 912
@@ -955,12 +919,6 @@ void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
955 u8 __maybe_unused lower_bound, upper_bound; 919 u8 __maybe_unused lower_bound, upper_bound;
956 u8 lut; 920 u8 lut;
957 921
958 if (!fw_has_api(&mvm->fw->ucode_capa,
959 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
960 iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
961 return;
962 }
963
964 if (!iwl_mvm_bt_is_plcr_supported(mvm)) 922 if (!iwl_mvm_bt_is_plcr_supported(mvm))
965 return; 923 return;
966 924
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
deleted file mode 100644
index 015045733444..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
+++ /dev/null
@@ -1,1315 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <linuxwifi@intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/ieee80211.h>
67#include <linux/etherdevice.h>
68#include <net/mac80211.h>
69
70#include "fw-api-coex.h"
71#include "iwl-modparams.h"
72#include "mvm.h"
73#include "iwl-debug.h"
74
75#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \
76 [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \
77 ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
78
79static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
80 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
81 BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
82 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
83 BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
84 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
85 BT_COEX_PRIO_TBL_PRIO_LOW, 0),
86 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
87 BT_COEX_PRIO_TBL_PRIO_LOW, 1),
88 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
89 BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
90 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
91 BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
92 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
93 BT_COEX_PRIO_TBL_DISABLED, 0),
94 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
95 BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
96 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
97 BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
98 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
99 BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
100 0, 0, 0, 0, 0, 0,
101};
102
103#undef EVENT_PRIO_ANT
104
105static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
106{
107 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
108 return 0;
109
110 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
111 sizeof(struct iwl_bt_coex_prio_tbl_cmd),
112 &iwl_bt_prio_tbl);
113}
114
115static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
116 cpu_to_le32(0xf0f0f0f0), /* 50% */
117 cpu_to_le32(0xc0c0c0c0), /* 25% */
118 cpu_to_le32(0xfcfcfcfc), /* 75% */
119 cpu_to_le32(0xfefefefe), /* 87.5% */
120};
121
122static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
123 {
124 cpu_to_le32(0x40000000),
125 cpu_to_le32(0x00000000),
126 cpu_to_le32(0x44000000),
127 cpu_to_le32(0x00000000),
128 cpu_to_le32(0x40000000),
129 cpu_to_le32(0x00000000),
130 cpu_to_le32(0x44000000),
131 cpu_to_le32(0x00000000),
132 cpu_to_le32(0xc0004000),
133 cpu_to_le32(0xf0005000),
134 cpu_to_le32(0xc0004000),
135 cpu_to_le32(0xf0005000),
136 },
137 {
138 cpu_to_le32(0x40000000),
139 cpu_to_le32(0x00000000),
140 cpu_to_le32(0x44000000),
141 cpu_to_le32(0x00000000),
142 cpu_to_le32(0x40000000),
143 cpu_to_le32(0x00000000),
144 cpu_to_le32(0x44000000),
145 cpu_to_le32(0x00000000),
146 cpu_to_le32(0xc0004000),
147 cpu_to_le32(0xf0005000),
148 cpu_to_le32(0xc0004000),
149 cpu_to_le32(0xf0005000),
150 },
151 {
152 cpu_to_le32(0x40000000),
153 cpu_to_le32(0x00000000),
154 cpu_to_le32(0x44000000),
155 cpu_to_le32(0x00000000),
156 cpu_to_le32(0x40000000),
157 cpu_to_le32(0x00000000),
158 cpu_to_le32(0x44000000),
159 cpu_to_le32(0x00000000),
160 cpu_to_le32(0xc0004000),
161 cpu_to_le32(0xf0005000),
162 cpu_to_le32(0xc0004000),
163 cpu_to_le32(0xf0005000),
164 },
165};
166
167static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
168 {
169 /* Tight */
170 cpu_to_le32(0xaaaaaaaa),
171 cpu_to_le32(0xaaaaaaaa),
172 cpu_to_le32(0xaeaaaaaa),
173 cpu_to_le32(0xaaaaaaaa),
174 cpu_to_le32(0xcc00ff28),
175 cpu_to_le32(0x0000aaaa),
176 cpu_to_le32(0xcc00aaaa),
177 cpu_to_le32(0x0000aaaa),
178 cpu_to_le32(0xc0004000),
179 cpu_to_le32(0x00004000),
180 cpu_to_le32(0xf0005000),
181 cpu_to_le32(0xf0005000),
182 },
183 {
184 /* Loose */
185 cpu_to_le32(0xaaaaaaaa),
186 cpu_to_le32(0xaaaaaaaa),
187 cpu_to_le32(0xaaaaaaaa),
188 cpu_to_le32(0xaaaaaaaa),
189 cpu_to_le32(0xcc00ff28),
190 cpu_to_le32(0x0000aaaa),
191 cpu_to_le32(0xcc00aaaa),
192 cpu_to_le32(0x0000aaaa),
193 cpu_to_le32(0x00000000),
194 cpu_to_le32(0x00000000),
195 cpu_to_le32(0xf0005000),
196 cpu_to_le32(0xf0005000),
197 },
198 {
199 /* Tx Tx disabled */
200 cpu_to_le32(0xaaaaaaaa),
201 cpu_to_le32(0xaaaaaaaa),
202 cpu_to_le32(0xeeaaaaaa),
203 cpu_to_le32(0xaaaaaaaa),
204 cpu_to_le32(0xcc00ff28),
205 cpu_to_le32(0x0000aaaa),
206 cpu_to_le32(0xcc00aaaa),
207 cpu_to_le32(0x0000aaaa),
208 cpu_to_le32(0xc0004000),
209 cpu_to_le32(0xc0004000),
210 cpu_to_le32(0xf0005000),
211 cpu_to_le32(0xf0005000),
212 },
213};
214
215/* 20MHz / 40MHz below / 40Mhz above*/
216static const __le64 iwl_ci_mask[][3] = {
217 /* dummy entry for channel 0 */
218 {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
219 {
220 cpu_to_le64(0x0000001FFFULL),
221 cpu_to_le64(0x0ULL),
222 cpu_to_le64(0x00007FFFFFULL),
223 },
224 {
225 cpu_to_le64(0x000000FFFFULL),
226 cpu_to_le64(0x0ULL),
227 cpu_to_le64(0x0003FFFFFFULL),
228 },
229 {
230 cpu_to_le64(0x000003FFFCULL),
231 cpu_to_le64(0x0ULL),
232 cpu_to_le64(0x000FFFFFFCULL),
233 },
234 {
235 cpu_to_le64(0x00001FFFE0ULL),
236 cpu_to_le64(0x0ULL),
237 cpu_to_le64(0x007FFFFFE0ULL),
238 },
239 {
240 cpu_to_le64(0x00007FFF80ULL),
241 cpu_to_le64(0x00007FFFFFULL),
242 cpu_to_le64(0x01FFFFFF80ULL),
243 },
244 {
245 cpu_to_le64(0x0003FFFC00ULL),
246 cpu_to_le64(0x0003FFFFFFULL),
247 cpu_to_le64(0x0FFFFFFC00ULL),
248 },
249 {
250 cpu_to_le64(0x000FFFF000ULL),
251 cpu_to_le64(0x000FFFFFFCULL),
252 cpu_to_le64(0x3FFFFFF000ULL),
253 },
254 {
255 cpu_to_le64(0x007FFF8000ULL),
256 cpu_to_le64(0x007FFFFFE0ULL),
257 cpu_to_le64(0xFFFFFF8000ULL),
258 },
259 {
260 cpu_to_le64(0x01FFFE0000ULL),
261 cpu_to_le64(0x01FFFFFF80ULL),
262 cpu_to_le64(0xFFFFFE0000ULL),
263 },
264 {
265 cpu_to_le64(0x0FFFF00000ULL),
266 cpu_to_le64(0x0FFFFFFC00ULL),
267 cpu_to_le64(0x0ULL),
268 },
269 {
270 cpu_to_le64(0x3FFFC00000ULL),
271 cpu_to_le64(0x3FFFFFF000ULL),
272 cpu_to_le64(0x0)
273 },
274 {
275 cpu_to_le64(0xFFFE000000ULL),
276 cpu_to_le64(0xFFFFFF8000ULL),
277 cpu_to_le64(0x0)
278 },
279 {
280 cpu_to_le64(0xFFF8000000ULL),
281 cpu_to_le64(0xFFFFFE0000ULL),
282 cpu_to_le64(0x0)
283 },
284 {
285 cpu_to_le64(0xFFC0000000ULL),
286 cpu_to_le64(0x0ULL),
287 cpu_to_le64(0x0ULL)
288 },
289};
290
291enum iwl_bt_kill_msk {
292 BT_KILL_MSK_DEFAULT,
293 BT_KILL_MSK_NEVER,
294 BT_KILL_MSK_ALWAYS,
295 BT_KILL_MSK_MAX,
296};
297
298static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
299 [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
300 [BT_KILL_MSK_NEVER] = 0xffffffff,
301 [BT_KILL_MSK_ALWAYS] = 0,
302};
303
304static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
305 {
306 BT_KILL_MSK_ALWAYS,
307 BT_KILL_MSK_ALWAYS,
308 BT_KILL_MSK_ALWAYS,
309 },
310 {
311 BT_KILL_MSK_NEVER,
312 BT_KILL_MSK_NEVER,
313 BT_KILL_MSK_NEVER,
314 },
315 {
316 BT_KILL_MSK_NEVER,
317 BT_KILL_MSK_NEVER,
318 BT_KILL_MSK_NEVER,
319 },
320 {
321 BT_KILL_MSK_DEFAULT,
322 BT_KILL_MSK_NEVER,
323 BT_KILL_MSK_DEFAULT,
324 },
325};
326
327static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
328 {
329 BT_KILL_MSK_ALWAYS,
330 BT_KILL_MSK_ALWAYS,
331 BT_KILL_MSK_ALWAYS,
332 },
333 {
334 BT_KILL_MSK_ALWAYS,
335 BT_KILL_MSK_ALWAYS,
336 BT_KILL_MSK_ALWAYS,
337 },
338 {
339 BT_KILL_MSK_ALWAYS,
340 BT_KILL_MSK_ALWAYS,
341 BT_KILL_MSK_ALWAYS,
342 },
343 {
344 BT_KILL_MSK_DEFAULT,
345 BT_KILL_MSK_ALWAYS,
346 BT_KILL_MSK_DEFAULT,
347 },
348};
349
350struct corunning_block_luts {
351 u8 range;
352 __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
353};
354
355/*
356 * Ranges for the antenna coupling calibration / co-running block LUT:
357 * LUT0: [ 0, 12[
358 * LUT1: [12, 20[
359 * LUT2: [20, 21[
360 * LUT3: [21, 23[
361 * LUT4: [23, 27[
362 * LUT5: [27, 30[
363 * LUT6: [30, 32[
364 * LUT7: [32, 33[
365 * LUT8: [33, - [
366 */
367static const struct corunning_block_luts antenna_coupling_ranges[] = {
368 {
369 .range = 0,
370 .lut20 = {
371 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
372 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
373 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
374 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
375 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
376 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
377 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
378 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
379 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
380 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
381 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
382 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
383 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
384 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
385 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
386 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
387 },
388 },
389 {
390 .range = 12,
391 .lut20 = {
392 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
393 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
394 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
395 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
396 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
397 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
398 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
399 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
400 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
401 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
402 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
403 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
404 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
405 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
406 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
407 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
408 },
409 },
410 {
411 .range = 20,
412 .lut20 = {
413 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
414 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
415 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
416 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
417 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
418 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
419 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
420 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
421 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
422 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
423 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
424 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
425 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
426 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
427 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
428 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
429 },
430 },
431 {
432 .range = 21,
433 .lut20 = {
434 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
435 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
436 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
437 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
438 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
439 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
440 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
441 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
442 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
443 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
444 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
445 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
446 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
447 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
448 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
449 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
450 },
451 },
452 {
453 .range = 23,
454 .lut20 = {
455 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
456 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
457 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
458 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
459 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
460 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
461 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
462 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
463 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
464 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
465 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
466 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
467 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
468 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
469 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
470 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
471 },
472 },
473 {
474 .range = 27,
475 .lut20 = {
476 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
477 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
478 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
479 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
480 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
481 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
482 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
483 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
484 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
485 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
486 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
487 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
488 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
489 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
490 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
491 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
492 },
493 },
494 {
495 .range = 30,
496 .lut20 = {
497 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
498 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
499 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
500 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
501 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
502 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
503 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
504 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
505 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
506 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
507 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
508 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
509 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
510 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
511 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
512 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
513 },
514 },
515 {
516 .range = 32,
517 .lut20 = {
518 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
519 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
520 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
521 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
522 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
523 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
524 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
525 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
526 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
527 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
528 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
529 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
530 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
531 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
532 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
533 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
534 },
535 },
536 {
537 .range = 33,
538 .lut20 = {
539 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
540 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
541 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
542 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
543 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
544 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
545 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
546 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
547 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
548 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
549 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
550 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
551 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
552 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
553 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
554 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
555 },
556 },
557};
558
559static enum iwl_bt_coex_lut_type
560iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
561{
562 struct ieee80211_chanctx_conf *chanctx_conf;
563 enum iwl_bt_coex_lut_type ret;
564 u16 phy_ctx_id;
565
566 /*
567 * Checking that we hold mvm->mutex is a good idea, but the rate
568 * control can't acquire the mutex since it runs in Tx path.
569 * So this is racy in that case, but in the worst case, the AMPDU
570 * size limit will be wrong for a short time which is not a big
571 * issue.
572 */
573
574 rcu_read_lock();
575
576 chanctx_conf = rcu_dereference(vif->chanctx_conf);
577
578 if (!chanctx_conf ||
579 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
580 rcu_read_unlock();
581 return BT_COEX_INVALID_LUT;
582 }
583
584 ret = BT_COEX_TX_DIS_LUT;
585
586 if (mvm->cfg->bt_shared_single_ant) {
587 rcu_read_unlock();
588 return ret;
589 }
590
591 phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
592
593 if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id)
594 ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut);
595 else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id)
596 ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut);
597 /* else - default = TX TX disallowed */
598
599 rcu_read_unlock();
600
601 return ret;
602}
603
604int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
605{
606 struct iwl_bt_coex_cmd_old *bt_cmd;
607 struct iwl_host_cmd cmd = {
608 .id = BT_CONFIG,
609 .len = { sizeof(*bt_cmd), },
610 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
611 };
612 int ret;
613 u32 flags;
614
615 ret = iwl_send_bt_prio_tbl(mvm);
616 if (ret)
617 return ret;
618
619 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
620 if (!bt_cmd)
621 return -ENOMEM;
622 cmd.data[0] = bt_cmd;
623
624 lockdep_assert_held(&mvm->mutex);
625
626 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
627 switch (mvm->bt_force_ant_mode) {
628 case BT_FORCE_ANT_AUTO:
629 flags = BT_COEX_AUTO_OLD;
630 break;
631 case BT_FORCE_ANT_BT:
632 flags = BT_COEX_BT_OLD;
633 break;
634 case BT_FORCE_ANT_WIFI:
635 flags = BT_COEX_WIFI_OLD;
636 break;
637 default:
638 WARN_ON(1);
639 flags = 0;
640 }
641
642 bt_cmd->flags = cpu_to_le32(flags);
643 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE);
644 goto send_cmd;
645 }
646
647 bt_cmd->max_kill = 5;
648 bt_cmd->bt4_antenna_isolation_thr =
649 IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
650 bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
651 bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
652 bt_cmd->bt4_tx_rx_max_freq0 = 15;
653 bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
654 bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
655
656 flags = iwlwifi_mod_params.bt_coex_active ?
657 BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD;
658 bt_cmd->flags = cpu_to_le32(flags);
659
660 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
661 BT_VALID_BT_PRIO_BOOST |
662 BT_VALID_MAX_KILL |
663 BT_VALID_3W_TMRS |
664 BT_VALID_KILL_ACK |
665 BT_VALID_KILL_CTS |
666 BT_VALID_REDUCED_TX_POWER |
667 BT_VALID_LUT |
668 BT_VALID_WIFI_RX_SW_PRIO_BOOST |
669 BT_VALID_WIFI_TX_SW_PRIO_BOOST |
670 BT_VALID_ANT_ISOLATION |
671 BT_VALID_ANT_ISOLATION_THRS |
672 BT_VALID_TXTX_DELTA_FREQ_THRS |
673 BT_VALID_TXRX_MAX_FREQ_0 |
674 BT_VALID_SYNC_TO_SCO |
675 BT_VALID_TTC |
676 BT_VALID_RRC);
677
678 if (IWL_MVM_BT_COEX_SYNC2SCO)
679 bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
680
681 if (iwl_mvm_bt_is_plcr_supported(mvm)) {
682 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
683 BT_VALID_CORUN_LUT_40);
684 bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
685 }
686
687 if (IWL_MVM_BT_COEX_MPLUT) {
688 bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
689 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
690 }
691
692 if (IWL_MVM_BT_COEX_TTC)
693 bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
694
695 if (iwl_mvm_bt_is_rrc_supported(mvm))
696 bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
697
698 if (mvm->cfg->bt_shared_single_ant)
699 memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
700 sizeof(iwl_single_shared_ant));
701 else
702 memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
703 sizeof(iwl_combined_lookup));
704
705 /* Take first Co-running block LUT to get started */
706 memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
707 sizeof(bt_cmd->bt4_corun_lut20));
708 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
709 sizeof(bt_cmd->bt4_corun_lut40));
710
711 memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
712 sizeof(iwl_bt_prio_boost));
713 bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
714 bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
715
716send_cmd:
717 memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
718 memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
719
720 ret = iwl_mvm_send_cmd(mvm, &cmd);
721
722 kfree(bt_cmd);
723 return ret;
724}
725
726static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm)
727{
728 struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
729 u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
730 u32 ag = le32_to_cpu(notif->bt_activity_grading);
731 struct iwl_bt_coex_cmd_old *bt_cmd;
732 u8 ack_kill_msk, cts_kill_msk;
733 struct iwl_host_cmd cmd = {
734 .id = BT_CONFIG,
735 .data[0] = &bt_cmd,
736 .len = { sizeof(*bt_cmd), },
737 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
738 };
739 int ret = 0;
740
741 lockdep_assert_held(&mvm->mutex);
742
743 ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut];
744 cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut];
745
746 if (mvm->bt_ack_kill_msk[0] == ack_kill_msk &&
747 mvm->bt_cts_kill_msk[0] == cts_kill_msk)
748 return 0;
749
750 mvm->bt_ack_kill_msk[0] = ack_kill_msk;
751 mvm->bt_cts_kill_msk[0] = cts_kill_msk;
752
753 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
754 if (!bt_cmd)
755 return -ENOMEM;
756 cmd.data[0] = bt_cmd;
757 bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
758
759 bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]);
760 bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]);
761 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
762 BT_VALID_KILL_ACK |
763 BT_VALID_KILL_CTS);
764
765 ret = iwl_mvm_send_cmd(mvm, &cmd);
766
767 kfree(bt_cmd);
768 return ret;
769}
770
771static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
772 bool enable)
773{
774 struct iwl_bt_coex_cmd_old *bt_cmd;
775 /* Send ASYNC since this can be sent from an atomic context */
776 struct iwl_host_cmd cmd = {
777 .id = BT_CONFIG,
778 .len = { sizeof(*bt_cmd), },
779 .dataflags = { IWL_HCMD_DFL_DUP, },
780 .flags = CMD_ASYNC,
781 };
782 struct iwl_mvm_sta *mvmsta;
783 int ret;
784
785 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
786 if (!mvmsta)
787 return 0;
788
789 /* nothing to do */
790 if (mvmsta->bt_reduced_txpower == enable)
791 return 0;
792
793 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
794 if (!bt_cmd)
795 return -ENOMEM;
796 cmd.data[0] = bt_cmd;
797 bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
798
799 bt_cmd->valid_bit_msk =
800 cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
801 bt_cmd->bt_reduced_tx_power = sta_id;
802
803 if (enable)
804 bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
805
806 IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
807 enable ? "en" : "dis", sta_id);
808
809 mvmsta->bt_reduced_txpower = enable;
810
811 ret = iwl_mvm_send_cmd(mvm, &cmd);
812
813 kfree(bt_cmd);
814 return ret;
815}
816
817struct iwl_bt_iterator_data {
818 struct iwl_bt_coex_profile_notif_old *notif;
819 struct iwl_mvm *mvm;
820 struct ieee80211_chanctx_conf *primary;
821 struct ieee80211_chanctx_conf *secondary;
822 bool primary_ll;
823};
824
825static inline
826void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
827 struct ieee80211_vif *vif,
828 bool enable, int rssi)
829{
830 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
831
832 mvmvif->bf_data.last_bt_coex_event = rssi;
833 mvmvif->bf_data.bt_coex_max_thold =
834 enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
835 mvmvif->bf_data.bt_coex_min_thold =
836 enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
837}
838
839/* must be called under rcu_read_lock */
840static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
841 struct ieee80211_vif *vif)
842{
843 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
844 struct iwl_bt_iterator_data *data = _data;
845 struct iwl_mvm *mvm = data->mvm;
846 struct ieee80211_chanctx_conf *chanctx_conf;
847 enum ieee80211_smps_mode smps_mode;
848 u32 bt_activity_grading;
849 int ave_rssi;
850
851 lockdep_assert_held(&mvm->mutex);
852
853 switch (vif->type) {
854 case NL80211_IFTYPE_STATION:
855 /* default smps_mode for BSS / P2P client is AUTOMATIC */
856 smps_mode = IEEE80211_SMPS_AUTOMATIC;
857 break;
858 case NL80211_IFTYPE_AP:
859 if (!mvmvif->ap_ibss_active)
860 return;
861 break;
862 default:
863 return;
864 }
865
866 chanctx_conf = rcu_dereference(vif->chanctx_conf);
867
868 /* If channel context is invalid or not on 2.4GHz .. */
869 if ((!chanctx_conf ||
870 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
871 if (vif->type == NL80211_IFTYPE_STATION) {
872 /* ... relax constraints and disable rssi events */
873 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
874 smps_mode);
875 iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
876 false);
877 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
878 }
879 return;
880 }
881
882 bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
883 if (bt_activity_grading >= BT_HIGH_TRAFFIC)
884 smps_mode = IEEE80211_SMPS_STATIC;
885 else if (bt_activity_grading >= BT_LOW_TRAFFIC)
886 smps_mode = vif->type == NL80211_IFTYPE_AP ?
887 IEEE80211_SMPS_OFF :
888 IEEE80211_SMPS_DYNAMIC;
889
890 /* relax SMPS contraints for next association */
891 if (!vif->bss_conf.assoc)
892 smps_mode = IEEE80211_SMPS_AUTOMATIC;
893
894 if (mvmvif->phy_ctxt &&
895 data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
896 smps_mode = IEEE80211_SMPS_AUTOMATIC;
897
898 IWL_DEBUG_COEX(data->mvm,
899 "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
900 mvmvif->id, data->notif->bt_status, bt_activity_grading,
901 smps_mode);
902
903 if (vif->type == NL80211_IFTYPE_STATION)
904 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
905 smps_mode);
906
907 /* low latency is always primary */
908 if (iwl_mvm_vif_low_latency(mvmvif)) {
909 data->primary_ll = true;
910
911 data->secondary = data->primary;
912 data->primary = chanctx_conf;
913 }
914
915 if (vif->type == NL80211_IFTYPE_AP) {
916 if (!mvmvif->ap_ibss_active)
917 return;
918
919 if (chanctx_conf == data->primary)
920 return;
921
922 if (!data->primary_ll) {
923 /*
924 * downgrade the current primary no matter what its
925 * type is.
926 */
927 data->secondary = data->primary;
928 data->primary = chanctx_conf;
929 } else {
930 /* there is low latency vif - we will be secondary */
931 data->secondary = chanctx_conf;
932 }
933 return;
934 }
935
936 /*
937 * STA / P2P Client, try to be primary if first vif. If we are in low
938 * latency mode, we are already in primary and just don't do much
939 */
940 if (!data->primary || data->primary == chanctx_conf)
941 data->primary = chanctx_conf;
942 else if (!data->secondary)
943 /* if secondary is not NULL, it might be a GO */
944 data->secondary = chanctx_conf;
945
946 /*
947 * don't reduce the Tx power if one of these is true:
948 * we are in LOOSE
949 * single share antenna product
950 * BT is active
951 * we are associated
952 */
953 if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
954 mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
955 !data->notif->bt_status) {
956 iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
957 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
958 return;
959 }
960
961 /* try to get the avg rssi from fw */
962 ave_rssi = mvmvif->bf_data.ave_beacon_signal;
963
964 /* if the RSSI isn't valid, fake it is very low */
965 if (!ave_rssi)
966 ave_rssi = -100;
967 if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
968 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
969 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
970 } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
971 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
972 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
973 }
974
975 /* Begin to monitor the RSSI: it may influence the reduced Tx power */
976 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
977}
978
979static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
980{
981 struct iwl_bt_iterator_data data = {
982 .mvm = mvm,
983 .notif = &mvm->last_bt_notif_old,
984 };
985 struct iwl_bt_coex_ci_cmd_old cmd = {};
986 u8 ci_bw_idx;
987
988 /* Ignore updates if we are in force mode */
989 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
990 return;
991
992 rcu_read_lock();
993 ieee80211_iterate_active_interfaces_atomic(
994 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
995 iwl_mvm_bt_notif_iterator, &data);
996
997 if (data.primary) {
998 struct ieee80211_chanctx_conf *chan = data.primary;
999
1000 if (WARN_ON(!chan->def.chan)) {
1001 rcu_read_unlock();
1002 return;
1003 }
1004
1005 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
1006 ci_bw_idx = 0;
1007 cmd.co_run_bw_primary = 0;
1008 } else {
1009 cmd.co_run_bw_primary = 1;
1010 if (chan->def.center_freq1 >
1011 chan->def.chan->center_freq)
1012 ci_bw_idx = 2;
1013 else
1014 ci_bw_idx = 1;
1015 }
1016
1017 cmd.bt_primary_ci =
1018 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
1019 cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
1020 }
1021
1022 if (data.secondary) {
1023 struct ieee80211_chanctx_conf *chan = data.secondary;
1024
1025 if (WARN_ON(!data.secondary->def.chan)) {
1026 rcu_read_unlock();
1027 return;
1028 }
1029
1030 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
1031 ci_bw_idx = 0;
1032 cmd.co_run_bw_secondary = 0;
1033 } else {
1034 cmd.co_run_bw_secondary = 1;
1035 if (chan->def.center_freq1 >
1036 chan->def.chan->center_freq)
1037 ci_bw_idx = 2;
1038 else
1039 ci_bw_idx = 1;
1040 }
1041
1042 cmd.bt_secondary_ci =
1043 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
1044 cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
1045 }
1046
1047 rcu_read_unlock();
1048
1049 /* Don't spam the fw with the same command over and over */
1050 if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) {
1051 if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
1052 sizeof(cmd), &cmd))
1053 IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
1054 memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
1055 }
1056
1057 if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
1058 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
1059}
1060
1061void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
1062 struct iwl_rx_cmd_buffer *rxb)
1063{
1064 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1065 struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
1066
1067 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
1068 IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
1069 notif->bt_status ? "ON" : "OFF");
1070 IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
1071 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
1072 IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
1073 le32_to_cpu(notif->primary_ch_lut));
1074 IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
1075 le32_to_cpu(notif->secondary_ch_lut));
1076 IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
1077 le32_to_cpu(notif->bt_activity_grading));
1078 IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
1079 notif->bt_agg_traffic_load);
1080
1081 /* remember this notification for future use: rssi fluctuations */
1082 memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
1083
1084 iwl_mvm_bt_coex_notif_handle(mvm);
1085}
1086
1087static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
1088 struct ieee80211_vif *vif)
1089{
1090 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1091 struct iwl_bt_iterator_data *data = _data;
1092 struct iwl_mvm *mvm = data->mvm;
1093
1094 struct ieee80211_sta *sta;
1095 struct iwl_mvm_sta *mvmsta;
1096
1097 struct ieee80211_chanctx_conf *chanctx_conf;
1098
1099 rcu_read_lock();
1100 chanctx_conf = rcu_dereference(vif->chanctx_conf);
1101 /* If channel context is invalid or not on 2.4GHz - don't count it */
1102 if (!chanctx_conf ||
1103 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
1104 rcu_read_unlock();
1105 return;
1106 }
1107 rcu_read_unlock();
1108
1109 if (vif->type != NL80211_IFTYPE_STATION ||
1110 mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
1111 return;
1112
1113 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
1114 lockdep_is_held(&mvm->mutex));
1115
1116 /* This can happen if the station has been removed right now */
1117 if (IS_ERR_OR_NULL(sta))
1118 return;
1119
1120 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1121}
1122
1123void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1124 enum ieee80211_rssi_event_data rssi_event)
1125{
1126 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1127 struct iwl_bt_iterator_data data = {
1128 .mvm = mvm,
1129 };
1130 int ret;
1131
1132 lockdep_assert_held(&mvm->mutex);
1133
1134 /* Ignore updates if we are in force mode */
1135 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
1136 return;
1137
1138 /*
1139 * Rssi update while not associated - can happen since the statistics
1140 * are handled asynchronously
1141 */
1142 if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
1143 return;
1144
1145 /* No BT - reports should be disabled */
1146 if (!mvm->last_bt_notif_old.bt_status)
1147 return;
1148
1149 IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
1150 rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
1151
1152 /*
1153 * Check if rssi is good enough for reduced Tx power, but not in loose
1154 * scheme.
1155 */
1156 if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
1157 iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
1158 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
1159 false);
1160 else
1161 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
1162
1163 if (ret)
1164 IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
1165
1166 ieee80211_iterate_active_interfaces_atomic(
1167 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1168 iwl_mvm_bt_rssi_iterator, &data);
1169
1170 if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
1171 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
1172}
1173
1174#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
1175#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
1176
1177u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
1178 struct ieee80211_sta *sta)
1179{
1180 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1181 enum iwl_bt_coex_lut_type lut_type;
1182
1183 if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
1184 BT_HIGH_TRAFFIC)
1185 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
1186
1187 if (mvm->last_bt_notif_old.ttc_enabled)
1188 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
1189
1190 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
1191
1192 if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
1193 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
1194
1195 /* tight coex, high bt traffic, reduce AGG time limit */
1196 return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
1197}
1198
1199bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
1200 struct ieee80211_sta *sta)
1201{
1202 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1203 enum iwl_bt_coex_lut_type lut_type;
1204
1205 if (mvm->last_bt_notif_old.ttc_enabled)
1206 return true;
1207
1208 if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
1209 BT_HIGH_TRAFFIC)
1210 return true;
1211
1212 /*
1213 * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
1214 * since BT is already killed.
1215 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
1216 * we Tx.
1217 * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
1218 */
1219 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
1220 return lut_type != BT_COEX_LOOSE_LUT;
1221}
1222
1223bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
1224{
1225 u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
1226 return ag < BT_HIGH_TRAFFIC;
1227}
1228
1229bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
1230 enum ieee80211_band band)
1231{
1232 u32 bt_activity =
1233 le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
1234
1235 if (band != IEEE80211_BAND_2GHZ)
1236 return false;
1237
1238 return bt_activity >= BT_LOW_TRAFFIC;
1239}
1240
1241void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
1242{
1243 iwl_mvm_bt_coex_notif_handle(mvm);
1244}
1245
1246void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
1247 struct iwl_rx_cmd_buffer *rxb)
1248{
1249 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1250 u32 ant_isolation = le32_to_cpup((void *)pkt->data);
1251 u8 __maybe_unused lower_bound, upper_bound;
1252 u8 lut;
1253
1254 struct iwl_bt_coex_cmd_old *bt_cmd;
1255 struct iwl_host_cmd cmd = {
1256 .id = BT_CONFIG,
1257 .len = { sizeof(*bt_cmd), },
1258 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1259 };
1260
1261 if (!iwl_mvm_bt_is_plcr_supported(mvm))
1262 return;
1263
1264 lockdep_assert_held(&mvm->mutex);
1265
1266 /* Ignore updates if we are in force mode */
1267 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
1268 return;
1269
1270 if (ant_isolation == mvm->last_ant_isol)
1271 return;
1272
1273 for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
1274 if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
1275 break;
1276
1277 lower_bound = antenna_coupling_ranges[lut].range;
1278
1279 if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
1280 upper_bound = antenna_coupling_ranges[lut + 1].range;
1281 else
1282 upper_bound = antenna_coupling_ranges[lut].range;
1283
1284 IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
1285 ant_isolation, lower_bound, upper_bound, lut);
1286
1287 mvm->last_ant_isol = ant_isolation;
1288
1289 if (mvm->last_corun_lut == lut)
1290 return;
1291
1292 mvm->last_corun_lut = lut;
1293
1294 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
1295 if (!bt_cmd)
1296 return;
1297 cmd.data[0] = bt_cmd;
1298
1299 bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
1300 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
1301 BT_VALID_CORUN_LUT_20 |
1302 BT_VALID_CORUN_LUT_40);
1303
1304 /* For the moment, use the same LUT for 20GHz and 40GHz */
1305 memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
1306 sizeof(bt_cmd->bt4_corun_lut20));
1307
1308 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
1309 sizeof(bt_cmd->bt4_corun_lut40));
1310
1311 if (iwl_mvm_send_cmd(mvm, &cmd))
1312 IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
1313
1314 kfree(bt_cmd);
1315}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 4b560e4417ee..b96b1c6a97fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -75,7 +75,6 @@
75#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) 75#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
76#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ 76#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
77#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ 77#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
78#define IWL_MVM_P2P_UAPSD_STANDALONE 0
79#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 78#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
80#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) 79#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
81#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) 80#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c1a313149eed..e3561bbc2468 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
723 return -EIO; 723 return -EIO;
724 } 724 }
725 725
726 ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false); 726 ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
727 if (ret) 727 if (ret)
728 return ret; 728 return ret;
729 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); 729 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 14004456bf55..3a279d3403ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1425,6 +1425,89 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
1425 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 1425 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1426} 1426}
1427 1427
1428static const char * const chanwidths[] = {
1429 [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
1430 [NL80211_CHAN_WIDTH_20] = "ht20",
1431 [NL80211_CHAN_WIDTH_40] = "ht40",
1432 [NL80211_CHAN_WIDTH_80] = "vht80",
1433 [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
1434 [NL80211_CHAN_WIDTH_160] = "vht160",
1435};
1436
1437static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait,
1438 struct iwl_rx_packet *pkt, void *data)
1439{
1440 struct ieee80211_vif *vif = data;
1441 struct iwl_mvm *mvm =
1442 container_of(notif_wait, struct iwl_mvm, notif_wait);
1443 struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data;
1444 u32 num_of_stations = le32_to_cpu(report->number_of_stations);
1445 int i;
1446
1447 IWL_INFO(mvm, "LQM report:\n");
1448 IWL_INFO(mvm, "\tstatus: %d\n", report->status);
1449 IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id));
1450 IWL_INFO(mvm, "\ttx_frame_dropped: %d\n",
1451 le32_to_cpu(report->tx_frame_dropped));
1452 IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n",
1453 le32_to_cpu(report->time_in_measurement_window));
1454 IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n",
1455 le32_to_cpu(report->total_air_time_other_stations));
1456 IWL_INFO(mvm, "\tchannel_freq: %d\n",
1457 vif->bss_conf.chandef.center_freq1);
1458 IWL_INFO(mvm, "\tchannel_width: %s\n",
1459 chanwidths[vif->bss_conf.chandef.width]);
1460 IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations);
1461 for (i = 0; i < num_of_stations; i++)
1462 IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i,
1463 report->frequent_stations_air_time[i]);
1464
1465 return true;
1466}
1467
1468static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif,
1469 char *buf, size_t count,
1470 loff_t *ppos)
1471{
1472 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1473 struct iwl_mvm *mvm = mvmvif->mvm;
1474 struct iwl_notification_wait wait_lqm_notif;
1475 static u16 lqm_notif[] = {
1476 WIDE_ID(MAC_CONF_GROUP,
1477 LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF)
1478 };
1479 int err;
1480 u32 duration;
1481 u32 timeout;
1482
1483 if (sscanf(buf, "%d,%d", &duration, &timeout) != 2)
1484 return -EINVAL;
1485
1486 iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif,
1487 lqm_notif, ARRAY_SIZE(lqm_notif),
1488 iwl_mvm_lqm_notif_wait, vif);
1489 mutex_lock(&mvm->mutex);
1490 err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
1491 duration, timeout);
1492 mutex_unlock(&mvm->mutex);
1493
1494 if (err) {
1495 IWL_ERR(mvm, "Failed to send lqm cmdf(err=%d)\n", err);
1496 iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif);
1497 return err;
1498 }
1499
1500 /* wait for 2 * timeout (safety guard) and convert to jiffies*/
1501 timeout = msecs_to_jiffies((timeout * 2) / 1000);
1502
1503 err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif,
1504 timeout);
1505 if (err)
1506 IWL_ERR(mvm, "Getting lqm notif timed out\n");
1507
1508 return count;
1509}
1510
1428#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ 1511#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
1429 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) 1512 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
1430#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ 1513#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1449,6 +1532,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
1449MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); 1532MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
1450MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); 1533MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
1451MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); 1534MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
1535MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
1452 1536
1453void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1537void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1454{ 1538{
@@ -1488,6 +1572,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1488 S_IRUSR | S_IWUSR); 1572 S_IRUSR | S_IWUSR);
1489 MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 1573 MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
1490 S_IRUSR | S_IWUSR); 1574 S_IRUSR | S_IWUSR);
1575 MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
1491 1576
1492 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && 1577 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
1493 mvmvif == mvm->bf_allowed_vif) 1578 mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a43b3921c4c1..362a54601a80 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -65,6 +65,7 @@
65 *****************************************************************************/ 65 *****************************************************************************/
66#include <linux/vmalloc.h> 66#include <linux/vmalloc.h>
67#include <linux/ieee80211.h> 67#include <linux/ieee80211.h>
68#include <linux/netdevice.h>
68 69
69#include "mvm.h" 70#include "mvm.h"
70#include "fw-dbg.h" 71#include "fw-dbg.h"
@@ -463,69 +464,11 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
463 return pos; 464 return pos;
464} 465}
465 466
466static
467int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
468 char *buf, int pos, int bufsz)
469{
470 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
471
472 BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
473 BT_MBOX_PRINT(0, LE_PROF1, false);
474 BT_MBOX_PRINT(0, LE_PROF2, false);
475 BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
476 BT_MBOX_PRINT(0, CHL_SEQ_N, false);
477 BT_MBOX_PRINT(0, INBAND_S, false);
478 BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
479 BT_MBOX_PRINT(0, LE_SCAN, false);
480 BT_MBOX_PRINT(0, LE_ADV, false);
481 BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
482 BT_MBOX_PRINT(0, OPEN_CON_1, true);
483
484 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
485
486 BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
487 BT_MBOX_PRINT(1, IP_SR, false);
488 BT_MBOX_PRINT(1, LE_MSTR, false);
489 BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
490 BT_MBOX_PRINT(1, MSG_TYPE, false);
491 BT_MBOX_PRINT(1, SSN, true);
492
493 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
494
495 BT_MBOX_PRINT(2, SNIFF_ACT, false);
496 BT_MBOX_PRINT(2, PAG, false);
497 BT_MBOX_PRINT(2, INQUIRY, false);
498 BT_MBOX_PRINT(2, CONN, false);
499 BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
500 BT_MBOX_PRINT(2, DISC, false);
501 BT_MBOX_PRINT(2, SCO_TX_ACT, false);
502 BT_MBOX_PRINT(2, SCO_RX_ACT, false);
503 BT_MBOX_PRINT(2, ESCO_RE_TX, false);
504 BT_MBOX_PRINT(2, SCO_DURATION, true);
505
506 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
507
508 BT_MBOX_PRINT(3, SCO_STATE, false);
509 BT_MBOX_PRINT(3, SNIFF_STATE, false);
510 BT_MBOX_PRINT(3, A2DP_STATE, false);
511 BT_MBOX_PRINT(3, ACL_STATE, false);
512 BT_MBOX_PRINT(3, MSTR_STATE, false);
513 BT_MBOX_PRINT(3, OBX_STATE, false);
514 BT_MBOX_PRINT(3, OPEN_CON_2, false);
515 BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
516 BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
517 BT_MBOX_PRINT(3, INBAND_P, false);
518 BT_MBOX_PRINT(3, MSG_TYPE_2, false);
519 BT_MBOX_PRINT(3, SSN_2, false);
520 BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
521
522 return pos;
523}
524
525static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, 467static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
526 size_t count, loff_t *ppos) 468 size_t count, loff_t *ppos)
527{ 469{
528 struct iwl_mvm *mvm = file->private_data; 470 struct iwl_mvm *mvm = file->private_data;
471 struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
529 char *buf; 472 char *buf;
530 int ret, pos = 0, bufsz = sizeof(char) * 1024; 473 int ret, pos = 0, bufsz = sizeof(char) * 1024;
531 474
@@ -535,52 +478,24 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
535 478
536 mutex_lock(&mvm->mutex); 479 mutex_lock(&mvm->mutex);
537 480
538 if (!fw_has_api(&mvm->fw->ucode_capa, 481 pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
539 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { 482
540 struct iwl_bt_coex_profile_notif_old *notif = 483 pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
541 &mvm->last_bt_notif_old; 484 notif->bt_ci_compliance);
542 485 pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
543 pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz); 486 le32_to_cpu(notif->primary_ch_lut));
544 487 pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
545 pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", 488 le32_to_cpu(notif->secondary_ch_lut));
546 notif->bt_ci_compliance); 489 pos += scnprintf(buf + pos,
547 pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", 490 bufsz - pos, "bt_activity_grading = %d\n",
548 le32_to_cpu(notif->primary_ch_lut)); 491 le32_to_cpu(notif->bt_activity_grading));
549 pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", 492 pos += scnprintf(buf + pos, bufsz - pos,
550 le32_to_cpu(notif->secondary_ch_lut)); 493 "antenna isolation = %d CORUN LUT index = %d\n",
551 pos += scnprintf(buf+pos, 494 mvm->last_ant_isol, mvm->last_corun_lut);
552 bufsz-pos, "bt_activity_grading = %d\n", 495 pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
553 le32_to_cpu(notif->bt_activity_grading)); 496 (notif->ttc_rrc_status >> 4) & 0xF);
554 pos += scnprintf(buf+pos, bufsz-pos, 497 pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
555 "antenna isolation = %d CORUN LUT index = %d\n", 498 notif->ttc_rrc_status & 0xF);
556 mvm->last_ant_isol, mvm->last_corun_lut);
557 pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
558 notif->rrc_enabled);
559 pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
560 notif->ttc_enabled);
561 } else {
562 struct iwl_bt_coex_profile_notif *notif =
563 &mvm->last_bt_notif;
564
565 pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
566
567 pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
568 notif->bt_ci_compliance);
569 pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
570 le32_to_cpu(notif->primary_ch_lut));
571 pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
572 le32_to_cpu(notif->secondary_ch_lut));
573 pos += scnprintf(buf+pos,
574 bufsz-pos, "bt_activity_grading = %d\n",
575 le32_to_cpu(notif->bt_activity_grading));
576 pos += scnprintf(buf+pos, bufsz-pos,
577 "antenna isolation = %d CORUN LUT index = %d\n",
578 mvm->last_ant_isol, mvm->last_corun_lut);
579 pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
580 (notif->ttc_rrc_status >> 4) & 0xF);
581 pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
582 notif->ttc_rrc_status & 0xF);
583 }
584 499
585 pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", 500 pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
586 IWL_MVM_BT_COEX_SYNC2SCO); 501 IWL_MVM_BT_COEX_SYNC2SCO);
@@ -602,44 +517,20 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
602 size_t count, loff_t *ppos) 517 size_t count, loff_t *ppos)
603{ 518{
604 struct iwl_mvm *mvm = file->private_data; 519 struct iwl_mvm *mvm = file->private_data;
520 struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
605 char buf[256]; 521 char buf[256];
606 int bufsz = sizeof(buf); 522 int bufsz = sizeof(buf);
607 int pos = 0; 523 int pos = 0;
608 524
609 mutex_lock(&mvm->mutex); 525 mutex_lock(&mvm->mutex);
610 526
611 if (!fw_has_api(&mvm->fw->ucode_capa, 527 pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
612 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { 528 pos += scnprintf(buf + pos, bufsz - pos,
613 struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old; 529 "\tPrimary Channel Bitmap 0x%016llx\n",
614 530 le64_to_cpu(cmd->bt_primary_ci));
615 pos += scnprintf(buf+pos, bufsz-pos, 531 pos += scnprintf(buf + pos, bufsz - pos,
616 "Channel inhibition CMD\n"); 532 "\tSecondary Channel Bitmap 0x%016llx\n",
617 pos += scnprintf(buf+pos, bufsz-pos, 533 le64_to_cpu(cmd->bt_secondary_ci));
618 "\tPrimary Channel Bitmap 0x%016llx\n",
619 le64_to_cpu(cmd->bt_primary_ci));
620 pos += scnprintf(buf+pos, bufsz-pos,
621 "\tSecondary Channel Bitmap 0x%016llx\n",
622 le64_to_cpu(cmd->bt_secondary_ci));
623
624 pos += scnprintf(buf+pos, bufsz-pos,
625 "BT Configuration CMD - 0=default, 1=never, 2=always\n");
626 pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
627 mvm->bt_ack_kill_msk[0]);
628 pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
629 mvm->bt_cts_kill_msk[0]);
630
631 } else {
632 struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
633
634 pos += scnprintf(buf+pos, bufsz-pos,
635 "Channel inhibition CMD\n");
636 pos += scnprintf(buf+pos, bufsz-pos,
637 "\tPrimary Channel Bitmap 0x%016llx\n",
638 le64_to_cpu(cmd->bt_primary_ci));
639 pos += scnprintf(buf+pos, bufsz-pos,
640 "\tSecondary Channel Bitmap 0x%016llx\n",
641 le64_to_cpu(cmd->bt_secondary_ci));
642 }
643 534
644 mutex_unlock(&mvm->mutex); 535 mutex_unlock(&mvm->mutex);
645 536
@@ -990,8 +881,10 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
990 struct iwl_rss_config_cmd cmd = { 881 struct iwl_rss_config_cmd cmd = {
991 .flags = cpu_to_le32(IWL_RSS_ENABLE), 882 .flags = cpu_to_le32(IWL_RSS_ENABLE),
992 .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | 883 .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
884 IWL_RSS_HASH_TYPE_IPV4_UDP |
993 IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | 885 IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
994 IWL_RSS_HASH_TYPE_IPV6_TCP | 886 IWL_RSS_HASH_TYPE_IPV6_TCP |
887 IWL_RSS_HASH_TYPE_IPV6_UDP |
995 IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, 888 IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
996 }; 889 };
997 int ret, i, num_repeats, nbytes = count / 2; 890 int ret, i, num_repeats, nbytes = count / 2;
@@ -1015,7 +908,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
1015 memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, 908 memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
1016 ARRAY_SIZE(cmd.indirection_table) % nbytes); 909 ARRAY_SIZE(cmd.indirection_table) % nbytes);
1017 910
1018 memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); 911 netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
1019 912
1020 mutex_lock(&mvm->mutex); 913 mutex_lock(&mvm->mutex);
1021 ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); 914 ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
index 7a16e55df012..4c086d048097 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -268,12 +268,25 @@ enum iwl_rx_mpdu_amsdu_info {
268 IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, 268 IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
269}; 269};
270 270
271enum iwl_rx_l3_proto_values {
272 IWL_RX_L3_TYPE_NONE,
273 IWL_RX_L3_TYPE_IPV4,
274 IWL_RX_L3_TYPE_IPV4_FRAG,
275 IWL_RX_L3_TYPE_IPV6_FRAG,
276 IWL_RX_L3_TYPE_IPV6,
277 IWL_RX_L3_TYPE_IPV6_IN_IPV4,
278 IWL_RX_L3_TYPE_ARP,
279 IWL_RX_L3_TYPE_EAPOL,
280};
281
282#define IWL_RX_L3_PROTO_POS 4
283
271enum iwl_rx_l3l4_flags { 284enum iwl_rx_l3l4_flags {
272 IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0), 285 IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0),
273 IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1), 286 IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1),
274 IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2), 287 IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2),
275 IWL_RX_L3L4_TCP_ACK = BIT(3), 288 IWL_RX_L3L4_TCP_ACK = BIT(3),
276 IWL_RX_L3L4_L3_PROTO_MASK = 0xf << 4, 289 IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS,
277 IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8, 290 IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8,
278 IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12, 291 IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12,
279}; 292};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index ba3f0bbddde8..dadcccd88255 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2016 Intel Deutschland GmbH
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -193,11 +194,41 @@ enum iwl_tx_pm_timeouts {
193#define IWL_BAR_DFAULT_RETRY_LIMIT 60 194#define IWL_BAR_DFAULT_RETRY_LIMIT 60
194#define IWL_LOW_RETRY_LIMIT 7 195#define IWL_LOW_RETRY_LIMIT 7
195 196
197/**
198 * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values
199 * @TX_CMD_OFFLD_IP_HDR_OFFSET: offset to start of IP header (in words)
200 * from mac header end. For normal case it is 4 words for SNAP.
201 * note: tx_cmd, mac header and pad are not counted in the offset.
202 * This is used to help the offload in case there is tunneling such as
203 * IPv6 in IPv4, in such case the ip header offset should point to the
204 * inner ip header and IPv4 checksum of the external header should be
205 * calculated by driver.
206 * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
207 * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
208 * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
209 * field. Doesn't include the pad.
210 * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for
211 * alignment
212 * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
213 */
214enum iwl_tx_offload_assist_flags_pos {
215 TX_CMD_OFFLD_IP_HDR = 0,
216 TX_CMD_OFFLD_L4_EN = 6,
217 TX_CMD_OFFLD_L3_EN = 7,
218 TX_CMD_OFFLD_MH_SIZE = 8,
219 TX_CMD_OFFLD_PAD = 13,
220 TX_CMD_OFFLD_AMSDU = 14,
221};
222
223#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f
224#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
225
196/* TODO: complete documentation for try_cnt and btkill_cnt */ 226/* TODO: complete documentation for try_cnt and btkill_cnt */
197/** 227/**
198 * struct iwl_tx_cmd - TX command struct to FW 228 * struct iwl_tx_cmd - TX command struct to FW
199 * ( TX_CMD = 0x1c ) 229 * ( TX_CMD = 0x1c )
200 * @len: in bytes of the payload, see below for details 230 * @len: in bytes of the payload, see below for details
231 * @offload_assist: TX offload configuration
201 * @tx_flags: combination of TX_CMD_FLG_* 232 * @tx_flags: combination of TX_CMD_FLG_*
202 * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is 233 * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
203 * cleared. Combination of RATE_MCS_* 234 * cleared. Combination of RATE_MCS_*
@@ -231,7 +262,7 @@ enum iwl_tx_pm_timeouts {
231 */ 262 */
232struct iwl_tx_cmd { 263struct iwl_tx_cmd {
233 __le16 len; 264 __le16 len;
234 __le16 next_frame_len; 265 __le16 offload_assist;
235 __le32 tx_flags; 266 __le32 tx_flags;
236 struct { 267 struct {
237 u8 try_cnt; 268 u8 try_cnt;
@@ -255,7 +286,7 @@ struct iwl_tx_cmd {
255 __le16 reserved4; 286 __le16 reserved4;
256 u8 payload[0]; 287 u8 payload[0];
257 struct ieee80211_hdr hdr[0]; 288 struct ieee80211_hdr hdr[0];
258} __packed; /* TX_CMD_API_S_VER_3 */ 289} __packed; /* TX_CMD_API_S_VER_6 */
259 290
260/* 291/*
261 * TX response related data 292 * TX response related data
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 4a0fc47c81f2..60eed8485aba 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -80,12 +80,39 @@
80#include "fw-api-stats.h" 80#include "fw-api-stats.h"
81#include "fw-api-tof.h" 81#include "fw-api-tof.h"
82 82
83/* Tx queue numbers */ 83/* Tx queue numbers for non-DQA mode */
84enum { 84enum {
85 IWL_MVM_OFFCHANNEL_QUEUE = 8, 85 IWL_MVM_OFFCHANNEL_QUEUE = 8,
86 IWL_MVM_CMD_QUEUE = 9, 86 IWL_MVM_CMD_QUEUE = 9,
87}; 87};
88 88
89/*
90 * DQA queue numbers
91 *
92 * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
93 * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
94 * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
95 * that we are never left without the possibility to connect to an AP.
96 * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
97 * Each MGMT queue is mapped to a single STA
98 * MGMT frames are frames that return true on ieee80211_is_mgmt()
99 * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
100 * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
101 * DATA frames are intended for !ieee80211_is_mgmt() frames, but if
102 * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
103 * as well
104 * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
105 */
106enum iwl_mvm_dqa_txq {
107 IWL_MVM_DQA_CMD_QUEUE = 0,
108 IWL_MVM_DQA_GCAST_QUEUE = 3,
109 IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
110 IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
111 IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
112 IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
113 IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
114};
115
89enum iwl_mvm_tx_fifo { 116enum iwl_mvm_tx_fifo {
90 IWL_MVM_TX_FIFO_BK = 0, 117 IWL_MVM_TX_FIFO_BK = 0,
91 IWL_MVM_TX_FIFO_BE, 118 IWL_MVM_TX_FIFO_BE,
@@ -279,6 +306,11 @@ enum {
279/* Please keep this enum *SORTED* by hex value. 306/* Please keep this enum *SORTED* by hex value.
280 * Needed for binary search, otherwise a warning will be triggered. 307 * Needed for binary search, otherwise a warning will be triggered.
281 */ 308 */
309enum iwl_mac_conf_subcmd_ids {
310 LINK_QUALITY_MEASUREMENT_CMD = 0x1,
311 LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
312};
313
282enum iwl_phy_ops_subcmd_ids { 314enum iwl_phy_ops_subcmd_ids {
283 CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, 315 CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
284 CTDP_CONFIG_CMD = 0x03, 316 CTDP_CONFIG_CMD = 0x03,
@@ -287,6 +319,10 @@ enum iwl_phy_ops_subcmd_ids {
287 DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, 319 DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
288}; 320};
289 321
322enum iwl_system_subcmd_ids {
323 SHARED_MEM_CFG_CMD = 0x0,
324};
325
290enum iwl_data_path_subcmd_ids { 326enum iwl_data_path_subcmd_ids {
291 UPDATE_MU_GROUPS_CMD = 0x1, 327 UPDATE_MU_GROUPS_CMD = 0x1,
292 TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, 328 TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
@@ -302,6 +338,8 @@ enum iwl_prot_offload_subcmd_ids {
302enum { 338enum {
303 LEGACY_GROUP = 0x0, 339 LEGACY_GROUP = 0x0,
304 LONG_GROUP = 0x1, 340 LONG_GROUP = 0x1,
341 SYSTEM_GROUP = 0x2,
342 MAC_CONF_GROUP = 0x3,
305 PHY_OPS_GROUP = 0x4, 343 PHY_OPS_GROUP = 0x4,
306 DATA_PATH_GROUP = 0x5, 344 DATA_PATH_GROUP = 0x5,
307 PROT_OFFLOAD_GROUP = 0xb, 345 PROT_OFFLOAD_GROUP = 0xb,
@@ -1923,6 +1961,7 @@ struct iwl_tdls_config_res {
1923 1961
1924#define TX_FIFO_MAX_NUM 8 1962#define TX_FIFO_MAX_NUM 8
1925#define RX_FIFO_MAX_NUM 2 1963#define RX_FIFO_MAX_NUM 2
1964#define TX_FIFO_INTERNAL_MAX_NUM 6
1926 1965
1927/** 1966/**
1928 * Shared memory configuration information from the FW 1967 * Shared memory configuration information from the FW
@@ -1940,6 +1979,12 @@ struct iwl_tdls_config_res {
1940 * @page_buff_addr: used by UMAC and performance debug (page miss analysis), 1979 * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
1941 * when paging is not supported this should be 0 1980 * when paging is not supported this should be 0
1942 * @page_buff_size: size of %page_buff_addr 1981 * @page_buff_size: size of %page_buff_addr
1982 * @rxfifo_addr: Start address of rxFifo
1983 * @internal_txfifo_addr: start address of internalFifo
1984 * @internal_txfifo_size: internal fifos' size
1985 *
1986 * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
1987 * set, the last 3 members don't exist.
1943 */ 1988 */
1944struct iwl_shared_mem_cfg { 1989struct iwl_shared_mem_cfg {
1945 __le32 shared_mem_addr; 1990 __le32 shared_mem_addr;
@@ -1951,7 +1996,10 @@ struct iwl_shared_mem_cfg {
1951 __le32 rxfifo_size[RX_FIFO_MAX_NUM]; 1996 __le32 rxfifo_size[RX_FIFO_MAX_NUM];
1952 __le32 page_buff_addr; 1997 __le32 page_buff_addr;
1953 __le32 page_buff_size; 1998 __le32 page_buff_size;
1954} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ 1999 __le32 rxfifo_addr;
2000 __le32 internal_txfifo_addr;
2001 __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
2002} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
1955 2003
1956/** 2004/**
1957 * VHT MU-MIMO group configuration 2005 * VHT MU-MIMO group configuration
@@ -2002,4 +2050,60 @@ struct iwl_stored_beacon_notif {
2002 u8 data[MAX_STORED_BEACON_SIZE]; 2050 u8 data[MAX_STORED_BEACON_SIZE];
2003} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ 2051} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
2004 2052
2053#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
2054
2055enum iwl_lqm_cmd_operatrions {
2056 LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
2057 LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
2058};
2059
2060enum iwl_lqm_status {
2061 LQM_STATUS_SUCCESS = 0,
2062 LQM_STATUS_TIMEOUT = 1,
2063 LQM_STATUS_ABORT = 2,
2064};
2065
2066/**
2067 * Link Quality Measurement command
2068 * @cmd_operatrion: command operation to be performed (start or stop)
2069 * as defined above.
2070 * @mac_id: MAC ID the measurement applies to.
2071 * @measurement_time: time of the total measurement to be performed, in uSec.
2072 * @timeout: maximum time allowed until a response is sent, in uSec.
2073 */
2074struct iwl_link_qual_msrmnt_cmd {
2075 __le32 cmd_operation;
2076 __le32 mac_id;
2077 __le32 measurement_time;
2078 __le32 timeout;
2079} __packed /* LQM_CMD_API_S_VER_1 */;
2080
2081/**
2082 * Link Quality Measurement notification
2083 *
2084 * @frequent_stations_air_time: an array containing the total air time
2085 * (in uSec) used by the most frequently transmitting stations.
2086 * @number_of_stations: the number of uniqe stations included in the array
2087 * (a number between 0 to 16)
2088 * @total_air_time_other_stations: the total air time (uSec) used by all the
2089 * stations which are not included in the above report.
2090 * @time_in_measurement_window: the total time in uSec in which a measurement
2091 * took place.
2092 * @tx_frame_dropped: the number of TX frames dropped due to retry limit during
2093 * measurement
2094 * @mac_id: MAC ID the measurement applies to.
2095 * @status: return status. may be one of the LQM_STATUS_* defined above.
2096 * @reserved: reserved.
2097 */
2098struct iwl_link_qual_msrmnt_notif {
2099 __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT];
2100 __le32 number_of_stations;
2101 __le32 total_air_time_other_stations;
2102 __le32 time_in_measurement_window;
2103 __le32 tx_frame_dropped;
2104 __le32 mac_id;
2105 __le32 status;
2106 __le32 reserved[3];
2107} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
2108
2005#endif /* __fw_api_h__ */ 2109#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 4856eac120f6..cbb5947b3fab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
32 * 32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2015 Intel Deutschland GmbH 35 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
36 * All rights reserved. 36 * All rights reserved.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -265,6 +265,65 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
265 *dump_data = iwl_fw_error_next_data(*dump_data); 265 *dump_data = iwl_fw_error_next_data(*dump_data);
266 } 266 }
267 267
268 if (fw_has_capa(&mvm->fw->ucode_capa,
269 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
270 /* Pull UMAC internal TXF data from all TXFs */
271 for (i = 0;
272 i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
273 i++) {
274 /* Mark the number of TXF we're pulling now */
275 iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
276
277 fifo_hdr = (void *)(*dump_data)->data;
278 fifo_data = (void *)fifo_hdr->data;
279 fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
280
281 /* No need to try to read the data if the length is 0 */
282 if (fifo_len == 0)
283 continue;
284
285 /* Add a TLV for the internal FIFOs */
286 (*dump_data)->type =
287 cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
288 (*dump_data)->len =
289 cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
290
291 fifo_hdr->fifo_num = cpu_to_le32(i);
292 fifo_hdr->available_bytes =
293 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
294 TXF_CPU2_FIFO_ITEM_CNT));
295 fifo_hdr->wr_ptr =
296 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
297 TXF_CPU2_WR_PTR));
298 fifo_hdr->rd_ptr =
299 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
300 TXF_CPU2_RD_PTR));
301 fifo_hdr->fence_ptr =
302 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
303 TXF_CPU2_FENCE_PTR));
304 fifo_hdr->fence_mode =
305 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
306 TXF_CPU2_LOCK_FENCE));
307
308 /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
309 iwl_trans_write_prph(mvm->trans,
310 TXF_CPU2_READ_MODIFY_ADDR,
311 TXF_CPU2_WR_PTR);
312
313 /* Dummy-read to advance the read pointer to head */
314 iwl_trans_read_prph(mvm->trans,
315 TXF_CPU2_READ_MODIFY_DATA);
316
317 /* Read FIFO */
318 fifo_len /= sizeof(u32); /* Size in DWORDS */
319 for (j = 0; j < fifo_len; j++)
320 fifo_data[j] =
321 iwl_trans_read_prph(mvm->trans,
322 TXF_CPU2_READ_MODIFY_DATA);
323 *dump_data = iwl_fw_error_next_data(*dump_data);
324 }
325 }
326
268 iwl_trans_release_nic_access(mvm->trans, &flags); 327 iwl_trans_release_nic_access(mvm->trans, &flags);
269} 328}
270 329
@@ -429,9 +488,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
429 struct iwl_fw_error_dump_trigger_desc *dump_trig; 488 struct iwl_fw_error_dump_trigger_desc *dump_trig;
430 struct iwl_mvm_dump_ptrs *fw_error_dump; 489 struct iwl_mvm_dump_ptrs *fw_error_dump;
431 u32 sram_len, sram_ofs; 490 u32 sram_len, sram_ofs;
491 struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
492 mvm->fw->dbg_mem_tlv;
432 u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; 493 u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
433 u32 smem_len = mvm->cfg->smem_len; 494 u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
434 u32 sram2_len = mvm->cfg->dccm2_len; 495 u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
435 bool monitor_dump_only = false; 496 bool monitor_dump_only = false;
436 int i; 497 int i;
437 498
@@ -494,6 +555,22 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
494 sizeof(struct iwl_fw_error_dump_fifo); 555 sizeof(struct iwl_fw_error_dump_fifo);
495 } 556 }
496 557
558 if (fw_has_capa(&mvm->fw->ucode_capa,
559 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
560 for (i = 0;
561 i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
562 i++) {
563 if (!mem_cfg->internal_txfifo_size[i])
564 continue;
565
566 /* Add header info */
567 fifo_data_len +=
568 mem_cfg->internal_txfifo_size[i] +
569 sizeof(*dump_data) +
570 sizeof(struct iwl_fw_error_dump_fifo);
571 }
572 }
573
497 /* Make room for PRPH registers */ 574 /* Make room for PRPH registers */
498 for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { 575 for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
499 /* The range includes both boundaries */ 576 /* The range includes both boundaries */
@@ -511,7 +588,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
511 588
512 file_len = sizeof(*dump_file) + 589 file_len = sizeof(*dump_file) +
513 sizeof(*dump_data) * 2 + 590 sizeof(*dump_data) * 2 +
514 sram_len + sizeof(*dump_mem) +
515 fifo_data_len + 591 fifo_data_len +
516 prph_len + 592 prph_len +
517 radio_len + 593 radio_len +
@@ -525,6 +601,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
525 if (sram2_len) 601 if (sram2_len)
526 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; 602 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
527 603
604 /* Make room for MEM segments */
605 for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
606 if (fw_dbg_mem[i])
607 file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
608 le32_to_cpu(fw_dbg_mem[i]->len);
609 }
610
528 /* Make room for fw's virtual image pages, if it exists */ 611 /* Make room for fw's virtual image pages, if it exists */
529 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) 612 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
530 file_len += mvm->num_of_paging_blk * 613 file_len += mvm->num_of_paging_blk *
@@ -550,6 +633,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
550 file_len += sizeof(*dump_data) + sizeof(*dump_trig) + 633 file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
551 mvm->fw_dump_desc->len; 634 mvm->fw_dump_desc->len;
552 635
636 if (!mvm->fw->dbg_dynamic_mem)
637 file_len += sram_len + sizeof(*dump_mem);
638
553 dump_file = vzalloc(file_len); 639 dump_file = vzalloc(file_len);
554 if (!dump_file) { 640 if (!dump_file) {
555 kfree(fw_error_dump); 641 kfree(fw_error_dump);
@@ -599,16 +685,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
599 if (monitor_dump_only) 685 if (monitor_dump_only)
600 goto dump_trans_data; 686 goto dump_trans_data;
601 687
602 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); 688 if (!mvm->fw->dbg_dynamic_mem) {
603 dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); 689 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
604 dump_mem = (void *)dump_data->data; 690 dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
605 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); 691 dump_mem = (void *)dump_data->data;
606 dump_mem->offset = cpu_to_le32(sram_ofs); 692 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
607 iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, 693 dump_mem->offset = cpu_to_le32(sram_ofs);
608 sram_len); 694 iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
695 sram_len);
696 dump_data = iwl_fw_error_next_data(dump_data);
697 }
698
699 for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
700 if (fw_dbg_mem[i]) {
701 u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
702 u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
703
704 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
705 dump_data->len = cpu_to_le32(len +
706 sizeof(*dump_mem));
707 dump_mem = (void *)dump_data->data;
708 dump_mem->type = fw_dbg_mem[i]->data_type;
709 dump_mem->offset = cpu_to_le32(ofs);
710 iwl_trans_read_mem_bytes(mvm->trans, ofs,
711 dump_mem->data,
712 len);
713 dump_data = iwl_fw_error_next_data(dump_data);
714 }
715 }
609 716
610 if (smem_len) { 717 if (smem_len) {
611 dump_data = iwl_fw_error_next_data(dump_data);
612 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); 718 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
613 dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); 719 dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
614 dump_mem = (void *)dump_data->data; 720 dump_mem = (void *)dump_data->data;
@@ -616,10 +722,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
616 dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); 722 dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
617 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, 723 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
618 dump_mem->data, smem_len); 724 dump_mem->data, smem_len);
725 dump_data = iwl_fw_error_next_data(dump_data);
619 } 726 }
620 727
621 if (sram2_len) { 728 if (sram2_len) {
622 dump_data = iwl_fw_error_next_data(dump_data);
623 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); 729 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
624 dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); 730 dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
625 dump_mem = (void *)dump_data->data; 731 dump_mem = (void *)dump_data->data;
@@ -627,11 +733,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
627 dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); 733 dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
628 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, 734 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
629 dump_mem->data, sram2_len); 735 dump_mem->data, sram2_len);
736 dump_data = iwl_fw_error_next_data(dump_data);
630 } 737 }
631 738
632 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && 739 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
633 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) { 740 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
634 dump_data = iwl_fw_error_next_data(dump_data);
635 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); 741 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
636 dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN + 742 dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
637 sizeof(*dump_mem)); 743 sizeof(*dump_mem));
@@ -640,6 +746,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
640 dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET); 746 dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
641 iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET, 747 iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
642 dump_mem->data, IWL8260_ICCM_LEN); 748 dump_mem->data, IWL8260_ICCM_LEN);
749 dump_data = iwl_fw_error_next_data(dump_data);
643 } 750 }
644 751
645 /* Dump fw's virtual image */ 752 /* Dump fw's virtual image */
@@ -649,7 +756,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
649 struct page *pages = 756 struct page *pages =
650 mvm->fw_paging_db[i].fw_paging_block; 757 mvm->fw_paging_db[i].fw_paging_block;
651 758
652 dump_data = iwl_fw_error_next_data(dump_data);
653 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); 759 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
654 dump_data->len = cpu_to_le32(sizeof(*paging) + 760 dump_data->len = cpu_to_le32(sizeof(*paging) +
655 PAGING_BLOCK_SIZE); 761 PAGING_BLOCK_SIZE);
@@ -657,10 +763,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
657 paging->index = cpu_to_le32(i); 763 paging->index = cpu_to_le32(i);
658 memcpy(paging->data, page_address(pages), 764 memcpy(paging->data, page_address(pages),
659 PAGING_BLOCK_SIZE); 765 PAGING_BLOCK_SIZE);
766 dump_data = iwl_fw_error_next_data(dump_data);
660 } 767 }
661 } 768 }
662 769
663 dump_data = iwl_fw_error_next_data(dump_data);
664 if (prph_len) 770 if (prph_len)
665 iwl_dump_prph(mvm->trans, &dump_data); 771 iwl_dump_prph(mvm->trans, &dump_data);
666 772
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 594cd0dc7df9..6ad5c602e84c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -64,6 +64,7 @@
64 * 64 *
65 *****************************************************************************/ 65 *****************************************************************************/
66#include <net/mac80211.h> 66#include <net/mac80211.h>
67#include <linux/netdevice.h>
67 68
68#include "iwl-trans.h" 69#include "iwl-trans.h"
69#include "iwl-op-mode.h" 70#include "iwl-op-mode.h"
@@ -114,14 +115,18 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
114 struct iwl_rss_config_cmd cmd = { 115 struct iwl_rss_config_cmd cmd = {
115 .flags = cpu_to_le32(IWL_RSS_ENABLE), 116 .flags = cpu_to_le32(IWL_RSS_ENABLE),
116 .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | 117 .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
118 IWL_RSS_HASH_TYPE_IPV4_UDP |
117 IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | 119 IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
118 IWL_RSS_HASH_TYPE_IPV6_TCP | 120 IWL_RSS_HASH_TYPE_IPV6_TCP |
121 IWL_RSS_HASH_TYPE_IPV6_UDP |
119 IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, 122 IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
120 }; 123 };
121 124
125 /* Do not direct RSS traffic to Q 0 which is our fallback queue */
122 for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) 126 for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
123 cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; 127 cmd.indirection_table[i] =
124 memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); 128 1 + (i % (mvm->trans->num_rx_queues - 1));
129 netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
125 130
126 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); 131 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
127} 132}
@@ -174,8 +179,12 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
174 } 179 }
175 } 180 }
176 181
177 if (sec_idx >= IWL_UCODE_SECTION_MAX) { 182 /*
178 IWL_ERR(mvm, "driver didn't find paging image\n"); 183 * If paging is enabled there should be at least 2 more sections left
184 * (one for CSS and one for Paging data)
185 */
186 if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
187 IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
179 iwl_free_fw_paging(mvm); 188 iwl_free_fw_paging(mvm);
180 return -EINVAL; 189 return -EINVAL;
181 } 190 }
@@ -410,7 +419,9 @@ static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
410 goto exit; 419 goto exit;
411 } 420 }
412 421
413 mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE, 422 /* Add an extra page for headers */
423 mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
424 FW_PAGING_SIZE,
414 GFP_KERNEL); 425 GFP_KERNEL);
415 if (!mvm->trans->paging_download_buf) { 426 if (!mvm->trans->paging_download_buf) {
416 ret = -ENOMEM; 427 ret = -ENOMEM;
@@ -641,7 +652,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
641 */ 652 */
642 653
643 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); 654 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
644 mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; 655 if (iwl_mvm_is_dqa_supported(mvm))
656 mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
657 else
658 mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
645 659
646 for (i = 0; i < IEEE80211_MAX_QUEUES; i++) 660 for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
647 atomic_set(&mvm->mac80211_queue_stop_count[i], 0); 661 atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@ -788,17 +802,22 @@ out:
788static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) 802static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
789{ 803{
790 struct iwl_host_cmd cmd = { 804 struct iwl_host_cmd cmd = {
791 .id = SHARED_MEM_CFG,
792 .flags = CMD_WANT_SKB, 805 .flags = CMD_WANT_SKB,
793 .data = { NULL, }, 806 .data = { NULL, },
794 .len = { 0, }, 807 .len = { 0, },
795 }; 808 };
796 struct iwl_rx_packet *pkt;
797 struct iwl_shared_mem_cfg *mem_cfg; 809 struct iwl_shared_mem_cfg *mem_cfg;
810 struct iwl_rx_packet *pkt;
798 u32 i; 811 u32 i;
799 812
800 lockdep_assert_held(&mvm->mutex); 813 lockdep_assert_held(&mvm->mutex);
801 814
815 if (fw_has_capa(&mvm->fw->ucode_capa,
816 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
817 cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
818 else
819 cmd.id = SHARED_MEM_CFG;
820
802 if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) 821 if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
803 return; 822 return;
804 823
@@ -824,6 +843,25 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
824 le32_to_cpu(mem_cfg->page_buff_addr); 843 le32_to_cpu(mem_cfg->page_buff_addr);
825 mvm->shared_mem_cfg.page_buff_size = 844 mvm->shared_mem_cfg.page_buff_size =
826 le32_to_cpu(mem_cfg->page_buff_size); 845 le32_to_cpu(mem_cfg->page_buff_size);
846
847 /* new API has more data */
848 if (fw_has_capa(&mvm->fw->ucode_capa,
849 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
850 mvm->shared_mem_cfg.rxfifo_addr =
851 le32_to_cpu(mem_cfg->rxfifo_addr);
852 mvm->shared_mem_cfg.internal_txfifo_addr =
853 le32_to_cpu(mem_cfg->internal_txfifo_addr);
854
855 BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
856 sizeof(mem_cfg->internal_txfifo_size));
857
858 for (i = 0;
859 i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
860 i++)
861 mvm->shared_mem_cfg.internal_txfifo_size[i] =
862 le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
863 }
864
827 IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); 865 IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
828 866
829 iwl_free_resp(&cmd); 867 iwl_free_resp(&cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index e885db3464b0..5f950568e92c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -252,10 +252,14 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
252 .exclude_vif = exclude_vif, 252 .exclude_vif = exclude_vif,
253 .used_hw_queues = 253 .used_hw_queues =
254 BIT(IWL_MVM_OFFCHANNEL_QUEUE) | 254 BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
255 BIT(mvm->aux_queue) | 255 BIT(mvm->aux_queue),
256 BIT(IWL_MVM_CMD_QUEUE),
257 }; 256 };
258 257
258 if (iwl_mvm_is_dqa_supported(mvm))
259 data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE);
260 else
261 data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
262
259 lockdep_assert_held(&mvm->mutex); 263 lockdep_assert_held(&mvm->mutex);
260 264
261 /* mark all VIF used hw queues */ 265 /* mark all VIF used hw queues */
@@ -425,12 +429,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
425 return 0; 429 return 0;
426 } 430 }
427 431
428 /* Find available queues, and allocate them to the ACs */ 432 /*
433 * Find available queues, and allocate them to the ACs. When in
434 * DQA-mode they aren't really used, and this is done only so the
435 * mac80211 ieee80211_check_queues() function won't fail
436 */
429 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 437 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
430 u8 queue = find_first_zero_bit(&used_hw_queues, 438 u8 queue = find_first_zero_bit(&used_hw_queues,
431 mvm->first_agg_queue); 439 mvm->first_agg_queue);
432 440
433 if (queue >= mvm->first_agg_queue) { 441 if (!iwl_mvm_is_dqa_supported(mvm) &&
442 queue >= mvm->first_agg_queue) {
434 IWL_ERR(mvm, "Failed to allocate queue\n"); 443 IWL_ERR(mvm, "Failed to allocate queue\n");
435 ret = -EIO; 444 ret = -EIO;
436 goto exit_fail; 445 goto exit_fail;
@@ -442,13 +451,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
442 451
443 /* Allocate the CAB queue for softAP and GO interfaces */ 452 /* Allocate the CAB queue for softAP and GO interfaces */
444 if (vif->type == NL80211_IFTYPE_AP) { 453 if (vif->type == NL80211_IFTYPE_AP) {
445 u8 queue = find_first_zero_bit(&used_hw_queues, 454 u8 queue;
446 mvm->first_agg_queue);
447 455
448 if (queue >= mvm->first_agg_queue) { 456 if (!iwl_mvm_is_dqa_supported(mvm)) {
449 IWL_ERR(mvm, "Failed to allocate cab queue\n"); 457 queue = find_first_zero_bit(&used_hw_queues,
450 ret = -EIO; 458 mvm->first_agg_queue);
451 goto exit_fail; 459
460 if (queue >= mvm->first_agg_queue) {
461 IWL_ERR(mvm, "Failed to allocate cab queue\n");
462 ret = -EIO;
463 goto exit_fail;
464 }
465 } else {
466 queue = IWL_MVM_DQA_GCAST_QUEUE;
452 } 467 }
453 468
454 vif->cab_queue = queue; 469 vif->cab_queue = queue;
@@ -495,6 +510,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
495 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); 510 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
496 /* fall through */ 511 /* fall through */
497 default: 512 default:
513 /* If DQA is supported - queues will be enabled when needed */
514 if (iwl_mvm_is_dqa_supported(mvm))
515 break;
516
498 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 517 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
499 iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], 518 iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
500 vif->hw_queue[ac], 519 vif->hw_queue[ac],
@@ -523,6 +542,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
523 IWL_MAX_TID_COUNT, 0); 542 IWL_MAX_TID_COUNT, 0);
524 /* fall through */ 543 /* fall through */
525 default: 544 default:
545 /*
546 * If DQA is supported - queues were already disabled, since in
547 * DQA-mode the queues are a property of the STA and not of the
548 * vif, and at this point the STA was already deleted
549 */
550 if (iwl_mvm_is_dqa_supported(mvm))
551 break;
552
526 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 553 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
527 iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], 554 iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
528 vif->hw_queue[ac], 555 vif->hw_queue[ac],
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c680a1..4f5ec495b460 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -665,12 +665,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
665 } 665 }
666 666
667 hw->netdev_features |= mvm->cfg->features; 667 hw->netdev_features |= mvm->cfg->features;
668 if (!iwl_mvm_is_csum_supported(mvm)) 668 if (!iwl_mvm_is_csum_supported(mvm)) {
669 hw->netdev_features &= ~NETIF_F_RXCSUM; 669 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
670 670 NETIF_F_RXCSUM);
671 if (IWL_MVM_SW_TX_CSUM_OFFLOAD) 671 /* We may support SW TX CSUM */
672 hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 672 if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
673 NETIF_F_TSO | NETIF_F_TSO6; 673 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
674 }
674 675
675 ret = ieee80211_register_hw(mvm->hw); 676 ret = ieee80211_register_hw(mvm->hw);
676 if (ret) 677 if (ret)
@@ -992,6 +993,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
992 iwl_mvm_reset_phy_ctxts(mvm); 993 iwl_mvm_reset_phy_ctxts(mvm);
993 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 994 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
994 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); 995 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
996 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
995 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); 997 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
996 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 998 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
997 memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); 999 memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -1178,6 +1180,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1178 1180
1179 flush_work(&mvm->d0i3_exit_work); 1181 flush_work(&mvm->d0i3_exit_work);
1180 flush_work(&mvm->async_handlers_wk); 1182 flush_work(&mvm->async_handlers_wk);
1183 flush_work(&mvm->add_stream_wk);
1181 cancel_delayed_work_sync(&mvm->fw_dump_wk); 1184 cancel_delayed_work_sync(&mvm->fw_dump_wk);
1182 iwl_mvm_free_fw_dump_desc(mvm); 1185 iwl_mvm_free_fw_dump_desc(mvm);
1183 1186
@@ -1821,6 +1824,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1821 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) 1824 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
1822 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 1825 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
1823 1826
1827 if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
1828 mvmvif->lqm_active)
1829 iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
1830 0, 0);
1831
1824 /* 1832 /*
1825 * If we're not associated yet, take the (new) BSSID before associating 1833 * If we're not associated yet, take the (new) BSSID before associating
1826 * so the firmware knows. If we're already associated, then use the old 1834 * so the firmware knows. If we're already associated, then use the old
@@ -2340,7 +2348,8 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2340 return; 2348 return;
2341 } 2349 }
2342 2350
2343 if (iwlwifi_mod_params.uapsd_disable) { 2351 if (!vif->p2p &&
2352 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
2344 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2353 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2345 return; 2354 return;
2346 } 2355 }
@@ -2376,6 +2385,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
2376 peer_addr, action); 2385 peer_addr, action);
2377} 2386}
2378 2387
2388static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2389 struct iwl_mvm_sta *mvm_sta)
2390{
2391 struct iwl_mvm_tid_data *tid_data;
2392 struct sk_buff *skb;
2393 int i;
2394
2395 spin_lock_bh(&mvm_sta->lock);
2396 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
2397 tid_data = &mvm_sta->tid_data[i];
2398 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
2399 ieee80211_free_txskb(mvm->hw, skb);
2400 }
2401 spin_unlock_bh(&mvm_sta->lock);
2402}
2403
2379static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, 2404static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2380 struct ieee80211_vif *vif, 2405 struct ieee80211_vif *vif,
2381 struct ieee80211_sta *sta, 2406 struct ieee80211_sta *sta,
@@ -2396,6 +2421,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2396 /* if a STA is being removed, reuse its ID */ 2421 /* if a STA is being removed, reuse its ID */
2397 flush_work(&mvm->sta_drained_wk); 2422 flush_work(&mvm->sta_drained_wk);
2398 2423
2424 /*
2425 * If we are in a STA removal flow and in DQA mode:
2426 *
2427 * This is after the sync_rcu part, so the queues have already been
2428 * flushed. No more TXs on their way in mac80211's path, and no more in
2429 * the queues.
2430 * Also, we won't be getting any new TX frames for this station.
2431 * What we might have are deferred TX frames that need to be taken care
2432 * of.
2433 *
2434 * Drop any still-queued deferred-frame before removing the STA, and
2435 * make sure the worker is no longer handling frames for this STA.
2436 */
2437 if (old_state == IEEE80211_STA_NONE &&
2438 new_state == IEEE80211_STA_NOTEXIST &&
2439 iwl_mvm_is_dqa_supported(mvm)) {
2440 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2441
2442 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
2443 flush_work(&mvm->add_stream_wk);
2444
2445 /*
2446 * No need to make sure deferred TX indication is off since the
2447 * worker will already remove it if it was on
2448 */
2449 }
2450
2399 mutex_lock(&mvm->mutex); 2451 mutex_lock(&mvm->mutex);
2400 if (old_state == IEEE80211_STA_NOTEXIST && 2452 if (old_state == IEEE80211_STA_NOTEXIST &&
2401 new_state == IEEE80211_STA_NONE) { 2453 new_state == IEEE80211_STA_NONE) {
@@ -3628,6 +3680,11 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3628 3680
3629 break; 3681 break;
3630 case NL80211_IFTYPE_STATION: 3682 case NL80211_IFTYPE_STATION:
3683 if (mvmvif->lqm_active)
3684 iwl_mvm_send_lqm_cmd(vif,
3685 LQM_CMD_OPERATION_STOP_MEASUREMENT,
3686 0, 0);
3687
3631 /* Schedule the time event to a bit before beacon 1, 3688 /* Schedule the time event to a bit before beacon 1,
3632 * to make sure we're in the new channel when the 3689 * to make sure we're in the new channel when the
3633 * GO/AP arrives. 3690 * GO/AP arrives.
@@ -3727,6 +3784,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3727 if (!vif || vif->type != NL80211_IFTYPE_STATION) 3784 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3728 return; 3785 return;
3729 3786
3787 /* Make sure we're done with the deferred traffic before flushing */
3788 if (iwl_mvm_is_dqa_supported(mvm))
3789 flush_work(&mvm->add_stream_wk);
3790
3730 mutex_lock(&mvm->mutex); 3791 mutex_lock(&mvm->mutex);
3731 mvmvif = iwl_mvm_vif_from_mac80211(vif); 3792 mvmvif = iwl_mvm_vif_from_mac80211(vif);
3732 3793
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 9abbc93e3c06..2d685e02d488 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -208,7 +208,7 @@ enum iwl_power_scheme {
208}; 208};
209 209
210#define IWL_CONN_MAX_LISTEN_INTERVAL 10 210#define IWL_CONN_MAX_LISTEN_INTERVAL 10
211#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2 211#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
212 212
213#ifdef CONFIG_IWLWIFI_DEBUGFS 213#ifdef CONFIG_IWLWIFI_DEBUGFS
214enum iwl_dbgfs_pm_mask { 214enum iwl_dbgfs_pm_mask {
@@ -453,6 +453,12 @@ struct iwl_mvm_vif {
453 453
454 /* TCP Checksum Offload */ 454 /* TCP Checksum Offload */
455 netdev_features_t features; 455 netdev_features_t features;
456
457 /*
458 * link quality measurement - used to check whether this interface
459 * is in the middle of a link quality measurement
460 */
461 bool lqm_active;
456}; 462};
457 463
458static inline struct iwl_mvm_vif * 464static inline struct iwl_mvm_vif *
@@ -602,6 +608,9 @@ struct iwl_mvm_shared_mem_cfg {
602 u32 rxfifo_size[RX_FIFO_MAX_NUM]; 608 u32 rxfifo_size[RX_FIFO_MAX_NUM];
603 u32 page_buff_addr; 609 u32 page_buff_addr;
604 u32 page_buff_size; 610 u32 page_buff_size;
611 u32 rxfifo_addr;
612 u32 internal_txfifo_addr;
613 u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
605}; 614};
606 615
607struct iwl_mvm { 616struct iwl_mvm {
@@ -656,10 +665,17 @@ struct iwl_mvm {
656 /* Map to HW queue */ 665 /* Map to HW queue */
657 u32 hw_queue_to_mac80211; 666 u32 hw_queue_to_mac80211;
658 u8 hw_queue_refcount; 667 u8 hw_queue_refcount;
668 u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
669 /*
670 * This is to mark that queue is reserved for a STA but not yet
671 * allocated. This is needed to make sure we have at least one
672 * available queue to use when adding a new STA
673 */
659 bool setup_reserved; 674 bool setup_reserved;
660 u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ 675 u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
661 } queue_info[IWL_MAX_HW_QUEUES]; 676 } queue_info[IWL_MAX_HW_QUEUES];
662 spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ 677 spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
678 struct work_struct add_stream_wk; /* To add streams to queues */
663 atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; 679 atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
664 680
665 const char *nvm_file_name; 681 const char *nvm_file_name;
@@ -679,11 +695,11 @@ struct iwl_mvm {
679 struct iwl_rx_phy_info last_phy_info; 695 struct iwl_rx_phy_info last_phy_info;
680 struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; 696 struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
681 struct work_struct sta_drained_wk; 697 struct work_struct sta_drained_wk;
698 unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
682 unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; 699 unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
683 atomic_t pending_frames[IWL_MVM_STATION_COUNT]; 700 atomic_t pending_frames[IWL_MVM_STATION_COUNT];
684 u32 tfd_drained[IWL_MVM_STATION_COUNT]; 701 u32 tfd_drained[IWL_MVM_STATION_COUNT];
685 u8 rx_ba_sessions; 702 u8 rx_ba_sessions;
686 u32 secret_key[IWL_RSS_HASH_KEY_CNT];
687 703
688 /* configured by mac80211 */ 704 /* configured by mac80211 */
689 u32 rts_threshold; 705 u32 rts_threshold;
@@ -694,6 +710,7 @@ struct iwl_mvm {
694 struct iwl_mcast_filter_cmd *mcast_filter_cmd; 710 struct iwl_mcast_filter_cmd *mcast_filter_cmd;
695 enum iwl_mvm_scan_type scan_type; 711 enum iwl_mvm_scan_type scan_type;
696 enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; 712 enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
713 struct timer_list scan_timer;
697 714
698 /* max number of simultaneous scans the FW supports */ 715 /* max number of simultaneous scans the FW supports */
699 unsigned int max_scans; 716 unsigned int max_scans;
@@ -1063,7 +1080,8 @@ bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
1063{ 1080{
1064 return fw_has_capa(&mvm->fw->ucode_capa, 1081 return fw_has_capa(&mvm->fw->ucode_capa,
1065 IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) && 1082 IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
1066 IWL_MVM_P2P_UAPSD_STANDALONE; 1083 !(iwlwifi_mod_params.uapsd_disable &
1084 IWL_DISABLE_UAPSD_P2P_CLIENT);
1067} 1085}
1068 1086
1069static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) 1087static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
@@ -1297,6 +1315,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm);
1297int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); 1315int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
1298int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); 1316int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
1299void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); 1317void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
1318void iwl_mvm_scan_timeout(unsigned long data);
1300 1319
1301/* Scheduled scan */ 1320/* Scheduled scan */
1302void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, 1321void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -1453,22 +1472,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
1453u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, 1472u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
1454 struct ieee80211_tx_info *info, u8 ac); 1473 struct ieee80211_tx_info *info, u8 ac);
1455 1474
1456bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
1457void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
1458int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
1459void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
1460 struct iwl_rx_cmd_buffer *rxb);
1461void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1462 enum ieee80211_rssi_event_data);
1463u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
1464 struct ieee80211_sta *sta);
1465bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
1466 struct ieee80211_sta *sta);
1467bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
1468 enum ieee80211_band band);
1469void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
1470 struct iwl_rx_cmd_buffer *rxb);
1471
1472/* beacon filtering */ 1475/* beacon filtering */
1473#ifdef CONFIG_IWLWIFI_DEBUGFS 1476#ifdef CONFIG_IWLWIFI_DEBUGFS
1474void 1477void
@@ -1634,4 +1637,10 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1634void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1637void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1635 const char *errmsg); 1638 const char *errmsg);
1636 1639
1640/* Link Quality Measurement */
1641int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
1642 enum iwl_lqm_cmd_operatrions operation,
1643 u32 duration, u32 timeout);
1644bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
1645
1637#endif /* __IWL_MVM_H__ */ 1646#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab796d5bc..656541c5360a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -292,7 +292,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
292 RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, 292 RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
293 RX_HANDLER_ASYNC_LOCKED), 293 RX_HANDLER_ASYNC_LOCKED),
294 RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, 294 RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
295 iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), 295 iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
296 RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, 296 RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
297 iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC), 297 iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
298 298
@@ -421,6 +421,21 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
421/* Please keep this array *SORTED* by hex value. 421/* Please keep this array *SORTED* by hex value.
422 * Access is done through binary search 422 * Access is done through binary search
423 */ 423 */
424static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
425 HCMD_NAME(SHARED_MEM_CFG_CMD),
426};
427
428/* Please keep this array *SORTED* by hex value.
429 * Access is done through binary search
430 */
431static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
432 HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
433 HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
434};
435
436/* Please keep this array *SORTED* by hex value.
437 * Access is done through binary search
438 */
424static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { 439static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
425 HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), 440 HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
426 HCMD_NAME(CTDP_CONFIG_CMD), 441 HCMD_NAME(CTDP_CONFIG_CMD),
@@ -449,6 +464,8 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
449static const struct iwl_hcmd_arr iwl_mvm_groups[] = { 464static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
450 [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), 465 [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
451 [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), 466 [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
467 [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
468 [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
452 [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), 469 [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
453 [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), 470 [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
454 [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), 471 [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
@@ -562,6 +579,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
562 INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); 579 INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
563 INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); 580 INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
564 INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); 581 INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
582 INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
565 583
566 spin_lock_init(&mvm->d0i3_tx_lock); 584 spin_lock_init(&mvm->d0i3_tx_lock);
567 spin_lock_init(&mvm->refs_lock); 585 spin_lock_init(&mvm->refs_lock);
@@ -601,7 +619,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
601 trans_cfg.command_groups = iwl_mvm_groups; 619 trans_cfg.command_groups = iwl_mvm_groups;
602 trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); 620 trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
603 621
604 trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; 622 if (iwl_mvm_is_dqa_supported(mvm))
623 trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
624 else
625 trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
605 trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; 626 trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
606 trans_cfg.scd_set_active = true; 627 trans_cfg.scd_set_active = true;
607 628
@@ -707,8 +728,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
707 728
708 iwl_mvm_tof_init(mvm); 729 iwl_mvm_tof_init(mvm);
709 730
710 /* init RSS hash key */ 731 setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
711 get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key)); 732 (unsigned long)mvm);
712 733
713 return op_mode; 734 return op_mode;
714 735
@@ -765,6 +786,11 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
765 786
766 iwl_mvm_tof_clean(mvm); 787 iwl_mvm_tof_clean(mvm);
767 788
789 del_timer_sync(&mvm->scan_timer);
790
791 mutex_destroy(&mvm->mutex);
792 mutex_destroy(&mvm->d0i3_suspend_mutex);
793
768 ieee80211_free_hw(mvm->hw); 794 ieee80211_free_hw(mvm->hw);
769} 795}
770 796
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index f313910cd026..7b1f6ad6062b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -227,7 +227,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
227 cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); 227 cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
228 } 228 }
229 229
230 cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP; 230 cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
231 231
232 if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags & 232 if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
233 cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { 233 cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 9a54f2d2a66b..b2bc3d96a13f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -294,10 +294,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
294{ 294{
295 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 295 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
296 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); 296 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
297 u16 flags = le16_to_cpu(desc->l3l4_flags);
298 u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
299 IWL_RX_L3_PROTO_POS);
297 300
298 if (mvmvif->features & NETIF_F_RXCSUM && 301 if (mvmvif->features & NETIF_F_RXCSUM &&
299 desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) && 302 flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
300 desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK)) 303 (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
304 l3_prot == IWL_RX_L3_TYPE_IPV6 ||
305 l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
301 skb->ip_summed = CHECKSUM_UNNECESSARY; 306 skb->ip_summed = CHECKSUM_UNNECESSARY;
302} 307}
303 308
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 09eb72c4ae43..c1d1be9c5d01 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -70,6 +70,7 @@
70 70
71#include "mvm.h" 71#include "mvm.h"
72#include "fw-api-scan.h" 72#include "fw-api-scan.h"
73#include "iwl-io.h"
73 74
74#define IWL_DENSE_EBS_SCAN_RATIO 5 75#define IWL_DENSE_EBS_SCAN_RATIO 5
75#define IWL_SPARSE_EBS_SCAN_RATIO 1 76#define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -398,6 +399,10 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
398 ieee80211_scan_completed(mvm->hw, 399 ieee80211_scan_completed(mvm->hw,
399 scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); 400 scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
400 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 401 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
402 del_timer(&mvm->scan_timer);
403 } else {
404 IWL_ERR(mvm,
405 "got scan complete notification but no scan is running\n");
401 } 406 }
402 407
403 mvm->last_ebs_successful = 408 mvm->last_ebs_successful =
@@ -961,6 +966,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
961 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | 966 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
962 SCAN_CONFIG_FLAG_SET_TX_CHAINS | 967 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
963 SCAN_CONFIG_FLAG_SET_RX_CHAINS | 968 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
969 SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
964 SCAN_CONFIG_FLAG_SET_ALL_TIMES | 970 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
965 SCAN_CONFIG_FLAG_SET_LEGACY_RATES | 971 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
966 SCAN_CONFIG_FLAG_SET_MAC_ADDR | 972 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
@@ -1216,6 +1222,18 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1216 return -EIO; 1222 return -EIO;
1217} 1223}
1218 1224
1225#define SCAN_TIMEOUT (16 * HZ)
1226
1227void iwl_mvm_scan_timeout(unsigned long data)
1228{
1229 struct iwl_mvm *mvm = (struct iwl_mvm *)data;
1230
1231 IWL_ERR(mvm, "regular scan timed out\n");
1232
1233 del_timer(&mvm->scan_timer);
1234 iwl_force_nmi(mvm->trans);
1235}
1236
1219int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1237int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1220 struct cfg80211_scan_request *req, 1238 struct cfg80211_scan_request *req,
1221 struct ieee80211_scan_ies *ies) 1239 struct ieee80211_scan_ies *ies)
@@ -1295,6 +1313,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1295 mvm->scan_status |= IWL_MVM_SCAN_REGULAR; 1313 mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
1296 iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); 1314 iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
1297 1315
1316 mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
1317
1298 return 0; 1318 return 0;
1299} 1319}
1300 1320
@@ -1412,6 +1432,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1412 if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { 1432 if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
1413 ieee80211_scan_completed(mvm->hw, aborted); 1433 ieee80211_scan_completed(mvm->hw, aborted);
1414 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 1434 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1435 del_timer(&mvm->scan_timer);
1415 } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { 1436 } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
1416 ieee80211_sched_scan_stopped(mvm->hw); 1437 ieee80211_sched_scan_stopped(mvm->hw);
1417 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; 1438 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1607,6 +1628,7 @@ out:
1607 * to release the scan reference here. 1628 * to release the scan reference here.
1608 */ 1629 */
1609 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 1630 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1631 del_timer(&mvm->scan_timer);
1610 if (notify) 1632 if (notify)
1611 ieee80211_scan_completed(mvm->hw, true); 1633 ieee80211_scan_completed(mvm->hw, true);
1612 } else if (notify) { 1634 } else if (notify) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index c2def1232a8c..443a42855c9e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -193,7 +193,7 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
193 } 193 }
194 } 194 }
195 195
196 if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) { 196 if (sta) {
197 BUILD_BUG_ON(sizeof(sf_full_timeout) != 197 BUILD_BUG_ON(sizeof(sf_full_timeout) !=
198 sizeof(__le32) * SF_NUM_SCENARIO * 198 sizeof(__le32) * SF_NUM_SCENARIO *
199 SF_NUM_TIMEOUT_TYPES); 199 SF_NUM_TIMEOUT_TYPES);
@@ -220,9 +220,6 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
220 struct ieee80211_sta *sta; 220 struct ieee80211_sta *sta;
221 int ret = 0; 221 int ret = 0;
222 222
223 if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
224 sf_cmd.state = cpu_to_le32(new_state);
225
226 if (mvm->cfg->disable_dummy_notification) 223 if (mvm->cfg->disable_dummy_notification)
227 sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); 224 sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
228 225
@@ -235,8 +232,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
235 232
236 switch (new_state) { 233 switch (new_state) {
237 case SF_UNINIT: 234 case SF_UNINIT:
238 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13) 235 iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
239 iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
240 break; 236 break;
241 case SF_FULL_ON: 237 case SF_FULL_ON:
242 if (sta_id == IWL_MVM_STATION_COUNT) { 238 if (sta_id == IWL_MVM_STATION_COUNT) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ef99942d7169..12614b7b7fe7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
111 111
112/* send station add/update command to firmware */ 112/* send station add/update command to firmware */
113int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 113int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
114 bool update) 114 bool update, unsigned int flags)
115{ 115{
116 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 116 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
117 struct iwl_mvm_add_sta_cmd add_sta_cmd = { 117 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
@@ -126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
126 u32 status; 126 u32 status;
127 u32 agg_size = 0, mpdu_dens = 0; 127 u32 agg_size = 0, mpdu_dens = 0;
128 128
129 if (!update) { 129 if (!update || (flags & STA_MODIFY_QUEUES)) {
130 add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); 130 add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
131 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); 131 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
132
133 if (flags & STA_MODIFY_QUEUES)
134 add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
132 } 135 }
133 136
134 switch (sta->bandwidth) { 137 switch (sta->bandwidth) {
@@ -274,6 +277,211 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
274 iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); 277 iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
275} 278}
276 279
280static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
281 struct ieee80211_sta *sta, u8 ac, int tid,
282 struct ieee80211_hdr *hdr)
283{
284 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
285 struct iwl_trans_txq_scd_cfg cfg = {
286 .fifo = iwl_mvm_ac_to_tx_fifo[ac],
287 .sta_id = mvmsta->sta_id,
288 .tid = tid,
289 .frame_limit = IWL_FRAME_LIMIT,
290 };
291 unsigned int wdg_timeout =
292 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
293 u8 mac_queue = mvmsta->vif->hw_queue[ac];
294 int queue = -1;
295 int ssn;
296
297 lockdep_assert_held(&mvm->mutex);
298
299 spin_lock_bh(&mvm->queue_info_lock);
300
301 /*
302 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
303 * exists
304 */
305 if (!ieee80211_is_data_qos(hdr->frame_control) ||
306 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
307 queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
308 IWL_MVM_DQA_MAX_MGMT_QUEUE);
309 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
310 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
311 queue);
312
313 /* If no such queue is found, we'll use a DATA queue instead */
314 }
315
316 if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
317 queue = mvmsta->reserved_queue;
318 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
319 }
320
321 if (queue < 0)
322 queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
323 IWL_MVM_DQA_MAX_DATA_QUEUE);
324 if (queue >= 0)
325 mvm->queue_info[queue].setup_reserved = false;
326
327 spin_unlock_bh(&mvm->queue_info_lock);
328
329 /* TODO: support shared queues for same RA */
330 if (queue < 0)
331 return -ENOSPC;
332
333 /*
334 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
335 * but for configuring the SCD to send A-MPDUs we need to mark the queue
336 * as aggregatable.
337 * Mark all DATA queues as allowing to be aggregated at some point
338 */
339 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
340 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
341
342 IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
343 queue, mvmsta->sta_id, tid);
344
345 ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
346 iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
347 wdg_timeout);
348
349 spin_lock_bh(&mvmsta->lock);
350 mvmsta->tid_data[tid].txq_id = queue;
351 mvmsta->tfd_queue_msk |= BIT(queue);
352
353 if (mvmsta->reserved_queue == queue)
354 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
355 spin_unlock_bh(&mvmsta->lock);
356
357 return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
358}
359
360static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
361{
362 if (tid == IWL_MAX_TID_COUNT)
363 return IEEE80211_AC_VO; /* MGMT */
364
365 return tid_to_mac80211_ac[tid];
366}
367
368static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
369 struct ieee80211_sta *sta, int tid)
370{
371 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
372 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
373 struct sk_buff *skb;
374 struct ieee80211_hdr *hdr;
375 struct sk_buff_head deferred_tx;
376 u8 mac_queue;
377 bool no_queue = false; /* Marks if there is a problem with the queue */
378 u8 ac;
379
380 lockdep_assert_held(&mvm->mutex);
381
382 skb = skb_peek(&tid_data->deferred_tx_frames);
383 if (!skb)
384 return;
385 hdr = (void *)skb->data;
386
387 ac = iwl_mvm_tid_to_ac_queue(tid);
388 mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
389
390 if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
391 iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
392 IWL_ERR(mvm,
393 "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
394 mvmsta->sta_id, tid);
395
396 /*
397 * Mark queue as problematic so later the deferred traffic is
398 * freed, as we can do nothing with it
399 */
400 no_queue = true;
401 }
402
403 __skb_queue_head_init(&deferred_tx);
404
405 /* Disable bottom-halves when entering TX path */
406 local_bh_disable();
407 spin_lock(&mvmsta->lock);
408 skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
409 spin_unlock(&mvmsta->lock);
410
411 while ((skb = __skb_dequeue(&deferred_tx)))
412 if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
413 ieee80211_free_txskb(mvm->hw, skb);
414 local_bh_enable();
415
416 /* Wake queue */
417 iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
418}
419
420void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
421{
422 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
423 add_stream_wk);
424 struct ieee80211_sta *sta;
425 struct iwl_mvm_sta *mvmsta;
426 unsigned long deferred_tid_traffic;
427 int sta_id, tid;
428
429 mutex_lock(&mvm->mutex);
430
431 /* Go over all stations with deferred traffic */
432 for_each_set_bit(sta_id, mvm->sta_deferred_frames,
433 IWL_MVM_STATION_COUNT) {
434 clear_bit(sta_id, mvm->sta_deferred_frames);
435 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
436 lockdep_is_held(&mvm->mutex));
437 if (IS_ERR_OR_NULL(sta))
438 continue;
439
440 mvmsta = iwl_mvm_sta_from_mac80211(sta);
441 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
442
443 for_each_set_bit(tid, &deferred_tid_traffic,
444 IWL_MAX_TID_COUNT + 1)
445 iwl_mvm_tx_deferred_stream(mvm, sta, tid);
446 }
447
448 mutex_unlock(&mvm->mutex);
449}
450
451static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
452 struct ieee80211_sta *sta,
453 enum nl80211_iftype vif_type)
454{
455 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
456 int queue;
457
458 spin_lock_bh(&mvm->queue_info_lock);
459
460 /* Make sure we have free resources for this STA */
461 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
462 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
463 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved)
464 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
465 else
466 queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
467 IWL_MVM_DQA_MAX_DATA_QUEUE);
468 if (queue < 0) {
469 spin_unlock_bh(&mvm->queue_info_lock);
470 IWL_ERR(mvm, "No available queues for new station\n");
471 return -ENOSPC;
472 }
473 mvm->queue_info[queue].setup_reserved = true;
474
475 spin_unlock_bh(&mvm->queue_info_lock);
476
477 mvmsta->reserved_queue = queue;
478
479 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
480 queue, mvmsta->sta_id);
481
482 return 0;
483}
484
277int iwl_mvm_add_sta(struct iwl_mvm *mvm, 485int iwl_mvm_add_sta(struct iwl_mvm *mvm,
278 struct ieee80211_vif *vif, 486 struct ieee80211_vif *vif,
279 struct ieee80211_sta *sta) 487 struct ieee80211_sta *sta)
@@ -314,18 +522,29 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
314 ret = iwl_mvm_tdls_sta_init(mvm, sta); 522 ret = iwl_mvm_tdls_sta_init(mvm, sta);
315 if (ret) 523 if (ret)
316 return ret; 524 return ret;
317 } else { 525 } else if (!iwl_mvm_is_dqa_supported(mvm)) {
318 for (i = 0; i < IEEE80211_NUM_ACS; i++) 526 for (i = 0; i < IEEE80211_NUM_ACS; i++)
319 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) 527 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
320 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); 528 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
321 } 529 }
322 530
323 /* for HW restart - reset everything but the sequence number */ 531 /* for HW restart - reset everything but the sequence number */
324 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 532 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
325 u16 seq = mvm_sta->tid_data[i].seq_number; 533 u16 seq = mvm_sta->tid_data[i].seq_number;
326 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); 534 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
327 mvm_sta->tid_data[i].seq_number = seq; 535 mvm_sta->tid_data[i].seq_number = seq;
536
537 if (!iwl_mvm_is_dqa_supported(mvm))
538 continue;
539
540 /*
541 * Mark all queues for this STA as unallocated and defer TX
542 * frames until the queue is allocated
543 */
544 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
545 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
328 } 546 }
547 mvm_sta->deferred_traffic_tid_map = 0;
329 mvm_sta->agg_tids = 0; 548 mvm_sta->agg_tids = 0;
330 549
331 if (iwl_mvm_has_new_rx_api(mvm) && 550 if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -338,7 +557,14 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
338 mvm_sta->dup_data = dup_data; 557 mvm_sta->dup_data = dup_data;
339 } 558 }
340 559
341 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); 560 if (iwl_mvm_is_dqa_supported(mvm)) {
561 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
562 ieee80211_vif_type_p2p(vif));
563 if (ret)
564 goto err;
565 }
566
567 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
342 if (ret) 568 if (ret)
343 goto err; 569 goto err;
344 570
@@ -364,7 +590,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
364 struct ieee80211_vif *vif, 590 struct ieee80211_vif *vif,
365 struct ieee80211_sta *sta) 591 struct ieee80211_sta *sta)
366{ 592{
367 return iwl_mvm_sta_send_to_fw(mvm, sta, true); 593 return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
368} 594}
369 595
370int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 596int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
@@ -509,6 +735,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
509 mutex_unlock(&mvm->mutex); 735 mutex_unlock(&mvm->mutex);
510} 736}
511 737
738static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
739 struct ieee80211_vif *vif,
740 struct iwl_mvm_sta *mvm_sta)
741{
742 int ac;
743 int i;
744
745 lockdep_assert_held(&mvm->mutex);
746
747 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
748 if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
749 continue;
750
751 ac = iwl_mvm_tid_to_ac_queue(i);
752 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
753 vif->hw_queue[ac], i, 0);
754 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
755 }
756}
757
512int iwl_mvm_rm_sta(struct iwl_mvm *mvm, 758int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
513 struct ieee80211_vif *vif, 759 struct ieee80211_vif *vif,
514 struct ieee80211_sta *sta) 760 struct ieee80211_sta *sta)
@@ -537,6 +783,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
537 return ret; 783 return ret;
538 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); 784 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
539 785
786 /* If DQA is supported - the queues can be disabled now */
787 if (iwl_mvm_is_dqa_supported(mvm))
788 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
789
540 /* if we are associated - we can't remove the AP STA now */ 790 /* if we are associated - we can't remove the AP STA now */
541 if (vif->bss_conf.assoc) 791 if (vif->bss_conf.assoc)
542 return ret; 792 return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1a8f69a41405..e3efdcd900f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -7,7 +7,7 @@
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
34 * 34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH 37 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
38 * All rights reserved. 38 * All rights reserved.
39 * 39 *
40 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -80,6 +80,60 @@ struct iwl_mvm;
80struct iwl_mvm_vif; 80struct iwl_mvm_vif;
81 81
82/** 82/**
83 * DOC: DQA - Dynamic Queue Allocation -introduction
84 *
85 * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi
86 * driver to allow dynamic allocation of queues on-demand, rather than allocate
87 * them statically ahead of time. Ideally, we would like to allocate one queue
88 * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
89 * even if it also needs to send traffic to a sleeping STA1, without being
90 * blocked by the sleeping station.
91 *
92 * Although the queues in DQA mode are dynamically allocated, there are still
93 * some queues that are statically allocated:
94 * TXQ #0 - command queue
95 * TXQ #1 - aux frames
96 * TXQ #2 - P2P device frames
97 * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
98 * TXQ #4 - BSS DATA frames queue
99 * TXQ #5-8 - Non-QoS and MGMT frames queue pool
100 * TXQ #9 - P2P GO/SoftAP probe responses
101 * TXQ #10-31 - DATA frames queue pool
102 * The queues are dynamically taken from either the MGMT frames queue pool or
103 * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every
104 * queue.
105 *
106 * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
107 * until a queue is allocated for it, and only then can be TXed. Therefore, it
108 * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
109 * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
110 *
111 * For convenience, MGMT is considered as if it has TID=8, and go to the MGMT
112 * queues in the pool. If there is no longer a free MGMT queue to allocate, a
113 * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
114 * a problem for aggregations, they too will use a MGMT queue.
115 *
116 * When adding a STA, a DATA queue is reserved for it so that it can TX from
117 * it. If no such free queue exists for reserving, the STA addition will fail.
118 *
119 * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
120 * new RA/TID comes in for an existing STA, one of the STA's queues will become
121 * shared and will serve more than the single TID (but always for the same RA!).
122 *
123 * When a RA/TID needs to become aggregated, no new queue is required to be
124 * allocated, only mark the queue as aggregated via the ADD_STA command. Note,
125 * however, that a shared queue cannot be aggregated, and only after the other
126 * TIDs become inactive and are removed - only then can the queue be
127 * reconfigured and become aggregated.
128 *
129 * When removing a station, its queues are returned to the pool for reuse. Here
130 * we also need to make sure that we are synced with the worker thread that TXes
131 * the deferred frames so we don't get into a situation where the queues are
132 * removed and then the worker puts deferred frames onto the released queues or
133 * tries to allocate new queues for a STA we don't need anymore.
134 */
135
136/**
83 * DOC: station table - introduction 137 * DOC: station table - introduction
84 * 138 *
85 * The station table is a list of data structure that reprensent the stations. 139 * The station table is a list of data structure that reprensent the stations.
@@ -253,6 +307,7 @@ enum iwl_mvm_agg_state {
253 307
254/** 308/**
255 * struct iwl_mvm_tid_data - holds the states for each RA / TID 309 * struct iwl_mvm_tid_data - holds the states for each RA / TID
310 * @deferred_tx_frames: deferred TX frames for this RA/TID
256 * @seq_number: the next WiFi sequence number to use 311 * @seq_number: the next WiFi sequence number to use
257 * @next_reclaimed: the WiFi sequence number of the next packet to be acked. 312 * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
258 * This is basically (last acked packet++). 313 * This is basically (last acked packet++).
@@ -260,7 +315,7 @@ enum iwl_mvm_agg_state {
260 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). 315 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
261 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. 316 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
262 * @state: state of the BA agreement establishment / tear down. 317 * @state: state of the BA agreement establishment / tear down.
263 * @txq_id: Tx queue used by the BA session 318 * @txq_id: Tx queue used by the BA session / DQA
264 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or 319 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
265 * the first packet to be sent in legacy HW queue in Tx AGG stop flow. 320 * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
266 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that 321 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -268,6 +323,7 @@ enum iwl_mvm_agg_state {
268 * @tx_time: medium time consumed by this A-MPDU 323 * @tx_time: medium time consumed by this A-MPDU
269 */ 324 */
270struct iwl_mvm_tid_data { 325struct iwl_mvm_tid_data {
326 struct sk_buff_head deferred_tx_frames;
271 u16 seq_number; 327 u16 seq_number;
272 u16 next_reclaimed; 328 u16 next_reclaimed;
273 /* The rest is Tx AGG related */ 329 /* The rest is Tx AGG related */
@@ -316,7 +372,10 @@ struct iwl_mvm_rxq_dup_data {
316 * we need to signal the EOSP 372 * we need to signal the EOSP
317 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx 373 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
318 * and from Tx response flow, it needs a spinlock. 374 * and from Tx response flow, it needs a spinlock.
319 * @tid_data: per tid data. Look at %iwl_mvm_tid_data. 375 * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
376 * @reserved_queue: the queue reserved for this STA for DQA purposes
377 * Every STA has is given one reserved queue to allow it to operate. If no
378 * such queue can be guaranteed, the STA addition will fail.
320 * @tx_protection: reference counter for controlling the Tx protection. 379 * @tx_protection: reference counter for controlling the Tx protection.
321 * @tt_tx_protection: is thermal throttling enable Tx protection? 380 * @tt_tx_protection: is thermal throttling enable Tx protection?
322 * @disable_tx: is tx to this STA disabled? 381 * @disable_tx: is tx to this STA disabled?
@@ -329,6 +388,7 @@ struct iwl_mvm_rxq_dup_data {
329 * the BA window. To be used for UAPSD only. 388 * the BA window. To be used for UAPSD only.
330 * @ptk_pn: per-queue PTK PN data structures 389 * @ptk_pn: per-queue PTK PN data structures
331 * @dup_data: per queue duplicate packet detection data 390 * @dup_data: per queue duplicate packet detection data
391 * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
332 * 392 *
333 * When mac80211 creates a station it reserves some space (hw->sta_data_size) 393 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
334 * in the structure for use by driver. This structure is placed in that 394 * in the structure for use by driver. This structure is placed in that
@@ -345,12 +405,16 @@ struct iwl_mvm_sta {
345 bool bt_reduced_txpower; 405 bool bt_reduced_txpower;
346 bool next_status_eosp; 406 bool next_status_eosp;
347 spinlock_t lock; 407 spinlock_t lock;
348 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; 408 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
349 struct iwl_lq_sta lq_sta; 409 struct iwl_lq_sta lq_sta;
350 struct ieee80211_vif *vif; 410 struct ieee80211_vif *vif;
351 struct iwl_mvm_key_pn __rcu *ptk_pn[4]; 411 struct iwl_mvm_key_pn __rcu *ptk_pn[4];
352 struct iwl_mvm_rxq_dup_data *dup_data; 412 struct iwl_mvm_rxq_dup_data *dup_data;
353 413
414 u16 deferred_traffic_tid_map;
415
416 u8 reserved_queue;
417
354 /* Temporary, until the new TLC will control the Tx protection */ 418 /* Temporary, until the new TLC will control the Tx protection */
355 s8 tx_protection; 419 s8 tx_protection;
356 bool tt_tx_protection; 420 bool tt_tx_protection;
@@ -378,8 +442,18 @@ struct iwl_mvm_int_sta {
378 u32 tfd_queue_msk; 442 u32 tfd_queue_msk;
379}; 443};
380 444
445/**
446 * Send the STA info to the FW.
447 *
448 * @mvm: the iwl_mvm* to use
449 * @sta: the STA
450 * @update: this is true if the FW is being updated about a STA it already knows
451 * about. Otherwise (if this is a new STA), this should be false.
452 * @flags: if update==true, this marks what is being changed via ORs of values
453 * from enum iwl_sta_modify_flag. Otherwise, this is ignored.
454 */
381int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 455int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
382 bool update); 456 bool update, unsigned int flags);
383int iwl_mvm_add_sta(struct iwl_mvm *mvm, 457int iwl_mvm_add_sta(struct iwl_mvm *mvm,
384 struct ieee80211_vif *vif, 458 struct ieee80211_vif *vif,
385 struct ieee80211_sta *sta); 459 struct ieee80211_sta *sta);
@@ -459,5 +533,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
459 struct iwl_mvm_vif *mvmvif, 533 struct iwl_mvm_vif *mvmvif,
460 bool disable); 534 bool disable);
461void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 535void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
536void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
462 537
463#endif /* __sta_h__ */ 538#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f1f28255a3a6..eb3f460ce1b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -204,20 +204,11 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
204 if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS)) 204 if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
205 return; 205 return;
206 206
207 /*
208 * We are now handling a temperature notification from the firmware
209 * in ASYNC and hold the mutex. thermal_notify_framework will call
210 * us back through get_temp() which ought to send a SYNC command to
211 * the firmware and hence to take the mutex.
212 * Avoid the deadlock by unlocking the mutex here.
213 */
214 if (mvm->tz_device.tzone) { 207 if (mvm->tz_device.tzone) {
215 struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device; 208 struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
216 209
217 mutex_unlock(&mvm->mutex);
218 thermal_notify_framework(tz_dev->tzone, 210 thermal_notify_framework(tz_dev->tzone,
219 tz_dev->fw_trips_index[ths_crossed]); 211 tz_dev->fw_trips_index[ths_crossed]);
220 mutex_lock(&mvm->mutex);
221 } 212 }
222#endif /* CONFIG_THERMAL */ 213#endif /* CONFIG_THERMAL */
223} 214}
@@ -796,9 +787,6 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
796{ 787{
797 struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); 788 struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
798 789
799 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
800 return -EBUSY;
801
802 *state = mvm->cooling_dev.cur_state; 790 *state = mvm->cooling_dev.cur_state;
803 791
804 return 0; 792 return 0;
@@ -813,9 +801,6 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
813 if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) 801 if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
814 return -EIO; 802 return -EIO;
815 803
816 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
817 return -EBUSY;
818
819 mutex_lock(&mvm->mutex); 804 mutex_lock(&mvm->mutex);
820 805
821 if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { 806 if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 75870e68a7c3..efb9b98c4c98 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -67,6 +67,7 @@
67#include <linux/etherdevice.h> 67#include <linux/etherdevice.h>
68#include <linux/tcp.h> 68#include <linux/tcp.h>
69#include <net/ip.h> 69#include <net/ip.h>
70#include <net/ipv6.h>
70 71
71#include "iwl-trans.h" 72#include "iwl-trans.h"
72#include "iwl-eeprom-parse.h" 73#include "iwl-eeprom-parse.h"
@@ -98,6 +99,111 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
98 addr, tid, ssn); 99 addr, tid, ssn);
99} 100}
100 101
/* Get a pointer to an option header at offset @off past the network header */
#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

/*
 * Set up HW checksum offload for one TX frame.
 *
 * Fills the offload_assist field of the TX command so the device computes
 * the L4 (TCP/UDP) checksum — and, for IPv4 A-MSDUs, the L3 checksum.
 * Whenever the frame cannot be offloaded (unsupported protocol, unsupported
 * IPv6 extension header, SW-offload module param set), the checksum is
 * computed in software via skb_checksum_help() instead, so the frame is
 * never sent with a missing checksum.
 *
 * Compiled out entirely (no-op) when CONFIG_INET is disabled.
 */
static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_tx_info *info,
			    struct iwl_tx_cmd *tx_cmd)
{
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		return;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		return;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		/*
		 * Walk the IPv6 extension-header chain to find the final
		 * transport protocol; bail to SW checksum on any header
		 * type the HW cannot skip over.
		 */
		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST &&
			    protocol != NEXTHDR_FRAGMENT) {
				skb_checksum_help(skb);
				return;
			}

			if (protocol == NEXTHDR_FRAGMENT) {
				/* frag_hdr has a fixed size, unlike opt_hdr */
				struct frag_hdr *hp =
					OPT_HDR(struct frag_hdr, skb, off);

				protocol = hp->nexthdr;
				off += sizeof(struct frag_hdr);
			} else {
				struct ipv6_opt_hdr *hp =
					OPT_HDR(struct ipv6_opt_hdr, skb, off);

				protocol = hp->nexthdr;
				off += ipv6_optlen(hp);
			}
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	/*
	 * With CONFIG_IPV6 disabled, protocol stays 0 for ETH_P_IPV6
	 * frames and we fall back to SW checksum here as well.
	 */
	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		return;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum so HW starts from a clean value */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/* mac header len should include IV, size is in words */
	if (info->control.hw_key)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

	tx_cmd->offload_assist = cpu_to_le16(offload_assist);
#endif
}
206
101/* 207/*
102 * Sets most of the Tx cmd's fields 208 * Sets most of the Tx cmd's fields
103 */ 209 */
@@ -126,6 +232,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
126 u8 *qc = ieee80211_get_qos_ctl(hdr); 232 u8 *qc = ieee80211_get_qos_ctl(hdr);
127 tx_cmd->tid_tspec = qc[0] & 0xf; 233 tx_cmd->tid_tspec = qc[0] & 0xf;
128 tx_flags &= ~TX_CMD_FLG_SEQ_CTL; 234 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
235 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
236 tx_cmd->offload_assist |=
237 cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
129 } else if (ieee80211_is_back_req(fc)) { 238 } else if (ieee80211_is_back_req(fc)) {
130 struct ieee80211_bar *bar = (void *)skb->data; 239 struct ieee80211_bar *bar = (void *)skb->data;
131 u16 control = le16_to_cpu(bar->control); 240 u16 control = le16_to_cpu(bar->control);
@@ -186,9 +295,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
186 /* Total # bytes to be transmitted */ 295 /* Total # bytes to be transmitted */
187 tx_cmd->len = cpu_to_le16((u16)skb->len + 296 tx_cmd->len = cpu_to_le16((u16)skb->len +
188 (uintptr_t)info->driver_data[0]); 297 (uintptr_t)info->driver_data[0]);
189 tx_cmd->next_frame_len = 0;
190 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); 298 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
191 tx_cmd->sta_id = sta_id; 299 tx_cmd->sta_id = sta_id;
300
301 /* padding is inserted later in transport */
302 if (ieee80211_hdrlen(fc) % 4 &&
303 !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
304 tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
305
306 iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
192} 307}
193 308
194/* 309/*
@@ -459,6 +574,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
459 u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; 574 u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
460 u16 amsdu_add, snap_ip_tcp, pad, i = 0; 575 u16 amsdu_add, snap_ip_tcp, pad, i = 0;
461 unsigned int dbg_max_amsdu_len; 576 unsigned int dbg_max_amsdu_len;
577 netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
462 u8 *qc, tid, txf; 578 u8 *qc, tid, txf;
463 579
464 snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + 580 snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
@@ -478,6 +594,19 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
478 } 594 }
479 595
480 /* 596 /*
597 * Do not build AMSDU for IPv6 with extension headers.
598 * ask stack to segment and checkum the generated MPDUs for us.
599 */
600 if (skb->protocol == htons(ETH_P_IPV6) &&
601 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
602 IPPROTO_TCP) {
603 num_subframes = 1;
604 pad = 0;
605 netdev_features &= ~NETIF_F_CSUM_MASK;
606 goto segment;
607 }
608
609 /*
481 * No need to lock amsdu_in_ampdu_allowed since it can't be modified 610 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
482 * during an BA session. 611 * during an BA session.
483 */ 612 */
@@ -570,7 +699,7 @@ segment:
570 skb_shinfo(skb)->gso_size = num_subframes * mss; 699 skb_shinfo(skb)->gso_size = num_subframes * mss;
571 memcpy(cb, skb->cb, sizeof(cb)); 700 memcpy(cb, skb->cb, sizeof(cb));
572 701
573 next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG); 702 next = skb_gso_segment(skb, netdev_features);
574 skb_shinfo(skb)->gso_size = mss; 703 skb_shinfo(skb)->gso_size = mss;
575 if (WARN_ON_ONCE(IS_ERR(next))) 704 if (WARN_ON_ONCE(IS_ERR(next)))
576 return -EINVAL; 705 return -EINVAL;
@@ -632,6 +761,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
632} 761}
633#endif 762#endif
634 763
764static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
765 struct iwl_mvm_sta *mvm_sta, u8 tid,
766 struct sk_buff *skb)
767{
768 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
769 u8 mac_queue = info->hw_queue;
770 struct sk_buff_head *deferred_tx_frames;
771
772 lockdep_assert_held(&mvm_sta->lock);
773
774 mvm_sta->deferred_traffic_tid_map |= BIT(tid);
775 set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
776
777 deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
778
779 skb_queue_tail(deferred_tx_frames, skb);
780
781 /*
782 * The first deferred frame should've stopped the MAC queues, so we
783 * should never get a second deferred frame for the RA/TID.
784 */
785 if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
786 "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
787 skb_queue_len(deferred_tx_frames))) {
788 iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
789 schedule_work(&mvm->add_stream_wk);
790 }
791}
792
635/* 793/*
636 * Sets the fields in the Tx cmd that are crypto related 794 * Sets the fields in the Tx cmd that are crypto related
637 */ 795 */
@@ -647,7 +805,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
647 u16 seq_number = 0; 805 u16 seq_number = 0;
648 u8 tid = IWL_MAX_TID_COUNT; 806 u8 tid = IWL_MAX_TID_COUNT;
649 u8 txq_id = info->hw_queue; 807 u8 txq_id = info->hw_queue;
650 bool is_data_qos = false, is_ampdu = false; 808 bool is_ampdu = false;
651 int hdrlen; 809 int hdrlen;
652 810
653 mvmsta = iwl_mvm_sta_from_mac80211(sta); 811 mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -687,8 +845,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
687 seq_number &= IEEE80211_SCTL_SEQ; 845 seq_number &= IEEE80211_SCTL_SEQ;
688 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 846 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
689 hdr->seq_ctrl |= cpu_to_le16(seq_number); 847 hdr->seq_ctrl |= cpu_to_le16(seq_number);
690 is_data_qos = true;
691 is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; 848 is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
849 } else if (iwl_mvm_is_dqa_supported(mvm) &&
850 (ieee80211_is_qos_nullfunc(fc) ||
851 ieee80211_is_nullfunc(fc))) {
852 /*
853 * nullfunc frames should go to the MGMT queue regardless of QOS
854 */
855 tid = IWL_MAX_TID_COUNT;
856 txq_id = mvmsta->tid_data[tid].txq_id;
692 } 857 }
693 858
694 /* Copy MAC header from skb into command buffer */ 859 /* Copy MAC header from skb into command buffer */
@@ -709,13 +874,30 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
709 txq_id = mvmsta->tid_data[tid].txq_id; 874 txq_id = mvmsta->tid_data[tid].txq_id;
710 } 875 }
711 876
877 if (iwl_mvm_is_dqa_supported(mvm)) {
878 if (unlikely(mvmsta->tid_data[tid].txq_id ==
879 IEEE80211_INVAL_HW_QUEUE)) {
880 iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
881
882 /*
883 * The frame is now deferred, and the worker scheduled
884 * will re-allocate it, so we can free it for now.
885 */
886 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
887 spin_unlock(&mvmsta->lock);
888 return 0;
889 }
890
891 txq_id = mvmsta->tid_data[tid].txq_id;
892 }
893
712 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, 894 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
713 tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); 895 tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
714 896
715 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) 897 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
716 goto drop_unlock_sta; 898 goto drop_unlock_sta;
717 899
718 if (is_data_qos && !ieee80211_has_morefrags(fc)) 900 if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
719 mvmsta->tid_data[tid].seq_number = seq_number + 0x10; 901 mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
720 902
721 spin_unlock(&mvmsta->lock); 903 spin_unlock(&mvmsta->lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 53cdc5760f68..486c98541afc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -491,98 +491,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
491 IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); 491 IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
492} 492}
493 493
/*
 * Dump the firmware error event table in its old (v1) layout.
 *
 * Reads the error table from device memory at the errlog pointer (taken
 * from mvm->error_event_table, or from the firmware image's per-ucode
 * default when that is 0) and prints each field via IWL_ERR. The output
 * format must not change — external scripts parse it.
 */
static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table_v1 table;
	u32 base;

	base = mvm->error_event_table;
	if (mvm->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	/* Pointers below 0x800000 are outside valid SRAM for the errlog */
	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.data3,
				      table.blink2, table.ilink1, table.ilink2,
				      table.bcon_time, table.gp1, table.gp2,
				      table.gp3, table.ucode_ver, 0,
				      table.hw_ver, table.brd_ver);
	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);

	/* The UMAC keeps its own error table; dump it too when present */
	if (mvm->support_umac_log)
		iwl_mvm_dump_umac_error_log(mvm);
}
574
575void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) 494void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
576{ 495{
577 struct iwl_trans *trans = mvm->trans; 496 struct iwl_trans *trans = mvm->trans;
578 struct iwl_error_event_table table; 497 struct iwl_error_event_table table;
579 u32 base; 498 u32 base;
580 499
581 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
582 iwl_mvm_dump_nic_error_log_old(mvm);
583 return;
584 }
585
586 base = mvm->error_event_table; 500 base = mvm->error_event_table;
587 if (mvm->cur_ucode == IWL_UCODE_INIT) { 501 if (mvm->cur_ucode == IWL_UCODE_INIT) {
588 if (!base) 502 if (!base)
@@ -694,6 +608,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
694 mvm->queue_info[queue].hw_queue_refcount++; 608 mvm->queue_info[queue].hw_queue_refcount++;
695 if (mvm->queue_info[queue].hw_queue_refcount > 1) 609 if (mvm->queue_info[queue].hw_queue_refcount > 1)
696 enable_queue = false; 610 enable_queue = false;
611 else
612 mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
697 mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); 613 mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
698 614
699 IWL_DEBUG_TX_QUEUES(mvm, 615 IWL_DEBUG_TX_QUEUES(mvm,
@@ -779,6 +695,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
779 return; 695 return;
780 } 696 }
781 697
698 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
699
782 /* Make sure queue info is correct even though we overwrite it */ 700 /* Make sure queue info is correct even though we overwrite it */
783 WARN(mvm->queue_info[queue].hw_queue_refcount || 701 WARN(mvm->queue_info[queue].hw_queue_refcount ||
784 mvm->queue_info[queue].tid_bitmap || 702 mvm->queue_info[queue].tid_bitmap ||
@@ -1079,3 +997,74 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1079out: 997out:
1080 ieee80211_connection_loss(vif); 998 ieee80211_connection_loss(vif);
1081} 999}
1000
1001int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
1002 enum iwl_lqm_cmd_operatrions operation,
1003 u32 duration, u32 timeout)
1004{
1005 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
1006 struct iwl_link_qual_msrmnt_cmd cmd = {
1007 .cmd_operation = cpu_to_le32(operation),
1008 .mac_id = cpu_to_le32(mvm_vif->id),
1009 .measurement_time = cpu_to_le32(duration),
1010 .timeout = cpu_to_le32(timeout),
1011 };
1012 u32 cmdid =
1013 iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0);
1014 int ret;
1015
1016 if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa,
1017 IWL_UCODE_TLV_CAPA_LQM_SUPPORT))
1018 return -EOPNOTSUPP;
1019
1020 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
1021 return -EINVAL;
1022
1023 switch (operation) {
1024 case LQM_CMD_OPERATION_START_MEASUREMENT:
1025 if (iwl_mvm_lqm_active(mvm_vif->mvm))
1026 return -EBUSY;
1027 if (!vif->bss_conf.assoc)
1028 return -EINVAL;
1029 mvm_vif->lqm_active = true;
1030 break;
1031 case LQM_CMD_OPERATION_STOP_MEASUREMENT:
1032 if (!iwl_mvm_lqm_active(mvm_vif->mvm))
1033 return -EINVAL;
1034 break;
1035 default:
1036 return -EINVAL;
1037 }
1038
1039 ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd),
1040 &cmd);
1041
1042 /* command failed - roll back lqm_active state */
1043 if (ret) {
1044 mvm_vif->lqm_active =
1045 operation == LQM_CMD_OPERATION_STOP_MEASUREMENT;
1046 }
1047
1048 return ret;
1049}
1050
1051static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac,
1052 struct ieee80211_vif *vif)
1053{
1054 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
1055 bool *lqm_active = _data;
1056
1057 *lqm_active = *lqm_active || mvm_vif->lqm_active;
1058}
1059
1060bool iwl_mvm_lqm_active(struct iwl_mvm *mvm)
1061{
1062 bool ret = false;
1063
1064 lockdep_assert_held(&mvm->mutex);
1065 ieee80211_iterate_active_interfaces_atomic(
1066 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1067 iwl_mvm_lqm_active_iterator, &ret);
1068
1069 return ret;
1070}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 05b968506836..41c6dd5b9ccc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -483,17 +483,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
483 {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, 483 {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
484 484
485/* 9000 Series */ 485/* 9000 Series */
486 {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
487 {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9560_2ac_cfg)},
486 {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, 488 {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
487 {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)}, 489 {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
488 {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)}, 490 {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
489 {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)}, 491 {IWL_PCI_DEVICE(0x2526, 0x0010, iwl5165_2ac_cfg)},
490 {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)}, 492 {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
491 {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)}, 493 {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
492 {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)}, 494 {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
493 {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)}, 495 {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
494 {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)}, 496 {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9560_2ac_cfg)},
495 {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)}, 497 {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9560_2ac_cfg)},
496 {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)}, 498 {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9560_2ac_cfg)},
497#endif /* CONFIG_IWLMVM */ 499#endif /* CONFIG_IWLMVM */
498 500
499 {0} 501 {0}
@@ -651,10 +653,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
651 /* The PCI device starts with a reference taken and we are 653 /* The PCI device starts with a reference taken and we are
652 * supposed to release it here. But to simplify the 654 * supposed to release it here. But to simplify the
653 * interaction with the opmode, we don't do it now, but let 655 * interaction with the opmode, we don't do it now, but let
654 * the opmode release it when it's ready. To account for this 656 * the opmode release it when it's ready.
655 * reference, we start with ref_count set to 1.
656 */ 657 */
657 trans_pcie->ref_count = 1;
658 658
659 return 0; 659 return 0;
660 660
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index dadafbdef9d9..9ce4ec6cab2f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -348,7 +348,7 @@ struct iwl_tso_hdr_page {
348struct iwl_trans_pcie { 348struct iwl_trans_pcie {
349 struct iwl_rxq *rxq; 349 struct iwl_rxq *rxq;
350 struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; 350 struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
351 struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE]; 351 struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
352 struct iwl_rb_allocator rba; 352 struct iwl_rb_allocator rba;
353 struct iwl_trans *trans; 353 struct iwl_trans *trans;
354 struct iwl_drv *drv; 354 struct iwl_drv *drv;
@@ -403,10 +403,6 @@ struct iwl_trans_pcie {
403 bool cmd_hold_nic_awake; 403 bool cmd_hold_nic_awake;
404 bool ref_cmd_in_flight; 404 bool ref_cmd_in_flight;
405 405
406 /* protect ref counter */
407 spinlock_t ref_lock;
408 u32 ref_count;
409
410 dma_addr_t fw_mon_phys; 406 dma_addr_t fw_mon_phys;
411 struct page *fw_mon_page; 407 struct page *fw_mon_page;
412 u32 fw_mon_size; 408 u32 fw_mon_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 4be3c35afd19..7f8a2322cda2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -210,8 +210,12 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
210 if (trans->cfg->mq_rx_supported) 210 if (trans->cfg->mq_rx_supported)
211 iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id), 211 iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
212 rxq->write_actual); 212 rxq->write_actual);
213 else 213 /*
214 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 214 * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to
215 * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will
216 * not wake the NIC.
217 */
218 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
215} 219}
216 220
217static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) 221static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -908,6 +912,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
908 allocator_pool_size = trans->num_rx_queues * 912 allocator_pool_size = trans->num_rx_queues *
909 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); 913 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
910 num_alloc = queue_size + allocator_pool_size; 914 num_alloc = queue_size + allocator_pool_size;
915 BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
916 ARRAY_SIZE(trans_pcie->rx_pool));
911 for (i = 0; i < num_alloc; i++) { 917 for (i = 0; i < num_alloc; i++) {
912 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; 918 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
913 919
@@ -1805,7 +1811,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
1805 struct msix_entry *entry = dev_id; 1811 struct msix_entry *entry = dev_id;
1806 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 1812 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1807 struct iwl_trans *trans = trans_pcie->trans; 1813 struct iwl_trans *trans = trans_pcie->trans;
1808 struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats; 1814 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1809 u32 inta_fh, inta_hw; 1815 u32 inta_fh, inta_hw;
1810 1816
1811 lock_map_acquire(&trans->sync_cmd_lockdep_map); 1817 lock_map_acquire(&trans->sync_cmd_lockdep_map);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e09781..5e1a13e82d60 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1321,6 +1321,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1321 * after this call. 1321 * after this call.
1322 */ 1322 */
1323 iwl_pcie_reset_ict(trans); 1323 iwl_pcie_reset_ict(trans);
1324 iwl_enable_interrupts(trans);
1324 1325
1325 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1326 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1326 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1327 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
@@ -1434,7 +1435,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1434 int ret, i; 1435 int ret, i;
1435 1436
1436 if (trans->cfg->mq_rx_supported) { 1437 if (trans->cfg->mq_rx_supported) {
1437 max_vector = min_t(u32, (num_possible_cpus() + 1), 1438 max_vector = min_t(u32, (num_possible_cpus() + 2),
1438 IWL_MAX_RX_HW_QUEUES); 1439 IWL_MAX_RX_HW_QUEUES);
1439 for (i = 0; i < max_vector; i++) 1440 for (i = 0; i < max_vector; i++)
1440 trans_pcie->msix_entries[i].entry = i; 1441 trans_pcie->msix_entries[i].entry = i;
@@ -1465,7 +1466,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1465 1466
1466 ret = pci_enable_msi(pdev); 1467 ret = pci_enable_msi(pdev);
1467 if (ret) { 1468 if (ret) {
1468 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); 1469 dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
1469 /* enable rfkill interrupt: hw bug w/a */ 1470 /* enable rfkill interrupt: hw bug w/a */
1470 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 1471 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1471 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { 1472 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -1499,8 +1500,8 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1499 IWL_ERR(trans_pcie->trans, 1500 IWL_ERR(trans_pcie->trans,
1500 "Error allocating IRQ %d\n", i); 1501 "Error allocating IRQ %d\n", i);
1501 for (j = 0; j < i; j++) 1502 for (j = 0; j < i; j++)
1502 free_irq(trans_pcie->msix_entries[i].vector, 1503 free_irq(trans_pcie->msix_entries[j].vector,
1503 &trans_pcie->msix_entries[i]); 1504 &trans_pcie->msix_entries[j]);
1504 pci_disable_msix(pdev); 1505 pci_disable_msix(pdev);
1505 return ret; 1506 return ret;
1506 } 1507 }
@@ -1694,6 +1695,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1694 } 1695 }
1695 1696
1696 free_percpu(trans_pcie->tso_hdr_page); 1697 free_percpu(trans_pcie->tso_hdr_page);
1698 mutex_destroy(&trans_pcie->mutex);
1697 iwl_trans_free(trans); 1699 iwl_trans_free(trans);
1698} 1700}
1699 1701
@@ -2014,38 +2016,32 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2014void iwl_trans_pcie_ref(struct iwl_trans *trans) 2016void iwl_trans_pcie_ref(struct iwl_trans *trans)
2015{ 2017{
2016 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2018 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2017 unsigned long flags;
2018 2019
2019 if (iwlwifi_mod_params.d0i3_disable) 2020 if (iwlwifi_mod_params.d0i3_disable)
2020 return; 2021 return;
2021 2022
2022 spin_lock_irqsave(&trans_pcie->ref_lock, flags);
2023 IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
2024 trans_pcie->ref_count++;
2025 pm_runtime_get(&trans_pcie->pci_dev->dev); 2023 pm_runtime_get(&trans_pcie->pci_dev->dev);
2026 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); 2024
2025#ifdef CONFIG_PM
2026 IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
2027 atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
2028#endif /* CONFIG_PM */
2027} 2029}
2028 2030
2029void iwl_trans_pcie_unref(struct iwl_trans *trans) 2031void iwl_trans_pcie_unref(struct iwl_trans *trans)
2030{ 2032{
2031 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2033 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2032 unsigned long flags;
2033 2034
2034 if (iwlwifi_mod_params.d0i3_disable) 2035 if (iwlwifi_mod_params.d0i3_disable)
2035 return; 2036 return;
2036 2037
2037 spin_lock_irqsave(&trans_pcie->ref_lock, flags);
2038 IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
2039 if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
2040 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
2041 return;
2042 }
2043 trans_pcie->ref_count--;
2044
2045 pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); 2038 pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
2046 pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); 2039 pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
2047 2040
2048 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); 2041#ifdef CONFIG_PM
2042 IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
2043 atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
2044#endif /* CONFIG_PM */
2049} 2045}
2050 2046
2051static const char *get_csr_string(int cmd) 2047static const char *get_csr_string(int cmd)
@@ -2793,7 +2789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2793 trans_pcie->trans = trans; 2789 trans_pcie->trans = trans;
2794 spin_lock_init(&trans_pcie->irq_lock); 2790 spin_lock_init(&trans_pcie->irq_lock);
2795 spin_lock_init(&trans_pcie->reg_lock); 2791 spin_lock_init(&trans_pcie->reg_lock);
2796 spin_lock_init(&trans_pcie->ref_lock);
2797 mutex_init(&trans_pcie->mutex); 2792 mutex_init(&trans_pcie->mutex);
2798 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2793 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
2799 trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); 2794 trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 16ad820ca824..e1f7a3febb50 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -596,6 +596,28 @@ static void iwl_pcie_free_tso_page(struct sk_buff *skb)
596 } 596 }
597} 597}
598 598
599static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
600{
601 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
602
603 lockdep_assert_held(&trans_pcie->reg_lock);
604
605 if (trans_pcie->ref_cmd_in_flight) {
606 trans_pcie->ref_cmd_in_flight = false;
607 IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
608 iwl_trans_pcie_unref(trans);
609 }
610
611 if (!trans->cfg->base_params->apmg_wake_up_wa)
612 return;
613 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
614 return;
615
616 trans_pcie->cmd_hold_nic_awake = false;
617 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
618 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
619}
620
599/* 621/*
600 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 622 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
601 */ 623 */
@@ -620,6 +642,20 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
620 } 642 }
621 iwl_pcie_txq_free_tfd(trans, txq); 643 iwl_pcie_txq_free_tfd(trans, txq);
622 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); 644 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
645
646 if (q->read_ptr == q->write_ptr) {
647 unsigned long flags;
648
649 spin_lock_irqsave(&trans_pcie->reg_lock, flags);
650 if (txq_id != trans_pcie->cmd_queue) {
651 IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
652 q->id);
653 iwl_trans_pcie_unref(trans);
654 } else {
655 iwl_pcie_clear_cmd_in_flight(trans);
656 }
657 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
658 }
623 } 659 }
624 txq->active = false; 660 txq->active = false;
625 661
@@ -1148,29 +1184,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1148 return 0; 1184 return 0;
1149} 1185}
1150 1186
1151static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
1152{
1153 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1154
1155 lockdep_assert_held(&trans_pcie->reg_lock);
1156
1157 if (trans_pcie->ref_cmd_in_flight) {
1158 trans_pcie->ref_cmd_in_flight = false;
1159 IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
1160 iwl_trans_pcie_unref(trans);
1161 }
1162
1163 if (trans->cfg->base_params->apmg_wake_up_wa) {
1164 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
1165 return 0;
1166
1167 trans_pcie->cmd_hold_nic_awake = false;
1168 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1169 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1170 }
1171 return 0;
1172}
1173
1174/* 1187/*
1175 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd 1188 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
1176 * 1189 *
@@ -2197,6 +2210,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2197 __le16 fc; 2210 __le16 fc;
2198 u8 hdr_len; 2211 u8 hdr_len;
2199 u16 wifi_seq; 2212 u16 wifi_seq;
2213 bool amsdu;
2200 2214
2201 txq = &trans_pcie->txq[txq_id]; 2215 txq = &trans_pcie->txq[txq_id];
2202 q = &txq->q; 2216 q = &txq->q;
@@ -2288,11 +2302,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2288 */ 2302 */
2289 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + 2303 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
2290 hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; 2304 hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
2291 tb1_len = ALIGN(len, 4); 2305 /* do not align A-MSDU to dword as the subframe header aligns it */
2292 2306 amsdu = ieee80211_is_data_qos(fc) &&
2293 /* Tell NIC about any 2-byte padding after MAC header */ 2307 (*ieee80211_get_qos_ctl(hdr) &
2294 if (tb1_len != len) 2308 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
2295 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 2309 if (trans_pcie->sw_csum_tx || !amsdu) {
2310 tb1_len = ALIGN(len, 4);
2311 /* Tell NIC about any 2-byte padding after MAC header */
2312 if (tb1_len != len)
2313 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2314 } else {
2315 tb1_len = len;
2316 }
2296 2317
2297 /* The first TB points to the scratchbuf data - min_copy bytes */ 2318 /* The first TB points to the scratchbuf data - min_copy bytes */
2298 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, 2319 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
@@ -2310,8 +2331,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2310 goto out_err; 2331 goto out_err;
2311 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); 2332 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
2312 2333
2313 if (ieee80211_is_data_qos(fc) && 2334 if (amsdu) {
2314 (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) {
2315 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, 2335 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
2316 out_meta, dev_cmd, 2336 out_meta, dev_cmd,
2317 tb1_len))) 2337 tb1_len)))