author    Kalle Valo <kvalo@codeaurora.org>  2015-11-17 13:57:38 -0500
committer Kalle Valo <kvalo@codeaurora.org>  2015-11-18 07:28:30 -0500
commit    e705c12146aa9c69ca498d4ebb83ba7138f9b41f (patch)
tree      b55d4eb7a83c2ec117f460684eb71c89eee6a709 /drivers/net/wireless/iwlwifi/mvm
parent    7ac9a364c1721a863ecc6cc9aba66e10114908db (diff)

iwlwifi: move under intel vendor directory
Part of reorganising wireless drivers directory and Kconfig.

Signed-off-by: Kalle Valo <kvalo@codeaurora.org>

Diffstat (limited to 'drivers/net/wireless/iwlwifi/mvm')
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/Makefile | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/binding.c | 211
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/coex.c | 1005
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 1315
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/constants.h | 139
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/d3.c | 2104
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | 1483
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/debugfs.c | 1516
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/debugfs.h | 103
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h | 476
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 425
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h | 387
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 467
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h | 389
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h | 238
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 730
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h | 414
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h | 284
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h | 386
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h | 646
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api.h | 1773
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw.c | 1166
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/led.c | 136
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 1452
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 4260
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mvm.h | 1535
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/nvm.c | 864
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/offloading.c | 217
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/ops.c | 1434
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c | 295
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/power.c | 1040
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/quota.c | 328
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/rs.c | 3983
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/rs.h | 392
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/rx.c | 612
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/scan.c | 1552
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/sf.c | 340
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/sta.c | 1810
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/sta.h | 426
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tdls.c | 732
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/testmode.h | 97
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/time-event.c | 872
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/time-event.h | 249
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tof.c | 306
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tof.h | 94
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tt.c | 460
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tx.c | 1115
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/utils.c | 1083
48 files changed, 0 insertions, 41353 deletions
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
deleted file mode 100644
index 8c2c3d13b092..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
1obj-$(CONFIG_IWLMVM) += iwlmvm.o
2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
4iwlmvm-y += scan.o time-event.o rs.o
5iwlmvm-y += power.o coex.o coex_legacy.o
6iwlmvm-y += tt.o offloading.o tdls.o
7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
8iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
9iwlmvm-y += tof.o
10iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
11
12ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
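
Note: the -D__CHECK_ENDIAN__ flag in the Makefile above enables sparse's endianness checking for the __le32/__le64 fields used throughout the files in this diff, where every firmware command field goes through cpu_to_le32()/cpu_to_le64() before being sent. A minimal user-space sketch of that pattern, with stand-in typedefs and a stand-in conversion helper (the kernel's <linux/types.h> and byteorder headers are assumed, not included here):

/* Sketch only: __le32 and cpu_to_le32() are user-space stand-ins for the
 * kernel definitions; the kernel tags __le32 as __bitwise so sparse can
 * flag any field that is assigned without a conversion. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t __le32;

static __le32 cpu_to_le32(uint32_t v)
{
	/* store the bytes in little-endian order regardless of host endianness */
	unsigned char b[4] = { v & 0xff, (v >> 8) & 0xff,
			       (v >> 16) & 0xff, (v >> 24) & 0xff };
	__le32 out;

	memcpy(&out, b, sizeof(out));
	return out;
}

struct example_cmd {
	__le32 id_and_color;	/* firmware expects every field little-endian */
};

int main(void)
{
	struct example_cmd cmd = { .id_and_color = cpu_to_le32(0x0102) };
	unsigned char raw[4];

	memcpy(raw, &cmd.id_and_color, sizeof(raw));
	/* prints "02 01 00 00" on both little- and big-endian hosts */
	printf("wire bytes: %02x %02x %02x %02x\n",
	       raw[0], raw[1], raw[2], raw[3]);
	return 0;
}
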
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
deleted file mode 100644
index a1376539d2dc..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/binding.c
+++ /dev/null
@@ -1,211 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <net/mac80211.h>
65#include "fw-api.h"
66#include "mvm.h"
67
68struct iwl_mvm_iface_iterator_data {
69 struct ieee80211_vif *ignore_vif;
70 int idx;
71
72 struct iwl_mvm_phy_ctxt *phyctxt;
73
74 u16 ids[MAX_MACS_IN_BINDING];
75 u16 colors[MAX_MACS_IN_BINDING];
76};
77
78static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
79 struct iwl_mvm_iface_iterator_data *data)
80{
81 struct iwl_binding_cmd cmd;
82 struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
83 int i, ret;
84 u32 status;
85
86 memset(&cmd, 0, sizeof(cmd));
87
88 cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
89 phyctxt->color));
90 cmd.action = cpu_to_le32(action);
91 cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
92 phyctxt->color));
93
94 for (i = 0; i < MAX_MACS_IN_BINDING; i++)
95 cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
96 for (i = 0; i < data->idx; i++)
97 cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i],
98 data->colors[i]));
99
100 status = 0;
101 ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
102 sizeof(cmd), &cmd, &status);
103 if (ret) {
104 IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
105 action, ret);
106 return ret;
107 }
108
109 if (status) {
110 IWL_ERR(mvm, "Binding command failed: %u\n", status);
111 ret = -EIO;
112 }
113
114 return ret;
115}
116
117static void iwl_mvm_iface_iterator(void *_data, u8 *mac,
118 struct ieee80211_vif *vif)
119{
120 struct iwl_mvm_iface_iterator_data *data = _data;
121 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
122
123 if (vif == data->ignore_vif)
124 return;
125
126 if (mvmvif->phy_ctxt != data->phyctxt)
127 return;
128
129 if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING))
130 return;
131
132 data->ids[data->idx] = mvmvif->id;
133 data->colors[data->idx] = mvmvif->color;
134 data->idx++;
135}
136
137static int iwl_mvm_binding_update(struct iwl_mvm *mvm,
138 struct ieee80211_vif *vif,
139 struct iwl_mvm_phy_ctxt *phyctxt,
140 bool add)
141{
142 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
143 struct iwl_mvm_iface_iterator_data data = {
144 .ignore_vif = vif,
145 .phyctxt = phyctxt,
146 };
147 u32 action = FW_CTXT_ACTION_MODIFY;
148
149 lockdep_assert_held(&mvm->mutex);
150
151 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
152 IEEE80211_IFACE_ITER_NORMAL,
153 iwl_mvm_iface_iterator,
154 &data);
155
156 /*
157 * If there are no other interfaces yet we
158 * need to create a new binding.
159 */
160 if (data.idx == 0) {
161 if (add)
162 action = FW_CTXT_ACTION_ADD;
163 else
164 action = FW_CTXT_ACTION_REMOVE;
165 }
166
167 if (add) {
168 if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING))
169 return -EINVAL;
170
171 data.ids[data.idx] = mvmvif->id;
172 data.colors[data.idx] = mvmvif->color;
173 data.idx++;
174 }
175
176 return iwl_mvm_binding_cmd(mvm, action, &data);
177}
178
179int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
180{
181 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
182
183 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
184 return -EINVAL;
185
186 /*
187 * Update SF - Disable if needed. if this fails, SF might still be on
188 * while many macs are bound, which is forbidden - so fail the binding.
189 */
190 if (iwl_mvm_sf_update(mvm, vif, false))
191 return -EINVAL;
192
193 return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
194}
195
196int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
197{
198 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
199 int ret;
200
201 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
202 return -EINVAL;
203
204 ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
205
206 if (!ret)
207 if (iwl_mvm_sf_update(mvm, vif, true))
208 IWL_ERR(mvm, "Failed to update SF state\n");
209
210 return ret;
211}
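
Note: iwl_mvm_binding_cmd() above packs each MAC context (and the PHY context) into a single 32-bit "id and color" word and pads the unused macs[] slots with FW_CTXT_INVALID. A stand-alone sketch of that packing follows; the macro definitions, the value of MAX_MACS_IN_BINDING, and the bit layout (id in bits 0-7, color in bits 8-15) are assumptions standing in for the driver's fw-api.h, and the cpu_to_le32() conversion is left out for brevity:

/* Illustrative stand-ins for the driver's fw-api.h definitions. */
#include <stdint.h>
#include <stdio.h>

#define MAX_MACS_IN_BINDING	3		/* assumed value */
#define FW_CTXT_INVALID		0xffffffffu
#define FW_CMD_ID_AND_COLOR(id, color) \
	(((uint32_t)(id)) | ((uint32_t)(color) << 8))	/* assumed layout */

struct binding_cmd_sketch {
	uint32_t id_and_color;			/* PHY context id/color */
	uint32_t macs[MAX_MACS_IN_BINDING];	/* bound MAC contexts */
};

int main(void)
{
	struct binding_cmd_sketch cmd = {
		.id_and_color = FW_CMD_ID_AND_COLOR(1, 0),	/* PHY ctxt 1 */
	};
	uint16_t ids[] = { 4 }, colors[] = { 2 };	/* one bound MAC */
	int idx = 1, i;

	/* mirror iwl_mvm_binding_cmd(): invalidate all slots, then fill idx */
	for (i = 0; i < MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = FW_CTXT_INVALID;
	for (i = 0; i < idx; i++)
		cmd.macs[i] = FW_CMD_ID_AND_COLOR(ids[i], colors[i]);

	for (i = 0; i < MAX_MACS_IN_BINDING; i++)
		printf("macs[%d] = 0x%08x\n", i, (unsigned)cmd.macs[i]);
	return 0;	/* prints 0x00000204, then 0xffffffff twice */
}
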
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
deleted file mode 100644
index e290ac67d975..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ /dev/null
@@ -1,1005 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/ieee80211.h>
67#include <linux/etherdevice.h>
68#include <net/mac80211.h>
69
70#include "fw-api-coex.h"
71#include "iwl-modparams.h"
72#include "mvm.h"
73#include "iwl-debug.h"
74
75/* 20MHz / 40MHz below / 40Mhz above*/
76static const __le64 iwl_ci_mask[][3] = {
77 /* dummy entry for channel 0 */
78 {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
79 {
80 cpu_to_le64(0x0000001FFFULL),
81 cpu_to_le64(0x0ULL),
82 cpu_to_le64(0x00007FFFFFULL),
83 },
84 {
85 cpu_to_le64(0x000000FFFFULL),
86 cpu_to_le64(0x0ULL),
87 cpu_to_le64(0x0003FFFFFFULL),
88 },
89 {
90 cpu_to_le64(0x000003FFFCULL),
91 cpu_to_le64(0x0ULL),
92 cpu_to_le64(0x000FFFFFFCULL),
93 },
94 {
95 cpu_to_le64(0x00001FFFE0ULL),
96 cpu_to_le64(0x0ULL),
97 cpu_to_le64(0x007FFFFFE0ULL),
98 },
99 {
100 cpu_to_le64(0x00007FFF80ULL),
101 cpu_to_le64(0x00007FFFFFULL),
102 cpu_to_le64(0x01FFFFFF80ULL),
103 },
104 {
105 cpu_to_le64(0x0003FFFC00ULL),
106 cpu_to_le64(0x0003FFFFFFULL),
107 cpu_to_le64(0x0FFFFFFC00ULL),
108 },
109 {
110 cpu_to_le64(0x000FFFF000ULL),
111 cpu_to_le64(0x000FFFFFFCULL),
112 cpu_to_le64(0x3FFFFFF000ULL),
113 },
114 {
115 cpu_to_le64(0x007FFF8000ULL),
116 cpu_to_le64(0x007FFFFFE0ULL),
117 cpu_to_le64(0xFFFFFF8000ULL),
118 },
119 {
120 cpu_to_le64(0x01FFFE0000ULL),
121 cpu_to_le64(0x01FFFFFF80ULL),
122 cpu_to_le64(0xFFFFFE0000ULL),
123 },
124 {
125 cpu_to_le64(0x0FFFF00000ULL),
126 cpu_to_le64(0x0FFFFFFC00ULL),
127 cpu_to_le64(0x0ULL),
128 },
129 {
130 cpu_to_le64(0x3FFFC00000ULL),
131 cpu_to_le64(0x3FFFFFF000ULL),
132 cpu_to_le64(0x0)
133 },
134 {
135 cpu_to_le64(0xFFFE000000ULL),
136 cpu_to_le64(0xFFFFFF8000ULL),
137 cpu_to_le64(0x0)
138 },
139 {
140 cpu_to_le64(0xFFF8000000ULL),
141 cpu_to_le64(0xFFFFFE0000ULL),
142 cpu_to_le64(0x0)
143 },
144 {
145 cpu_to_le64(0xFFC0000000ULL),
146 cpu_to_le64(0x0ULL),
147 cpu_to_le64(0x0ULL)
148 },
149};
150
151struct corunning_block_luts {
152 u8 range;
153 __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
154};
155
156/*
157 * Ranges for the antenna coupling calibration / co-running block LUT:
158 * LUT0: [ 0, 12[
159 * LUT1: [12, 20[
160 * LUT2: [20, 21[
161 * LUT3: [21, 23[
162 * LUT4: [23, 27[
163 * LUT5: [27, 30[
164 * LUT6: [30, 32[
165 * LUT7: [32, 33[
166 * LUT8: [33, - [
167 */
168static const struct corunning_block_luts antenna_coupling_ranges[] = {
169 {
170 .range = 0,
171 .lut20 = {
172 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
173 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
174 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
175 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
176 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
177 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
178 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
179 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
180 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
181 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
182 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
183 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
184 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
185 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
186 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
187 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
188 },
189 },
190 {
191 .range = 12,
192 .lut20 = {
193 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
194 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
195 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
196 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
197 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
198 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
199 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
200 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
201 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
202 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
203 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
204 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
205 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
206 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
207 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
208 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
209 },
210 },
211 {
212 .range = 20,
213 .lut20 = {
214 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
215 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
216 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
217 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
218 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
219 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
220 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
221 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
222 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
223 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
224 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
225 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
226 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
227 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
228 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
229 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
230 },
231 },
232 {
233 .range = 21,
234 .lut20 = {
235 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
236 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
237 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
238 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
239 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
240 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
241 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
242 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
243 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
244 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
245 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
246 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
247 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
248 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
249 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
250 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
251 },
252 },
253 {
254 .range = 23,
255 .lut20 = {
256 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
257 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
258 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
259 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
260 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
261 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
262 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
263 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
264 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
265 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
266 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
267 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
268 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
269 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
270 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
271 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
272 },
273 },
274 {
275 .range = 27,
276 .lut20 = {
277 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
278 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
279 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
280 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
281 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
282 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
283 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
284 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
285 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
286 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
287 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
288 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
289 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
290 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
291 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
292 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
293 },
294 },
295 {
296 .range = 30,
297 .lut20 = {
298 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
299 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
300 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
301 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
302 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
303 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
304 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
305 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
306 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
307 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
308 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
309 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
310 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
311 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
312 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
313 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
314 },
315 },
316 {
317 .range = 32,
318 .lut20 = {
319 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
320 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
321 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
322 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
323 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
324 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
325 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
326 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
327 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
328 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
329 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
330 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
331 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
332 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
333 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
334 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
335 },
336 },
337 {
338 .range = 33,
339 .lut20 = {
340 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
341 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
342 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
343 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
344 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
345 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
346 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
347 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
348 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
349 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
350 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
351 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
352 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
353 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
354 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
355 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
356 },
357 },
358};
359
360static enum iwl_bt_coex_lut_type
361iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
362{
363 struct ieee80211_chanctx_conf *chanctx_conf;
364 enum iwl_bt_coex_lut_type ret;
365 u16 phy_ctx_id;
366 u32 primary_ch_phy_id, secondary_ch_phy_id;
367
368 /*
369 * Checking that we hold mvm->mutex is a good idea, but the rate
370 * control can't acquire the mutex since it runs in Tx path.
371 * So this is racy in that case, but in the worst case, the AMPDU
372 * size limit will be wrong for a short time which is not a big
373 * issue.
374 */
375
376 rcu_read_lock();
377
378 chanctx_conf = rcu_dereference(vif->chanctx_conf);
379
380 if (!chanctx_conf ||
381 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
382 rcu_read_unlock();
383 return BT_COEX_INVALID_LUT;
384 }
385
386 ret = BT_COEX_TX_DIS_LUT;
387
388 if (mvm->cfg->bt_shared_single_ant) {
389 rcu_read_unlock();
390 return ret;
391 }
392
393 phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
394 primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
395 secondary_ch_phy_id =
396 le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id);
397
398 if (primary_ch_phy_id == phy_ctx_id)
399 ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
400 else if (secondary_ch_phy_id == phy_ctx_id)
401 ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
402 /* else - default = TX TX disallowed */
403
404 rcu_read_unlock();
405
406 return ret;
407}
408
409int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
410{
411 struct iwl_bt_coex_cmd bt_cmd = {};
412 u32 mode;
413
414 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
415 return iwl_send_bt_init_conf_old(mvm);
416
417 lockdep_assert_held(&mvm->mutex);
418
419 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
420 switch (mvm->bt_force_ant_mode) {
421 case BT_FORCE_ANT_BT:
422 mode = BT_COEX_BT;
423 break;
424 case BT_FORCE_ANT_WIFI:
425 mode = BT_COEX_WIFI;
426 break;
427 default:
428 WARN_ON(1);
429 mode = 0;
430 }
431
432 bt_cmd.mode = cpu_to_le32(mode);
433 goto send_cmd;
434 }
435
436 mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
437 bt_cmd.mode = cpu_to_le32(mode);
438
439 if (IWL_MVM_BT_COEX_SYNC2SCO)
440 bt_cmd.enabled_modules |=
441 cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
442
443 if (iwl_mvm_bt_is_plcr_supported(mvm))
444 bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
445
446 if (IWL_MVM_BT_COEX_MPLUT) {
447 bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
448 bt_cmd.enabled_modules |=
449 cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
450 }
451
452 bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
453
454send_cmd:
455 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
456 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
457
458 return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
459}
460
461static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
462 bool enable)
463{
464 struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
465 struct iwl_mvm_sta *mvmsta;
466 u32 value;
467 int ret;
468
469 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
470 if (!mvmsta)
471 return 0;
472
473 /* nothing to do */
474 if (mvmsta->bt_reduced_txpower == enable)
475 return 0;
476
477 value = mvmsta->sta_id;
478
479 if (enable)
480 value |= BT_REDUCED_TX_POWER_BIT;
481
482 IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
483 enable ? "en" : "dis", sta_id);
484
485 cmd.reduced_txp = cpu_to_le32(value);
486 mvmsta->bt_reduced_txpower = enable;
487
488 ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC,
489 sizeof(cmd), &cmd);
490
491 return ret;
492}
493
494struct iwl_bt_iterator_data {
495 struct iwl_bt_coex_profile_notif *notif;
496 struct iwl_mvm *mvm;
497 struct ieee80211_chanctx_conf *primary;
498 struct ieee80211_chanctx_conf *secondary;
499 bool primary_ll;
500};
501
502static inline
503void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
504 struct ieee80211_vif *vif,
505 bool enable, int rssi)
506{
507 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
508
509 mvmvif->bf_data.last_bt_coex_event = rssi;
510 mvmvif->bf_data.bt_coex_max_thold =
511 enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
512 mvmvif->bf_data.bt_coex_min_thold =
513 enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
514}
515
516/* must be called under rcu_read_lock */
517static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
518 struct ieee80211_vif *vif)
519{
520 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
521 struct iwl_bt_iterator_data *data = _data;
522 struct iwl_mvm *mvm = data->mvm;
523 struct ieee80211_chanctx_conf *chanctx_conf;
524 /* default smps_mode is AUTOMATIC - only used for client modes */
525 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
526 u32 bt_activity_grading;
527 int ave_rssi;
528
529 lockdep_assert_held(&mvm->mutex);
530
531 switch (vif->type) {
532 case NL80211_IFTYPE_STATION:
533 break;
534 case NL80211_IFTYPE_AP:
535 if (!mvmvif->ap_ibss_active)
536 return;
537 break;
538 default:
539 return;
540 }
541
542 chanctx_conf = rcu_dereference(vif->chanctx_conf);
543
544 /* If channel context is invalid or not on 2.4GHz .. */
545 if ((!chanctx_conf ||
546 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
547 if (vif->type == NL80211_IFTYPE_STATION) {
548 /* ... relax constraints and disable rssi events */
549 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
550 smps_mode);
551 iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
552 false);
553 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
554 }
555 return;
556 }
557
558 bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
559 if (bt_activity_grading >= BT_HIGH_TRAFFIC)
560 smps_mode = IEEE80211_SMPS_STATIC;
561 else if (bt_activity_grading >= BT_LOW_TRAFFIC)
562 smps_mode = IEEE80211_SMPS_DYNAMIC;
563
564 /* relax SMPS constraints for next association */
565 if (!vif->bss_conf.assoc)
566 smps_mode = IEEE80211_SMPS_AUTOMATIC;
567
568 if (mvmvif->phy_ctxt &&
569 IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
570 mvmvif->phy_ctxt->id))
571 smps_mode = IEEE80211_SMPS_AUTOMATIC;
572
573 IWL_DEBUG_COEX(data->mvm,
574 "mac %d: bt_activity_grading %d smps_req %d\n",
575 mvmvif->id, bt_activity_grading, smps_mode);
576
577 if (vif->type == NL80211_IFTYPE_STATION)
578 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
579 smps_mode);
580
581 /* low latency is always primary */
582 if (iwl_mvm_vif_low_latency(mvmvif)) {
583 data->primary_ll = true;
584
585 data->secondary = data->primary;
586 data->primary = chanctx_conf;
587 }
588
589 if (vif->type == NL80211_IFTYPE_AP) {
590 if (!mvmvif->ap_ibss_active)
591 return;
592
593 if (chanctx_conf == data->primary)
594 return;
595
596 if (!data->primary_ll) {
597 /*
598 * downgrade the current primary no matter what its
599 * type is.
600 */
601 data->secondary = data->primary;
602 data->primary = chanctx_conf;
603 } else {
604 /* there is low latency vif - we will be secondary */
605 data->secondary = chanctx_conf;
606 }
607 return;
608 }
609
610 /*
611 * STA / P2P Client, try to be primary if first vif. If we are in low
612 * latency mode, we are already in primary and just don't do much
613 */
614 if (!data->primary || data->primary == chanctx_conf)
615 data->primary = chanctx_conf;
616 else if (!data->secondary)
617 /* if secondary is not NULL, it might be a GO */
618 data->secondary = chanctx_conf;
619
620 /*
621 * don't reduce the Tx power if one of these is true:
622 * we are in LOOSE
623 * single share antenna product
624 * BT is active
625 * we are associated
626 */
627 if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
628 mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
629 le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
630 iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
631 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
632 return;
633 }
634
635 /* try to get the avg rssi from fw */
636 ave_rssi = mvmvif->bf_data.ave_beacon_signal;
637
638 /* if the RSSI isn't valid, fake it is very low */
639 if (!ave_rssi)
640 ave_rssi = -100;
641 if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
642 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
643 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
644 } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
645 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
646 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
647 }
648
649 /* Begin to monitor the RSSI: it may influence the reduced Tx power */
650 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
651}
652
653static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
654{
655 struct iwl_bt_iterator_data data = {
656 .mvm = mvm,
657 .notif = &mvm->last_bt_notif,
658 };
659 struct iwl_bt_coex_ci_cmd cmd = {};
660 u8 ci_bw_idx;
661
662 /* Ignore updates if we are in force mode */
663 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
664 return;
665
666 rcu_read_lock();
667 ieee80211_iterate_active_interfaces_atomic(
668 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
669 iwl_mvm_bt_notif_iterator, &data);
670
671 if (data.primary) {
672 struct ieee80211_chanctx_conf *chan = data.primary;
673 if (WARN_ON(!chan->def.chan)) {
674 rcu_read_unlock();
675 return;
676 }
677
678 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
679 ci_bw_idx = 0;
680 } else {
681 if (chan->def.center_freq1 >
682 chan->def.chan->center_freq)
683 ci_bw_idx = 2;
684 else
685 ci_bw_idx = 1;
686 }
687
688 cmd.bt_primary_ci =
689 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
690 cmd.primary_ch_phy_id =
691 cpu_to_le32(*((u16 *)data.primary->drv_priv));
692 }
693
694 if (data.secondary) {
695 struct ieee80211_chanctx_conf *chan = data.secondary;
696 if (WARN_ON(!data.secondary->def.chan)) {
697 rcu_read_unlock();
698 return;
699 }
700
701 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
702 ci_bw_idx = 0;
703 } else {
704 if (chan->def.center_freq1 >
705 chan->def.chan->center_freq)
706 ci_bw_idx = 2;
707 else
708 ci_bw_idx = 1;
709 }
710
711 cmd.bt_secondary_ci =
712 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
713 cmd.secondary_ch_phy_id =
714 cpu_to_le32(*((u16 *)data.secondary->drv_priv));
715 }
716
717 rcu_read_unlock();
718
719 /* Don't spam the fw with the same command over and over */
720 if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
721 if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
722 sizeof(cmd), &cmd))
723 IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
724 memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
725 }
726}
727
728void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
729 struct iwl_rx_cmd_buffer *rxb)
730{
731 struct iwl_rx_packet *pkt = rxb_addr(rxb);
732 struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
733
734 if (!fw_has_api(&mvm->fw->ucode_capa,
735 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
736 iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
737 return;
738 }
739
740 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
741 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
742 IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
743 le32_to_cpu(notif->primary_ch_lut));
744 IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
745 le32_to_cpu(notif->secondary_ch_lut));
746 IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
747 le32_to_cpu(notif->bt_activity_grading));
748
749 /* remember this notification for future use: rssi fluctuations */
750 memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
751
752 iwl_mvm_bt_coex_notif_handle(mvm);
753}
754
755void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
756 enum ieee80211_rssi_event_data rssi_event)
757{
758 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
759 int ret;
760
761 if (!fw_has_api(&mvm->fw->ucode_capa,
762 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
763 iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
764 return;
765 }
766
767 lockdep_assert_held(&mvm->mutex);
768
769 /* Ignore updates if we are in force mode */
770 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
771 return;
772
773 /*
774 * Rssi update while not associated - can happen since the statistics
775 * are handled asynchronously
776 */
777 if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
778 return;
779
780 /* No BT - reports should be disabled */
781 if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF)
782 return;
783
784 IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
785 rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
786
787 /*
788 * Check if rssi is good enough for reduced Tx power, but not in loose
789 * scheme.
790 */
791 if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
792 iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
793 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
794 false);
795 else
796 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
797
798 if (ret)
799 IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
800}
801
802#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
803#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
804
805u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
806 struct ieee80211_sta *sta)
807{
808 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
809 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
810 struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
811 enum iwl_bt_coex_lut_type lut_type;
812
813 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
814 return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
815
816 if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
817 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
818
819 if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
820 BT_HIGH_TRAFFIC)
821 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
822
823 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
824
825 if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
826 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
827
828 /* tight coex, high bt traffic, reduce AGG time limit */
829 return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
830}
831
832bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
833 struct ieee80211_sta *sta)
834{
835 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
836 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
837 struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
838 enum iwl_bt_coex_lut_type lut_type;
839
840 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
841 return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
842
843 if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
844 return true;
845
846 if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
847 BT_HIGH_TRAFFIC)
848 return true;
849
850 /*
851 * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
852 * since BT is already killed.
853 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
854 * we Tx.
855 * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
856 */
857 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
858 return lut_type != BT_COEX_LOOSE_LUT;
859}
860
861bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
862{
863 /* there is no other antenna, shared antenna is always available */
864 if (mvm->cfg->bt_shared_single_ant)
865 return true;
866
867 if (ant & mvm->cfg->non_shared_ant)
868 return true;
869
870 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
871 return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
872
873 return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
874 BT_HIGH_TRAFFIC;
875}
876
877bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
878{
879 /* there is no other antenna, shared antenna is always available */
880 if (mvm->cfg->bt_shared_single_ant)
881 return true;
882
883 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
884 return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
885
886 return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
887}
888
889bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
890 enum ieee80211_band band)
891{
892 u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
893
894 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
895 return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
896
897 if (band != IEEE80211_BAND_2GHZ)
898 return false;
899
900 return bt_activity >= BT_LOW_TRAFFIC;
901}
902
903u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
904 struct ieee80211_tx_info *info, u8 ac)
905{
906 __le16 fc = hdr->frame_control;
907
908 if (info->band != IEEE80211_BAND_2GHZ)
909 return 0;
910
911 if (unlikely(mvm->bt_tx_prio))
912 return mvm->bt_tx_prio - 1;
913
914 /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
915 if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
916 is_multicast_ether_addr(hdr->addr1) ||
917 ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) ||
918 ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc))
919 return 3;
920
921 switch (ac) {
922 case IEEE80211_AC_BE:
923 return 1;
924 case IEEE80211_AC_VO:
925 return 3;
926 case IEEE80211_AC_VI:
927 return 2;
928 default:
929 break;
930 }
931
932 return 0;
933}
934
935void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
936{
937 if (!fw_has_api(&mvm->fw->ucode_capa,
938 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
939 iwl_mvm_bt_coex_vif_change_old(mvm);
940 return;
941 }
942
943 iwl_mvm_bt_coex_notif_handle(mvm);
944}
945
946void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
947 struct iwl_rx_cmd_buffer *rxb)
948{
949 struct iwl_rx_packet *pkt = rxb_addr(rxb);
950 u32 ant_isolation = le32_to_cpup((void *)pkt->data);
951 struct iwl_bt_coex_corun_lut_update_cmd cmd = {};
952 u8 __maybe_unused lower_bound, upper_bound;
953 u8 lut;
954
955 if (!fw_has_api(&mvm->fw->ucode_capa,
956 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
957 iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
958 return;
959 }
960
961 if (!iwl_mvm_bt_is_plcr_supported(mvm))
962 return;
963
964 lockdep_assert_held(&mvm->mutex);
965
966 /* Ignore updates if we are in force mode */
967 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
968 return;
969
970 if (ant_isolation == mvm->last_ant_isol)
971 return;
972
973 for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
974 if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
975 break;
976
977 lower_bound = antenna_coupling_ranges[lut].range;
978
979 if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
980 upper_bound = antenna_coupling_ranges[lut + 1].range;
981 else
982 upper_bound = antenna_coupling_ranges[lut].range;
983
984 IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
985 ant_isolation, lower_bound, upper_bound, lut);
986
987 mvm->last_ant_isol = ant_isolation;
988
989 if (mvm->last_corun_lut == lut)
990 return;
991
992 mvm->last_corun_lut = lut;
993
994 /* For the moment, use the same LUT for 20GHz and 40GHz */
995 memcpy(&cmd.corun_lut20, antenna_coupling_ranges[lut].lut20,
996 sizeof(cmd.corun_lut20));
997
998 memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
999 sizeof(cmd.corun_lut40));
1000
1001 if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
1002 sizeof(cmd), &cmd))
1003 IWL_ERR(mvm,
1004 "failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
1005}
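
Note: iwl_mvm_bt_coex_notif_handle() above selects one of three per-channel masks from iwl_ci_mask[]: index 0 for a 20 MHz channel, index 1 when the 40 MHz extension sits below the primary channel (center_freq1 below the primary's center frequency), and index 2 when it sits above. A minimal sketch of that index selection; the enum is a stand-in for nl80211_chan_width (only its ordering matters here) and the frequencies are hypothetical 2.4 GHz examples:

#include <stdio.h>

/* stand-in for nl80211_chan_width; only the ordering is relied on */
enum chan_width { CHAN_WIDTH_20 = 0, CHAN_WIDTH_40 = 1 };

/* Mirrors the ci_bw_idx choice in iwl_mvm_bt_coex_notif_handle():
 * 0 = 20 MHz, 1 = 40 MHz extension below, 2 = 40 MHz extension above. */
static int ci_bw_idx(enum chan_width width, int center_freq1, int primary_freq)
{
	if (width < CHAN_WIDTH_40)
		return 0;
	return center_freq1 > primary_freq ? 2 : 1;
}

int main(void)
{
	/* channel 6 primary at 2437 MHz */
	printf("%d\n", ci_bw_idx(CHAN_WIDTH_20, 2437, 2437));	/* 0: 20 MHz */
	printf("%d\n", ci_bw_idx(CHAN_WIDTH_40, 2447, 2437));	/* 2: ext above */
	printf("%d\n", ci_bw_idx(CHAN_WIDTH_40, 2427, 2437));	/* 1: ext below */
	return 0;
}
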
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
deleted file mode 100644
index 61c07b05fcaa..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ /dev/null
@@ -1,1315 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/ieee80211.h>
67#include <linux/etherdevice.h>
68#include <net/mac80211.h>
69
70#include "fw-api-coex.h"
71#include "iwl-modparams.h"
72#include "mvm.h"
73#include "iwl-debug.h"
74
75#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \
76 [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \
77 ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
78
79static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
80 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
81 BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
82 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
83 BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
84 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
85 BT_COEX_PRIO_TBL_PRIO_LOW, 0),
86 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
87 BT_COEX_PRIO_TBL_PRIO_LOW, 1),
88 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
89 BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
90 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
91 BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
92 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
93 BT_COEX_PRIO_TBL_DISABLED, 0),
94 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
95 BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
96 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
97 BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
98 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
99 BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
100 0, 0, 0, 0, 0, 0,
101};
102
103#undef EVENT_PRIO_ANT
104
105static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
106{
107 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
108 return 0;
109
110 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
111 sizeof(struct iwl_bt_coex_prio_tbl_cmd),
112 &iwl_bt_prio_tbl);
113}
114
115static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
116 cpu_to_le32(0xf0f0f0f0), /* 50% */
117 cpu_to_le32(0xc0c0c0c0), /* 25% */
118 cpu_to_le32(0xfcfcfcfc), /* 75% */
119 cpu_to_le32(0xfefefefe), /* 87.5% */
120};
121
122static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
123 {
124 cpu_to_le32(0x40000000),
125 cpu_to_le32(0x00000000),
126 cpu_to_le32(0x44000000),
127 cpu_to_le32(0x00000000),
128 cpu_to_le32(0x40000000),
129 cpu_to_le32(0x00000000),
130 cpu_to_le32(0x44000000),
131 cpu_to_le32(0x00000000),
132 cpu_to_le32(0xc0004000),
133 cpu_to_le32(0xf0005000),
134 cpu_to_le32(0xc0004000),
135 cpu_to_le32(0xf0005000),
136 },
137 {
138 cpu_to_le32(0x40000000),
139 cpu_to_le32(0x00000000),
140 cpu_to_le32(0x44000000),
141 cpu_to_le32(0x00000000),
142 cpu_to_le32(0x40000000),
143 cpu_to_le32(0x00000000),
144 cpu_to_le32(0x44000000),
145 cpu_to_le32(0x00000000),
146 cpu_to_le32(0xc0004000),
147 cpu_to_le32(0xf0005000),
148 cpu_to_le32(0xc0004000),
149 cpu_to_le32(0xf0005000),
150 },
151 {
152 cpu_to_le32(0x40000000),
153 cpu_to_le32(0x00000000),
154 cpu_to_le32(0x44000000),
155 cpu_to_le32(0x00000000),
156 cpu_to_le32(0x40000000),
157 cpu_to_le32(0x00000000),
158 cpu_to_le32(0x44000000),
159 cpu_to_le32(0x00000000),
160 cpu_to_le32(0xc0004000),
161 cpu_to_le32(0xf0005000),
162 cpu_to_le32(0xc0004000),
163 cpu_to_le32(0xf0005000),
164 },
165};
166
167static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
168 {
169 /* Tight */
170 cpu_to_le32(0xaaaaaaaa),
171 cpu_to_le32(0xaaaaaaaa),
172 cpu_to_le32(0xaeaaaaaa),
173 cpu_to_le32(0xaaaaaaaa),
174 cpu_to_le32(0xcc00ff28),
175 cpu_to_le32(0x0000aaaa),
176 cpu_to_le32(0xcc00aaaa),
177 cpu_to_le32(0x0000aaaa),
178 cpu_to_le32(0xc0004000),
179 cpu_to_le32(0x00004000),
180 cpu_to_le32(0xf0005000),
181 cpu_to_le32(0xf0005000),
182 },
183 {
184 /* Loose */
185 cpu_to_le32(0xaaaaaaaa),
186 cpu_to_le32(0xaaaaaaaa),
187 cpu_to_le32(0xaaaaaaaa),
188 cpu_to_le32(0xaaaaaaaa),
189 cpu_to_le32(0xcc00ff28),
190 cpu_to_le32(0x0000aaaa),
191 cpu_to_le32(0xcc00aaaa),
192 cpu_to_le32(0x0000aaaa),
193 cpu_to_le32(0x00000000),
194 cpu_to_le32(0x00000000),
195 cpu_to_le32(0xf0005000),
196 cpu_to_le32(0xf0005000),
197 },
198 {
199 /* Tx Tx disabled */
200 cpu_to_le32(0xaaaaaaaa),
201 cpu_to_le32(0xaaaaaaaa),
202 cpu_to_le32(0xeeaaaaaa),
203 cpu_to_le32(0xaaaaaaaa),
204 cpu_to_le32(0xcc00ff28),
205 cpu_to_le32(0x0000aaaa),
206 cpu_to_le32(0xcc00aaaa),
207 cpu_to_le32(0x0000aaaa),
208 cpu_to_le32(0xc0004000),
209 cpu_to_le32(0xc0004000),
210 cpu_to_le32(0xf0005000),
211 cpu_to_le32(0xf0005000),
212 },
213};
214
215/* 20MHz / 40MHz below / 40Mhz above*/
216static const __le64 iwl_ci_mask[][3] = {
217 /* dummy entry for channel 0 */
218 {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
219 {
220 cpu_to_le64(0x0000001FFFULL),
221 cpu_to_le64(0x0ULL),
222 cpu_to_le64(0x00007FFFFFULL),
223 },
224 {
225 cpu_to_le64(0x000000FFFFULL),
226 cpu_to_le64(0x0ULL),
227 cpu_to_le64(0x0003FFFFFFULL),
228 },
229 {
230 cpu_to_le64(0x000003FFFCULL),
231 cpu_to_le64(0x0ULL),
232 cpu_to_le64(0x000FFFFFFCULL),
233 },
234 {
235 cpu_to_le64(0x00001FFFE0ULL),
236 cpu_to_le64(0x0ULL),
237 cpu_to_le64(0x007FFFFFE0ULL),
238 },
239 {
240 cpu_to_le64(0x00007FFF80ULL),
241 cpu_to_le64(0x00007FFFFFULL),
242 cpu_to_le64(0x01FFFFFF80ULL),
243 },
244 {
245 cpu_to_le64(0x0003FFFC00ULL),
246 cpu_to_le64(0x0003FFFFFFULL),
247 cpu_to_le64(0x0FFFFFFC00ULL),
248 },
249 {
250 cpu_to_le64(0x000FFFF000ULL),
251 cpu_to_le64(0x000FFFFFFCULL),
252 cpu_to_le64(0x3FFFFFF000ULL),
253 },
254 {
255 cpu_to_le64(0x007FFF8000ULL),
256 cpu_to_le64(0x007FFFFFE0ULL),
257 cpu_to_le64(0xFFFFFF8000ULL),
258 },
259 {
260 cpu_to_le64(0x01FFFE0000ULL),
261 cpu_to_le64(0x01FFFFFF80ULL),
262 cpu_to_le64(0xFFFFFE0000ULL),
263 },
264 {
265 cpu_to_le64(0x0FFFF00000ULL),
266 cpu_to_le64(0x0FFFFFFC00ULL),
267 cpu_to_le64(0x0ULL),
268 },
269 {
270 cpu_to_le64(0x3FFFC00000ULL),
271 cpu_to_le64(0x3FFFFFF000ULL),
272 cpu_to_le64(0x0)
273 },
274 {
275 cpu_to_le64(0xFFFE000000ULL),
276 cpu_to_le64(0xFFFFFF8000ULL),
277 cpu_to_le64(0x0)
278 },
279 {
280 cpu_to_le64(0xFFF8000000ULL),
281 cpu_to_le64(0xFFFFFE0000ULL),
282 cpu_to_le64(0x0)
283 },
284 {
285 cpu_to_le64(0xFFC0000000ULL),
286 cpu_to_le64(0x0ULL),
287 cpu_to_le64(0x0ULL)
288 },
289};
290
291enum iwl_bt_kill_msk {
292 BT_KILL_MSK_DEFAULT,
293 BT_KILL_MSK_NEVER,
294 BT_KILL_MSK_ALWAYS,
295 BT_KILL_MSK_MAX,
296};
297
298static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
299 [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
300 [BT_KILL_MSK_NEVER] = 0xffffffff,
301 [BT_KILL_MSK_ALWAYS] = 0,
302};
303
304static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
305 {
306 BT_KILL_MSK_ALWAYS,
307 BT_KILL_MSK_ALWAYS,
308 BT_KILL_MSK_ALWAYS,
309 },
310 {
311 BT_KILL_MSK_NEVER,
312 BT_KILL_MSK_NEVER,
313 BT_KILL_MSK_NEVER,
314 },
315 {
316 BT_KILL_MSK_NEVER,
317 BT_KILL_MSK_NEVER,
318 BT_KILL_MSK_NEVER,
319 },
320 {
321 BT_KILL_MSK_DEFAULT,
322 BT_KILL_MSK_NEVER,
323 BT_KILL_MSK_DEFAULT,
324 },
325};
326
327static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
328 {
329 BT_KILL_MSK_ALWAYS,
330 BT_KILL_MSK_ALWAYS,
331 BT_KILL_MSK_ALWAYS,
332 },
333 {
334 BT_KILL_MSK_ALWAYS,
335 BT_KILL_MSK_ALWAYS,
336 BT_KILL_MSK_ALWAYS,
337 },
338 {
339 BT_KILL_MSK_ALWAYS,
340 BT_KILL_MSK_ALWAYS,
341 BT_KILL_MSK_ALWAYS,
342 },
343 {
344 BT_KILL_MSK_DEFAULT,
345 BT_KILL_MSK_ALWAYS,
346 BT_KILL_MSK_DEFAULT,
347 },
348};
349
350struct corunning_block_luts {
351 u8 range;
352 __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
353};
354
355/*
356 * Ranges for the antenna coupling calibration / co-running block LUT:
357 * LUT0: [ 0, 12[
358 * LUT1: [12, 20[
359 * LUT2: [20, 21[
360 * LUT3: [21, 23[
361 * LUT4: [23, 27[
362 * LUT5: [27, 30[
363 * LUT6: [30, 32[
364 * LUT7: [32, 33[
365 * LUT8: [33, - [
366 */
367static const struct corunning_block_luts antenna_coupling_ranges[] = {
368 {
369 .range = 0,
370 .lut20 = {
371 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
372 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
373 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
374 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
375 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
376 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
377 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
378 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
379 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
380 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
381 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
382 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
383 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
384 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
385 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
386 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
387 },
388 },
389 {
390 .range = 12,
391 .lut20 = {
392 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
393 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
394 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
395 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
396 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
397 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
398 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
399 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
400 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
401 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
402 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
403 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
404 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
405 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
406 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
407 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
408 },
409 },
410 {
411 .range = 20,
412 .lut20 = {
413 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
414 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
415 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
416 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
417 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
418 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
419 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
420 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
421 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
422 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
423 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
424 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
425 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
426 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
427 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
428 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
429 },
430 },
431 {
432 .range = 21,
433 .lut20 = {
434 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
435 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
436 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
437 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
438 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
439 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
440 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
441 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
442 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
443 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
444 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
445 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
446 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
447 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
448 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
449 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
450 },
451 },
452 {
453 .range = 23,
454 .lut20 = {
455 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
456 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
457 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
458 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
459 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
460 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
461 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
462 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
463 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
464 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
465 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
466 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
467 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
468 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
469 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
470 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
471 },
472 },
473 {
474 .range = 27,
475 .lut20 = {
476 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
477 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
478 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
479 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
480 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
481 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
482 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
483 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
484 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
485 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
486 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
487 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
488 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
489 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
490 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
491 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
492 },
493 },
494 {
495 .range = 30,
496 .lut20 = {
497 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
498 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
499 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
500 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
501 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
502 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
503 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
504 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
505 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
506 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
507 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
508 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
509 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
510 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
511 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
512 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
513 },
514 },
515 {
516 .range = 32,
517 .lut20 = {
518 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
519 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
520 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
521 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
522 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
523 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
524 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
525 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
526 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
527 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
528 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
529 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
530 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
531 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
532 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
533 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
534 },
535 },
536 {
537 .range = 33,
538 .lut20 = {
539 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
540 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
541 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
542 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
543 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
544 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
545 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
546 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
547 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
548 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
549 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
550 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
551 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
552 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
553 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
554 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
555 },
556 },
557};
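/*
 * The ranges above are half-open, so a reading selects the last entry
 * whose .range does not exceed it.  For example, an antenna isolation of
 * 25 falls in [23, 27[ and selects antenna_coupling_ranges[4] (LUT4);
 * any reading of 33 or more selects the last entry (LUT8).  The lookup
 * itself is done in iwl_mvm_rx_ant_coupling_notif_old() below.
 */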
558
559static enum iwl_bt_coex_lut_type
560iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
561{
562 struct ieee80211_chanctx_conf *chanctx_conf;
563 enum iwl_bt_coex_lut_type ret;
564 u16 phy_ctx_id;
565
566 /*
567 * Checking that we hold mvm->mutex is a good idea, but the rate
568	 * control can't acquire the mutex since it runs in the Tx path.
569 * So this is racy in that case, but in the worst case, the AMPDU
570 * size limit will be wrong for a short time which is not a big
571 * issue.
572 */
573
574 rcu_read_lock();
575
576 chanctx_conf = rcu_dereference(vif->chanctx_conf);
577
578 if (!chanctx_conf ||
579 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
580 rcu_read_unlock();
581 return BT_COEX_INVALID_LUT;
582 }
583
584 ret = BT_COEX_TX_DIS_LUT;
585
586 if (mvm->cfg->bt_shared_single_ant) {
587 rcu_read_unlock();
588 return ret;
589 }
590
591 phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
592
593 if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id)
594 ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut);
595 else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id)
596 ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut);
597	/* else - default = TX-TX disallowed */
598
599 rcu_read_unlock();
600
601 return ret;
602}
603
604int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
605{
606 struct iwl_bt_coex_cmd_old *bt_cmd;
607 struct iwl_host_cmd cmd = {
608 .id = BT_CONFIG,
609 .len = { sizeof(*bt_cmd), },
610 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
611 };
612 int ret;
613 u32 flags;
614
615 ret = iwl_send_bt_prio_tbl(mvm);
616 if (ret)
617 return ret;
618
619 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
620 if (!bt_cmd)
621 return -ENOMEM;
622 cmd.data[0] = bt_cmd;
623
624 lockdep_assert_held(&mvm->mutex);
625
626 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
627 switch (mvm->bt_force_ant_mode) {
628 case BT_FORCE_ANT_AUTO:
629 flags = BT_COEX_AUTO_OLD;
630 break;
631 case BT_FORCE_ANT_BT:
632 flags = BT_COEX_BT_OLD;
633 break;
634 case BT_FORCE_ANT_WIFI:
635 flags = BT_COEX_WIFI_OLD;
636 break;
637 default:
638 WARN_ON(1);
639 flags = 0;
640 }
641
642 bt_cmd->flags = cpu_to_le32(flags);
643 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE);
644 goto send_cmd;
645 }
646
647 bt_cmd->max_kill = 5;
648 bt_cmd->bt4_antenna_isolation_thr =
649 IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
650 bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
651 bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
652 bt_cmd->bt4_tx_rx_max_freq0 = 15;
653 bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
654 bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
655
656 flags = iwlwifi_mod_params.bt_coex_active ?
657 BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD;
658 bt_cmd->flags = cpu_to_le32(flags);
659
660 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
661 BT_VALID_BT_PRIO_BOOST |
662 BT_VALID_MAX_KILL |
663 BT_VALID_3W_TMRS |
664 BT_VALID_KILL_ACK |
665 BT_VALID_KILL_CTS |
666 BT_VALID_REDUCED_TX_POWER |
667 BT_VALID_LUT |
668 BT_VALID_WIFI_RX_SW_PRIO_BOOST |
669 BT_VALID_WIFI_TX_SW_PRIO_BOOST |
670 BT_VALID_ANT_ISOLATION |
671 BT_VALID_ANT_ISOLATION_THRS |
672 BT_VALID_TXTX_DELTA_FREQ_THRS |
673 BT_VALID_TXRX_MAX_FREQ_0 |
674 BT_VALID_SYNC_TO_SCO |
675 BT_VALID_TTC |
676 BT_VALID_RRC);
677
678 if (IWL_MVM_BT_COEX_SYNC2SCO)
679 bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
680
681 if (iwl_mvm_bt_is_plcr_supported(mvm)) {
682 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
683 BT_VALID_CORUN_LUT_40);
684 bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
685 }
686
687 if (IWL_MVM_BT_COEX_MPLUT) {
688 bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
689 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
690 }
691
692 if (IWL_MVM_BT_COEX_TTC)
693 bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
694
695 if (iwl_mvm_bt_is_rrc_supported(mvm))
696 bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
697
698 if (mvm->cfg->bt_shared_single_ant)
699 memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
700 sizeof(iwl_single_shared_ant));
701 else
702 memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
703 sizeof(iwl_combined_lookup));
704
705 /* Take first Co-running block LUT to get started */
706 memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
707 sizeof(bt_cmd->bt4_corun_lut20));
708 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
709 sizeof(bt_cmd->bt4_corun_lut40));
710
711 memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
712 sizeof(iwl_bt_prio_boost));
713 bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
714 bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
715
716send_cmd:
717 memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
718 memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
719
720 ret = iwl_mvm_send_cmd(mvm, &cmd);
721
722 kfree(bt_cmd);
723 return ret;
724}
725
726static int iwl_mvm_bt_update_ctrl_kill_msk(struct iwl_mvm *mvm)
727{
728 struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
729 u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
730 u32 ag = le32_to_cpu(notif->bt_activity_grading);
731 struct iwl_bt_coex_cmd_old *bt_cmd;
732 u8 ack_kill_msk, cts_kill_msk;
733 struct iwl_host_cmd cmd = {
734 .id = BT_CONFIG,
735 .data[0] = &bt_cmd,
736 .len = { sizeof(*bt_cmd), },
737 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
738 };
739 int ret = 0;
740
741 lockdep_assert_held(&mvm->mutex);
742
743 ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut];
744 cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut];
745
746 if (mvm->bt_ack_kill_msk[0] == ack_kill_msk &&
747 mvm->bt_cts_kill_msk[0] == cts_kill_msk)
748 return 0;
749
750 mvm->bt_ack_kill_msk[0] = ack_kill_msk;
751 mvm->bt_cts_kill_msk[0] = cts_kill_msk;
752
753 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
754 if (!bt_cmd)
755 return -ENOMEM;
756 cmd.data[0] = bt_cmd;
757 bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
758
759 bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]);
760 bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]);
761 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
762 BT_VALID_KILL_ACK |
763 BT_VALID_KILL_CTS);
764
765 ret = iwl_mvm_send_cmd(mvm, &cmd);
766
767 kfree(bt_cmd);
768 return ret;
769}
770
771static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
772 bool enable)
773{
774 struct iwl_bt_coex_cmd_old *bt_cmd;
775 /* Send ASYNC since this can be sent from an atomic context */
776 struct iwl_host_cmd cmd = {
777 .id = BT_CONFIG,
778 .len = { sizeof(*bt_cmd), },
779 .dataflags = { IWL_HCMD_DFL_DUP, },
780 .flags = CMD_ASYNC,
781 };
782 struct iwl_mvm_sta *mvmsta;
783 int ret;
784
785 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
786 if (!mvmsta)
787 return 0;
788
789 /* nothing to do */
790 if (mvmsta->bt_reduced_txpower == enable)
791 return 0;
792
793 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
794 if (!bt_cmd)
795 return -ENOMEM;
796 cmd.data[0] = bt_cmd;
797 bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
798
799 bt_cmd->valid_bit_msk =
800 cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
801 bt_cmd->bt_reduced_tx_power = sta_id;
802
803 if (enable)
804 bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
805
806 IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
807 enable ? "en" : "dis", sta_id);
808
809 mvmsta->bt_reduced_txpower = enable;
810
811 ret = iwl_mvm_send_cmd(mvm, &cmd);
812
813 kfree(bt_cmd);
814 return ret;
815}
816
817struct iwl_bt_iterator_data {
818 struct iwl_bt_coex_profile_notif_old *notif;
819 struct iwl_mvm *mvm;
820 struct ieee80211_chanctx_conf *primary;
821 struct ieee80211_chanctx_conf *secondary;
822 bool primary_ll;
823};
824
825static inline
826void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
827 struct ieee80211_vif *vif,
828 bool enable, int rssi)
829{
830 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
831
832 mvmvif->bf_data.last_bt_coex_event = rssi;
833 mvmvif->bf_data.bt_coex_max_thold =
834 enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
835 mvmvif->bf_data.bt_coex_min_thold =
836 enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
837}
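/*
 * With the thresholds from constants.h (IWL_MVM_BT_COEX_EN_RED_TXP_THRESH
 * = 62, IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH = 65), this sets the monitoring
 * thresholds to -62 dBm and -65 dBm, i.e. a 3 dB hysteresis band used when
 * toggling reduced Tx power on and off.
 */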
838
839/* must be called under rcu_read_lock */
840static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
841 struct ieee80211_vif *vif)
842{
843 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
844 struct iwl_bt_iterator_data *data = _data;
845 struct iwl_mvm *mvm = data->mvm;
846 struct ieee80211_chanctx_conf *chanctx_conf;
847 enum ieee80211_smps_mode smps_mode;
848 u32 bt_activity_grading;
849 int ave_rssi;
850
851 lockdep_assert_held(&mvm->mutex);
852
853 switch (vif->type) {
854 case NL80211_IFTYPE_STATION:
855 /* default smps_mode for BSS / P2P client is AUTOMATIC */
856 smps_mode = IEEE80211_SMPS_AUTOMATIC;
857 break;
858 case NL80211_IFTYPE_AP:
859 if (!mvmvif->ap_ibss_active)
860 return;
861 break;
862 default:
863 return;
864 }
865
866 chanctx_conf = rcu_dereference(vif->chanctx_conf);
867
868 /* If channel context is invalid or not on 2.4GHz .. */
869 if ((!chanctx_conf ||
870 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
871 if (vif->type == NL80211_IFTYPE_STATION) {
872 /* ... relax constraints and disable rssi events */
873 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
874 smps_mode);
875 iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
876 false);
877 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
878 }
879 return;
880 }
881
882 bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
883 if (bt_activity_grading >= BT_HIGH_TRAFFIC)
884 smps_mode = IEEE80211_SMPS_STATIC;
885 else if (bt_activity_grading >= BT_LOW_TRAFFIC)
886 smps_mode = vif->type == NL80211_IFTYPE_AP ?
887 IEEE80211_SMPS_OFF :
888 IEEE80211_SMPS_DYNAMIC;
889
890	/* relax SMPS constraints for next association */
891 if (!vif->bss_conf.assoc)
892 smps_mode = IEEE80211_SMPS_AUTOMATIC;
893
894 if (mvmvif->phy_ctxt &&
895 data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
896 smps_mode = IEEE80211_SMPS_AUTOMATIC;
897
898 IWL_DEBUG_COEX(data->mvm,
899 "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
900 mvmvif->id, data->notif->bt_status, bt_activity_grading,
901 smps_mode);
902
903 if (vif->type == NL80211_IFTYPE_STATION)
904 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
905 smps_mode);
906
907 /* low latency is always primary */
908 if (iwl_mvm_vif_low_latency(mvmvif)) {
909 data->primary_ll = true;
910
911 data->secondary = data->primary;
912 data->primary = chanctx_conf;
913 }
914
915 if (vif->type == NL80211_IFTYPE_AP) {
916 if (!mvmvif->ap_ibss_active)
917 return;
918
919 if (chanctx_conf == data->primary)
920 return;
921
922 if (!data->primary_ll) {
923 /*
924 * downgrade the current primary no matter what its
925 * type is.
926 */
927 data->secondary = data->primary;
928 data->primary = chanctx_conf;
929 } else {
930			/* there is a low latency vif - we will be secondary */
931 data->secondary = chanctx_conf;
932 }
933 return;
934 }
935
936 /*
937	 * STA / P2P client: try to be primary if this is the first vif. If we
938	 * are in low latency mode, we are already primary and don't do much more.
939 */
940 if (!data->primary || data->primary == chanctx_conf)
941 data->primary = chanctx_conf;
942 else if (!data->secondary)
943 /* if secondary is not NULL, it might be a GO */
944 data->secondary = chanctx_conf;
945
946 /*
947 * don't reduce the Tx power if one of these is true:
948 * we are in LOOSE
949	 * single shared antenna product
950	 * BT is not active
951	 * we are not associated
952 */
953 if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
954 mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
955 !data->notif->bt_status) {
956 iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
957 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
958 return;
959 }
960
961 /* try to get the avg rssi from fw */
962 ave_rssi = mvmvif->bf_data.ave_beacon_signal;
963
964	/* if the RSSI isn't valid, assume it is very low */
965 if (!ave_rssi)
966 ave_rssi = -100;
967 if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
968 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
969 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
970 } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
971 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
972 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
973 }
974
975 /* Begin to monitor the RSSI: it may influence the reduced Tx power */
976 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
977}
978
979static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
980{
981 struct iwl_bt_iterator_data data = {
982 .mvm = mvm,
983 .notif = &mvm->last_bt_notif_old,
984 };
985 struct iwl_bt_coex_ci_cmd_old cmd = {};
986 u8 ci_bw_idx;
987
988 /* Ignore updates if we are in force mode */
989 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
990 return;
991
992 rcu_read_lock();
993 ieee80211_iterate_active_interfaces_atomic(
994 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
995 iwl_mvm_bt_notif_iterator, &data);
996
997 if (data.primary) {
998 struct ieee80211_chanctx_conf *chan = data.primary;
999
1000 if (WARN_ON(!chan->def.chan)) {
1001 rcu_read_unlock();
1002 return;
1003 }
1004
1005 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
1006 ci_bw_idx = 0;
1007 cmd.co_run_bw_primary = 0;
1008 } else {
1009 cmd.co_run_bw_primary = 1;
1010 if (chan->def.center_freq1 >
1011 chan->def.chan->center_freq)
1012 ci_bw_idx = 2;
1013 else
1014 ci_bw_idx = 1;
1015 }
1016
1017 cmd.bt_primary_ci =
1018 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
1019 cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
1020 }
1021
1022 if (data.secondary) {
1023 struct ieee80211_chanctx_conf *chan = data.secondary;
1024
1025 if (WARN_ON(!data.secondary->def.chan)) {
1026 rcu_read_unlock();
1027 return;
1028 }
1029
1030 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
1031 ci_bw_idx = 0;
1032 cmd.co_run_bw_secondary = 0;
1033 } else {
1034 cmd.co_run_bw_secondary = 1;
1035 if (chan->def.center_freq1 >
1036 chan->def.chan->center_freq)
1037 ci_bw_idx = 2;
1038 else
1039 ci_bw_idx = 1;
1040 }
1041
1042 cmd.bt_secondary_ci =
1043 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
1044 cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
1045 }
1046
1047 rcu_read_unlock();
1048
1049 /* Don't spam the fw with the same command over and over */
1050 if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) {
1051 if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
1052 sizeof(cmd), &cmd))
1053 IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
1054 memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
1055 }
1056
1057	if (iwl_mvm_bt_update_ctrl_kill_msk(mvm))
1058 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
1059}
1060
1061void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
1062 struct iwl_rx_cmd_buffer *rxb)
1063{
1064 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1065 struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
1066
1067 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
1068 IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
1069 notif->bt_status ? "ON" : "OFF");
1070 IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
1071 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
1072 IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
1073 le32_to_cpu(notif->primary_ch_lut));
1074 IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
1075 le32_to_cpu(notif->secondary_ch_lut));
1076 IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
1077 le32_to_cpu(notif->bt_activity_grading));
1078 IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
1079 notif->bt_agg_traffic_load);
1080
1081 /* remember this notification for future use: rssi fluctuations */
1082 memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
1083
1084 iwl_mvm_bt_coex_notif_handle(mvm);
1085}
1086
1087static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
1088 struct ieee80211_vif *vif)
1089{
1090 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1091 struct iwl_bt_iterator_data *data = _data;
1092 struct iwl_mvm *mvm = data->mvm;
1093
1094 struct ieee80211_sta *sta;
1095 struct iwl_mvm_sta *mvmsta;
1096
1097 struct ieee80211_chanctx_conf *chanctx_conf;
1098
1099 rcu_read_lock();
1100 chanctx_conf = rcu_dereference(vif->chanctx_conf);
1101 /* If channel context is invalid or not on 2.4GHz - don't count it */
1102 if (!chanctx_conf ||
1103 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
1104 rcu_read_unlock();
1105 return;
1106 }
1107 rcu_read_unlock();
1108
1109 if (vif->type != NL80211_IFTYPE_STATION ||
1110 mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
1111 return;
1112
1113 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
1114 lockdep_is_held(&mvm->mutex));
1115
1116	/* This can happen if the station has just been removed */
1117 if (IS_ERR_OR_NULL(sta))
1118 return;
1119
1120 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1121}
1122
1123void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1124 enum ieee80211_rssi_event_data rssi_event)
1125{
1126 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1127 struct iwl_bt_iterator_data data = {
1128 .mvm = mvm,
1129 };
1130 int ret;
1131
1132 lockdep_assert_held(&mvm->mutex);
1133
1134 /* Ignore updates if we are in force mode */
1135 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
1136 return;
1137
1138 /*
1139	 * RSSI update while not associated - can happen since the statistics
1140 * are handled asynchronously
1141 */
1142 if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
1143 return;
1144
1145 /* No BT - reports should be disabled */
1146 if (!mvm->last_bt_notif_old.bt_status)
1147 return;
1148
1149 IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
1150 rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
1151
1152 /*
1153 * Check if rssi is good enough for reduced Tx power, but not in loose
1154 * scheme.
1155 */
1156 if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
1157 iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
1158 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
1159 false);
1160 else
1161 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
1162
1163 if (ret)
1164 IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
1165
1166 ieee80211_iterate_active_interfaces_atomic(
1167 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1168 iwl_mvm_bt_rssi_iterator, &data);
1169
1170	if (iwl_mvm_bt_update_ctrl_kill_msk(mvm))
1171 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
1172}
1173
1174#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
1175#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
1176
1177u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
1178 struct ieee80211_sta *sta)
1179{
1180 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1181 enum iwl_bt_coex_lut_type lut_type;
1182
1183 if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
1184 BT_HIGH_TRAFFIC)
1185 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
1186
1187 if (mvm->last_bt_notif_old.ttc_enabled)
1188 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
1189
1190 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
1191
1192 if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
1193 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
1194
1195 /* tight coex, high bt traffic, reduce AGG time limit */
1196 return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
1197}
1198
1199bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
1200 struct ieee80211_sta *sta)
1201{
1202 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1203 enum iwl_bt_coex_lut_type lut_type;
1204
1205 if (mvm->last_bt_notif_old.ttc_enabled)
1206 return true;
1207
1208 if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
1209 BT_HIGH_TRAFFIC)
1210 return true;
1211
1212 /*
1213 * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
1214 * since BT is already killed.
1215 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
1216 * we Tx.
1217 * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
1218 */
1219 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
1220 return lut_type != BT_COEX_LOOSE_LUT;
1221}
1222
1223bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
1224{
1225 u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
1226 return ag < BT_HIGH_TRAFFIC;
1227}
1228
1229bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
1230 enum ieee80211_band band)
1231{
1232 u32 bt_activity =
1233 le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
1234
1235 if (band != IEEE80211_BAND_2GHZ)
1236 return false;
1237
1238 return bt_activity >= BT_LOW_TRAFFIC;
1239}
1240
1241void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
1242{
1243 iwl_mvm_bt_coex_notif_handle(mvm);
1244}
1245
1246void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
1247 struct iwl_rx_cmd_buffer *rxb)
1248{
1249 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1250 u32 ant_isolation = le32_to_cpup((void *)pkt->data);
1251 u8 __maybe_unused lower_bound, upper_bound;
1252 u8 lut;
1253
1254 struct iwl_bt_coex_cmd_old *bt_cmd;
1255 struct iwl_host_cmd cmd = {
1256 .id = BT_CONFIG,
1257 .len = { sizeof(*bt_cmd), },
1258 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1259 };
1260
1261 if (!iwl_mvm_bt_is_plcr_supported(mvm))
1262 return;
1263
1264 lockdep_assert_held(&mvm->mutex);
1265
1266 /* Ignore updates if we are in force mode */
1267 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
1268 return;
1269
1270 if (ant_isolation == mvm->last_ant_isol)
1271 return;
1272
1273 for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
1274 if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
1275 break;
1276
1277 lower_bound = antenna_coupling_ranges[lut].range;
1278
1279 if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
1280 upper_bound = antenna_coupling_ranges[lut + 1].range;
1281 else
1282 upper_bound = antenna_coupling_ranges[lut].range;
1283
1284 IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
1285 ant_isolation, lower_bound, upper_bound, lut);
1286
1287 mvm->last_ant_isol = ant_isolation;
1288
1289 if (mvm->last_corun_lut == lut)
1290 return;
1291
1292 mvm->last_corun_lut = lut;
1293
1294 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
1295 if (!bt_cmd)
1296 return;
1297 cmd.data[0] = bt_cmd;
1298
1299 bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
1300 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
1301 BT_VALID_CORUN_LUT_20 |
1302 BT_VALID_CORUN_LUT_40);
1303
1304	/* For the moment, use the same LUT for 20MHz and 40MHz */
1305 memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
1306 sizeof(bt_cmd->bt4_corun_lut20));
1307
1308 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
1309 sizeof(bt_cmd->bt4_corun_lut40));
1310
1311 if (iwl_mvm_send_cmd(mvm, &cmd))
1312 IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
1313
1314 kfree(bt_cmd);
1315}
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
deleted file mode 100644
index 5c21231e195d..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ /dev/null
@@ -1,139 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#ifndef __MVM_CONSTANTS_H
66#define __MVM_CONSTANTS_H
67
68#include <linux/ieee80211.h>
69
70#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
71#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
72#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
73#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
74#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
75#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
76#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
77#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
78#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
79#define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
80 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
81 IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
82 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
83#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
84#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8
85#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
86#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
87#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
88#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
89#define IWL_MVM_PS_SNOOZE_INTERVAL 25
90#define IWL_MVM_PS_SNOOZE_WINDOW 50
91#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
92#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
93#define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62
94#define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65
95#define IWL_MVM_BT_COEX_SYNC2SCO 1
96#define IWL_MVM_BT_COEX_CORUNNING 0
97#define IWL_MVM_BT_COEX_MPLUT 1
98#define IWL_MVM_BT_COEX_RRC 1
99#define IWL_MVM_BT_COEX_TTC 1
100#define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200
101#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451
102#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
103#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
104#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0
105#define IWL_MVM_QUOTA_THRESHOLD 4
106#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
107#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
108#define IWL_MVM_TOF_IS_RESPONDER 0
109#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
110#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
111#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
112#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3
113#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3
114#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2
115#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2
116#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1
117#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16
118#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3
119#define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1
120#define IWL_MVM_RS_RATE_MIN_FAILURE_TH 3
121#define IWL_MVM_RS_RATE_MIN_SUCCESS_TH 8
122#define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT 5 /* Seconds */
123#define IWL_MVM_RS_IDLE_TIMEOUT 5 /* Seconds */
124#define IWL_MVM_RS_MISSED_RATE_MAX 15
125#define IWL_MVM_RS_LEGACY_FAILURE_LIMIT 160
126#define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT 480
127#define IWL_MVM_RS_LEGACY_TABLE_COUNT 160
128#define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT 400
129#define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT 4500
130#define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT 1500
131#define IWL_MVM_RS_SR_FORCE_DECREASE 15 /* percent */
132#define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */
133#define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. valid 100-8000 */
134#define IWL_MVM_RS_AGG_DISABLE_START 3
135#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */
136#define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */
137#define IWL_MVM_RS_TPC_TX_POWER_STEP 3
138
139#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
deleted file mode 100644
index 85ae902df7c0..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ /dev/null
@@ -1,2104 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/etherdevice.h>
67#include <linux/ip.h>
68#include <linux/fs.h>
69#include <net/cfg80211.h>
70#include <net/ipv6.h>
71#include <net/tcp.h>
72#include <net/addrconf.h>
73#include "iwl-modparams.h"
74#include "fw-api.h"
75#include "mvm.h"
76
77void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
78 struct ieee80211_vif *vif,
79 struct cfg80211_gtk_rekey_data *data)
80{
81 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
82 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
83
84 if (iwlwifi_mod_params.sw_crypto)
85 return;
86
87 mutex_lock(&mvm->mutex);
88
89 memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
90 memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
91 mvmvif->rekey_data.replay_ctr =
92 cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
93 mvmvif->rekey_data.valid = true;
94
95 mutex_unlock(&mvm->mutex);
96}
97
98#if IS_ENABLED(CONFIG_IPV6)
99void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
100 struct ieee80211_vif *vif,
101 struct inet6_dev *idev)
102{
103 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
104 struct inet6_ifaddr *ifa;
105 int idx = 0;
106
107 read_lock_bh(&idev->lock);
108 list_for_each_entry(ifa, &idev->addr_list, if_list) {
109 mvmvif->target_ipv6_addrs[idx] = ifa->addr;
110 idx++;
111 if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
112 break;
113 }
114 read_unlock_bh(&idev->lock);
115
116 mvmvif->num_target_ipv6_addrs = idx;
117}
118#endif
119
120void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
121 struct ieee80211_vif *vif, int idx)
122{
123 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
124
125 mvmvif->tx_key_idx = idx;
126}
127
128static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
129{
130 int i;
131
132 for (i = 0; i < IWL_P1K_SIZE; i++)
133 out[i] = cpu_to_le16(p1k[i]);
134}
135
136struct wowlan_key_data {
137 struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
138 struct iwl_wowlan_tkip_params_cmd *tkip;
139 bool error, use_rsc_tsc, use_tkip;
140 int wep_key_idx;
141};
142
143static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
144 struct ieee80211_vif *vif,
145 struct ieee80211_sta *sta,
146 struct ieee80211_key_conf *key,
147 void *_data)
148{
149 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
150 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
151 struct wowlan_key_data *data = _data;
152 struct aes_sc *aes_sc, *aes_tx_sc = NULL;
153 struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
154 struct iwl_p1k_cache *rx_p1ks;
155 u8 *rx_mic_key;
156 struct ieee80211_key_seq seq;
157 u32 cur_rx_iv32 = 0;
158 u16 p1k[IWL_P1K_SIZE];
159 int ret, i;
160
161 mutex_lock(&mvm->mutex);
162
163 switch (key->cipher) {
164 case WLAN_CIPHER_SUITE_WEP40:
165 case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
166 struct {
167 struct iwl_mvm_wep_key_cmd wep_key_cmd;
168 struct iwl_mvm_wep_key wep_key;
169 } __packed wkc = {
170 .wep_key_cmd.mac_id_n_color =
171 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
172 mvmvif->color)),
173 .wep_key_cmd.num_keys = 1,
174 /* firmware sets STA_KEY_FLG_WEP_13BYTES */
175 .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
176 .wep_key.key_index = key->keyidx,
177 .wep_key.key_size = key->keylen,
178 };
179
180 /*
181		 * This will fail -- the key functions don't support
182		 * pairwise WEP keys. However, that's better than silently
183 * failing WoWLAN. Or maybe not?
184 */
185 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
186 break;
187
188 memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
189 if (key->keyidx == mvmvif->tx_key_idx) {
190 /* TX key must be at offset 0 */
191 wkc.wep_key.key_offset = 0;
192 } else {
193 /* others start at 1 */
194 data->wep_key_idx++;
195 wkc.wep_key.key_offset = data->wep_key_idx;
196 }
197
198 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
199 data->error = ret != 0;
200
201 mvm->ptk_ivlen = key->iv_len;
202 mvm->ptk_icvlen = key->icv_len;
203 mvm->gtk_ivlen = key->iv_len;
204 mvm->gtk_icvlen = key->icv_len;
205
206 /* don't upload key again */
207 goto out_unlock;
208 }
209 default:
210 data->error = true;
211 goto out_unlock;
212 case WLAN_CIPHER_SUITE_AES_CMAC:
213 /*
214 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
215 * but we also shouldn't abort suspend due to that. It does have
216 * support for the IGTK key renewal, but doesn't really use the
217 * IGTK for anything. This means we could spuriously wake up or
218 * be deauthenticated, but that was considered acceptable.
219 */
220 goto out_unlock;
221 case WLAN_CIPHER_SUITE_TKIP:
222 if (sta) {
223 tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
224 tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
225
226 rx_p1ks = data->tkip->rx_uni;
227
228 ieee80211_get_key_tx_seq(key, &seq);
229 tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
230 tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
231
232 ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
233 iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
234
235 memcpy(data->tkip->mic_keys.tx,
236 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
237 IWL_MIC_KEY_SIZE);
238
239 rx_mic_key = data->tkip->mic_keys.rx_unicast;
240 } else {
241 tkip_sc =
242 data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
243 rx_p1ks = data->tkip->rx_multi;
244 rx_mic_key = data->tkip->mic_keys.rx_mcast;
245 }
246
247 /*
248 * For non-QoS this relies on the fact that both the uCode and
249		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
250 * for checking the IV in the frames.
251 */
252 for (i = 0; i < IWL_NUM_RSC; i++) {
253 ieee80211_get_key_rx_seq(key, i, &seq);
254 tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
255 tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
256 /* wrapping isn't allowed, AP must rekey */
257 if (seq.tkip.iv32 > cur_rx_iv32)
258 cur_rx_iv32 = seq.tkip.iv32;
259 }
260
261 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
262 cur_rx_iv32, p1k);
263 iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
264 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
265 cur_rx_iv32 + 1, p1k);
266 iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
267
268 memcpy(rx_mic_key,
269 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
270 IWL_MIC_KEY_SIZE);
271
272 data->use_tkip = true;
273 data->use_rsc_tsc = true;
274 break;
275 case WLAN_CIPHER_SUITE_CCMP:
276 if (sta) {
277 u64 pn64;
278
279 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
280 aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
281
282 pn64 = atomic64_read(&key->tx_pn);
283 aes_tx_sc->pn = cpu_to_le64(pn64);
284 } else {
285 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
286 }
287
288 /*
289 * For non-QoS this relies on the fact that both the uCode and
290 * mac80211 use TID 0 for checking the IV in the frames.
291 */
292 for (i = 0; i < IWL_NUM_RSC; i++) {
293 u8 *pn = seq.ccmp.pn;
294
295 ieee80211_get_key_rx_seq(key, i, &seq);
296 aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
297 ((u64)pn[4] << 8) |
298 ((u64)pn[3] << 16) |
299 ((u64)pn[2] << 24) |
300 ((u64)pn[1] << 32) |
301 ((u64)pn[0] << 40));
302 }
303 data->use_rsc_tsc = true;
304 break;
305 }
306
307 /*
308 * The D3 firmware hardcodes the key offset 0 as the key it uses
309 * to transmit packets to the AP, i.e. the PTK.
310 */
311 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
312 key->hw_key_idx = 0;
313 mvm->ptk_ivlen = key->iv_len;
314 mvm->ptk_icvlen = key->icv_len;
315 } else {
316 /*
317 * firmware only supports TSC/RSC for a single key,
318		 * so if there are multiple, keep overwriting them
319 * with new ones -- this relies on mac80211 doing
320 * list_add_tail().
321 */
322 key->hw_key_idx = 1;
323 mvm->gtk_ivlen = key->iv_len;
324 mvm->gtk_icvlen = key->icv_len;
325 }
326
327 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
328 data->error = ret != 0;
329out_unlock:
330 mutex_unlock(&mvm->mutex);
331}
332
333static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
334 struct cfg80211_wowlan *wowlan)
335{
336 struct iwl_wowlan_patterns_cmd *pattern_cmd;
337 struct iwl_host_cmd cmd = {
338 .id = WOWLAN_PATTERNS,
339 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
340 };
341 int i, err;
342
343 if (!wowlan->n_patterns)
344 return 0;
345
346 cmd.len[0] = sizeof(*pattern_cmd) +
347 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
348
349 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
350 if (!pattern_cmd)
351 return -ENOMEM;
352
353 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
354
355 for (i = 0; i < wowlan->n_patterns; i++) {
356 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
357
358 memcpy(&pattern_cmd->patterns[i].mask,
359 wowlan->patterns[i].mask, mask_len);
360 memcpy(&pattern_cmd->patterns[i].pattern,
361 wowlan->patterns[i].pattern,
362 wowlan->patterns[i].pattern_len);
363 pattern_cmd->patterns[i].mask_size = mask_len;
364 pattern_cmd->patterns[i].pattern_size =
365 wowlan->patterns[i].pattern_len;
366 }
367
368 cmd.data[0] = pattern_cmd;
369 err = iwl_mvm_send_cmd(mvm, &cmd);
370 kfree(pattern_cmd);
371 return err;
372}
373
374enum iwl_mvm_tcp_packet_type {
375 MVM_TCP_TX_SYN,
376 MVM_TCP_RX_SYNACK,
377 MVM_TCP_TX_DATA,
378 MVM_TCP_RX_ACK,
379 MVM_TCP_RX_WAKE,
380 MVM_TCP_TX_FIN,
381};
382
383static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
384{
385 __sum16 check = tcp_v4_check(len, saddr, daddr, 0);
386 return cpu_to_le16(be16_to_cpu((__force __be16)check));
387}
388
389static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
390 struct cfg80211_wowlan_tcp *tcp,
391 void *_pkt, u8 *mask,
392 __le16 *pseudo_hdr_csum,
393 enum iwl_mvm_tcp_packet_type ptype)
394{
395 struct {
396 struct ethhdr eth;
397 struct iphdr ip;
398 struct tcphdr tcp;
399 u8 data[];
400 } __packed *pkt = _pkt;
401 u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
402 int i;
403
404	pkt->eth.h_proto = cpu_to_be16(ETH_P_IP);
405 pkt->ip.version = 4;
406 pkt->ip.ihl = 5;
407 pkt->ip.protocol = IPPROTO_TCP;
408
409 switch (ptype) {
410 case MVM_TCP_TX_SYN:
411 case MVM_TCP_TX_DATA:
412 case MVM_TCP_TX_FIN:
413 memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
414 memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
415 pkt->ip.ttl = 128;
416 pkt->ip.saddr = tcp->src;
417 pkt->ip.daddr = tcp->dst;
418 pkt->tcp.source = cpu_to_be16(tcp->src_port);
419 pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
420 /* overwritten for TX SYN later */
421 pkt->tcp.doff = sizeof(struct tcphdr) / 4;
422 pkt->tcp.window = cpu_to_be16(65000);
423 break;
424 case MVM_TCP_RX_SYNACK:
425 case MVM_TCP_RX_ACK:
426 case MVM_TCP_RX_WAKE:
427 memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
428 memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
429 pkt->ip.saddr = tcp->dst;
430 pkt->ip.daddr = tcp->src;
431 pkt->tcp.source = cpu_to_be16(tcp->dst_port);
432 pkt->tcp.dest = cpu_to_be16(tcp->src_port);
433 break;
434 default:
435 WARN_ON(1);
436 return;
437 }
438
439 switch (ptype) {
440 case MVM_TCP_TX_SYN:
441 /* firmware assumes 8 option bytes - 8 NOPs for now */
442 memset(pkt->data, 0x01, 8);
443 ip_tot_len += 8;
444 pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
445 pkt->tcp.syn = 1;
446 break;
447 case MVM_TCP_TX_DATA:
448 ip_tot_len += tcp->payload_len;
449 memcpy(pkt->data, tcp->payload, tcp->payload_len);
450 pkt->tcp.psh = 1;
451 pkt->tcp.ack = 1;
452 break;
453 case MVM_TCP_TX_FIN:
454 pkt->tcp.fin = 1;
455 pkt->tcp.ack = 1;
456 break;
457 case MVM_TCP_RX_SYNACK:
458 pkt->tcp.syn = 1;
459 pkt->tcp.ack = 1;
460 break;
461 case MVM_TCP_RX_ACK:
462 pkt->tcp.ack = 1;
463 break;
464 case MVM_TCP_RX_WAKE:
465 ip_tot_len += tcp->wake_len;
466 pkt->tcp.psh = 1;
467 pkt->tcp.ack = 1;
468 memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
469 break;
470 }
471
472 switch (ptype) {
473 case MVM_TCP_TX_SYN:
474 case MVM_TCP_TX_DATA:
475 case MVM_TCP_TX_FIN:
476 pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
477 pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
478 break;
479 case MVM_TCP_RX_WAKE:
480 for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
481 u8 tmp = tcp->wake_mask[i];
482 mask[i + 6] |= tmp << 6;
483 if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
484 mask[i + 7] = tmp >> 2;
485 }
486 /* fall through for ethernet/IP/TCP headers mask */
487 case MVM_TCP_RX_SYNACK:
488 case MVM_TCP_RX_ACK:
489 mask[0] = 0xff; /* match ethernet */
490 /*
491 * match ethernet, ip.version, ip.ihl
492 * the ip.ihl half byte is really masked out by firmware
493 */
494 mask[1] = 0x7f;
495 mask[2] = 0x80; /* match ip.protocol */
496 mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
497 mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
498 mask[5] = 0x80; /* match tcp flags */
499 /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
500 break;
501	}
502
503 *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
504 pkt->ip.saddr, pkt->ip.daddr);
505}
506
507static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
508 struct ieee80211_vif *vif,
509 struct cfg80211_wowlan_tcp *tcp)
510{
511 struct iwl_wowlan_remote_wake_config *cfg;
512 struct iwl_host_cmd cmd = {
513 .id = REMOTE_WAKE_CONFIG_CMD,
514 .len = { sizeof(*cfg), },
515 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
516 };
517 int ret;
518
519 if (!tcp)
520 return 0;
521
522 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
523 if (!cfg)
524 return -ENOMEM;
525 cmd.data[0] = cfg;
526
527 cfg->max_syn_retries = 10;
528 cfg->max_data_retries = 10;
529 cfg->tcp_syn_ack_timeout = 1; /* seconds */
530 cfg->tcp_ack_timeout = 1; /* seconds */
531
532 /* SYN (TX) */
533 iwl_mvm_build_tcp_packet(
534 vif, tcp, cfg->syn_tx.data, NULL,
535 &cfg->syn_tx.info.tcp_pseudo_header_checksum,
536 MVM_TCP_TX_SYN);
537 cfg->syn_tx.info.tcp_payload_length = 0;
538
539 /* SYN/ACK (RX) */
540 iwl_mvm_build_tcp_packet(
541 vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
542 &cfg->synack_rx.info.tcp_pseudo_header_checksum,
543 MVM_TCP_RX_SYNACK);
544 cfg->synack_rx.info.tcp_payload_length = 0;
545
546 /* KEEPALIVE/ACK (TX) */
547 iwl_mvm_build_tcp_packet(
548 vif, tcp, cfg->keepalive_tx.data, NULL,
549 &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
550 MVM_TCP_TX_DATA);
551 cfg->keepalive_tx.info.tcp_payload_length =
552 cpu_to_le16(tcp->payload_len);
553 cfg->sequence_number_offset = tcp->payload_seq.offset;
554 /* length must be 0..4, the field is little endian */
555 cfg->sequence_number_length = tcp->payload_seq.len;
556 cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
557 cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
558 if (tcp->payload_tok.len) {
559 cfg->token_offset = tcp->payload_tok.offset;
560 cfg->token_length = tcp->payload_tok.len;
561 cfg->num_tokens =
562 cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
563 memcpy(cfg->tokens, tcp->payload_tok.token_stream,
564 tcp->tokens_size);
565 } else {
566 /* set tokens to max value to almost never run out */
567 cfg->num_tokens = cpu_to_le16(65535);
568 }
569
570 /* ACK (RX) */
571 iwl_mvm_build_tcp_packet(
572 vif, tcp, cfg->keepalive_ack_rx.data,
573 cfg->keepalive_ack_rx.rx_mask,
574 &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
575 MVM_TCP_RX_ACK);
576 cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
577
578 /* WAKEUP (RX) */
579 iwl_mvm_build_tcp_packet(
580 vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
581 &cfg->wake_rx.info.tcp_pseudo_header_checksum,
582 MVM_TCP_RX_WAKE);
583 cfg->wake_rx.info.tcp_payload_length =
584 cpu_to_le16(tcp->wake_len);
585
586 /* FIN */
587 iwl_mvm_build_tcp_packet(
588 vif, tcp, cfg->fin_tx.data, NULL,
589 &cfg->fin_tx.info.tcp_pseudo_header_checksum,
590 MVM_TCP_TX_FIN);
591 cfg->fin_tx.info.tcp_payload_length = 0;
592
593 ret = iwl_mvm_send_cmd(mvm, &cmd);
594 kfree(cfg);
595
596 return ret;
597}
598
599static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
600 struct ieee80211_sta *ap_sta)
601{
602 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
603 struct ieee80211_chanctx_conf *ctx;
604 u8 chains_static, chains_dynamic;
605 struct cfg80211_chan_def chandef;
606 int ret, i;
607 struct iwl_binding_cmd binding_cmd = {};
608 struct iwl_time_quota_cmd quota_cmd = {};
609 u32 status;
610
611 /* add back the PHY */
612 if (WARN_ON(!mvmvif->phy_ctxt))
613 return -EINVAL;
614
615 rcu_read_lock();
616 ctx = rcu_dereference(vif->chanctx_conf);
617 if (WARN_ON(!ctx)) {
618 rcu_read_unlock();
619 return -EINVAL;
620 }
621 chandef = ctx->def;
622 chains_static = ctx->rx_chains_static;
623 chains_dynamic = ctx->rx_chains_dynamic;
624 rcu_read_unlock();
625
626 ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
627 chains_static, chains_dynamic);
628 if (ret)
629 return ret;
630
631 /* add back the MAC */
632 mvmvif->uploaded = false;
633
634 if (WARN_ON(!vif->bss_conf.assoc))
635 return -EINVAL;
636
637 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
638 if (ret)
639 return ret;
640
641 /* add back binding - XXX refactor? */
642 binding_cmd.id_and_color =
643 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
644 mvmvif->phy_ctxt->color));
645 binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
646 binding_cmd.phy =
647 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
648 mvmvif->phy_ctxt->color));
649 binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
650 mvmvif->color));
651 for (i = 1; i < MAX_MACS_IN_BINDING; i++)
652 binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
653
654 status = 0;
655 ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
656 sizeof(binding_cmd), &binding_cmd,
657 &status);
658 if (ret) {
659 IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
660 return ret;
661 }
662
663 if (status) {
664 IWL_ERR(mvm, "Binding command failed: %u\n", status);
665 return -EIO;
666 }
667
668 ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
669 if (ret)
670 return ret;
671 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
672
673 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
674 if (ret)
675 return ret;
676
677 /* and some quota */
678 quota_cmd.quotas[0].id_and_color =
679 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
680 mvmvif->phy_ctxt->color));
681 quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
682 quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
683
684 for (i = 1; i < MAX_BINDINGS; i++)
685 quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
686
687 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
688 sizeof(quota_cmd), &quota_cmd);
689 if (ret)
690 IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
691
692 if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
693 IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
694
695 return 0;
696}
697
698static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
699 struct ieee80211_vif *vif)
700{
701 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
702 struct iwl_nonqos_seq_query_cmd query_cmd = {
703 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
704 .mac_id_n_color =
705 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
706 mvmvif->color)),
707 };
708 struct iwl_host_cmd cmd = {
709 .id = NON_QOS_TX_COUNTER_CMD,
710 .flags = CMD_WANT_SKB,
711 };
712 int err;
713 u32 size;
714
715 cmd.data[0] = &query_cmd;
716 cmd.len[0] = sizeof(query_cmd);
717
718 err = iwl_mvm_send_cmd(mvm, &cmd);
719 if (err)
720 return err;
721
722 size = iwl_rx_packet_payload_len(cmd.resp_pkt);
723 if (size < sizeof(__le16)) {
724 err = -EINVAL;
725 } else {
726 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
727 /* firmware returns next, not last-used seqno */
728 err = (u16) (err - 0x10);
729 }
730
731 iwl_free_resp(&cmd);
732 return err;
733}
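A standalone worked example (not driver code) of the sequence-number arithmetic used here and in the resume path: the 802.11 sequence-control field keeps the fragment number in its low 4 bits, so one whole frame corresponds to a step of 0x10, and the firmware's next-to-use value is converted to a last-used value by subtracting 0x10.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t fw_next = 0x1230;            /* next-to-use, as the fw reports */
	uint16_t last_used = fw_next - 0x10;  /* what the driver stores */

	/* upper 12 bits are the sequence number: 291 vs 290 */
	printf("fw seq %u, driver seq %u\n", fw_next >> 4, last_used >> 4);
	return 0;
}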
734
735void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
736{
737 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
738 struct iwl_nonqos_seq_query_cmd query_cmd = {
739 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
740 .mac_id_n_color =
741 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
742 mvmvif->color)),
743 .value = cpu_to_le16(mvmvif->seqno),
744 };
745
746 /* return if called during restart, not resume from D3 */
747 if (!mvmvif->seqno_valid)
748 return;
749
750 mvmvif->seqno_valid = false;
751
752 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
753 sizeof(query_cmd), &query_cmd))
754 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
755}
756
757static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
758{
759 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
760
761 iwl_trans_stop_device(mvm->trans);
762
763 /*
764 * Set the HW restart bit -- this is mostly true as we're
765 * going to load new firmware and reprogram that, though
766 * the reprogramming is going to be manual to avoid adding
767 * all the MACs that aren't supported.
768 * We don't have to clear up everything though because the
769 * reprogramming is manual. When we resume, we'll actually
770 * go through a proper restart sequence again to switch
771 * back to the runtime firmware image.
772 */
773 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
774
775 /* We reprogram keys and shouldn't allocate new key indices */
776 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
777
778 mvm->ptk_ivlen = 0;
779 mvm->ptk_icvlen = 0;
780 mvm->gtk_ivlen = 0;
781 mvm->gtk_icvlen = 0;
782
783 return iwl_mvm_load_d3_fw(mvm);
784}
785
786static int
787iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
788 struct cfg80211_wowlan *wowlan,
789 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
790 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
791 struct ieee80211_sta *ap_sta)
792{
793 int ret;
794 struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
795
796 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
797
798 wowlan_config_cmd->is_11n_connection =
799 ap_sta->ht_cap.ht_supported;
800
801 /* Query the last used seqno and set it */
802 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
803 if (ret < 0)
804 return ret;
805
806 wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
807
808 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
809
810 if (wowlan->disconnect)
811 wowlan_config_cmd->wakeup_filter |=
812 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
813 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
814 if (wowlan->magic_pkt)
815 wowlan_config_cmd->wakeup_filter |=
816 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
817 if (wowlan->gtk_rekey_failure)
818 wowlan_config_cmd->wakeup_filter |=
819 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
820 if (wowlan->eap_identity_req)
821 wowlan_config_cmd->wakeup_filter |=
822 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
823 if (wowlan->four_way_handshake)
824 wowlan_config_cmd->wakeup_filter |=
825 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
826 if (wowlan->n_patterns)
827 wowlan_config_cmd->wakeup_filter |=
828 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
829
830 if (wowlan->rfkill_release)
831 wowlan_config_cmd->wakeup_filter |=
832 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
833
834 if (wowlan->tcp) {
835 /*
836 * Set the "link change" (really "link lost") flag as well
837 * since that implies losing the TCP connection.
838 */
839 wowlan_config_cmd->wakeup_filter |=
840 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
841 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
842 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
843 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
844 }
845
846 return 0;
847}
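A minimal sketch of how the wakeup-filter word above is assembled: each requested trigger OR-s one or more filter bits into a single 32-bit mask. The EX_WAKE_* values are invented for the example and are not the real IWL_WOWLAN_WAKEUP_* flags.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_WAKE_MAGIC_PACKET  (1u << 0)   /* made-up bit values */
#define EX_WAKE_BEACON_MISS   (1u << 1)
#define EX_WAKE_LINK_CHANGE   (1u << 2)

static uint32_t ex_build_wakeup_filter(bool magic_pkt, bool disconnect)
{
	uint32_t filter = 0;

	if (magic_pkt)
		filter |= EX_WAKE_MAGIC_PACKET;
	if (disconnect)   /* disconnect maps to two bits, as above */
		filter |= EX_WAKE_BEACON_MISS | EX_WAKE_LINK_CHANGE;
	return filter;
}

int main(void)
{
	printf("filter = 0x%x\n", ex_build_wakeup_filter(true, true)); /* 0x7 */
	return 0;
}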
848
849static int
850iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
851 struct cfg80211_wowlan *wowlan,
852 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
853 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
854 struct ieee80211_sta *ap_sta)
855{
856 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
857 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
858 struct wowlan_key_data key_data = {
859 .use_rsc_tsc = false,
860 .tkip = &tkip_cmd,
861 .use_tkip = false,
862 };
863 int ret;
864
865 ret = iwl_mvm_switch_to_d3(mvm);
866 if (ret)
867 return ret;
868
869 ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
870 if (ret)
871 return ret;
872
873 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
874 if (!key_data.rsc_tsc)
875 return -ENOMEM;
876
877 if (!iwlwifi_mod_params.sw_crypto) {
878 /*
879 * This needs to be unlocked due to lock ordering
880 * constraints. Since we're in the suspend path
881 * that isn't really a problem though.
882 */
883 mutex_unlock(&mvm->mutex);
884 ieee80211_iter_keys(mvm->hw, vif,
885 iwl_mvm_wowlan_program_keys,
886 &key_data);
887 mutex_lock(&mvm->mutex);
888 if (key_data.error) {
889 ret = -EIO;
890 goto out;
891 }
892
893 if (key_data.use_rsc_tsc) {
894 struct iwl_host_cmd rsc_tsc_cmd = {
895 .id = WOWLAN_TSC_RSC_PARAM,
896 .data[0] = key_data.rsc_tsc,
897 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
898 .len[0] = sizeof(*key_data.rsc_tsc),
899 };
900
901 ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd);
902 if (ret)
903 goto out;
904 }
905
906 if (key_data.use_tkip) {
907 ret = iwl_mvm_send_cmd_pdu(mvm,
908 WOWLAN_TKIP_PARAM,
909 0, sizeof(tkip_cmd),
910 &tkip_cmd);
911 if (ret)
912 goto out;
913 }
914
915 if (mvmvif->rekey_data.valid) {
916 memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
917 memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
918 NL80211_KCK_LEN);
919 kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
920 memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
921 NL80211_KEK_LEN);
922 kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
923 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
924
925 ret = iwl_mvm_send_cmd_pdu(mvm,
926 WOWLAN_KEK_KCK_MATERIAL, 0,
927 sizeof(kek_kck_cmd),
928 &kek_kck_cmd);
929 if (ret)
930 goto out;
931 }
932 }
933
934 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
935 sizeof(*wowlan_config_cmd),
936 wowlan_config_cmd);
937 if (ret)
938 goto out;
939
940 ret = iwl_mvm_send_patterns(mvm, wowlan);
941 if (ret)
942 goto out;
943
944 ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0);
945 if (ret)
946 goto out;
947
948 ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
949
950out:
951 kfree(key_data.rsc_tsc);
952 return ret;
953}
954
955static int
956iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
957 struct cfg80211_wowlan *wowlan,
958 struct cfg80211_sched_scan_request *nd_config,
959 struct ieee80211_vif *vif)
960{
961 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
962 int ret;
963
964 ret = iwl_mvm_switch_to_d3(mvm);
965 if (ret)
966 return ret;
967
968 /* rfkill release can be either for wowlan or netdetect */
969 if (wowlan->rfkill_release)
970 wowlan_config_cmd.wakeup_filter |=
971 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
972
973 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
974 sizeof(wowlan_config_cmd),
975 &wowlan_config_cmd);
976 if (ret)
977 return ret;
978
979 ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
980 IWL_MVM_SCAN_NETDETECT);
981 if (ret)
982 return ret;
983
984 if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
985 return -EBUSY;
986
987 /* save the sched scan matchsets... */
988 if (nd_config->n_match_sets) {
989 mvm->nd_match_sets = kmemdup(nd_config->match_sets,
990 sizeof(*nd_config->match_sets) *
991 nd_config->n_match_sets,
992 GFP_KERNEL);
993 if (mvm->nd_match_sets)
994 mvm->n_nd_match_sets = nd_config->n_match_sets;
995 }
996
997 /* ...and the sched scan channels for later reporting */
998 mvm->nd_channels = kmemdup(nd_config->channels,
999 sizeof(*nd_config->channels) *
1000 nd_config->n_channels,
1001 GFP_KERNEL);
1002 if (mvm->nd_channels)
1003 mvm->n_nd_channels = nd_config->n_channels;
1004
1005 return 0;
1006}
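A userspace analog (illustrative only) of the kmemdup() calls above: the match sets and channel list from the request are copied into driver-owned buffers so they can still be consulted when results are reported after resume.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	int request_channels[] = { 1, 6, 11 };    /* stand-in request data */
	size_t len = sizeof(request_channels);
	int *saved = malloc(len);                 /* like kmemdup() */

	if (!saved)
		return 1;
	memcpy(saved, request_channels, len);
	printf("saved %zu channels, first is %d\n",
	       len / sizeof(saved[0]), saved[0]);
	free(saved);
	return 0;
}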
1007
1008static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
1009{
1010 kfree(mvm->nd_match_sets);
1011 mvm->nd_match_sets = NULL;
1012 mvm->n_nd_match_sets = 0;
1013 kfree(mvm->nd_channels);
1014 mvm->nd_channels = NULL;
1015 mvm->n_nd_channels = 0;
1016}
1017
1018static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1019 struct cfg80211_wowlan *wowlan,
1020 bool test)
1021{
1022 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1023 struct ieee80211_vif *vif = NULL;
1024 struct iwl_mvm_vif *mvmvif = NULL;
1025 struct ieee80211_sta *ap_sta = NULL;
1026 struct iwl_d3_manager_config d3_cfg_cmd_data = {
1027 /*
1028 * Program the minimum sleep time to 10 seconds, as many
1029 * platforms have issues processing a wakeup signal while
1030 * still being in the process of suspending.
1031 */
1032 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
1033 };
1034 struct iwl_host_cmd d3_cfg_cmd = {
1035 .id = D3_CONFIG_CMD,
1036 .flags = CMD_WANT_SKB,
1037 .data[0] = &d3_cfg_cmd_data,
1038 .len[0] = sizeof(d3_cfg_cmd_data),
1039 };
1040 int ret;
1041 int len __maybe_unused;
1042
1043 if (!wowlan) {
1044 /*
1045 * mac80211 shouldn't get here, but for D3 test
1046 * it doesn't warrant a warning
1047 */
1048 WARN_ON(!test);
1049 return -EINVAL;
1050 }
1051
1052 mutex_lock(&mvm->mutex);
1053
1054 vif = iwl_mvm_get_bss_vif(mvm);
1055 if (IS_ERR_OR_NULL(vif)) {
1056 ret = 1;
1057 goto out_noreset;
1058 }
1059
1060 mvmvif = iwl_mvm_vif_from_mac80211(vif);
1061
1062 if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
1063 /* if we're not associated, this must be netdetect */
1064 if (!wowlan->nd_config && !mvm->nd_config) {
1065 ret = 1;
1066 goto out_noreset;
1067 }
1068
1069 ret = iwl_mvm_netdetect_config(
1070 mvm, wowlan, wowlan->nd_config ?: mvm->nd_config, vif);
1071 if (ret)
1072 goto out;
1073
1074 mvm->net_detect = true;
1075 } else {
1076 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
1077
1078 ap_sta = rcu_dereference_protected(
1079 mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
1080 lockdep_is_held(&mvm->mutex));
1081 if (IS_ERR_OR_NULL(ap_sta)) {
1082 ret = -EINVAL;
1083 goto out_noreset;
1084 }
1085
1086 ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1087 vif, mvmvif, ap_sta);
1088 if (ret)
1089 goto out_noreset;
1090 ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1091 vif, mvmvif, ap_sta);
1092 if (ret)
1093 goto out;
1094
1095 mvm->net_detect = false;
1096 }
1097
1098 ret = iwl_mvm_power_update_device(mvm);
1099 if (ret)
1100 goto out;
1101
1102 ret = iwl_mvm_power_update_mac(mvm);
1103 if (ret)
1104 goto out;
1105
1106#ifdef CONFIG_IWLWIFI_DEBUGFS
1107 if (mvm->d3_wake_sysassert)
1108 d3_cfg_cmd_data.wakeup_flags |=
1109 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
1110#endif
1111
1112 /* must be last -- this switches firmware state */
1113 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
1114 if (ret)
1115 goto out;
1116#ifdef CONFIG_IWLWIFI_DEBUGFS
1117 len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
1118 if (len >= sizeof(u32)) {
1119 mvm->d3_test_pme_ptr =
1120 le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
1121 }
1122#endif
1123 iwl_free_resp(&d3_cfg_cmd);
1124
1125 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1126
1127 iwl_trans_d3_suspend(mvm->trans, test);
1128 out:
1129 if (ret < 0) {
1130 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1131 ieee80211_restart_hw(mvm->hw);
1132 iwl_mvm_free_nd(mvm);
1133 }
1134 out_noreset:
1135 mutex_unlock(&mvm->mutex);
1136
1137 return ret;
1138}
1139
1140static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
1141{
1142 struct iwl_notification_wait wait_d3;
1143 static const u16 d3_notif[] = { D3_CONFIG_CMD };
1144 int ret;
1145
1146 iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
1147 d3_notif, ARRAY_SIZE(d3_notif),
1148 NULL, NULL);
1149
1150 ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
1151 if (ret)
1152 goto remove_notif;
1153
1154 ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
1155 WARN_ON_ONCE(ret);
1156 return ret;
1157
1158remove_notif:
1159 iwl_remove_notification(&mvm->notif_wait, &wait_d3);
1160 return ret;
1161}
1162
1163int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1164{
1165 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1166 int ret;
1167
1168 /* make sure the d0i3 exit work is not pending */
1169 flush_work(&mvm->d0i3_exit_work);
1170
1171 ret = iwl_trans_suspend(mvm->trans);
1172 if (ret)
1173 return ret;
1174
1175 mvm->trans->wowlan_d0i3 = wowlan->any;
1176 if (mvm->trans->wowlan_d0i3) {
1177 /* 'any' trigger means d0i3 usage */
1178 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
1179 ret = iwl_mvm_enter_d0i3_sync(mvm);
1180
1181 if (ret)
1182 return ret;
1183 }
1184
1185 mutex_lock(&mvm->d0i3_suspend_mutex);
1186 __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1187 mutex_unlock(&mvm->d0i3_suspend_mutex);
1188
1189 iwl_trans_d3_suspend(mvm->trans, false);
1190
1191 return 0;
1192 }
1193
1194 return __iwl_mvm_suspend(hw, wowlan, false);
1195}
1196
1197/* converted data from the different status responses */
1198struct iwl_wowlan_status_data {
1199 u16 pattern_number;
1200 u16 qos_seq_ctr[8];
1201 u32 wakeup_reasons;
1202 u32 wake_packet_length;
1203 u32 wake_packet_bufsize;
1204 const u8 *wake_packet;
1205};
1206
1207static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1208 struct ieee80211_vif *vif,
1209 struct iwl_wowlan_status_data *status)
1210{
1211 struct sk_buff *pkt = NULL;
1212 struct cfg80211_wowlan_wakeup wakeup = {
1213 .pattern_idx = -1,
1214 };
1215 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1216 u32 reasons = status->wakeup_reasons;
1217
1218 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
1219 wakeup_report = NULL;
1220 goto report;
1221 }
1222
1223 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
1224 wakeup.magic_pkt = true;
1225
1226 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
1227 wakeup.pattern_idx =
1228 status->pattern_number;
1229
1230 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1231 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
1232 wakeup.disconnect = true;
1233
1234 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
1235 wakeup.gtk_rekey_failure = true;
1236
1237 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1238 wakeup.rfkill_release = true;
1239
1240 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
1241 wakeup.eap_identity_req = true;
1242
1243 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
1244 wakeup.four_way_handshake = true;
1245
1246 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
1247 wakeup.tcp_connlost = true;
1248
1249 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
1250 wakeup.tcp_nomoretokens = true;
1251
1252 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
1253 wakeup.tcp_match = true;
1254
1255 if (status->wake_packet_bufsize) {
1256 int pktsize = status->wake_packet_bufsize;
1257 int pktlen = status->wake_packet_length;
1258 const u8 *pktdata = status->wake_packet;
1259 struct ieee80211_hdr *hdr = (void *)pktdata;
1260 int truncated = pktlen - pktsize;
1261
1262 /* this would be a firmware bug */
1263 if (WARN_ON_ONCE(truncated < 0))
1264 truncated = 0;
1265
1266 if (ieee80211_is_data(hdr->frame_control)) {
1267 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
1268 int ivlen = 0, icvlen = 4; /* also FCS */
1269
1270 pkt = alloc_skb(pktsize, GFP_KERNEL);
1271 if (!pkt)
1272 goto report;
1273
1274 memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
1275 pktdata += hdrlen;
1276 pktsize -= hdrlen;
1277
1278 if (ieee80211_has_protected(hdr->frame_control)) {
1279 /*
1280 * This is unlocked and using gtk_i(c)vlen,
1281 * but since everything is under RTNL still
1282 * that's not really a problem - changing
1283 * it would be difficult.
1284 */
1285 if (is_multicast_ether_addr(hdr->addr1)) {
1286 ivlen = mvm->gtk_ivlen;
1287 icvlen += mvm->gtk_icvlen;
1288 } else {
1289 ivlen = mvm->ptk_ivlen;
1290 icvlen += mvm->ptk_icvlen;
1291 }
1292 }
1293
1294 /* if truncated, FCS/ICV is (partially) gone */
1295 if (truncated >= icvlen) {
1296 truncated -= icvlen;
1297 icvlen = 0;
1298 } else {
1299 icvlen -= truncated;
1300 truncated = 0;
1301 }
1302
1303 pktsize -= ivlen + icvlen;
1304 pktdata += ivlen;
1305
1306 memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
1307
1308 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
1309 goto report;
1310 wakeup.packet = pkt->data;
1311 wakeup.packet_present_len = pkt->len;
1312 wakeup.packet_len = pkt->len - truncated;
1313 wakeup.packet_80211 = false;
1314 } else {
1315 int fcslen = 4;
1316
1317 if (truncated >= 4) {
1318 truncated -= 4;
1319 fcslen = 0;
1320 } else {
1321 fcslen -= truncated;
1322 truncated = 0;
1323 }
1324 pktsize -= fcslen;
1325 wakeup.packet = status->wake_packet;
1326 wakeup.packet_present_len = pktsize;
1327 wakeup.packet_len = pktlen - truncated;
1328 wakeup.packet_80211 = true;
1329 }
1330 }
1331
1332 report:
1333 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1334 kfree_skb(pkt);
1335}
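A standalone sketch of the truncation accounting above, under the simplifying assumption of an unencrypted frame: the firmware reports both the full on-air length and the (possibly smaller) buffered size, and the trailing FCS/ICV bytes are treated as the part that went missing before any payload is counted as truncated.

#include <stdio.h>

int main(void)
{
	int pktlen = 120;                 /* full length as seen on air */
	int pktsize = 100;                /* bytes the firmware buffered */
	int truncated = pktlen - pktsize; /* 20 bytes missing */
	int fcslen = 4;

	if (truncated >= fcslen) {        /* FCS entirely missing */
		truncated -= fcslen;
		fcslen = 0;
	} else {                          /* FCS only partially present */
		fcslen -= truncated;
		truncated = 0;
	}

	/* 100 bytes present, 104 bytes reported as the frame length */
	printf("present=%d reported=%d\n", pktsize - fcslen, pktlen - truncated);
	return 0;
}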
1336
1337static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
1338 struct ieee80211_key_seq *seq)
1339{
1340 u64 pn;
1341
1342 pn = le64_to_cpu(sc->pn);
1343 seq->ccmp.pn[0] = pn >> 40;
1344 seq->ccmp.pn[1] = pn >> 32;
1345 seq->ccmp.pn[2] = pn >> 24;
1346 seq->ccmp.pn[3] = pn >> 16;
1347 seq->ccmp.pn[4] = pn >> 8;
1348 seq->ccmp.pn[5] = pn;
1349}
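A standalone illustration of the unpacking above: the 48-bit CCMP packet number is split into six bytes, most significant first, which is the byte order mac80211 expects in the receive-sequence structure.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pn = 0x010203040506ULL;  /* example 48-bit packet number */
	uint8_t bytes[6];
	int i;

	for (i = 0; i < 6; i++)
		bytes[i] = pn >> (40 - 8 * i);  /* same shifts as above */

	for (i = 0; i < 6; i++)
		printf("%02x ", bytes[i]);      /* prints 01 02 03 04 05 06 */
	printf("\n");
	return 0;
}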
1350
1351static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
1352 struct ieee80211_key_seq *seq)
1353{
1354 seq->tkip.iv32 = le32_to_cpu(sc->iv32);
1355 seq->tkip.iv16 = le16_to_cpu(sc->iv16);
1356}
1357
1358static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs,
1359 struct ieee80211_key_conf *key)
1360{
1361 int tid;
1362
1363 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1364
1365 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1366 struct ieee80211_key_seq seq = {};
1367
1368 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1369 ieee80211_set_key_rx_seq(key, tid, &seq);
1370 }
1371}
1372
1373static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1374 struct ieee80211_key_conf *key)
1375{
1376 int tid;
1377
1378 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1379
1380 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1381 struct ieee80211_key_seq seq = {};
1382
1383 iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
1384 ieee80211_set_key_rx_seq(key, tid, &seq);
1385 }
1386}
1387
1388static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
1389 struct iwl_wowlan_status *status)
1390{
1391 union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
1392
1393 switch (key->cipher) {
1394 case WLAN_CIPHER_SUITE_CCMP:
1395 iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key);
1396 break;
1397 case WLAN_CIPHER_SUITE_TKIP:
1398 iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
1399 break;
1400 default:
1401 WARN_ON(1);
1402 }
1403}
1404
1405struct iwl_mvm_d3_gtk_iter_data {
1406 struct iwl_wowlan_status *status;
1407 void *last_gtk;
1408 u32 cipher;
1409 bool find_phase, unhandled_cipher;
1410 int num_keys;
1411};
1412
1413static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
1414 struct ieee80211_vif *vif,
1415 struct ieee80211_sta *sta,
1416 struct ieee80211_key_conf *key,
1417 void *_data)
1418{
1419 struct iwl_mvm_d3_gtk_iter_data *data = _data;
1420
1421 if (data->unhandled_cipher)
1422 return;
1423
1424 switch (key->cipher) {
1425 case WLAN_CIPHER_SUITE_WEP40:
1426 case WLAN_CIPHER_SUITE_WEP104:
1427 /* ignore WEP completely, nothing to do */
1428 return;
1429 case WLAN_CIPHER_SUITE_CCMP:
1430 case WLAN_CIPHER_SUITE_TKIP:
1431 /* we support these */
1432 break;
1433 default:
1434 /* everything else (even CMAC for MFP) - disconnect from AP */
1435 data->unhandled_cipher = true;
1436 return;
1437 }
1438
1439 data->num_keys++;
1440
1441 /*
1442 * pairwise key - update sequence counters only;
1443 * note that this assumes no TDLS sessions are active
1444 */
1445 if (sta) {
1446 struct ieee80211_key_seq seq = {};
1447 union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
1448
1449 if (data->find_phase)
1450 return;
1451
1452 switch (key->cipher) {
1453 case WLAN_CIPHER_SUITE_CCMP:
1454 iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
1455 atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
1456 break;
1457 case WLAN_CIPHER_SUITE_TKIP:
1458 iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
1459 iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
1460 ieee80211_set_key_tx_seq(key, &seq);
1461 break;
1462 }
1463
1464 /* that's it for this key */
1465 return;
1466 }
1467
1468 if (data->find_phase) {
1469 data->last_gtk = key;
1470 data->cipher = key->cipher;
1471 return;
1472 }
1473
1474 if (data->status->num_of_gtk_rekeys)
1475 ieee80211_remove_key(key);
1476 else if (data->last_gtk == key)
1477 iwl_mvm_set_key_rx_seq(key, data->status);
1478}
1479
1480static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1481 struct ieee80211_vif *vif,
1482 struct iwl_wowlan_status *status)
1483{
1484 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1485 struct iwl_mvm_d3_gtk_iter_data gtkdata = {
1486 .status = status,
1487 };
1488 u32 disconnection_reasons =
1489 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1490 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
1491
1492 if (!status || !vif->bss_conf.bssid)
1493 return false;
1494
1495 if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
1496 return false;
1497
1498 /* find last GTK that we used initially, if any */
1499 gtkdata.find_phase = true;
1500 ieee80211_iter_keys(mvm->hw, vif,
1501 iwl_mvm_d3_update_gtks, &gtkdata);
1502 /* not trying to keep connections with MFP/unhandled ciphers */
1503 if (gtkdata.unhandled_cipher)
1504 return false;
1505 if (!gtkdata.num_keys)
1506 goto out;
1507 if (!gtkdata.last_gtk)
1508 return false;
1509
1510 /*
1511 * invalidate all other GTKs that might still exist and update
1512 * the one that we used
1513 */
1514 gtkdata.find_phase = false;
1515 ieee80211_iter_keys(mvm->hw, vif,
1516 iwl_mvm_d3_update_gtks, &gtkdata);
1517
1518 if (status->num_of_gtk_rekeys) {
1519 struct ieee80211_key_conf *key;
1520 struct {
1521 struct ieee80211_key_conf conf;
1522 u8 key[32];
1523 } conf = {
1524 .conf.cipher = gtkdata.cipher,
1525 .conf.keyidx = status->gtk.key_index,
1526 };
1527
1528 switch (gtkdata.cipher) {
1529 case WLAN_CIPHER_SUITE_CCMP:
1530 conf.conf.keylen = WLAN_KEY_LEN_CCMP;
1531 memcpy(conf.conf.key, status->gtk.decrypt_key,
1532 WLAN_KEY_LEN_CCMP);
1533 break;
1534 case WLAN_CIPHER_SUITE_TKIP:
1535 conf.conf.keylen = WLAN_KEY_LEN_TKIP;
1536 memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
1537 /* leave TX MIC key zeroed, we don't use it anyway */
1538 memcpy(conf.conf.key +
1539 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
1540 status->gtk.tkip_mic_key, 8);
1541 break;
1542 }
1543
1544 key = ieee80211_gtk_rekey_add(vif, &conf.conf);
1545 if (IS_ERR(key))
1546 return false;
1547 iwl_mvm_set_key_rx_seq(key, status);
1548 }
1549
1550 if (status->num_of_gtk_rekeys) {
1551 __be64 replay_ctr =
1552 cpu_to_be64(le64_to_cpu(status->replay_ctr));
1553 ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
1554 (void *)&replay_ctr, GFP_KERNEL);
1555 }
1556
1557out:
1558 mvmvif->seqno_valid = true;
1559 /* +0x10 because the set API expects next-to-use, not last-used */
1560 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
1561
1562 return true;
1563}
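A small standalone example of the replay-counter conversion at the end of the function above: the firmware reports the EAPOL replay counter little-endian, while the rekey notification expects it big-endian, so the value is byte-reversed before being handed to mac80211. The helper name is invented for the illustration.

#include <stdint.h>
#include <stdio.h>

static uint64_t ex_bswap64(uint64_t v)   /* portable byte reversal */
{
	uint64_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r = (r << 8) | ((v >> (8 * i)) & 0xff);
	return r;
}

int main(void)
{
	uint64_t replay_ctr = 5;   /* host-order value read from the LE field */

	printf("%016llx\n", (unsigned long long)ex_bswap64(replay_ctr));
	return 0;
}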
1564
1565static struct iwl_wowlan_status *
1566iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1567{
1568 u32 base = mvm->error_event_table;
1569 struct error_table_start {
1570 /* cf. struct iwl_error_event_table */
1571 u32 valid;
1572 u32 error_id;
1573 } err_info;
1574 struct iwl_host_cmd cmd = {
1575 .id = WOWLAN_GET_STATUSES,
1576 .flags = CMD_WANT_SKB,
1577 };
1578 struct iwl_wowlan_status *status, *fw_status;
1579 int ret, len, status_size;
1580
1581 iwl_trans_read_mem_bytes(mvm->trans, base,
1582 &err_info, sizeof(err_info));
1583
1584 if (err_info.valid) {
1585 IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
1586 err_info.valid, err_info.error_id);
1587 if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1588 struct cfg80211_wowlan_wakeup wakeup = {
1589 .rfkill_release = true,
1590 };
1591 ieee80211_report_wowlan_wakeup(vif, &wakeup,
1592 GFP_KERNEL);
1593 }
1594 return ERR_PTR(-EIO);
1595 }
1596
1597 /* only for tracing for now */
1598 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
1599 if (ret)
1600 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1601
1602 ret = iwl_mvm_send_cmd(mvm, &cmd);
1603 if (ret) {
1604 IWL_ERR(mvm, "failed to query status (%d)\n", ret);
1605 return ERR_PTR(ret);
1606 }
1607
1608 /* RF-kill already asserted again... */
1609 if (!cmd.resp_pkt) {
1610 fw_status = ERR_PTR(-ERFKILL);
1611 goto out_free_resp;
1612 }
1613
1614 status_size = sizeof(*fw_status);
1615
1616 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1617 if (len < status_size) {
1618 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1619 fw_status = ERR_PTR(-EIO);
1620 goto out_free_resp;
1621 }
1622
1623 status = (void *)cmd.resp_pkt->data;
1624 if (len != (status_size +
1625 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
1626 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1627 fw_status = ERR_PTR(-EIO);
1628 goto out_free_resp;
1629 }
1630
1631 fw_status = kmemdup(status, len, GFP_KERNEL);
1632
1633out_free_resp:
1634 iwl_free_resp(&cmd);
1635 return fw_status;
1636}
1637
1638/* releases the MVM mutex */
1639static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1640 struct ieee80211_vif *vif)
1641{
1642 struct iwl_wowlan_status_data status;
1643 struct iwl_wowlan_status *fw_status;
1644 int i;
1645 bool keep;
1646 struct ieee80211_sta *ap_sta;
1647 struct iwl_mvm_sta *mvm_ap_sta;
1648
1649 fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
1650 if (IS_ERR_OR_NULL(fw_status))
1651 goto out_unlock;
1652
1653 status.pattern_number = le16_to_cpu(fw_status->pattern_number);
1654 for (i = 0; i < 8; i++)
1655 status.qos_seq_ctr[i] =
1656 le16_to_cpu(fw_status->qos_seq_ctr[i]);
1657 status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
1658 status.wake_packet_length =
1659 le32_to_cpu(fw_status->wake_packet_length);
1660 status.wake_packet_bufsize =
1661 le32_to_cpu(fw_status->wake_packet_bufsize);
1662 status.wake_packet = fw_status->wake_packet;
1663
1664 /* still at hard-coded place 0 for D3 image */
1665 ap_sta = rcu_dereference_protected(
1666 mvm->fw_id_to_mac_id[0],
1667 lockdep_is_held(&mvm->mutex));
1668 if (IS_ERR_OR_NULL(ap_sta))
1669 goto out_free;
1670
1671 mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
1672 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1673 u16 seq = status.qos_seq_ctr[i];
1674 /* firmware stores last-used value, we store next value */
1675 seq += 0x10;
1676 mvm_ap_sta->tid_data[i].seq_number = seq;
1677 }
1678
1679 /* now we have all the data we need, unlock to avoid mac80211 issues */
1680 mutex_unlock(&mvm->mutex);
1681
1682 iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
1683
1684 keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
1685
1686 kfree(fw_status);
1687 return keep;
1688
1689out_free:
1690 kfree(fw_status);
1691out_unlock:
1692 mutex_unlock(&mvm->mutex);
1693 return false;
1694}
1695
1696struct iwl_mvm_nd_query_results {
1697 u32 matched_profiles;
1698 struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
1699};
1700
1701static int
1702iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
1703 struct iwl_mvm_nd_query_results *results)
1704{
1705 struct iwl_scan_offload_profiles_query *query;
1706 struct iwl_host_cmd cmd = {
1707 .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
1708 .flags = CMD_WANT_SKB,
1709 };
1710 int ret, len;
1711
1712 ret = iwl_mvm_send_cmd(mvm, &cmd);
1713 if (ret) {
1714 IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
1715 return ret;
1716 }
1717
1718 /* RF-kill already asserted again... */
1719 if (!cmd.resp_pkt) {
1720 ret = -ERFKILL;
1721 goto out_free_resp;
1722 }
1723
1724 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1725 if (len < sizeof(*query)) {
1726 IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
1727 ret = -EIO;
1728 goto out_free_resp;
1729 }
1730
1731 query = (void *)cmd.resp_pkt->data;
1732
1733 results->matched_profiles = le32_to_cpu(query->matched_profiles);
1734 memcpy(results->matches, query->matches, sizeof(results->matches));
1735
1736#ifdef CONFIG_IWLWIFI_DEBUGFS
1737 mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
1738#endif
1739
1740out_free_resp:
1741 iwl_free_resp(&cmd);
1742 return ret;
1743}
1744
1745static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
1746 struct ieee80211_vif *vif)
1747{
1748 struct cfg80211_wowlan_nd_info *net_detect = NULL;
1749 struct cfg80211_wowlan_wakeup wakeup = {
1750 .pattern_idx = -1,
1751 };
1752 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1753 struct iwl_mvm_nd_query_results query;
1754 struct iwl_wowlan_status *fw_status;
1755 unsigned long matched_profiles;
1756 u32 reasons = 0;
1757 int i, j, n_matches, ret;
1758
1759 fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
1760 if (!IS_ERR_OR_NULL(fw_status)) {
1761 reasons = le32_to_cpu(fw_status->wakeup_reasons);
1762 kfree(fw_status);
1763 }
1764
1765 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1766 wakeup.rfkill_release = true;
1767
1768 if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
1769 goto out;
1770
1771 ret = iwl_mvm_netdetect_query_results(mvm, &query);
1772 if (ret || !query.matched_profiles) {
1773 wakeup_report = NULL;
1774 goto out;
1775 }
1776
1777 matched_profiles = query.matched_profiles;
1778 if (mvm->n_nd_match_sets) {
1779 n_matches = hweight_long(matched_profiles);
1780 } else {
1781 IWL_ERR(mvm, "no net detect match information available\n");
1782 n_matches = 0;
1783 }
1784
1785 net_detect = kzalloc(sizeof(*net_detect) +
1786 (n_matches * sizeof(net_detect->matches[0])),
1787 GFP_KERNEL);
1788 if (!net_detect || !n_matches)
1789 goto out_report_nd;
1790
1791 for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
1792 struct iwl_scan_offload_profile_match *fw_match;
1793 struct cfg80211_wowlan_nd_match *match;
1794 int idx, n_channels = 0;
1795
1796 fw_match = &query.matches[i];
1797
1798 for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
1799 n_channels += hweight8(fw_match->matching_channels[j]);
1800
1801 match = kzalloc(sizeof(*match) +
1802 (n_channels * sizeof(*match->channels)),
1803 GFP_KERNEL);
1804 if (!match)
1805 goto out_report_nd;
1806
1807 net_detect->matches[net_detect->n_matches++] = match;
1808
1809 /* We inverted the order of the SSIDs in the scan
1810 * request, so invert the index here.
1811 */
1812 idx = mvm->n_nd_match_sets - i - 1;
1813 match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
1814 memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
1815 match->ssid.ssid_len);
1816
1817 if (mvm->n_nd_channels < n_channels)
1818 continue;
1819
1820 for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
1821 if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
1822 match->channels[match->n_channels++] =
1823 mvm->nd_channels[j]->center_freq;
1824 }
1825
1826out_report_nd:
1827 wakeup.net_detect = net_detect;
1828out:
1829 iwl_mvm_free_nd(mvm);
1830
1831 mutex_unlock(&mvm->mutex);
1832 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1833
1834 if (net_detect) {
1835 for (i = 0; i < net_detect->n_matches; i++)
1836 kfree(net_detect->matches[i]);
1837 kfree(net_detect);
1838 }
1839}
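A standalone sketch (with made-up sizes) of the two index manipulations above: the match-set index is mirrored because the SSIDs were handed to the firmware in reverse order, and the per-match channel list is rebuilt by walking the byte-array bitmap bit by bit.

#include <stdint.h>
#include <stdio.h>

#define EX_BITMAP_LEN 2   /* bytes; stands in for the firmware bitmap size */

int main(void)
{
	int n_match_sets = 4, fw_idx = 1;
	uint8_t matching_channels[EX_BITMAP_LEN] = { 0x05, 0x00 }; /* bits 0, 2 */
	int j;

	/* mirrored index, as in idx = n_nd_match_sets - i - 1 above */
	printf("fw match %d -> request SSID %d\n",
	       fw_idx, n_match_sets - fw_idx - 1);

	for (j = 0; j < EX_BITMAP_LEN * 8; j++)
		if (matching_channels[j / 8] & (1 << (j % 8)))
			printf("channel slot %d matched\n", j);
	return 0;
}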
1840
1841static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
1842{
1843#ifdef CONFIG_IWLWIFI_DEBUGFS
1844 const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
1845 u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
1846 u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
1847
1848 if (!mvm->store_d3_resume_sram)
1849 return;
1850
1851 if (!mvm->d3_resume_sram) {
1852 mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
1853 if (!mvm->d3_resume_sram)
1854 return;
1855 }
1856
1857 iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
1858#endif
1859}
1860
1861static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
1862 struct ieee80211_vif *vif)
1863{
1864 /* skip the one we keep connection on */
1865 if (data == vif)
1866 return;
1867
1868 if (vif->type == NL80211_IFTYPE_STATION)
1869 ieee80211_resume_disconnect(vif);
1870}
1871
1872static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1873{
1874 struct ieee80211_vif *vif = NULL;
1875 int ret;
1876 enum iwl_d3_status d3_status;
1877 bool keep = false;
1878
1879 mutex_lock(&mvm->mutex);
1880
1881 /* get the BSS vif pointer again */
1882 vif = iwl_mvm_get_bss_vif(mvm);
1883 if (IS_ERR_OR_NULL(vif))
1884 goto err;
1885
1886 ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
1887 if (ret)
1888 goto err;
1889
1890 if (d3_status != IWL_D3_STATUS_ALIVE) {
1891 IWL_INFO(mvm, "Device was reset during suspend\n");
1892 goto err;
1893 }
1894
1895 /* query SRAM first in case we want event logging */
1896 iwl_mvm_read_d3_sram(mvm);
1897
1898 /*
1899 * Query the current location and source from the D3 firmware so we
1900 * can play it back when we re-initialize the D0 firmware
1901 */
1902 iwl_mvm_update_changed_regdom(mvm);
1903
1904 if (mvm->net_detect) {
1905 iwl_mvm_query_netdetect_reasons(mvm, vif);
1906 /* has unlocked the mutex, so skip that */
1907 goto out;
1908 } else {
1909 keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
1910#ifdef CONFIG_IWLWIFI_DEBUGFS
1911 if (keep)
1912 mvm->keep_vif = vif;
1913#endif
1914 /* has unlocked the mutex, so skip that */
1915 goto out_iterate;
1916 }
1917
1918err:
1919 iwl_mvm_free_nd(mvm);
1920 mutex_unlock(&mvm->mutex);
1921
1922out_iterate:
1923 if (!test)
1924 ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
1925 IEEE80211_IFACE_ITER_NORMAL,
1926 iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
1927
1928out:
1929 /* return 1 to reconfigure the device */
1930 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1931 set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
1932
1933 /* We always return 1, which causes mac80211 to do a reconfig
1934 * with IEEE80211_RECONFIG_TYPE_RESTART. This type of
1935 * reconfig calls iwl_mvm_restart_complete(), where we unref
1936 * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
1937 * reference here.
1938 */
1939 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1940 return 1;
1941}
1942
1943static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
1944{
1945 iwl_trans_resume(mvm->trans);
1946
1947 return __iwl_mvm_resume(mvm, false);
1948}
1949
1950static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
1951{
1952 bool exit_now;
1953 enum iwl_d3_status d3_status;
1954
1955 iwl_trans_d3_resume(mvm->trans, &d3_status, false);
1956
1957 /*
1958 * make sure to clear D0I3_DEFER_WAKEUP before
1959 * calling iwl_trans_resume(), which might wait
1960 * for d0i3 exit completion.
1961 */
1962 mutex_lock(&mvm->d0i3_suspend_mutex);
1963 __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1964 exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
1965 &mvm->d0i3_suspend_flags);
1966 mutex_unlock(&mvm->d0i3_suspend_mutex);
1967 if (exit_now) {
1968 IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
1969 _iwl_mvm_exit_d0i3(mvm);
1970 }
1971
1972 iwl_trans_resume(mvm->trans);
1973
1974 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
1975 int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
1976
1977 if (ret)
1978 return ret;
1979 /*
1980 * d0i3 exit will be deferred until reconfig_complete.
1981 * make sure we are out of d0i3 by then.
1982 */
1983 }
1984 return 0;
1985}
1986
1987int iwl_mvm_resume(struct ieee80211_hw *hw)
1988{
1989 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1990
1991 /* 'any' trigger means d0i3 was used */
1992 if (hw->wiphy->wowlan_config->any)
1993 return iwl_mvm_resume_d0i3(mvm);
1994 else
1995 return iwl_mvm_resume_d3(mvm);
1996}
1997
1998void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
1999{
2000 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2001
2002 device_set_wakeup_enable(mvm->trans->dev, enabled);
2003}
2004
2005#ifdef CONFIG_IWLWIFI_DEBUGFS
2006static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
2007{
2008 struct iwl_mvm *mvm = inode->i_private;
2009 int err;
2010
2011 if (mvm->d3_test_active)
2012 return -EBUSY;
2013
2014 file->private_data = inode->i_private;
2015
2016 ieee80211_stop_queues(mvm->hw);
2017 synchronize_net();
2018
2019 /* start pseudo D3 */
2020 rtnl_lock();
2021 err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
2022 rtnl_unlock();
2023 if (err > 0)
2024 err = -EINVAL;
2025 if (err) {
2026 ieee80211_wake_queues(mvm->hw);
2027 return err;
2028 }
2029 mvm->d3_test_active = true;
2030 mvm->keep_vif = NULL;
2031 return 0;
2032}
2033
2034static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
2035 size_t count, loff_t *ppos)
2036{
2037 struct iwl_mvm *mvm = file->private_data;
2038 u32 pme_asserted;
2039
2040 while (true) {
2041 /* read pme_ptr if available */
2042 if (mvm->d3_test_pme_ptr) {
2043 pme_asserted = iwl_trans_read_mem32(mvm->trans,
2044 mvm->d3_test_pme_ptr);
2045 if (pme_asserted)
2046 break;
2047 }
2048
2049 if (msleep_interruptible(100))
2050 break;
2051 }
2052
2053 return 0;
2054}
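A userspace sketch of the polling loop above: sleep in 100 ms steps and stop either when the "PME asserted" word becomes non-zero or when the sleep is interrupted; the hardware read is replaced by a plain counter here.

#include <stdio.h>
#include <time.h>

int main(void)
{
	unsigned int pme_asserted = 0;
	const struct timespec step = { .tv_sec = 0, .tv_nsec = 100000000L };
	int polls = 0;

	while (!pme_asserted) {
		if (nanosleep(&step, NULL))   /* interrupted by a signal */
			break;
		if (++polls == 3)             /* stand-in for the device event */
			pme_asserted = 1;
	}
	printf("polled %d times\n", polls);
	return 0;
}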
2055
2056static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
2057 struct ieee80211_vif *vif)
2058{
2059 /* skip the one we keep connection on */
2060 if (_data == vif)
2061 return;
2062
2063 if (vif->type == NL80211_IFTYPE_STATION)
2064 ieee80211_connection_loss(vif);
2065}
2066
2067static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2068{
2069 struct iwl_mvm *mvm = inode->i_private;
2070 int remaining_time = 10;
2071
2072 mvm->d3_test_active = false;
2073 rtnl_lock();
2074 __iwl_mvm_resume(mvm, true);
2075 rtnl_unlock();
2076 iwl_abort_notification_waits(&mvm->notif_wait);
2077 ieee80211_restart_hw(mvm->hw);
2078
2079 /* wait for restart and disconnect all interfaces */
2080 while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2081 remaining_time > 0) {
2082 remaining_time--;
2083 msleep(1000);
2084 }
2085
2086 if (remaining_time == 0)
2087 IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
2088
2089 ieee80211_iterate_active_interfaces_atomic(
2090 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2091 iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
2092
2093 ieee80211_wake_queues(mvm->hw);
2094
2095 return 0;
2096}
2097
2098const struct file_operations iwl_dbgfs_d3_test_ops = {
2099 .llseek = no_llseek,
2100 .open = iwl_mvm_d3_test_open,
2101 .read = iwl_mvm_d3_test_read,
2102 .release = iwl_mvm_d3_test_release,
2103};
2104#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
deleted file mode 100644
index 7904b41a04c6..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ /dev/null
@@ -1,1483 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include "mvm.h"
66#include "fw-api-tof.h"
67#include "debugfs.h"
68
69static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
70 struct ieee80211_vif *vif,
71 enum iwl_dbgfs_pm_mask param, int val)
72{
73 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
74 struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
75
76 dbgfs_pm->mask |= param;
77
78 switch (param) {
79 case MVM_DEBUGFS_PM_KEEP_ALIVE: {
80 int dtimper = vif->bss_conf.dtim_period ?: 1;
81 int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
82
83 IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
84 if (val * MSEC_PER_SEC < 3 * dtimper_msec)
85 IWL_WARN(mvm,
86 "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
87 val * MSEC_PER_SEC, 3 * dtimper_msec);
88 dbgfs_pm->keep_alive_seconds = val;
89 break;
90 }
91 case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
92 IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
93 val ? "enabled" : "disabled");
94 dbgfs_pm->skip_over_dtim = val;
95 break;
96 case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
97 IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
98 dbgfs_pm->skip_dtim_periods = val;
99 break;
100 case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
101 IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
102 dbgfs_pm->rx_data_timeout = val;
103 break;
104 case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
105 IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
106 dbgfs_pm->tx_data_timeout = val;
107 break;
108 case MVM_DEBUGFS_PM_LPRX_ENA:
109 IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
110 dbgfs_pm->lprx_ena = val;
111 break;
112 case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
113 IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
114 dbgfs_pm->lprx_rssi_threshold = val;
115 break;
116 case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
117 IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
118 dbgfs_pm->snooze_ena = val;
119 break;
120 case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
121 IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
122 dbgfs_pm->uapsd_misbehaving = val;
123 break;
124 case MVM_DEBUGFS_PM_USE_PS_POLL:
125 IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val);
126 dbgfs_pm->use_ps_poll = val;
127 break;
128 }
129}
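A worked example (standalone, with the TU-to-millisecond rounding ignored) of the keep-alive sanity check in the MVM_DEBUGFS_PM_KEEP_ALIVE case above: the keep-alive interval should cover at least three DTIM intervals, where one DTIM interval is dtim_period beacon intervals.

#include <stdio.h>

int main(void)
{
	int dtim_period = 5, beacon_int = 200;  /* beacon interval in TU (~1 ms) */
	int dtim_msec = dtim_period * beacon_int;
	int keep_alive_sec = 1;

	if (keep_alive_sec * 1000 < 3 * dtim_msec)
		printf("too short: %d ms < %d ms\n",
		       keep_alive_sec * 1000, 3 * dtim_msec);
	else
		printf("keep-alive ok\n");
	return 0;
}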
130
131static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
132 size_t count, loff_t *ppos)
133{
134 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
135 struct iwl_mvm *mvm = mvmvif->mvm;
136 enum iwl_dbgfs_pm_mask param;
137 int val, ret;
138
139 if (!strncmp("keep_alive=", buf, 11)) {
140 if (sscanf(buf + 11, "%d", &val) != 1)
141 return -EINVAL;
142 param = MVM_DEBUGFS_PM_KEEP_ALIVE;
143 } else if (!strncmp("skip_over_dtim=", buf, 15)) {
144 if (sscanf(buf + 15, "%d", &val) != 1)
145 return -EINVAL;
146 param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
147 } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
148 if (sscanf(buf + 18, "%d", &val) != 1)
149 return -EINVAL;
150 param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
151 } else if (!strncmp("rx_data_timeout=", buf, 16)) {
152 if (sscanf(buf + 16, "%d", &val) != 1)
153 return -EINVAL;
154 param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
155 } else if (!strncmp("tx_data_timeout=", buf, 16)) {
156 if (sscanf(buf + 16, "%d", &val) != 1)
157 return -EINVAL;
158 param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
159 } else if (!strncmp("lprx=", buf, 5)) {
160 if (sscanf(buf + 5, "%d", &val) != 1)
161 return -EINVAL;
162 param = MVM_DEBUGFS_PM_LPRX_ENA;
163 } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
164 if (sscanf(buf + 20, "%d", &val) != 1)
165 return -EINVAL;
166 if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
167 POWER_LPRX_RSSI_THRESHOLD_MIN)
168 return -EINVAL;
169 param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
170 } else if (!strncmp("snooze_enable=", buf, 14)) {
171 if (sscanf(buf + 14, "%d", &val) != 1)
172 return -EINVAL;
173 param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
174 } else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
175 if (sscanf(buf + 18, "%d", &val) != 1)
176 return -EINVAL;
177 param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
178 } else if (!strncmp("use_ps_poll=", buf, 12)) {
179 if (sscanf(buf + 12, "%d", &val) != 1)
180 return -EINVAL;
181 param = MVM_DEBUGFS_PM_USE_PS_POLL;
182 } else {
183 return -EINVAL;
184 }
185
186 mutex_lock(&mvm->mutex);
187 iwl_dbgfs_update_pm(mvm, vif, param, val);
188 ret = iwl_mvm_power_update_mac(mvm);
189 mutex_unlock(&mvm->mutex);
190
191 return ret ?: count;
192}
193
194static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
195 char __user *user_buf,
196 size_t count, loff_t *ppos)
197{
198 struct ieee80211_vif *vif = file->private_data;
199 char buf[64];
200 int bufsz = sizeof(buf);
201 int pos;
202
203 pos = scnprintf(buf, bufsz, "bss limit = %d\n",
204 vif->bss_conf.txpower);
205
206 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
207}
208
209static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
210 char __user *user_buf,
211 size_t count, loff_t *ppos)
212{
213 struct ieee80211_vif *vif = file->private_data;
214 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
215 struct iwl_mvm *mvm = mvmvif->mvm;
216 char buf[512];
217 int bufsz = sizeof(buf);
218 int pos;
219
220 pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
221
222 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
223}
224
225static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
226 char __user *user_buf,
227 size_t count, loff_t *ppos)
228{
229 struct ieee80211_vif *vif = file->private_data;
230 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
231 struct iwl_mvm *mvm = mvmvif->mvm;
232 u8 ap_sta_id;
233 struct ieee80211_chanctx_conf *chanctx_conf;
234 char buf[512];
235 int bufsz = sizeof(buf);
236 int pos = 0;
237 int i;
238
239 mutex_lock(&mvm->mutex);
240
241 ap_sta_id = mvmvif->ap_sta_id;
242
243 switch (ieee80211_vif_type_p2p(vif)) {
244 case NL80211_IFTYPE_ADHOC:
245 pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
246 break;
247 case NL80211_IFTYPE_STATION:
248 pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
249 break;
250 case NL80211_IFTYPE_AP:
251 pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
252 break;
253 case NL80211_IFTYPE_P2P_CLIENT:
254 pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
255 break;
256 case NL80211_IFTYPE_P2P_GO:
257 pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
258 break;
259 case NL80211_IFTYPE_P2P_DEVICE:
260 pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
261 break;
262 default:
263 break;
264 }
265
266 pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
267 mvmvif->id, mvmvif->color);
268 pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
269 vif->bss_conf.bssid);
270 pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
271 for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
272 pos += scnprintf(buf+pos, bufsz-pos,
273 "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
274 i, mvmvif->queue_params[i].txop,
275 mvmvif->queue_params[i].cw_min,
276 mvmvif->queue_params[i].cw_max,
277 mvmvif->queue_params[i].aifs,
278 mvmvif->queue_params[i].uapsd);
279
280 if (vif->type == NL80211_IFTYPE_STATION &&
281 ap_sta_id != IWL_MVM_STATION_COUNT) {
282 struct ieee80211_sta *sta;
283
284 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
285 lockdep_is_held(&mvm->mutex));
286 if (!IS_ERR_OR_NULL(sta)) {
287 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
288
289 pos += scnprintf(buf+pos, bufsz-pos,
290 "ap_sta_id %d - reduced Tx power %d\n",
291 ap_sta_id,
292 mvm_sta->bt_reduced_txpower);
293 }
294 }
295
296 rcu_read_lock();
297 chanctx_conf = rcu_dereference(vif->chanctx_conf);
298 if (chanctx_conf)
299 pos += scnprintf(buf+pos, bufsz-pos,
300 "idle rx chains %d, active rx chains: %d\n",
301 chanctx_conf->rx_chains_static,
302 chanctx_conf->rx_chains_dynamic);
303 rcu_read_unlock();
304
305 mutex_unlock(&mvm->mutex);
306
307 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
308}
309
310static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
311 enum iwl_dbgfs_bf_mask param, int value)
312{
313 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
314 struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
315
316 dbgfs_bf->mask |= param;
317
318 switch (param) {
319 case MVM_DEBUGFS_BF_ENERGY_DELTA:
320 dbgfs_bf->bf_energy_delta = value;
321 break;
322 case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
323 dbgfs_bf->bf_roaming_energy_delta = value;
324 break;
325 case MVM_DEBUGFS_BF_ROAMING_STATE:
326 dbgfs_bf->bf_roaming_state = value;
327 break;
328 case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
329 dbgfs_bf->bf_temp_threshold = value;
330 break;
331 case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
332 dbgfs_bf->bf_temp_fast_filter = value;
333 break;
334 case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
335 dbgfs_bf->bf_temp_slow_filter = value;
336 break;
337 case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
338 dbgfs_bf->bf_enable_beacon_filter = value;
339 break;
340 case MVM_DEBUGFS_BF_DEBUG_FLAG:
341 dbgfs_bf->bf_debug_flag = value;
342 break;
343 case MVM_DEBUGFS_BF_ESCAPE_TIMER:
344 dbgfs_bf->bf_escape_timer = value;
345 break;
346 case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
347 dbgfs_bf->ba_enable_beacon_abort = value;
348 break;
349 case MVM_DEBUGFS_BA_ESCAPE_TIMER:
350 dbgfs_bf->ba_escape_timer = value;
351 break;
352 }
353}
354
355static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
356 size_t count, loff_t *ppos)
357{
358 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
359 struct iwl_mvm *mvm = mvmvif->mvm;
360 enum iwl_dbgfs_bf_mask param;
361 int value, ret = 0;
362
363 if (!strncmp("bf_energy_delta=", buf, 16)) {
364 if (sscanf(buf+16, "%d", &value) != 1)
365 return -EINVAL;
366 if (value < IWL_BF_ENERGY_DELTA_MIN ||
367 value > IWL_BF_ENERGY_DELTA_MAX)
368 return -EINVAL;
369 param = MVM_DEBUGFS_BF_ENERGY_DELTA;
370 } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
371 if (sscanf(buf+24, "%d", &value) != 1)
372 return -EINVAL;
373 if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
374 value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
375 return -EINVAL;
376 param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
377 } else if (!strncmp("bf_roaming_state=", buf, 17)) {
378 if (sscanf(buf+17, "%d", &value) != 1)
379 return -EINVAL;
380 if (value < IWL_BF_ROAMING_STATE_MIN ||
381 value > IWL_BF_ROAMING_STATE_MAX)
382 return -EINVAL;
383 param = MVM_DEBUGFS_BF_ROAMING_STATE;
384 } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
385 if (sscanf(buf+18, "%d", &value) != 1)
386 return -EINVAL;
387 if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
388 value > IWL_BF_TEMP_THRESHOLD_MAX)
389 return -EINVAL;
390 param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
391 } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
392 if (sscanf(buf+20, "%d", &value) != 1)
393 return -EINVAL;
394 if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
395 value > IWL_BF_TEMP_FAST_FILTER_MAX)
396 return -EINVAL;
397 param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
398 } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
399 if (sscanf(buf+20, "%d", &value) != 1)
400 return -EINVAL;
401 if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
402 value > IWL_BF_TEMP_SLOW_FILTER_MAX)
403 return -EINVAL;
404 param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
405 } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
406 if (sscanf(buf+24, "%d", &value) != 1)
407 return -EINVAL;
408 if (value < 0 || value > 1)
409 return -EINVAL;
410 param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
411 } else if (!strncmp("bf_debug_flag=", buf, 14)) {
412 if (sscanf(buf+14, "%d", &value) != 1)
413 return -EINVAL;
414 if (value < 0 || value > 1)
415 return -EINVAL;
416 param = MVM_DEBUGFS_BF_DEBUG_FLAG;
417 } else if (!strncmp("bf_escape_timer=", buf, 16)) {
418 if (sscanf(buf+16, "%d", &value) != 1)
419 return -EINVAL;
420 if (value < IWL_BF_ESCAPE_TIMER_MIN ||
421 value > IWL_BF_ESCAPE_TIMER_MAX)
422 return -EINVAL;
423 param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
424 } else if (!strncmp("ba_escape_timer=", buf, 16)) {
425 if (sscanf(buf+16, "%d", &value) != 1)
426 return -EINVAL;
427 if (value < IWL_BA_ESCAPE_TIMER_MIN ||
428 value > IWL_BA_ESCAPE_TIMER_MAX)
429 return -EINVAL;
430 param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
431 } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
432 if (sscanf(buf+23, "%d", &value) != 1)
433 return -EINVAL;
434 if (value < 0 || value > 1)
435 return -EINVAL;
436 param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
437 } else {
438 return -EINVAL;
439 }
440
441 mutex_lock(&mvm->mutex);
442 iwl_dbgfs_update_bf(vif, param, value);
443 if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
444 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
445 else
446 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
447 mutex_unlock(&mvm->mutex);
448
449 return ret ?: count;
450}
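
For illustration only: the write handler above accepts a single "key=value" token per write, range-checks it, and then re-sends the beacon filter command to the firmware. A minimal userspace sketch follows; the debugfs path follows the per-vif symlink layout described near the end of this file and is an assumption for any given system, and the value 5 is arbitrary.

/*
 * Userspace sketch only, not driver code: writes one "key=value" pair to
 * the per-vif bf_params debugfs file. The path depends on the phy and
 * interface names and is an assumption here; the value is arbitrary.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/iwlmvm/bf_params";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "bf_energy_delta=5");	/* same syntax the parser above expects */
	fclose(f);
	return 0;
}
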
451
452static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
453 char __user *user_buf,
454 size_t count, loff_t *ppos)
455{
456 struct ieee80211_vif *vif = file->private_data;
457 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
458 char buf[256];
459 int pos = 0;
460 const size_t bufsz = sizeof(buf);
461 struct iwl_beacon_filter_cmd cmd = {
462 IWL_BF_CMD_CONFIG_DEFAULTS,
463 .bf_enable_beacon_filter =
464 cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
465 .ba_enable_beacon_abort =
466 cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
467 };
468
469 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
470 if (mvmvif->bf_data.bf_enabled)
471 cmd.bf_enable_beacon_filter = cpu_to_le32(1);
472 else
473 cmd.bf_enable_beacon_filter = 0;
474
475 pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
476 le32_to_cpu(cmd.bf_energy_delta));
477 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
478 le32_to_cpu(cmd.bf_roaming_energy_delta));
479 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
480 le32_to_cpu(cmd.bf_roaming_state));
481 pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
482 le32_to_cpu(cmd.bf_temp_threshold));
483 pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
484 le32_to_cpu(cmd.bf_temp_fast_filter));
485 pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
486 le32_to_cpu(cmd.bf_temp_slow_filter));
487 pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
488 le32_to_cpu(cmd.bf_enable_beacon_filter));
489 pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
490 le32_to_cpu(cmd.bf_debug_flag));
491 pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
492 le32_to_cpu(cmd.bf_escape_timer));
493 pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
494 le32_to_cpu(cmd.ba_escape_timer));
495 pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
496 le32_to_cpu(cmd.ba_enable_beacon_abort));
497
498 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
499}
500
501static inline char *iwl_dbgfs_is_match(char *name, char *buf)
502{
503 int len = strlen(name);
504
505 return !strncmp(name, buf, len) ? buf + len : NULL;
506}
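
The helper above returns a pointer just past a matching "name=" prefix, or NULL on a mismatch, which is what lets each TOF write handler below chain prefix checks over the same buffer. A standalone sketch of the same idiom, outside the driver:

#include <stdio.h>
#include <string.h>

/* same prefix-match idiom as iwl_dbgfs_is_match(), rewritten standalone */
static char *is_match(const char *name, char *buf)
{
	size_t len = strlen(name);

	return !strncmp(name, buf, len) ? buf + len : NULL;
}

int main(void)
{
	char line[] = "burst_period=200";
	char *val = is_match("burst_period=", line);

	if (val)
		printf("value string: %s\n", val);	/* prints "200" */
	return 0;
}
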
507
508static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
509 char *buf,
510 size_t count, loff_t *ppos)
511{
512 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
513 struct iwl_mvm *mvm = mvmvif->mvm;
514 u32 value;
515 int ret = -EINVAL;
516 char *data;
517
518 mutex_lock(&mvm->mutex);
519
520 data = iwl_dbgfs_is_match("tof_disabled=", buf);
521 if (data) {
522 ret = kstrtou32(data, 10, &value);
523 if (ret == 0)
524 mvm->tof_data.tof_cfg.tof_disabled = value;
525 goto out;
526 }
527
528 data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
529 if (data) {
530 ret = kstrtou32(data, 10, &value);
531 if (ret == 0)
532 mvm->tof_data.tof_cfg.one_sided_disabled = value;
533 goto out;
534 }
535
536 data = iwl_dbgfs_is_match("is_debug_mode=", buf);
537 if (data) {
538 ret = kstrtou32(data, 10, &value);
539 if (ret == 0)
540 mvm->tof_data.tof_cfg.is_debug_mode = value;
541 goto out;
542 }
543
544 data = iwl_dbgfs_is_match("is_buf=", buf);
545 if (data) {
546 ret = kstrtou32(data, 10, &value);
547 if (ret == 0)
548 mvm->tof_data.tof_cfg.is_buf_required = value;
549 goto out;
550 }
551
552 data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
553 if (data) {
554 ret = kstrtou32(data, 10, &value);
555 if (ret == 0 && value) {
556 ret = iwl_mvm_tof_config_cmd(mvm);
557 goto out;
558 }
559 }
560
561out:
562 mutex_unlock(&mvm->mutex);
563
564 return ret ?: count;
565}
566
567static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
568 char __user *user_buf,
569 size_t count, loff_t *ppos)
570{
571 struct ieee80211_vif *vif = file->private_data;
572 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
573 struct iwl_mvm *mvm = mvmvif->mvm;
574 char buf[256];
575 int pos = 0;
576 const size_t bufsz = sizeof(buf);
577 struct iwl_tof_config_cmd *cmd;
578
579 cmd = &mvm->tof_data.tof_cfg;
580
581 mutex_lock(&mvm->mutex);
582
583 pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
584 cmd->tof_disabled);
585 pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
586 cmd->one_sided_disabled);
587 pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
588 cmd->is_debug_mode);
589 pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
590 cmd->is_buf_required);
591
592 mutex_unlock(&mvm->mutex);
593
594 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
595}
596
597static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
598 char *buf,
599 size_t count, loff_t *ppos)
600{
601 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
602 struct iwl_mvm *mvm = mvmvif->mvm;
603 u32 value;
604 int ret = 0;
605 char *data;
606
607 mutex_lock(&mvm->mutex);
608
609 data = iwl_dbgfs_is_match("burst_period=", buf);
610 if (data) {
611 ret = kstrtou32(data, 10, &value);
612 if (!ret)
613 mvm->tof_data.responder_cfg.burst_period =
614 cpu_to_le16(value);
615 goto out;
616 }
617
618 data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
619 if (data) {
620 ret = kstrtou32(data, 10, &value);
621 if (ret == 0)
622 mvm->tof_data.responder_cfg.min_delta_ftm = value;
623 goto out;
624 }
625
626 data = iwl_dbgfs_is_match("burst_duration=", buf);
627 if (data) {
628 ret = kstrtou32(data, 10, &value);
629 if (ret == 0)
630 mvm->tof_data.responder_cfg.burst_duration = value;
631 goto out;
632 }
633
634 data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
635 if (data) {
636 ret = kstrtou32(data, 10, &value);
637 if (ret == 0)
638 mvm->tof_data.responder_cfg.num_of_burst_exp = value;
639 goto out;
640 }
641
642 data = iwl_dbgfs_is_match("abort_responder=", buf);
643 if (data) {
644 ret = kstrtou32(data, 10, &value);
645 if (ret == 0)
646 mvm->tof_data.responder_cfg.abort_responder = value;
647 goto out;
648 }
649
650 data = iwl_dbgfs_is_match("get_ch_est=", buf);
651 if (data) {
652 ret = kstrtou32(data, 10, &value);
653 if (ret == 0)
654 mvm->tof_data.responder_cfg.get_ch_est = value;
655 goto out;
656 }
657
658 data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
659 if (data) {
660 ret = kstrtou32(data, 10, &value);
661 if (ret == 0)
662 mvm->tof_data.responder_cfg.recv_sta_req_params = value;
663 goto out;
664 }
665
666 data = iwl_dbgfs_is_match("channel_num=", buf);
667 if (data) {
668 ret = kstrtou32(data, 10, &value);
669 if (ret == 0)
670 mvm->tof_data.responder_cfg.channel_num = value;
671 goto out;
672 }
673
674 data = iwl_dbgfs_is_match("bandwidth=", buf);
675 if (data) {
676 ret = kstrtou32(data, 10, &value);
677 if (ret == 0)
678 mvm->tof_data.responder_cfg.bandwidth = value;
679 goto out;
680 }
681
682 data = iwl_dbgfs_is_match("rate=", buf);
683 if (data) {
684 ret = kstrtou32(data, 10, &value);
685 if (ret == 0)
686 mvm->tof_data.responder_cfg.rate = value;
687 goto out;
688 }
689
690 data = iwl_dbgfs_is_match("bssid=", buf);
691 if (data) {
692 u8 *mac = mvm->tof_data.responder_cfg.bssid;
693
694 if (!mac_pton(data, mac)) {
695 ret = -EINVAL;
696 goto out;
697 }
698 }
699
700 data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
701 if (data) {
702 ret = kstrtou32(data, 10, &value);
703 if (ret == 0)
704 mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
705 cpu_to_le16(value);
706 goto out;
707 }
708
709 data = iwl_dbgfs_is_match("toa_offset=", buf);
710 if (data) {
711 ret = kstrtou32(data, 10, &value);
712 if (ret == 0)
713 mvm->tof_data.responder_cfg.toa_offset =
714 cpu_to_le16(value);
715 goto out;
716 }
717
718 data = iwl_dbgfs_is_match("center_freq=", buf);
719 if (data) {
720 struct iwl_tof_responder_config_cmd *cmd =
721 &mvm->tof_data.responder_cfg;
722
723 ret = kstrtou32(data, 10, &value);
724 if (ret == 0 && value) {
725 enum ieee80211_band band = (cmd->channel_num <= 14) ?
726 IEEE80211_BAND_2GHZ :
727 IEEE80211_BAND_5GHZ;
728 struct ieee80211_channel chn = {
729 .band = band,
730 .center_freq = ieee80211_channel_to_frequency(
731 cmd->channel_num, band),
732 };
733 struct cfg80211_chan_def chandef = {
734 .chan = &chn,
735 .center_freq1 =
736 ieee80211_channel_to_frequency(value,
737 band),
738 };
739
740 cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
741 }
742 goto out;
743 }
744
745 data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
746 if (data) {
747 ret = kstrtou32(data, 10, &value);
748 if (ret == 0)
749 mvm->tof_data.responder_cfg.ftm_per_burst = value;
750 goto out;
751 }
752
753 data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
754 if (data) {
755 ret = kstrtou32(data, 10, &value);
756 if (ret == 0)
757 mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
758 goto out;
759 }
760
761 data = iwl_dbgfs_is_match("asap_mode=", buf);
762 if (data) {
763 ret = kstrtou32(data, 10, &value);
764 if (ret == 0)
765 mvm->tof_data.responder_cfg.asap_mode = value;
766 goto out;
767 }
768
769 data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
770 if (data) {
771 ret = kstrtou32(data, 10, &value);
772 if (ret == 0 && value) {
773 ret = iwl_mvm_tof_responder_cmd(mvm, vif);
774 goto out;
775 }
776 }
777
778out:
779 mutex_unlock(&mvm->mutex);
780
781 return ret ?: count;
782}
783
784static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
785 char __user *user_buf,
786 size_t count, loff_t *ppos)
787{
788 struct ieee80211_vif *vif = file->private_data;
789 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
790 struct iwl_mvm *mvm = mvmvif->mvm;
791 char buf[256];
792 int pos = 0;
793 const size_t bufsz = sizeof(buf);
794 struct iwl_tof_responder_config_cmd *cmd;
795
796 cmd = &mvm->tof_data.responder_cfg;
797
798 mutex_lock(&mvm->mutex);
799
800 pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
801 le16_to_cpu(cmd->burst_period));
802 pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
803 cmd->burst_duration);
804 pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
805 cmd->bandwidth);
806 pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
807 cmd->channel_num);
808 pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
809 cmd->ctrl_ch_position);
810 pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
811 cmd->bssid);
812 pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
813 cmd->min_delta_ftm);
814 pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
815 cmd->num_of_burst_exp);
816 pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
817 pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
818 cmd->abort_responder);
819 pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
820 cmd->get_ch_est);
821 pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
822 cmd->recv_sta_req_params);
823 pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
824 cmd->ftm_per_burst);
825 pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
826 cmd->ftm_resp_ts_avail);
827 pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
828 cmd->asap_mode);
829 pos += scnprintf(buf + pos, bufsz - pos,
830 "tsf_timer_offset_msecs = %d\n",
831 le16_to_cpu(cmd->tsf_timer_offset_msecs));
832 pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
833 le16_to_cpu(cmd->toa_offset));
834
835 mutex_unlock(&mvm->mutex);
836
837 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
838}
839
840static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
841 char *buf, size_t count,
842 loff_t *ppos)
843{
844 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
845 struct iwl_mvm *mvm = mvmvif->mvm;
846 u32 value;
847 int ret = 0;
848 char *data;
849
850 mutex_lock(&mvm->mutex);
851
852 data = iwl_dbgfs_is_match("request_id=", buf);
853 if (data) {
854 ret = kstrtou32(data, 10, &value);
855 if (ret == 0)
856 mvm->tof_data.range_req.request_id = value;
857 goto out;
858 }
859
860 data = iwl_dbgfs_is_match("initiator=", buf);
861 if (data) {
862 ret = kstrtou32(data, 10, &value);
863 if (ret == 0)
864 mvm->tof_data.range_req.initiator = value;
865 goto out;
866 }
867
868 data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
869 if (data) {
870 ret = kstrtou32(data, 10, &value);
871 if (ret == 0)
872 mvm->tof_data.range_req.one_sided_los_disable = value;
873 goto out;
874 }
875
876 data = iwl_dbgfs_is_match("req_timeout=", buf);
877 if (data) {
878 ret = kstrtou32(data, 10, &value);
879 if (ret == 0)
880 mvm->tof_data.range_req.req_timeout = value;
881 goto out;
882 }
883
884 data = iwl_dbgfs_is_match("report_policy=", buf);
885 if (data) {
886 ret = kstrtou32(data, 10, &value);
887 if (ret == 0)
888 mvm->tof_data.range_req.report_policy = value;
889 goto out;
890 }
891
892 data = iwl_dbgfs_is_match("macaddr_random=", buf);
893 if (data) {
894 ret = kstrtou32(data, 10, &value);
895 if (ret == 0)
896 mvm->tof_data.range_req.macaddr_random = value;
897 goto out;
898 }
899
900 data = iwl_dbgfs_is_match("num_of_ap=", buf);
901 if (data) {
902 ret = kstrtou32(data, 10, &value);
903 if (ret == 0)
904 mvm->tof_data.range_req.num_of_ap = value;
905 goto out;
906 }
907
908 data = iwl_dbgfs_is_match("macaddr_template=", buf);
909 if (data) {
910 u8 mac[ETH_ALEN];
911
912 if (!mac_pton(data, mac)) {
913 ret = -EINVAL;
914 goto out;
915 }
916 memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
917 goto out;
918 }
919
920 data = iwl_dbgfs_is_match("macaddr_mask=", buf);
921 if (data) {
922 u8 mac[ETH_ALEN];
923
924 if (!mac_pton(data, mac)) {
925 ret = -EINVAL;
926 goto out;
927 }
928 memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
929 goto out;
930 }
931
932 data = iwl_dbgfs_is_match("ap=", buf);
933 if (data) {
934 struct iwl_tof_range_req_ap_entry ap = {};
935 int size = sizeof(struct iwl_tof_range_req_ap_entry);
936 u16 burst_period;
937 u8 *mac = ap.bssid;
938 unsigned int i;
939
940 if (sscanf(data, "%u %hhd %hhd %hhd"
941 "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
942 "%hhd %hhd %hd"
943 "%hhd %hhd %d"
944 "%hhx %hhd %hhd %hhd",
945 &i, &ap.channel_num, &ap.bandwidth,
946 &ap.ctrl_ch_position,
947 mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
948 &ap.measure_type, &ap.num_of_bursts,
949 &burst_period,
950 &ap.samples_per_burst, &ap.retries_per_sample,
951 &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
952 &ap.enable_dyn_ack, &ap.rssi) != 20) {
953 ret = -EINVAL;
954 goto out;
955 }
956 if (i >= IWL_MVM_TOF_MAX_APS) {
957 IWL_ERR(mvm, "Invalid AP index %d\n", i);
958 ret = -EINVAL;
959 goto out;
960 }
961
962 ap.burst_period = cpu_to_le16(burst_period);
963
964 memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
965 goto out;
966 }
967
968 data = iwl_dbgfs_is_match("send_range_request=", buf);
969 if (data) {
970 ret = kstrtou32(data, 10, &value);
971 if (ret == 0 && value)
972 ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
973 goto out;
974 }
975
976 ret = -EINVAL;
977out:
978 mutex_unlock(&mvm->mutex);
979 return ret ?: count;
980}
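
The "ap=" branch above consumes twenty whitespace-separated fields in the order of the sscanf() call. Purely as an illustration (every numeric value here is arbitrary, not taken from the driver), the sketch below builds one such input line:

#include <stdio.h>

int main(void)
{
	char line[128];

	/*
	 * Field order as consumed by the sscanf() above: AP index, channel,
	 * bandwidth, ctrl channel position, BSSID, measure type, bursts,
	 * burst period, samples/burst, retries/sample, tsf delta,
	 * location_req (hex), asap mode, dynamic ack, rssi.
	 */
	snprintf(line, sizeof(line),
		 "ap=%u %d %d %d %s %d %d %d %d %d %d %x %d %d %d",
		 0, 36, 2, 0, "00:11:22:33:44:55",
		 0, 2, 100, 4, 3, 0, 0, 1, 0, -60);
	puts(line);	/* the string that would be written to tof_range_request */
	return 0;
}
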
981
982static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
983 char __user *user_buf,
984 size_t count, loff_t *ppos)
985{
986 struct ieee80211_vif *vif = file->private_data;
987 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
988 struct iwl_mvm *mvm = mvmvif->mvm;
989 char buf[512];
990 int pos = 0;
991 const size_t bufsz = sizeof(buf);
992 struct iwl_tof_range_req_cmd *cmd;
993 int i;
994
995 cmd = &mvm->tof_data.range_req;
996
997 mutex_lock(&mvm->mutex);
998
999 pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
1000 cmd->request_id);
1001 pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
1002 cmd->initiator);
1003 pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
1004 cmd->one_sided_los_disable);
1005 pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
1006 cmd->req_timeout);
1007 pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
1008 cmd->report_policy);
1009 pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
1010 cmd->macaddr_random);
1011 pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
1012 cmd->macaddr_template);
1013 pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
1014 cmd->macaddr_mask);
1015 pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
1016 cmd->num_of_ap);
1017 for (i = 0; i < cmd->num_of_ap; i++) {
1018 struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
1019
1020 pos += scnprintf(buf + pos, bufsz - pos,
1021 "ap %.2d: channel_num=%hhd bw=%hhd"
1022 " control=%hhd bssid=%pM type=%hhd"
1023 " num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
1024 " retries=%hhd tsf_delta=%d"
1025 " tsf_delta_direction=%hhd location_req=0x%hhx "
1026 " asap=%hhd enable=%hhd rssi=%hhd\n",
1027 i, ap->channel_num, ap->bandwidth,
1028 ap->ctrl_ch_position, ap->bssid,
1029 ap->measure_type, ap->num_of_bursts,
1030 ap->burst_period, ap->samples_per_burst,
1031 ap->retries_per_sample, ap->tsf_delta,
1032 ap->tsf_delta_direction,
1033 ap->location_req, ap->asap_mode,
1034 ap->enable_dyn_ack, ap->rssi);
1035 }
1036
1037 mutex_unlock(&mvm->mutex);
1038
1039 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1040}
1041
1042static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
1043 char *buf,
1044 size_t count, loff_t *ppos)
1045{
1046 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1047 struct iwl_mvm *mvm = mvmvif->mvm;
1048 u32 value;
1049 int ret = 0;
1050 char *data;
1051
1052 mutex_lock(&mvm->mutex);
1053
1054 data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
1055 if (data) {
1056 ret = kstrtou32(data, 10, &value);
1057 if (ret == 0)
1058 mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
1059 cpu_to_le16(value);
1060 goto out;
1061 }
1062
1063 data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
1064 if (data) {
1065 ret = kstrtou32(data, 10, &value);
1066 if (ret == 0)
1067 mvm->tof_data.range_req_ext.min_delta_ftm = value;
1068 goto out;
1069 }
1070
1071 data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
1072 if (data) {
1073 ret = kstrtou32(data, 10, &value);
1074 if (ret == 0)
1075 mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
1076 value;
1077 goto out;
1078 }
1079
1080 data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
1081 if (data) {
1082 ret = kstrtou32(data, 10, &value);
1083 if (ret == 0)
1084 mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
1085 value;
1086 goto out;
1087 }
1088
1089 data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
1090 if (data) {
1091 ret = kstrtou32(data, 10, &value);
1092 if (ret == 0)
1093 mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
1094 value;
1095 goto out;
1096 }
1097
1098 data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
1099 if (data) {
1100 ret = kstrtou32(data, 10, &value);
1101 if (ret == 0 && value)
1102 ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
1103 goto out;
1104 }
1105
1106 ret = -EINVAL;
1107out:
1108 mutex_unlock(&mvm->mutex);
1109 return ret ?: count;
1110}
1111
1112static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
1113 char __user *user_buf,
1114 size_t count, loff_t *ppos)
1115{
1116 struct ieee80211_vif *vif = file->private_data;
1117 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1118 struct iwl_mvm *mvm = mvmvif->mvm;
1119 char buf[256];
1120 int pos = 0;
1121 const size_t bufsz = sizeof(buf);
1122 struct iwl_tof_range_req_ext_cmd *cmd;
1123
1124 cmd = &mvm->tof_data.range_req_ext;
1125
1126 mutex_lock(&mvm->mutex);
1127
1128 pos += scnprintf(buf + pos, bufsz - pos,
1129 "tsf_timer_offset_msec = %hd\n",
1130 cmd->tsf_timer_offset_msec);
1131 pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
1132 cmd->min_delta_ftm);
1133 pos += scnprintf(buf + pos, bufsz - pos,
1134 "ftm_format_and_bw20M = %hhd\n",
1135 cmd->ftm_format_and_bw20M);
1136 pos += scnprintf(buf + pos, bufsz - pos,
1137 "ftm_format_and_bw40M = %hhd\n",
1138 cmd->ftm_format_and_bw40M);
1139 pos += scnprintf(buf + pos, bufsz - pos,
1140 "ftm_format_and_bw80M = %hhd\n",
1141 cmd->ftm_format_and_bw80M);
1142
1143 mutex_unlock(&mvm->mutex);
1144 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1145}
1146
1147static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
1148 char *buf,
1149 size_t count, loff_t *ppos)
1150{
1151 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1152 struct iwl_mvm *mvm = mvmvif->mvm;
1153 u32 value;
1154 int abort_id, ret = 0;
1155 char *data;
1156
1157 mutex_lock(&mvm->mutex);
1158
1159 data = iwl_dbgfs_is_match("abort_id=", buf);
1160 if (data) {
1161 ret = kstrtou32(data, 10, &value);
1162 if (ret == 0)
1163 mvm->tof_data.last_abort_id = value;
1164 goto out;
1165 }
1166
1167 data = iwl_dbgfs_is_match("send_range_abort=", buf);
1168 if (data) {
1169 ret = kstrtou32(data, 10, &value);
1170 if (ret == 0 && value) {
1171 abort_id = mvm->tof_data.last_abort_id;
1172 ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
1173 goto out;
1174 }
1175 }
1176
1177out:
1178 mutex_unlock(&mvm->mutex);
1179 return ret ?: count;
1180}
1181
1182static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
1183 char __user *user_buf,
1184 size_t count, loff_t *ppos)
1185{
1186 struct ieee80211_vif *vif = file->private_data;
1187 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1188 struct iwl_mvm *mvm = mvmvif->mvm;
1189 char buf[32];
1190 int pos = 0;
1191 const size_t bufsz = sizeof(buf);
1192 int last_abort_id;
1193
1194 mutex_lock(&mvm->mutex);
1195 last_abort_id = mvm->tof_data.last_abort_id;
1196 mutex_unlock(&mvm->mutex);
1197
1198 pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
1199 last_abort_id);
1200 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1201}
1202
1203static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
1204 char __user *user_buf,
1205 size_t count, loff_t *ppos)
1206{
1207 struct ieee80211_vif *vif = file->private_data;
1208 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1209 struct iwl_mvm *mvm = mvmvif->mvm;
1210 char *buf;
1211 int pos = 0;
1212 const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
1213 struct iwl_tof_range_rsp_ntfy *cmd;
1214 int i, ret;
1215
1216 buf = kzalloc(bufsz, GFP_KERNEL);
1217 if (!buf)
1218 return -ENOMEM;
1219
1220 mutex_lock(&mvm->mutex);
1221 cmd = &mvm->tof_data.range_resp;
1222
1223 pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
1224 cmd->request_id);
1225 pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
1226 cmd->request_status);
1227 pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
1228 cmd->last_in_batch);
1229 pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
1230 cmd->num_of_aps);
1231 for (i = 0; i < cmd->num_of_aps; i++) {
1232 struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
1233
1234 pos += scnprintf(buf + pos, bufsz - pos,
1235 "ap %.2d: bssid=%pM status=%hhd bw=%hhd"
1236 " rtt=%d rtt_var=%d rtt_spread=%d"
1237 " rssi=%hhd rssi_spread=%hhd"
1238 " range=%d range_var=%d"
1239 " time_stamp=%d\n",
1240 i, ap->bssid, ap->measure_status,
1241 ap->measure_bw,
1242 ap->rtt, ap->rtt_variance, ap->rtt_spread,
1243 ap->rssi, ap->rssi_spread, ap->range,
1244 ap->range_variance, ap->timestamp);
1245 }
1246 mutex_unlock(&mvm->mutex);
1247
1248 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1249 kfree(buf);
1250 return ret;
1251}
1252
1253static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
1254 size_t count, loff_t *ppos)
1255{
1256 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1257 struct iwl_mvm *mvm = mvmvif->mvm;
1258 u8 value;
1259 int ret;
1260
1261 ret = kstrtou8(buf, 0, &value);
1262 if (ret)
1263 return ret;
1264 if (value > 1)
1265 return -EINVAL;
1266
1267 mutex_lock(&mvm->mutex);
1268 iwl_mvm_update_low_latency(mvm, vif, value);
1269 mutex_unlock(&mvm->mutex);
1270
1271 return count;
1272}
1273
1274static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
1275 char __user *user_buf,
1276 size_t count, loff_t *ppos)
1277{
1278 struct ieee80211_vif *vif = file->private_data;
1279 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1280 char buf[2];
1281
1282 buf[0] = mvmvif->low_latency ? '1' : '0';
1283 buf[1] = '\n';
1284 return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
1285}
1286
1287static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
1288 char __user *user_buf,
1289 size_t count, loff_t *ppos)
1290{
1291 struct ieee80211_vif *vif = file->private_data;
1292 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1293 char buf[20];
1294 int len;
1295
1296 len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid);
1297 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1298}
1299
1300static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
1301 char *buf, size_t count,
1302 loff_t *ppos)
1303{
1304 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1305 struct iwl_mvm *mvm = mvmvif->mvm;
1306 bool ret;
1307
1308 mutex_lock(&mvm->mutex);
1309 ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid);
1310 mutex_unlock(&mvm->mutex);
1311
1312 return ret ? count : -EINVAL;
1313}
1314
1315static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
1316 size_t count, loff_t *ppos)
1317{
1318 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1319 struct iwl_mvm *mvm = mvmvif->mvm;
1320 struct ieee80211_chanctx_conf *chanctx_conf;
1321 struct iwl_mvm_phy_ctxt *phy_ctxt;
1322 u16 value;
1323 int ret;
1324
1325 ret = kstrtou16(buf, 0, &value);
1326 if (ret)
1327 return ret;
1328
1329 mutex_lock(&mvm->mutex);
1330 rcu_read_lock();
1331
1332 chanctx_conf = rcu_dereference(vif->chanctx_conf);
1333 /* make sure the channel context is assigned */
1334 if (!chanctx_conf) {
1335 rcu_read_unlock();
1336 mutex_unlock(&mvm->mutex);
1337 return -EINVAL;
1338 }
1339
1340 phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
1341 rcu_read_unlock();
1342
1343 mvm->dbgfs_rx_phyinfo = value;
1344
1345 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
1346 chanctx_conf->rx_chains_static,
1347 chanctx_conf->rx_chains_dynamic);
1348 mutex_unlock(&mvm->mutex);
1349
1350 return ret ?: count;
1351}
1352
1353static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
1354 char __user *user_buf,
1355 size_t count, loff_t *ppos)
1356{
1357 struct ieee80211_vif *vif = file->private_data;
1358 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1359 char buf[8];
1360
1361 snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
1362
1363 return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
1364}
1365
1366#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
1367 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
1368#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
1369 _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
1370#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
1371 if (!debugfs_create_file(#name, mode, parent, vif, \
1372 &iwl_dbgfs_##name##_ops)) \
1373 goto err; \
1374 } while (0)
1375
1376MVM_DEBUGFS_READ_FILE_OPS(mac_params);
1377MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
1378MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
1379MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
1380MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
1381MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
1382MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
1383MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
1384MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
1385MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
1386MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
1387MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
1388MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
1389
1390void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1391{
1392 struct dentry *dbgfs_dir = vif->debugfs_dir;
1393 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1394 char buf[100];
1395
1396 /*
 1397	 * Check if the debugfs directory already exists before creating it.
 1398	 * This may happen when, for example, resetting the hw or during suspend-resume.
1399 */
1400 if (!dbgfs_dir || mvmvif->dbgfs_dir)
1401 return;
1402
1403 mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
1404
1405 if (!mvmvif->dbgfs_dir) {
1406 IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
1407 dbgfs_dir->d_name.name);
1408 return;
1409 }
1410
1411 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
1412 ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
1413 (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
1414 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
1415 MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
1416 S_IRUSR);
1417
1418 MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR);
1419 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
1420 MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
1421 S_IRUSR | S_IWUSR);
1422 MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir,
1423 S_IRUSR | S_IWUSR);
1424 MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir,
1425 S_IRUSR | S_IWUSR);
1426
1427 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
1428 mvmvif == mvm->bf_allowed_vif)
1429 MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
1430 S_IRUSR | S_IWUSR);
1431
1432 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
1433 !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
1434 if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
1435 MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
1436 mvmvif->dbgfs_dir,
1437 S_IRUSR | S_IWUSR);
1438
1439 MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
1440 S_IRUSR | S_IWUSR);
1441 MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
1442 S_IRUSR | S_IWUSR);
1443 MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
1444 S_IRUSR | S_IWUSR);
1445 MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
1446 S_IRUSR | S_IWUSR);
1447 MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
1448 S_IRUSR);
1449 }
1450
1451 /*
1452 * Create symlink for convenience pointing to interface specific
1453 * debugfs entries for the driver. For example, under
1454 * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
1455 * find
1456 * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
1457 */
1458 snprintf(buf, 100, "../../../%s/%s/%s/%s",
1459 dbgfs_dir->d_parent->d_parent->d_name.name,
1460 dbgfs_dir->d_parent->d_name.name,
1461 dbgfs_dir->d_name.name,
1462 mvmvif->dbgfs_dir->d_name.name);
1463
1464 mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
1465 mvm->debugfs_dir, buf);
1466 if (!mvmvif->dbgfs_slink)
1467 IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
1468 dbgfs_dir->d_name.name);
1469 return;
1470err:
1471 IWL_ERR(mvm, "Can't create debugfs entity\n");
1472}
1473
1474void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1475{
1476 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1477
1478 debugfs_remove(mvmvif->dbgfs_slink);
1479 mvmvif->dbgfs_slink = NULL;
1480
1481 debugfs_remove_recursive(mvmvif->dbgfs_dir);
1482 mvmvif->dbgfs_dir = NULL;
1483}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
deleted file mode 100644
index 05928fb4021d..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ /dev/null
@@ -1,1516 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/vmalloc.h>
66
67#include "mvm.h"
68#include "sta.h"
69#include "iwl-io.h"
70#include "debugfs.h"
71#include "iwl-fw-error-dump.h"
72
73static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
74 size_t count, loff_t *ppos)
75{
76 int ret;
77 u32 scd_q_msk;
78
79 if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
80 return -EIO;
81
82 if (sscanf(buf, "%x", &scd_q_msk) != 1)
83 return -EINVAL;
84
85 IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
86
87 mutex_lock(&mvm->mutex);
88 ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
89 mutex_unlock(&mvm->mutex);
90
91 return ret;
92}
93
94static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
95 size_t count, loff_t *ppos)
96{
97 struct iwl_mvm_sta *mvmsta;
98 int sta_id, drain, ret;
99
100 if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
101 return -EIO;
102
103 if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
104 return -EINVAL;
105 if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
106 return -EINVAL;
107 if (drain < 0 || drain > 1)
108 return -EINVAL;
109
110 mutex_lock(&mvm->mutex);
111
112 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
113
114 if (!mvmsta)
115 ret = -ENOENT;
116 else
117 ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
118
119 mutex_unlock(&mvm->mutex);
120
121 return ret;
122}
123
124static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
125 size_t count, loff_t *ppos)
126{
127 struct iwl_mvm *mvm = file->private_data;
128 const struct fw_img *img;
129 unsigned int ofs, len;
130 size_t ret;
131 u8 *ptr;
132
133 if (!mvm->ucode_loaded)
134 return -EINVAL;
135
136 /* default is to dump the entire data segment */
137 img = &mvm->fw->img[mvm->cur_ucode];
138 ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
139 len = img->sec[IWL_UCODE_SECTION_DATA].len;
140
141 if (mvm->dbgfs_sram_len) {
142 ofs = mvm->dbgfs_sram_offset;
143 len = mvm->dbgfs_sram_len;
144 }
145
146 ptr = kzalloc(len, GFP_KERNEL);
147 if (!ptr)
148 return -ENOMEM;
149
150 iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
151
152 ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
153
154 kfree(ptr);
155
156 return ret;
157}
158
159static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
160 size_t count, loff_t *ppos)
161{
162 const struct fw_img *img;
163 u32 offset, len;
164 u32 img_offset, img_len;
165
166 if (!mvm->ucode_loaded)
167 return -EINVAL;
168
169 img = &mvm->fw->img[mvm->cur_ucode];
170 img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
171 img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
172
173 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
174 if ((offset & 0x3) || (len & 0x3))
175 return -EINVAL;
176
177 if (offset + len > img_offset + img_len)
178 return -EINVAL;
179
180 mvm->dbgfs_sram_offset = offset;
181 mvm->dbgfs_sram_len = len;
182 } else {
183 mvm->dbgfs_sram_offset = 0;
184 mvm->dbgfs_sram_len = 0;
185 }
186
187 return count;
188}
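
The handler above accepts "offset,len" as two hex numbers, both 4-byte aligned and inside the firmware data section; any other input resets the window so that the read handler dumps the whole data segment. A standalone sketch of the same parse-and-align check, with placeholder values and without the bounds check against a real firmware image:

#include <stdio.h>

int main(void)
{
	const char *input = "800000,400";	/* placeholder "offset,len" in hex */
	unsigned int offset, len;

	if (sscanf(input, "%x,%x", &offset, &len) != 2 ||
	    (offset & 0x3) || (len & 0x3)) {
		puts("rejected: expected two 4-byte-aligned hex values");
		return 1;
	}
	printf("dump window: offset 0x%x, length 0x%x\n", offset, len);
	return 0;
}
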
189
190static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file,
191 char __user *user_buf,
192 size_t count, loff_t *ppos)
193{
194 struct iwl_mvm *mvm = file->private_data;
195 char buf[16];
196 int pos;
197
198 if (!mvm->temperature_test)
199 pos = scnprintf(buf , sizeof(buf), "disabled\n");
200 else
201 pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature);
202
203 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
204}
205
206/*
207 * Set NIC Temperature
208 * Cause the driver to ignore the actual NIC temperature reported by the FW
209 * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN and
210 * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX
211 * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE
212 */
213static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm,
214 char *buf, size_t count,
215 loff_t *ppos)
216{
217 int temperature;
218
219 if (!mvm->ucode_loaded && !mvm->temperature_test)
220 return -EIO;
221
222 if (kstrtoint(buf, 10, &temperature))
223 return -EINVAL;
224 /* not a legal temperature */
225 if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX &&
226 temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) ||
227 temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN)
228 return -EINVAL;
229
230 mutex_lock(&mvm->mutex);
231 if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) {
232 if (!mvm->temperature_test)
233 goto out;
234
235 mvm->temperature_test = false;
236 /* Since we can't read the temp while awake, just set
237 * it to zero until we get the next RX stats from the
238 * firmware.
239 */
240 mvm->temperature = 0;
241 } else {
242 mvm->temperature_test = true;
243 mvm->temperature = temperature;
244 }
245 IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n",
246 mvm->temperature_test ? "En" : "Dis" ,
247 mvm->temperature);
248 /* handle the temperature change */
249 iwl_mvm_tt_handler(mvm);
250
251out:
252 mutex_unlock(&mvm->mutex);
253
254 return count;
255}
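
Reading set_nic_temperature back reports either "disabled" or the overridden value, per the read handler earlier in this file. A userspace read sketch; the PCI address in the path is an assumption and mirrors the example layout mentioned in debugfs-vif.c:

#include <stdio.h>

int main(void)
{
	/* path is an assumption; the PCI address depends on the system */
	const char *path =
		"/sys/kernel/debug/iwlwifi/0000:02:00.0/iwlmvm/set_nic_temperature";
	char line[16];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("override: %s", line);	/* "disabled" or a number */
	fclose(f);
	return 0;
}
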
256
257static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
258 char __user *user_buf,
259 size_t count, loff_t *ppos)
260{
261 struct iwl_mvm *mvm = file->private_data;
262 char buf[16];
263 int pos, temp;
264
265 if (!mvm->ucode_loaded)
266 return -EIO;
267
268 mutex_lock(&mvm->mutex);
269 temp = iwl_mvm_get_temp(mvm);
270 mutex_unlock(&mvm->mutex);
271
272 if (temp < 0)
273 return temp;
274
275 pos = scnprintf(buf , sizeof(buf), "%d\n", temp);
276
277 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
278}
279
280static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
281 size_t count, loff_t *ppos)
282{
283 struct iwl_mvm *mvm = file->private_data;
284 struct ieee80211_sta *sta;
285 char buf[400];
286 int i, pos = 0, bufsz = sizeof(buf);
287
288 mutex_lock(&mvm->mutex);
289
290 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
291 pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
292 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
293 lockdep_is_held(&mvm->mutex));
294 if (!sta)
295 pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
296 else if (IS_ERR(sta))
297 pos += scnprintf(buf + pos, bufsz - pos, "%ld\n",
298 PTR_ERR(sta));
299 else
300 pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
301 sta->addr);
302 }
303
304 mutex_unlock(&mvm->mutex);
305
306 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
307}
308
309static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
310 char __user *user_buf,
311 size_t count, loff_t *ppos)
312{
313 struct iwl_mvm *mvm = file->private_data;
314 char buf[64];
315 int bufsz = sizeof(buf);
316 int pos = 0;
317
318 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
319 mvm->disable_power_off);
320 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
321 mvm->disable_power_off_d3);
322
323 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
324}
325
326static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
327 size_t count, loff_t *ppos)
328{
329 int ret, val;
330
331 if (!mvm->ucode_loaded)
332 return -EIO;
333
334 if (!strncmp("disable_power_off_d0=", buf, 21)) {
335 if (sscanf(buf + 21, "%d", &val) != 1)
336 return -EINVAL;
337 mvm->disable_power_off = val;
338 } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
339 if (sscanf(buf + 21, "%d", &val) != 1)
340 return -EINVAL;
341 mvm->disable_power_off_d3 = val;
342 } else {
343 return -EINVAL;
344 }
345
346 mutex_lock(&mvm->mutex);
347 ret = iwl_mvm_power_update_device(mvm);
348 mutex_unlock(&mvm->mutex);
349
350 return ret ?: count;
351}
352
353#define BT_MBOX_MSG(_notif, _num, _field) \
354 ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
355 >> BT_MBOX##_num##_##_field##_POS)
356
357
358#define BT_MBOX_PRINT(_num, _field, _end) \
359 pos += scnprintf(buf + pos, bufsz - pos, \
360 "\t%s: %d%s", \
361 #_field, \
362 BT_MBOX_MSG(notif, _num, _field), \
363 true ? "\n" : ", ");
364
365static
366int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
367 int pos, int bufsz)
368{
369 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
370
371 BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
372 BT_MBOX_PRINT(0, LE_PROF1, false);
373 BT_MBOX_PRINT(0, LE_PROF2, false);
374 BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
375 BT_MBOX_PRINT(0, CHL_SEQ_N, false);
376 BT_MBOX_PRINT(0, INBAND_S, false);
377 BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
378 BT_MBOX_PRINT(0, LE_SCAN, false);
379 BT_MBOX_PRINT(0, LE_ADV, false);
380 BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
381 BT_MBOX_PRINT(0, OPEN_CON_1, true);
382
383 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
384
385 BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
386 BT_MBOX_PRINT(1, IP_SR, false);
387 BT_MBOX_PRINT(1, LE_MSTR, false);
388 BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
389 BT_MBOX_PRINT(1, MSG_TYPE, false);
390 BT_MBOX_PRINT(1, SSN, true);
391
392 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
393
394 BT_MBOX_PRINT(2, SNIFF_ACT, false);
395 BT_MBOX_PRINT(2, PAG, false);
396 BT_MBOX_PRINT(2, INQUIRY, false);
397 BT_MBOX_PRINT(2, CONN, false);
398 BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
399 BT_MBOX_PRINT(2, DISC, false);
400 BT_MBOX_PRINT(2, SCO_TX_ACT, false);
401 BT_MBOX_PRINT(2, SCO_RX_ACT, false);
402 BT_MBOX_PRINT(2, ESCO_RE_TX, false);
403 BT_MBOX_PRINT(2, SCO_DURATION, true);
404
405 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
406
407 BT_MBOX_PRINT(3, SCO_STATE, false);
408 BT_MBOX_PRINT(3, SNIFF_STATE, false);
409 BT_MBOX_PRINT(3, A2DP_STATE, false);
410 BT_MBOX_PRINT(3, ACL_STATE, false);
411 BT_MBOX_PRINT(3, MSTR_STATE, false);
412 BT_MBOX_PRINT(3, OBX_STATE, false);
413 BT_MBOX_PRINT(3, OPEN_CON_2, false);
414 BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
415 BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
416 BT_MBOX_PRINT(3, INBAND_P, false);
417 BT_MBOX_PRINT(3, MSG_TYPE_2, false);
418 BT_MBOX_PRINT(3, SSN_2, false);
419 BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
420
421 return pos;
422}
423
424static
425int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
426 char *buf, int pos, int bufsz)
427{
428 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
429
430 BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
431 BT_MBOX_PRINT(0, LE_PROF1, false);
432 BT_MBOX_PRINT(0, LE_PROF2, false);
433 BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
434 BT_MBOX_PRINT(0, CHL_SEQ_N, false);
435 BT_MBOX_PRINT(0, INBAND_S, false);
436 BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
437 BT_MBOX_PRINT(0, LE_SCAN, false);
438 BT_MBOX_PRINT(0, LE_ADV, false);
439 BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
440 BT_MBOX_PRINT(0, OPEN_CON_1, true);
441
442 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
443
444 BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
445 BT_MBOX_PRINT(1, IP_SR, false);
446 BT_MBOX_PRINT(1, LE_MSTR, false);
447 BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
448 BT_MBOX_PRINT(1, MSG_TYPE, false);
449 BT_MBOX_PRINT(1, SSN, true);
450
451 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
452
453 BT_MBOX_PRINT(2, SNIFF_ACT, false);
454 BT_MBOX_PRINT(2, PAG, false);
455 BT_MBOX_PRINT(2, INQUIRY, false);
456 BT_MBOX_PRINT(2, CONN, false);
457 BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
458 BT_MBOX_PRINT(2, DISC, false);
459 BT_MBOX_PRINT(2, SCO_TX_ACT, false);
460 BT_MBOX_PRINT(2, SCO_RX_ACT, false);
461 BT_MBOX_PRINT(2, ESCO_RE_TX, false);
462 BT_MBOX_PRINT(2, SCO_DURATION, true);
463
464 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
465
466 BT_MBOX_PRINT(3, SCO_STATE, false);
467 BT_MBOX_PRINT(3, SNIFF_STATE, false);
468 BT_MBOX_PRINT(3, A2DP_STATE, false);
469 BT_MBOX_PRINT(3, ACL_STATE, false);
470 BT_MBOX_PRINT(3, MSTR_STATE, false);
471 BT_MBOX_PRINT(3, OBX_STATE, false);
472 BT_MBOX_PRINT(3, OPEN_CON_2, false);
473 BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
474 BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
475 BT_MBOX_PRINT(3, INBAND_P, false);
476 BT_MBOX_PRINT(3, MSG_TYPE_2, false);
477 BT_MBOX_PRINT(3, SSN_2, false);
478 BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
479
480 return pos;
481}
482
483static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
484 size_t count, loff_t *ppos)
485{
486 struct iwl_mvm *mvm = file->private_data;
487 char *buf;
488 int ret, pos = 0, bufsz = sizeof(char) * 1024;
489
490 buf = kmalloc(bufsz, GFP_KERNEL);
491 if (!buf)
492 return -ENOMEM;
493
494 mutex_lock(&mvm->mutex);
495
496 if (!fw_has_api(&mvm->fw->ucode_capa,
497 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
498 struct iwl_bt_coex_profile_notif_old *notif =
499 &mvm->last_bt_notif_old;
500
501 pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz);
502
503 pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
504 notif->bt_ci_compliance);
505 pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
506 le32_to_cpu(notif->primary_ch_lut));
507 pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
508 le32_to_cpu(notif->secondary_ch_lut));
509 pos += scnprintf(buf+pos,
510 bufsz-pos, "bt_activity_grading = %d\n",
511 le32_to_cpu(notif->bt_activity_grading));
512 pos += scnprintf(buf+pos, bufsz-pos,
513 "antenna isolation = %d CORUN LUT index = %d\n",
514 mvm->last_ant_isol, mvm->last_corun_lut);
515 } else {
516 struct iwl_bt_coex_profile_notif *notif =
517 &mvm->last_bt_notif;
518
519 pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
520
521 pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
522 notif->bt_ci_compliance);
523 pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
524 le32_to_cpu(notif->primary_ch_lut));
525 pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
526 le32_to_cpu(notif->secondary_ch_lut));
527 pos += scnprintf(buf+pos,
528 bufsz-pos, "bt_activity_grading = %d\n",
529 le32_to_cpu(notif->bt_activity_grading));
530 pos += scnprintf(buf+pos, bufsz-pos,
531 "antenna isolation = %d CORUN LUT index = %d\n",
532 mvm->last_ant_isol, mvm->last_corun_lut);
533 }
534
535 mutex_unlock(&mvm->mutex);
536
537 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
538 kfree(buf);
539
540 return ret;
541}
542#undef BT_MBOX_PRINT
543
544static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
545 size_t count, loff_t *ppos)
546{
547 struct iwl_mvm *mvm = file->private_data;
548 char buf[256];
549 int bufsz = sizeof(buf);
550 int pos = 0;
551
552 mutex_lock(&mvm->mutex);
553
554 if (!fw_has_api(&mvm->fw->ucode_capa,
555 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
556 struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
557
558 pos += scnprintf(buf+pos, bufsz-pos,
559 "Channel inhibition CMD\n");
560 pos += scnprintf(buf+pos, bufsz-pos,
561 "\tPrimary Channel Bitmap 0x%016llx\n",
562 le64_to_cpu(cmd->bt_primary_ci));
563 pos += scnprintf(buf+pos, bufsz-pos,
564 "\tSecondary Channel Bitmap 0x%016llx\n",
565 le64_to_cpu(cmd->bt_secondary_ci));
566
567 pos += scnprintf(buf+pos, bufsz-pos,
568 "BT Configuration CMD - 0=default, 1=never, 2=always\n");
569 pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
570 mvm->bt_ack_kill_msk[0]);
571 pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
572 mvm->bt_cts_kill_msk[0]);
573
574 } else {
575 struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
576
577 pos += scnprintf(buf+pos, bufsz-pos,
578 "Channel inhibition CMD\n");
579 pos += scnprintf(buf+pos, bufsz-pos,
580 "\tPrimary Channel Bitmap 0x%016llx\n",
581 le64_to_cpu(cmd->bt_primary_ci));
582 pos += scnprintf(buf+pos, bufsz-pos,
583 "\tSecondary Channel Bitmap 0x%016llx\n",
584 le64_to_cpu(cmd->bt_secondary_ci));
585 }
586
587 mutex_unlock(&mvm->mutex);
588
589 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
590}
591
592static ssize_t
593iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
594 size_t count, loff_t *ppos)
595{
596 u32 bt_tx_prio;
597
598 if (sscanf(buf, "%u", &bt_tx_prio) != 1)
599 return -EINVAL;
600 if (bt_tx_prio > 4)
601 return -EINVAL;
602
603 mvm->bt_tx_prio = bt_tx_prio;
604
605 return count;
606}
607
608static ssize_t
609iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
610 size_t count, loff_t *ppos)
611{
612 static const char * const modes_str[BT_FORCE_ANT_MAX] = {
613 [BT_FORCE_ANT_DIS] = "dis",
614 [BT_FORCE_ANT_AUTO] = "auto",
615 [BT_FORCE_ANT_BT] = "bt",
616 [BT_FORCE_ANT_WIFI] = "wifi",
617 };
618 int ret, bt_force_ant_mode;
619
620 for (bt_force_ant_mode = 0;
621 bt_force_ant_mode < ARRAY_SIZE(modes_str);
622 bt_force_ant_mode++) {
623 if (!strcmp(buf, modes_str[bt_force_ant_mode]))
624 break;
625 }
626
627 if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
628 return -EINVAL;
629
630 ret = 0;
631 mutex_lock(&mvm->mutex);
632 if (mvm->bt_force_ant_mode == bt_force_ant_mode)
633 goto out;
634
635 mvm->bt_force_ant_mode = bt_force_ant_mode;
636 IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
637 modes_str[mvm->bt_force_ant_mode]);
638 ret = iwl_send_bt_init_conf(mvm);
639
640out:
641 mutex_unlock(&mvm->mutex);
642 return ret ?: count;
643}
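
The handler above compares the written buffer against the tokens "dis", "auto", "bt" and "wifi" with strcmp(), so the string presumably has to match exactly, without a trailing newline. A userspace sketch using write(2) with the bare token; the path is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/iwlwifi/0000:02:00.0/iwlmvm/bt_force_ant";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* bare token, no '\n', since the handler matches with strcmp() */
	if (write(fd, "auto", strlen("auto")) < 0)
		perror("write");
	close(fd);
	return 0;
}
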
644
645#define PRINT_STATS_LE32(_struct, _memb) \
646 pos += scnprintf(buf + pos, bufsz - pos, \
647 fmt_table, #_memb, \
648 le32_to_cpu(_struct->_memb))
649
650static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
651 char __user *user_buf, size_t count,
652 loff_t *ppos)
653{
654 struct iwl_mvm *mvm = file->private_data;
655 static const char *fmt_table = "\t%-30s %10u\n";
656 static const char *fmt_header = "%-32s\n";
657 int pos = 0;
658 char *buf;
659 int ret;
660 /* 43 is the size of each data line, 33 is the size of each header */
661 size_t bufsz =
662 ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) +
663 (4 * 33) + 1;
664
665 struct mvm_statistics_rx_phy *ofdm;
666 struct mvm_statistics_rx_phy *cck;
667 struct mvm_statistics_rx_non_phy *general;
668 struct mvm_statistics_rx_ht_phy *ht;
669
670 buf = kzalloc(bufsz, GFP_KERNEL);
671 if (!buf)
672 return -ENOMEM;
673
674 mutex_lock(&mvm->mutex);
675
676 ofdm = &mvm->rx_stats.ofdm;
677 cck = &mvm->rx_stats.cck;
678 general = &mvm->rx_stats.general;
679 ht = &mvm->rx_stats.ofdm_ht;
680
681 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
682 "Statistics_Rx - OFDM");
683 PRINT_STATS_LE32(ofdm, ina_cnt);
684 PRINT_STATS_LE32(ofdm, fina_cnt);
685 PRINT_STATS_LE32(ofdm, plcp_err);
686 PRINT_STATS_LE32(ofdm, crc32_err);
687 PRINT_STATS_LE32(ofdm, overrun_err);
688 PRINT_STATS_LE32(ofdm, early_overrun_err);
689 PRINT_STATS_LE32(ofdm, crc32_good);
690 PRINT_STATS_LE32(ofdm, false_alarm_cnt);
691 PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
692 PRINT_STATS_LE32(ofdm, sfd_timeout);
693 PRINT_STATS_LE32(ofdm, fina_timeout);
694 PRINT_STATS_LE32(ofdm, unresponded_rts);
695 PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
696 PRINT_STATS_LE32(ofdm, sent_ack_cnt);
697 PRINT_STATS_LE32(ofdm, sent_cts_cnt);
698 PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
699 PRINT_STATS_LE32(ofdm, dsp_self_kill);
700 PRINT_STATS_LE32(ofdm, mh_format_err);
701 PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
702 PRINT_STATS_LE32(ofdm, reserved);
703
704 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
705 "Statistics_Rx - CCK");
706 PRINT_STATS_LE32(cck, ina_cnt);
707 PRINT_STATS_LE32(cck, fina_cnt);
708 PRINT_STATS_LE32(cck, plcp_err);
709 PRINT_STATS_LE32(cck, crc32_err);
710 PRINT_STATS_LE32(cck, overrun_err);
711 PRINT_STATS_LE32(cck, early_overrun_err);
712 PRINT_STATS_LE32(cck, crc32_good);
713 PRINT_STATS_LE32(cck, false_alarm_cnt);
714 PRINT_STATS_LE32(cck, fina_sync_err_cnt);
715 PRINT_STATS_LE32(cck, sfd_timeout);
716 PRINT_STATS_LE32(cck, fina_timeout);
717 PRINT_STATS_LE32(cck, unresponded_rts);
718 PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
719 PRINT_STATS_LE32(cck, sent_ack_cnt);
720 PRINT_STATS_LE32(cck, sent_cts_cnt);
721 PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
722 PRINT_STATS_LE32(cck, dsp_self_kill);
723 PRINT_STATS_LE32(cck, mh_format_err);
724 PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
725 PRINT_STATS_LE32(cck, reserved);
726
727 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
728 "Statistics_Rx - GENERAL");
729 PRINT_STATS_LE32(general, bogus_cts);
730 PRINT_STATS_LE32(general, bogus_ack);
731 PRINT_STATS_LE32(general, non_bssid_frames);
732 PRINT_STATS_LE32(general, filtered_frames);
733 PRINT_STATS_LE32(general, non_channel_beacons);
734 PRINT_STATS_LE32(general, channel_beacons);
735 PRINT_STATS_LE32(general, num_missed_bcon);
736 PRINT_STATS_LE32(general, adc_rx_saturation_time);
737 PRINT_STATS_LE32(general, ina_detection_search_time);
738 PRINT_STATS_LE32(general, beacon_silence_rssi_a);
739 PRINT_STATS_LE32(general, beacon_silence_rssi_b);
740 PRINT_STATS_LE32(general, beacon_silence_rssi_c);
741 PRINT_STATS_LE32(general, interference_data_flag);
742 PRINT_STATS_LE32(general, channel_load);
743 PRINT_STATS_LE32(general, dsp_false_alarms);
744 PRINT_STATS_LE32(general, beacon_rssi_a);
745 PRINT_STATS_LE32(general, beacon_rssi_b);
746 PRINT_STATS_LE32(general, beacon_rssi_c);
747 PRINT_STATS_LE32(general, beacon_energy_a);
748 PRINT_STATS_LE32(general, beacon_energy_b);
749 PRINT_STATS_LE32(general, beacon_energy_c);
750 PRINT_STATS_LE32(general, num_bt_kills);
751 PRINT_STATS_LE32(general, mac_id);
752 PRINT_STATS_LE32(general, directed_data_mpdu);
753
754 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
755 "Statistics_Rx - HT");
756 PRINT_STATS_LE32(ht, plcp_err);
757 PRINT_STATS_LE32(ht, overrun_err);
758 PRINT_STATS_LE32(ht, early_overrun_err);
759 PRINT_STATS_LE32(ht, crc32_good);
760 PRINT_STATS_LE32(ht, crc32_err);
761 PRINT_STATS_LE32(ht, mh_format_err);
762 PRINT_STATS_LE32(ht, agg_crc32_good);
763 PRINT_STATS_LE32(ht, agg_mpdu_cnt);
764 PRINT_STATS_LE32(ht, agg_cnt);
765 PRINT_STATS_LE32(ht, unsupport_mcs);
766
767 mutex_unlock(&mvm->mutex);
768
769 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
770 kfree(buf);
771
772 return ret;
773}
774#undef PRINT_STATS_LE32
775
776static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
777 char __user *user_buf, size_t count,
778 loff_t *ppos,
779 struct iwl_mvm_frame_stats *stats)
780{
781 char *buff, *pos, *endpos;
782 int idx, i;
783 int ret;
784 static const size_t bufsz = 1024;
785
786 buff = kmalloc(bufsz, GFP_KERNEL);
787 if (!buff)
788 return -ENOMEM;
789
790 spin_lock_bh(&mvm->drv_stats_lock);
791
792 pos = buff;
793 endpos = pos + bufsz;
794
795 pos += scnprintf(pos, endpos - pos,
796 "Legacy/HT/VHT\t:\t%d/%d/%d\n",
797 stats->legacy_frames,
798 stats->ht_frames,
799 stats->vht_frames);
800 pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
801 stats->bw_20_frames,
802 stats->bw_40_frames,
803 stats->bw_80_frames);
804 pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
805 stats->ngi_frames,
806 stats->sgi_frames);
807 pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
808 stats->siso_frames,
809 stats->mimo2_frames);
810 pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
811 stats->fail_frames,
812 stats->success_frames);
813 pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
814 stats->agg_frames);
815 pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
816 stats->ampdu_count);
817 pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
818 stats->ampdu_count > 0 ?
819 (stats->agg_frames / stats->ampdu_count) : 0);
820
821 pos += scnprintf(pos, endpos - pos, "Last Rates\n");
822
823 idx = stats->last_frame_idx - 1;
824 for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
825 idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
826 if (stats->last_rates[idx] == 0)
827 continue;
828 pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
829 (int)(ARRAY_SIZE(stats->last_rates) - i));
830 pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
831 }
832 spin_unlock_bh(&mvm->drv_stats_lock);
833
834 ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
835 kfree(buff);
836
837 return ret;
838}
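The loop above dumps stats->last_rates as a ring buffer, starting one slot past last_frame_idx so the oldest recorded rate prints first. For reference, the producer side of that ring would look roughly like the sketch below; iwl_mvm_record_frame_rate is a hypothetical name (the real update lives elsewhere in the driver), and only the index-wrapping pattern is the point.

/* Hedged sketch of the producer side of the last_rates ring buffer.
 * last_frame_idx always points at the slot written next, which is why
 * the reader above starts printing from last_frame_idx and wraps with
 * the modulo.
 */
static void iwl_mvm_record_frame_rate(struct iwl_mvm_frame_stats *stats,
				      u32 rate)
{
	stats->last_rates[stats->last_frame_idx] = rate;
	stats->last_frame_idx = (stats->last_frame_idx + 1) %
				ARRAY_SIZE(stats->last_rates);
}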
839
840static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
841 char __user *user_buf, size_t count,
842 loff_t *ppos)
843{
844 struct iwl_mvm *mvm = file->private_data;
845
846 return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos,
847 &mvm->drv_rx_stats);
848}
849
850static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
851 size_t count, loff_t *ppos)
852{
853 int ret;
854
855 mutex_lock(&mvm->mutex);
856
857	/* allow the one extra restart that we're provoking here */
858 if (mvm->restart_fw >= 0)
859 mvm->restart_fw++;
860
861 /* take the return value to make compiler happy - it will fail anyway */
862 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
863
864 mutex_unlock(&mvm->mutex);
865
866 return count;
867}
868
869static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
870 size_t count, loff_t *ppos)
871{
872 int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
873 if (ret)
874 return ret;
875
876 iwl_force_nmi(mvm->trans);
877
878 iwl_mvm_unref(mvm, IWL_MVM_REF_NMI);
879
880 return count;
881}
882
883static ssize_t
884iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
885 char __user *user_buf,
886 size_t count, loff_t *ppos)
887{
888 struct iwl_mvm *mvm = file->private_data;
889 int pos = 0;
890 char buf[32];
891 const size_t bufsz = sizeof(buf);
892
893 /* print which antennas were set for the scan command by the user */
894 pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
895 if (mvm->scan_rx_ant & ANT_A)
896 pos += scnprintf(buf + pos, bufsz - pos, "A");
897 if (mvm->scan_rx_ant & ANT_B)
898 pos += scnprintf(buf + pos, bufsz - pos, "B");
899 if (mvm->scan_rx_ant & ANT_C)
900 pos += scnprintf(buf + pos, bufsz - pos, "C");
901 pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
902
903 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
904}
905
906static ssize_t
907iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
908 size_t count, loff_t *ppos)
909{
910 u8 scan_rx_ant;
911
912 if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
913 return -EINVAL;
914 if (scan_rx_ant > ANT_ABC)
915 return -EINVAL;
916 if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm)))
917 return -EINVAL;
918
919 if (mvm->scan_rx_ant != scan_rx_ant) {
920 mvm->scan_rx_ant = scan_rx_ant;
921 if (fw_has_capa(&mvm->fw->ucode_capa,
922 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
923 iwl_mvm_config_scan(mvm);
924 }
925
926 return count;
927}
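The value parsed here is a bitmask of receive antennas, echoed back by the read handler above as "A", "B", "C". A minimal illustration, assuming the usual single-bit ANT_A/ANT_B/ANT_C assignments from the shared iwlwifi headers (the exact definitions are not part of this file):

	/* assumed: ANT_A = BIT(0), ANT_B = BIT(1), ANT_C = BIT(2) */
	u8 scan_rx_ant = ANT_A | ANT_B;	/* writing "3" reads back as "AB (3)" */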
928
929static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
930 char __user *user_buf,
931 size_t count, loff_t *ppos)
932{
933 struct iwl_mvm *mvm = file->private_data;
934 int conf;
935 char buf[8];
936 const size_t bufsz = sizeof(buf);
937 int pos = 0;
938
939 mutex_lock(&mvm->mutex);
940 conf = mvm->fw_dbg_conf;
941 mutex_unlock(&mvm->mutex);
942
943 pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
944
945 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
946}
947
948static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
949 char *buf, size_t count,
950 loff_t *ppos)
951{
952 unsigned int conf_id;
953 int ret;
954
955 ret = kstrtouint(buf, 0, &conf_id);
956 if (ret)
957 return ret;
958
959 if (WARN_ON(conf_id >= FW_DBG_CONF_MAX))
960 return -EINVAL;
961
962 mutex_lock(&mvm->mutex);
963 ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id);
964 mutex_unlock(&mvm->mutex);
965
966 return ret ?: count;
967}
968
969static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
970 char *buf, size_t count,
971 loff_t *ppos)
972{
973 int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
974
975 if (ret)
976 return ret;
977
978 iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
979
980 iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
981
982 return count;
983}
984
985#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
986#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
987static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
988 char __user *user_buf,
989 size_t count, loff_t *ppos)
990{
991 struct iwl_mvm *mvm = file->private_data;
992 struct iwl_bcast_filter_cmd cmd;
993 const struct iwl_fw_bcast_filter *filter;
994 char *buf;
995 int bufsz = 1024;
996 int i, j, pos = 0;
997 ssize_t ret;
998
999 buf = kzalloc(bufsz, GFP_KERNEL);
1000 if (!buf)
1001 return -ENOMEM;
1002
1003 mutex_lock(&mvm->mutex);
1004 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
1005 ADD_TEXT("None\n");
1006 mutex_unlock(&mvm->mutex);
1007 goto out;
1008 }
1009 mutex_unlock(&mvm->mutex);
1010
1011 for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
1012 filter = &cmd.filters[i];
1013
1014 ADD_TEXT("Filter [%d]:\n", i);
1015 ADD_TEXT("\tDiscard=%d\n", filter->discard);
1016 ADD_TEXT("\tFrame Type: %s\n",
1017 filter->frame_type ? "IPv4" : "Generic");
1018
1019 for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
1020 const struct iwl_fw_bcast_filter_attr *attr;
1021
1022 attr = &filter->attrs[j];
1023 if (!attr->mask)
1024 break;
1025
1026 ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
1027 j, attr->offset,
1028 attr->offset_type ? "IP End" :
1029 "Payload Start",
1030 be32_to_cpu(attr->mask),
1031 be32_to_cpu(attr->val),
1032 le16_to_cpu(attr->reserved1));
1033 }
1034 }
1035out:
1036 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1037 kfree(buf);
1038 return ret;
1039}
1040
1041static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
1042 size_t count, loff_t *ppos)
1043{
1044 int pos, next_pos;
1045 struct iwl_fw_bcast_filter filter = {};
1046 struct iwl_bcast_filter_cmd cmd;
1047 u32 filter_id, attr_id, mask, value;
1048 int err = 0;
1049
1050 if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
1051 &filter.frame_type, &pos) != 3)
1052 return -EINVAL;
1053
1054 if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
1055 filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
1056 return -EINVAL;
1057
1058 for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
1059 attr_id++) {
1060 struct iwl_fw_bcast_filter_attr *attr =
1061 &filter.attrs[attr_id];
1062
1063 if (pos >= count)
1064 break;
1065
1066 if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
1067 &attr->offset, &attr->offset_type,
1068 &mask, &value, &next_pos) != 4)
1069 return -EINVAL;
1070
1071 attr->mask = cpu_to_be32(mask);
1072 attr->val = cpu_to_be32(value);
1073 if (mask)
1074 filter.num_attrs++;
1075
1076 pos += next_pos;
1077 }
1078
1079 mutex_lock(&mvm->mutex);
1080 memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
1081 &filter, sizeof(filter));
1082
1083 /* send updated bcast filtering configuration */
1084 if (mvm->dbgfs_bcast_filtering.override &&
1085 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1086 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1087 sizeof(cmd), &cmd);
1088 mutex_unlock(&mvm->mutex);
1089
1090 return err ?: count;
1091}
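The write format above is "<filter_id> <discard> <frame_type>" followed by zero or more "<offset> <offset_type> <mask> <value>" attribute tuples, with %n used to track how far each sscanf() got. A stand-alone sketch of that scanning pattern follows; example_parse and the sample line are illustrative only, they mirror the handler rather than belong to the driver.

/* Illustration of the %n-based scanning used above.  Going by the read
 * handler, a line such as "0 0 1  12 0 0xf0 0x45" would describe filter 0,
 * frame type 1 (IPv4) and one attribute at payload offset 12.
 */
static int example_parse(const char *buf, int count)
{
	int id, discard, frame_type, offset, offset_type, mask, value;
	int pos, consumed;

	if (sscanf(buf, "%i %i %i %n", &id, &discard, &frame_type, &pos) != 3)
		return -EINVAL;

	while (pos < count &&
	       sscanf(buf + pos, "%i %i %i %i %n", &offset, &offset_type,
		      &mask, &value, &consumed) == 4)
		pos += consumed;	/* next tuple starts where this one ended */

	return 0;
}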
1092
1093static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
1094 char __user *user_buf,
1095 size_t count, loff_t *ppos)
1096{
1097 struct iwl_mvm *mvm = file->private_data;
1098 struct iwl_bcast_filter_cmd cmd;
1099 char *buf;
1100 int bufsz = 1024;
1101 int i, pos = 0;
1102 ssize_t ret;
1103
1104 buf = kzalloc(bufsz, GFP_KERNEL);
1105 if (!buf)
1106 return -ENOMEM;
1107
1108 mutex_lock(&mvm->mutex);
1109 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
1110 ADD_TEXT("None\n");
1111 mutex_unlock(&mvm->mutex);
1112 goto out;
1113 }
1114 mutex_unlock(&mvm->mutex);
1115
1116 for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
1117 const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
1118
1119 ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
1120 i, mac->default_discard, mac->attached_filters);
1121 }
1122out:
1123 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1124 kfree(buf);
1125 return ret;
1126}
1127
1128static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
1129 char *buf, size_t count,
1130 loff_t *ppos)
1131{
1132 struct iwl_bcast_filter_cmd cmd;
1133 struct iwl_fw_bcast_mac mac = {};
1134 u32 mac_id, attached_filters;
1135 int err = 0;
1136
1137 if (!mvm->bcast_filters)
1138 return -ENOENT;
1139
1140 if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
1141 &attached_filters) != 3)
1142 return -EINVAL;
1143
1144 if (mac_id >= ARRAY_SIZE(cmd.macs) ||
1145 mac.default_discard > 1 ||
1146 attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
1147 return -EINVAL;
1148
1149 mac.attached_filters = cpu_to_le16(attached_filters);
1150
1151 mutex_lock(&mvm->mutex);
1152 memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
1153 &mac, sizeof(mac));
1154
1155 /* send updated bcast filtering configuration */
1156 if (mvm->dbgfs_bcast_filtering.override &&
1157 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1158 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1159 sizeof(cmd), &cmd);
1160 mutex_unlock(&mvm->mutex);
1161
1162 return err ?: count;
1163}
1164#endif
1165
1166#ifdef CONFIG_PM_SLEEP
1167static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
1168 size_t count, loff_t *ppos)
1169{
1170 int store;
1171
1172 if (sscanf(buf, "%d", &store) != 1)
1173 return -EINVAL;
1174
1175 mvm->store_d3_resume_sram = store;
1176
1177 return count;
1178}
1179
1180static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
1181 size_t count, loff_t *ppos)
1182{
1183 struct iwl_mvm *mvm = file->private_data;
1184 const struct fw_img *img;
1185 int ofs, len, pos = 0;
1186 size_t bufsz, ret;
1187 char *buf;
1188 u8 *ptr = mvm->d3_resume_sram;
1189
1190 img = &mvm->fw->img[IWL_UCODE_WOWLAN];
1191 len = img->sec[IWL_UCODE_SECTION_DATA].len;
1192
1193 bufsz = len * 4 + 256;
1194 buf = kzalloc(bufsz, GFP_KERNEL);
1195 if (!buf)
1196 return -ENOMEM;
1197
1198 pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n",
1199 mvm->store_d3_resume_sram ? "en" : "dis");
1200
1201 if (ptr) {
1202 for (ofs = 0; ofs < len; ofs += 16) {
1203 pos += scnprintf(buf + pos, bufsz - pos,
1204 "0x%.4x %16ph\n", ofs, ptr + ofs);
1205 }
1206 } else {
1207 pos += scnprintf(buf + pos, bufsz - pos,
1208 "(no data captured)\n");
1209 }
1210
1211 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1212
1213 kfree(buf);
1214
1215 return ret;
1216}
1217#endif
1218
1219#define PRINT_MVM_REF(ref) do { \
1220 if (mvm->refs[ref]) \
1221 pos += scnprintf(buf + pos, bufsz - pos, \
1222 "\t(0x%lx): %d %s\n", \
1223 BIT(ref), mvm->refs[ref], #ref); \
1224} while (0)
1225
1226static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
1227 char __user *user_buf,
1228 size_t count, loff_t *ppos)
1229{
1230 struct iwl_mvm *mvm = file->private_data;
1231 int i, pos = 0;
1232 char buf[256];
1233 const size_t bufsz = sizeof(buf);
1234 u32 refs = 0;
1235
1236 for (i = 0; i < IWL_MVM_REF_COUNT; i++)
1237 if (mvm->refs[i])
1238 refs |= BIT(i);
1239
1240 pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%x\n",
1241 refs);
1242
1243 PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
1244 PRINT_MVM_REF(IWL_MVM_REF_SCAN);
1245 PRINT_MVM_REF(IWL_MVM_REF_ROC);
1246 PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
1247 PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
1248 PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
1249 PRINT_MVM_REF(IWL_MVM_REF_USER);
1250 PRINT_MVM_REF(IWL_MVM_REF_TX);
1251 PRINT_MVM_REF(IWL_MVM_REF_TX_AGG);
1252 PRINT_MVM_REF(IWL_MVM_REF_ADD_IF);
1253 PRINT_MVM_REF(IWL_MVM_REF_START_AP);
1254 PRINT_MVM_REF(IWL_MVM_REF_BSS_CHANGED);
1255 PRINT_MVM_REF(IWL_MVM_REF_PREPARE_TX);
1256 PRINT_MVM_REF(IWL_MVM_REF_PROTECT_TDLS);
1257 PRINT_MVM_REF(IWL_MVM_REF_CHECK_CTKILL);
1258 PRINT_MVM_REF(IWL_MVM_REF_PRPH_READ);
1259 PRINT_MVM_REF(IWL_MVM_REF_PRPH_WRITE);
1260 PRINT_MVM_REF(IWL_MVM_REF_NMI);
1261 PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
1262 PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
1263 PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
1264 PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
1265
1266 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1267}
1268
1269static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
1270 size_t count, loff_t *ppos)
1271{
1272 unsigned long value;
1273 int ret;
1274 bool taken;
1275
1276 ret = kstrtoul(buf, 10, &value);
1277 if (ret < 0)
1278 return ret;
1279
1280 mutex_lock(&mvm->mutex);
1281
1282 taken = mvm->refs[IWL_MVM_REF_USER];
1283 if (value == 1 && !taken)
1284 iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
1285 else if (value == 0 && taken)
1286 iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
1287 else
1288 ret = -EINVAL;
1289
1290 mutex_unlock(&mvm->mutex);
1291
1292 if (ret < 0)
1293 return ret;
1294 return count;
1295}
1296
1297#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
1298 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
1299#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
1300 _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
1301#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
1302 if (!debugfs_create_file(alias, mode, parent, mvm, \
1303 &iwl_dbgfs_##name##_ops)) \
1304 goto err; \
1305 } while (0)
1306#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
1307 MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
1308
1309static ssize_t
1310iwl_dbgfs_prph_reg_read(struct file *file,
1311 char __user *user_buf,
1312 size_t count, loff_t *ppos)
1313{
1314 struct iwl_mvm *mvm = file->private_data;
1315 int pos = 0;
1316 char buf[32];
1317 const size_t bufsz = sizeof(buf);
1318 int ret;
1319
1320 if (!mvm->dbgfs_prph_reg_addr)
1321 return -EINVAL;
1322
1323 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ);
1324 if (ret)
1325 return ret;
1326
1327 pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
1328 mvm->dbgfs_prph_reg_addr,
1329 iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
1330
1331 iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ);
1332
1333 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1334}
1335
1336static ssize_t
1337iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
1338 size_t count, loff_t *ppos)
1339{
1340 u8 args;
1341 u32 value;
1342 int ret;
1343
1344 args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
1345 /* if we only want to set the reg address - nothing more to do */
1346 if (args == 1)
1347 goto out;
1348
1349 /* otherwise, make sure we have both address and value */
1350 if (args != 2)
1351 return -EINVAL;
1352
1353 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
1354 if (ret)
1355 return ret;
1356
1357 iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
1358
1359 iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
1360out:
1361 return count;
1362}
1363
1364static ssize_t
1365iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
1366 size_t count, loff_t *ppos)
1367{
1368 int ret;
1369
1370 mutex_lock(&mvm->mutex);
1371 ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
1372 mutex_unlock(&mvm->mutex);
1373
1374 return ret ?: count;
1375}
1376
1377MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
1378
1379/* Device wide debugfs entries */
1380MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
1381MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
1382MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
1383MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
1384MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
1385MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
1386MVM_DEBUGFS_READ_FILE_OPS(stations);
1387MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
1388MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
1389MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
1390MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
1391MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
1392MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
1393MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
1394MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
1395MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
1396MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
1397MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
1398MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
1399MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8);
1400
1401#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1402MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
1403MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
1404#endif
1405
1406#ifdef CONFIG_PM_SLEEP
1407MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
1408#endif
1409
1410int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
1411{
1412 struct dentry *bcast_dir __maybe_unused;
1413 char buf[100];
1414
1415 spin_lock_init(&mvm->drv_stats_lock);
1416
1417 mvm->debugfs_dir = dbgfs_dir;
1418
1419 MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
1420 MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
1421 MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
1422 MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir,
1423 S_IWUSR | S_IRUSR);
1424 MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR);
1425 MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
1426 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
1427 MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
1428 MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
1429 S_IRUSR | S_IWUSR);
1430 MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
1431 MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
1432 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
1433 MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
1434 MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR);
1435 MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, S_IWUSR);
1436 MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
1437 S_IWUSR | S_IRUSR);
1438 MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
1439 MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
1440 MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
1441 MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
1442 MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
1443 if (!debugfs_create_bool("enable_scan_iteration_notif",
1444 S_IRUSR | S_IWUSR,
1445 mvm->debugfs_dir,
1446 &mvm->scan_iter_notif_enabled))
1447 goto err;
1448
1449#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1450 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
1451 bcast_dir = debugfs_create_dir("bcast_filtering",
1452 mvm->debugfs_dir);
1453 if (!bcast_dir)
1454 goto err;
1455
1456 if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR,
1457 bcast_dir,
1458 &mvm->dbgfs_bcast_filtering.override))
1459 goto err;
1460
1461 MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
1462 bcast_dir, S_IWUSR | S_IRUSR);
1463 MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
1464 bcast_dir, S_IWUSR | S_IRUSR);
1465 }
1466#endif
1467
1468#ifdef CONFIG_PM_SLEEP
1469 MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
1470 MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
1471 if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
1472 mvm->debugfs_dir, &mvm->d3_wake_sysassert))
1473 goto err;
1474 if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR,
1475 mvm->debugfs_dir, &mvm->last_netdetect_scans))
1476 goto err;
1477#endif
1478
1479 if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR,
1480 mvm->debugfs_dir,
1481 &mvm->low_latency_agg_frame_limit))
1482 goto err;
1483 if (!debugfs_create_u8("ps_disabled", S_IRUSR,
1484 mvm->debugfs_dir, &mvm->ps_disabled))
1485 goto err;
1486 if (!debugfs_create_blob("nvm_hw", S_IRUSR,
1487 mvm->debugfs_dir, &mvm->nvm_hw_blob))
1488 goto err;
1489 if (!debugfs_create_blob("nvm_sw", S_IRUSR,
1490 mvm->debugfs_dir, &mvm->nvm_sw_blob))
1491 goto err;
1492 if (!debugfs_create_blob("nvm_calib", S_IRUSR,
1493 mvm->debugfs_dir, &mvm->nvm_calib_blob))
1494 goto err;
1495 if (!debugfs_create_blob("nvm_prod", S_IRUSR,
1496 mvm->debugfs_dir, &mvm->nvm_prod_blob))
1497 goto err;
1498 if (!debugfs_create_blob("nvm_phy_sku", S_IRUSR,
1499 mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
1500 goto err;
1501
1502 /*
1503 * Create a symlink with mac80211. It will be removed when mac80211
1504	 * exits (before the opmode exits, which removes the target.)
1505 */
1506 snprintf(buf, 100, "../../%s/%s",
1507 dbgfs_dir->d_parent->d_parent->d_name.name,
1508 dbgfs_dir->d_parent->d_name.name);
1509 if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
1510 goto err;
1511
1512 return 0;
1513err:
1514 IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
1515 return -ENOMEM;
1516}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/iwlwifi/mvm/debugfs.h
deleted file mode 100644
index 8c4190e7e027..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.h
+++ /dev/null
@@ -1,103 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#define MVM_DEBUGFS_READ_FILE_OPS(name) \
67static const struct file_operations iwl_dbgfs_##name##_ops = { \
68 .read = iwl_dbgfs_##name##_read, \
69 .open = simple_open, \
70 .llseek = generic_file_llseek, \
71}
72
73#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
74static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
75 const char __user *user_buf, \
76 size_t count, loff_t *ppos) \
77{ \
78 argtype *arg = file->private_data; \
79 char buf[buflen] = {}; \
80 size_t buf_size = min(count, sizeof(buf) - 1); \
81 \
82 if (copy_from_user(buf, user_buf, buf_size)) \
83 return -EFAULT; \
84 \
85 return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \
86} \
87
88#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
89MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
90static const struct file_operations iwl_dbgfs_##name##_ops = { \
91 .write = _iwl_dbgfs_##name##_write, \
92 .read = iwl_dbgfs_##name##_read, \
93 .open = simple_open, \
94 .llseek = generic_file_llseek, \
95};
96
97#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
98MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
99static const struct file_operations iwl_dbgfs_##name##_ops = { \
100 .write = _iwl_dbgfs_##name##_write, \
101 .open = simple_open, \
102 .llseek = generic_file_llseek, \
103};
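These wrappers are what turn a plain iwl_dbgfs_*_write(mvm, buf, ...) handler into debugfs file_operations: the user buffer is copied into a bounded, NUL-terminated stack buffer and handed on. As a rough illustration (not a literal preprocessor dump), MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10) in debugfs.c expands along these lines, with struct iwl_mvm supplied by the MVM_DEBUGFS_* aliases there:

/* approximate expansion of _MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10, struct iwl_mvm) */
static ssize_t _iwl_dbgfs_fw_nmi_write(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_mvm *arg = file->private_data;
	char buf[10] = {};
	size_t buf_size = min(count, sizeof(buf) - 1);

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	return iwl_dbgfs_fw_nmi_write(arg, buf, buf_size, ppos);
}

static const struct file_operations iwl_dbgfs_fw_nmi_ops = {
	.write = _iwl_dbgfs_fw_nmi_write,
	.open = simple_open,
	.llseek = generic_file_llseek,
};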
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
deleted file mode 100644
index d398a6102805..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ /dev/null
@@ -1,476 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __fw_api_bt_coex_h__
66#define __fw_api_bt_coex_h__
67
68#include <linux/types.h>
69#include <linux/bitops.h>
70
71#define BITS(nb) (BIT(nb) - 1)
72
73/**
74 * enum iwl_bt_coex_flags - flags for BT_COEX command
75 * @BT_COEX_MODE_POS:
76 * @BT_COEX_MODE_MSK:
77 * @BT_COEX_DISABLE_OLD:
78 * @BT_COEX_2W_OLD:
79 * @BT_COEX_3W_OLD:
80 * @BT_COEX_NW_OLD:
81 * @BT_COEX_AUTO_OLD:
82 * @BT_COEX_BT_OLD: Antenna is for BT (manufacturing tests)
83 * @BT_COEX_WIFI_OLD: Antenna is for WiFi (manufacturing tests)
84 * @BT_COEX_SYNC2SCO:
85 * @BT_COEX_CORUNNING:
86 * @BT_COEX_MPLUT:
87 * @BT_COEX_TTC:
88 * @BT_COEX_RRC:
89 *
90 * The COEX_MODE must be set for each command, even if it is not changed.
91 */
92enum iwl_bt_coex_flags {
93 BT_COEX_MODE_POS = 3,
94 BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
95 BT_COEX_DISABLE_OLD = 0x0 << BT_COEX_MODE_POS,
96 BT_COEX_2W_OLD = 0x1 << BT_COEX_MODE_POS,
97 BT_COEX_3W_OLD = 0x2 << BT_COEX_MODE_POS,
98 BT_COEX_NW_OLD = 0x3 << BT_COEX_MODE_POS,
99 BT_COEX_AUTO_OLD = 0x5 << BT_COEX_MODE_POS,
100 BT_COEX_BT_OLD = 0x6 << BT_COEX_MODE_POS,
101 BT_COEX_WIFI_OLD = 0x7 << BT_COEX_MODE_POS,
102 BT_COEX_SYNC2SCO = BIT(7),
103 BT_COEX_CORUNNING = BIT(8),
104 BT_COEX_MPLUT = BIT(9),
105 BT_COEX_TTC = BIT(20),
106 BT_COEX_RRC = BIT(21),
107};
108
109/*
110 * indicates what has changed in the BT_COEX command.
111 * BT_VALID_ENABLE must be set for each command. Commands without this bit will
112 * be discarded by the firmware.
113 */
114enum iwl_bt_coex_valid_bit_msk {
115 BT_VALID_ENABLE = BIT(0),
116 BT_VALID_BT_PRIO_BOOST = BIT(1),
117 BT_VALID_MAX_KILL = BIT(2),
118 BT_VALID_3W_TMRS = BIT(3),
119 BT_VALID_KILL_ACK = BIT(4),
120 BT_VALID_KILL_CTS = BIT(5),
121 BT_VALID_REDUCED_TX_POWER = BIT(6),
122 BT_VALID_LUT = BIT(7),
123 BT_VALID_WIFI_RX_SW_PRIO_BOOST = BIT(8),
124 BT_VALID_WIFI_TX_SW_PRIO_BOOST = BIT(9),
125 BT_VALID_MULTI_PRIO_LUT = BIT(10),
126 BT_VALID_TRM_KICK_FILTER = BIT(11),
127 BT_VALID_CORUN_LUT_20 = BIT(12),
128 BT_VALID_CORUN_LUT_40 = BIT(13),
129 BT_VALID_ANT_ISOLATION = BIT(14),
130 BT_VALID_ANT_ISOLATION_THRS = BIT(15),
131 BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
132 BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
133 BT_VALID_SYNC_TO_SCO = BIT(18),
134 BT_VALID_TTC = BIT(20),
135 BT_VALID_RRC = BIT(21),
136};
137
138/**
139 * enum iwl_bt_reduced_tx_power - allows reducing Tx power for WiFi frames.
140 * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
141 * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
142 *
143 * This mechanism allows BT and WiFi to run concurrently. Since WiFi
144 * reduces its Tx power, it can coexist with BT, reducing the number
145 * of WiFi frames being killed by BT.
146 */
147enum iwl_bt_reduced_tx_power {
148 BT_REDUCED_TX_POWER_CTL = BIT(0),
149 BT_REDUCED_TX_POWER_DATA = BIT(1),
150};
151
152enum iwl_bt_coex_lut_type {
153 BT_COEX_TIGHT_LUT = 0,
154 BT_COEX_LOOSE_LUT,
155 BT_COEX_TX_DIS_LUT,
156
157 BT_COEX_MAX_LUT,
158 BT_COEX_INVALID_LUT = 0xff,
159}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
160
161#define BT_COEX_LUT_SIZE (12)
162#define BT_COEX_CORUN_LUT_SIZE (32)
163#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
164#define BT_COEX_BOOST_SIZE (4)
165#define BT_REDUCED_TX_POWER_BIT BIT(7)
166
167/**
168 * struct iwl_bt_coex_cmd_old - bt coex configuration command
169 * @flags: &enum iwl_bt_coex_flags
170 * @max_kill:
171 * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
172 * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
173 * should be set by default
174 * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
175 * should be set by default
176 * @bt4_antenna_isolation: antenna isolation
177 * @bt4_antenna_isolation_thr: antenna threshold value
178 * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
179 * @bt4_tx_rx_max_freq0: TxRx max frequency
180 * @bt_prio_boost: BT priority boost registers
181 * @wifi_tx_prio_boost: SW boost of wifi tx priority
182 * @wifi_rx_prio_boost: SW boost of wifi rx priority
183 * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
184 * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
185 * @decision_lut: PTA decision LUT, per Prio-Ch
186 * @bt4_multiprio_lut: multi priority LUT configuration
187 * @bt4_corun_lut20: co-running 20 MHz LUT configuration
188 * @bt4_corun_lut40: co-running 40 MHz LUT configuration
189 * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
190 *
191 * The structure is used for the BT_COEX command.
192 */
193struct iwl_bt_coex_cmd_old {
194 __le32 flags;
195 u8 max_kill;
196 u8 bt_reduced_tx_power;
197 u8 override_primary_lut;
198 u8 override_secondary_lut;
199
200 u8 bt4_antenna_isolation;
201 u8 bt4_antenna_isolation_thr;
202 u8 bt4_tx_tx_delta_freq_thr;
203 u8 bt4_tx_rx_max_freq0;
204
205 __le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
206 __le32 wifi_tx_prio_boost;
207 __le32 wifi_rx_prio_boost;
208 __le32 kill_ack_msk;
209 __le32 kill_cts_msk;
210
211 __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
212 __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
213 __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
214 __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
215
216 __le32 valid_bit_msk;
217} __packed; /* BT_COEX_CMD_API_S_VER_5 */
218
219enum iwl_bt_coex_mode {
220 BT_COEX_DISABLE = 0x0,
221 BT_COEX_NW = 0x1,
222 BT_COEX_BT = 0x2,
223 BT_COEX_WIFI = 0x3,
224}; /* BT_COEX_MODES_E */
225
226enum iwl_bt_coex_enabled_modules {
227 BT_COEX_MPLUT_ENABLED = BIT(0),
228 BT_COEX_MPLUT_BOOST_ENABLED = BIT(1),
229 BT_COEX_SYNC2SCO_ENABLED = BIT(2),
230 BT_COEX_CORUN_ENABLED = BIT(3),
231 BT_COEX_HIGH_BAND_RET = BIT(4),
232}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */
233
234/**
235 * struct iwl_bt_coex_cmd - bt coex configuration command
236 * @mode: enum %iwl_bt_coex_mode
237 * @enabled_modules: enum %iwl_bt_coex_enabled_modules
238 *
239 * The structure is used for the BT_COEX command.
240 */
241struct iwl_bt_coex_cmd {
242 __le32 mode;
243 __le32 enabled_modules;
244} __packed; /* BT_COEX_CMD_API_S_VER_6 */
245
246/**
247 * struct iwl_bt_coex_corun_lut_update - bt coex update the corun lut
248 * @corun_lut20: co-running 20 MHz LUT configuration
249 * @corun_lut40: co-running 40 MHz LUT configuration
250 *
251 * The structure is used for the BT_COEX_UPDATE_CORUN_LUT command.
252 */
253struct iwl_bt_coex_corun_lut_update_cmd {
254 __le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE];
255 __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE];
256} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */
257
258/**
259 * struct iwl_bt_coex_reduced_txp_update_cmd - reduced Tx power update
260 * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT enables/disables the reduction;
261 *	the remaining bits hold the sta_id (value)
262 */
263struct iwl_bt_coex_reduced_txp_update_cmd {
264 __le32 reduced_txp;
265} __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */
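A minimal sketch of how reduced_txp would be filled in, reading the comment above literally: BT_REDUCED_TX_POWER_BIT (BIT(7), defined earlier in this file) carries the enable flag and the low bits carry the station id. The helper name below is hypothetical.

/* hypothetical helper: compose the reduced_txp word for one station */
static __le32 iwl_bt_coex_reduced_txp_word(u8 sta_id, bool enable)
{
	u32 val = sta_id;

	if (enable)
		val |= BT_REDUCED_TX_POWER_BIT;

	return cpu_to_le32(val);
}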
266
267/**
268 * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
269 * @bt_primary_ci:
270 * @primary_ch_phy_id:
271 * @bt_secondary_ci:
272 * @secondary_ch_phy_id:
273 *
274 * Used for BT_COEX_CI command
275 */
276struct iwl_bt_coex_ci_cmd {
277 __le64 bt_primary_ci;
278 __le32 primary_ch_phy_id;
279
280 __le64 bt_secondary_ci;
281 __le32 secondary_ch_phy_id;
282} __packed; /* BT_CI_MSG_API_S_VER_2 */
283
284#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
285 BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
286 BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
287
288enum iwl_bt_mxbox_dw0 {
289 BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
290 BT_MBOX(0, LE_PROF1, 3, 1),
291 BT_MBOX(0, LE_PROF2, 4, 1),
292 BT_MBOX(0, LE_PROF_OTHER, 5, 1),
293 BT_MBOX(0, CHL_SEQ_N, 8, 4),
294 BT_MBOX(0, INBAND_S, 13, 1),
295 BT_MBOX(0, LE_MIN_RSSI, 16, 4),
296 BT_MBOX(0, LE_SCAN, 20, 1),
297 BT_MBOX(0, LE_ADV, 21, 1),
298 BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
299 BT_MBOX(0, OPEN_CON_1, 28, 2),
300};
301
302enum iwl_bt_mxbox_dw1 {
303 BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
304 BT_MBOX(1, IP_SR, 4, 1),
305 BT_MBOX(1, LE_MSTR, 5, 1),
306 BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
307 BT_MBOX(1, MSG_TYPE, 16, 3),
308 BT_MBOX(1, SSN, 19, 2),
309};
310
311enum iwl_bt_mxbox_dw2 {
312 BT_MBOX(2, SNIFF_ACT, 0, 3),
313 BT_MBOX(2, PAG, 3, 1),
314 BT_MBOX(2, INQUIRY, 4, 1),
315 BT_MBOX(2, CONN, 5, 1),
316 BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
317 BT_MBOX(2, DISC, 13, 1),
318 BT_MBOX(2, SCO_TX_ACT, 16, 2),
319 BT_MBOX(2, SCO_RX_ACT, 18, 2),
320 BT_MBOX(2, ESCO_RE_TX, 20, 2),
321 BT_MBOX(2, SCO_DURATION, 24, 6),
322};
323
324enum iwl_bt_mxbox_dw3 {
325 BT_MBOX(3, SCO_STATE, 0, 1),
326 BT_MBOX(3, SNIFF_STATE, 1, 1),
327 BT_MBOX(3, A2DP_STATE, 2, 1),
328 BT_MBOX(3, ACL_STATE, 3, 1),
329 BT_MBOX(3, MSTR_STATE, 4, 1),
330 BT_MBOX(3, OBX_STATE, 5, 1),
331 BT_MBOX(3, OPEN_CON_2, 8, 2),
332 BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
333 BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
334 BT_MBOX(3, INBAND_P, 13, 1),
335 BT_MBOX(3, MSG_TYPE_2, 16, 3),
336 BT_MBOX(3, SSN_2, 19, 2),
337 BT_MBOX(3, UPDATE_REQUEST, 21, 1),
338};
339
340#define BT_MBOX_MSG(_notif, _num, _field) \
341 ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
342 >> BT_MBOX##_num##_##_field##_POS)
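BT_MBOX() defines a mask/shift pair per field and BT_MBOX_MSG() applies them to the mbox_msg words carried in the coex notifications defined below. For example, pulling the 2-bit TRAFFIC_LOAD field out of dword 3 of a notification pointer reduces to:

	/* expands to:
	 * (le32_to_cpu(notif->mbox_msg[3]) & BT_MBOX3_TRAFFIC_LOAD)
	 *	>> BT_MBOX3_TRAFFIC_LOAD_POS
	 */
	u32 bt_traffic_load = BT_MBOX_MSG(notif, 3, TRAFFIC_LOAD);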
343
344enum iwl_bt_activity_grading {
345 BT_OFF = 0,
346 BT_ON_NO_CONNECTION = 1,
347 BT_LOW_TRAFFIC = 2,
348 BT_HIGH_TRAFFIC = 3,
349
350 BT_MAX_AG,
351}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
352
353enum iwl_bt_ci_compliance {
354 BT_CI_COMPLIANCE_NONE = 0,
355 BT_CI_COMPLIANCE_PRIMARY = 1,
356 BT_CI_COMPLIANCE_SECONDARY = 2,
357 BT_CI_COMPLIANCE_BOTH = 3,
358}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */
359
360#define IWL_COEX_IS_TTC_ON(_ttc_rrc_status, _phy_id) \
361 (_ttc_rrc_status & BIT(_phy_id))
362
363#define IWL_COEX_IS_RRC_ON(_ttc_rrc_status, _phy_id) \
364 ((_ttc_rrc_status >> 4) & BIT(_phy_id))
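These two macros decode the ttc_rrc_status byte of the notification below, which packs per-PHY TTC state in the low nibble and RRC state in the high nibble; e.g.:

	/* is TTC / RRC active on PHY context 0? */
	bool ttc = IWL_COEX_IS_TTC_ON(notif->ttc_rrc_status, 0);
	bool rrc = IWL_COEX_IS_RRC_ON(notif->ttc_rrc_status, 0);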
365
366/**
367 * struct iwl_bt_coex_profile_notif - notification about BT coex
368 * @mbox_msg: message from BT to WiFi
369 * @msg_idx: the index of the message
370 * @bt_ci_compliance: enum %iwl_bt_ci_compliance
371 * @primary_ch_lut: LUT used for primary channel enum %iwl_bt_coex_lut_type
372 * @secondary_ch_lut: LUT used for secondary channel enum %iwl_bt_coex_lut_type
373 * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
374 * @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY
375 */
376struct iwl_bt_coex_profile_notif {
377 __le32 mbox_msg[4];
378 __le32 msg_idx;
379 __le32 bt_ci_compliance;
380
381 __le32 primary_ch_lut;
382 __le32 secondary_ch_lut;
383 __le32 bt_activity_grading;
384 u8 ttc_rrc_status;
385 u8 reserved[3];
386} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
387
388enum iwl_bt_coex_prio_table_event {
389 BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
390 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
391 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
392 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3,
393 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
394 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
395 BT_COEX_PRIO_TBL_EVT_DTIM = 6,
396 BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
397 BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
398 BT_COEX_PRIO_TBL_EVT_IDLE = 9,
399 BT_COEX_PRIO_TBL_EVT_MAX = 16,
400}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */
401
402enum iwl_bt_coex_prio_table_prio {
403 BT_COEX_PRIO_TBL_DISABLED = 0,
404 BT_COEX_PRIO_TBL_PRIO_LOW = 1,
405 BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
406 BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
407 BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
408 BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
409 BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6,
410 BT_COEX_PRIO_TBL_MAX = 8,
411}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */
412
413#define BT_COEX_PRIO_TBL_SHRD_ANT_POS (0)
414#define BT_COEX_PRIO_TBL_PRIO_POS (1)
415#define BT_COEX_PRIO_TBL_RESERVED_POS (4)
416
417/**
418 * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex
419 * @prio_tbl:
420 */
421struct iwl_bt_coex_prio_tbl_cmd {
422 u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
423} __packed;
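Going only by the *_POS defines above, each prio_tbl byte packs a shared-antenna flag at bit 0 and a priority value from enum iwl_bt_coex_prio_table_prio starting at bit 1. A hedged sketch of composing one entry; the helper is illustrative, not the driver's own code.

/* sketch: pack one priority-table entry from the *_POS defines above */
static u8 bt_coex_prio_tbl_entry(bool shared_ant,
				 enum iwl_bt_coex_prio_table_prio prio)
{
	return (shared_ant << BT_COEX_PRIO_TBL_SHRD_ANT_POS) |
	       (prio << BT_COEX_PRIO_TBL_PRIO_POS);
}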
424
425/**
426 * struct iwl_bt_coex_ci_cmd_old - bt coex channel inhibition command
427 * @bt_primary_ci:
428 * @bt_secondary_ci:
429 * @co_run_bw_primary:
430 * @co_run_bw_secondary:
431 * @primary_ch_phy_id:
432 * @secondary_ch_phy_id:
433 *
434 * Used for BT_COEX_CI command
435 */
436struct iwl_bt_coex_ci_cmd_old {
437 __le64 bt_primary_ci;
438 __le64 bt_secondary_ci;
439
440 u8 co_run_bw_primary;
441 u8 co_run_bw_secondary;
442 u8 primary_ch_phy_id;
443 u8 secondary_ch_phy_id;
444} __packed; /* BT_CI_MSG_API_S_VER_1 */
445
446/**
447 * struct iwl_bt_coex_profile_notif_old - notification about BT coex
448 * @mbox_msg: message from BT to WiFi
449 * @msg_idx: the index of the message
450 * @bt_status: 0 - off, 1 - on
451 * @bt_open_conn: number of BT connections open
452 * @bt_traffic_load: load of BT traffic
453 * @bt_agg_traffic_load: aggregated load of BT traffic
454 * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
455 * @primary_ch_lut: LUT used for primary channel
456 * @secondary_ch_lut: LUT used for secondary channel
457 * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
458 */
459struct iwl_bt_coex_profile_notif_old {
460 __le32 mbox_msg[4];
461 __le32 msg_idx;
462 u8 bt_status;
463 u8 bt_open_conn;
464 u8 bt_traffic_load;
465 u8 bt_agg_traffic_load;
466 u8 bt_ci_compliance;
467 u8 ttc_enabled;
468 u8 rrc_enabled;
469 u8 reserved;
470
471 __le32 primary_ch_lut;
472 __le32 secondary_ch_lut;
473 __le32 bt_activity_grading;
474} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
475
476#endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
deleted file mode 100644
index 20521bebb0b1..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ /dev/null
@@ -1,425 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __fw_api_d3_h__
66#define __fw_api_d3_h__
67
68/**
69 * enum iwl_d3_wakeup_flags - D3 manager wakeup flags
70 * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
71 */
72enum iwl_d3_wakeup_flags {
73 IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
74}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */
75
76/**
77 * struct iwl_d3_manager_config - D3 manager configuration command
78 * @min_sleep_time: minimum sleep time (in usec)
79 * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
80 * @wakeup_host_timer: force wakeup after this many seconds
81 *
82 * The structure is used for the D3_CONFIG_CMD command.
83 */
84struct iwl_d3_manager_config {
85 __le32 min_sleep_time;
86 __le32 wakeup_flags;
87 __le32 wakeup_host_timer;
88} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
89
90
91/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
92
93/**
94 * enum iwl_proto_offloads - enabled protocol offloads
95 * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
96 * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
97 */
98enum iwl_proto_offloads {
99 IWL_D3_PROTO_OFFLOAD_ARP = BIT(0),
100 IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
101};
102
103#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
104#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
105#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12
106#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4
107#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12
108
109#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4
110#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2
111
112/**
113 * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
114 * @enabled: enable flags
115 * @remote_ipv4_addr: remote address to answer to (or zero if all)
116 * @host_ipv4_addr: our IPv4 address to respond to queries for
117 * @arp_mac_addr: our MAC address for ARP responses
118 * @reserved: unused
119 */
120struct iwl_proto_offload_cmd_common {
121 __le32 enabled;
122 __be32 remote_ipv4_addr;
123 __be32 host_ipv4_addr;
124 u8 arp_mac_addr[ETH_ALEN];
125 __le16 reserved;
126} __packed;
127
128/**
129 * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration
130 * @common: common/IPv4 configuration
131 * @remote_ipv6_addr: remote address to answer to (or zero if all)
132 * @solicited_node_ipv6_addr: broken -- solicited node address exists
133 * for each target address
134 * @target_ipv6_addr: our target addresses
135 * @ndp_mac_addr: neighbor solicitation response MAC address
136 */
137struct iwl_proto_offload_cmd_v1 {
138 struct iwl_proto_offload_cmd_common common;
139 u8 remote_ipv6_addr[16];
140 u8 solicited_node_ipv6_addr[16];
141 u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
142 u8 ndp_mac_addr[ETH_ALEN];
143 __le16 reserved2;
144} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
145
146/**
147 * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration
148 * @common: common/IPv4 configuration
149 * @remote_ipv6_addr: remote address to answer to (or zero if all)
150 * @solicited_node_ipv6_addr: broken -- solicited node address exists
151 * for each target address
152 * @target_ipv6_addr: our target addresses
153 * @ndp_mac_addr: neighbor solicitation response MAC address
154 */
155struct iwl_proto_offload_cmd_v2 {
156 struct iwl_proto_offload_cmd_common common;
157 u8 remote_ipv6_addr[16];
158 u8 solicited_node_ipv6_addr[16];
159 u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
160 u8 ndp_mac_addr[ETH_ALEN];
161 u8 numValidIPv6Addresses;
162 u8 reserved2[3];
163} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
164
165struct iwl_ns_config {
166 struct in6_addr source_ipv6_addr;
167 struct in6_addr dest_ipv6_addr;
168 u8 target_mac_addr[ETH_ALEN];
169 __le16 reserved;
170} __packed; /* NS_OFFLOAD_CONFIG */
171
172struct iwl_targ_addr {
173 struct in6_addr addr;
174 __le32 config_num;
175} __packed; /* TARGET_IPV6_ADDRESS */
176
177/**
178 * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
179 * @common: common/IPv4 configuration
180 * @targ_addrs: target IPv6 addresses
181 * @ns_config: NS offload configurations
182 */
183struct iwl_proto_offload_cmd_v3_small {
184 struct iwl_proto_offload_cmd_common common;
185 __le32 num_valid_ipv6_addrs;
186 struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
187 struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
188} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
189
190/**
191 * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
192 * @common: common/IPv4 configuration
193 * @targ_addrs: target IPv6 addresses
194 * @ns_config: NS offload configurations
195 */
196struct iwl_proto_offload_cmd_v3_large {
197 struct iwl_proto_offload_cmd_common common;
198 __le32 num_valid_ipv6_addrs;
199 struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
200 struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
201} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
202
203/*
204 * WOWLAN_PATTERNS
205 */
206#define IWL_WOWLAN_MIN_PATTERN_LEN 16
207#define IWL_WOWLAN_MAX_PATTERN_LEN 128
208
209struct iwl_wowlan_pattern {
210 u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
211 u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
212 u8 mask_size;
213 u8 pattern_size;
214 __le16 reserved;
215} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */
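Each pattern byte is guarded by one bit in mask, which is why mask is IWL_WOWLAN_MAX_PATTERN_LEN / 8 bytes long. A sketch of that relationship, assuming the conventional cfg80211-style bit ordering (bit i of the mask covers pattern byte i); this is an illustration, not a statement of the firmware ABI.

/* sketch: does byte 'i' of the pattern take part in matching? */
static bool iwl_wowlan_pattern_byte_used(const struct iwl_wowlan_pattern *p,
					 unsigned int i)
{
	if (i >= p->pattern_size)
		return false;

	return p->mask[i / 8] & BIT(i % 8);
}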
216
217#define IWL_WOWLAN_MAX_PATTERNS 20
218
219struct iwl_wowlan_patterns_cmd {
220 __le32 n_patterns;
221 struct iwl_wowlan_pattern patterns[];
222} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
223
224enum iwl_wowlan_wakeup_filters {
225 IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
226 IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
227 IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
228 IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
229 IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
230 IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
231 IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
232 IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7),
233 IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
234 IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
235 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
236 IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
237 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
238 IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
239 IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
240 IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
241 IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
242}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
243
244struct iwl_wowlan_config_cmd {
245 __le32 wakeup_filter;
246 __le16 non_qos_seq;
247 __le16 qos_seq[8];
248 u8 wowlan_ba_teardown_tids;
249 u8 is_11n_connection;
250 u8 offloading_tid;
251 u8 reserved[3];
252} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
253
254/*
255 * WOWLAN_TSC_RSC_PARAMS
256 */
257#define IWL_NUM_RSC 16
258
259struct tkip_sc {
260 __le16 iv16;
261 __le16 pad;
262 __le32 iv32;
263} __packed; /* TKIP_SC_API_U_VER_1 */
264
265struct iwl_tkip_rsc_tsc {
266 struct tkip_sc unicast_rsc[IWL_NUM_RSC];
267 struct tkip_sc multicast_rsc[IWL_NUM_RSC];
268 struct tkip_sc tsc;
269} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */
270
271struct aes_sc {
272 __le64 pn;
273} __packed; /* TKIP_AES_SC_API_U_VER_1 */
274
275struct iwl_aes_rsc_tsc {
276 struct aes_sc unicast_rsc[IWL_NUM_RSC];
277 struct aes_sc multicast_rsc[IWL_NUM_RSC];
278 struct aes_sc tsc;
279} __packed; /* AES_TSC_RSC_API_S_VER_1 */
280
281union iwl_all_tsc_rsc {
282 struct iwl_tkip_rsc_tsc tkip;
283 struct iwl_aes_rsc_tsc aes;
284}; /* ALL_TSC_RSC_API_S_VER_2 */
285
286struct iwl_wowlan_rsc_tsc_params_cmd {
287 union iwl_all_tsc_rsc all_tsc_rsc;
288} __packed; /* ALL_TSC_RSC_API_S_VER_2 */
289
290#define IWL_MIC_KEY_SIZE 8
291struct iwl_mic_keys {
292 u8 tx[IWL_MIC_KEY_SIZE];
293 u8 rx_unicast[IWL_MIC_KEY_SIZE];
294 u8 rx_mcast[IWL_MIC_KEY_SIZE];
295} __packed; /* MIC_KEYS_API_S_VER_1 */
296
297#define IWL_P1K_SIZE 5
298struct iwl_p1k_cache {
299 __le16 p1k[IWL_P1K_SIZE];
300} __packed;
301
302#define IWL_NUM_RX_P1K_CACHE 2
303
304struct iwl_wowlan_tkip_params_cmd {
305 struct iwl_mic_keys mic_keys;
306 struct iwl_p1k_cache tx;
307 struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
308 struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
309} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */
310
311#define IWL_KCK_MAX_SIZE 32
312#define IWL_KEK_MAX_SIZE 32
313
314struct iwl_wowlan_kek_kck_material_cmd {
315 u8 kck[IWL_KCK_MAX_SIZE];
316 u8 kek[IWL_KEK_MAX_SIZE];
317 __le16 kck_len;
318 __le16 kek_len;
319 __le64 replay_ctr;
320} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
321
322#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
323
324enum iwl_wowlan_rekey_status {
325 IWL_WOWLAN_REKEY_POST_REKEY = 0,
326 IWL_WOWLAN_REKEY_WHILE_REKEY = 1,
327}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */
328
329enum iwl_wowlan_wakeup_reason {
330 IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0,
331 IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0),
332 IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1),
333 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2),
334 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3),
335 IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4),
336 IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5),
337 IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6),
338 IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7),
339 IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
340 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
341 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
342 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
343 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
344 IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13),
345 IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
346 IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
347 IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),
348
349}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
350
351struct iwl_wowlan_gtk_status {
352 u8 key_index;
353 u8 reserved[3];
354 u8 decrypt_key[16];
355 u8 tkip_mic_key[8];
356 struct iwl_wowlan_rsc_tsc_params_cmd rsc;
357} __packed;
358
359struct iwl_wowlan_status {
360 struct iwl_wowlan_gtk_status gtk;
361 __le64 replay_ctr;
362 __le16 pattern_number;
363 __le16 non_qos_seq_ctr;
364 __le16 qos_seq_ctr[8];
365 __le32 wakeup_reasons;
366 __le32 num_of_gtk_rekeys;
367 __le32 transmitted_ndps;
368 __le32 received_beacons;
369 __le32 wake_packet_length;
370 __le32 wake_packet_bufsize;
371 u8 wake_packet[]; /* can be truncated from _length to _bufsize */
372} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
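A rough sketch (hypothetical helper, assuming kernel context) of how the truncation noted above could be handled when extracting the wake packet:

/* Hypothetical sketch: copy out the possibly truncated wake packet. */
static u32 example_copy_wake_packet(const struct iwl_wowlan_status *status,
				    u8 *buf, u32 buf_len)
{
	u32 avail = le32_to_cpu(status->wake_packet_bufsize);
	u32 copy = min_t(u32, avail, buf_len);

	memcpy(buf, status->wake_packet, copy);
	/* the full frame on the air was wake_packet_length bytes */
	return copy;
}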
373
374#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
375#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
376#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
377
378struct iwl_tcp_packet_info {
379 __le16 tcp_pseudo_header_checksum;
380 __le16 tcp_payload_length;
381} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
382
383struct iwl_tcp_packet {
384 struct iwl_tcp_packet_info info;
385 u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
386 u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
387} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
388
389struct iwl_remote_wake_packet {
390 struct iwl_tcp_packet_info info;
391 u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
392 u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
393} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
394
395struct iwl_wowlan_remote_wake_config {
396 __le32 connection_max_time; /* unused */
397 /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
398 u8 max_syn_retries;
399 u8 max_data_retries;
400 u8 tcp_syn_ack_timeout;
401 u8 tcp_ack_timeout;
402
403 struct iwl_tcp_packet syn_tx;
404 struct iwl_tcp_packet synack_rx;
405 struct iwl_tcp_packet keepalive_ack_rx;
406 struct iwl_tcp_packet fin_tx;
407
408 struct iwl_remote_wake_packet keepalive_tx;
409 struct iwl_remote_wake_packet wake_rx;
410
411 /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
412 u8 sequence_number_offset;
413 u8 sequence_number_length;
414 u8 token_offset;
415 u8 token_length;
416 /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
417 __le32 initial_sequence_number;
418 __le16 keepalive_interval;
419 __le16 num_tokens;
420 u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
421} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
422
423/* TODO: NetDetect API */
424
425#endif /* __fw_api_d3_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
deleted file mode 100644
index f3f3ee0a766b..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ /dev/null
@@ -1,387 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __fw_api_mac_h__
64#define __fw_api_mac_h__
65
66/*
67 * The first MAC indices (starting from 0)
68 * are available to the driver, AUX follows
69 */
70#define MAC_INDEX_AUX 4
71#define MAC_INDEX_MIN_DRIVER 0
72#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
73#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1)
74
75enum iwl_ac {
76 AC_BK,
77 AC_BE,
78 AC_VI,
79 AC_VO,
80 AC_NUM,
81};
82
83/**
84 * enum iwl_mac_protection_flags - MAC context flags
85 * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames,
86 * this will require CCK RTS/CTS2self.
87 * RTS/CTS will protect full burst time.
88 * @MAC_PROT_FLG_HT_PROT: enable HT protection
89 * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
90 * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
91 */
92enum iwl_mac_protection_flags {
93 MAC_PROT_FLG_TGG_PROTECT = BIT(3),
94 MAC_PROT_FLG_HT_PROT = BIT(23),
95 MAC_PROT_FLG_FAT_PROT = BIT(24),
96 MAC_PROT_FLG_SELF_CTS_EN = BIT(30),
97};
98
99#define MAC_FLG_SHORT_SLOT BIT(4)
100#define MAC_FLG_SHORT_PREAMBLE BIT(5)
101
102/**
103 * enum iwl_mac_types - Supported MAC types
104 * @FW_MAC_TYPE_FIRST: lowest supported MAC type
105 * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
106 * @FW_MAC_TYPE_LISTENER: monitor MAC type (?)
107 * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS
108 * @FW_MAC_TYPE_IBSS: IBSS
109 * @FW_MAC_TYPE_BSS_STA: BSS (managed) station
110 * @FW_MAC_TYPE_P2P_DEVICE: P2P Device
111 * @FW_MAC_TYPE_P2P_STA: P2P client
112 * @FW_MAC_TYPE_GO: P2P GO
113 * @FW_MAC_TYPE_TEST: ?
114 * @FW_MAC_TYPE_MAX: highest supported MAC type
115 */
116enum iwl_mac_types {
117 FW_MAC_TYPE_FIRST = 1,
118 FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST,
119 FW_MAC_TYPE_LISTENER,
120 FW_MAC_TYPE_PIBSS,
121 FW_MAC_TYPE_IBSS,
122 FW_MAC_TYPE_BSS_STA,
123 FW_MAC_TYPE_P2P_DEVICE,
124 FW_MAC_TYPE_P2P_STA,
125 FW_MAC_TYPE_GO,
126 FW_MAC_TYPE_TEST,
127 FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST
128}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */
129
130/**
131 * enum iwl_tsf_id - TSF hw timer ID
132 * @TSF_ID_A: use TSF A
133 * @TSF_ID_B: use TSF B
134 * @TSF_ID_C: use TSF C
135 * @TSF_ID_D: use TSF D
136 * @NUM_TSF_IDS: number of TSF timers available
137 */
138enum iwl_tsf_id {
139 TSF_ID_A = 0,
140 TSF_ID_B = 1,
141 TSF_ID_C = 2,
142 TSF_ID_D = 3,
143 NUM_TSF_IDS = 4,
144}; /* TSF_ID_API_E_VER_1 */
145
146/**
147 * struct iwl_mac_data_ap - configuration data for AP MAC context
148 * @beacon_time: beacon transmit time in system time
149 * @beacon_tsf: beacon transmit time in TSF
150 * @bi: beacon interval in TU
151 * @bi_reciprocal: 2^32 / bi
152 * @dtim_interval: dtim transmit time in TU
153 * @dtim_reciprocal: 2^32 / dtim_interval
154 * @mcast_qid: queue ID for multicast traffic
155 * @beacon_template: beacon template ID
156 */
157struct iwl_mac_data_ap {
158 __le32 beacon_time;
159 __le64 beacon_tsf;
160 __le32 bi;
161 __le32 bi_reciprocal;
162 __le32 dtim_interval;
163 __le32 dtim_reciprocal;
164 __le32 mcast_qid;
165 __le32 beacon_template;
166} __packed; /* AP_MAC_DATA_API_S_VER_1 */
167
168/**
169 * struct iwl_mac_data_ibss - configuration data for IBSS MAC context
170 * @beacon_time: beacon transmit time in system time
171 * @beacon_tsf: beacon transmit time in TSF
172 * @bi: beacon interval in TU
173 * @bi_reciprocal: 2^32 / bi
174 * @beacon_template: beacon template ID
175 */
176struct iwl_mac_data_ibss {
177 __le32 beacon_time;
178 __le64 beacon_tsf;
179 __le32 bi;
180 __le32 bi_reciprocal;
181 __le32 beacon_template;
182} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
183
184/**
185 * struct iwl_mac_data_sta - configuration data for station MAC context
186 * @is_assoc: 1 for associated state, 0 otherwise
187 * @dtim_time: DTIM arrival time in system time
188 * @dtim_tsf: DTIM arrival time in TSF
189 * @bi: beacon interval in TU, applicable only when associated
190 * @bi_reciprocal: 2^32 / bi , applicable only when associated
191 * @dtim_interval: DTIM interval in TU, applicable only when associated
192 * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
193 * @listen_interval: in beacon intervals, applicable only when associated
194 * @assoc_id: unique ID assigned by the AP during association
195 */
196struct iwl_mac_data_sta {
197 __le32 is_assoc;
198 __le32 dtim_time;
199 __le64 dtim_tsf;
200 __le32 bi;
201 __le32 bi_reciprocal;
202 __le32 dtim_interval;
203 __le32 dtim_reciprocal;
204 __le32 listen_interval;
205 __le32 assoc_id;
206 __le32 assoc_beacon_arrive_time;
207} __packed; /* STA_MAC_DATA_API_S_VER_1 */
208
209/**
210 * struct iwl_mac_data_go - configuration data for P2P GO MAC context
211 * @ap: iwl_mac_data_ap struct with most config data
212 * @ctwin: client traffic window in TU (period after TBTT when GO is present).
213 * 0 indicates that there is no CT window.
214 * @opp_ps_enabled: indicate that opportunistic PS allowed
215 */
216struct iwl_mac_data_go {
217 struct iwl_mac_data_ap ap;
218 __le32 ctwin;
219 __le32 opp_ps_enabled;
220} __packed; /* GO_MAC_DATA_API_S_VER_1 */
221
222/**
223 * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context
224 * @sta: iwl_mac_data_sta struct with most config data
225 * @ctwin: client traffic window in TU (period after TBTT when GO is present).
226 * 0 indicates that there is no CT window.
227 */
228struct iwl_mac_data_p2p_sta {
229 struct iwl_mac_data_sta sta;
230 __le32 ctwin;
231} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
232
233/**
234 * struct iwl_mac_data_pibss - Pseudo IBSS config data
235 * @stats_interval: interval in TU between statistics notifications to host.
236 */
237struct iwl_mac_data_pibss {
238 __le32 stats_interval;
239} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
240
241/*
242 * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC
243 * context.
244 * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
245 * other channels as well. This should be set to true only when the
246 * device is discoverable and there is an active GO. Note that setting this
247 * field when not needed will increase the number of interrupts and affect
248 * the platform power, as this setting opens the Rx filters on
249 * all MACs.
250 */
251struct iwl_mac_data_p2p_dev {
252 __le32 is_disc_extended;
253} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
254
255/**
256 * enum iwl_mac_filter_flags - MAC context filter flags
257 * @MAC_FILTER_IN_PROMISC: accept all data frames
258 * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
259 * control frames to the host
260 * @MAC_FILTER_ACCEPT_GRP: accept multicast frames
261 * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
262 * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
263 * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
264 * (in station mode when associated)
265 * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames
266 * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames
267 * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
268 */
269enum iwl_mac_filter_flags {
270 MAC_FILTER_IN_PROMISC = BIT(0),
271 MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1),
272 MAC_FILTER_ACCEPT_GRP = BIT(2),
273 MAC_FILTER_DIS_DECRYPT = BIT(3),
274 MAC_FILTER_DIS_GRP_DECRYPT = BIT(4),
275 MAC_FILTER_IN_BEACON = BIT(6),
276 MAC_FILTER_OUT_BCAST = BIT(8),
277 MAC_FILTER_IN_CRC32 = BIT(11),
278 MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
279};
280
281/**
282 * enum iwl_mac_qos_flags - QoS flags
283 * @MAC_QOS_FLG_UPDATE_EDCA: ?
284 * @MAC_QOS_FLG_TGN: HT is enabled
285 * @MAC_QOS_FLG_TXOP_TYPE: ?
286 *
287 */
288enum iwl_mac_qos_flags {
289 MAC_QOS_FLG_UPDATE_EDCA = BIT(0),
290 MAC_QOS_FLG_TGN = BIT(1),
291 MAC_QOS_FLG_TXOP_TYPE = BIT(4),
292};
293
294/**
295 * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD
296 * @cw_min: Contention window, start value in numbers of slots.
297 * Should be a power-of-2, minus 1. Device's default is 0x0f.
298 * @cw_max: Contention window, max value in numbers of slots.
299 * Should be a power-of-2, minus 1. Device's default is 0x3f.
300 * @aifsn: Number of slots in Arbitration Interframe Space (before
301 * performing random backoff timing prior to Tx). Device default 1.
302 * @fifos_mask: FIFOs used by this MAC for this AC
303 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
304 *
305 * One instance of this config struct for each of 4 EDCA access categories
306 * in struct iwl_qosparam_cmd.
307 *
308 * Device will automatically increase contention window by (2*CW) + 1 for each
309 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
310 * value, to cap the CW value.
311 */
312struct iwl_ac_qos {
313 __le16 cw_min;
314 __le16 cw_max;
315 u8 aifsn;
316 u8 fifos_mask;
317 __le16 edca_txop;
318} __packed; /* AC_QOS_API_S_VER_2 */
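A minimal sketch (hypothetical helper) filling one AC entry with the device defaults quoted in the comment above:

/* Hypothetical sketch: one AC entry using the documented device defaults. */
static void example_fill_ac_defaults(struct iwl_ac_qos *ac, u8 fifo_mask)
{
	ac->cw_min = cpu_to_le16(0x0f);		/* power-of-2 minus 1 */
	ac->cw_max = cpu_to_le16(0x3f);
	ac->aifsn = 1;
	ac->fifos_mask = fifo_mask;
	ac->edca_txop = cpu_to_le16(0);		/* no TXOP limit */
}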
319
320/**
321 * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts
322 * ( MAC_CONTEXT_CMD = 0x28 )
323 * @id_and_color: ID and color of the MAC
324 * @action: action to perform, one of FW_CTXT_ACTION_*
325 * @mac_type: one of FW_MAC_TYPE_*
326 * @tsf_id: TSF HW timer, one of TSF_ID_*
327 * @node_addr: MAC address
328 * @bssid_addr: BSSID
329 * @cck_rates: basic rates available for CCK
330 * @ofdm_rates: basic rates available for OFDM
331 * @protection_flags: combination of MAC_PROT_FLG_FLAG_*
332 * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
333 * @short_slot: 0x10 for enabling short slots, 0 otherwise
334 * @filter_flags: combination of MAC_FILTER_*
335 * @qos_flags: from MAC_QOS_FLG_*
336 * @ac: one iwl_mac_qos configuration for each AC
337 * @mac_specific: one of struct iwl_mac_data_*, according to mac_type
338 */
339struct iwl_mac_ctx_cmd {
340 /* COMMON_INDEX_HDR_API_S_VER_1 */
341 __le32 id_and_color;
342 __le32 action;
343 /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
344 __le32 mac_type;
345 __le32 tsf_id;
346 u8 node_addr[6];
347 __le16 reserved_for_node_addr;
348 u8 bssid_addr[6];
349 __le16 reserved_for_bssid_addr;
350 __le32 cck_rates;
351 __le32 ofdm_rates;
352 __le32 protection_flags;
353 __le32 cck_short_preamble;
354 __le32 short_slot;
355 __le32 filter_flags;
356 /* MAC_QOS_PARAM_API_S_VER_1 */
357 __le32 qos_flags;
358 struct iwl_ac_qos ac[AC_NUM+1];
359 /* MAC_CONTEXT_COMMON_DATA_API_S */
360 union {
361 struct iwl_mac_data_ap ap;
362 struct iwl_mac_data_go go;
363 struct iwl_mac_data_sta sta;
364 struct iwl_mac_data_p2p_sta p2p_sta;
365 struct iwl_mac_data_p2p_dev p2p_dev;
366 struct iwl_mac_data_pibss pibss;
367 struct iwl_mac_data_ibss ibss;
368 };
369} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */
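As an illustrative sketch only (hypothetical helper, not the driver's actual setup path), a station-mode context could be initialized like this:

/* Hypothetical sketch: minimal station-mode MAC context setup. */
static void example_init_sta_ctx(struct iwl_mac_ctx_cmd *cmd,
				 const u8 *addr, const u8 *bssid)
{
	cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = cpu_to_le32(TSF_ID_A);
	memcpy(cmd->node_addr, addr, 6);
	memcpy(cmd->bssid_addr, bssid, 6);
	cmd->sta.is_assoc = cpu_to_le32(0);	/* not associated yet */
}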
370
371static inline u32 iwl_mvm_reciprocal(u32 v)
372{
373 if (!v)
374 return 0;
375 return 0xFFFFFFFF / v;
376}
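A sketch of how the reciprocal helper feeds the *_reciprocal fields (hypothetical helper, assuming the beacon interval and DTIM period are given in TU):

/* Hypothetical sketch: fill the timing fields of an AP/GO context. */
static void example_fill_ap_timing(struct iwl_mac_data_ap *ap,
				   u32 beacon_int_tu, u32 dtim_period)
{
	u32 dtim_tu = beacon_int_tu * dtim_period;

	ap->bi = cpu_to_le32(beacon_int_tu);
	ap->bi_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(beacon_int_tu));
	ap->dtim_interval = cpu_to_le32(dtim_tu);
	ap->dtim_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(dtim_tu));
}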
377
378#define IWL_NONQOS_SEQ_GET 0x1
379#define IWL_NONQOS_SEQ_SET 0x2
380struct iwl_nonqos_seq_query_cmd {
381 __le32 get_set_flag;
382 __le32 mac_id_n_color;
383 __le16 value;
384 __le16 reserved;
385} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
386
387#endif /* __fw_api_mac_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
deleted file mode 100644
index c8f3e2536cbb..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ /dev/null
@@ -1,467 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67
68#ifndef __fw_api_power_h__
69#define __fw_api_power_h__
70
71/* Power Management Commands, Responses, Notifications */
72
73/**
74 * enum iwl_ltr_config_flags - masks for LTR config command flags
75 * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
76 * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
77 * memory access
78 * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
79 * reg change
80 * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
81 * D0 to D3
82 * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
83 * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
84 * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
85 */
86enum iwl_ltr_config_flags {
87 LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
88 LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
89 LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
90 LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
91 LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
92 LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
93 LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
94};
95
96/**
97 * struct iwl_ltr_config_cmd_v1 - configures the LTR
98 * @flags: See %enum iwl_ltr_config_flags
99 */
100struct iwl_ltr_config_cmd_v1 {
101 __le32 flags;
102 __le32 static_long;
103 __le32 static_short;
104} __packed; /* LTR_CAPABLE_API_S_VER_1 */
105
106#define LTR_VALID_STATES_NUM 4
107
108/**
109 * struct iwl_ltr_config_cmd - configures the LTR
110 * @flags: See %enum iwl_ltr_config_flags
111 * @static_long:
112 * @static_short:
113 * @ltr_cfg_values:
114 * @ltr_short_idle_timeout:
115 */
116struct iwl_ltr_config_cmd {
117 __le32 flags;
118 __le32 static_long;
119 __le32 static_short;
120 __le32 ltr_cfg_values[LTR_VALID_STATES_NUM];
121 __le32 ltr_short_idle_timeout;
122} __packed; /* LTR_CAPABLE_API_S_VER_2 */
123
124/* Radio LP RX Energy Threshold measured in dBm */
125#define POWER_LPRX_RSSI_THRESHOLD 75
126#define POWER_LPRX_RSSI_THRESHOLD_MAX 94
127#define POWER_LPRX_RSSI_THRESHOLD_MIN 30
128
129/**
130 * enum iwl_power_flags - masks for power table command flags
131 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
132 * receiver and transmitter. '0' - does not allow.
133 * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
134 * '1' Driver enables PM (use rest of parameters)
135 * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
136 * '1' PM may sleep over DTIM until the listen interval.
137 * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
138 * access categories are both delivery and trigger enabled.
139 * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
140 * PBW Snoozing enabled
141 * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
142 * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
143 * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
144 * detection enablement
145*/
146enum iwl_power_flags {
147 POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
148 POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1),
149 POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2),
150 POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5),
151 POWER_FLAGS_BT_SCO_ENA = BIT(8),
152 POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
153 POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
154 POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12),
155};
156
157#define IWL_POWER_VEC_SIZE 5
158
159/**
160 * struct iwl_powertable_cmd - legacy power command. Besides supporting the old
161 * API, it is also used with the new power API for device-wide power settings.
162 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
163 *
164 * @flags: Power table command flags from POWER_FLAGS_*
165 * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
166 * Minimum allowed: 3 * DTIM. Keep alive period must be
167 * set regardless of power scheme or current power state.
168 * FW uses this value also when PM is disabled.
169 * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
170 * PSM transition - legacy PM
171 * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
172 * PSM transition - legacy PM
173 * @sleep_interval: not in use
174 * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
175 * is set. For example, if it is required to skip over
176 * one DTIM, this value needs to be set to 2 (DTIM periods).
177 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
178 * Default: 80dbm
179 */
180struct iwl_powertable_cmd {
181 /* PM_POWER_TABLE_CMD_API_S_VER_6 */
182 __le16 flags;
183 u8 keep_alive_seconds;
184 u8 debug_flags;
185 __le32 rx_data_timeout;
186 __le32 tx_data_timeout;
187 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
188 __le32 skip_dtim_periods;
189 __le32 lprx_rssi_threshold;
190} __packed;
191
192/**
193 * enum iwl_device_power_flags - masks for device power command flags
194 * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
195 * receiver and transmitter. '0' - does not allow.
196*/
197enum iwl_device_power_flags {
198 DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
199};
200
201/**
202 * struct iwl_device_power_cmd - device wide power command.
203 * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
204 *
205 * @flags: Power table command flags from DEVICE_POWER_FLAGS_*
206 */
207struct iwl_device_power_cmd {
208 /* PM_POWER_TABLE_CMD_API_S_VER_6 */
209 __le16 flags;
210 __le16 reserved;
211} __packed;
212
213/**
214 * struct iwl_mac_power_cmd - New power command containing uAPSD support
215 * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
216 * @id_and_color: MAC context identifier
217 * @flags: Power table command flags from POWER_FLAGS_*
218 * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
219 * Minimum allowed: 3 * DTIM. Keep alive period must be
220 * set regardless of power scheme or current power state.
221 * FW uses this value also when PM is disabled.
222 * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
223 * PSM transition - legacy PM
224 * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
225 * PSM transition - legacy PM
226 * @sleep_interval: not in use
227 * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
228 * is set. For example, if it is required to skip over
229 * one DTIM, this value needs to be set to 2 (DTIM periods).
230 * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
231 * PSM transition - uAPSD
232 * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
233 * PSM transition - uAPSD
234 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
235 * Default: 80dbm
236 * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
237 * @snooze_interval: Maximum time between attempts to retrieve buffered data
238 * from the AP [msec]
239 * @snooze_window: A window of time in which PBW snoozing ensures that all
240 * packets are received. It is also the minimum time from the last
241 * received unicast RX packet before the client stops snoozing
242 * for data. [msec]
243 * @snooze_step: TBD
244 * @qndp_tid: TID client shall use for uAPSD QNDP triggers
245 * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
246 * each corresponding AC.
247 * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
248 * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
249 * values.
250 * @heavy_tx_thld_packets: TX threshold measured in number of packets
251 * @heavy_rx_thld_packets: RX threshold measured in number of packets
252 * @heavy_tx_thld_percentage: TX threshold measured in load's percentage
253 * @heavy_rx_thld_percentage: RX threshold measured in load's percentage
254 * @limited_ps_threshold:
255*/
256struct iwl_mac_power_cmd {
257 /* CONTEXT_DESC_API_T_VER_1 */
258 __le32 id_and_color;
259
260 /* CLIENT_PM_POWER_TABLE_S_VER_1 */
261 __le16 flags;
262 __le16 keep_alive_seconds;
263 __le32 rx_data_timeout;
264 __le32 tx_data_timeout;
265 __le32 rx_data_timeout_uapsd;
266 __le32 tx_data_timeout_uapsd;
267 u8 lprx_rssi_threshold;
268 u8 skip_dtim_periods;
269 __le16 snooze_interval;
270 __le16 snooze_window;
271 u8 snooze_step;
272 u8 qndp_tid;
273 u8 uapsd_ac_flags;
274 u8 uapsd_max_sp;
275 u8 heavy_tx_thld_packets;
276 u8 heavy_rx_thld_packets;
277 u8 heavy_tx_thld_percentage;
278 u8 heavy_rx_thld_percentage;
279 u8 limited_ps_threshold;
280 u8 reserved;
281} __packed;
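A hedged sketch (hypothetical helper, assuming the IEEE80211_WMM_IE_STA_QOSINFO_* definitions from linux/ieee80211.h) enabling uAPSD for the voice and video ACs:

/* Hypothetical sketch: turn on uAPSD for VO/VI in the MAC power command. */
static void example_enable_uapsd(struct iwl_mac_power_cmd *cmd)
{
	cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
	cmd->uapsd_ac_flags = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |
			      IEEE80211_WMM_IE_STA_QOSINFO_AC_VI;
	cmd->qndp_tid = 6;	/* voice TID for QNDP triggers */
}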
282
283/*
284 * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when
285 * associated AP is identified as improperly implementing uAPSD protocol.
286 * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
287 * @sta_id: index of station in uCode's station table - associated AP ID in
288 * this context.
289 */
290struct iwl_uapsd_misbehaving_ap_notif {
291 __le32 sta_id;
292 u8 mac_id;
293 u8 reserved[3];
294} __packed;
295
296/**
297 * struct iwl_reduce_tx_power_cmd - TX power reduction command
298 * REDUCE_TX_POWER_CMD = 0x9f
299 * @flags: (reserved for future implementation)
300 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
301 * @pwr_restriction: TX power restriction in dBm.
302 */
303struct iwl_reduce_tx_power_cmd {
304 u8 flags;
305 u8 mac_context_id;
306 __le16 pwr_restriction;
307} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
308
309enum iwl_dev_tx_power_cmd_mode {
310 IWL_TX_POWER_MODE_SET_MAC = 0,
311 IWL_TX_POWER_MODE_SET_DEVICE = 1,
312 IWL_TX_POWER_MODE_SET_CHAINS = 2,
313}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */
314
315/**
316 * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
317 * @set_mode: see &enum iwl_dev_tx_power_cmd_mode
318 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
319 * @pwr_restriction: TX power restriction in 1/8 dBm.
320 * @dev_24: device TX power restriction in 1/8 dBm
321 * @dev_52_low: device TX power restriction upper band - low
322 * @dev_52_high: device TX power restriction upper band - high
323 */
324struct iwl_dev_tx_power_cmd_v2 {
325 __le32 set_mode;
326 __le32 mac_context_id;
327 __le16 pwr_restriction;
328 __le16 dev_24;
329 __le16 dev_52_low;
330 __le16 dev_52_high;
331} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
332
333#define IWL_NUM_CHAIN_LIMITS 2
334#define IWL_NUM_SUB_BANDS 5
335
336/**
337 * struct iwl_dev_tx_power_cmd - TX power reduction command
338 * @v2: version 2 of the command, embedded here for easier software handling
339 * @per_chain_restriction: per chain restrictions
340 */
341struct iwl_dev_tx_power_cmd {
342 /* v3 is just an extension of v2 - keep this here */
343 struct iwl_dev_tx_power_cmd_v2 v2;
344 __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
345} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
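A minimal sketch (hypothetical helper; the 20 dBm cap is chosen arbitrarily) of filling the v2 part, using the 1/8 dBm units noted above:

/* Hypothetical sketch: cap TX power of one MAC context to 20 dBm. */
static void example_limit_tx_power(struct iwl_dev_tx_power_cmd *cmd, u32 mac_id)
{
	cmd->v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC);
	cmd->v2.mac_context_id = cpu_to_le32(mac_id);
	cmd->v2.pwr_restriction = cpu_to_le16(20 * 8);	/* units of 1/8 dBm */
}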
346
347#define IWL_DEV_MAX_TX_POWER 0x7FFF
348
349/**
350 * struct iwl_beacon_filter_cmd
351 * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
352 * @id_and_color: MAC context identifier
353 * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
354 * to driver if delta in Energy values calculated for this and last
355 * passed beacon is greater than this threshold. Zero value means that
356 * the Energy change is ignored for beacon filtering, and beacon will
357 * not be forced to be sent to driver regardless of this delta. Typical
358 * energy delta 5dB.
359 * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
360 * Send beacon to driver if delta in Energy values calculated for this
361 * and last passed beacon is greater than this threshold. Zero value
362 * means that the Energy change is ignored for beacon filtering while in
363 * Roaming state, typical energy delta 1dB.
364 * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
365 * calculated for current beacon is less than the threshold, use
366 * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
367 * Threshold. Typical energy threshold is -72dBm.
368 * @bf_temp_threshold: This threshold determines the type of temperature
369 * filtering (Slow or Fast) that is selected (Units are in Celsius):
370 * If the current temperature is above this threshold - Fast filter
371 * will be used, If the current temperature is below this threshold -
372 * Slow filter will be used.
373 * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
374 * calculated for this and the last passed beacon is greater than this
375 * threshold. Zero value means that the temperature change is ignored for
376 * beacon filtering; beacons will not be forced to be sent to driver
377 * regardless of whether the temperature has changed.
378 * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
379 * calculated for this and the last passed beacon is greater than this
380 * threshold. Zero value means that the temperature change is ignored for
381 * beacon filtering; beacons will not be forced to be sent to driver
382 * regardless of whether the temperature has changed.
383 * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
384 * @bf_escape_timer: Send beacons to the driver if no beacons were passed
385 * for a specific period of time. Units: Beacons.
386 * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
387 * for a longer period of time than this escape-timeout. Units: Beacons.
388 * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
389 */
390struct iwl_beacon_filter_cmd {
391 __le32 bf_energy_delta;
392 __le32 bf_roaming_energy_delta;
393 __le32 bf_roaming_state;
394 __le32 bf_temp_threshold;
395 __le32 bf_temp_fast_filter;
396 __le32 bf_temp_slow_filter;
397 __le32 bf_enable_beacon_filter;
398 __le32 bf_debug_flag;
399 __le32 bf_escape_timer;
400 __le32 ba_escape_timer;
401 __le32 ba_enable_beacon_abort;
402} __packed;
403
404/* Beacon filtering and beacon abort */
405#define IWL_BF_ENERGY_DELTA_DEFAULT 5
406#define IWL_BF_ENERGY_DELTA_D0I3 20
407#define IWL_BF_ENERGY_DELTA_MAX 255
408#define IWL_BF_ENERGY_DELTA_MIN 0
409
410#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
411#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
412#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
413#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
414
415#define IWL_BF_ROAMING_STATE_DEFAULT 72
416#define IWL_BF_ROAMING_STATE_D0I3 72
417#define IWL_BF_ROAMING_STATE_MAX 255
418#define IWL_BF_ROAMING_STATE_MIN 0
419
420#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
421#define IWL_BF_TEMP_THRESHOLD_D0I3 112
422#define IWL_BF_TEMP_THRESHOLD_MAX 255
423#define IWL_BF_TEMP_THRESHOLD_MIN 0
424
425#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
426#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
427#define IWL_BF_TEMP_FAST_FILTER_MAX 255
428#define IWL_BF_TEMP_FAST_FILTER_MIN 0
429
430#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
431#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
432#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
433#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
434
435#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
436
437#define IWL_BF_DEBUG_FLAG_DEFAULT 0
438#define IWL_BF_DEBUG_FLAG_D0I3 0
439
440#define IWL_BF_ESCAPE_TIMER_DEFAULT 0
441#define IWL_BF_ESCAPE_TIMER_D0I3 0
442#define IWL_BF_ESCAPE_TIMER_MAX 1024
443#define IWL_BF_ESCAPE_TIMER_MIN 0
444
445#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
446#define IWL_BA_ESCAPE_TIMER_D0I3 6
447#define IWL_BA_ESCAPE_TIMER_D3 9
448#define IWL_BA_ESCAPE_TIMER_MAX 1024
449#define IWL_BA_ESCAPE_TIMER_MIN 0
450
451#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
452
453#define IWL_BF_CMD_CONFIG(mode) \
454 .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \
455 .bf_roaming_energy_delta = \
456 cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \
457 .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \
458 .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \
459 .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \
460 .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \
461 .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \
462 .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \
463 .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
464
465#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
466#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
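A sketch of how the config macro is typically consumed in a designated initializer (hypothetical helper, assuming the default thresholds are wanted):

/* Hypothetical sketch: build a beacon filter command from the defaults. */
static void example_default_beacon_filter(struct iwl_beacon_filter_cmd *out)
{
	struct iwl_beacon_filter_cmd cmd = {
		IWL_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter =
			cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
	};

	*out = cmd;
}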
467#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
deleted file mode 100644
index 0f1ea80a55ef..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ /dev/null
@@ -1,389 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __fw_api_rs_h__
64#define __fw_api_rs_h__
65
66#include "fw-api-mac.h"
67
68/*
69 * These serve as indexes into
70 * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
71 * TODO: avoid overlap between legacy and HT rates
72 */
73enum {
74 IWL_RATE_1M_INDEX = 0,
75 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
76 IWL_RATE_2M_INDEX,
77 IWL_RATE_5M_INDEX,
78 IWL_RATE_11M_INDEX,
79 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
80 IWL_RATE_6M_INDEX,
81 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
82 IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
83 IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
84 IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
85 IWL_RATE_9M_INDEX,
86 IWL_RATE_12M_INDEX,
87 IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
88 IWL_RATE_18M_INDEX,
89 IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
90 IWL_RATE_24M_INDEX,
91 IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
92 IWL_RATE_36M_INDEX,
93 IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
94 IWL_RATE_48M_INDEX,
95 IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
96 IWL_RATE_54M_INDEX,
97 IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
98 IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
99 IWL_RATE_60M_INDEX,
100 IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
101 IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
102 IWL_RATE_MCS_8_INDEX,
103 IWL_RATE_MCS_9_INDEX,
104 IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
105 IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
106 IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
107};
108
109#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
110
111/* fw API values for legacy bit rates, both OFDM and CCK */
112enum {
113 IWL_RATE_6M_PLCP = 13,
114 IWL_RATE_9M_PLCP = 15,
115 IWL_RATE_12M_PLCP = 5,
116 IWL_RATE_18M_PLCP = 7,
117 IWL_RATE_24M_PLCP = 9,
118 IWL_RATE_36M_PLCP = 11,
119 IWL_RATE_48M_PLCP = 1,
120 IWL_RATE_54M_PLCP = 3,
121 IWL_RATE_1M_PLCP = 10,
122 IWL_RATE_2M_PLCP = 20,
123 IWL_RATE_5M_PLCP = 55,
124 IWL_RATE_11M_PLCP = 110,
125 IWL_RATE_INVM_PLCP = -1,
126};
127
128/*
129 * rate_n_flags bit fields
130 *
131 * The 32-bit value has different layouts in the low 8 bits depending on the
132 * format. There are three formats, HT, VHT and legacy (11abg, with subformats
133 * for CCK and OFDM).
134 *
135 * High-throughput (HT) rate format
136 * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
137 * Very High-throughput (VHT) rate format
138 * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
139 * Legacy OFDM rate format for bits 7:0
140 * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
141 * Legacy CCK rate format for bits 7:0:
142 * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
143 */
144
145/* Bit 8: (1) HT format, (0) legacy or VHT format */
146#define RATE_MCS_HT_POS 8
147#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS)
148
149/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
150#define RATE_MCS_CCK_POS 9
151#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS)
152
153/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
154#define RATE_MCS_VHT_POS 26
155#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS)
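A small sketch (hypothetical helper) that applies the three format bits above to classify a rate_n_flags word:

/* Hypothetical sketch: classify the modulation format of rate_n_flags. */
static inline const char *example_rate_format(u32 rate_n_flags)
{
	if (rate_n_flags & RATE_MCS_HT_MSK)
		return "HT";
	if (rate_n_flags & RATE_MCS_VHT_MSK)
		return "VHT";
	if (rate_n_flags & RATE_MCS_CCK_MSK)
		return "legacy CCK";
	return "legacy OFDM";
}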
156
157
158/*
159 * High-throughput (HT) rate format for bits 7:0
160 *
161 * 2-0: MCS rate base
162 * 0) 6 Mbps
163 * 1) 12 Mbps
164 * 2) 18 Mbps
165 * 3) 24 Mbps
166 * 4) 36 Mbps
167 * 5) 48 Mbps
168 * 6) 54 Mbps
169 * 7) 60 Mbps
170 * 4-3: 0) Single stream (SISO)
171 * 1) Dual stream (MIMO)
172 * 2) Triple stream (MIMO)
173 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
174 * (bits 7-6 are zero)
175 *
176 * Together the low 5 bits work out to the MCS index because we don't
177 * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
178 * streams and 16-23 have three streams. We could also support MCS 32
179 * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
180 */
181#define RATE_HT_MCS_RATE_CODE_MSK 0x7
182#define RATE_HT_MCS_NSS_POS 3
183#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS)
184
185/* Bit 10: (1) Use Green Field preamble */
186#define RATE_HT_MCS_GF_POS 10
187#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS)
188
189#define RATE_HT_MCS_INDEX_MSK 0x3f
190
191/*
192 * Very High-throughput (VHT) rate format for bits 7:0
193 *
194 * 3-0: VHT MCS (0-9)
195 * 5-4: number of streams - 1:
196 * 0) Single stream (SISO)
197 * 1) Dual stream (MIMO)
198 * 2) Triple stream (MIMO)
199 */
200
201/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
202#define RATE_VHT_MCS_RATE_CODE_MSK 0xf
203#define RATE_VHT_MCS_NSS_POS 4
204#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS)
205
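A sketch (hypothetical helper) decoding the VHT fields described above; note the NSS field stores the stream count minus one:

/* Hypothetical sketch: extract VHT MCS and stream count from rate_n_flags. */
static inline void example_decode_vht(u32 rate_n_flags, u8 *mcs, u8 *nss)
{
	*mcs = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
	*nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
		RATE_VHT_MCS_NSS_POS) + 1;
}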
206/*
207 * Legacy OFDM rate format for bits 7:0
208 *
209 * 3-0: 0xD) 6 Mbps
210 * 0xF) 9 Mbps
211 * 0x5) 12 Mbps
212 * 0x7) 18 Mbps
213 * 0x9) 24 Mbps
214 * 0xB) 36 Mbps
215 * 0x1) 48 Mbps
216 * 0x3) 54 Mbps
217 * (bits 7-4 are 0)
218 *
219 * Legacy CCK rate format for bits 7:0:
220 * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
221 *
222 * 6-0: 10) 1 Mbps
223 * 20) 2 Mbps
224 * 55) 5.5 Mbps
225 * 110) 11 Mbps
226 * (bit 7 is 0)
227 */
228#define RATE_LEGACY_RATE_MSK 0xff
229
230
231/*
232 * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
233 * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
234 */
235#define RATE_MCS_CHAN_WIDTH_POS 11
236#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS)
237#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS)
238#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS)
239#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS)
240#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS)
241
242/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
243#define RATE_MCS_SGI_POS 13
244#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS)
245
246/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
247#define RATE_MCS_ANT_POS 14
248#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS)
249#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS)
250#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS)
251#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \
252 RATE_MCS_ANT_B_MSK)
253#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \
254 RATE_MCS_ANT_C_MSK)
255#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK
256#define RATE_MCS_ANT_NUM 3
257
258/* Bit 17-18: (0) SS, (1) SS*2 */
259#define RATE_MCS_STBC_POS 17
260#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS)
261#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS)
262
263/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
264#define RATE_MCS_BF_POS 19
265#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
266
267/* Bit 20: (0) ZLF is off, (1) ZLF is on */
268#define RATE_MCS_ZLF_POS 20
269#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS)
270
271/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
272#define RATE_MCS_DUP_POS 24
273#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS)
274
275/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
276#define RATE_MCS_LDPC_POS 27
277#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
278
279
280/* Link Quality definitions */
281
282/* # entries in rate scale table to support Tx retries */
283#define LQ_MAX_RETRY_NUM 16
284
285/* Link quality command flags bit fields */
286
287/* Bit 0: (0) Don't use RTS (1) Use RTS */
288#define LQ_FLAG_USE_RTS_POS 0
289#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS)
290
291/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
292#define LQ_FLAG_COLOR_POS 1
293#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
294
295/* Bit 4-5: Tx RTS BW Signalling
296 * (0) No RTS BW signalling
297 * (1) Static BW signalling
298 * (2) Dynamic BW signalling
299 */
300#define LQ_FLAG_RTS_BW_SIG_POS 4
301#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS)
302#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS)
303#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS)
304
305/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
306 * Dynamic BW selection allows Tx with a narrower BW than requested in rates
307 */
308#define LQ_FLAG_DYNAMIC_BW_POS 6
309#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)
310
311/* Single Stream Tx Parameters (lq_cmd->ss_params)
312 * Flags to control a smart FW decision about whether BFER/STBC/SISO will be
313 * used for single stream Tx.
314 */
315
316/* Bit 0-1: Max STBC streams allowed. Can be 0-3.
317 * (0) - No STBC allowed
318 * (1) - 2x1 STBC allowed (HT/VHT)
319 * (2) - 4x2 STBC allowed (HT/VHT)
320 * (3) - 3x2 STBC allowed (HT only)
321 * All our chips are at most 2 antennas so only (1) is valid for now.
322 */
323#define LQ_SS_STBC_ALLOWED_POS 0
324#define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_POS)
325
326/* 2x1 STBC is allowed */
327#define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS)
328
329/* Bit 2: Beamformer (VHT only) is allowed */
330#define LQ_SS_BFER_ALLOWED_POS 2
331#define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS)
332
333/* Bit 3: Force BFER or STBC for testing
334 * If this is set:
335 * If BFER is allowed then force the ucode to choose BFER else
336 * If STBC is allowed then force the ucode to choose STBC over SISO
337 */
338#define LQ_SS_FORCE_POS 3
339#define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS)
340
341/* Bit 31: ss_params field is valid. Used for FW backward compatibility
342 * with other drivers which don't support the ss_params API yet
343 */
344#define LQ_SS_PARAMS_VALID_POS 31
345#define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS)
346
347/**
348 * struct iwl_lq_cmd - link quality command
349 * @sta_id: station to update
350 * @control: not used
351 * @flags: combination of LQ_FLAG_*
352 * @mimo_delim: the first SISO index in rs_table, which separates MIMO
353 * and SISO rates
354 * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD).
355 * Should be ANT_[ABC]
356 * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC]
357 * @initial_rate_index: first index from rs_table per AC category
358 * @agg_time_limit: aggregation max time threshold in usec/100, meaning
359 * value of 100 is one usec. Range is 100 to 8000
360 * @agg_disable_start_th: try-count threshold for starting aggregation.
361 * If a frame has higher try-count, it should not be selected for
362 * starting an aggregation sequence.
363 * @agg_frame_cnt_limit: max frame count in an aggregation.
364 * 0: no limit
365 * 1: no aggregation (one frame per aggregation)
366 * 2 - 0x3f: maximal number of frames (up to 3f == 63)
367 * @rs_table: array of rates for each TX try, each is rate_n_flags,
368 * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
369 * @ss_params: single stream features. declare whether STBC or BFER are allowed.
370 */
371struct iwl_lq_cmd {
372 u8 sta_id;
373 u8 reduced_tpc;
374 u16 control;
375 /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
376 u8 flags;
377 u8 mimo_delim;
378 u8 single_stream_ant_msk;
379 u8 dual_stream_ant_msk;
380 u8 initial_rate_index[AC_NUM];
381 /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
382 __le16 agg_time_limit;
383 u8 agg_disable_start_th;
384 u8 agg_frame_cnt_limit;
385 __le32 reserved2;
386 __le32 rs_table[LQ_MAX_RETRY_NUM];
387 __le32 ss_params;
388}; /* LINK_QUALITY_CMD_API_S_VER_1 */
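A sketch (hypothetical helper) of populating ss_params with the flags defined above, marking the field valid for firmware:

/* Hypothetical sketch: allow 2x1 STBC and BFER for single-stream Tx. */
static void example_set_ss_params(struct iwl_lq_cmd *lq)
{
	lq->ss_params = cpu_to_le32(LQ_SS_PARAMS_VALID |
				    LQ_SS_STBC_1SS_ALLOWED |
				    LQ_SS_BFER_ALLOWED);
}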
389#endif /* __fw_api_rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h
deleted file mode 100644
index 9b7e49d4620f..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h
+++ /dev/null
@@ -1,238 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67
68#ifndef __fw_api_rx_h__
69#define __fw_api_rx_h__
70
71#define IWL_RX_INFO_PHY_CNT 8
72#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
73#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
74#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
75#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
76#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
77#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
78#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
79
80/**
81 * struct iwl_rx_phy_info - phy info
82 * (REPLY_RX_PHY_CMD = 0xc0)
83 * @non_cfg_phy_cnt: non configurable DSP phy data byte count
84 * @cfg_phy_cnt: configurable DSP phy data byte count
85 * @stat_id: configurable DSP phy data set ID
86 * @reserved1:
87 * @system_timestamp: GP2 at on air rise
88 * @timestamp: TSF at on air rise
89 * @beacon_time_stamp: beacon at on-air rise
90 * @phy_flags: general phy flags: band, modulation, ...
91 * @channel: channel number
92 * @non_cfg_phy: for various implementations of non_cfg_phy data
93 * @rate_n_flags: RATE_MCS_*
94 * @byte_count: frame's byte-count
95 * @frame_time: frame's time on the air, based on byte count and frame rate
96 * calculation
97 * @mac_active_msk: what MACs were active when the frame was received
98 *
99 * Before each Rx, the device sends this data. It contains PHY information
100 * about the reception of the packet.
101 */
102struct iwl_rx_phy_info {
103 u8 non_cfg_phy_cnt;
104 u8 cfg_phy_cnt;
105 u8 stat_id;
106 u8 reserved1;
107 __le32 system_timestamp;
108 __le64 timestamp;
109 __le32 beacon_time_stamp;
110 __le16 phy_flags;
111 __le16 channel;
112 __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
113 __le32 rate_n_flags;
114 __le32 byte_count;
115 __le16 mac_active_msk;
116 __le16 frame_time;
117} __packed;
118
119/*
120 * TCP offload Rx assist info
121 *
122 * bits 0:3 - reserved
123 * bits 4:7 - MIC CRC length
124 * bits 8:12 - MAC header length
125 * bit 13 - Padding indication
126 * bit 14 - A-AMSDU indication
127 * bit 15 - Offload enabled
128 */
129enum iwl_csum_rx_assist_info {
130 CSUM_RXA_RESERVED_MASK = 0x000f,
131 CSUM_RXA_MICSIZE_MASK = 0x00f0,
132 CSUM_RXA_HEADERLEN_MASK = 0x1f00,
133 CSUM_RXA_PADD = BIT(13),
134 CSUM_RXA_AMSDU = BIT(14),
135 CSUM_RXA_ENA = BIT(15)
136};
137
138/**
 139 * struct iwl_rx_mpdu_res_start - start of the Rx MPDU response (byte count and checksum assist)
 140 * @assist: checksum offload assist info, see CSUM_RXA_* above
141 */
142struct iwl_rx_mpdu_res_start {
143 __le16 byte_count;
144 __le16 assist;
145} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
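
For illustration only: a hypothetical helper sketching how the assist bitfield could be checked against the CSUM_RXA_* masks above (assumes this header is included).

static inline bool example_csum_offload_enabled(const struct iwl_rx_mpdu_res_start *res)
{
	u16 assist = le16_to_cpu(res->assist);

	/* checksum results are only meaningful when the offload-enabled bit is set */
	return !!(assist & CSUM_RXA_ENA);
}
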
146
147/**
148 * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
149 * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
150 * @RX_RES_PHY_FLAGS_MOD_CCK:
151 * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
152 * @RX_RES_PHY_FLAGS_NARROW_BAND:
153 * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
154 * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
155 * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
156 * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
157 * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
158 */
159enum iwl_rx_phy_flags {
160 RX_RES_PHY_FLAGS_BAND_24 = BIT(0),
161 RX_RES_PHY_FLAGS_MOD_CCK = BIT(1),
162 RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
163 RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3),
164 RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
165 RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
166 RX_RES_PHY_FLAGS_AGG = BIT(7),
167 RX_RES_PHY_FLAGS_OFDM_HT = BIT(8),
168 RX_RES_PHY_FLAGS_OFDM_GF = BIT(9),
169 RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10),
170};
171
172/**
173 * enum iwl_mvm_rx_status - written by fw for each Rx packet
174 * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
175 * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
176 * @RX_MPDU_RES_STATUS_SRC_STA_FOUND:
177 * @RX_MPDU_RES_STATUS_KEY_VALID:
178 * @RX_MPDU_RES_STATUS_KEY_PARAM_OK:
179 * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
180 * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
181 * in the driver.
182 * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
183 * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
184 * alg = CCM only. Checks replay attack for 11w frames. Relevant only if
185 * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
186 * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
187 * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
188 * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
189 * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
190 * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
191 * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
192 * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
193 * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
194 * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
195 * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
196 * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
197 * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
198 * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
199 * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
200 * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
201 * @RX_MPDU_RES_STATUS_STA_ID_MSK:
202 * @RX_MPDU_RES_STATUS_RRF_KILL:
203 * @RX_MPDU_RES_STATUS_FILTERING_MSK:
204 * @RX_MPDU_RES_STATUS2_FILTERING_MSK:
205 */
206enum iwl_mvm_rx_status {
207 RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
208 RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
209 RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
210 RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
211 RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
212 RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
213 RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
214 RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
215 RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
216 RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
217 RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
218 RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
219 RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
220 RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
221 RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
222 RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
223 RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
224 RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
225 RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = BIT(12),
226 RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
227 RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
228 RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
229 RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
230 RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
231 RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
232 RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
233 RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),
234 RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
235 RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
236};
237
238#endif /* __fw_api_rx_h__ */
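
For illustration only: a hypothetical check of the per-MPDU status word against the iwl_mvm_rx_status bits above, roughly in the spirit of what an Rx path would do; the function name and exact policy are made up for this sketch.

static bool example_rx_frame_ok(u32 rx_pkt_status)
{
	/* drop frames with a bad CRC or an RXE overrun */
	if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK))
		return false;

	/* for CCMP-protected frames also require a good MIC */
	if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
	    RX_MPDU_RES_STATUS_SEC_CCM_ENC)
		return !!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK);

	return true;
}
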
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
deleted file mode 100644
index 3a657e4b60ac..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ /dev/null
@@ -1,730 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __fw_api_scan_h__
67#define __fw_api_scan_h__
68
69#include "fw-api.h"
70
71/* Scan Commands, Responses, Notifications */
72
73/* Max number of IEs for direct SSID scans in a command */
74#define PROBE_OPTION_MAX 20
75
76/**
77 * struct iwl_ssid_ie - directed scan network information element
78 *
79 * Up to 20 of these may appear in REPLY_SCAN_CMD,
80 * selected by "type" bit field in struct iwl_scan_channel;
81 * each channel may select different ssids from among the 20 entries.
82 * SSID IEs get transmitted in reverse order of entry.
83 */
84struct iwl_ssid_ie {
85 u8 id;
86 u8 len;
87 u8 ssid[IEEE80211_MAX_SSID_LEN];
88} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
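
For illustration only: a hypothetical helper filling one directed-scan SSID entry; WLAN_EID_SSID comes from linux/ieee80211.h and ssid_len is assumed to be at most IEEE80211_MAX_SSID_LEN.

static void example_fill_ssid_ie(struct iwl_ssid_ie *ie,
				 const u8 *ssid, u8 ssid_len)
{
	ie->id = WLAN_EID_SSID;
	ie->len = ssid_len;
	memcpy(ie->ssid, ssid, ssid_len);
}
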
89
90/* scan offload */
91#define IWL_SCAN_MAX_BLACKLIST_LEN 64
92#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
93#define IWL_SCAN_MAX_PROFILES 11
94#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
95
96/* Default watchdog (in MS) for scheduled scan iteration */
97#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
98
99#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
100#define CAN_ABORT_STATUS 1
101
102#define IWL_FULL_SCAN_MULTIPLIER 5
103#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
104#define IWL_MAX_SCHED_SCAN_PLANS 2
105
106enum scan_framework_client {
107 SCAN_CLIENT_SCHED_SCAN = BIT(0),
108 SCAN_CLIENT_NETDETECT = BIT(1),
109 SCAN_CLIENT_ASSET_TRACKING = BIT(2),
110};
111
112/**
113 * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
114 * @ssid: MAC address to filter out
115 * @reported_rssi: AP rssi reported to the host
116 * @client_bitmap: clients ignore this entry - enum scan_framework_client
117 */
118struct iwl_scan_offload_blacklist {
119 u8 ssid[ETH_ALEN];
120 u8 reported_rssi;
121 u8 client_bitmap;
122} __packed;
123
124enum iwl_scan_offload_network_type {
125 IWL_NETWORK_TYPE_BSS = 1,
126 IWL_NETWORK_TYPE_IBSS = 2,
127 IWL_NETWORK_TYPE_ANY = 3,
128};
129
130enum iwl_scan_offload_band_selection {
131 IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4,
132 IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8,
133 IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc,
134};
135
136/**
137 * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
138 * @ssid_index: index to ssid list in fixed part
139 * @unicast_cipher: encryption algorithm to match - bitmap
 140 * @auth_alg: authentication algorithm to match - bitmap
141 * @network_type: enum iwl_scan_offload_network_type
142 * @band_selection: enum iwl_scan_offload_band_selection
143 * @client_bitmap: clients waiting for match - enum scan_framework_client
144 */
145struct iwl_scan_offload_profile {
146 u8 ssid_index;
147 u8 unicast_cipher;
148 u8 auth_alg;
149 u8 network_type;
150 u8 band_selection;
151 u8 client_bitmap;
152 u8 reserved[2];
153} __packed;
154
155/**
156 * iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
 157 * @blacklist: AP list to filter off from scan results
158 * @profiles: profiles to search for match
159 * @blacklist_len: length of blacklist
160 * @num_profiles: num of profiles in the list
161 * @match_notify: clients waiting for match found notification
162 * @pass_match: clients waiting for the results
163 * @active_clients: active clients bitmap - enum scan_framework_client
164 * @any_beacon_notify: clients waiting for match notification without match
165 */
166struct iwl_scan_offload_profile_cfg {
167 struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
168 u8 blacklist_len;
169 u8 num_profiles;
170 u8 match_notify;
171 u8 pass_match;
172 u8 active_clients;
173 u8 any_beacon_notify;
174 u8 reserved[2];
175} __packed;
176
177/**
178 * iwl_scan_schedule_lmac - schedule of scan offload
179 * @delay: delay between iterations, in seconds.
180 * @iterations: num of scan iterations
181 * @full_scan_mul: number of partial scans before each full scan
182 */
183struct iwl_scan_schedule_lmac {
184 __le16 delay;
185 u8 iterations;
186 u8 full_scan_mul;
187} __packed; /* SCAN_SCHEDULE_API_S */
188
189enum iwl_scan_offload_complete_status {
190 IWL_SCAN_OFFLOAD_COMPLETED = 1,
191 IWL_SCAN_OFFLOAD_ABORTED = 2,
192};
193
194enum iwl_scan_ebs_status {
195 IWL_SCAN_EBS_SUCCESS,
196 IWL_SCAN_EBS_FAILED,
197 IWL_SCAN_EBS_CHAN_NOT_FOUND,
198 IWL_SCAN_EBS_INACTIVE,
199};
200
201/**
202 * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
203 * @tx_flags: combination of TX_CMD_FLG_*
204 * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
205 * cleared. Combination of RATE_MCS_*
206 * @sta_id: index of destination station in FW station table
207 * @reserved: for alignment and future use
208 */
209struct iwl_scan_req_tx_cmd {
210 __le32 tx_flags;
211 __le32 rate_n_flags;
212 u8 sta_id;
213 u8 reserved[3];
214} __packed;
215
216enum iwl_scan_channel_flags_lmac {
217 IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27),
218 IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28),
219};
220
221/**
222 * iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
223 * @flags: bits 1-20: directed scan to i'th ssid
224 * other bits &enum iwl_scan_channel_flags_lmac
 225 * @channel_num: channel number 1-13 etc.
 226 * @iter_count: number of scan iterations on this channel
227 * @iter_interval: interval in seconds between iterations on one channel
228 */
229struct iwl_scan_channel_cfg_lmac {
230 __le32 flags;
231 __le16 channel_num;
232 __le16 iter_count;
233 __le32 iter_interval;
234} __packed;
235
236/*
237 * iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
238 * @offset: offset in the data block
239 * @len: length of the segment
240 */
241struct iwl_scan_probe_segment {
242 __le16 offset;
243 __le16 len;
244} __packed;
245
246/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
247 * @mac_header: first (and common) part of the probe
248 * @band_data: band specific data
249 * @common_data: last (and common) part of the probe
250 * @buf: raw data block
251 */
252struct iwl_scan_probe_req {
253 struct iwl_scan_probe_segment mac_header;
254 struct iwl_scan_probe_segment band_data[2];
255 struct iwl_scan_probe_segment common_data;
256 u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
257} __packed;
258
259enum iwl_scan_channel_flags {
260 IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0),
261 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1),
262 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
263};
264
265/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
266 * @flags: enum iwl_scan_channel_flags
267 * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
268 * involved.
269 * 1 - EBS is disabled.
 270 * 2 - every second scan will be full scan (and so on).
271 */
272struct iwl_scan_channel_opt {
273 __le16 flags;
274 __le16 non_ebs_ratio;
275} __packed;
276
277/**
278 * iwl_mvm_lmac_scan_flags
279 * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses
280 * without filtering.
281 * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels
282 * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan
283 * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification
 284 * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching
285 * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
286 * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
287 * and DS parameter set IEs into probe requests.
288 * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
289 */
290enum iwl_mvm_lmac_scan_flags {
291 IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
292 IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1),
293 IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2),
294 IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3),
295 IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
296 IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
297 IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
298 IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
299};
300
301enum iwl_scan_priority {
302 IWL_SCAN_PRIORITY_LOW,
303 IWL_SCAN_PRIORITY_MEDIUM,
304 IWL_SCAN_PRIORITY_HIGH,
305};
306
307enum iwl_scan_priority_ext {
308 IWL_SCAN_PRIORITY_EXT_0_LOWEST,
309 IWL_SCAN_PRIORITY_EXT_1,
310 IWL_SCAN_PRIORITY_EXT_2,
311 IWL_SCAN_PRIORITY_EXT_3,
312 IWL_SCAN_PRIORITY_EXT_4,
313 IWL_SCAN_PRIORITY_EXT_5,
314 IWL_SCAN_PRIORITY_EXT_6,
315 IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
316};
317
318/**
319 * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
320 * @reserved1: for alignment and future use
 321 * @n_channels: num of channels to scan
 322 * @active_dwell: dwell time for active channels
 323 * @passive_dwell: dwell time for passive channels
 324 * @fragmented_dwell: dwell time for fragmented passive scan
 325 * @reserved2: for alignment and future use
 326 * @rx_chain_select: PHY_RX_CHAIN_* flags
327 * @scan_flags: &enum iwl_mvm_lmac_scan_flags
328 * @max_out_time: max time (in TU) to be out of associated channel
329 * @suspend_time: pause scan this long (TUs) when returning to service channel
330 * @flags: RXON flags
331 * @filter_flags: RXON filter
332 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz
333 * @direct_scan: list of SSIDs for directed active scan
334 * @scan_prio: enum iwl_scan_priority
335 * @iter_num: number of scan iterations
336 * @delay: delay in seconds before first iteration
337 * @schedule: two scheduling plans. The first one is finite, the second one can
338 * be infinite.
339 * @channel_opt: channel optimization options, for full and partial scan
340 * @data: channel configuration and probe request packet.
341 */
342struct iwl_scan_req_lmac {
343 /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
344 __le32 reserved1;
345 u8 n_channels;
346 u8 active_dwell;
347 u8 passive_dwell;
348 u8 fragmented_dwell;
349 __le16 reserved2;
350 __le16 rx_chain_select;
351 __le32 scan_flags;
352 __le32 max_out_time;
353 __le32 suspend_time;
354 /* RX_ON_FLAGS_API_S_VER_1 */
355 __le32 flags;
356 __le32 filter_flags;
357 struct iwl_scan_req_tx_cmd tx_cmd[2];
358 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
359 __le32 scan_prio;
360 /* SCAN_REQ_PERIODIC_PARAMS_API_S */
361 __le32 iter_num;
362 __le32 delay;
363 struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS];
364 struct iwl_scan_channel_opt channel_opt[2];
365 u8 data[];
366} __packed;
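
For illustration only: per the @data documentation above, the variable-length data[] block carries the channel configuration (n_channels entries of struct iwl_scan_channel_cfg_lmac) followed by one struct iwl_scan_probe_req, so the total command size can be computed as sketched below; the helper name is hypothetical.

static inline size_t example_lmac_scan_cmd_size(unsigned int n_channels)
{
	return sizeof(struct iwl_scan_req_lmac) +
	       n_channels * sizeof(struct iwl_scan_channel_cfg_lmac) +
	       sizeof(struct iwl_scan_probe_req);
}
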
367
368/**
369 * struct iwl_scan_results_notif - scan results for one channel -
370 * SCAN_RESULT_NTF_API_S_VER_3
371 * @channel: which channel the results are from
372 * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
373 * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
374 * @num_probe_not_sent: # of request that weren't sent due to not enough time
375 * @duration: duration spent in channel, in usecs
376 */
377struct iwl_scan_results_notif {
378 u8 channel;
379 u8 band;
380 u8 probe_status;
381 u8 num_probe_not_sent;
382 __le32 duration;
383} __packed;
384
385/**
386 * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels)
387 * SCAN_COMPLETE_NTF_API_S_VER_3
388 * @scanned_channels: number of channels scanned (and number of valid results)
389 * @status: one of SCAN_COMP_STATUS_*
390 * @bt_status: BT on/off status
391 * @last_channel: last channel that was scanned
392 * @tsf_low: TSF timer (lower half) in usecs
393 * @tsf_high: TSF timer (higher half) in usecs
394 * @results: an array of scan results, only "scanned_channels" of them are valid
395 */
396struct iwl_lmac_scan_complete_notif {
397 u8 scanned_channels;
398 u8 status;
399 u8 bt_status;
400 u8 last_channel;
401 __le32 tsf_low;
402 __le32 tsf_high;
403 struct iwl_scan_results_notif results[];
404} __packed;
405
406/**
407 * iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
408 * @last_schedule_line: last schedule line executed (fast or regular)
409 * @last_schedule_iteration: last scan iteration executed before scan abort
410 * @status: enum iwl_scan_offload_complete_status
411 * @ebs_status: EBS success status &enum iwl_scan_ebs_status
 412 * @time_after_last_iter: time in seconds elapsed after last iteration
413 */
414struct iwl_periodic_scan_complete {
415 u8 last_schedule_line;
416 u8 last_schedule_iteration;
417 u8 status;
418 u8 ebs_status;
419 __le32 time_after_last_iter;
420 __le32 reserved;
421} __packed;
422
423/* UMAC Scan API */
424
425/* The maximum of either of these cannot exceed 8, because we use an
426 * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
427 */
428#define IWL_MVM_MAX_UMAC_SCANS 8
429#define IWL_MVM_MAX_LMAC_SCANS 1
430
431enum scan_config_flags {
432 SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
433 SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1),
434 SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2),
435 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3),
436 SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8),
437 SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9),
438 SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10),
439 SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11),
440 SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12),
441 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13),
442 SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14),
443 SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15),
444 SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16),
445 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17),
446 SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18),
447 SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
448 SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
449 SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
450
451 /* Bits 26-31 are for num of channels in channel_array */
452#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
453};
454
455enum scan_config_rates {
456 /* OFDM basic rates */
457 SCAN_CONFIG_RATE_6M = BIT(0),
458 SCAN_CONFIG_RATE_9M = BIT(1),
459 SCAN_CONFIG_RATE_12M = BIT(2),
460 SCAN_CONFIG_RATE_18M = BIT(3),
461 SCAN_CONFIG_RATE_24M = BIT(4),
462 SCAN_CONFIG_RATE_36M = BIT(5),
463 SCAN_CONFIG_RATE_48M = BIT(6),
464 SCAN_CONFIG_RATE_54M = BIT(7),
465 /* CCK basic rates */
466 SCAN_CONFIG_RATE_1M = BIT(8),
467 SCAN_CONFIG_RATE_2M = BIT(9),
468 SCAN_CONFIG_RATE_5M = BIT(10),
469 SCAN_CONFIG_RATE_11M = BIT(11),
470
471 /* Bits 16-27 are for supported rates */
472#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
473};
474
475enum iwl_channel_flags {
476 IWL_CHANNEL_FLAG_EBS = BIT(0),
477 IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1),
478 IWL_CHANNEL_FLAG_EBS_ADD = BIT(2),
479 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
480};
481
482/**
483 * struct iwl_scan_config
484 * @flags: enum scan_config_flags
485 * @tx_chains: valid_tx antenna - ANT_* definitions
486 * @rx_chains: valid_rx antenna - ANT_* definitions
487 * @legacy_rates: default legacy rates - enum scan_config_rates
488 * @out_of_channel_time: default max out of serving channel time
489 * @suspend_time: default max suspend time
490 * @dwell_active: default dwell time for active scan
491 * @dwell_passive: default dwell time for passive scan
492 * @dwell_fragmented: default dwell time for fragmented scan
493 * @reserved: for future use and alignment
494 * @mac_addr: default mac address to be used in probes
495 * @bcast_sta_id: the index of the station in the fw
496 * @channel_flags: default channel flags - enum iwl_channel_flags
497 * scan_config_channel_flag
498 * @channel_array: default supported channels
499 */
500struct iwl_scan_config {
501 __le32 flags;
502 __le32 tx_chains;
503 __le32 rx_chains;
504 __le32 legacy_rates;
505 __le32 out_of_channel_time;
506 __le32 suspend_time;
507 u8 dwell_active;
508 u8 dwell_passive;
509 u8 dwell_fragmented;
510 u8 reserved;
511 u8 mac_addr[ETH_ALEN];
512 u8 bcast_sta_id;
513 u8 channel_flags;
514 u8 channel_array[];
515} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
516
517/**
518 * iwl_umac_scan_flags
519 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
520 * can be preempted by other scan requests with higher priority.
 521 * The low priority scan will be resumed when the higher priority scan is
522 * completed.
523 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
524 * when scan starts.
525 */
526enum iwl_umac_scan_flags {
527 IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0),
528 IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1),
529};
530
531enum iwl_umac_scan_uid_offsets {
532 IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0,
533 IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8,
534};
535
536enum iwl_umac_scan_general_flags {
537 IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
538 IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
539 IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
540 IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
541 IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
542 IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
543 IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
544 IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
545 IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
546 IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9)
547};
548
549/**
550 * struct iwl_scan_channel_cfg_umac
551 * @flags: bitmap - 0-19: directed scan to i'th ssid.
552 * @channel_num: channel number 1-13 etc.
553 * @iter_count: repetition count for the channel.
554 * @iter_interval: interval between two scan iterations on one channel.
555 */
556struct iwl_scan_channel_cfg_umac {
557 __le32 flags;
558 u8 channel_num;
559 u8 iter_count;
560 __le16 iter_interval;
561} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */
562
563/**
564 * struct iwl_scan_umac_schedule
565 * @interval: interval in seconds between scan iterations
566 * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
567 * @reserved: for alignment and future use
568 */
569struct iwl_scan_umac_schedule {
570 __le16 interval;
571 u8 iter_count;
572 u8 reserved;
573} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
574
575/**
576 * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
577 * parameters following channels configuration array.
578 * @schedule: two scheduling plans.
579 * @delay: delay in TUs before starting the first scan iteration
580 * @reserved: for future use and alignment
581 * @preq: probe request with IEs blocks
582 * @direct_scan: list of SSIDs for directed active scan
583 */
584struct iwl_scan_req_umac_tail {
585 /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
586 struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
587 __le16 delay;
588 __le16 reserved;
589 /* SCAN_PROBE_PARAMS_API_S_VER_1 */
590 struct iwl_scan_probe_req preq;
591 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
592} __packed;
593
594/**
595 * struct iwl_scan_req_umac
596 * @flags: &enum iwl_umac_scan_flags
597 * @uid: scan id, &enum iwl_umac_scan_uid_offsets
598 * @ooc_priority: out of channel priority - &enum iwl_scan_priority
599 * @general_flags: &enum iwl_umac_scan_general_flags
600 * @reserved1: for future use and alignment
601 * @active_dwell: dwell time for active scan
602 * @passive_dwell: dwell time for passive scan
603 * @fragmented_dwell: dwell time for fragmented passive scan
604 * @max_out_time: max out of serving channel time
605 * @suspend_time: max suspend time
606 * @scan_priority: scan internal prioritization &enum iwl_scan_priority
607 * @channel_flags: &enum iwl_scan_channel_flags
608 * @n_channels: num of channels in scan request
609 * @reserved2: for future use and alignment
610 * @data: &struct iwl_scan_channel_cfg_umac and
611 * &struct iwl_scan_req_umac_tail
612 */
613struct iwl_scan_req_umac {
614 __le32 flags;
615 __le32 uid;
616 __le32 ooc_priority;
617 /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
618 __le32 general_flags;
619 u8 reserved1;
620 u8 active_dwell;
621 u8 passive_dwell;
622 u8 fragmented_dwell;
623 __le32 max_out_time;
624 __le32 suspend_time;
625 __le32 scan_priority;
626 /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
627 u8 channel_flags;
628 u8 n_channels;
629 __le16 reserved2;
630 u8 data[];
631} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
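
For illustration only: as with the LMAC variant, the @data documentation above says data[] carries n_channels entries of struct iwl_scan_channel_cfg_umac followed by one struct iwl_scan_req_umac_tail; a hypothetical size helper:

static inline size_t example_umac_scan_cmd_size(unsigned int n_channels)
{
	return sizeof(struct iwl_scan_req_umac) +
	       n_channels * sizeof(struct iwl_scan_channel_cfg_umac) +
	       sizeof(struct iwl_scan_req_umac_tail);
}
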
632
633/**
634 * struct iwl_umac_scan_abort
635 * @uid: scan id, &enum iwl_umac_scan_uid_offsets
636 * @flags: reserved
637 */
638struct iwl_umac_scan_abort {
639 __le32 uid;
640 __le32 flags;
641} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
642
643/**
644 * struct iwl_umac_scan_complete
645 * @uid: scan id, &enum iwl_umac_scan_uid_offsets
646 * @last_schedule: last scheduling line
647 * @last_iter: last scan iteration number
 648 * @status: scan status, one of &enum iwl_scan_offload_complete_status
649 * @ebs_status: &enum iwl_scan_ebs_status
650 * @time_from_last_iter: time elapsed from last iteration
651 * @reserved: for future use
652 */
653struct iwl_umac_scan_complete {
654 __le32 uid;
655 u8 last_schedule;
656 u8 last_iter;
657 u8 status;
658 u8 ebs_status;
659 __le32 time_from_last_iter;
660 __le32 reserved;
661} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
662
663#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
664/**
665 * struct iwl_scan_offload_profile_match - match information
666 * @bssid: matched bssid
667 * @channel: channel where the match occurred
668 * @energy:
669 * @matching_feature:
670 * @matching_channels: bitmap of channels that matched, referencing
 671 * the channels passed in the scan offload request
672 */
673struct iwl_scan_offload_profile_match {
674 u8 bssid[ETH_ALEN];
675 __le16 reserved;
676 u8 channel;
677 u8 energy;
678 u8 matching_feature;
679 u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
680} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
681
682/**
683 * struct iwl_scan_offload_profiles_query - match results query response
684 * @matched_profiles: bitmap of matched profiles, referencing the
685 * matches passed in the scan offload request
686 * @last_scan_age: age of the last offloaded scan
687 * @n_scans_done: number of offloaded scans done
688 * @gp2_d0u: GP2 when D0U occurred
689 * @gp2_invoked: GP2 when scan offload was invoked
690 * @resume_while_scanning: not used
691 * @self_recovery: obsolete
692 * @reserved: reserved
693 * @matches: array of match information, one for each match
694 */
695struct iwl_scan_offload_profiles_query {
696 __le32 matched_profiles;
697 __le32 last_scan_age;
698 __le32 n_scans_done;
699 __le32 gp2_d0u;
700 __le32 gp2_invoked;
701 u8 resume_while_scanning;
702 u8 self_recovery;
703 __le16 reserved;
704 struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
705} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
706
707/**
708 * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
709 * @uid: scan id, &enum iwl_umac_scan_uid_offsets
710 * @scanned_channels: number of channels scanned and number of valid elements in
711 * results array
712 * @status: one of SCAN_COMP_STATUS_*
713 * @bt_status: BT on/off status
714 * @last_channel: last channel that was scanned
715 * @tsf_low: TSF timer (lower half) in usecs
716 * @tsf_high: TSF timer (higher half) in usecs
717 * @results: array of scan results, only "scanned_channels" of them are valid
718 */
719struct iwl_umac_scan_iter_complete_notif {
720 __le32 uid;
721 u8 scanned_channels;
722 u8 status;
723 u8 bt_status;
724 u8 last_channel;
725 __le32 tsf_low;
726 __le32 tsf_high;
727 struct iwl_scan_results_notif results[];
728} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
729
730#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
deleted file mode 100644
index 493a8bdfbc9e..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ /dev/null
@@ -1,414 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __fw_api_sta_h__
66#define __fw_api_sta_h__
67
68/**
69 * enum iwl_sta_flags - flags for the ADD_STA host command
70 * @STA_FLG_REDUCED_TX_PWR_CTRL:
71 * @STA_FLG_REDUCED_TX_PWR_DATA:
72 * @STA_FLG_DISABLE_TX: set if TX should be disabled
73 * @STA_FLG_PS: set if STA is in Power Save
74 * @STA_FLG_INVALID: set if STA is invalid
75 * @STA_FLG_DLP_EN: Direct Link Protocol is enabled
76 * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
77 * @STA_FLG_DRAIN_FLOW: drain flow
78 * @STA_FLG_PAN: STA is for PAN interface
79 * @STA_FLG_CLASS_AUTH:
80 * @STA_FLG_CLASS_ASSOC:
81 * @STA_FLG_CLASS_MIMO_PROT:
82 * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU
83 * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
84 * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
85 * initialised by driver and can be updated by fw upon reception of
86 * action frames that can change the channel width. When cleared the fw
87 * will send all the frames in 20MHz even when FAT channel is requested.
88 * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
89 * driver and can be updated by fw upon reception of action frames.
90 * @STA_FLG_MFP_EN: Management Frame Protection
91 */
92enum iwl_sta_flags {
93 STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3),
94 STA_FLG_REDUCED_TX_PWR_DATA = BIT(6),
95
96 STA_FLG_DISABLE_TX = BIT(4),
97
98 STA_FLG_PS = BIT(8),
99 STA_FLG_DRAIN_FLOW = BIT(12),
100 STA_FLG_PAN = BIT(13),
101 STA_FLG_CLASS_AUTH = BIT(14),
102 STA_FLG_CLASS_ASSOC = BIT(15),
103 STA_FLG_RTS_MIMO_PROT = BIT(17),
104
105 STA_FLG_MAX_AGG_SIZE_SHIFT = 19,
106 STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT),
107 STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT),
108 STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT),
109 STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT),
110 STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT),
111 STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
112 STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
113 STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
114 STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
115
116 STA_FLG_AGG_MPDU_DENS_SHIFT = 23,
117 STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
118 STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT),
119 STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT),
120 STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
121 STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
122
123 STA_FLG_FAT_EN_20MHZ = (0 << 26),
124 STA_FLG_FAT_EN_40MHZ = (1 << 26),
125 STA_FLG_FAT_EN_80MHZ = (2 << 26),
126 STA_FLG_FAT_EN_160MHZ = (3 << 26),
127 STA_FLG_FAT_EN_MSK = (3 << 26),
128
129 STA_FLG_MIMO_EN_SISO = (0 << 28),
130 STA_FLG_MIMO_EN_MIMO2 = (1 << 28),
131 STA_FLG_MIMO_EN_MIMO3 = (2 << 28),
132 STA_FLG_MIMO_EN_MSK = (3 << 28),
133};
134
135/**
136 * enum iwl_sta_key_flag - key flags for the ADD_STA host command
137 * @STA_KEY_FLG_NO_ENC: no encryption
138 * @STA_KEY_FLG_WEP: WEP encryption algorithm
139 * @STA_KEY_FLG_CCM: CCMP encryption algorithm
140 * @STA_KEY_FLG_TKIP: TKIP encryption algorithm
141 * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
142 * @STA_KEY_FLG_CMAC: CMAC encryption algorithm
143 * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
 144 * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
145 * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
146 * station info array (1 - n 1X mode)
147 * @STA_KEY_FLG_KEYID_MSK: the index of the key
148 * @STA_KEY_NOT_VALID: key is invalid
149 * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
 150 * @STA_KEY_MULTICAST: set for multicast key
151 * @STA_KEY_MFP: key is used for Management Frame Protection
152 */
153enum iwl_sta_key_flag {
154 STA_KEY_FLG_NO_ENC = (0 << 0),
155 STA_KEY_FLG_WEP = (1 << 0),
156 STA_KEY_FLG_CCM = (2 << 0),
157 STA_KEY_FLG_TKIP = (3 << 0),
158 STA_KEY_FLG_EXT = (4 << 0),
159 STA_KEY_FLG_CMAC = (6 << 0),
160 STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
161 STA_KEY_FLG_EN_MSK = (7 << 0),
162
163 STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
164 STA_KEY_FLG_KEYID_POS = 8,
165 STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
166 STA_KEY_NOT_VALID = BIT(11),
167 STA_KEY_FLG_WEP_13BYTES = BIT(12),
168 STA_KEY_MULTICAST = BIT(14),
169 STA_KEY_MFP = BIT(15),
170};
171
172/**
173 * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
174 * @STA_MODIFY_KEY: this command modifies %key
175 * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
176 * @STA_MODIFY_TX_RATE: unused
177 * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
178 * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
179 * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
180 * @STA_MODIFY_PROT_TH:
181 * @STA_MODIFY_QUEUES: modify the queues used by this station
182 */
183enum iwl_sta_modify_flag {
184 STA_MODIFY_KEY = BIT(0),
185 STA_MODIFY_TID_DISABLE_TX = BIT(1),
186 STA_MODIFY_TX_RATE = BIT(2),
187 STA_MODIFY_ADD_BA_TID = BIT(3),
188 STA_MODIFY_REMOVE_BA_TID = BIT(4),
189 STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5),
190 STA_MODIFY_PROT_TH = BIT(6),
191 STA_MODIFY_QUEUES = BIT(7),
192};
193
194#define STA_MODE_MODIFY 1
195
196/**
197 * enum iwl_sta_sleep_flag - type of sleep of the station
198 * @STA_SLEEP_STATE_AWAKE:
199 * @STA_SLEEP_STATE_PS_POLL:
200 * @STA_SLEEP_STATE_UAPSD:
201 * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
202 * (last) released frame
203 */
204enum iwl_sta_sleep_flag {
205 STA_SLEEP_STATE_AWAKE = 0,
206 STA_SLEEP_STATE_PS_POLL = BIT(0),
207 STA_SLEEP_STATE_UAPSD = BIT(1),
208 STA_SLEEP_STATE_MOREDATA = BIT(2),
209};
210
211/* STA ID and color bits definitions */
212#define STA_ID_SEED (0x0f)
213#define STA_ID_POS (0)
214#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS)
215
216#define STA_COLOR_SEED (0x7)
217#define STA_COLOR_POS (4)
218#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS)
219
220#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \
221 (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
222#define STA_ID_N_COLOR_GET_ID(id_n_color) \
223 (((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
224
225#define STA_KEY_MAX_NUM (16)
226#define STA_KEY_IDX_INVALID (0xff)
227#define STA_KEY_MAX_DATA_KEY_NUM (4)
228#define IWL_MAX_GLOBAL_KEYS (4)
229#define STA_KEY_LEN_WEP40 (5)
230#define STA_KEY_LEN_WEP104 (13)
231
232/**
233 * struct iwl_mvm_keyinfo - key information
234 * @key_flags: type %iwl_sta_key_flag
235 * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
236 * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
237 * @key_offset: key offset in the fw's key table
238 * @key: 16-byte unicast decryption key
239 * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
240 * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
241 * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
242 */
243struct iwl_mvm_keyinfo {
244 __le16 key_flags;
245 u8 tkip_rx_tsc_byte2;
246 u8 reserved1;
247 __le16 tkip_rx_ttak[5];
248 u8 key_offset;
249 u8 reserved2;
250 u8 key[16];
251 __le64 tx_secur_seq_cnt;
252 __le64 hw_tkip_mic_rx_key;
253 __le64 hw_tkip_mic_tx_key;
254} __packed;
255
256/**
257 * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
258 * ( REPLY_ADD_STA = 0x18 )
259 * @add_modify: 1: modify existing, 0: add new station
260 * @awake_acs:
261 * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
262 * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
263 * @mac_id_n_color: the Mac context this station belongs to
264 * @addr[ETH_ALEN]: station's MAC address
265 * @sta_id: index of station in uCode's station table
266 * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
267 * alone. 1 - modify, 0 - don't change.
268 * @station_flags: look at %iwl_sta_flags
269 * @station_flags_msk: what of %station_flags have changed
270 * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
271 * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
272 * add_immediate_ba_ssn.
273 * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
274 * Set %STA_MODIFY_REMOVE_BA_TID to use this field
275 * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
276 * add_immediate_ba_tid.
277 * @sleep_tx_count: number of packets to transmit to station even though it is
278 * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
279 * keeps track of STA sleep state.
280 * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
281 * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
282 * mac-addr.
283 * @beamform_flags: beam forming controls
284 * @tfd_queue_msk: tfd queues used by this station
285 *
286 * The device contains an internal table of per-station information, with info
287 * on security keys, aggregation parameters, and Tx rates for initial Tx
288 * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
289 *
290 * ADD_STA sets up the table entry for one station, either creating a new
291 * entry, or modifying a pre-existing one.
292 */
293struct iwl_mvm_add_sta_cmd {
294 u8 add_modify;
295 u8 awake_acs;
296 __le16 tid_disable_tx;
297 __le32 mac_id_n_color;
298 u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
299 __le16 reserved2;
300 u8 sta_id;
301 u8 modify_mask;
302 __le16 reserved3;
303 __le32 station_flags;
304 __le32 station_flags_msk;
305 u8 add_immediate_ba_tid;
306 u8 remove_immediate_ba_tid;
307 __le16 add_immediate_ba_ssn;
308 __le16 sleep_tx_count;
309 __le16 sleep_state_flags;
310 __le16 assoc_id;
311 __le16 beamform_flags;
312 __le32 tfd_queue_msk;
313} __packed; /* ADD_STA_CMD_API_S_VER_7 */
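
For illustration only: a hypothetical sketch of filling an ADD_STA command that only toggles the power-save flag of an already-added station; per the documentation above, only the bits selected by station_flags_msk are considered changed.

static void example_sta_modify_ps(struct iwl_mvm_add_sta_cmd *cmd,
				  u8 sta_id, __le32 mac_id_n_color, bool ps)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->add_modify = STA_MODE_MODIFY;		/* modify an existing entry */
	cmd->sta_id = sta_id;
	cmd->mac_id_n_color = mac_id_n_color;
	cmd->station_flags_msk = cpu_to_le32(STA_FLG_PS);
	cmd->station_flags = cpu_to_le32(ps ? STA_FLG_PS : 0);
}
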
314
315/**
316 * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
317 * ( REPLY_ADD_STA_KEY = 0x17 )
318 * @sta_id: index of station in uCode's station table
319 * @key_offset: key offset in key storage
320 * @key_flags: type %iwl_sta_key_flag
321 * @key: key material data
322 * @key2: key material data
323 * @rx_secur_seq_cnt: RX security sequence counter for the key
324 * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
325 * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
326 */
327struct iwl_mvm_add_sta_key_cmd {
328 u8 sta_id;
329 u8 key_offset;
330 __le16 key_flags;
331 u8 key[16];
332 u8 key2[16];
333 u8 rx_secur_seq_cnt[16];
334 u8 tkip_rx_tsc_byte2;
335 u8 reserved;
336 __le16 tkip_rx_ttak[5];
337} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
338
339/**
340 * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
341 * @ADD_STA_SUCCESS: operation was executed successfully
342 * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
343 * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
344 * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that
345 * doesn't exist.
346 */
347enum iwl_mvm_add_sta_rsp_status {
348 ADD_STA_SUCCESS = 0x1,
349 ADD_STA_STATIONS_OVERLOAD = 0x2,
350 ADD_STA_IMMEDIATE_BA_FAILURE = 0x4,
351 ADD_STA_MODIFY_NON_EXISTING_STA = 0x8,
352};
353
354/**
 355 * struct iwl_mvm_rm_sta_cmd - Remove a station from the fw's station table
356 * ( REMOVE_STA = 0x19 )
357 * @sta_id: the station id of the station to be removed
358 */
359struct iwl_mvm_rm_sta_cmd {
360 u8 sta_id;
361 u8 reserved[3];
362} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
363
364/**
365 * struct iwl_mvm_mgmt_mcast_key_cmd
366 * ( MGMT_MCAST_KEY = 0x1f )
367 * @ctrl_flags: %iwl_sta_key_flag
368 * @IGTK:
369 * @K1: unused
370 * @K2: unused
 371 * @sta_id: station ID that supports IGTK
372 * @key_id:
373 * @receive_seq_cnt: initial RSC/PN needed for replay check
374 */
375struct iwl_mvm_mgmt_mcast_key_cmd {
376 __le32 ctrl_flags;
377 u8 IGTK[16];
378 u8 K1[16];
379 u8 K2[16];
380 __le32 key_id;
381 __le32 sta_id;
382 __le64 receive_seq_cnt;
383} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
384
385struct iwl_mvm_wep_key {
386 u8 key_index;
387 u8 key_offset;
388 __le16 reserved1;
389 u8 key_size;
390 u8 reserved2[3];
391 u8 key[16];
392} __packed;
393
394struct iwl_mvm_wep_key_cmd {
395 __le32 mac_id_n_color;
396 u8 num_keys;
397 u8 decryption_type;
398 u8 flags;
399 u8 reserved;
400 struct iwl_mvm_wep_key wep_key[0];
401} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
402
403/**
404 * struct iwl_mvm_eosp_notification - EOSP notification from firmware
405 * @remain_frame_count: # of frames remaining, non-zero if SP was cut
406 * short by GO absence
407 * @sta_id: station ID
408 */
409struct iwl_mvm_eosp_notification {
410 __le32 remain_frame_count;
411 __le32 sta_id;
412} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
413
414#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
deleted file mode 100644
index 0c321f63ee42..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
+++ /dev/null
@@ -1,284 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __fw_api_stats_h__
67#define __fw_api_stats_h__
68#include "fw-api-mac.h"
69
70struct mvm_statistics_dbg {
71 __le32 burst_check;
72 __le32 burst_count;
73 __le32 wait_for_silence_timeout_cnt;
74 __le32 reserved[3];
75} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
76
77struct mvm_statistics_div {
78 __le32 tx_on_a;
79 __le32 tx_on_b;
80 __le32 exec_time;
81 __le32 probe_time;
82 __le32 rssi_ant;
83 __le32 reserved2;
84} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
85
86struct mvm_statistics_rx_non_phy {
87 __le32 bogus_cts; /* CTS received when not expecting CTS */
88 __le32 bogus_ack; /* ACK received when not expecting ACK */
89 __le32 non_bssid_frames; /* number of frames with BSSID that
90 * doesn't belong to the STA BSSID */
91 __le32 filtered_frames; /* count frames that were dumped in the
92 * filtering process */
93 __le32 non_channel_beacons; /* beacons with our bss id but not on
94 * our serving channel */
95 __le32 channel_beacons; /* beacons with our bss id and in our
96 * serving channel */
97 __le32 num_missed_bcon; /* number of missed beacons */
98 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
99 * ADC was in saturation */
100 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
101 * for INA */
102 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
103 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
104 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
105 __le32 interference_data_flag; /* flag for interference data
106 * availability. 1 when data is
107 * available. */
108 __le32 channel_load; /* counts RX Enable time in uSec */
109 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
110 * and CCK) counter */
111 __le32 beacon_rssi_a;
112 __le32 beacon_rssi_b;
113 __le32 beacon_rssi_c;
114 __le32 beacon_energy_a;
115 __le32 beacon_energy_b;
116 __le32 beacon_energy_c;
117 __le32 num_bt_kills;
118 __le32 mac_id;
119 __le32 directed_data_mpdu;
120} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
121
122struct mvm_statistics_rx_phy {
123 __le32 ina_cnt;
124 __le32 fina_cnt;
125 __le32 plcp_err;
126 __le32 crc32_err;
127 __le32 overrun_err;
128 __le32 early_overrun_err;
129 __le32 crc32_good;
130 __le32 false_alarm_cnt;
131 __le32 fina_sync_err_cnt;
132 __le32 sfd_timeout;
133 __le32 fina_timeout;
134 __le32 unresponded_rts;
135 __le32 rxe_frame_lmt_overrun;
136 __le32 sent_ack_cnt;
137 __le32 sent_cts_cnt;
138 __le32 sent_ba_rsp_cnt;
139 __le32 dsp_self_kill;
140 __le32 mh_format_err;
141 __le32 re_acq_main_rssi_sum;
142 __le32 reserved;
143} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
144
145struct mvm_statistics_rx_ht_phy {
146 __le32 plcp_err;
147 __le32 overrun_err;
148 __le32 early_overrun_err;
149 __le32 crc32_good;
150 __le32 crc32_err;
151 __le32 mh_format_err;
152 __le32 agg_crc32_good;
153 __le32 agg_mpdu_cnt;
154 __le32 agg_cnt;
155 __le32 unsupport_mcs;
156} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
157
158struct mvm_statistics_tx_non_phy {
159 __le32 preamble_cnt;
160 __le32 rx_detected_cnt;
161 __le32 bt_prio_defer_cnt;
162 __le32 bt_prio_kill_cnt;
163 __le32 few_bytes_cnt;
164 __le32 cts_timeout;
165 __le32 ack_timeout;
166 __le32 expected_ack_cnt;
167 __le32 actual_ack_cnt;
168 __le32 dump_msdu_cnt;
169 __le32 burst_abort_next_frame_mismatch_cnt;
170 __le32 burst_abort_missing_next_frame_cnt;
171 __le32 cts_timeout_collision;
172 __le32 ack_or_ba_timeout_collision;
173} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */
174
175#define MAX_CHAINS 3
176
177struct mvm_statistics_tx_non_phy_agg {
178 __le32 ba_timeout;
179 __le32 ba_reschedule_frames;
180 __le32 scd_query_agg_frame_cnt;
181 __le32 scd_query_no_agg;
182 __le32 scd_query_agg;
183 __le32 scd_query_mismatch;
184 __le32 frame_not_ready;
185 __le32 underrun;
186 __le32 bt_prio_kill;
187 __le32 rx_ba_rsp_cnt;
188 __s8 txpower[MAX_CHAINS];
189 __s8 reserved;
190 __le32 reserved2;
191} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
192
193struct mvm_statistics_tx_channel_width {
194 __le32 ext_cca_narrow_ch20[1];
195 __le32 ext_cca_narrow_ch40[2];
196 __le32 ext_cca_narrow_ch80[3];
197 __le32 ext_cca_narrow_ch160[4];
198 __le32 last_tx_ch_width_indx;
199 __le32 rx_detected_per_ch_width[4];
200 __le32 success_per_ch_width[4];
201 __le32 fail_per_ch_width[4];
202}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
203
204struct mvm_statistics_tx {
205 struct mvm_statistics_tx_non_phy general;
206 struct mvm_statistics_tx_non_phy_agg agg;
207 struct mvm_statistics_tx_channel_width channel_width;
208} __packed; /* STATISTICS_TX_API_S_VER_4 */
209
210
211struct mvm_statistics_bt_activity {
212 __le32 hi_priority_tx_req_cnt;
213 __le32 hi_priority_tx_denied_cnt;
214 __le32 lo_priority_tx_req_cnt;
215 __le32 lo_priority_tx_denied_cnt;
216 __le32 hi_priority_rx_req_cnt;
217 __le32 hi_priority_rx_denied_cnt;
218 __le32 lo_priority_rx_req_cnt;
219 __le32 lo_priority_rx_denied_cnt;
220} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
221
222struct mvm_statistics_general_v8 {
223 __le32 radio_temperature;
224 __le32 radio_voltage;
225 struct mvm_statistics_dbg dbg;
226 __le32 sleep_time;
227 __le32 slots_out;
228 __le32 slots_idle;
229 __le32 ttl_timestamp;
230 struct mvm_statistics_div slow_div;
231 __le32 rx_enable_counter;
232 /*
233 * num_of_sos_states:
234 * count the number of times we have to re-tune
235 * in order to get out of bad PHY status
236 */
237 __le32 num_of_sos_states;
238 __le32 beacon_filtered;
239 __le32 missed_beacons;
240 u8 beacon_filter_average_energy;
241 u8 beacon_filter_reason;
242 u8 beacon_filter_current_energy;
243 u8 beacon_filter_reserved;
244 __le32 beacon_filter_delta_time;
245 struct mvm_statistics_bt_activity bt_activity;
246 __le64 rx_time;
247 __le64 on_time_rf;
248 __le64 on_time_scan;
249 __le64 tx_time;
250 __le32 beacon_counter[NUM_MAC_INDEX];
251 u8 beacon_average_energy[NUM_MAC_INDEX];
252 u8 reserved[4 - (NUM_MAC_INDEX % 4)];
253} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
254
255struct mvm_statistics_rx {
256 struct mvm_statistics_rx_phy ofdm;
257 struct mvm_statistics_rx_phy cck;
258 struct mvm_statistics_rx_non_phy general;
259 struct mvm_statistics_rx_ht_phy ofdm_ht;
260} __packed; /* STATISTICS_RX_API_S_VER_3 */
261
262/*
263 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
264 *
265 * By default, uCode issues this notification after receiving a beacon
266 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
267 * STATISTICS_CMD (0x9c), below.
268 */
269
270struct iwl_notif_statistics_v10 {
271 __le32 flag;
272 struct mvm_statistics_rx rx;
273 struct mvm_statistics_tx tx;
274 struct mvm_statistics_general_v8 general;
275} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
276
277#define IWL_STATISTICS_FLG_CLEAR 0x1
278#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2
279
280struct iwl_statistics_cmd {
281 __le32 flags;
282} __packed; /* STATISTICS_CMD_API_S_VER_1 */
283
284#endif /* __fw_api_stats_h__ */
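
As a side note for readers of the statistics API above: the only host-controlled knob is the flags word of STATISTICS_CMD. The sketch below is standalone and illustrative only (not driver code; host-endian math, the driver would convert with cpu_to_le32() before sending) and shows the two documented flags: clearing the accumulated counters and suppressing the per-beacon STATISTICS_NOTIFICATION.

#include <stdint.h>
#include <stdio.h>

#define IWL_STATISTICS_FLG_CLEAR		0x1
#define IWL_STATISTICS_FLG_DISABLE_NOTIF	0x2

int main(void)
{
	/* Ask the firmware to clear its counters on this query... */
	uint32_t flags = IWL_STATISTICS_FLG_CLEAR;

	/* ...and stop the per-beacon statistics notification. */
	flags |= IWL_STATISTICS_FLG_DISABLE_NOTIF;

	printf("STATISTICS_CMD flags = 0x%08x\n", flags);
	return 0;
}
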
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
deleted file mode 100644
index eed6271d01a3..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
+++ /dev/null
@@ -1,386 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Deutschland GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2015 Intel Deutschland GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __fw_api_tof_h__
64#define __fw_api_tof_h__
65
66#include "fw-api.h"
67
68/* ToF sub-group command IDs */
69enum iwl_mvm_tof_sub_grp_ids {
70 TOF_RANGE_REQ_CMD = 0x1,
71 TOF_CONFIG_CMD = 0x2,
72 TOF_RANGE_ABORT_CMD = 0x3,
73 TOF_RANGE_REQ_EXT_CMD = 0x4,
74 TOF_RESPONDER_CONFIG_CMD = 0x5,
75 TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
76 TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
77 TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
78 TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
79 TOF_RANGE_RESPONSE_NOTIF = 0xFE,
80 TOF_MCSI_DEBUG_NOTIF = 0xFB,
81};
82
83/**
84 * struct iwl_tof_config_cmd - ToF configuration
85 * @tof_disabled: 0 enabled, 1 - disabled
86 * @one_sided_disabled: 0 enabled, 1 - disabled
87 * @is_debug_mode: 1 debug mode, 0 - otherwise
88 * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise
89 */
90struct iwl_tof_config_cmd {
91 __le32 sub_grp_cmd_id;
92 u8 tof_disabled;
93 u8 one_sided_disabled;
94 u8 is_debug_mode;
95 u8 is_buf_required;
96} __packed;
97
98/**
99 * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
100 * @burst_period: future use: (currently hard coded in the LMAC)
101 * The interval between two sequential bursts.
102 * @min_delta_ftm: future use: (currently hard coded in the LMAC)
103 * The minimum delay between two sequential FTM Responses
104 * in the same burst.
105 * @burst_duration: future use: (currently hard coded in the LMAC)
106 * The total time for all FTMs handshake in the same burst.
107 * Affect the time events duration in the LMAC.
108 * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
109 * The number of bursts for the current ToF request. Affect
110 * the number of events allocations in the current iteration.
111 * @get_ch_est: for xVT only, NA for driver
112 * @abort_responder: when set to '1' - Responder will terminate its activity
113 * (all other fields in the command are ignored)
114 * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
 115 * params and use the recommended Initiator params.
116 * 0 - otherwise
117 * @channel_num: current AP Channel
118 * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
119 * @rate: current AP rate
120 * @ctrl_ch_position: coding of the control channel position relative to
121 * the center frequency.
122 * 40MHz 0 below center, 1 above center
123 * 80MHz bits [0..1]: 0 the near 20MHz to the center,
124 * 1 the far 20MHz to the center
125 * bit[2] as above 40MHz
126 * @ftm_per_burst: FTMs per Burst
127 * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
128 * '1' - we measure over the Initial FTM Response
129 * @asap_mode: ASAP / Non ASAP mode for the current WLS station
130 * @sta_id: index of the AP STA when in AP mode
131 * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
132 * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
133 * purposes, simulating station movement by adding various values
134 * to this field
135 * @bssid: Current AP BSSID
136 */
137struct iwl_tof_responder_config_cmd {
138 __le32 sub_grp_cmd_id;
139 __le16 burst_period;
140 u8 min_delta_ftm;
141 u8 burst_duration;
142 u8 num_of_burst_exp;
143 u8 get_ch_est;
144 u8 abort_responder;
145 u8 recv_sta_req_params;
146 u8 channel_num;
147 u8 bandwidth;
148 u8 rate;
149 u8 ctrl_ch_position;
150 u8 ftm_per_burst;
151 u8 ftm_resp_ts_avail;
152 u8 asap_mode;
153 u8 sta_id;
154 __le16 tsf_timer_offset_msecs;
155 __le16 toa_offset;
156 u8 bssid[ETH_ALEN];
157} __packed;
158
159/**
160 * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
161 * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
162 * @min_delta_ftm: Minimal time between two consecutive measurements,
163 * in units of 100us. 0 means no preference by station
164 * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
 165 * value to be sent to the AP
166 * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
167 * value to be sent to the AP
168 * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
169 * value to be sent to the AP
170 */
171struct iwl_tof_range_req_ext_cmd {
172 __le32 sub_grp_cmd_id;
173 __le16 tsf_timer_offset_msec;
174 __le16 reserved;
175 u8 min_delta_ftm;
176 u8 ftm_format_and_bw20M;
177 u8 ftm_format_and_bw40M;
178 u8 ftm_format_and_bw80M;
179} __packed;
180
181#define IWL_MVM_TOF_MAX_APS 21
182
183/**
184 * struct iwl_tof_range_req_ap_entry - AP configuration parameters
185 * @channel_num: Current AP Channel
186 * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
187 * @tsf_delta_direction: TSF relatively to the subject AP
188 * @ctrl_ch_position: Coding of the control channel position relative to the
189 * center frequency.
190 * 40MHz 0 below center, 1 above center
191 * 80MHz bits [0..1]: 0 the near 20MHz to the center,
192 * 1 the far 20MHz to the center
193 * bit[2] as above 40MHz
194 * @bssid: AP's bss id
195 * @measure_type: Measurement type: 0 - two sided, 1 - One sided
196 * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
197 * number of measurement iterations (min 2^0 = 1, max 2^14)
198 * @burst_period: Recommended value to be sent to the AP. Measurement
199 * periodicity In units of 100ms. ignored if num_of_bursts = 0
200 * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
201 * 1-sided: how many rts/cts pairs should be used per burst.
202 * @retries_per_sample: Max number of retries that the LMAC should send
203 * in case of no replies by the AP.
204 * @tsf_delta: TSF Delta in units of microseconds.
205 * The difference between the AP TSF and the device local clock.
206 * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
207 * Bit[1] Civic should be sent in the FTMR
208 * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
209 * @enable_dyn_ack: Enable Dynamic ACK BW.
210 * 0 Initiator interact with regular AP
211 * 1 Initiator interact with Responder machine: need to send the
212 * Initiator Acks with HT 40MHz / 80MHz, since the Responder should
213 * use it for its ch est measurement (this flag will be set when we
214 * configure the opposite machine to be Responder).
215 * @rssi: Last received value
 216 * legal values: -128-0 (0x7f). Values above 0x0 indicate an invalid value.
217 */
218struct iwl_tof_range_req_ap_entry {
219 u8 channel_num;
220 u8 bandwidth;
221 u8 tsf_delta_direction;
222 u8 ctrl_ch_position;
223 u8 bssid[ETH_ALEN];
224 u8 measure_type;
225 u8 num_of_bursts;
226 __le16 burst_period;
227 u8 samples_per_burst;
228 u8 retries_per_sample;
229 __le32 tsf_delta;
230 u8 location_req;
231 u8 asap_mode;
232 u8 enable_dyn_ack;
233 s8 rssi;
234} __packed;
235
236/**
237 * enum iwl_tof_response_mode
238 * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
239 * possible (not supported for this release)
240 * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
241 * timeout expiration
242 * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
243 * earlier of: measurements completion / timeout
244 * expiration.
245 */
246enum iwl_tof_response_mode {
247 IWL_MVM_TOF_RESPOSE_ASAP = 1,
248 IWL_MVM_TOF_RESPOSE_TIMEOUT,
249 IWL_MVM_TOF_RESPOSE_COMPLETE,
250};
251
252/**
253 * struct iwl_tof_range_req_cmd - start measurement cmd
254 * @request_id: A Token incremented per request. The same Token will be
255 * sent back in the range response
256 * @initiator: 0- NW initiated, 1 - Client Initiated
257 * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
258 * '1' - run ML-Algo for ToF only
259 * @req_timeout: Requested timeout of the response in units of 100ms.
260 * This is equivalent to the session time configured to the
261 * LMAC in Initiator Request
262 * @report_policy: Supported partially for this release: For current release -
263 * the range report will be uploaded as a batch when ready or
264 * when the session is done (successfully / partially).
265 * one of iwl_tof_response_mode.
266 * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
267 * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
268 * '1' Use MAC Address randomization according to the below
269 * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
270 * Bits set to 1 shall be randomized by the UMAC
271 */
272struct iwl_tof_range_req_cmd {
273 __le32 sub_grp_cmd_id;
274 u8 request_id;
275 u8 initiator;
276 u8 one_sided_los_disable;
277 u8 req_timeout;
278 u8 report_policy;
279 u8 los_det_disable;
280 u8 num_of_ap;
281 u8 macaddr_random;
282 u8 macaddr_template[ETH_ALEN];
283 u8 macaddr_mask[ETH_ALEN];
284 struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
285} __packed;
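
For illustration, the macaddr_template/macaddr_mask semantics documented above can be expressed as a small host-side helper. This is a hypothetical sketch, not part of the driver; rand() merely stands in for whatever randomness source the UMAC actually uses. Mask bits set to 0 keep the template bits, mask bits set to 1 are randomized.

#include <stdint.h>
#include <stdlib.h>

#define ETH_ALEN 6

/* Apply the documented template/mask rule to build a randomized MAC. */
static void tof_build_random_macaddr(uint8_t out[ETH_ALEN],
				     const uint8_t tmpl[ETH_ALEN],
				     const uint8_t mask[ETH_ALEN])
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		uint8_t rnd = (uint8_t)rand();	/* stand-in randomness */

		/* mask bit 0 -> copy template bit, mask bit 1 -> random */
		out[i] = (tmpl[i] & ~mask[i]) | (rnd & mask[i]);
	}
}
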
286
287/**
288 * struct iwl_tof_gen_resp_cmd - generic ToF response
289 */
290struct iwl_tof_gen_resp_cmd {
291 __le32 sub_grp_cmd_id;
292 u8 data[];
293} __packed;
294
295/**
296 * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
297 * @measure_status: current APs measurement status
298 * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
299 * @rtt: The Round Trip Time that took for the last measurement for
300 * current AP [nSec]
301 * @rtt_variance: The Variance of the RTT values measured for current AP
302 * @rtt_spread: The Difference between the maximum and the minimum RTT
303 * values measured for current AP in the current session [nsec]
304 * @rssi: RSSI as uploaded in the Channel Estimation notification
305 * @rssi_spread: The Difference between the maximum and the minimum RSSI values
306 * measured for current AP in the current session
307 * @range: Measured range [cm]
308 * @range_variance: Measured range variance [cm]
309 * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
310 * uploaded by the LMAC
311 */
312struct iwl_tof_range_rsp_ap_entry_ntfy {
313 u8 bssid[ETH_ALEN];
314 u8 measure_status;
315 u8 measure_bw;
316 __le32 rtt;
317 __le32 rtt_variance;
318 __le32 rtt_spread;
319 s8 rssi;
320 u8 rssi_spread;
321 __le16 reserved;
322 __le32 range;
323 __le32 range_variance;
324 __le32 timestamp;
325} __packed;
326
327/**
328 * struct iwl_tof_range_rsp_ntfy -
329 * @request_id: A Token ID of the corresponding Range request
330 * @request_status: status of current measurement session
 331 * @last_in_batch: report policy (when not all responses are uploaded at once)
332 * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
333 */
334struct iwl_tof_range_rsp_ntfy {
335 u8 request_id;
336 u8 request_status;
337 u8 last_in_batch;
338 u8 num_of_aps;
339 struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
340} __packed;
341
342#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
343/**
344 * struct iwl_tof_mcsi_notif - used for debug
345 * @token: token ID for the current session
346 * @role: '0' - initiator, '1' - responder
347 * @initiator_bssid: initiator machine
348 * @responder_bssid: responder machine
349 * @mcsi_buffer: debug data
350 */
351struct iwl_tof_mcsi_notif {
352 u8 token;
353 u8 role;
354 __le16 reserved;
355 u8 initiator_bssid[ETH_ALEN];
356 u8 responder_bssid[ETH_ALEN];
357 u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
358} __packed;
359
360/**
 361 * struct iwl_tof_neighbor_report
362 * @bssid: BSSID of the AP which sent the report
363 * @request_token: same token as the corresponding request
364 * @status:
365 * @report_ie_len: the length of the response frame starting from the Element ID
366 * @data: the IEs
367 */
368struct iwl_tof_neighbor_report {
369 u8 bssid[ETH_ALEN];
370 u8 request_token;
371 u8 status;
372 __le16 report_ie_len;
373 u8 data[];
374} __packed;
375
376/**
377 * struct iwl_tof_range_abort_cmd
378 * @request_id: corresponds to a range request
379 */
380struct iwl_tof_range_abort_cmd {
381 __le32 sub_grp_cmd_id;
382 u8 request_id;
383 u8 reserved[3];
384} __packed;
385
386#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
deleted file mode 100644
index 853698ab8b05..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ /dev/null
@@ -1,646 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __fw_api_tx_h__
64#define __fw_api_tx_h__
65
66/**
67 * enum iwl_tx_flags - bitmasks for tx_flags in TX command
68 * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
69 * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame
70 * @TX_CMD_FLG_ACK: expect ACK from receiving station
71 * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
72 * Otherwise, use rate_n_flags from the TX command
73 * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
74 * Must set TX_CMD_FLG_ACK with this flag.
75 * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
76 * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
77 * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
78 * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
79 * on old firmwares).
80 * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
81 * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
82 * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
83 * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
84 * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
85 * Should be set for beacons and probe responses
86 * @TX_CMD_FLG_CALIB: activate PA TX power calibrations
87 * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
88 * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
89 * Should be set for 26/30 length MAC headers
90 * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
91 * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
92 * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
93 * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
94 * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
95 * @TX_CMD_FLG_EXEC_PAPD: execute PAPD
96 * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power
97 * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
98 */
99enum iwl_tx_flags {
100 TX_CMD_FLG_PROT_REQUIRE = BIT(0),
101 TX_CMD_FLG_WRITE_TX_POWER = BIT(1),
102 TX_CMD_FLG_ACK = BIT(3),
103 TX_CMD_FLG_STA_RATE = BIT(4),
104 TX_CMD_FLG_BAR = BIT(6),
105 TX_CMD_FLG_TXOP_PROT = BIT(7),
106 TX_CMD_FLG_VHT_NDPA = BIT(8),
107 TX_CMD_FLG_HT_NDPA = BIT(9),
108 TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
109 TX_CMD_FLG_BT_PRIO_POS = 11,
110 TX_CMD_FLG_BT_DIS = BIT(12),
111 TX_CMD_FLG_SEQ_CTL = BIT(13),
112 TX_CMD_FLG_MORE_FRAG = BIT(14),
113 TX_CMD_FLG_TSF = BIT(16),
114 TX_CMD_FLG_CALIB = BIT(17),
115 TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
116 TX_CMD_FLG_MH_PAD = BIT(20),
117 TX_CMD_FLG_RESP_TO_DRV = BIT(21),
118 TX_CMD_FLG_CCMP_AGG = BIT(22),
119 TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
120 TX_CMD_FLG_DUR = BIT(25),
121 TX_CMD_FLG_FW_DROP = BIT(26),
122 TX_CMD_FLG_EXEC_PAPD = BIT(27),
123 TX_CMD_FLG_PAPD_TYPE = BIT(28),
124 TX_CMD_FLG_HCCA_CHUNK = BIT(31)
125}; /* TX_FLAGS_BITS_API_S_VER_1 */
126
127/**
128 * enum iwl_tx_pm_timeouts - pm timeout values in TX command
129 * @PM_FRAME_NONE: no need to suspend sleep mode
130 * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
131 * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
132 */
133enum iwl_tx_pm_timeouts {
134 PM_FRAME_NONE = 0,
135 PM_FRAME_MGMT = 2,
136 PM_FRAME_ASSOC = 3,
137};
138
139/*
140 * TX command security control
141 */
142#define TX_CMD_SEC_WEP 0x01
143#define TX_CMD_SEC_CCM 0x02
144#define TX_CMD_SEC_TKIP 0x03
145#define TX_CMD_SEC_EXT 0x04
146#define TX_CMD_SEC_MSK 0x07
147#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
148#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
149#define TX_CMD_SEC_KEY128 0x08
150
 151/* TODO: how are these values OK with only a 16 bit variable??? */
152/*
153 * TX command next frame info
154 *
155 * bits 0:2 - security control (TX_CMD_SEC_*)
156 * bit 3 - immediate ACK required
157 * bit 4 - rate is taken from STA table
158 * bit 5 - frame belongs to BA stream
159 * bit 6 - immediate BA response expected
160 * bit 7 - unused
161 * bits 8:15 - Station ID
162 * bits 16:31 - rate
163 */
164#define TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
165#define TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
166#define TX_CMD_NEXT_FRAME_BA_MSK (0x20)
167#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
168#define TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
169#define TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
170#define TX_CMD_NEXT_FRAME_STA_ID_POS (8)
171#define TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
172#define TX_CMD_NEXT_FRAME_RATE_POS (16)
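
To make the bit layout above concrete, here is a minimal sketch assuming the 32-bit layout described in the comment; the helper name is invented for the example. It packs the security control, flags, station ID and rate into one word using the masks and positions defined above.

#include <stdint.h>

#define TX_CMD_SEC_MSK			0x07
#define TX_CMD_NEXT_FRAME_FLAGS_MSK	(0xf8)
#define TX_CMD_NEXT_FRAME_STA_ID_POS	(8)
#define TX_CMD_NEXT_FRAME_STA_ID_MSK	(0xff00)
#define TX_CMD_NEXT_FRAME_RATE_POS	(16)
#define TX_CMD_NEXT_FRAME_RATE_MSK	(0xffff0000)

/* Pack the next-frame info word per the layout documented above. */
static uint32_t pack_next_frame_info(uint8_t sec_ctl, uint8_t flags,
				     uint8_t sta_id, uint16_t rate)
{
	return (sec_ctl & TX_CMD_SEC_MSK) |
	       (flags & TX_CMD_NEXT_FRAME_FLAGS_MSK) |
	       (((uint32_t)sta_id << TX_CMD_NEXT_FRAME_STA_ID_POS) &
		TX_CMD_NEXT_FRAME_STA_ID_MSK) |
	       (((uint32_t)rate << TX_CMD_NEXT_FRAME_RATE_POS) &
		TX_CMD_NEXT_FRAME_RATE_MSK);
}
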
173
174/*
175 * TX command Frame life time in us - to be written in pm_frame_timeout
176 */
177#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF
178#define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms*/
179#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
180#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
181
182/*
183 * TID for non QoS frames - to be written in tid_tspec
184 */
185#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
186
187/*
188 * Limits on the retransmissions - to be written in {data,rts}_retry_limit
189 */
190#define IWL_DEFAULT_TX_RETRY 15
191#define IWL_MGMT_DFAULT_RETRY_LIMIT 3
192#define IWL_RTS_DFAULT_RETRY_LIMIT 60
193#define IWL_BAR_DFAULT_RETRY_LIMIT 60
194#define IWL_LOW_RETRY_LIMIT 7
195
196/* TODO: complete documentation for try_cnt and btkill_cnt */
197/**
198 * struct iwl_tx_cmd - TX command struct to FW
199 * ( TX_CMD = 0x1c )
200 * @len: in bytes of the payload, see below for details
201 * @tx_flags: combination of TX_CMD_FLG_*
202 * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
203 * cleared. Combination of RATE_MCS_*
204 * @sta_id: index of destination station in FW station table
205 * @sec_ctl: security control, TX_CMD_SEC_*
 206 * @initial_rate_index: index into the rate table for initial TX attempt.
207 * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
208 * @key: security key
209 * @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_*
210 * @life_time: frame life time (usecs??)
211 * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
 212 * btkill_cnt + reserved), first 32 bits. "0" disables usage.
213 * @dram_msb_ptr: upper bits of the scratch physical address
214 * @rts_retry_limit: max attempts for RTS
215 * @data_retry_limit: max attempts to send the data packet
 216 * @tid_tspec: TID/tspec
217 * @pm_frame_timeout: PM TX frame timeout
218 *
219 * The byte count (both len and next_frame_len) includes MAC header
220 * (24/26/30/32 bytes)
221 * + 2 bytes pad if 26/30 header size
222 * + 8 byte IV for CCM or TKIP (not used for WEP)
223 * + Data payload
224 * + 8-byte MIC (not used for CCM/WEP)
225 * It does not include post-MAC padding, i.e.,
226 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
227 * Range of len: 14-2342 bytes.
228 *
229 * After the struct fields the MAC header is placed, plus any padding,
 230 * and then the actual payload.
231 */
232struct iwl_tx_cmd {
233 __le16 len;
234 __le16 next_frame_len;
235 __le32 tx_flags;
236 struct {
237 u8 try_cnt;
238 u8 btkill_cnt;
239 __le16 reserved;
240 } scratch; /* DRAM_SCRATCH_API_U_VER_1 */
241 __le32 rate_n_flags;
242 u8 sta_id;
243 u8 sec_ctl;
244 u8 initial_rate_index;
245 u8 reserved2;
246 u8 key[16];
247 __le32 reserved3;
248 __le32 life_time;
249 __le32 dram_lsb_ptr;
250 u8 dram_msb_ptr;
251 u8 rts_retry_limit;
252 u8 data_retry_limit;
253 u8 tid_tspec;
254 __le16 pm_frame_timeout;
255 __le16 reserved4;
256 u8 payload[0];
257 struct ieee80211_hdr hdr[0];
258} __packed; /* TX_CMD_API_S_VER_3 */
259
260/*
261 * TX response related data
262 */
263
264/*
265 * enum iwl_tx_status - status that is returned by the fw after attempts to Tx
266 * @TX_STATUS_SUCCESS:
267 * @TX_STATUS_DIRECT_DONE:
268 * @TX_STATUS_POSTPONE_DELAY:
269 * @TX_STATUS_POSTPONE_FEW_BYTES:
270 * @TX_STATUS_POSTPONE_BT_PRIO:
271 * @TX_STATUS_POSTPONE_QUIET_PERIOD:
272 * @TX_STATUS_POSTPONE_CALC_TTAK:
273 * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
274 * @TX_STATUS_FAIL_SHORT_LIMIT:
275 * @TX_STATUS_FAIL_LONG_LIMIT:
276 * @TX_STATUS_FAIL_UNDERRUN:
277 * @TX_STATUS_FAIL_DRAIN_FLOW:
278 * @TX_STATUS_FAIL_RFKILL_FLUSH:
279 * @TX_STATUS_FAIL_LIFE_EXPIRE:
280 * @TX_STATUS_FAIL_DEST_PS:
281 * @TX_STATUS_FAIL_HOST_ABORTED:
282 * @TX_STATUS_FAIL_BT_RETRY:
283 * @TX_STATUS_FAIL_STA_INVALID:
 284 * @TX_STATUS_FAIL_FRAG_DROPPED:
285 * @TX_STATUS_FAIL_TID_DISABLE:
286 * @TX_STATUS_FAIL_FIFO_FLUSHED:
287 * @TX_STATUS_FAIL_SMALL_CF_POLL:
288 * @TX_STATUS_FAIL_FW_DROP:
289 * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
290 * STA table
 291 * @TX_STATUS_INTERNAL_ABORT:
292 * @TX_MODE_MSK:
293 * @TX_MODE_NO_BURST:
294 * @TX_MODE_IN_BURST_SEQ:
295 * @TX_MODE_FIRST_IN_BURST:
296 * @TX_QUEUE_NUM_MSK:
297 *
298 * Valid only if frame_count =1
299 * TODO: complete documentation
300 */
301enum iwl_tx_status {
302 TX_STATUS_MSK = 0x000000ff,
303 TX_STATUS_SUCCESS = 0x01,
304 TX_STATUS_DIRECT_DONE = 0x02,
305 /* postpone TX */
306 TX_STATUS_POSTPONE_DELAY = 0x40,
307 TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
308 TX_STATUS_POSTPONE_BT_PRIO = 0x42,
309 TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
310 TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
311 /* abort TX */
312 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
313 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
314 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
315 TX_STATUS_FAIL_UNDERRUN = 0x84,
316 TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
317 TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
318 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
319 TX_STATUS_FAIL_DEST_PS = 0x88,
320 TX_STATUS_FAIL_HOST_ABORTED = 0x89,
321 TX_STATUS_FAIL_BT_RETRY = 0x8a,
322 TX_STATUS_FAIL_STA_INVALID = 0x8b,
323 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
324 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
325 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
326 TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
327 TX_STATUS_FAIL_FW_DROP = 0x90,
328 TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,
329 TX_STATUS_INTERNAL_ABORT = 0x92,
330 TX_MODE_MSK = 0x00000f00,
331 TX_MODE_NO_BURST = 0x00000000,
332 TX_MODE_IN_BURST_SEQ = 0x00000100,
333 TX_MODE_FIRST_IN_BURST = 0x00000200,
334 TX_QUEUE_NUM_MSK = 0x0001f000,
335 TX_NARROW_BW_MSK = 0x00060000,
336 TX_NARROW_BW_1DIV2 = 0x00020000,
337 TX_NARROW_BW_1DIV4 = 0x00040000,
338 TX_NARROW_BW_1DIV8 = 0x00060000,
339};
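
A raw Tx status word therefore carries several fields at once. The helpers below are a standalone sketch (not driver code; the shift of 12 for the queue number is inferred here from TX_QUEUE_NUM_MSK = 0x0001f000, not stated in the header) showing how the status, burst mode and queue number can be separated with the masks above.

#include <stdint.h>

#define TX_STATUS_MSK		0x000000ff
#define TX_MODE_MSK		0x00000f00
#define TX_QUEUE_NUM_MSK	0x0001f000

static inline uint8_t tx_word_status(uint32_t word)
{
	return word & TX_STATUS_MSK;		/* e.g. TX_STATUS_SUCCESS */
}

static inline uint32_t tx_word_mode(uint32_t word)
{
	return word & TX_MODE_MSK;		/* e.g. TX_MODE_IN_BURST_SEQ */
}

static inline uint8_t tx_word_queue(uint32_t word)
{
	return (word & TX_QUEUE_NUM_MSK) >> 12;	/* inferred shift */
}
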
340
341/*
342 * enum iwl_tx_agg_status - TX aggregation status
343 * @AGG_TX_STATE_STATUS_MSK:
344 * @AGG_TX_STATE_TRANSMITTED:
345 * @AGG_TX_STATE_UNDERRUN:
346 * @AGG_TX_STATE_BT_PRIO:
347 * @AGG_TX_STATE_FEW_BYTES:
348 * @AGG_TX_STATE_ABORT:
349 * @AGG_TX_STATE_LAST_SENT_TTL:
350 * @AGG_TX_STATE_LAST_SENT_TRY_CNT:
351 * @AGG_TX_STATE_LAST_SENT_BT_KILL:
352 * @AGG_TX_STATE_SCD_QUERY:
353 * @AGG_TX_STATE_TEST_BAD_CRC32:
354 * @AGG_TX_STATE_RESPONSE:
355 * @AGG_TX_STATE_DUMP_TX:
356 * @AGG_TX_STATE_DELAY_TX:
357 * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
358 * occur if tx failed for this frame when it was a member of a previous
359 * aggregation block). If rate scaling is used, retry count indicates the
360 * rate table entry used for all frames in the new agg.
 361 * @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
362 * this frame
363 *
364 * TODO: complete documentation
365 */
366enum iwl_tx_agg_status {
367 AGG_TX_STATE_STATUS_MSK = 0x00fff,
368 AGG_TX_STATE_TRANSMITTED = 0x000,
369 AGG_TX_STATE_UNDERRUN = 0x001,
370 AGG_TX_STATE_BT_PRIO = 0x002,
371 AGG_TX_STATE_FEW_BYTES = 0x004,
372 AGG_TX_STATE_ABORT = 0x008,
373 AGG_TX_STATE_LAST_SENT_TTL = 0x010,
374 AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
375 AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
376 AGG_TX_STATE_SCD_QUERY = 0x080,
377 AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
378 AGG_TX_STATE_RESPONSE = 0x1ff,
379 AGG_TX_STATE_DUMP_TX = 0x200,
380 AGG_TX_STATE_DELAY_TX = 0x400,
381 AGG_TX_STATE_TRY_CNT_POS = 12,
382 AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS,
383};
384
385#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \
386 AGG_TX_STATE_LAST_SENT_TRY_CNT| \
387 AGG_TX_STATE_LAST_SENT_BT_KILL)
388
389/*
390 * The mask below describes a status where we are absolutely sure that the MPDU
391 * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've
392 * written the bytes to the TXE, but we know nothing about what the DSP did.
393 */
394#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \
395 AGG_TX_STATE_ABORT | \
396 AGG_TX_STATE_SCD_QUERY)
397
398/*
399 * REPLY_TX = 0x1c (response)
400 *
401 * This response may be in one of two slightly different formats, indicated
402 * by the frame_count field:
403 *
404 * 1) No aggregation (frame_count == 1). This reports Tx results for a single
405 * frame. Multiple attempts, at various bit rates, may have been made for
406 * this frame.
407 *
408 * 2) Aggregation (frame_count > 1). This reports Tx results for two or more
409 * frames that used block-acknowledge. All frames were transmitted at
410 * same rate. Rate scaling may have been used if first frame in this new
411 * agg block failed in previous agg block(s).
412 *
413 * Note that, for aggregation, ACK (block-ack) status is not delivered
414 * here; block-ack has not been received by the time the device records
415 * this status.
416 * This status relates to reasons the tx might have been blocked or aborted
417 * within the device, rather than whether it was received successfully by
418 * the destination station.
419 */
420
421/**
422 * struct agg_tx_status - per packet TX aggregation status
423 * @status: enum iwl_tx_agg_status
424 * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
425 */
426struct agg_tx_status {
427 __le16 status;
428 __le16 sequence;
429} __packed;
430
431/*
432 * definitions for initial rate index field
433 * bits [3:0] initial rate index
434 * bits [6:4] rate table color, used for the initial rate
435 * bit-7 invalid rate indication
436 */
437#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
438#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
439#define TX_RES_INV_RATE_INDEX_MSK 0x80
440
441#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
442#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
443
444/**
445 * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
446 * ( REPLY_TX = 0x1c )
447 * @frame_count: 1 no aggregation, >1 aggregation
448 * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
449 * @failure_rts: num of failures due to unsuccessful RTS
450 * @failure_frame: num failures due to no ACK (unused for agg)
451 * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
452 * Tx of all the batch. RATE_MCS_*
453 * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
454 * for agg: RTS + CTS + aggregation tx time + block-ack time.
455 * in usec.
456 * @pa_status: tx power info
457 * @pa_integ_res_a: tx power info
458 * @pa_integ_res_b: tx power info
459 * @pa_integ_res_c: tx power info
460 * @measurement_req_id: tx power info
461 * @tfd_info: TFD information set by the FH
462 * @seq_ctl: sequence control from the Tx cmd
463 * @byte_cnt: byte count from the Tx cmd
464 * @tlc_info: TLC rate info
465 * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
466 * @frame_ctrl: frame control
467 * @status: for non-agg: frame status TX_STATUS_*
468 * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
469 * follow this one, up to frame_count.
470 *
471 * After the array of statuses comes the SSN of the SCD. Look at
472 * %iwl_mvm_get_scd_ssn for more details.
473 */
474struct iwl_mvm_tx_resp {
475 u8 frame_count;
476 u8 bt_kill_count;
477 u8 failure_rts;
478 u8 failure_frame;
479 __le32 initial_rate;
480 __le16 wireless_media_time;
481
482 u8 pa_status;
483 u8 pa_integ_res_a[3];
484 u8 pa_integ_res_b[3];
485 u8 pa_integ_res_c[3];
486 __le16 measurement_req_id;
487 u8 reduced_tpc;
488 u8 reserved;
489
490 __le32 tfd_info;
491 __le16 seq_ctl;
492 __le16 byte_cnt;
493 u8 tlc_info;
494 u8 ra_tid;
495 __le16 frame_ctrl;
496
497 struct agg_tx_status status;
498} __packed; /* TX_RSP_API_S_VER_3 */
499
500/**
501 * struct iwl_mvm_ba_notif - notifies about reception of BA
502 * ( BA_NOTIF = 0xc5 )
503 * @sta_addr_lo32: lower 32 bits of the MAC address
504 * @sta_addr_hi16: upper 16 bits of the MAC address
505 * @sta_id: Index of recipient (BA-sending) station in fw's station table
506 * @tid: tid of the session
507 * @seq_ctl:
508 * @bitmap: the bitmap of the BA notification as seen in the air
509 * @scd_flow: the tx queue this BA relates to
510 * @scd_ssn: the index of the last contiguously sent packet
511 * @txed: number of Txed frames in this batch
512 * @txed_2_done: number of Acked frames in this batch
513 */
514struct iwl_mvm_ba_notif {
515 __le32 sta_addr_lo32;
516 __le16 sta_addr_hi16;
517 __le16 reserved;
518
519 u8 sta_id;
520 u8 tid;
521 __le16 seq_ctl;
522 __le64 bitmap;
523 __le16 scd_flow;
524 __le16 scd_ssn;
525 u8 txed;
526 u8 txed_2_done;
527 __le16 reserved1;
528} __packed;
529
530/*
531 * struct iwl_mac_beacon_cmd - beacon template command
532 * @tx: the tx commands associated with the beacon frame
 533 * @template_id: currently equal to the mac context id of the corresponding
534 * mac.
535 * @tim_idx: the offset of the tim IE in the beacon
536 * @tim_size: the length of the tim IE
537 * @frame: the template of the beacon frame
538 */
539struct iwl_mac_beacon_cmd {
540 struct iwl_tx_cmd tx;
541 __le32 template_id;
542 __le32 tim_idx;
543 __le32 tim_size;
544 struct ieee80211_hdr frame[0];
545} __packed;
546
547struct iwl_beacon_notif {
548 struct iwl_mvm_tx_resp beacon_notify_hdr;
549 __le64 tsf;
550 __le32 ibss_mgr_status;
551} __packed;
552
553/**
554 * struct iwl_extended_beacon_notif - notifies about beacon transmission
555 * @beacon_notify_hdr: tx response command associated with the beacon
556 * @tsf: last beacon tsf
557 * @ibss_mgr_status: whether IBSS is manager
558 * @gp2: last beacon time in gp2
559 */
560struct iwl_extended_beacon_notif {
561 struct iwl_mvm_tx_resp beacon_notify_hdr;
562 __le64 tsf;
563 __le32 ibss_mgr_status;
564 __le32 gp2;
565} __packed; /* BEACON_NTFY_API_S_VER_5 */
566
567/**
568 * enum iwl_dump_control - dump (flush) control flags
 569 * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
570 * and the TFD queues are empty.
571 */
572enum iwl_dump_control {
573 DUMP_TX_FIFO_FLUSH = BIT(1),
574};
575
576/**
577 * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
578 * @queues_ctl: bitmap of queues to flush
579 * @flush_ctl: control flags
580 * @reserved: reserved
581 */
582struct iwl_tx_path_flush_cmd {
583 __le32 queues_ctl;
584 __le16 flush_ctl;
585 __le16 reserved;
586} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
587
588/**
589 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
590 * @tx_resp: the Tx response from the fw (agg or non-agg)
591 *
592 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
593 * it can't know that everything will go well until the end of the AMPDU, it
594 * can't know in advance the number of MPDUs that will be sent in the current
595 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
596 * Hence, it can't know in advance what the SSN of the SCD will be at the end
597 * of the batch. This is why the SSN of the SCD is written at the end of the
598 * whole struct at a variable offset. This function knows how to cope with the
599 * variable offset and returns the SSN of the SCD.
600 */
601static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
602{
603 return le32_to_cpup((__le32 *)&tx_resp->status +
604 tx_resp->frame_count) & 0xfff;
605}
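
For readers outside the kernel tree, the variable-offset read above can be reproduced with plain user-space types. The snippet below is a host-side illustration only: a stdint mirror of struct agg_tx_status and le32toh() from <endian.h> instead of le32_to_cpup(). The SCD SSN is the 32-bit little-endian word that immediately follows the frame_count status entries.

#include <stdint.h>
#include <endian.h>

struct agg_tx_status_mirror {
	uint16_t status;
	uint16_t sequence;
} __attribute__((packed));

/* Read the SSN that follows the per-frame status array. */
static uint32_t mirror_get_scd_ssn(const struct agg_tx_status_mirror *status,
				   uint8_t frame_count)
{
	const uint32_t *ssn = (const void *)(status + frame_count);

	return le32toh(*ssn) & 0xfff;
}
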
606
607/**
608 * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
609 * @token:
610 * @sta_id: station id
611 * @tid:
 612 * @scd_queue: scheduler queue to configure
613 * @enable: 1 queue enable, 0 queue disable
614 * @aggregate: 1 aggregated queue, 0 otherwise
615 * @tx_fifo: %enum iwl_mvm_tx_fifo
616 * @window: BA window size
617 * @ssn: SSN for the BA agreement
618 */
619struct iwl_scd_txq_cfg_cmd {
620 u8 token;
621 u8 sta_id;
622 u8 tid;
623 u8 scd_queue;
624 u8 enable;
625 u8 aggregate;
626 u8 tx_fifo;
627 u8 window;
628 __le16 ssn;
629 __le16 reserved;
630} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
631
632/**
633 * struct iwl_scd_txq_cfg_rsp
634 * @token: taken from the command
635 * @sta_id: station id from the command
636 * @tid: tid from the command
637 * @scd_queue: scd_queue from the command
638 */
639struct iwl_scd_txq_cfg_rsp {
640 u8 token;
641 u8 sta_id;
642 u8 tid;
643 u8 scd_queue;
644} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
645
646#endif /* __fw_api_tx_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
deleted file mode 100644
index 181590fbd3b3..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ /dev/null
@@ -1,1773 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __fw_api_h__
67#define __fw_api_h__
68
69#include "fw-api-rs.h"
70#include "fw-api-rx.h"
71#include "fw-api-tx.h"
72#include "fw-api-sta.h"
73#include "fw-api-mac.h"
74#include "fw-api-power.h"
75#include "fw-api-d3.h"
76#include "fw-api-coex.h"
77#include "fw-api-scan.h"
78#include "fw-api-stats.h"
79#include "fw-api-tof.h"
80
81/* Tx queue numbers */
82enum {
83 IWL_MVM_OFFCHANNEL_QUEUE = 8,
84 IWL_MVM_CMD_QUEUE = 9,
85};
86
87enum iwl_mvm_tx_fifo {
88 IWL_MVM_TX_FIFO_BK = 0,
89 IWL_MVM_TX_FIFO_BE,
90 IWL_MVM_TX_FIFO_VI,
91 IWL_MVM_TX_FIFO_VO,
92 IWL_MVM_TX_FIFO_MCAST = 5,
93 IWL_MVM_TX_FIFO_CMD = 7,
94};
95
96#define IWL_MVM_STATION_COUNT 16
97
98#define IWL_MVM_TDLS_STA_COUNT 4
99
100/* commands */
101enum {
102 MVM_ALIVE = 0x1,
103 REPLY_ERROR = 0x2,
104 ECHO_CMD = 0x3,
105
106 INIT_COMPLETE_NOTIF = 0x4,
107
108 /* PHY context commands */
109 PHY_CONTEXT_CMD = 0x8,
110 DBG_CFG = 0x9,
111 ANTENNA_COUPLING_NOTIFICATION = 0xa,
112
113 /* UMAC scan commands */
114 SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
115 SCAN_CFG_CMD = 0xc,
116 SCAN_REQ_UMAC = 0xd,
117 SCAN_ABORT_UMAC = 0xe,
118 SCAN_COMPLETE_UMAC = 0xf,
119
120 /* station table */
121 ADD_STA_KEY = 0x17,
122 ADD_STA = 0x18,
123 REMOVE_STA = 0x19,
124
125 /* paging get item */
126 FW_GET_ITEM_CMD = 0x1a,
127
128 /* TX */
129 TX_CMD = 0x1c,
130 TXPATH_FLUSH = 0x1e,
131 MGMT_MCAST_KEY = 0x1f,
132
133 /* scheduler config */
134 SCD_QUEUE_CFG = 0x1d,
135
136 /* global key */
137 WEP_KEY = 0x20,
138
139 /* Memory */
140 SHARED_MEM_CFG = 0x25,
141
142 /* TDLS */
143 TDLS_CHANNEL_SWITCH_CMD = 0x27,
144 TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
145 TDLS_CONFIG_CMD = 0xa7,
146
147 /* MAC and Binding commands */
148 MAC_CONTEXT_CMD = 0x28,
149 TIME_EVENT_CMD = 0x29, /* both CMD and response */
150 TIME_EVENT_NOTIFICATION = 0x2a,
151 BINDING_CONTEXT_CMD = 0x2b,
152 TIME_QUOTA_CMD = 0x2c,
153 NON_QOS_TX_COUNTER_CMD = 0x2d,
154
155 LQ_CMD = 0x4e,
156
157 /* paging block to FW cpu2 */
158 FW_PAGING_BLOCK_CMD = 0x4f,
159
160 /* Scan offload */
161 SCAN_OFFLOAD_REQUEST_CMD = 0x51,
162 SCAN_OFFLOAD_ABORT_CMD = 0x52,
163 HOT_SPOT_CMD = 0x53,
164 SCAN_OFFLOAD_COMPLETE = 0x6D,
165 SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
166 SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
167 MATCH_FOUND_NOTIFICATION = 0xd9,
168 SCAN_ITERATION_COMPLETE = 0xe7,
169
170 /* Phy */
171 PHY_CONFIGURATION_CMD = 0x6a,
172 CALIB_RES_NOTIF_PHY_DB = 0x6b,
173 /* PHY_DB_CMD = 0x6c, */
174
175 /* ToF - 802.11mc FTM */
176 TOF_CMD = 0x10,
177 TOF_NOTIFICATION = 0x11,
178
179 /* Power - legacy power table command */
180 POWER_TABLE_CMD = 0x77,
181 PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
182 LTR_CONFIG = 0xee,
183
184 /* Thermal Throttling*/
185 REPLY_THERMAL_MNG_BACKOFF = 0x7e,
186
187 /* Set/Get DC2DC frequency tune */
188 DC2DC_CONFIG_CMD = 0x83,
189
190 /* NVM */
191 NVM_ACCESS_CMD = 0x88,
192
193 SET_CALIB_DEFAULT_CMD = 0x8e,
194
195 BEACON_NOTIFICATION = 0x90,
196 BEACON_TEMPLATE_CMD = 0x91,
197 TX_ANT_CONFIGURATION_CMD = 0x98,
198 STATISTICS_CMD = 0x9c,
199 STATISTICS_NOTIFICATION = 0x9d,
200 EOSP_NOTIFICATION = 0x9e,
201 REDUCE_TX_POWER_CMD = 0x9f,
202
203 /* RF-KILL commands and notifications */
204 CARD_STATE_CMD = 0xa0,
205 CARD_STATE_NOTIFICATION = 0xa1,
206
207 MISSED_BEACONS_NOTIFICATION = 0xa2,
208
209 /* Power - new power table command */
210 MAC_PM_POWER_TABLE = 0xa9,
211
212 MFUART_LOAD_NOTIFICATION = 0xb1,
213
214 REPLY_RX_PHY_CMD = 0xc0,
215 REPLY_RX_MPDU_CMD = 0xc1,
216 BA_NOTIF = 0xc5,
217
218 /* Location Aware Regulatory */
219 MCC_UPDATE_CMD = 0xc8,
220 MCC_CHUB_UPDATE_CMD = 0xc9,
221
222 MARKER_CMD = 0xcb,
223
224 /* BT Coex */
225 BT_COEX_PRIO_TABLE = 0xcc,
226 BT_COEX_PROT_ENV = 0xcd,
227 BT_PROFILE_NOTIFICATION = 0xce,
228 BT_CONFIG = 0x9b,
229 BT_COEX_UPDATE_SW_BOOST = 0x5a,
230 BT_COEX_UPDATE_CORUN_LUT = 0x5b,
231 BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
232 BT_COEX_CI = 0x5d,
233
234 REPLY_SF_CFG_CMD = 0xd1,
235 REPLY_BEACON_FILTERING_CMD = 0xd2,
236
237 /* DTS measurements */
238 CMD_DTS_MEASUREMENT_TRIGGER = 0xdc,
239 DTS_MEASUREMENT_NOTIFICATION = 0xdd,
240
241 REPLY_DEBUG_CMD = 0xf0,
242 DEBUG_LOG_MSG = 0xf7,
243
244 BCAST_FILTER_CMD = 0xcf,
245 MCAST_FILTER_CMD = 0xd0,
246
247 /* D3 commands/notifications */
248 D3_CONFIG_CMD = 0xd3,
249 PROT_OFFLOAD_CONFIG_CMD = 0xd4,
250 OFFLOADS_QUERY_CMD = 0xd5,
251 REMOTE_WAKE_CONFIG_CMD = 0xd6,
252 D0I3_END_CMD = 0xed,
253
254 /* for WoWLAN in particular */
255 WOWLAN_PATTERNS = 0xe0,
256 WOWLAN_CONFIGURATION = 0xe1,
257 WOWLAN_TSC_RSC_PARAM = 0xe2,
258 WOWLAN_TKIP_PARAM = 0xe3,
259 WOWLAN_KEK_KCK_MATERIAL = 0xe4,
260 WOWLAN_GET_STATUSES = 0xe5,
261 WOWLAN_TX_POWER_PER_DB = 0xe6,
262
263 /* and for NetDetect */
264 SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
265 SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58,
266 SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59,
267
268 REPLY_MAX = 0xff,
269};
270
271enum iwl_phy_ops_subcmd_ids {
272 CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
273 DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
274};
275
276/* command groups */
277enum {
278 PHY_OPS_GROUP = 0x4,
279};
280
281/**
282 * struct iwl_cmd_response - generic response struct for most commands
283 * @status: status of the command asked, changes for each one
284 */
285struct iwl_cmd_response {
286 __le32 status;
287};
288
289/*
290 * struct iwl_tx_ant_cfg_cmd
291 * @valid: valid antenna configuration
292 */
293struct iwl_tx_ant_cfg_cmd {
294 __le32 valid;
295} __packed;
296
297/*
298 * Calibration control struct.
299 * Sent as part of the phy configuration command.
300 * @flow_trigger: bitmap for which calibrations to perform according to
301 * flow triggers.
302 * @event_trigger: bitmap for which calibrations to perform according to
303 * event triggers.
304 */
305struct iwl_calib_ctrl {
306 __le32 flow_trigger;
307 __le32 event_trigger;
308} __packed;
309
310/* This enum defines the bitmap of various calibrations to enable in both
311 * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
312 */
313enum iwl_calib_cfg {
314 IWL_CALIB_CFG_XTAL_IDX = BIT(0),
315 IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1),
316 IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2),
317 IWL_CALIB_CFG_PAPD_IDX = BIT(3),
318 IWL_CALIB_CFG_TX_PWR_IDX = BIT(4),
319 IWL_CALIB_CFG_DC_IDX = BIT(5),
320 IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6),
321 IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7),
322 IWL_CALIB_CFG_TX_IQ_IDX = BIT(8),
323 IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9),
324 IWL_CALIB_CFG_RX_IQ_IDX = BIT(10),
325 IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11),
326 IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12),
327 IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13),
328 IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14),
329 IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15),
330 IWL_CALIB_CFG_DAC_IDX = BIT(16),
331 IWL_CALIB_CFG_ABS_IDX = BIT(17),
332 IWL_CALIB_CFG_AGC_IDX = BIT(18),
333};
334
335/*
336 * Phy configuration command.
337 */
338struct iwl_phy_cfg_cmd {
339 __le32 phy_cfg;
340 struct iwl_calib_ctrl calib_control;
341} __packed;
342
343#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1))
344#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3))
345#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5))
346#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7))
347#define PHY_CFG_TX_CHAIN_A BIT(8)
348#define PHY_CFG_TX_CHAIN_B BIT(9)
349#define PHY_CFG_TX_CHAIN_C BIT(10)
350#define PHY_CFG_RX_CHAIN_A BIT(12)
351#define PHY_CFG_RX_CHAIN_B BIT(13)
352#define PHY_CFG_RX_CHAIN_C BIT(14)
353
354
355/* Target of the NVM_ACCESS_CMD */
356enum {
357 NVM_ACCESS_TARGET_CACHE = 0,
358 NVM_ACCESS_TARGET_OTP = 1,
359 NVM_ACCESS_TARGET_EEPROM = 2,
360};
361
362/* Section types for NVM_ACCESS_CMD */
363enum {
364 NVM_SECTION_TYPE_SW = 1,
365 NVM_SECTION_TYPE_REGULATORY = 3,
366 NVM_SECTION_TYPE_CALIBRATION = 4,
367 NVM_SECTION_TYPE_PRODUCTION = 5,
368 NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
369 NVM_SECTION_TYPE_PHY_SKU = 12,
370 NVM_MAX_NUM_SECTIONS = 13,
371};
372
373/**
 374 * struct iwl_nvm_access_cmd - Request the device to send an NVM section
375 * @op_code: 0 - read, 1 - write
376 * @target: NVM_ACCESS_TARGET_*
377 * @type: NVM_SECTION_TYPE_*
378 * @offset: offset in bytes into the section
379 * @length: in bytes, to read/write
 380 * @data: if write operation, the data to write. On read it is empty
381 */
382struct iwl_nvm_access_cmd {
383 u8 op_code;
384 u8 target;
385 __le16 type;
386 __le16 offset;
387 __le16 length;
388 u8 data[];
389} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
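
As a worked example of the command above, a read request only needs the header fields filled in. The sketch is host-side and self-contained: a stdint mirror of the struct (not the kernel definition) with htole16() from <endian.h> in place of cpu_to_le16(); the type value 12 used in the comment corresponds to NVM_SECTION_TYPE_PHY_SKU from the enum above.

#include <stdint.h>
#include <string.h>
#include <endian.h>

struct nvm_access_cmd_mirror {
	uint8_t  op_code;	/* 0 - read, 1 - write */
	uint8_t  target;	/* NVM_ACCESS_TARGET_* */
	uint16_t type;		/* NVM_SECTION_TYPE_*, little endian */
	uint16_t offset;	/* little endian */
	uint16_t length;	/* little endian */
} __attribute__((packed));

/* Describe a read of 'length' bytes at 'offset' from an NVM section. */
static void nvm_fill_read(struct nvm_access_cmd_mirror *cmd,
			  uint16_t type, uint16_t offset, uint16_t length)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->op_code = 0;			/* read */
	cmd->target  = 0;			/* NVM_ACCESS_TARGET_CACHE */
	cmd->type    = htole16(type);		/* e.g. 12 = PHY_SKU */
	cmd->offset  = htole16(offset);
	cmd->length  = htole16(length);
}
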
390
391#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
392
393/*
394 * struct iwl_fw_paging_cmd - paging layout
395 *
396 * (FW_PAGING_BLOCK_CMD = 0x4f)
397 *
398 * Send to FW the paging layout in the driver.
399 *
400 * @flags: various flags for the command
401 * @block_size: the block size in powers of 2
402 * @block_num: number of blocks specified in the command.
403 * @device_phy_addr: virtual addresses from device side
404*/
405struct iwl_fw_paging_cmd {
406 __le32 flags;
407 __le32 block_size;
408 __le32 block_num;
409 __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
410} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
411
412/*
413 * Fw items ID's
414 *
415 * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload
416 * download
417 */
418enum iwl_fw_item_id {
419 IWL_FW_ITEM_ID_PAGING = 3,
420};
421
422/*
423 * struct iwl_fw_get_item_cmd - get an item from the fw
424 */
425struct iwl_fw_get_item_cmd {
426 __le32 item_id;
427} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
428
429struct iwl_fw_get_item_resp {
430 __le32 item_id;
431 __le32 item_byte_cnt;
432 __le32 item_val;
433} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
434
435/**
 436 * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
437 * @offset: offset in bytes into the section
438 * @length: in bytes, either how much was written or read
439 * @type: NVM_SECTION_TYPE_*
440 * @status: 0 for success, fail otherwise
441 * @data: if read operation, the data returned. Empty on write.
442 */
443struct iwl_nvm_access_resp {
444 __le16 offset;
445 __le16 length;
446 __le16 type;
447 __le16 status;
448 u8 data[];
449} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
450
451/* MVM_ALIVE 0x1 */
452
453/* alive response is_valid values */
454#define ALIVE_RESP_UCODE_OK BIT(0)
455#define ALIVE_RESP_RFKILL BIT(1)
456
457/* alive response ver_type values */
458enum {
459 FW_TYPE_HW = 0,
460 FW_TYPE_PROT = 1,
461 FW_TYPE_AP = 2,
462 FW_TYPE_WOWLAN = 3,
463 FW_TYPE_TIMING = 4,
464 FW_TYPE_WIPAN = 5
465};
466
467/* alive response ver_subtype values */
468enum {
469 FW_SUBTYPE_FULL_FEATURE = 0,
470 FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
471 FW_SUBTYPE_REDUCED = 2,
472 FW_SUBTYPE_ALIVE_ONLY = 3,
473 FW_SUBTYPE_WOWLAN = 4,
474 FW_SUBTYPE_AP_SUBTYPE = 5,
475 FW_SUBTYPE_WIPAN = 6,
476 FW_SUBTYPE_INITIALIZE = 9
477};
478
479#define IWL_ALIVE_STATUS_ERR 0xDEAD
480#define IWL_ALIVE_STATUS_OK 0xCAFE
481
482#define IWL_ALIVE_FLG_RFKILL BIT(0)
483
484struct mvm_alive_resp_ver1 {
485 __le16 status;
486 __le16 flags;
487 u8 ucode_minor;
488 u8 ucode_major;
489 __le16 id;
490 u8 api_minor;
491 u8 api_major;
492 u8 ver_subtype;
493 u8 ver_type;
494 u8 mac;
495 u8 opt;
496 __le16 reserved2;
497 __le32 timestamp;
498 __le32 error_event_table_ptr; /* SRAM address for error log */
499 __le32 log_event_table_ptr; /* SRAM address for event log */
500 __le32 cpu_register_ptr;
501 __le32 dbgm_config_ptr;
502 __le32 alive_counter_ptr;
503 __le32 scd_base_ptr; /* SRAM address for SCD */
504} __packed; /* ALIVE_RES_API_S_VER_1 */
505
506struct mvm_alive_resp_ver2 {
507 __le16 status;
508 __le16 flags;
509 u8 ucode_minor;
510 u8 ucode_major;
511 __le16 id;
512 u8 api_minor;
513 u8 api_major;
514 u8 ver_subtype;
515 u8 ver_type;
516 u8 mac;
517 u8 opt;
518 __le16 reserved2;
519 __le32 timestamp;
520 __le32 error_event_table_ptr; /* SRAM address for error log */
521 __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
522 __le32 cpu_register_ptr;
523 __le32 dbgm_config_ptr;
524 __le32 alive_counter_ptr;
525 __le32 scd_base_ptr; /* SRAM address for SCD */
526 __le32 st_fwrd_addr; /* pointer to Store and forward */
527 __le32 st_fwrd_size;
528 u8 umac_minor; /* UMAC version: minor */
529 u8 umac_major; /* UMAC version: major */
530 __le16 umac_id; /* UMAC version: id */
531 __le32 error_info_addr; /* SRAM address for UMAC error log */
532 __le32 dbg_print_buff_addr;
533} __packed; /* ALIVE_RES_API_S_VER_2 */
534
535struct mvm_alive_resp {
536 __le16 status;
537 __le16 flags;
538 __le32 ucode_minor;
539 __le32 ucode_major;
540 u8 ver_subtype;
541 u8 ver_type;
542 u8 mac;
543 u8 opt;
544 __le32 timestamp;
545 __le32 error_event_table_ptr; /* SRAM address for error log */
546 __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
547 __le32 cpu_register_ptr;
548 __le32 dbgm_config_ptr;
549 __le32 alive_counter_ptr;
550 __le32 scd_base_ptr; /* SRAM address for SCD */
551 __le32 st_fwrd_addr; /* pointer to Store and forward */
552 __le32 st_fwrd_size;
553 __le32 umac_minor; /* UMAC version: minor */
554 __le32 umac_major; /* UMAC version: major */
555 __le32 error_info_addr; /* SRAM address for UMAC error log */
556 __le32 dbg_print_buff_addr;
557} __packed; /* ALIVE_RES_API_S_VER_3 */
558
559/* Error response/notification */
560enum {
561 FW_ERR_UNKNOWN_CMD = 0x0,
562 FW_ERR_INVALID_CMD_PARAM = 0x1,
563 FW_ERR_SERVICE = 0x2,
564 FW_ERR_ARC_MEMORY = 0x3,
565 FW_ERR_ARC_CODE = 0x4,
566 FW_ERR_WATCH_DOG = 0x5,
567 FW_ERR_WEP_GRP_KEY_INDX = 0x10,
568 FW_ERR_WEP_KEY_SIZE = 0x11,
569 FW_ERR_OBSOLETE_FUNC = 0x12,
570 FW_ERR_UNEXPECTED = 0xFE,
571 FW_ERR_FATAL = 0xFF
572};
573
574/**
575 * struct iwl_error_resp - FW error indication
576 * ( REPLY_ERROR = 0x2 )
577 * @error_type: one of FW_ERR_*
578 * @cmd_id: the command ID for which the error occurred
579 * @bad_cmd_seq_num: sequence number of the erroneous command
580 * @error_service: which service created the error, applicable only if
581 * error_type = 2, otherwise 0
582 * @timestamp: TSF in usecs.
583 */
584struct iwl_error_resp {
585 __le32 error_type;
586 u8 cmd_id;
587 u8 reserved1;
588 __le16 bad_cmd_seq_num;
589 __le32 error_service;
590 __le64 timestamp;
591} __packed;
592
593
594/* Common PHY, MAC and Bindings definitions */
595
596#define MAX_MACS_IN_BINDING (3)
597#define MAX_BINDINGS (4)
598#define AUX_BINDING_INDEX (3)
599#define MAX_PHYS (4)
600
601/* Used to extract ID and color from the context dword */
602#define FW_CTXT_ID_POS (0)
603#define FW_CTXT_ID_MSK (0xff << FW_CTXT_ID_POS)
604#define FW_CTXT_COLOR_POS (8)
605#define FW_CTXT_COLOR_MSK (0xff << FW_CTXT_COLOR_POS)
606#define FW_CTXT_INVALID (0xffffffff)
607
608#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\
609 (_color << FW_CTXT_COLOR_POS))
610
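/*
 * Editor's illustrative sketch (not part of the original file): the inverse of
 * FW_CMD_ID_AND_COLOR(), showing how the ID and color described above can be
 * unpacked from a context dword. The macro names are hypothetical.
 */
#define FW_CTXT_ID_FROM_DWORD(_dw)	(((_dw) & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS)
#define FW_CTXT_COLOR_FROM_DWORD(_dw)	(((_dw) & FW_CTXT_COLOR_MSK) >> FW_CTXT_COLOR_POS)
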
611/* Possible actions on PHYs, MACs and Bindings */
612enum {
613 FW_CTXT_ACTION_STUB = 0,
614 FW_CTXT_ACTION_ADD,
615 FW_CTXT_ACTION_MODIFY,
616 FW_CTXT_ACTION_REMOVE,
617 FW_CTXT_ACTION_NUM
618}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
619
620/* Time Events */
621
622/* Time Event types, according to MAC type */
623enum iwl_time_event_type {
624 /* BSS Station Events */
625 TE_BSS_STA_AGGRESSIVE_ASSOC,
626 TE_BSS_STA_ASSOC,
627 TE_BSS_EAP_DHCP_PROT,
628 TE_BSS_QUIET_PERIOD,
629
630 /* P2P Device Events */
631 TE_P2P_DEVICE_DISCOVERABLE,
632 TE_P2P_DEVICE_LISTEN,
633 TE_P2P_DEVICE_ACTION_SCAN,
634 TE_P2P_DEVICE_FULL_SCAN,
635
636 /* P2P Client Events */
637 TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
638 TE_P2P_CLIENT_ASSOC,
639 TE_P2P_CLIENT_QUIET_PERIOD,
640
641 /* P2P GO Events */
642 TE_P2P_GO_ASSOC_PROT,
643 TE_P2P_GO_REPETITIVE_NOA,
644 TE_P2P_GO_CT_WINDOW,
645
646 /* WiDi Sync Events */
647 TE_WIDI_TX_SYNC,
648
649 /* Channel Switch NoA */
650 TE_CHANNEL_SWITCH_PERIOD,
651
652 TE_MAX
653}; /* MAC_EVENT_TYPE_API_E_VER_1 */
654
655
656
657/* Time event - defines for command API v1 */
658
659/*
660 * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
661 * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
662 * the first fragment is scheduled.
663 * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
664 * the first 2 fragments are scheduled.
665 * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
666 * number of fragments are valid.
667 *
668 * Other than the constants defined above, specifying a fragmentation value 'x'
669 * means that the event can be fragmented but only the first 'x' will be
670 * scheduled.
671 */
672enum {
673 TE_V1_FRAG_NONE = 0,
674 TE_V1_FRAG_SINGLE = 1,
675 TE_V1_FRAG_DUAL = 2,
676 TE_V1_FRAG_ENDLESS = 0xffffffff
677};
678
679/* If a Time Event can be fragmented, this is the max number of fragments */
680#define TE_V1_FRAG_MAX_MSK 0x0fffffff
681/* Repeat the time event endlessly (until removed) */
682#define TE_V1_REPEAT_ENDLESS 0xffffffff
683/* If a Time Event has bounded repetitions, this is the maximal value */
684#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
685
686/* Time Event dependencies: none, on another TE, or in a specific time */
687enum {
688 TE_V1_INDEPENDENT = 0,
689 TE_V1_DEP_OTHER = BIT(0),
690 TE_V1_DEP_TSF = BIT(1),
691 TE_V1_EVENT_SOCIOPATHIC = BIT(2),
692}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
693
694/*
695 * @TE_V1_NOTIF_NONE: no notifications
696 * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
697 * @TE_V1_NOTIF_HOST_EVENT_END: request/receive notification on event end
698 * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
699 * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
700 * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
701 * @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end
702 * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
703 * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
704 *
705 * Supported Time event notifications configuration.
706 * A notification (both event and fragment) includes a status indicating whether
707 * the FW was able to schedule the event or not. For fragment start/end
708 * notifications the status is always success. There is no start/end fragment
709 * notification for monolithic events.
710 */
711enum {
712 TE_V1_NOTIF_NONE = 0,
713 TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
714 TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
715 TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
716 TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
717 TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
718 TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
719 TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
720 TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
721}; /* MAC_EVENT_ACTION_API_E_VER_2 */
722
723/* Time event - defines for command API */
724
725/*
726 * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
727 * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
728 * the first fragment is scheduled.
729 * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
730 * the first 2 fragments are scheduled.
731 * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
732 * number of fragments are valid.
733 *
734 * Other than the constants defined above, specifying a fragmentation value 'x'
735 * means that the event can be fragmented but only the first 'x' will be
736 * scheduled.
737 */
738enum {
739 TE_V2_FRAG_NONE = 0,
740 TE_V2_FRAG_SINGLE = 1,
741 TE_V2_FRAG_DUAL = 2,
742 TE_V2_FRAG_MAX = 0xfe,
743 TE_V2_FRAG_ENDLESS = 0xff
744};
745
746/* Repeat the time event endlessly (until removed) */
747#define TE_V2_REPEAT_ENDLESS 0xff
748/* If a Time Event has bounded repetitions, this is the maximal value */
749#define TE_V2_REPEAT_MAX 0xfe
750
751#define TE_V2_PLACEMENT_POS 12
752#define TE_V2_ABSENCE_POS 15
753
754/* Time event policy values
755 * A notification (both event and fragment) includes a status indicating whether
756 * the FW was able to schedule the event or not. For fragment start/end
757 * notifications the status is always success. There is no start/end fragment
758 * notification for monolithic events.
759 *
760 * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
761 * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
762 * @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
763 * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
764 * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
765 * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
766 * @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
767 * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
768 * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
769 * @TE_V2_DEP_OTHER: depends on another time event
770 * @TE_V2_DEP_TSF: depends on a specific time
771 * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
772 * @TE_V2_ABSENCE: are we present or absent during the Time Event.
773 */
774enum {
775 TE_V2_DEFAULT_POLICY = 0x0,
776
777 /* notifications (event start/stop, fragment start/stop) */
778 TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
779 TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
780 TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
781 TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
782
783 TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
784 TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
785 TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
786 TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
787 T2_V2_START_IMMEDIATELY = BIT(11),
788
789 TE_V2_NOTIF_MSK = 0xff,
790
791 /* placement characteristics */
792 TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
793 TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
794 TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
795
796 /* are we present or absent during the Time Event. */
797 TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
798};
799
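/*
 * Editor's illustrative sketch (hypothetical, not from the original file):
 * composing a time-event policy word that requests host notifications on
 * event start/end and anchors the event to a specific TSF, using the flags
 * defined above.
 */
#define EXAMPLE_TE_V2_POLICY	(TE_V2_NOTIF_HOST_EVENT_START |	\
				 TE_V2_NOTIF_HOST_EVENT_END |	\
				 TE_V2_DEP_TSF)
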
800/**
801 * struct iwl_time_event_cmd - configuring Time Events
802 * using struct MAC_TIME_EVENT_DATA_API_S_VER_2 (an older layout exists
803 * for version 1; which one is used is determined by IWL_UCODE_TLV_FLAGS)
804 * ( TIME_EVENT_CMD = 0x29 )
805 * @id_and_color: ID and color of the relevant MAC
806 * @action: action to perform, one of FW_CTXT_ACTION_*
807 * @id: this field has two meanings, depending on the action:
808 * If the action is ADD, then it means the type of event to add.
809 * For all other actions it is the unique event ID assigned when the
810 * event was added by the FW.
811 * @apply_time: When to start the Time Event (in GP2)
812 * @max_delay: maximum delay to event's start (apply time), in TU
813 * @depends_on: the unique ID of the event we depend on (if any)
814 * @interval: interval between repetitions, in TU
815 * @duration: duration of event in TU
816 * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
817 * @max_frags: maximal number of fragments the Time Event can be divided into
818 * @policy: defines whether uCode shall notify the host or other uCode modules
819 * on event and/or fragment start and/or end;
820 * built from one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF or
821 * TE_EVENT_SOCIOPATHIC, optionally combined with TE_ABSENCE and
822 * the TE_NOTIF_* flags
823 */
824struct iwl_time_event_cmd {
825 /* COMMON_INDEX_HDR_API_S_VER_1 */
826 __le32 id_and_color;
827 __le32 action;
828 __le32 id;
829 /* MAC_TIME_EVENT_DATA_API_S_VER_2 */
830 __le32 apply_time;
831 __le32 max_delay;
832 __le32 depends_on;
833 __le32 interval;
834 __le32 duration;
835 u8 repeat;
836 u8 max_frags;
837 __le16 policy;
838} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
839
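/*
 * Editor's illustrative sketch (hypothetical helper and values, not part of
 * the original file): filling a TIME_EVENT_CMD that ADDs a BSS association
 * protection event of 600 TU on a given MAC context.
 */
static inline struct iwl_time_event_cmd
example_te_add_cmd(u32 mac_id, u32 mac_color)
{
	struct iwl_time_event_cmd cmd = {
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, mac_color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.id = cpu_to_le32(TE_BSS_STA_ASSOC),	/* event type, since this is ADD */
		.max_delay = cpu_to_le32(500),		/* up to 500 TU of start slack */
		.duration = cpu_to_le32(600),		/* protect 600 TU */
		.repeat = 1,
		.max_frags = TE_V2_FRAG_NONE,
		.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END),
	};

	return cmd;
}
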
840/**
841 * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
842 * @status: bit 0 indicates success, all others specify errors
843 * @id: the Time Event type
844 * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
845 * @id_and_color: ID and color of the relevant MAC
846 */
847struct iwl_time_event_resp {
848 __le32 status;
849 __le32 id;
850 __le32 unique_id;
851 __le32 id_and_color;
852} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
853
854/**
855 * struct iwl_time_event_notif - notifications of time event start/stop
856 * ( TIME_EVENT_NOTIFICATION = 0x2a )
857 * @timestamp: action timestamp in GP2
858 * @session_id: session's unique id
859 * @unique_id: unique id of the Time Event itself
860 * @id_and_color: ID and color of the relevant MAC
861 * @action: one of TE_NOTIF_START or TE_NOTIF_END
862 * @status: true if scheduled, false otherwise (not executed)
863 */
864struct iwl_time_event_notif {
865 __le32 timestamp;
866 __le32 session_id;
867 __le32 unique_id;
868 __le32 id_and_color;
869 __le32 action;
870 __le32 status;
871} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
872
873
874/* Bindings and Time Quota */
875
876/**
877 * struct iwl_binding_cmd - configuring bindings
878 * ( BINDING_CONTEXT_CMD = 0x2b )
879 * @id_and_color: ID and color of the relevant Binding
880 * @action: action to perform, one of FW_CTXT_ACTION_*
881 * @macs: array of MAC id and colors which belong to the binding
882 * @phy: PHY id and color which belongs to the binding
883 */
884struct iwl_binding_cmd {
885 /* COMMON_INDEX_HDR_API_S_VER_1 */
886 __le32 id_and_color;
887 __le32 action;
888 /* BINDING_DATA_API_S_VER_1 */
889 __le32 macs[MAX_MACS_IN_BINDING];
890 __le32 phy;
891} __packed; /* BINDING_CMD_API_S_VER_1 */
892
893/* The maximal number of fragments in the FW's schedule session */
894#define IWL_MVM_MAX_QUOTA 128
895
896/**
897 * struct iwl_time_quota_data - configuration of time quota per binding
898 * @id_and_color: ID and color of the relevant Binding
899 * @quota: absolute time quota in TU. The scheduler will try to divide the
900 * remaining quota (after Time Events) according to this quota.
901 * @max_duration: max uninterrupted context duration in TU
902 */
903struct iwl_time_quota_data {
904 __le32 id_and_color;
905 __le32 quota;
906 __le32 max_duration;
907} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
908
909/**
910 * struct iwl_time_quota_cmd - configuration of time quota between bindings
911 * ( TIME_QUOTA_CMD = 0x2c )
912 * @quotas: allocations per binding
913 */
914struct iwl_time_quota_cmd {
915 struct iwl_time_quota_data quotas[MAX_BINDINGS];
916} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
917
918
919/* PHY context */
920
921/* Supported bands */
922#define PHY_BAND_5 (0)
923#define PHY_BAND_24 (1)
924
925/* Supported channel widths; they vary if there is VHT support */
926#define PHY_VHT_CHANNEL_MODE20 (0x0)
927#define PHY_VHT_CHANNEL_MODE40 (0x1)
928#define PHY_VHT_CHANNEL_MODE80 (0x2)
929#define PHY_VHT_CHANNEL_MODE160 (0x3)
930
931/*
932 * Control channel position:
933 * For legacy, a set bit means upper channel, otherwise lower.
934 * For VHT - bit-2 marks if the control is lower/upper relative to center-freq,
935 * bits-1:0 mark the distance from the center freq. For 20 MHz, the offset is 0.
936 * center_freq
937 * |
938 * 40Mhz |_______|_______|
939 * 80Mhz |_______|_______|_______|_______|
940 * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
941 * code 011 010 001 000 | 100 101 110 111
942 */
943#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
944#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
945#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
946#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
947#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
948#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
949#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
950#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
951
952/*
953 * @band: PHY_BAND_*
954 * @channel: channel number
955 * @width: PHY_[VHT|LEGACY]_CHANNEL_*
956 * @ctrl_pos: control channel position, PHY_[VHT|LEGACY]_CTRL_*
957 */
958struct iwl_fw_channel_info {
959 u8 band;
960 u8 channel;
961 u8 width;
962 u8 ctrl_pos;
963} __packed;
964
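/*
 * Editor's illustrative sketch (hypothetical values, not from the original
 * file): describing an 80 MHz channel on the 5 GHz band whose control
 * (primary 20 MHz) channel is the second one below the center frequency,
 * following the diagram above.
 */
static const struct iwl_fw_channel_info example_ci = {
	.band = PHY_BAND_5,
	.channel = 36,
	.width = PHY_VHT_CHANNEL_MODE80,
	.ctrl_pos = PHY_VHT_CTRL_POS_2_BELOW,
};
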
965#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
966#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
967 (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS)
968#define PHY_RX_CHAIN_VALID_POS (1)
969#define PHY_RX_CHAIN_VALID_MSK \
970 (0x7 << PHY_RX_CHAIN_VALID_POS)
971#define PHY_RX_CHAIN_FORCE_SEL_POS (4)
972#define PHY_RX_CHAIN_FORCE_SEL_MSK \
973 (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS)
974#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
975#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
976 (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
977#define PHY_RX_CHAIN_CNT_POS (10)
978#define PHY_RX_CHAIN_CNT_MSK \
979 (0x3 << PHY_RX_CHAIN_CNT_POS)
980#define PHY_RX_CHAIN_MIMO_CNT_POS (12)
981#define PHY_RX_CHAIN_MIMO_CNT_MSK \
982 (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS)
983#define PHY_RX_CHAIN_MIMO_FORCE_POS (14)
984#define PHY_RX_CHAIN_MIMO_FORCE_MSK \
985 (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS)
986
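/*
 * Editor's illustrative sketch (hypothetical, not from the original file):
 * an rxchain_info value marking chains A and B (bitmap 0x3) as valid and
 * requesting a single active RX chain with no MIMO; it would be wrapped in
 * cpu_to_le32() when placed in the PHY context command below.
 */
#define EXAMPLE_RXCHAIN_INFO	((0x3 << PHY_RX_CHAIN_VALID_POS) |	\
				 (1 << PHY_RX_CHAIN_CNT_POS) |		\
				 (0 << PHY_RX_CHAIN_MIMO_CNT_POS))
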
987/* TODO: fix the value, make it depend on firmware at runtime? */
988#define NUM_PHY_CTX 3
989
990/* TODO: complete missing documentation */
991/**
992 * struct iwl_phy_context_cmd - config of the PHY context
993 * ( PHY_CONTEXT_CMD = 0x8 )
994 * @id_and_color: ID and color of the relevant Binding
995 * @action: action to perform, one of FW_CTXT_ACTION_*
996 * @apply_time: 0 means immediate apply and context switch.
997 * other value means apply new params after X usecs
998 * @tx_param_color: ???
999 * @channel_info:
1000 * @txchain_info: ???
1001 * @rxchain_info: ???
1002 * @acquisition_data: ???
1003 * @dsp_cfg_flags: set to 0
1004 */
1005struct iwl_phy_context_cmd {
1006 /* COMMON_INDEX_HDR_API_S_VER_1 */
1007 __le32 id_and_color;
1008 __le32 action;
1009 /* PHY_CONTEXT_DATA_API_S_VER_1 */
1010 __le32 apply_time;
1011 __le32 tx_param_color;
1012 struct iwl_fw_channel_info ci;
1013 __le32 txchain_info;
1014 __le32 rxchain_info;
1015 __le32 acquisition_data;
1016 __le32 dsp_cfg_flags;
1017} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
1018
1019/*
1020 * Aux ROC command
1021 *
1022 * Command requests the firmware to create a time event for a certain duration
1023 * and remain on the given channel. This is done by using the Aux framework in
1024 * the FW.
1025 * The command was first used for Hot Spot issues - but can be used regardless
1026 * of Hot Spot.
1027 *
1028 * ( HOT_SPOT_CMD 0x53 )
1029 *
1030 * @id_and_color: ID and color of the MAC
1031 * @action: action to perform, one of FW_CTXT_ACTION_*
1032 * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the
1033 * event_unique_id should be the id of the time event assigned by ucode.
1034 * Otherwise ignore the event_unique_id.
1035 * @sta_id_and_color: station id and color, resumed during "Remain On Channel"
1036 * activity.
1037 * @channel_info: channel info
1038 * @node_addr: Our MAC Address
1039 * @reserved: reserved for alignment
1040 * @apply_time: GP2 value to start (should always be the current GP2 value)
1041 * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
1042 * time by which start of the event is allowed to be postponed.
1043 * @duration: event duration in TU. To calculate the effective event duration:
1044 * timeEventDuration = min(duration, remainingQuota)
1045 */
1046struct iwl_hs20_roc_req {
1047 /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
1048 __le32 id_and_color;
1049 __le32 action;
1050 __le32 event_unique_id;
1051 __le32 sta_id_and_color;
1052 struct iwl_fw_channel_info channel_info;
1053 u8 node_addr[ETH_ALEN];
1054 __le16 reserved;
1055 __le32 apply_time;
1056 __le32 apply_time_max_delay;
1057 __le32 duration;
1058} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
1059
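/*
 * Editor's illustrative sketch (hypothetical helper, not from the original
 * file): the effective Aux ROC duration the firmware ends up using, per the
 * formula quoted in the comment above (both arguments in TU).
 */
static inline u32 example_aux_roc_duration(u32 requested_tu, u32 remaining_quota_tu)
{
	return requested_tu < remaining_quota_tu ? requested_tu : remaining_quota_tu;
}
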
1060/*
1061 * values for AUX ROC result values
1062 */
1063enum iwl_mvm_hot_spot {
1064 HOT_SPOT_RSP_STATUS_OK,
1065 HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS,
1066 HOT_SPOT_MAX_NUM_OF_SESSIONS,
1067};
1068
1069/*
1070 * Aux ROC command response
1071 *
1072 * In response to iwl_hs20_roc_req the FW sends this command to notify the
1073 * driver the uid of the timevent.
1074 *
1075 * ( HOT_SPOT_CMD 0x53 )
1076 *
1077 * @event_unique_id: Unique ID of time event assigned by ucode
1078 * @status: Return status 0 is success, all the rest used for specific errors
1079 */
1080struct iwl_hs20_roc_res {
1081 __le32 event_unique_id;
1082 __le32 status;
1083} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
1084
1085/**
1086 * struct iwl_radio_version_notif - information on the radio version
1087 * ( RADIO_VERSION_NOTIFICATION = 0x68 )
1088 * @radio_flavor:
1089 * @radio_step:
1090 * @radio_dash:
1091 */
1092struct iwl_radio_version_notif {
1093 __le32 radio_flavor;
1094 __le32 radio_step;
1095 __le32 radio_dash;
1096} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
1097
1098enum iwl_card_state_flags {
1099 CARD_ENABLED = 0x00,
1100 HW_CARD_DISABLED = 0x01,
1101 SW_CARD_DISABLED = 0x02,
1102 CT_KILL_CARD_DISABLED = 0x04,
1103 HALT_CARD_DISABLED = 0x08,
1104 CARD_DISABLED_MSK = 0x0f,
1105 CARD_IS_RX_ON = 0x10,
1106};
1107
1108/**
1109 * struct iwl_card_state_notif - card state notification
1110 * ( CARD_STATE_NOTIFICATION = 0xa1 )
1111 * @flags: %iwl_card_state_flags
1112 */
1113struct iwl_card_state_notif {
1114 __le32 flags;
1115} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
1116
1117/**
1118 * struct iwl_missed_beacons_notif - information on missed beacons
1119 * ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
1120 * @mac_id: interface ID
1121 * @consec_missed_beacons_since_last_rx: number of consecutive missed
1122 * beacons since last RX.
1123 * @consec_missed_beacons: number of consecutive missed beacons
1124 * @num_expected_beacons:
1125 * @num_recvd_beacons:
1126 */
1127struct iwl_missed_beacons_notif {
1128 __le32 mac_id;
1129 __le32 consec_missed_beacons_since_last_rx;
1130 __le32 consec_missed_beacons;
1131 __le32 num_expected_beacons;
1132 __le32 num_recvd_beacons;
1133} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
1134
1135/**
1136 * struct iwl_mfuart_load_notif - mfuart image version & status
1137 * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
1138 * @installed_ver: installed image version
1139 * @external_ver: external image version
1140 * @status: MFUART loading status
1141 * @duration: MFUART loading time
1142*/
1143struct iwl_mfuart_load_notif {
1144 __le32 installed_ver;
1145 __le32 external_ver;
1146 __le32 status;
1147 __le32 duration;
1148} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
1149
1150/**
1151 * struct iwl_set_calib_default_cmd - set default value for calibration.
1152 * ( SET_CALIB_DEFAULT_CMD = 0x8e )
1153 * @calib_index: the calibration to set value for
1154 * @length: length of @data
1155 * @data: the value to set for the calibration result
1156 */
1157struct iwl_set_calib_default_cmd {
1158 __le16 calib_index;
1159 __le16 length;
1160 u8 data[0];
1161} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
1162
1163#define MAX_PORT_ID_NUM 2
1164#define MAX_MCAST_FILTERING_ADDRESSES 256
1165
1166/**
1167 * struct iwl_mcast_filter_cmd - configure multicast filter.
1168 * @filter_own: Set 1 to filter out multicast packets sent by station itself
1169 * @port_id: Multicast MAC addresses array specifier. This is a somewhat
1170 * unusual way to identify the network interface, adopted in the host-device IF.
1171 * It is used by FW as index in array of addresses. This array has
1172 * MAX_PORT_ID_NUM members.
1173 * @count: Number of MAC addresses in the array
1174 * @pass_all: Set 1 to pass all multicast packets.
1175 * @bssid: current association BSSID.
1176 * @addr_list: Place holder for array of MAC addresses.
1177 * IMPORTANT: add padding if necessary to ensure DWORD alignment.
1178 */
1179struct iwl_mcast_filter_cmd {
1180 u8 filter_own;
1181 u8 port_id;
1182 u8 count;
1183 u8 pass_all;
1184 u8 bssid[6];
1185 u8 reserved[2];
1186 u8 addr_list[0];
1187} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
1188
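/*
 * Editor's illustrative sketch (hypothetical helper, not from the original
 * file): total command length for n multicast addresses, rounded up to a
 * multiple of 4 so that the trailing addr_list stays DWORD aligned as the
 * comment above requires.
 */
static inline u32 example_mcast_cmd_len(unsigned int n_addrs)
{
	return ALIGN(sizeof(struct iwl_mcast_filter_cmd) + n_addrs * ETH_ALEN, 4);
}
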
1189#define MAX_BCAST_FILTERS 8
1190#define MAX_BCAST_FILTER_ATTRS 2
1191
1192/**
1193 * enum iwl_mvm_bcast_filter_attr_offset - reference point for the attribute offset
1194 * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
1195 * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
1196 * start of ip payload).
1197 */
1198enum iwl_mvm_bcast_filter_attr_offset {
1199 BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
1200 BCAST_FILTER_OFFSET_IP_END = 1,
1201};
1202
1203/**
1204 * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
1205 * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
1206 * @offset: starting offset of this pattern.
1207 * @val: value to match - big endian (MSB is the first
1208 * byte to match from offset pos).
1209 * @mask: mask to match (big endian).
1210 */
1211struct iwl_fw_bcast_filter_attr {
1212 u8 offset_type;
1213 u8 offset;
1214 __le16 reserved1;
1215 __be32 val;
1216 __be32 mask;
1217} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
1218
1219/**
1220 * enum iwl_mvm_bcast_filter_frame_type - filter frame type
1221 * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
1222 * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
1223 */
1224enum iwl_mvm_bcast_filter_frame_type {
1225 BCAST_FILTER_FRAME_TYPE_ALL = 0,
1226 BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
1227};
1228
1229/**
1230 * struct iwl_fw_bcast_filter - broadcast filter
1231 * @discard: discard frame (1) or let it pass (0).
1232 * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
1233 * @num_attrs: number of valid attributes in this filter.
1234 * @attrs: attributes of this filter. a filter is considered matched
1235 * only when all its attributes are matched (i.e. AND relationship)
1236 */
1237struct iwl_fw_bcast_filter {
1238 u8 discard;
1239 u8 frame_type;
1240 u8 num_attrs;
1241 u8 reserved1;
1242 struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
1243} __packed; /* BCAST_FILTER_S_VER_1 */
1244
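/*
 * Editor's illustrative sketch (hypothetical pattern, not from the original
 * file): a broadcast filter that lets through IPv4 frames whose IP payload
 * starts with a given big-endian pattern; a filter matches only when all of
 * its attributes match (AND relationship), as described above.
 */
static const struct iwl_fw_bcast_filter example_bcast_filter = {
	.discard = 0,				/* pass matching frames */
	.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
	.num_attrs = 1,
	.attrs = {
		{
			.offset_type = BCAST_FILTER_OFFSET_IP_END,
			.offset = 0,
			.val = cpu_to_be32(0x11223344),
			.mask = cpu_to_be32(0xffffffff),
		},
	},
};
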
1245/**
1246 * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
1247 * @default_discard: default action for this mac (discard (1) / pass (0)).
1248 * @attached_filters: bitmap of relevant filters for this mac.
1249 */
1250struct iwl_fw_bcast_mac {
1251 u8 default_discard;
1252 u8 reserved1;
1253 __le16 attached_filters;
1254} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
1255
1256/**
1257 * struct iwl_bcast_filter_cmd - broadcast filtering configuration
1258 * @disable: enable (0) / disable (1)
1259 * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
1260 * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
1261 * @filters: broadcast filters
1262 * @macs: broadcast filtering configuration per-mac
1263 */
1264struct iwl_bcast_filter_cmd {
1265 u8 disable;
1266 u8 max_bcast_filters;
1267 u8 max_macs;
1268 u8 reserved1;
1269 struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
1270 struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
1271} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
1272
1273/*
1274 * enum iwl_mvm_marker_id - marker ids
1275 *
1276 * The ids for the different types of markers to insert into the usniffer logs
1277 */
1278enum iwl_mvm_marker_id {
1279 MARKER_ID_TX_FRAME_LATENCY = 1,
1280}; /* MARKER_ID_API_E_VER_1 */
1281
1282/**
1283 * struct iwl_mvm_marker - mark info into the usniffer logs
1284 *
1285 * (MARKER_CMD = 0xcb)
1286 *
1287 * Mark the UTC time stamp into the usniffer logs together with additional
1288 * metadata, so the usniffer output can be parsed.
1289 * In the command response the ucode will return the GP2 time.
1290 *
1291 * @dwLen: The number of dwords following this byte, including this byte.
1292 * @markerId: A unique marker id (iwl_mvm_marker_id).
1293 * @reserved: reserved.
1294 * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC
1295 * @metadata: additional meta data that will be written to the usniffer log
1296 */
1297struct iwl_mvm_marker {
1298 u8 dwLen;
1299 u8 markerId;
1300 __le16 reserved;
1301 __le64 timestamp;
1302 __le32 metadata[0];
1303} __packed; /* MARKER_API_S_VER_1 */
1304
1305/*
1306 * enum iwl_dc2dc_config_id - flag ids
1307 *
1308 * Ids of dc2dc configuration flags
1309 */
1310enum iwl_dc2dc_config_id {
1311 DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */
1312 DCDC_FREQ_TUNE_SET = 0x2,
1313}; /* MARKER_ID_API_E_VER_1 */
1314
1315/**
1316 * struct iwl_dc2dc_config_cmd - configure dc2dc values
1317 *
1318 * (DC2DC_CONFIG_CMD = 0x83)
1319 *
1320 * Set/Get & configure dc2dc values.
1321 * The command always returns the current dc2dc values.
1322 *
1323 * @flags: set/get dc2dc
1324 * @enable_low_power_mode: not used.
1325 * @dc2dc_freq_tune0: frequency divider - digital domain
1326 * @dc2dc_freq_tune1: frequency divider - analog domain
1327 */
1328struct iwl_dc2dc_config_cmd {
1329 __le32 flags;
1330 __le32 enable_low_power_mode; /* not used */
1331 __le32 dc2dc_freq_tune0;
1332 __le32 dc2dc_freq_tune1;
1333} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
1334
1335/**
1336 * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
1337 *
1338 * Current dc2dc values returned by the FW.
1339 *
1340 * @dc2dc_freq_tune0: frequency divider - digital domain
1341 * @dc2dc_freq_tune1: frequency divider - analog domain
1342 */
1343struct iwl_dc2dc_config_resp {
1344 __le32 dc2dc_freq_tune0;
1345 __le32 dc2dc_freq_tune1;
1346} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
1347
1348/***********************************
1349 * Smart Fifo API
1350 ***********************************/
1351/* Smart Fifo state */
1352enum iwl_sf_state {
1353 SF_LONG_DELAY_ON = 0, /* should never be called by driver */
1354 SF_FULL_ON,
1355 SF_UNINIT,
1356 SF_INIT_OFF,
1357 SF_HW_NUM_STATES
1358};
1359
1360/* Smart Fifo possible scenario */
1361enum iwl_sf_scenario {
1362 SF_SCENARIO_SINGLE_UNICAST,
1363 SF_SCENARIO_AGG_UNICAST,
1364 SF_SCENARIO_MULTICAST,
1365 SF_SCENARIO_BA_RESP,
1366 SF_SCENARIO_TX_RESP,
1367 SF_NUM_SCENARIO
1368};
1369
1370#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */
1371#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
1372
1373/* smart FIFO default values */
1374#define SF_W_MARK_SISO 6144
1375#define SF_W_MARK_MIMO2 8192
1376#define SF_W_MARK_MIMO3 6144
1377#define SF_W_MARK_LEGACY 4096
1378#define SF_W_MARK_SCAN 4096
1379
1380/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
1381#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
1382#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
1383#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
1384#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
1385#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */
1386#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
1387#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
1388#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
1389#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
1390#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
1391
1392/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
1393#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
1394#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
1395#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
1396#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
1397#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
1398#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
1399#define SF_BA_IDLE_TIMER 320 /* 300 uSec */
1400#define SF_BA_AGING_TIMER 2016 /* 2 mSec */
1401#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
1402#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
1403
1404#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
1405
1406#define SF_CFG_DUMMY_NOTIF_OFF BIT(16)
1407
1408/**
1409 * Smart Fifo configuration command.
1410 * @state: smart fifo state, types listed in enum %iwl_sf_state.
1411 * @watermark: Minimum allowed available free space in RXF for transient state.
1412 * @long_delay_timeouts: aging and idle timer values for each scenario
1413 * in long delay state.
1414 * @full_on_timeouts: timer values for each scenario in full on state.
1415 */
1416struct iwl_sf_cfg_cmd {
1417 __le32 state;
1418 __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
1419 __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
1420 __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
1421} __packed; /* SF_CFG_API_S_VER_2 */
1422
1423/***********************************
1424 * Location Aware Regulatory (LAR) API - MCC updates
1425 ***********************************/
1426
1427/**
1428 * struct iwl_mcc_update_cmd - Request the device to update geographic
1429 * regulatory profile according to the given MCC (Mobile Country Code).
1430 * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the world domain.
1431 * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
1432 * MCC in the cmd response will be the relevant MCC in the NVM.
1433 * @mcc: given mobile country code
1434 * @source_id: the source from where we got the MCC, see iwl_mcc_source
1435 * @reserved: reserved for alignment
1436 */
1437struct iwl_mcc_update_cmd {
1438 __le16 mcc;
1439 u8 source_id;
1440 u8 reserved;
1441} __packed; /* LAR_UPDATE_MCC_CMD_API_S */
1442
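/*
 * Editor's illustrative sketch (not part of the original file): encoding the
 * two-letter country code "US" into the 16-bit mcc field, high byte first,
 * matching the ASCII upper-case description above.
 */
static const struct iwl_mcc_update_cmd example_mcc_cmd = {
	.mcc = cpu_to_le16(('U' << 8) | 'S'),
};
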
1443/**
1444 * iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
1445 * Contains the new channel control profile map, if changed, and the new MCC
1446 * (mobile country code).
1447 * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
1448 * @status: see &enum iwl_mcc_update_status
1449 * @mcc: the new applied MCC
1450 * @cap: capabilities for all channels which matches the MCC
1451 * @source_id: the MCC source, see iwl_mcc_source
1452 * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
1453 * channels, depending on platform)
1454 * @channels: channel control data map, DWORD for each channel. Only the first
1455 * 16bits are used.
1456 */
1457struct iwl_mcc_update_resp {
1458 __le32 status;
1459 __le16 mcc;
1460 u8 cap;
1461 u8 source_id;
1462 __le32 n_channels;
1463 __le32 channels[0];
1464} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */
1465
1466/**
1467 * struct iwl_mcc_chub_notif - chub notifies of mcc change
1468 * (MCC_CHUB_UPDATE_CMD = 0xc9)
1469 * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
1470 * the cellular and connectivity cores that gets updates of the mcc, and
1471 * notifies the ucode directly of any mcc change.
1472 * The ucode requests the driver to request the device to update geographic
1473 * regulatory profile according to the given MCC (Mobile Country Code).
1474 * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the world domain.
1475 * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
1476 * MCC in the cmd response will be the relevant MCC in the NVM.
1477 * @mcc: given mobile country code
1478 * @source_id: identity of the change originator, see iwl_mcc_source
1479 * @reserved1: reserved for alignment
1480 */
1481struct iwl_mcc_chub_notif {
1482 u16 mcc;
1483 u8 source_id;
1484 u8 reserved1;
1485} __packed; /* LAR_MCC_NOTIFY_S */
1486
1487enum iwl_mcc_update_status {
1488 MCC_RESP_NEW_CHAN_PROFILE,
1489 MCC_RESP_SAME_CHAN_PROFILE,
1490 MCC_RESP_INVALID,
1491 MCC_RESP_NVM_DISABLED,
1492 MCC_RESP_ILLEGAL,
1493 MCC_RESP_LOW_PRIORITY,
1494};
1495
1496enum iwl_mcc_source {
1497 MCC_SOURCE_OLD_FW = 0,
1498 MCC_SOURCE_ME = 1,
1499 MCC_SOURCE_BIOS = 2,
1500 MCC_SOURCE_3G_LTE_HOST = 3,
1501 MCC_SOURCE_3G_LTE_DEVICE = 4,
1502 MCC_SOURCE_WIFI = 5,
1503 MCC_SOURCE_RESERVED = 6,
1504 MCC_SOURCE_DEFAULT = 7,
1505 MCC_SOURCE_UNINITIALIZED = 8,
1506 MCC_SOURCE_GET_CURRENT = 0x10
1507};
1508
1509/* DTS measurements */
1510
1511enum iwl_dts_measurement_flags {
1512 DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0),
1513 DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1),
1514};
1515
1516/**
1517 * iwl_dts_measurement_cmd - request DTS temperature and/or voltage measurements
1518 *
1519 * @flags: indicates which measurements we want as specified in &enum
1520 * iwl_dts_measurement_flags
1521 */
1522struct iwl_dts_measurement_cmd {
1523 __le32 flags;
1524} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */
1525
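/*
 * Editor's illustrative sketch (not part of the original file): a trigger
 * command requesting a temperature-only DTS measurement via the flag defined
 * above.
 */
static const struct iwl_dts_measurement_cmd example_dts_cmd = {
	.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
};
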
1526/**
1527* enum iwl_dts_control_measurement_mode - DTS measurement type
1528* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read
1529* back (latest value. Not waiting for new value). Use automatic
1530* SW DTS configuration.
1531* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings,
1532* trigger a DTS reading and provide the temperature read back
1533* when available.
1534* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read
1535* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result,
1536* without measurement trigger.
1537*/
1538enum iwl_dts_control_measurement_mode {
1539 DTS_AUTOMATIC = 0,
1540 DTS_REQUEST_READ = 1,
1541 DTS_OVER_WRITE = 2,
1542 DTS_DIRECT_WITHOUT_MEASURE = 3,
1543};
1544
1545/**
1546* enum iwl_dts_used - DTS to use or used for measurement in the DTS request
1547* @DTS_USE_TOP: Top
1548* @DTS_USE_CHAIN_A: chain A
1549* @DTS_USE_CHAIN_B: chain B
1550* @DTS_USE_CHAIN_C: chain C
1551* @XTAL_TEMPERATURE: read temperature from xtal
1552*/
1553enum iwl_dts_used {
1554 DTS_USE_TOP = 0,
1555 DTS_USE_CHAIN_A = 1,
1556 DTS_USE_CHAIN_B = 2,
1557 DTS_USE_CHAIN_C = 3,
1558 XTAL_TEMPERATURE = 4,
1559};
1560
1561/**
1562* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode
1563* @DTS_BIT6_MODE: bit 6 mode
1564* @DTS_BIT8_MODE: bit 8 mode
1565*/
1566enum iwl_dts_bit_mode {
1567 DTS_BIT6_MODE = 0,
1568 DTS_BIT8_MODE = 1,
1569};
1570
1571/**
1572 * iwl_ext_dts_measurement_cmd - request extended DTS temperature measurements
1573 * @control_mode: see &enum iwl_dts_control_measurement_mode
1574 * @temperature: used when over write DTS mode is selected
1575 * @sensor: set temperature sensor to use. See &enum iwl_dts_used
1576 * @avg_factor: average factor to DTS in request DTS read mode
1577 * @bit_mode: value defines the DTS bit mode to use. See &enum iwl_dts_bit_mode
1578 * @step_duration: step duration for the DTS
1579 */
1580struct iwl_ext_dts_measurement_cmd {
1581 __le32 control_mode;
1582 __le32 temperature;
1583 __le32 sensor;
1584 __le32 avg_factor;
1585 __le32 bit_mode;
1586 __le32 step_duration;
1587} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
1588
1589/**
1590 * iwl_dts_measurement_notif - notification received with the measurements
1591 *
1592 * @temp: the measured temperature
1593 * @voltage: the measured voltage
1594 */
1595struct iwl_dts_measurement_notif {
1596 __le32 temp;
1597 __le32 voltage;
1598} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
1599
1600/***********************************
1601 * TDLS API
1602 ***********************************/
1603
1604/* Type of TDLS request */
1605enum iwl_tdls_channel_switch_type {
1606 TDLS_SEND_CHAN_SW_REQ = 0,
1607 TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
1608 TDLS_MOVE_CH,
1609}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
1610
1611/**
1612 * Switch timing sub-element in a TDLS channel-switch command
1613 * @frame_timestamp: GP2 timestamp of channel-switch request/response packet
1614 * received from peer
1615 * @max_offchan_duration: What amount of microseconds out of a DTIM is given
1616 * to the TDLS off-channel communication. For instance if the DTIM is
1617 * 200TU and the TDLS peer is to be given 25% of the time, the value
1618 * given will be 50TU, or 50 * 1024 if translated into microseconds.
1619 * @switch_time: switch time the peer sent in its channel switch timing IE
1620 * @switch_timeout: switch timeout the peer sent in its channel switch timing IE
1621 */
1622struct iwl_tdls_channel_switch_timing {
1623 __le32 frame_timestamp; /* GP2 time of peer packet Rx */
1624 __le32 max_offchan_duration; /* given in micro-seconds */
1625 __le32 switch_time; /* given in micro-seconds */
1626 __le32 switch_timeout; /* given in micro-seconds */
1627} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
1628
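/*
 * Editor's illustrative sketch (hypothetical helper, not from the original
 * file): converting the "25% of a 200 TU DTIM" example from the comment above
 * into the microsecond value max_offchan_duration expects (1 TU = 1024 usec).
 */
static inline u32 example_max_offchan_usec(u32 dtim_tu, u32 peer_percent)
{
	/* e.g. dtim_tu = 200, peer_percent = 25 -> 50 TU -> 51200 usec */
	return (dtim_tu * peer_percent / 100) * 1024;
}
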
1629#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
1630
1631/**
1632 * TDLS channel switch frame template
1633 *
1634 * A template representing a TDLS channel-switch request or response frame
1635 *
1636 * @switch_time_offset: offset to the channel switch timing IE in the template
1637 * @tx_cmd: Tx parameters for the frame
1638 * @data: frame data
1639 */
1640struct iwl_tdls_channel_switch_frame {
1641 __le32 switch_time_offset;
1642 struct iwl_tx_cmd tx_cmd;
1643 u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
1644} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
1645
1646/**
1647 * TDLS channel switch command
1648 *
1649 * The command is sent to initiate a channel switch and also in response to
1650 * incoming TDLS channel-switch request/response packets from remote peers.
1651 *
1652 * @switch_type: see &enum iwl_tdls_channel_switch_type
1653 * @peer_sta_id: station id of TDLS peer
1654 * @ci: channel we switch to
1655 * @timing: timing related data for command
1656 * @frame: channel-switch request/response template, depending on switch_type
1657 */
1658struct iwl_tdls_channel_switch_cmd {
1659 u8 switch_type;
1660 __le32 peer_sta_id;
1661 struct iwl_fw_channel_info ci;
1662 struct iwl_tdls_channel_switch_timing timing;
1663 struct iwl_tdls_channel_switch_frame frame;
1664} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
1665
1666/**
1667 * TDLS channel switch start notification
1668 *
1669 * @status: non-zero on success
1670 * @offchannel_duration: duration given in microseconds
1671 * @sta_id: peer currently performing the channel-switch with
1672 */
1673struct iwl_tdls_channel_switch_notif {
1674 __le32 status;
1675 __le32 offchannel_duration;
1676 __le32 sta_id;
1677} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
1678
1679/**
1680 * TDLS station info
1681 *
1682 * @sta_id: station id of the TDLS peer
1683 * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
1684 * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
1685 * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
1686 */
1687struct iwl_tdls_sta_info {
1688 u8 sta_id;
1689 u8 tx_to_peer_tid;
1690 __le16 tx_to_peer_ssn;
1691 __le32 is_initiator;
1692} __packed; /* TDLS_STA_INFO_VER_1 */
1693
1694/**
1695 * TDLS basic config command
1696 *
1697 * @id_and_color: MAC id and color being configured
1698 * @tdls_peer_count: amount of currently connected TDLS peers
1699 * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx
1700 * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
1701 * @sta_info: per-station info. Only the first tdls_peer_count entries are set
1702 * @pti_req_data_offset: offset of network-level data for the PTI template
1703 * @pti_req_tx_cmd: Tx parameters for PTI request template
1704 * @pti_req_template: PTI request template data
1705 */
1706struct iwl_tdls_config_cmd {
1707 __le32 id_and_color; /* mac id and color */
1708 u8 tdls_peer_count;
1709 u8 tx_to_ap_tid;
1710 __le16 tx_to_ap_ssn;
1711 struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
1712
1713 __le32 pti_req_data_offset;
1714 struct iwl_tx_cmd pti_req_tx_cmd;
1715 u8 pti_req_template[0];
1716} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
1717
1718/**
1719 * TDLS per-station config information from FW
1720 *
1721 * @sta_id: station id of the TDLS peer
1722 * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
1723 * the peer
1724 */
1725struct iwl_tdls_config_sta_info_res {
1726 __le16 sta_id;
1727 __le16 tx_to_peer_last_seq;
1728} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
1729
1730/**
1731 * TDLS config information from FW
1732 *
1733 * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
1734 * @sta_info: per-station TDLS config information
1735 */
1736struct iwl_tdls_config_res {
1737 __le32 tx_to_ap_last_seq;
1738 struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
1739} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
1740
1741#define TX_FIFO_MAX_NUM 8
1742#define RX_FIFO_MAX_NUM 2
1743
1744/**
1745 * Shared memory configuration information from the FW
1746 *
1747 * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
1748 * accessible)
1749 * @shared_mem_size: shared memory size
1750 * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
1751 * 0x0 as accessible only via DBGM RDAT)
1752 * @sample_buff_size: internal sample buff size
1753 * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre
1754 * 8000 HW set to 0x0 as not accessible)
1755 * @txfifo_size: size of TXF0 ... TXF7
1756 * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
1757 * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
1758 * when paging is not supported this should be 0
1759 * @page_buff_size: size of %page_buff_addr
1760 */
1761struct iwl_shared_mem_cfg {
1762 __le32 shared_mem_addr;
1763 __le32 shared_mem_size;
1764 __le32 sample_buff_addr;
1765 __le32 sample_buff_size;
1766 __le32 txfifo_addr;
1767 __le32 txfifo_size[TX_FIFO_MAX_NUM];
1768 __le32 rxfifo_size[RX_FIFO_MAX_NUM];
1769 __le32 page_buff_addr;
1770 __le32 page_buff_size;
1771} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
1772
1773#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
deleted file mode 100644
index d906fa13ba97..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ /dev/null
@@ -1,1166 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <net/mac80211.h>
66
67#include "iwl-trans.h"
68#include "iwl-op-mode.h"
69#include "iwl-fw.h"
70#include "iwl-debug.h"
71#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
72#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
73#include "iwl-prph.h"
74#include "iwl-eeprom-parse.h"
75
76#include "mvm.h"
77#include "iwl-phy-db.h"
78
79#define MVM_UCODE_ALIVE_TIMEOUT HZ
80#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
81
82#define UCODE_VALID_OK cpu_to_le32(0x1)
83
84struct iwl_mvm_alive_data {
85 bool valid;
86 u32 scd_base_addr;
87};
88
89static inline const struct fw_img *
90iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
91{
92 if (ucode_type >= IWL_UCODE_TYPE_MAX)
93 return NULL;
94
95 return &mvm->fw->img[ucode_type];
96}
97
98static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
99{
100 struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
101 .valid = cpu_to_le32(valid_tx_ant),
102 };
103
104 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
105 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
106 sizeof(tx_ant_cmd), &tx_ant_cmd);
107}
108
109static void iwl_free_fw_paging(struct iwl_mvm *mvm)
110{
111 int i;
112
113 if (!mvm->fw_paging_db[0].fw_paging_block)
114 return;
115
116 for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
117 if (!mvm->fw_paging_db[i].fw_paging_block) {
118 IWL_DEBUG_FW(mvm,
119 "Paging: block %d already freed, continue to next page\n",
120 i);
121
122 continue;
123 }
124
125 __free_pages(mvm->fw_paging_db[i].fw_paging_block,
126 get_order(mvm->fw_paging_db[i].fw_paging_size));
127 }
128 kfree(mvm->trans->paging_download_buf);
129 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
130}
131
132static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
133{
134 int sec_idx, idx;
135 u32 offset = 0;
136
137 /*
138	 * find where the paging image starts:
139	 * if CPU2 exists and it's in paging format, then the image looks like:
140	 * CPU1 sections (2 or more)
141	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
142	 * CPU2 sections (not paged)
143	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
144	 * sections from the CPU2 paging section
145 * CPU2 paging CSS
146 * CPU2 paging image (including instruction and data)
147 */
148 for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
149 if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
150 sec_idx++;
151 break;
152 }
153 }
154
155 if (sec_idx >= IWL_UCODE_SECTION_MAX) {
156 IWL_ERR(mvm, "driver didn't find paging image\n");
157 iwl_free_fw_paging(mvm);
158 return -EINVAL;
159 }
160
161 /* copy the CSS block to the dram */
162 IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
163 sec_idx);
164
165 memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
166 image->sec[sec_idx].data,
167 mvm->fw_paging_db[0].fw_paging_size);
168
169 IWL_DEBUG_FW(mvm,
170 "Paging: copied %d CSS bytes to first block\n",
171 mvm->fw_paging_db[0].fw_paging_size);
172
173 sec_idx++;
174
175 /*
176 * copy the paging blocks to the dram
177	 * the loop index starts from 1 since the CSS block was already copied to
178	 * dram at index 0.
179	 * the loop stops at num_of_paging_blk since the last block is not full.
180 */
181 for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
182 memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
183 image->sec[sec_idx].data + offset,
184 mvm->fw_paging_db[idx].fw_paging_size);
185
186 IWL_DEBUG_FW(mvm,
187 "Paging: copied %d paging bytes to block %d\n",
188 mvm->fw_paging_db[idx].fw_paging_size,
189 idx);
190
191 offset += mvm->fw_paging_db[idx].fw_paging_size;
192 }
193
194 /* copy the last paging block */
195 if (mvm->num_of_pages_in_last_blk > 0) {
196 memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
197 image->sec[sec_idx].data + offset,
198 FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
199
200 IWL_DEBUG_FW(mvm,
201 "Paging: copied %d pages in the last block %d\n",
202 mvm->num_of_pages_in_last_blk, idx);
203 }
204
205 return 0;
206}
207
208static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
209 const struct fw_img *image)
210{
211 struct page *block;
212 dma_addr_t phys = 0;
213 int blk_idx = 0;
214 int order, num_of_pages;
215 int dma_enabled;
216
217 if (mvm->fw_paging_db[0].fw_paging_block)
218 return 0;
219
220 dma_enabled = is_device_dma_capable(mvm->trans->dev);
221
222	/* ensure PAGING_BLOCK_SIZE is exactly 2^BLOCK_2_EXP_SIZE */
223 BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
224
225 num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
226 mvm->num_of_paging_blk = ((num_of_pages - 1) /
227 NUM_OF_PAGE_PER_GROUP) + 1;
228
229 mvm->num_of_pages_in_last_blk =
230 num_of_pages -
231 NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
232
233 IWL_DEBUG_FW(mvm,
234 "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
235 mvm->num_of_paging_blk,
236 mvm->num_of_pages_in_last_blk);
237
238 /* allocate block of 4Kbytes for paging CSS */
239 order = get_order(FW_PAGING_SIZE);
240 block = alloc_pages(GFP_KERNEL, order);
241 if (!block) {
242 /* free all the previous pages since we failed */
243 iwl_free_fw_paging(mvm);
244 return -ENOMEM;
245 }
246
247 mvm->fw_paging_db[blk_idx].fw_paging_block = block;
248 mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
249
250 if (dma_enabled) {
251 phys = dma_map_page(mvm->trans->dev, block, 0,
252 PAGE_SIZE << order, DMA_BIDIRECTIONAL);
253 if (dma_mapping_error(mvm->trans->dev, phys)) {
254 /*
255 * free the previous pages and the current one since
256 * we failed to map_page.
257 */
258 iwl_free_fw_paging(mvm);
259 return -ENOMEM;
260 }
261 mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
262 } else {
263 mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
264 blk_idx << BLOCK_2_EXP_SIZE;
265 }
266
267 IWL_DEBUG_FW(mvm,
268 "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
269 order);
270
271 /*
272 * allocate blocks in dram.
273	 * since the CSS was allocated in fw_paging_db[0], the loop starts from index 1
274 */
275 for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
276 /* allocate block of PAGING_BLOCK_SIZE (32K) */
277 order = get_order(PAGING_BLOCK_SIZE);
278 block = alloc_pages(GFP_KERNEL, order);
279 if (!block) {
280 /* free all the previous pages since we failed */
281 iwl_free_fw_paging(mvm);
282 return -ENOMEM;
283 }
284
285 mvm->fw_paging_db[blk_idx].fw_paging_block = block;
286 mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
287
288 if (dma_enabled) {
289 phys = dma_map_page(mvm->trans->dev, block, 0,
290 PAGE_SIZE << order,
291 DMA_BIDIRECTIONAL);
292 if (dma_mapping_error(mvm->trans->dev, phys)) {
293 /*
294 * free the previous pages and the current one
295 * since we failed to map_page.
296 */
297 iwl_free_fw_paging(mvm);
298 return -ENOMEM;
299 }
300 mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
301 } else {
302 mvm->fw_paging_db[blk_idx].fw_paging_phys =
303 PAGING_ADDR_SIG |
304 blk_idx << BLOCK_2_EXP_SIZE;
305 }
306
307 IWL_DEBUG_FW(mvm,
308 "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
309 order);
310 }
311
312 return 0;
313}
314
315static int iwl_save_fw_paging(struct iwl_mvm *mvm,
316 const struct fw_img *fw)
317{
318 int ret;
319
320 ret = iwl_alloc_fw_paging_mem(mvm, fw);
321 if (ret)
322 return ret;
323
324 return iwl_fill_paging_mem(mvm, fw);
325}
326
327/* send paging cmd to FW in case CPU2 has paging image */
328static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
329{
330 int blk_idx;
331 __le32 dev_phy_addr;
332 struct iwl_fw_paging_cmd fw_paging_cmd = {
333 .flags =
334 cpu_to_le32(PAGING_CMD_IS_SECURED |
335 PAGING_CMD_IS_ENABLED |
336 (mvm->num_of_pages_in_last_blk <<
337 PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
338 .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
339 .block_num = cpu_to_le32(mvm->num_of_paging_blk),
340 };
341
342	/* loop over all paging blocks + the CSS block */
343 for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
344 dev_phy_addr =
345 cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
346 PAGE_2_EXP_SIZE);
347 fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
348 }
349
350 return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
351 IWL_ALWAYS_LONG_GROUP, 0),
352 0, sizeof(fw_paging_cmd), &fw_paging_cmd);
353}
354
355/*
356 * Send paging item cmd to FW in case CPU2 has paging image
357 */
358static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
359{
360 int ret;
361 struct iwl_fw_get_item_cmd fw_get_item_cmd = {
362 .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
363 };
364
365 struct iwl_fw_get_item_resp *item_resp;
366 struct iwl_host_cmd cmd = {
367 .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
368 .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
369 .data = { &fw_get_item_cmd, },
370 };
371
372 cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
373
374 ret = iwl_mvm_send_cmd(mvm, &cmd);
375 if (ret) {
376 IWL_ERR(mvm,
377 "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
378 ret);
379 return ret;
380 }
381
382 item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
383 if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
384 IWL_ERR(mvm,
385 "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
386 le32_to_cpu(item_resp->item_id));
387 ret = -EIO;
388 goto exit;
389 }
390
391 mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
392 GFP_KERNEL);
393 if (!mvm->trans->paging_download_buf) {
394 ret = -ENOMEM;
395 goto exit;
396 }
397 mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
398 mvm->trans->paging_db = mvm->fw_paging_db;
399 IWL_DEBUG_FW(mvm,
400 "Paging: got paging request address (paging_req_addr 0x%08x)\n",
401 mvm->trans->paging_req_addr);
402
403exit:
404 iwl_free_resp(&cmd);
405
406 return ret;
407}
408
409static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
410 struct iwl_rx_packet *pkt, void *data)
411{
412 struct iwl_mvm *mvm =
413 container_of(notif_wait, struct iwl_mvm, notif_wait);
414 struct iwl_mvm_alive_data *alive_data = data;
415 struct mvm_alive_resp_ver1 *palive1;
416 struct mvm_alive_resp_ver2 *palive2;
417 struct mvm_alive_resp *palive;
418
419 if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
420 palive1 = (void *)pkt->data;
421
422 mvm->support_umac_log = false;
423 mvm->error_event_table =
424 le32_to_cpu(palive1->error_event_table_ptr);
425 mvm->log_event_table =
426 le32_to_cpu(palive1->log_event_table_ptr);
427 alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
428
429 alive_data->valid = le16_to_cpu(palive1->status) ==
430 IWL_ALIVE_STATUS_OK;
431 IWL_DEBUG_FW(mvm,
432 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
433 le16_to_cpu(palive1->status), palive1->ver_type,
434 palive1->ver_subtype, palive1->flags);
435 } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
436 palive2 = (void *)pkt->data;
437
438 mvm->error_event_table =
439 le32_to_cpu(palive2->error_event_table_ptr);
440 mvm->log_event_table =
441 le32_to_cpu(palive2->log_event_table_ptr);
442 alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
443 mvm->umac_error_event_table =
444 le32_to_cpu(palive2->error_info_addr);
445 mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
446 mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
447
448 alive_data->valid = le16_to_cpu(palive2->status) ==
449 IWL_ALIVE_STATUS_OK;
450 if (mvm->umac_error_event_table)
451 mvm->support_umac_log = true;
452
453 IWL_DEBUG_FW(mvm,
454 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
455 le16_to_cpu(palive2->status), palive2->ver_type,
456 palive2->ver_subtype, palive2->flags);
457
458 IWL_DEBUG_FW(mvm,
459 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
460 palive2->umac_major, palive2->umac_minor);
461 } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
462 palive = (void *)pkt->data;
463
464 mvm->error_event_table =
465 le32_to_cpu(palive->error_event_table_ptr);
466 mvm->log_event_table =
467 le32_to_cpu(palive->log_event_table_ptr);
468 alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
469 mvm->umac_error_event_table =
470 le32_to_cpu(palive->error_info_addr);
471 mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
472 mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
473
474 alive_data->valid = le16_to_cpu(palive->status) ==
475 IWL_ALIVE_STATUS_OK;
476 if (mvm->umac_error_event_table)
477 mvm->support_umac_log = true;
478
479 IWL_DEBUG_FW(mvm,
480 "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
481 le16_to_cpu(palive->status), palive->ver_type,
482 palive->ver_subtype, palive->flags);
483
484 IWL_DEBUG_FW(mvm,
485 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
486 le32_to_cpu(palive->umac_major),
487 le32_to_cpu(palive->umac_minor));
488 }
489
490 return true;
491}
492
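/*
 * Notification-wait callback for the INIT flow: store each
 * CALIB_RES_NOTIF_PHY_DB section in the PHY DB and keep waiting
 * (return false) until INIT_COMPLETE_NOTIF arrives.
 */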
493static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
494 struct iwl_rx_packet *pkt, void *data)
495{
496 struct iwl_phy_db *phy_db = data;
497
498 if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
499 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
500 return true;
501 }
502
503 WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
504
505 return false;
506}
507
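/*
 * Start the requested ucode image (or the usniffer image if the debug
 * configuration asks for it) and wait for the ALIVE notification; on
 * success update the SF size, configure FW paging if present and reset
 * the queue bookkeeping.
 */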
508static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
509 enum iwl_ucode_type ucode_type)
510{
511 struct iwl_notification_wait alive_wait;
512 struct iwl_mvm_alive_data alive_data;
513 const struct fw_img *fw;
514 int ret, i;
515 enum iwl_ucode_type old_type = mvm->cur_ucode;
516 static const u16 alive_cmd[] = { MVM_ALIVE };
517 struct iwl_sf_region st_fwrd_space;
518
519 if (ucode_type == IWL_UCODE_REGULAR &&
520 iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
521 fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
522 else
523 fw = iwl_get_ucode_image(mvm, ucode_type);
524 if (WARN_ON(!fw))
525 return -EINVAL;
526 mvm->cur_ucode = ucode_type;
527 mvm->ucode_loaded = false;
528
529 iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
530 alive_cmd, ARRAY_SIZE(alive_cmd),
531 iwl_alive_fn, &alive_data);
532
533 ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
534 if (ret) {
535 mvm->cur_ucode = old_type;
536 iwl_remove_notification(&mvm->notif_wait, &alive_wait);
537 return ret;
538 }
539
540 /*
541 * Some things may run in the background now, but we
542 * just wait for the ALIVE notification here.
543 */
544 ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
545 MVM_UCODE_ALIVE_TIMEOUT);
546 if (ret) {
547 if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
548 IWL_ERR(mvm,
549 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
550 iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
551 iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
552 mvm->cur_ucode = old_type;
553 return ret;
554 }
555
556 if (!alive_data.valid) {
557 IWL_ERR(mvm, "Loaded ucode is not valid!\n");
558 mvm->cur_ucode = old_type;
559 return -EIO;
560 }
561
562 /*
563 * update the sdio allocation according to the pointer we get in the
564 * alive notification.
565 */
566 st_fwrd_space.addr = mvm->sf_space.addr;
567 st_fwrd_space.size = mvm->sf_space.size;
568 ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
569 if (ret) {
570 IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
571 return ret;
572 }
573
574 iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
575
576 /*
577	 * Configure and operate the FW paging mechanism.
578	 * The driver configures the paging flow only once; the CPU2 paging
579	 * image is included in the IWL_UCODE_INIT image.
580 */
581 if (fw->paging_mem_size) {
582 /*
583		 * When DMA is not enabled, the driver needs to copy / write
584		 * the downloaded / uploaded page to / from the SMEM.
585		 * This gets the location of the place where the pages are
586		 * stored.
587 */
588 if (!is_device_dma_capable(mvm->trans->dev)) {
589 ret = iwl_trans_get_paging_item(mvm);
590 if (ret) {
591 IWL_ERR(mvm, "failed to get FW paging item\n");
592 return ret;
593 }
594 }
595
596 ret = iwl_save_fw_paging(mvm, fw);
597 if (ret) {
598 IWL_ERR(mvm, "failed to save the FW paging image\n");
599 return ret;
600 }
601
602 ret = iwl_send_paging_cmd(mvm, fw);
603 if (ret) {
604 IWL_ERR(mvm, "failed to send the paging cmd\n");
605 iwl_free_fw_paging(mvm);
606 return ret;
607 }
608 }
609
610 /*
611 * Note: all the queues are enabled as part of the interface
612 * initialization, but in firmware restart scenarios they
613 * could be stopped, so wake them up. In firmware restart,
614 * mac80211 will have the queues stopped as well until the
615 * reconfiguration completes. During normal startup, they
616 * will be empty.
617 */
618
619 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
620 mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
621
622 for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
623 atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
624
625 mvm->ucode_loaded = true;
626
627 return 0;
628}
629
630static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
631{
632 struct iwl_phy_cfg_cmd phy_cfg_cmd;
633 enum iwl_ucode_type ucode_type = mvm->cur_ucode;
634
635 /* Set parameters */
636 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
637 phy_cfg_cmd.calib_control.event_trigger =
638 mvm->fw->default_calib[ucode_type].event_trigger;
639 phy_cfg_cmd.calib_control.flow_trigger =
640 mvm->fw->default_calib[ucode_type].flow_trigger;
641
642 IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
643 phy_cfg_cmd.phy_cfg);
644
645 return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
646 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
647}
648
649int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
650{
651 struct iwl_notification_wait calib_wait;
652 static const u16 init_complete[] = {
653 INIT_COMPLETE_NOTIF,
654 CALIB_RES_NOTIF_PHY_DB
655 };
656 int ret;
657
658 lockdep_assert_held(&mvm->mutex);
659
660 if (WARN_ON_ONCE(mvm->calibrating))
661 return 0;
662
663 iwl_init_notification_wait(&mvm->notif_wait,
664 &calib_wait,
665 init_complete,
666 ARRAY_SIZE(init_complete),
667 iwl_wait_phy_db_entry,
668 mvm->phy_db);
669
670 /* Will also start the device */
671 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
672 if (ret) {
673 IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
674 goto error;
675 }
676
677 ret = iwl_send_bt_init_conf(mvm);
678 if (ret)
679 goto error;
680
681 /* Read the NVM only at driver load time, no need to do this twice */
682 if (read_nvm) {
683 /* Read nvm */
684 ret = iwl_nvm_init(mvm, true);
685 if (ret) {
686 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
687 goto error;
688 }
689 }
690
691 /* In case we read the NVM from external file, load it to the NIC */
692 if (mvm->nvm_file_name)
693 iwl_mvm_load_nvm_to_nic(mvm);
694
695 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
696 WARN_ON(ret);
697
698 /*
699	 * Abort after reading the NVM in case RF-kill is on; we will complete
700	 * the init sequence later, when RF-kill is switched off
701 */
702 if (iwl_mvm_is_radio_hw_killed(mvm)) {
703 IWL_DEBUG_RF_KILL(mvm,
704 "jump over all phy activities due to RF kill\n");
705 iwl_remove_notification(&mvm->notif_wait, &calib_wait);
706 ret = 1;
707 goto out;
708 }
709
710 mvm->calibrating = true;
711
712 /* Send TX valid antennas before triggering calibrations */
713 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
714 if (ret)
715 goto error;
716
717 /*
718	 * Send the PHY configuration command to the INIT uCode
719	 * to start the 16.0 uCode init image internal calibrations.
720 */
721 ret = iwl_send_phy_cfg_cmd(mvm);
722 if (ret) {
723 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
724 ret);
725 goto error;
726 }
727
728 /*
729 * Some things may run in the background now, but we
730 * just wait for the calibration complete notification.
731 */
732 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
733 MVM_UCODE_CALIB_TIMEOUT);
734
735 if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
736 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
737 ret = 1;
738 }
739 goto out;
740
741error:
742 iwl_remove_notification(&mvm->notif_wait, &calib_wait);
743out:
744 mvm->calibrating = false;
745 if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
746		/* we want to debug INIT and we have no NVM - fake one */
747 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
748 sizeof(struct ieee80211_channel) +
749 sizeof(struct ieee80211_rate),
750 GFP_KERNEL);
751 if (!mvm->nvm_data)
752 return -ENOMEM;
753 mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
754 mvm->nvm_data->bands[0].n_channels = 1;
755 mvm->nvm_data->bands[0].n_bitrates = 1;
756 mvm->nvm_data->bands[0].bitrates =
757 (void *)mvm->nvm_data->channels + 1;
758 mvm->nvm_data->bands[0].bitrates->hw_value = 10;
759 }
760
761 return ret;
762}
763
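/*
 * Query SHARED_MEM_CFG and cache the firmware's shared memory layout
 * (sample buffer, TX/RX FIFO sizes, paging buffer) in mvm->shared_mem_cfg.
 */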
764static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
765{
766 struct iwl_host_cmd cmd = {
767 .id = SHARED_MEM_CFG,
768 .flags = CMD_WANT_SKB,
769 .data = { NULL, },
770 .len = { 0, },
771 };
772 struct iwl_rx_packet *pkt;
773 struct iwl_shared_mem_cfg *mem_cfg;
774 u32 i;
775
776 lockdep_assert_held(&mvm->mutex);
777
778 if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
779 return;
780
781 pkt = cmd.resp_pkt;
782 mem_cfg = (void *)pkt->data;
783
784 mvm->shared_mem_cfg.shared_mem_addr =
785 le32_to_cpu(mem_cfg->shared_mem_addr);
786 mvm->shared_mem_cfg.shared_mem_size =
787 le32_to_cpu(mem_cfg->shared_mem_size);
788 mvm->shared_mem_cfg.sample_buff_addr =
789 le32_to_cpu(mem_cfg->sample_buff_addr);
790 mvm->shared_mem_cfg.sample_buff_size =
791 le32_to_cpu(mem_cfg->sample_buff_size);
792 mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
793 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
794 mvm->shared_mem_cfg.txfifo_size[i] =
795 le32_to_cpu(mem_cfg->txfifo_size[i]);
796 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
797 mvm->shared_mem_cfg.rxfifo_size[i] =
798 le32_to_cpu(mem_cfg->rxfifo_size[i]);
799 mvm->shared_mem_cfg.page_buff_addr =
800 le32_to_cpu(mem_cfg->page_buff_addr);
801 mvm->shared_mem_cfg.page_buff_size =
802 le32_to_cpu(mem_cfg->page_buff_size);
803 IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
804
805 iwl_free_resp(&cmd);
806}
807
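/*
 * Arm a firmware debug data collection: store the dump descriptor and
 * trigger, and schedule the dump worker after the trigger's stop delay
 * (if any).
 */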
808int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
809 struct iwl_mvm_dump_desc *desc,
810 struct iwl_fw_dbg_trigger_tlv *trigger)
811{
812 unsigned int delay = 0;
813
814 if (trigger)
815 delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
816
817 if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
818 return -EBUSY;
819
820 if (WARN_ON(mvm->fw_dump_desc))
821 iwl_mvm_free_fw_dump_desc(mvm);
822
823 IWL_WARN(mvm, "Collecting data: trigger %d fired.\n",
824 le32_to_cpu(desc->trig_desc.type));
825
826 mvm->fw_dump_desc = desc;
827 mvm->fw_dump_trig = trigger;
828
829 queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
830
831 return 0;
832}
833
834int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
835 const char *str, size_t len,
836 struct iwl_fw_dbg_trigger_tlv *trigger)
837{
838 struct iwl_mvm_dump_desc *desc;
839
840 desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
841 if (!desc)
842 return -ENOMEM;
843
844 desc->len = len;
845 desc->trig_desc.type = cpu_to_le32(trig);
846 memcpy(desc->trig_desc.data, str, len);
847
848 return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
849}
850
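/*
 * Collect firmware debug data for a trigger with an optional printf-style
 * description, decrementing the trigger's remaining occurrence count.
 */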
851int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
852 struct iwl_fw_dbg_trigger_tlv *trigger,
853 const char *fmt, ...)
854{
855 u16 occurrences = le16_to_cpu(trigger->occurrences);
856 int ret, len = 0;
857 char buf[64];
858
859 if (!occurrences)
860 return 0;
861
862 if (fmt) {
863 va_list ap;
864
865 buf[sizeof(buf) - 1] = '\0';
866
867 va_start(ap, fmt);
868 vsnprintf(buf, sizeof(buf), fmt, ap);
869 va_end(ap);
870
871 /* check for truncation */
872 if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
873 buf[sizeof(buf) - 1] = '\0';
874
875 len = strlen(buf) + 1;
876 }
877
878 ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
879 trigger);
880
881 if (ret)
882 return ret;
883
884 trigger->occurrences = cpu_to_le16(occurrences - 1);
885 return 0;
886}
887
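/*
 * Re-enable the hard-coded "early start" firmware debug sampling:
 * family 7000 devices use the monitor buffer sample control register,
 * newer families use DBGC_IN_SAMPLE.
 */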
888static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
889{
890 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
891 iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
892 else
893 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
894}
895
896int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
897{
898 u8 *ptr;
899 int ret;
900 int i;
901
902 if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv),
903 "Invalid configuration %d\n", conf_id))
904 return -EINVAL;
905
906 /* EARLY START - firmware's configuration is hard coded */
907 if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
908 !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
909 conf_id == FW_DBG_START_FROM_ALIVE) {
910 iwl_mvm_restart_early_start(mvm);
911 return 0;
912 }
913
914 if (!mvm->fw->dbg_conf_tlv[conf_id])
915 return -EINVAL;
916
917 if (mvm->fw_dbg_conf != FW_DBG_INVALID)
918 IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n",
919 mvm->fw_dbg_conf);
920
921 /* Send all HCMDs for configuring the FW debug */
922 ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd;
923 for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
924 struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
925
926 ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0,
927 le16_to_cpu(cmd->len), cmd->data);
928 if (ret)
929 return ret;
930
931 ptr += sizeof(*cmd);
932 ptr += le16_to_cpu(cmd->len);
933 }
934
935 mvm->fw_dbg_conf = conf_id;
936 return ret;
937}
938
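/*
 * Enable the PCIe LTR (Latency Tolerance Reporting) feature in the
 * firmware when the transport layer reports LTR support.
 */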
939static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
940{
941 struct iwl_ltr_config_cmd cmd = {
942 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
943 };
944
945 if (!mvm->trans->ltr_enabled)
946 return 0;
947
948 return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
949 sizeof(cmd), &cmd);
950}
951
952int iwl_mvm_up(struct iwl_mvm *mvm)
953{
954 int ret, i;
955 struct ieee80211_channel *chan;
956 struct cfg80211_chan_def chandef;
957
958 lockdep_assert_held(&mvm->mutex);
959
960 ret = iwl_trans_start_hw(mvm->trans);
961 if (ret)
962 return ret;
963
964 /*
965 * If we haven't completed the run of the init ucode during
966 * module loading, load init ucode now
967 * (for example, if we were in RFKILL)
968 */
969 ret = iwl_run_init_mvm_ucode(mvm, false);
970 if (ret && !iwlmvm_mod_params.init_dbg) {
971 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
972 /* this can't happen */
973 if (WARN_ON(ret > 0))
974 ret = -ERFKILL;
975 goto error;
976 }
977 if (!iwlmvm_mod_params.init_dbg) {
978 /*
979 * Stop and start the transport without entering low power
980 * mode. This will save the state of other components on the
981		 * device that are triggered by the INIT firmware (MFUART).
982 */
983 _iwl_trans_stop_device(mvm->trans, false);
984 ret = _iwl_trans_start_hw(mvm->trans, false);
985 if (ret)
986 goto error;
987 }
988
989 if (iwlmvm_mod_params.init_dbg)
990 return 0;
991
992 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
993 if (ret) {
994 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
995 goto error;
996 }
997
998 iwl_mvm_get_shared_mem_conf(mvm);
999
1000 ret = iwl_mvm_sf_update(mvm, NULL, false);
1001 if (ret)
1002 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
1003
1004 mvm->fw_dbg_conf = FW_DBG_INVALID;
1005 /* if we have a destination, assume EARLY START */
1006 if (mvm->fw->dbg_dest_tlv)
1007 mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
1008 iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
1009
1010 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1011 if (ret)
1012 goto error;
1013
1014 ret = iwl_send_bt_init_conf(mvm);
1015 if (ret)
1016 goto error;
1017
1018	/* Send phy db control command and then phy db calibration */
1019 ret = iwl_send_phy_db_data(mvm->phy_db);
1020 if (ret)
1021 goto error;
1022
1023 ret = iwl_send_phy_cfg_cmd(mvm);
1024 if (ret)
1025 goto error;
1026
1027 /* init the fw <-> mac80211 STA mapping */
1028 for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
1029 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1030
1031 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
1032
1033 /* reset quota debouncing buffer - 0xff will yield invalid data */
1034 memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
1035
1036 /* Add auxiliary station for scanning */
1037 ret = iwl_mvm_add_aux_sta(mvm);
1038 if (ret)
1039 goto error;
1040
1041 /* Add all the PHY contexts */
1042 chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
1043 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
1044 for (i = 0; i < NUM_PHY_CTX; i++) {
1045 /*
1046 * The channel used here isn't relevant as it's
1047 * going to be overwritten in the other flows.
1048 * For now use the first channel we have.
1049 */
1050 ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
1051 &chandef, 1, 1);
1052 if (ret)
1053 goto error;
1054 }
1055
1056 /* Initialize tx backoffs to the minimal possible */
1057 iwl_mvm_tt_tx_backoff(mvm, 0);
1058
1059 WARN_ON(iwl_mvm_config_ltr(mvm));
1060
1061 ret = iwl_mvm_power_update_device(mvm);
1062 if (ret)
1063 goto error;
1064
1065 /*
1066 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
1067 * anyway, so don't init MCC.
1068 */
1069 if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
1070 ret = iwl_mvm_init_mcc(mvm);
1071 if (ret)
1072 goto error;
1073 }
1074
1075 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1076 ret = iwl_mvm_config_scan(mvm);
1077 if (ret)
1078 goto error;
1079 }
1080
1081 if (iwl_mvm_is_csum_supported(mvm) &&
1082 mvm->cfg->features & NETIF_F_RXCSUM)
1083 iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
1084
1085 /* allow FW/transport low power modes if not during restart */
1086 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1087 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1088
1089 IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
1090 return 0;
1091 error:
1092 iwl_trans_stop_device(mvm->trans);
1093 return ret;
1094}
1095
1096int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
1097{
1098 int ret, i;
1099
1100 lockdep_assert_held(&mvm->mutex);
1101
1102 ret = iwl_trans_start_hw(mvm->trans);
1103 if (ret)
1104 return ret;
1105
1106 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
1107 if (ret) {
1108 IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
1109 goto error;
1110 }
1111
1112 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1113 if (ret)
1114 goto error;
1115
1116	/* Send phy db control command and then phy db calibration */
1117 ret = iwl_send_phy_db_data(mvm->phy_db);
1118 if (ret)
1119 goto error;
1120
1121 ret = iwl_send_phy_cfg_cmd(mvm);
1122 if (ret)
1123 goto error;
1124
1125 /* init the fw <-> mac80211 STA mapping */
1126 for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
1127 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1128
1129 /* Add auxiliary station for scanning */
1130 ret = iwl_mvm_add_aux_sta(mvm);
1131 if (ret)
1132 goto error;
1133
1134 return 0;
1135 error:
1136 iwl_trans_stop_device(mvm->trans);
1137 return ret;
1138}
1139
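/* Log the HW/SW/CT kill state reported by the card state notification */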
1140void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1141 struct iwl_rx_cmd_buffer *rxb)
1142{
1143 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1144 struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
1145 u32 flags = le32_to_cpu(card_state_notif->flags);
1146
1147 IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
1148 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
1149 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
1150 (flags & CT_KILL_CARD_DISABLED) ?
1151 "Reached" : "Not reached");
1152}
1153
1154void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1155 struct iwl_rx_cmd_buffer *rxb)
1156{
1157 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1158 struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
1159
1160 IWL_DEBUG_INFO(mvm,
1161 "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1162 le32_to_cpu(mfuart_notif->installed_ver),
1163 le32_to_cpu(mfuart_notif->external_ver),
1164 le32_to_cpu(mfuart_notif->status),
1165 le32_to_cpu(mfuart_notif->duration));
1166}
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
deleted file mode 100644
index e3b3cf4dbd77..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/leds.h>
65#include "iwl-io.h"
66#include "iwl-csr.h"
67#include "mvm.h"
68
69/* Set led register on */
70static void iwl_mvm_led_enable(struct iwl_mvm *mvm)
71{
72 iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
73}
74
75/* Set led register off */
76static void iwl_mvm_led_disable(struct iwl_mvm *mvm)
77{
78 iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF);
79}
80
81static void iwl_led_brightness_set(struct led_classdev *led_cdev,
82 enum led_brightness brightness)
83{
84 struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);
85 if (brightness > 0)
86 iwl_mvm_led_enable(mvm);
87 else
88 iwl_mvm_led_disable(mvm);
89}
90
91int iwl_mvm_leds_init(struct iwl_mvm *mvm)
92{
93 int mode = iwlwifi_mod_params.led_mode;
94 int ret;
95
96 switch (mode) {
97 case IWL_LED_BLINK:
98		IWL_ERR(mvm, "Blink led mode not supported, using default\n");
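		/* fall through */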
99 case IWL_LED_DEFAULT:
100 case IWL_LED_RF_STATE:
101 mode = IWL_LED_RF_STATE;
102 break;
103 case IWL_LED_DISABLE:
104 IWL_INFO(mvm, "Led disabled\n");
105 return 0;
106 default:
107 return -EINVAL;
108 }
109
110 mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
111 wiphy_name(mvm->hw->wiphy));
112 mvm->led.brightness_set = iwl_led_brightness_set;
113 mvm->led.max_brightness = 1;
114
115 if (mode == IWL_LED_RF_STATE)
116 mvm->led.default_trigger =
117 ieee80211_get_radio_led_name(mvm->hw);
118
119 ret = led_classdev_register(mvm->trans->dev, &mvm->led);
120 if (ret) {
121 kfree(mvm->led.name);
122 IWL_INFO(mvm, "Failed to enable led\n");
123 return ret;
124 }
125
126 return 0;
127}
128
129void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
130{
131 if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE)
132 return;
133
134 led_classdev_unregister(&mvm->led);
135 kfree(mvm->led.name);
136}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
deleted file mode 100644
index ad7ad720d2e7..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ /dev/null
@@ -1,1452 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67
68#include <linux/etherdevice.h>
69#include <net/mac80211.h>
70#include "iwl-io.h"
71#include "iwl-prph.h"
72#include "fw-api.h"
73#include "mvm.h"
74#include "time-event.h"
75
76const u8 iwl_mvm_ac_to_tx_fifo[] = {
77 IWL_MVM_TX_FIFO_VO,
78 IWL_MVM_TX_FIFO_VI,
79 IWL_MVM_TX_FIFO_BE,
80 IWL_MVM_TX_FIFO_BK,
81};
82
83struct iwl_mvm_mac_iface_iterator_data {
84 struct iwl_mvm *mvm;
85 struct ieee80211_vif *vif;
86 unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
87 unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
88 enum iwl_tsf_id preferred_tsf;
89 bool found_vif;
90};
91
92struct iwl_mvm_hw_queues_iface_iterator_data {
93 struct ieee80211_vif *exclude_vif;
94 unsigned long used_hw_queues;
95};
96
97static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
98 struct ieee80211_vif *vif)
99{
100 struct iwl_mvm_mac_iface_iterator_data *data = _data;
101 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
102 u16 min_bi;
103
104 /* Skip the interface for which we are trying to assign a tsf_id */
105 if (vif == data->vif)
106 return;
107
108 /*
109 * The TSF is a hardware/firmware resource, there are 4 and
110 * the driver should assign and free them as needed. However,
111 * there are cases where 2 MACs should share the same TSF ID
112 * for the purpose of clock sync, an optimization to avoid
113 * clock drift causing overlapping TBTTs/DTIMs for a GO and
114 * client in the system.
115 *
116 * The firmware will decide according to the MAC type which
117 * will be the master and slave. Clients that need to sync
118 * with a remote station will be the master, and an AP or GO
119 * will be the slave.
120 *
121 * Depending on the new interface type it can be slaved to
122 * or become the master of an existing interface.
123 */
124 switch (data->vif->type) {
125 case NL80211_IFTYPE_STATION:
126 /*
127 * The new interface is a client, so if the one we're iterating
128 * is an AP, and the beacon interval of the AP is a multiple or
129 * divisor of the beacon interval of the client, the same TSF
130 * should be used to avoid drift between the new client and
131 * existing AP. The existing AP will get drift updates from the
132 * new client context in this case.
133 */
134 if (vif->type != NL80211_IFTYPE_AP ||
135 data->preferred_tsf != NUM_TSF_IDS ||
136 !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
137 break;
138
139 min_bi = min(data->vif->bss_conf.beacon_int,
140 vif->bss_conf.beacon_int);
141
142 if (!min_bi)
143 break;
144
145 if ((data->vif->bss_conf.beacon_int -
146 vif->bss_conf.beacon_int) % min_bi == 0) {
147 data->preferred_tsf = mvmvif->tsf_id;
148 return;
149 }
150 break;
151
152 case NL80211_IFTYPE_AP:
153 /*
154 * The new interface is AP/GO, so if its beacon interval is a
155 * multiple or a divisor of the beacon interval of an existing
156 * interface, it should get drift updates from an existing
157 * client or use the same TSF as an existing GO. There's no
158 * drift between TSFs internally but if they used different
159 * TSFs then a new client MAC could update one of them and
160 * cause drift that way.
161 */
162 if ((vif->type != NL80211_IFTYPE_AP &&
163 vif->type != NL80211_IFTYPE_STATION) ||
164 data->preferred_tsf != NUM_TSF_IDS ||
165 !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
166 break;
167
168 min_bi = min(data->vif->bss_conf.beacon_int,
169 vif->bss_conf.beacon_int);
170
171 if (!min_bi)
172 break;
173
174 if ((data->vif->bss_conf.beacon_int -
175 vif->bss_conf.beacon_int) % min_bi == 0) {
176 data->preferred_tsf = mvmvif->tsf_id;
177 return;
178 }
179 break;
180 default:
181 /*
182 * For all other interface types there's no need to
183 * take drift into account. Either they're exclusive
184 * like IBSS and monitor, or we don't care much about
185 * their TSF (like P2P Device), but we won't be able
186 * to share the TSF resource.
187 */
188 break;
189 }
190
191 /*
192 * Unless we exited above, we can't share the TSF resource
193 * that the virtual interface we're iterating over is using
194 * with the new one, so clear the available bit and if this
195 * was the preferred one, reset that as well.
196 */
197 __clear_bit(mvmvif->tsf_id, data->available_tsf_ids);
198
199 if (data->preferred_tsf == mvmvif->tsf_id)
200 data->preferred_tsf = NUM_TSF_IDS;
201}
202
203/*
204 * Get the mask of the queues used by the vif
205 */
206u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
207{
208 u32 qmask = 0, ac;
209
210 if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
211 return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
212
213 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
214 if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
215 qmask |= BIT(vif->hw_queue[ac]);
216 }
217
218 if (vif->type == NL80211_IFTYPE_AP)
219 qmask |= BIT(vif->cab_queue);
220
221 return qmask;
222}
223
224static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
225 struct ieee80211_vif *vif)
226{
227 struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
228
229 /* exclude the given vif */
230 if (vif == data->exclude_vif)
231 return;
232
233 data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
234}
235
236static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
237 struct ieee80211_sta *sta)
238{
239 struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
240 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
241
242 /* Mark the queues used by the sta */
243 data->used_hw_queues |= mvmsta->tfd_queue_msk;
244}
245
246unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
247 struct ieee80211_vif *exclude_vif)
248{
249 u8 sta_id;
250 struct iwl_mvm_hw_queues_iface_iterator_data data = {
251 .exclude_vif = exclude_vif,
252 .used_hw_queues =
253 BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
254 BIT(mvm->aux_queue) |
255 BIT(IWL_MVM_CMD_QUEUE),
256 };
257
258 lockdep_assert_held(&mvm->mutex);
259
260 /* mark all VIF used hw queues */
261 ieee80211_iterate_active_interfaces_atomic(
262 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
263 iwl_mvm_iface_hw_queues_iter, &data);
264
265 /* don't assign the same hw queues as TDLS stations */
266 ieee80211_iterate_stations_atomic(mvm->hw,
267 iwl_mvm_mac_sta_hw_queues_iter,
268 &data);
269
270 /*
271 * Some TDLS stations may be removed but are in the process of being
272 * drained. Don't touch their queues.
273 */
274 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT)
275 data.used_hw_queues |= mvm->tfd_drained[sta_id];
276
277 return data.used_hw_queues;
278}
279
280static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
281 struct ieee80211_vif *vif)
282{
283 struct iwl_mvm_mac_iface_iterator_data *data = _data;
284 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
285
286 /* Iterator may already find the interface being added -- skip it */
287 if (vif == data->vif) {
288 data->found_vif = true;
289 return;
290 }
291
292 /* Mark MAC IDs as used by clearing the available bit, and
293 * (below) mark TSFs as used if their existing use is not
294 * compatible with the new interface type.
295 * No locking or atomic bit operations are needed since the
296 * data is on the stack of the caller function.
297 */
298 __clear_bit(mvmvif->id, data->available_mac_ids);
299
300 /* find a suitable tsf_id */
301 iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
302}
303
304void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
305 struct ieee80211_vif *vif)
306{
307 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
308 struct iwl_mvm_mac_iface_iterator_data data = {
309 .mvm = mvm,
310 .vif = vif,
311 .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
312 /* no preference yet */
313 .preferred_tsf = NUM_TSF_IDS,
314 };
315
316 ieee80211_iterate_active_interfaces_atomic(
317 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
318 iwl_mvm_mac_tsf_id_iter, &data);
319
320 if (data.preferred_tsf != NUM_TSF_IDS)
321 mvmvif->tsf_id = data.preferred_tsf;
322 else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids))
323 mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
324 NUM_TSF_IDS);
325}
326
327static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
328 struct ieee80211_vif *vif)
329{
330 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
331 struct iwl_mvm_mac_iface_iterator_data data = {
332 .mvm = mvm,
333 .vif = vif,
334 .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 },
335 .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
336 /* no preference yet */
337 .preferred_tsf = NUM_TSF_IDS,
338 .found_vif = false,
339 };
340 u32 ac;
341 int ret, i;
342 unsigned long used_hw_queues;
343
344 /*
345 * Allocate a MAC ID and a TSF for this MAC, along with the queues
346 * and other resources.
347 */
348
349 /*
350 * Before the iterator, we start with all MAC IDs and TSFs available.
351 *
352 * During iteration, all MAC IDs are cleared that are in use by other
353 * virtual interfaces, and all TSF IDs are cleared that can't be used
354 * by this new virtual interface because they're used by an interface
355 * that can't share it with the new one.
356 * At the same time, we check if there's a preferred TSF in the case
357 * that we should share it with another interface.
358 */
359
360 /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
361 switch (vif->type) {
362 case NL80211_IFTYPE_ADHOC:
363 break;
364 case NL80211_IFTYPE_STATION:
365 if (!vif->p2p)
366 break;
367 /* fall through */
368 default:
369 __clear_bit(0, data.available_mac_ids);
370 }
371
372 ieee80211_iterate_active_interfaces_atomic(
373 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
374 iwl_mvm_mac_iface_iterator, &data);
375
376 used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
377
378 /*
379 * In the case we're getting here during resume, it's similar to
380 * firmware restart, and with RESUME_ALL the iterator will find
381 * the vif being added already.
382 * We don't want to reassign any IDs in either case since doing
383 * so would probably assign different IDs (as interfaces aren't
384 * necessarily added in the same order), but the old IDs were
385 * preserved anyway, so skip ID assignment for both resume and
386 * recovery.
387 */
388 if (data.found_vif)
389 return 0;
390
391 /* Therefore, in recovery, we can't get here */
392 if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
393 return -EBUSY;
394
395 mvmvif->id = find_first_bit(data.available_mac_ids,
396 NUM_MAC_INDEX_DRIVER);
397 if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
398 IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
399 ret = -EIO;
400 goto exit_fail;
401 }
402
403 if (data.preferred_tsf != NUM_TSF_IDS)
404 mvmvif->tsf_id = data.preferred_tsf;
405 else
406 mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
407 NUM_TSF_IDS);
408 if (mvmvif->tsf_id == NUM_TSF_IDS) {
409 IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
410 ret = -EIO;
411 goto exit_fail;
412 }
413
414 mvmvif->color = 0;
415
416 INIT_LIST_HEAD(&mvmvif->time_event_data.list);
417 mvmvif->time_event_data.id = TE_MAX;
418
419	/* No need to allocate data queues to the P2P Device MAC. */
420 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
421 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
422 vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
423
424 return 0;
425 }
426
427 /* Find available queues, and allocate them to the ACs */
428 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
429 u8 queue = find_first_zero_bit(&used_hw_queues,
430 mvm->first_agg_queue);
431
432 if (queue >= mvm->first_agg_queue) {
433 IWL_ERR(mvm, "Failed to allocate queue\n");
434 ret = -EIO;
435 goto exit_fail;
436 }
437
438 __set_bit(queue, &used_hw_queues);
439 vif->hw_queue[ac] = queue;
440 }
441
442 /* Allocate the CAB queue for softAP and GO interfaces */
443 if (vif->type == NL80211_IFTYPE_AP) {
444 u8 queue = find_first_zero_bit(&used_hw_queues,
445 mvm->first_agg_queue);
446
447 if (queue >= mvm->first_agg_queue) {
448 IWL_ERR(mvm, "Failed to allocate cab queue\n");
449 ret = -EIO;
450 goto exit_fail;
451 }
452
453 vif->cab_queue = queue;
454 } else {
455 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
456 }
457
458 mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
459 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
460
461 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
462 mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
463
464 return 0;
465
466exit_fail:
467 memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
468 memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
469 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
470 return ret;
471}
472
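/*
 * Allocate the MAC ID, TSF ID and HW queues for this interface and
 * enable the corresponding TX queues.
 */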
473int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
474{
475 unsigned int wdg_timeout =
476 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
477 u32 ac;
478 int ret;
479
480 lockdep_assert_held(&mvm->mutex);
481
482 ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif);
483 if (ret)
484 return ret;
485
486 switch (vif->type) {
487 case NL80211_IFTYPE_P2P_DEVICE:
488 iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
489 IWL_MVM_OFFCHANNEL_QUEUE,
490 IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
491 break;
492 case NL80211_IFTYPE_AP:
493 iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
494 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
495 /* fall through */
496 default:
497 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
498 iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
499 vif->hw_queue[ac],
500 iwl_mvm_ac_to_tx_fifo[ac], 0,
501 wdg_timeout);
502 break;
503 }
504
505 return 0;
506}
507
508void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
509{
510 int ac;
511
512 lockdep_assert_held(&mvm->mutex);
513
514 switch (vif->type) {
515 case NL80211_IFTYPE_P2P_DEVICE:
516 iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
517 IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
518 0);
519 break;
520 case NL80211_IFTYPE_AP:
521 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
522 IWL_MAX_TID_COUNT, 0);
523 /* fall through */
524 default:
525 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
526 iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
527 vif->hw_queue[ac],
528 IWL_MAX_TID_COUNT, 0);
529 }
530}
531
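/*
 * Build the CCK and OFDM ACK/CTS rate bitmaps from the BSS basic rates,
 * adding the mandatory lower rates required by the standard (see the
 * comments below).
 */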
532static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
533 struct ieee80211_vif *vif,
534 enum ieee80211_band band,
535 u8 *cck_rates, u8 *ofdm_rates)
536{
537 struct ieee80211_supported_band *sband;
538 unsigned long basic = vif->bss_conf.basic_rates;
539 int lowest_present_ofdm = 100;
540 int lowest_present_cck = 100;
541 u8 cck = 0;
542 u8 ofdm = 0;
543 int i;
544
545 sband = mvm->hw->wiphy->bands[band];
546
547 for_each_set_bit(i, &basic, BITS_PER_LONG) {
548 int hw = sband->bitrates[i].hw_value;
549 if (hw >= IWL_FIRST_OFDM_RATE) {
550 ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
551 if (lowest_present_ofdm > hw)
552 lowest_present_ofdm = hw;
553 } else {
554 BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
555
556 cck |= BIT(hw);
557 if (lowest_present_cck > hw)
558 lowest_present_cck = hw;
559 }
560 }
561
562 /*
563 * Now we've got the basic rates as bitmaps in the ofdm and cck
564 * variables. This isn't sufficient though, as there might not
565 * be all the right rates in the bitmap. E.g. if the only basic
566 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
567 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
568 *
569 * [...] a STA responding to a received frame shall transmit
570 * its Control Response frame [...] at the highest rate in the
571 * BSSBasicRateSet parameter that is less than or equal to the
572 * rate of the immediately previous frame in the frame exchange
573 * sequence ([...]) and that is of the same modulation class
574 * ([...]) as the received frame. If no rate contained in the
575 * BSSBasicRateSet parameter meets these conditions, then the
576 * control frame sent in response to a received frame shall be
577 * transmitted at the highest mandatory rate of the PHY that is
578 * less than or equal to the rate of the received frame, and
579 * that is of the same modulation class as the received frame.
580 *
581 * As a consequence, we need to add all mandatory rates that are
582 * lower than all of the basic rates to these bitmaps.
583 */
584
585 if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
586 ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
587 if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
588 ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
589 /* 6M already there or needed so always add */
590 ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
591
592 /*
593 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
594 * Note, however:
595 * - if no CCK rates are basic, it must be ERP since there must
596 * be some basic rates at all, so they're OFDM => ERP PHY
597 * (or we're in 5 GHz, and the cck bitmap will never be used)
598 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
599 * - if 5.5M is basic, 1M and 2M are mandatory
600 * - if 2M is basic, 1M is mandatory
601 * - if 1M is basic, that's the only valid ACK rate.
602 * As a consequence, it's not as complicated as it sounds, just add
603 * any lower rates to the ACK rate bitmap.
604 */
605 if (IWL_RATE_11M_INDEX < lowest_present_cck)
606 cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
607 if (IWL_RATE_5M_INDEX < lowest_present_cck)
608 cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
609 if (IWL_RATE_2M_INDEX < lowest_present_cck)
610 cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
611 /* 1M already there or needed so always add */
612 cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
613
614 *cck_rates = cck;
615 *ofdm_rates = ofdm;
616}
617
618static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm,
619 struct ieee80211_vif *vif,
620 struct iwl_mac_ctx_cmd *cmd)
621{
622	/* for both sta and ap, ht_operation_mode holds the protection_mode */
623 u8 protection_mode = vif->bss_conf.ht_operation_mode &
624 IEEE80211_HT_OP_MODE_PROTECTION;
625	/* The fw does not distinguish between HT and FAT (40 MHz) protection */
626 u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
627
628 IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
629 /*
630 * See section 9.23.3.1 of IEEE 80211-2012.
631 * Nongreenfield HT STAs Present is not supported.
632 */
633 switch (protection_mode) {
634 case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
635 break;
636 case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
637 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
638 cmd->protection_flags |= cpu_to_le32(ht_flag);
639 break;
640 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
641 /* Protect when channel wider than 20MHz */
642 if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
643 cmd->protection_flags |= cpu_to_le32(ht_flag);
644 break;
645 default:
646 IWL_ERR(mvm, "Illegal protection mode %d\n",
647 protection_mode);
648 break;
649 }
650}
651
652static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
653 struct ieee80211_vif *vif,
654 struct iwl_mac_ctx_cmd *cmd,
655 const u8 *bssid_override,
656 u32 action)
657{
658 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
659 struct ieee80211_chanctx_conf *chanctx;
660 bool ht_enabled = !!(vif->bss_conf.ht_operation_mode &
661 IEEE80211_HT_OP_MODE_PROTECTION);
662 u8 cck_ack_rates, ofdm_ack_rates;
663 const u8 *bssid = bssid_override ?: vif->bss_conf.bssid;
664 int i;
665
666 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
667 mvmvif->color));
668 cmd->action = cpu_to_le32(action);
669
670 switch (vif->type) {
671 case NL80211_IFTYPE_STATION:
672 if (vif->p2p)
673 cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA);
674 else
675 cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
676 break;
677 case NL80211_IFTYPE_AP:
678 cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
679 break;
680 case NL80211_IFTYPE_MONITOR:
681 cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER);
682 break;
683 case NL80211_IFTYPE_P2P_DEVICE:
684 cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE);
685 break;
686 case NL80211_IFTYPE_ADHOC:
687 cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS);
688 break;
689 default:
690 WARN_ON_ONCE(1);
691 }
692
693 cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);
694
695 memcpy(cmd->node_addr, vif->addr, ETH_ALEN);
696
697 if (bssid)
698 memcpy(cmd->bssid_addr, bssid, ETH_ALEN);
699 else
700 eth_broadcast_addr(cmd->bssid_addr);
701
702 rcu_read_lock();
703 chanctx = rcu_dereference(vif->chanctx_conf);
704 iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
705 : IEEE80211_BAND_2GHZ,
706 &cck_ack_rates, &ofdm_ack_rates);
707 rcu_read_unlock();
708
709 cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates);
710 cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);
711
712 cmd->cck_short_preamble =
713 cpu_to_le32(vif->bss_conf.use_short_preamble ?
714 MAC_FLG_SHORT_PREAMBLE : 0);
715 cmd->short_slot =
716 cpu_to_le32(vif->bss_conf.use_short_slot ?
717 MAC_FLG_SHORT_SLOT : 0);
718
719 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
720 u8 txf = iwl_mvm_ac_to_tx_fifo[i];
721
722 cmd->ac[txf].cw_min =
723 cpu_to_le16(mvmvif->queue_params[i].cw_min);
724 cmd->ac[txf].cw_max =
725 cpu_to_le16(mvmvif->queue_params[i].cw_max);
726 cmd->ac[txf].edca_txop =
727 cpu_to_le16(mvmvif->queue_params[i].txop * 32);
728 cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
729 cmd->ac[txf].fifos_mask = BIT(txf);
730 }
731
732 /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
733 if (vif->type == NL80211_IFTYPE_AP)
734 cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |=
735 BIT(IWL_MVM_TX_FIFO_MCAST);
736
737 if (vif->bss_conf.qos)
738 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
739
740 if (vif->bss_conf.use_cts_prot)
741 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
742
743 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
744 vif->bss_conf.use_cts_prot,
745 vif->bss_conf.ht_operation_mode);
746 if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
747 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
748 if (ht_enabled)
749 iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);
750
751 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
752}
753
754static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
755 struct iwl_mac_ctx_cmd *cmd)
756{
757 int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
758 sizeof(*cmd), cmd);
759 if (ret)
760 IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
761 le32_to_cpu(cmd->action), ret);
762 return ret;
763}
764
765static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
766 struct ieee80211_vif *vif,
767 u32 action, bool force_assoc_off,
768 const u8 *bssid_override)
769{
770 struct iwl_mac_ctx_cmd cmd = {};
771 struct iwl_mac_data_sta *ctxt_sta;
772
773 WARN_ON(vif->type != NL80211_IFTYPE_STATION);
774
775 /* Fill the common data for all mac context types */
776 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);
777
778 if (vif->p2p) {
779 struct ieee80211_p2p_noa_attr *noa =
780 &vif->bss_conf.p2p_noa_attr;
781
782 cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
783 IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
784 ctxt_sta = &cmd.p2p_sta.sta;
785 } else {
786 ctxt_sta = &cmd.sta;
787 }
788
789 /* We need the dtim_period to set the MAC as associated */
790 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
791 !force_assoc_off) {
792 u32 dtim_offs;
793
794 /*
795 * The DTIM count counts down, so when it is N that means N
796 * more beacon intervals happen until the DTIM TBTT. Therefore
797 * add this to the current time. If that ends up being in the
798 * future, the firmware will handle it.
799 *
800 * Also note that the system_timestamp (which we get here as
801 * "sync_device_ts") and TSF timestamp aren't at exactly the
802 * same offset in the frame -- the TSF is at the first symbol
803 * of the TSF, the system timestamp is at signal acquisition
804 * time. This means there's an offset between them of at most
805 * a few hundred microseconds (24 * 8 bits + PLCP time gives
806 * 384us in the longest case), this is currently not relevant
807 * as the firmware wakes up around 2ms before the TBTT.
808 */
809 dtim_offs = vif->bss_conf.sync_dtim_count *
810 vif->bss_conf.beacon_int;
811 /* convert TU to usecs */
812 dtim_offs *= 1024;
813
814 ctxt_sta->dtim_tsf =
815 cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
816 ctxt_sta->dtim_time =
817 cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);
818
819 IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
820 le64_to_cpu(ctxt_sta->dtim_tsf),
821 le32_to_cpu(ctxt_sta->dtim_time),
822 dtim_offs);
823
824 ctxt_sta->is_assoc = cpu_to_le32(1);
825 } else {
826 ctxt_sta->is_assoc = cpu_to_le32(0);
827
828 /* Allow beacons to pass through as long as we are not
829 * associated, or we do not have dtim period information.
830 */
831 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
832 }
833
834 ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
835 ctxt_sta->bi_reciprocal =
836 cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
837 ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
838 vif->bss_conf.dtim_period);
839 ctxt_sta->dtim_reciprocal =
840 cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
841 vif->bss_conf.dtim_period));
842
843 ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
844 ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
845
846 if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
847 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
848
849 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
850}
851
852static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
853 struct ieee80211_vif *vif,
854 u32 action)
855{
856 struct iwl_mac_ctx_cmd cmd = {};
857
858 WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
859
860 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
861
862 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
863 MAC_FILTER_IN_CONTROL_AND_MGMT |
864 MAC_FILTER_IN_BEACON |
865 MAC_FILTER_IN_PROBE_REQUEST |
866 MAC_FILTER_IN_CRC32);
867 ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
868
869 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
870}
871
872static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
873 struct ieee80211_vif *vif,
874 u32 action)
875{
876 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
877 struct iwl_mac_ctx_cmd cmd = {};
878
879 WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
880
881 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
882
883 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
884 MAC_FILTER_IN_PROBE_REQUEST);
885
886	/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
887 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
888 cmd.ibss.bi_reciprocal =
889 cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
890
891 /* TODO: Assumes that the beacon id == mac context id */
892 cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
893
894 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
895}
896
897struct iwl_mvm_go_iterator_data {
898 bool go_active;
899};
900
901static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
902{
903 struct iwl_mvm_go_iterator_data *data = _data;
904 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
905
906 if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
907 mvmvif->ap_ibss_active)
908 data->go_active = true;
909}
910
911static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
912 struct ieee80211_vif *vif,
913 u32 action)
914{
915 struct iwl_mac_ctx_cmd cmd = {};
916 struct iwl_mvm_go_iterator_data data = {};
917
918 WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
919
920 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
921
922 cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
923
924 /* Override the filter flags to accept only probe requests */
925 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
926
927 /*
928 * This flag should be set to true when the P2P Device is
929 * discoverable and there is at least one other active P2P GO. Setting
930 * this flag allows the P2P Device to be discoverable on other
931 * channels in addition to its listen channel.
932 * Note that this flag should not be set in other cases, as it opens the
933 * Rx filters on all MACs and increases the number of interrupts.
934 */
935 ieee80211_iterate_active_interfaces_atomic(
936 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
937 iwl_mvm_go_iterator, &data);
938
939 cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
940 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
941}
942
943static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
944 struct iwl_mac_beacon_cmd *beacon_cmd,
945 u8 *beacon, u32 frame_size)
946{
947 u32 tim_idx;
948 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
949
950 /* The index is relative to frame start but we start looking at the
951 * variable-length part of the beacon. */
952 tim_idx = mgmt->u.beacon.variable - beacon;
953
954 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
955 while ((tim_idx < (frame_size - 2)) &&
956 (beacon[tim_idx] != WLAN_EID_TIM))
957 tim_idx += beacon[tim_idx+1] + 2;
958
959 /* If TIM field was found, set variables */
960 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
961 beacon_cmd->tim_idx = cpu_to_le32(tim_idx);
962 beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]);
963 } else {
964 IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
965 }
966}
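/*
 * Editor's note: the TIM lookup above walks the standard 802.11 IE layout
 * (one byte element ID, one byte length, then the payload). A hedged,
 * stand-alone sketch of the same walk; example_find_ie() is a hypothetical
 * name used only for illustration (real code could use cfg80211_find_ie()).
 */
static int example_find_ie(const u8 *ies, u32 len, u8 eid)
{
	u32 pos = 0;

	while (pos + 2 <= len) {
		if (ies[pos] == eid)
			return pos;		/* offset of the element */
		pos += 2 + ies[pos + 1];	/* skip ID, length and payload */
	}

	return -1;				/* element not found */
}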
967
968static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
969 struct ieee80211_vif *vif,
970 struct sk_buff *beacon)
971{
972 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
973 struct iwl_host_cmd cmd = {
974 .id = BEACON_TEMPLATE_CMD,
975 .flags = CMD_ASYNC,
976 };
977 struct iwl_mac_beacon_cmd beacon_cmd = {};
978 struct ieee80211_tx_info *info;
979 u32 beacon_skb_len;
980 u32 rate, tx_flags;
981
982 if (WARN_ON(!beacon))
983 return -EINVAL;
984
985 beacon_skb_len = beacon->len;
986
987 /* TODO: for now the beacon template id is set to be the mac context id.
988 * Might be better to handle it as another resource ... */
989 beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
990 info = IEEE80211_SKB_CB(beacon);
991
992 /* Set up TX command fields */
993 beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len);
994 beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id;
995 beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
996 tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
997 tx_flags |=
998 iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) <<
999 TX_CMD_FLG_BT_PRIO_POS;
1000 beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags);
1001
1002 mvm->mgmt_last_antenna_idx =
1003 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
1004 mvm->mgmt_last_antenna_idx);
1005
1006 beacon_cmd.tx.rate_n_flags =
1007 cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
1008 RATE_MCS_ANT_POS);
1009
1010 if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
1011 rate = IWL_FIRST_OFDM_RATE;
1012 } else {
1013 rate = IWL_FIRST_CCK_RATE;
1014 beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
1015 }
1016 beacon_cmd.tx.rate_n_flags |=
1017 cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
1018
1019 /* Set up TX beacon command fields */
1020 if (vif->type == NL80211_IFTYPE_AP)
1021 iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
1022 beacon->data,
1023 beacon_skb_len);
1024
1025 /* Submit command */
1026 cmd.len[0] = sizeof(beacon_cmd);
1027 cmd.data[0] = &beacon_cmd;
1028 cmd.dataflags[0] = 0;
1029 cmd.len[1] = beacon_skb_len;
1030 cmd.data[1] = beacon->data;
1031 cmd.dataflags[1] = IWL_HCMD_DFL_DUP;
1032
1033 return iwl_mvm_send_cmd(mvm, &cmd);
1034}
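/*
 * Editor's note: a hedged illustration of the beacon rate choice made
 * above -- on 5 GHz (which has no CCK rates) and for P2P interfaces the
 * lowest OFDM rate is used, otherwise the lowest CCK rate together with
 * the CCK flag. example_pick_beacon_rate() is a hypothetical helper, not
 * driver code.
 */
static u32 example_pick_beacon_rate(bool band_5ghz, bool p2p)
{
	if (band_5ghz || p2p)
		return IWL_FIRST_OFDM_RATE;	/* lowest OFDM rate */

	/* lowest CCK rate; caller must also set RATE_MCS_CCK_MSK */
	return IWL_FIRST_CCK_RATE;
}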
1035
1036/* The beacon template for the AP/GO/IBSS has changed and needs update */
1037int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
1038 struct ieee80211_vif *vif)
1039{
1040 struct sk_buff *beacon;
1041 int ret;
1042
1043 WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1044 vif->type != NL80211_IFTYPE_ADHOC);
1045
1046 beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
1047 if (!beacon)
1048 return -ENOMEM;
1049
1050 ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
1051 dev_kfree_skb(beacon);
1052 return ret;
1053}
1054
1055struct iwl_mvm_mac_ap_iterator_data {
1056 struct iwl_mvm *mvm;
1057 struct ieee80211_vif *vif;
1058 u32 beacon_device_ts;
1059 u16 beacon_int;
1060};
1061
1062/* Find the beacon_device_ts and beacon_int for a managed interface */
1063static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
1064 struct ieee80211_vif *vif)
1065{
1066 struct iwl_mvm_mac_ap_iterator_data *data = _data;
1067
1068 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
1069 return;
1070
1071 /* Station client has higher priority than P2P client */
1072 if (vif->p2p && data->beacon_device_ts)
1073 return;
1074
1075 data->beacon_device_ts = vif->bss_conf.sync_device_ts;
1076 data->beacon_int = vif->bss_conf.beacon_int;
1077}
1078
1079/*
1080 * Fill the specific data for a MAC context of type AP or P2P GO
1081 */
1082static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
1083 struct ieee80211_vif *vif,
1084 struct iwl_mac_data_ap *ctxt_ap,
1085 bool add)
1086{
1087 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1088 struct iwl_mvm_mac_ap_iterator_data data = {
1089 .mvm = mvm,
1090 .vif = vif,
1091 .beacon_device_ts = 0
1092 };
1093
1094 ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
1095 ctxt_ap->bi_reciprocal =
1096 cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
1097 ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
1098 vif->bss_conf.dtim_period);
1099 ctxt_ap->dtim_reciprocal =
1100 cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
1101 vif->bss_conf.dtim_period));
1102
1103 ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
1104
1105 /*
1106 * Only set the beacon time when the MAC is being added; when we
1107 * just modify the MAC we should keep the time -- the firmware
1108 * can otherwise have a "jumping" TBTT.
1109 */
1110 if (add) {
1111 /*
1112 * If there is a station/P2P client interface which is
1113 * associated, set the AP's TBTT far enough from the station's
1114 * TBTT. Otherwise, set it to the current system time.
1115 */
1116 ieee80211_iterate_active_interfaces_atomic(
1117 mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
1118 iwl_mvm_mac_ap_iterator, &data);
1119
1120 if (data.beacon_device_ts) {
1121 u32 rand = (prandom_u32() % (64 - 36)) + 36;
1122 mvmvif->ap_beacon_time = data.beacon_device_ts +
1123 ieee80211_tu_to_usec(data.beacon_int * rand /
1124 100);
1125 } else {
1126 mvmvif->ap_beacon_time =
1127 iwl_read_prph(mvm->trans,
1128 DEVICE_SYSTEM_TIME_REG);
1129 }
1130 }
1131
1132 ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
1133 ctxt_ap->beacon_tsf = 0; /* unused */
1134
1135 /* TODO: Assume that the beacon id == mac context id */
1136 ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
1137}
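/*
 * Editor's note: worked example for the TBTT spreading above (values are
 * illustrative only). rand lands in [36, 63], so the AP/GO TBTT is placed
 * 36%-63% of a beacon interval after the associated client's TBTT, e.g.
 * with a 100 TU beacon interval and rand == 50 the offset is
 * 100 * 50 / 100 = 50 TU, i.e. ieee80211_tu_to_usec(50) = 51200 usec.
 */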
1138
1139static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
1140 struct ieee80211_vif *vif,
1141 u32 action)
1142{
1143 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1144 struct iwl_mac_ctx_cmd cmd = {};
1145
1146 WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
1147
1148 /* Fill the common data for all mac context types */
1149 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
1150
1151 /*
1152 * pass probe requests and beacons from other APs (needed
1153 * for ht protection); when there is no associated station,
1154 * don't ask the FW to pass beacons, to prevent unnecessary wake-ups.
1155 */
1156 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
1157 if (mvmvif->ap_assoc_sta_count) {
1158 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
1159 IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
1160 } else {
1161 IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
1162 }
1163
1164 /* Fill the data specific for ap mode */
1165 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
1166 action == FW_CTXT_ACTION_ADD);
1167
1168 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
1169}
1170
1171static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
1172 struct ieee80211_vif *vif,
1173 u32 action)
1174{
1175 struct iwl_mac_ctx_cmd cmd = {};
1176 struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
1177
1178 WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
1179
1180 /* Fill the common data for all mac context types */
1181 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
1182
1183 /*
1184 * pass probe requests and beacons from other APs (needed
1185 * for ht protection)
1186 */
1187 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
1188 MAC_FILTER_IN_BEACON);
1189
1190 /* Fill the data specific for GO mode */
1191 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
1192 action == FW_CTXT_ACTION_ADD);
1193
1194 cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow &
1195 IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
1196 cmd.go.opp_ps_enabled =
1197 cpu_to_le32(!!(noa->oppps_ctwindow &
1198 IEEE80211_P2P_OPPPS_ENABLE_BIT));
1199
1200 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
1201}
1202
1203static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1204 u32 action, bool force_assoc_off,
1205 const u8 *bssid_override)
1206{
1207 switch (vif->type) {
1208 case NL80211_IFTYPE_STATION:
1209 return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
1210 force_assoc_off,
1211 bssid_override);
1212 break;
1213 case NL80211_IFTYPE_AP:
1214 if (!vif->p2p)
1215 return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
1216 else
1217 return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
1218 break;
1219 case NL80211_IFTYPE_MONITOR:
1220 return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
1221 case NL80211_IFTYPE_P2P_DEVICE:
1222 return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
1223 case NL80211_IFTYPE_ADHOC:
1224 return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
1225 default:
1226 break;
1227 }
1228
1229 return -EOPNOTSUPP;
1230}
1231
1232int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1233{
1234 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1235 int ret;
1236
1237 if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
1238 vif->addr, ieee80211_vif_type_p2p(vif)))
1239 return -EIO;
1240
1241 ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
1242 true, NULL);
1243 if (ret)
1244 return ret;
1245
1246 /* will only do anything at resume from D3 time */
1247 iwl_mvm_set_last_nonqos_seq(mvm, vif);
1248
1249 mvmvif->uploaded = true;
1250 return 0;
1251}
1252
1253int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1254 bool force_assoc_off, const u8 *bssid_override)
1255{
1256 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1257
1258 if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
1259 vif->addr, ieee80211_vif_type_p2p(vif)))
1260 return -EIO;
1261
1262 return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
1263 force_assoc_off, bssid_override);
1264}
1265
1266int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1267{
1268 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1269 struct iwl_mac_ctx_cmd cmd;
1270 int ret;
1271
1272 if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
1273 vif->addr, ieee80211_vif_type_p2p(vif)))
1274 return -EIO;
1275
1276 memset(&cmd, 0, sizeof(cmd));
1277
1278 cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
1279 mvmvif->color));
1280 cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
1281
1282 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
1283 sizeof(cmd), &cmd);
1284 if (ret) {
1285 IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
1286 return ret;
1287 }
1288
1289 mvmvif->uploaded = false;
1290
1291 if (vif->type == NL80211_IFTYPE_MONITOR)
1292 __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
1293
1294 return 0;
1295}
1296
1297static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
1298 struct ieee80211_vif *csa_vif, u32 gp2,
1299 bool tx_success)
1300{
1301 struct iwl_mvm_vif *mvmvif =
1302 iwl_mvm_vif_from_mac80211(csa_vif);
1303
1304 /* Don't start the countdown from a failed beacon */
1305 if (!tx_success && !mvmvif->csa_countdown)
1306 return;
1307
1308 mvmvif->csa_countdown = true;
1309
1310 if (!ieee80211_csa_is_complete(csa_vif)) {
1311 int c = ieee80211_csa_update_counter(csa_vif);
1312
1313 iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
1314 if (csa_vif->p2p &&
1315 !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 &&
1316 tx_success) {
1317 u32 rel_time = (c + 1) *
1318 csa_vif->bss_conf.beacon_int -
1319 IWL_MVM_CHANNEL_SWITCH_TIME_GO;
1320 u32 apply_time = gp2 + rel_time * 1024;
1321
1322 iwl_mvm_schedule_csa_period(mvm, csa_vif,
1323 IWL_MVM_CHANNEL_SWITCH_TIME_GO -
1324 IWL_MVM_CHANNEL_SWITCH_MARGIN,
1325 apply_time);
1326 }
1327 } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
1328 /* we don't have CSA NoA scheduled yet, switch now */
1329 ieee80211_csa_finish(csa_vif);
1330 RCU_INIT_POINTER(mvm->csa_vif, NULL);
1331 }
1332}
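/*
 * Editor's note: hedged sketch of the NoA scheduling arithmetic above.
 * gp2 is in usec while beacon intervals are in TU, hence the * 1024.
 * example_csa_apply_time() is a hypothetical helper used only to spell
 * the computation out.
 */
static u32 example_csa_apply_time(u32 gp2, int csa_counter, u32 beacon_int_tu,
				  u32 switch_time_tu)
{
	/*
	 * The switch beacon's TBTT is (csa_counter + 1) beacon intervals
	 * away; schedule the time event switch_time_tu TU before that TBTT.
	 */
	u32 rel_time_tu = (csa_counter + 1) * beacon_int_tu - switch_time_tu;

	return gp2 + rel_time_tu * 1024;	/* TU -> usec, relative to GP2 */
}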
1333
1334void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
1335 struct iwl_rx_cmd_buffer *rxb)
1336{
1337 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1338 struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
1339 struct iwl_mvm_tx_resp *beacon_notify_hdr;
1340 struct ieee80211_vif *csa_vif;
1341 struct ieee80211_vif *tx_blocked_vif;
1342 u16 status;
1343
1344 lockdep_assert_held(&mvm->mutex);
1345
1346 beacon_notify_hdr = &beacon->beacon_notify_hdr;
1347 mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
1348
1349 status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
1350 IWL_DEBUG_RX(mvm,
1351 "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
1352 status, beacon_notify_hdr->failure_frame,
1353 le64_to_cpu(beacon->tsf),
1354 mvm->ap_last_beacon_gp2,
1355 le32_to_cpu(beacon_notify_hdr->initial_rate));
1356
1357 csa_vif = rcu_dereference_protected(mvm->csa_vif,
1358 lockdep_is_held(&mvm->mutex));
1359 if (unlikely(csa_vif && csa_vif->csa_active))
1360 iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
1361 (status == TX_STATUS_SUCCESS));
1362
1363 tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
1364 lockdep_is_held(&mvm->mutex));
1365 if (unlikely(tx_blocked_vif)) {
1366 struct iwl_mvm_vif *mvmvif =
1367 iwl_mvm_vif_from_mac80211(tx_blocked_vif);
1368
1369 /*
1370 * The channel switch is started and we have blocked the
1371 * stations. If this is the first beacon (the timeout wasn't
1372 * set), set the unblock timeout; otherwise count down
1373 */
1374 if (!mvm->csa_tx_block_bcn_timeout)
1375 mvm->csa_tx_block_bcn_timeout =
1376 IWL_MVM_CS_UNBLOCK_TX_TIMEOUT;
1377 else
1378 mvm->csa_tx_block_bcn_timeout--;
1379
1380 /* Check if the timeout is expired, and unblock tx */
1381 if (mvm->csa_tx_block_bcn_timeout == 0) {
1382 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
1383 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
1384 }
1385 }
1386}
1387
1388static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
1389 struct ieee80211_vif *vif)
1390{
1391 struct iwl_missed_beacons_notif *missed_beacons = _data;
1392 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1393 struct iwl_mvm *mvm = mvmvif->mvm;
1394 struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
1395 struct iwl_fw_dbg_trigger_tlv *trigger;
1396 u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
1397 u32 rx_missed_bcon, rx_missed_bcon_since_rx;
1398
1399 if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
1400 return;
1401
1402 rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
1403 rx_missed_bcon_since_rx =
1404 le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
1405 /*
1406 * TODO: the threshold should be adjusted based on latency conditions,
1407 * and/or in case of a CS flow on one of the other AP vifs.
1408 */
1409 if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
1410 IWL_MVM_MISSED_BEACONS_THRESHOLD)
1411 ieee80211_beacon_loss(vif);
1412
1413 if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
1414 FW_DBG_TRIGGER_MISSED_BEACONS))
1415 return;
1416
1417 trigger = iwl_fw_dbg_get_trigger(mvm->fw,
1418 FW_DBG_TRIGGER_MISSED_BEACONS);
1419 bcon_trig = (void *)trigger->data;
1420 stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
1421 stop_trig_missed_bcon_since_rx =
1422 le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
1423
1424 /* TODO: implement start trigger */
1425
1426 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
1427 return;
1428
1429 if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
1430 rx_missed_bcon >= stop_trig_missed_bcon)
1431 iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
1432}
1433
1434void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
1435 struct iwl_rx_cmd_buffer *rxb)
1436{
1437 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1438 struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
1439
1440 IWL_DEBUG_INFO(mvm,
1441 "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
1442 le32_to_cpu(mb->mac_id),
1443 le32_to_cpu(mb->consec_missed_beacons),
1444 le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
1445 le32_to_cpu(mb->num_recvd_beacons),
1446 le32_to_cpu(mb->num_expected_beacons));
1447
1448 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1449 IEEE80211_IFACE_ITER_NORMAL,
1450 iwl_mvm_beacon_loss_iterator,
1451 mb);
1452}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
deleted file mode 100644
index 1fb684693040..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ /dev/null
@@ -1,4260 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/kernel.h>
66#include <linux/slab.h>
67#include <linux/skbuff.h>
68#include <linux/netdevice.h>
69#include <linux/etherdevice.h>
70#include <linux/ip.h>
71#include <linux/if_arp.h>
72#include <linux/devcoredump.h>
73#include <net/mac80211.h>
74#include <net/ieee80211_radiotap.h>
75#include <net/tcp.h>
76
77#include "iwl-op-mode.h"
78#include "iwl-io.h"
79#include "mvm.h"
80#include "sta.h"
81#include "time-event.h"
82#include "iwl-eeprom-parse.h"
83#include "iwl-phy-db.h"
84#include "testmode.h"
85#include "iwl-fw-error-dump.h"
86#include "iwl-prph.h"
87#include "iwl-csr.h"
88#include "iwl-nvm-parse.h"
89
90static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
91 {
92 .max = 1,
93 .types = BIT(NL80211_IFTYPE_STATION),
94 },
95 {
96 .max = 1,
97 .types = BIT(NL80211_IFTYPE_AP) |
98 BIT(NL80211_IFTYPE_P2P_CLIENT) |
99 BIT(NL80211_IFTYPE_P2P_GO),
100 },
101 {
102 .max = 1,
103 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
104 },
105};
106
107static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
108 {
109 .num_different_channels = 2,
110 .max_interfaces = 3,
111 .limits = iwl_mvm_limits,
112 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
113 },
114};
115
116#ifdef CONFIG_PM_SLEEP
117static const struct nl80211_wowlan_tcp_data_token_feature
118iwl_mvm_wowlan_tcp_token_feature = {
119 .min_len = 0,
120 .max_len = 255,
121 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
122};
123
124static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
125 .tok = &iwl_mvm_wowlan_tcp_token_feature,
126 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
127 sizeof(struct ethhdr) -
128 sizeof(struct iphdr) -
129 sizeof(struct tcphdr),
130 .data_interval_max = 65535, /* __le16 in API */
131 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
132 sizeof(struct ethhdr) -
133 sizeof(struct iphdr) -
134 sizeof(struct tcphdr),
135 .seq = true,
136};
137#endif
138
139#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
140/*
141 * Use the reserved field to indicate magic values.
142 * These values will only be used internally by the driver,
143 * and won't make it to the fw (reserved will be 0).
144 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
145 *	be the vif's IP address. In case there is not a single
146 *	IP address (0, or more than 1), this attribute will
147 *	be skipped.
148 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
149 *	the LSB bytes of the vif's MAC address
150 */
151enum {
152 BC_FILTER_MAGIC_NONE = 0,
153 BC_FILTER_MAGIC_IP,
154 BC_FILTER_MAGIC_MAC,
155};
156
157static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
158 {
159 /* arp */
160 .discard = 0,
161 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
162 .attrs = {
163 {
164 /* frame type - arp, hw type - ethernet */
165 .offset_type =
166 BCAST_FILTER_OFFSET_PAYLOAD_START,
167 .offset = sizeof(rfc1042_header),
168 .val = cpu_to_be32(0x08060001),
169 .mask = cpu_to_be32(0xffffffff),
170 },
171 {
172 /* arp dest ip */
173 .offset_type =
174 BCAST_FILTER_OFFSET_PAYLOAD_START,
175 .offset = sizeof(rfc1042_header) + 2 +
176 sizeof(struct arphdr) +
177 ETH_ALEN + sizeof(__be32) +
178 ETH_ALEN,
179 .mask = cpu_to_be32(0xffffffff),
180 /* mark it as special field */
181 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
182 },
183 },
184 },
185 {
186 /* dhcp offer bcast */
187 .discard = 0,
188 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
189 .attrs = {
190 {
191 /* udp dest port - 68 (bootp client)*/
192 .offset_type = BCAST_FILTER_OFFSET_IP_END,
193 .offset = offsetof(struct udphdr, dest),
194 .val = cpu_to_be32(0x00440000),
195 .mask = cpu_to_be32(0xffff0000),
196 },
197 {
198 /* dhcp - lsb bytes of client hw address */
199 .offset_type = BCAST_FILTER_OFFSET_IP_END,
200 .offset = 38,
201 .mask = cpu_to_be32(0xffffffff),
202 /* mark it as special field */
203 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
204 },
205 },
206 },
207 /* last filter must be empty */
208 {},
209};
210#endif
211
212void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
213{
214 if (!iwl_mvm_is_d0i3_supported(mvm))
215 return;
216
217 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
218 spin_lock_bh(&mvm->refs_lock);
219 mvm->refs[ref_type]++;
220 spin_unlock_bh(&mvm->refs_lock);
221 iwl_trans_ref(mvm->trans);
222}
223
224void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
225{
226 if (!iwl_mvm_is_d0i3_supported(mvm))
227 return;
228
229 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
230 spin_lock_bh(&mvm->refs_lock);
231 WARN_ON(!mvm->refs[ref_type]--);
232 spin_unlock_bh(&mvm->refs_lock);
233 iwl_trans_unref(mvm->trans);
234}
235
236static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
237 enum iwl_mvm_ref_type except_ref)
238{
239 int i, j;
240
241 if (!iwl_mvm_is_d0i3_supported(mvm))
242 return;
243
244 spin_lock_bh(&mvm->refs_lock);
245 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
246 if (except_ref == i || !mvm->refs[i])
247 continue;
248
249 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
250 i, mvm->refs[i]);
251 for (j = 0; j < mvm->refs[i]; j++)
252 iwl_trans_unref(mvm->trans);
253 mvm->refs[i] = 0;
254 }
255 spin_unlock_bh(&mvm->refs_lock);
256}
257
258bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
259{
260 int i;
261 bool taken = false;
262
263 if (!iwl_mvm_is_d0i3_supported(mvm))
264 return true;
265
266 spin_lock_bh(&mvm->refs_lock);
267 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
268 if (mvm->refs[i]) {
269 taken = true;
270 break;
271 }
272 }
273 spin_unlock_bh(&mvm->refs_lock);
274
275 return taken;
276}
277
278int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
279{
280 iwl_mvm_ref(mvm, ref_type);
281
282 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
283 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
284 HZ)) {
285 WARN_ON_ONCE(1);
286 iwl_mvm_unref(mvm, ref_type);
287 return -EIO;
288 }
289
290 return 0;
291}
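/*
 * Editor's note: hedged usage sketch for the reference helpers above.
 * IWL_MVM_REF_EXAMPLE is a hypothetical reference type; real callers use
 * entries of enum iwl_mvm_ref_type such as IWL_MVM_REF_TX_AGG (see the
 * A-MPDU path below).
 *
 *	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_EXAMPLE);
 *	if (ret)
 *		return ret;	(device did not leave D0i3 in time)
 *
 *	... direct device access is now safe ...
 *
 *	iwl_mvm_unref(mvm, IWL_MVM_REF_EXAMPLE);
 */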
292
293static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
294{
295 int i;
296
297 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
298 for (i = 0; i < NUM_PHY_CTX; i++) {
299 mvm->phy_ctxts[i].id = i;
300 mvm->phy_ctxts[i].ref = 0;
301 }
302}
303
304struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
305 const char *alpha2,
306 enum iwl_mcc_source src_id,
307 bool *changed)
308{
309 struct ieee80211_regdomain *regd = NULL;
310 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
311 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
312 struct iwl_mcc_update_resp *resp;
313
314 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
315
316 lockdep_assert_held(&mvm->mutex);
317
318 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
319 if (IS_ERR_OR_NULL(resp)) {
320 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
321 PTR_ERR_OR_ZERO(resp));
322 goto out;
323 }
324
325 if (changed)
326 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
327
328 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
329 __le32_to_cpu(resp->n_channels),
330 resp->channels,
331 __le16_to_cpu(resp->mcc));
332 /* Store the return source id */
333 src_id = resp->source_id;
334 kfree(resp);
335 if (IS_ERR_OR_NULL(regd)) {
336 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
337 PTR_ERR_OR_ZERO(regd));
338 goto out;
339 }
340
341 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
342 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
343 mvm->lar_regdom_set = true;
344 mvm->mcc_src = src_id;
345
346out:
347 return regd;
348}
349
350void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
351{
352 bool changed;
353 struct ieee80211_regdomain *regd;
354
355 if (!iwl_mvm_is_lar_supported(mvm))
356 return;
357
358 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
359 if (!IS_ERR_OR_NULL(regd)) {
360 /* only update the regulatory core if changed */
361 if (changed)
362 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
363
364 kfree(regd);
365 }
366}
367
368struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
369 bool *changed)
370{
371 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
372 iwl_mvm_is_wifi_mcc_supported(mvm) ?
373 MCC_SOURCE_GET_CURRENT :
374 MCC_SOURCE_OLD_FW, changed);
375}
376
377int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
378{
379 enum iwl_mcc_source used_src;
380 struct ieee80211_regdomain *regd;
381 int ret;
382 bool changed;
383 const struct ieee80211_regdomain *r =
384 rtnl_dereference(mvm->hw->wiphy->regd);
385
386 if (!r)
387 return -ENOENT;
388
389 /* save the last source in case we overwrite it below */
390 used_src = mvm->mcc_src;
391 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
392 /* Notify the firmware we support wifi location updates */
393 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
394 if (!IS_ERR_OR_NULL(regd))
395 kfree(regd);
396 }
397
398 /* Now set our last stored MCC and source */
399 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
400 &changed);
401 if (IS_ERR_OR_NULL(regd))
402 return -EIO;
403
404 /* update cfg80211 if the regdomain was changed */
405 if (changed)
406 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
407 else
408 ret = 0;
409
410 kfree(regd);
411 return ret;
412}
413
414int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
415{
416 struct ieee80211_hw *hw = mvm->hw;
417 int num_mac, ret, i;
418 static const u32 mvm_ciphers[] = {
419 WLAN_CIPHER_SUITE_WEP40,
420 WLAN_CIPHER_SUITE_WEP104,
421 WLAN_CIPHER_SUITE_TKIP,
422 WLAN_CIPHER_SUITE_CCMP,
423 };
424
425 /* Tell mac80211 our characteristics */
426 ieee80211_hw_set(hw, SIGNAL_DBM);
427 ieee80211_hw_set(hw, SPECTRUM_MGMT);
428 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
429 ieee80211_hw_set(hw, QUEUE_CONTROL);
430 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
431 ieee80211_hw_set(hw, SUPPORTS_PS);
432 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
433 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
434 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
435 ieee80211_hw_set(hw, CONNECTION_MONITOR);
436 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
437 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
438 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
439
440 hw->queues = mvm->first_agg_queue;
441 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
442 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
443 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
444 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
445 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
446 hw->rate_control_algorithm = "iwl-mvm-rs";
447 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
448 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
449
450 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
451 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
452 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
453 hw->wiphy->cipher_suites = mvm->ciphers;
454
455 /*
456 * Enable 11w if advertised by firmware and software crypto
457 * is not enabled (as the firmware will interpret some mgmt
458 * packets, so enabling it with software crypto isn't safe)
459 */
460 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
461 !iwlwifi_mod_params.sw_crypto) {
462 ieee80211_hw_set(hw, MFP_CAPABLE);
463 mvm->ciphers[hw->wiphy->n_cipher_suites] =
464 WLAN_CIPHER_SUITE_AES_CMAC;
465 hw->wiphy->n_cipher_suites++;
466 }
467
468 /* currently FW API supports only one optional cipher scheme */
469 if (mvm->fw->cs[0].cipher) {
470 mvm->hw->n_cipher_schemes = 1;
471 mvm->hw->cipher_schemes = &mvm->fw->cs[0];
472 mvm->ciphers[hw->wiphy->n_cipher_suites] =
473 mvm->fw->cs[0].cipher;
474 hw->wiphy->n_cipher_suites++;
475 }
476
477 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
478 hw->wiphy->features |=
479 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
480 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
481 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
482
483 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
484 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
485 hw->chanctx_data_size = sizeof(u16);
486
487 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
488 BIT(NL80211_IFTYPE_P2P_CLIENT) |
489 BIT(NL80211_IFTYPE_AP) |
490 BIT(NL80211_IFTYPE_P2P_GO) |
491 BIT(NL80211_IFTYPE_P2P_DEVICE) |
492 BIT(NL80211_IFTYPE_ADHOC);
493
494 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
495 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
496 if (iwl_mvm_is_lar_supported(mvm))
497 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
498 else
499 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
500 REGULATORY_DISABLE_BEACON_HINTS;
501
502 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
503 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
504
505 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
506
507 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
508 hw->wiphy->n_iface_combinations =
509 ARRAY_SIZE(iwl_mvm_iface_combinations);
510
511 hw->wiphy->max_remain_on_channel_duration = 10000;
512 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
513 /* we can compensate an offset of up to 3 channels = 15 MHz */
514 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
515
516 /* Extract MAC address */
517 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
518 hw->wiphy->addresses = mvm->addresses;
519 hw->wiphy->n_addresses = 1;
520
521 /* Extract additional MAC addresses if available */
522 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
523 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
524
525 for (i = 1; i < num_mac; i++) {
526 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
527 ETH_ALEN);
528 mvm->addresses[i].addr[5]++;
529 hw->wiphy->n_addresses++;
530 }
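/*
 * Editor's note: the loop above derives each additional address from the
 * previous one by incrementing its last octet, e.g. (illustrative values
 * only) 00:11:22:33:44:55 -> 00:11:22:33:44:56 -> 00:11:22:33:44:57.
 */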
531
532 iwl_mvm_reset_phy_ctxts(mvm);
533
534 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
535
536 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
537
538 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
539 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
540 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
541
542 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
543 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
544 else
545 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
546
547 if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
548 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
549 &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
550 if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
551 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
552 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
553
554 if (fw_has_capa(&mvm->fw->ucode_capa,
555 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
556 fw_has_api(&mvm->fw->ucode_capa,
557 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
558 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
559 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
560 }
561
562 hw->wiphy->hw_version = mvm->trans->hw_id;
563
564 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
565 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
566 else
567 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
568
569 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
570 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
571 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
572 /* we create the 802.11 header and a zero-length SSID IE. */
573 hw->wiphy->max_sched_scan_ie_len =
574 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
575 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
576 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
577
578 /*
579 * the firmware uses a u8 for the number of iterations, but 0xff is
580 * reserved for an infinite loop, so the maximum number of iterations is actually 254.
581 */
582 hw->wiphy->max_sched_scan_plan_iterations = 254;
583
584 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
585 NL80211_FEATURE_LOW_PRIORITY_SCAN |
586 NL80211_FEATURE_P2P_GO_OPPPS |
587 NL80211_FEATURE_DYNAMIC_SMPS |
588 NL80211_FEATURE_STATIC_SMPS |
589 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
590
591 if (fw_has_capa(&mvm->fw->ucode_capa,
592 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
593 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
594 if (fw_has_capa(&mvm->fw->ucode_capa,
595 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
596 hw->wiphy->features |= NL80211_FEATURE_QUIET;
597
598 if (fw_has_capa(&mvm->fw->ucode_capa,
599 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
600 hw->wiphy->features |=
601 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
602
603 if (fw_has_capa(&mvm->fw->ucode_capa,
604 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
605 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
606
607 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
608
609#ifdef CONFIG_PM_SLEEP
610 if (iwl_mvm_is_d0i3_supported(mvm) &&
611 device_can_wakeup(mvm->trans->dev)) {
612 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
613 hw->wiphy->wowlan = &mvm->wowlan;
614 }
615
616 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
617 mvm->trans->ops->d3_suspend &&
618 mvm->trans->ops->d3_resume &&
619 device_can_wakeup(mvm->trans->dev)) {
620 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
621 WIPHY_WOWLAN_DISCONNECT |
622 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
623 WIPHY_WOWLAN_RFKILL_RELEASE |
624 WIPHY_WOWLAN_NET_DETECT;
625 if (!iwlwifi_mod_params.sw_crypto)
626 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
627 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
628 WIPHY_WOWLAN_4WAY_HANDSHAKE;
629
630 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
631 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
632 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
633 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
634 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
635 hw->wiphy->wowlan = &mvm->wowlan;
636 }
637#endif
638
639#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
640 /* assign default bcast filtering configuration */
641 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
642#endif
643
644 ret = iwl_mvm_leds_init(mvm);
645 if (ret)
646 return ret;
647
648 if (fw_has_capa(&mvm->fw->ucode_capa,
649 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
650 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
651 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
652 ieee80211_hw_set(hw, TDLS_WIDER_BW);
653 }
654
655 if (fw_has_capa(&mvm->fw->ucode_capa,
656 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
657 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
658 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
659 }
660
661 hw->netdev_features |= mvm->cfg->features;
662 if (!iwl_mvm_is_csum_supported(mvm))
663 hw->netdev_features &= ~NETIF_F_RXCSUM;
664
665 ret = ieee80211_register_hw(mvm->hw);
666 if (ret)
667 iwl_mvm_leds_exit(mvm);
668
669 return ret;
670}
671
672static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
673 struct ieee80211_sta *sta,
674 struct sk_buff *skb)
675{
676 struct iwl_mvm_sta *mvmsta;
677 bool defer = false;
678
679 /*
680 * double check the IN_D0I3 flag both before and after
681 * taking the spinlock, in order to prevent taking
682 * the spinlock when not needed.
683 */
684 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
685 return false;
686
687 spin_lock(&mvm->d0i3_tx_lock);
688 /*
689 * testing the flag again ensures the skb dequeue
690 * loop (on d0i3 exit) hasn't run yet.
691 */
692 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
693 goto out;
694
695 mvmsta = iwl_mvm_sta_from_mac80211(sta);
696 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
697 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
698 goto out;
699
700 __skb_queue_tail(&mvm->d0i3_tx, skb);
701 ieee80211_stop_queues(mvm->hw);
702
703 /* trigger wakeup */
704 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
705 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
706
707 defer = true;
708out:
709 spin_unlock(&mvm->d0i3_tx_lock);
710 return defer;
711}
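/*
 * Editor's note (hedged): the paired ref/unref above is what kicks off the
 * D0i3 exit flow; once the exit completes, the skbs queued on mvm->d0i3_tx
 * are expected to be flushed and the mac80211 queues restarted by the exit
 * path, which lives elsewhere in this driver.
 */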
712
713static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
714 struct ieee80211_tx_control *control,
715 struct sk_buff *skb)
716{
717 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
718 struct ieee80211_sta *sta = control->sta;
719 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
720 struct ieee80211_hdr *hdr = (void *)skb->data;
721
722 if (iwl_mvm_is_radio_killed(mvm)) {
723 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
724 goto drop;
725 }
726
727 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
728 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
729 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
730 goto drop;
731
732 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
733 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
734 ieee80211_is_mgmt(hdr->frame_control) &&
735 !ieee80211_is_deauth(hdr->frame_control) &&
736 !ieee80211_is_disassoc(hdr->frame_control) &&
737 !ieee80211_is_action(hdr->frame_control)))
738 sta = NULL;
739
740 if (sta) {
741 if (iwl_mvm_defer_tx(mvm, sta, skb))
742 return;
743 if (iwl_mvm_tx_skb(mvm, skb, sta))
744 goto drop;
745 return;
746 }
747
748 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
749 goto drop;
750 return;
751 drop:
752 ieee80211_free_txskb(hw, skb);
753}
754
755static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
756{
757 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
758 return false;
759 return true;
760}
761
762static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
763{
764 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
765 return false;
766 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
767 return true;
768
769 /* enabled by default */
770 return true;
771}
772
773#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
774 do { \
775 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
776 break; \
777 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
778 } while (0)
779
780static void
781iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
782 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
783 enum ieee80211_ampdu_mlme_action action)
784{
785 struct iwl_fw_dbg_trigger_tlv *trig;
786 struct iwl_fw_dbg_trigger_ba *ba_trig;
787
788 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
789 return;
790
791 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
792 ba_trig = (void *)trig->data;
793
794 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
795 return;
796
797 switch (action) {
798 case IEEE80211_AMPDU_TX_OPERATIONAL: {
799 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
800 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
801
802 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
803 "TX AGG START: MAC %pM tid %d ssn %d\n",
804 sta->addr, tid, tid_data->ssn);
805 break;
806 }
807 case IEEE80211_AMPDU_TX_STOP_CONT:
808 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
809 "TX AGG STOP: MAC %pM tid %d\n",
810 sta->addr, tid);
811 break;
812 case IEEE80211_AMPDU_RX_START:
813 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
814 "RX AGG START: MAC %pM tid %d ssn %d\n",
815 sta->addr, tid, rx_ba_ssn);
816 break;
817 case IEEE80211_AMPDU_RX_STOP:
818 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
819 "RX AGG STOP: MAC %pM tid %d\n",
820 sta->addr, tid);
821 break;
822 default:
823 break;
824 }
825}
826
827static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
828 struct ieee80211_vif *vif,
829 enum ieee80211_ampdu_mlme_action action,
830 struct ieee80211_sta *sta, u16 tid,
831 u16 *ssn, u8 buf_size, bool amsdu)
832{
833 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
834 int ret;
835 bool tx_agg_ref = false;
836
837 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
838 sta->addr, tid, action);
839
840 if (!(mvm->nvm_data->sku_cap_11n_enable))
841 return -EACCES;
842
843 /* return from D0i3 before starting a new Tx aggregation */
844 switch (action) {
845 case IEEE80211_AMPDU_TX_START:
846 case IEEE80211_AMPDU_TX_STOP_CONT:
847 case IEEE80211_AMPDU_TX_STOP_FLUSH:
848 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
849 case IEEE80211_AMPDU_TX_OPERATIONAL:
850 /*
851 * for tx start, wait synchronously until D0i3 exit to
852 * get the correct sequence number for the tid.
853 * additionally, some other ampdu actions use direct
854 * target access, which is not handled automatically
855 * by the trans layer (unlike commands), so wait for
856 * d0i3 exit in these cases as well.
857 */
858 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
859 if (ret)
860 return ret;
861
862 tx_agg_ref = true;
863 break;
864 default:
865 break;
866 }
867
868 mutex_lock(&mvm->mutex);
869
870 switch (action) {
871 case IEEE80211_AMPDU_RX_START:
872 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
873 ret = -EINVAL;
874 break;
875 }
876 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
877 break;
878 case IEEE80211_AMPDU_RX_STOP:
879 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
880 break;
881 case IEEE80211_AMPDU_TX_START:
882 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
883 ret = -EINVAL;
884 break;
885 }
886 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
887 break;
888 case IEEE80211_AMPDU_TX_STOP_CONT:
889 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
890 break;
891 case IEEE80211_AMPDU_TX_STOP_FLUSH:
892 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
893 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
894 break;
895 case IEEE80211_AMPDU_TX_OPERATIONAL:
896 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
897 break;
898 default:
899 WARN_ON_ONCE(1);
900 ret = -EINVAL;
901 break;
902 }
903
904 if (!ret) {
905 u16 rx_ba_ssn = 0;
906
907 if (action == IEEE80211_AMPDU_RX_START)
908 rx_ba_ssn = *ssn;
909
910 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
911 rx_ba_ssn, action);
912 }
913 mutex_unlock(&mvm->mutex);
914
915 /*
916 * If the tid is marked as started, we won't use it for offloaded
917 * traffic on the next D0i3 entry. It's safe to unref.
918 */
919 if (tx_agg_ref)
920 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
921
922 return ret;
923}
924
925static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
926 struct ieee80211_vif *vif)
927{
928 struct iwl_mvm *mvm = data;
929 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
930
931 mvmvif->uploaded = false;
932 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
933
934 spin_lock_bh(&mvm->time_event_lock);
935 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
936 spin_unlock_bh(&mvm->time_event_lock);
937
938 mvmvif->phy_ctxt = NULL;
939 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
940}
941
942static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
943 const void *data, size_t datalen)
944{
945 const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
946 ssize_t bytes_read;
947 ssize_t bytes_read_trans;
948
949 if (offset < dump_ptrs->op_mode_len) {
950 bytes_read = min_t(ssize_t, count,
951 dump_ptrs->op_mode_len - offset);
952 memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
953 bytes_read);
954 offset += bytes_read;
955 count -= bytes_read;
956
957 if (count == 0)
958 return bytes_read;
959 } else {
960 bytes_read = 0;
961 }
962
963 if (!dump_ptrs->trans_ptr)
964 return bytes_read;
965
966 offset -= dump_ptrs->op_mode_len;
967 bytes_read_trans = min_t(ssize_t, count,
968 dump_ptrs->trans_ptr->len - offset);
969 memcpy(buffer + bytes_read,
970 (u8 *)dump_ptrs->trans_ptr->data + offset,
971 bytes_read_trans);
972
973 return bytes_read + bytes_read_trans;
974}
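/*
 * Editor's note: worked example for the two-region read above (values are
 * illustrative only). With op_mode_len == 100, offset == 120 and
 * count == 50, the op-mode branch is skipped, offset is rebased to 20 into
 * the transport dump, and up to 50 bytes are copied from
 * trans_ptr->data + 20.
 */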
975
976static void iwl_mvm_free_coredump(const void *data)
977{
978 const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
979
980 vfree(fw_error_dump->op_mode_ptr);
981 vfree(fw_error_dump->trans_ptr);
982 kfree(fw_error_dump);
983}
984
985static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
986 struct iwl_fw_error_dump_data **dump_data)
987{
988 struct iwl_fw_error_dump_fifo *fifo_hdr;
989 u32 *fifo_data;
990 u32 fifo_len;
991 unsigned long flags;
992 int i, j;
993
994 if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
995 return;
996
997 /* Pull RXF data from all RXFs */
998 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
999 /*
1000 * The additional register offset needed to address this RXF
1001 * (relative to the first one)
1002 */
1003 u32 offset_diff = RXF_DIFF_FROM_PREV * i;
1004
1005 fifo_hdr = (void *)(*dump_data)->data;
1006 fifo_data = (void *)fifo_hdr->data;
1007 fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
1008
1009 /* No need to try to read the data if the length is 0 */
1010 if (fifo_len == 0)
1011 continue;
1012
1013 /* Add a TLV for the RXF */
1014 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
1015 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1016
1017 fifo_hdr->fifo_num = cpu_to_le32(i);
1018 fifo_hdr->available_bytes =
1019 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1020 RXF_RD_D_SPACE +
1021 offset_diff));
1022 fifo_hdr->wr_ptr =
1023 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1024 RXF_RD_WR_PTR +
1025 offset_diff));
1026 fifo_hdr->rd_ptr =
1027 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1028 RXF_RD_RD_PTR +
1029 offset_diff));
1030 fifo_hdr->fence_ptr =
1031 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1032 RXF_RD_FENCE_PTR +
1033 offset_diff));
1034 fifo_hdr->fence_mode =
1035 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1036 RXF_SET_FENCE_MODE +
1037 offset_diff));
1038
1039 /* Lock fence */
1040 iwl_trans_write_prph(mvm->trans,
1041 RXF_SET_FENCE_MODE + offset_diff, 0x1);
1042 /* Set fence pointer to the same place as the WR pointer */
1043 iwl_trans_write_prph(mvm->trans,
1044 RXF_LD_WR2FENCE + offset_diff, 0x1);
1045 /* Set fence offset */
1046 iwl_trans_write_prph(mvm->trans,
1047 RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
1048 0x0);
1049
1050 /* Read FIFO */
1051 fifo_len /= sizeof(u32); /* Size in DWORDS */
1052 for (j = 0; j < fifo_len; j++)
1053 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1054 RXF_FIFO_RD_FENCE_INC +
1055 offset_diff);
1056 *dump_data = iwl_fw_error_next_data(*dump_data);
1057 }
1058
1059 /* Pull TXF data from all TXFs */
1060 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
1061 /* Mark the number of TXF we're pulling now */
1062 iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
1063
1064 fifo_hdr = (void *)(*dump_data)->data;
1065 fifo_data = (void *)fifo_hdr->data;
1066 fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
1067
1068 /* No need to try to read the data if the length is 0 */
1069 if (fifo_len == 0)
1070 continue;
1071
1072 /* Add a TLV for the FIFO */
1073 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
1074 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1075
1076 fifo_hdr->fifo_num = cpu_to_le32(i);
1077 fifo_hdr->available_bytes =
1078 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1079 TXF_FIFO_ITEM_CNT));
1080 fifo_hdr->wr_ptr =
1081 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1082 TXF_WR_PTR));
1083 fifo_hdr->rd_ptr =
1084 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1085 TXF_RD_PTR));
1086 fifo_hdr->fence_ptr =
1087 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1088 TXF_FENCE_PTR));
1089 fifo_hdr->fence_mode =
1090 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1091 TXF_LOCK_FENCE));
1092
1093 /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
1094 iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
1095 TXF_WR_PTR);
1096
1097 /* Dummy-read to advance the read pointer to the head */
1098 iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
1099
1100 /* Read FIFO */
1101 fifo_len /= sizeof(u32); /* Size in DWORDS */
1102 for (j = 0; j < fifo_len; j++)
1103 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1104 TXF_READ_MODIFY_DATA);
1105 *dump_data = iwl_fw_error_next_data(*dump_data);
1106 }
1107
1108 iwl_trans_release_nic_access(mvm->trans, &flags);
1109}
1110
1111void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
1112{
1113 if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
1114 !mvm->fw_dump_desc)
1115 return;
1116
1117 kfree(mvm->fw_dump_desc);
1118 mvm->fw_dump_desc = NULL;
1119}
1120
1121#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
1122#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
1123
1124void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
1125{
1126 struct iwl_fw_error_dump_file *dump_file;
1127 struct iwl_fw_error_dump_data *dump_data;
1128 struct iwl_fw_error_dump_info *dump_info;
1129 struct iwl_fw_error_dump_mem *dump_mem;
1130 struct iwl_fw_error_dump_trigger_desc *dump_trig;
1131 struct iwl_mvm_dump_ptrs *fw_error_dump;
1132 u32 sram_len, sram_ofs;
1133 u32 file_len, fifo_data_len = 0;
1134 u32 smem_len = mvm->cfg->smem_len;
1135 u32 sram2_len = mvm->cfg->dccm2_len;
1136 bool monitor_dump_only = false;
1137
1138 lockdep_assert_held(&mvm->mutex);
1139
1140 /* there's no point in fw dump if the bus is dead */
1141 if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
1142 IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
1143 return;
1144 }
1145
1146 if (mvm->fw_dump_trig &&
1147 mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
1148 monitor_dump_only = true;
1149
1150 fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
1151 if (!fw_error_dump)
1152 return;
1153
1154 /* SRAM - include stack CCM if driver knows the values for it */
1155 if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
1156 const struct fw_img *img;
1157
1158 img = &mvm->fw->img[mvm->cur_ucode];
1159 sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
1160 sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
1161 } else {
1162 sram_ofs = mvm->cfg->dccm_offset;
1163 sram_len = mvm->cfg->dccm_len;
1164 }
1165
1166 /* reading RXF/TXF sizes */
1167 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
1168 struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
1169 int i;
1170
1171 fifo_data_len = 0;
1172
1173 /* Count RXF size */
1174 for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
1175 if (!mem_cfg->rxfifo_size[i])
1176 continue;
1177
1178 /* Add header info */
1179 fifo_data_len += mem_cfg->rxfifo_size[i] +
1180 sizeof(*dump_data) +
1181 sizeof(struct iwl_fw_error_dump_fifo);
1182 }
1183
1184 for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
1185 if (!mem_cfg->txfifo_size[i])
1186 continue;
1187
1188 /* Add header info */
1189 fifo_data_len += mem_cfg->txfifo_size[i] +
1190 sizeof(*dump_data) +
1191 sizeof(struct iwl_fw_error_dump_fifo);
1192 }
1193 }
1194
1195 file_len = sizeof(*dump_file) +
1196 sizeof(*dump_data) * 2 +
1197 sram_len + sizeof(*dump_mem) +
1198 fifo_data_len +
1199 sizeof(*dump_info);
1200
1201 /* Make room for the SMEM, if it exists */
1202 if (smem_len)
1203 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
1204
1205 /* Make room for the secondary SRAM, if it exists */
1206 if (sram2_len)
1207 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
1208
1209 /* Make room for the fw's virtual image pages, if they exist */
1210 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
1211 file_len += mvm->num_of_paging_blk *
1212 (sizeof(*dump_data) +
1213 sizeof(struct iwl_fw_error_dump_paging) +
1214 PAGING_BLOCK_SIZE);
1215
1216 /* If we only want a monitor dump, reset the file length */
1217 if (monitor_dump_only) {
1218 file_len = sizeof(*dump_file) + sizeof(*dump_data) +
1219 sizeof(*dump_info);
1220 }
1221
1222 /*
1223	 * For the 8000 HW family B-step, also include the ICCM (which resides separately)
1224 */
1225 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1226 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
1227 file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
1228 IWL8260_ICCM_LEN;
1229
1230 if (mvm->fw_dump_desc)
1231 file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
1232 mvm->fw_dump_desc->len;
1233
1234 dump_file = vzalloc(file_len);
1235 if (!dump_file) {
1236 kfree(fw_error_dump);
1237 iwl_mvm_free_fw_dump_desc(mvm);
1238 return;
1239 }
1240
1241 fw_error_dump->op_mode_ptr = dump_file;
1242
1243 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
1244 dump_data = (void *)dump_file->data;
1245
1246 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
1247 dump_data->len = cpu_to_le32(sizeof(*dump_info));
1248 dump_info = (void *) dump_data->data;
1249 dump_info->device_family =
1250 mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
1251 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
1252 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
1253 dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
1254 memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
1255 sizeof(dump_info->fw_human_readable));
1256 strncpy(dump_info->dev_human_readable, mvm->cfg->name,
1257 sizeof(dump_info->dev_human_readable));
1258 strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
1259 sizeof(dump_info->bus_human_readable));
1260
1261 dump_data = iwl_fw_error_next_data(dump_data);
1262 /* We only dump the FIFOs if the FW is in error state */
1263 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
1264 iwl_mvm_dump_fifos(mvm, &dump_data);
1265
1266 if (mvm->fw_dump_desc) {
1267 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
1268 dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
1269 mvm->fw_dump_desc->len);
1270 dump_trig = (void *)dump_data->data;
1271 memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
1272 sizeof(*dump_trig) + mvm->fw_dump_desc->len);
1273
1274 /* now we can free this copy */
1275 iwl_mvm_free_fw_dump_desc(mvm);
1276 dump_data = iwl_fw_error_next_data(dump_data);
1277 }
1278
1279	/* In case we only want a monitor dump, skip to dumping transport data */
1280 if (monitor_dump_only)
1281 goto dump_trans_data;
1282
1283 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1284 dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
1285 dump_mem = (void *)dump_data->data;
1286 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1287 dump_mem->offset = cpu_to_le32(sram_ofs);
1288 iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
1289 sram_len);
1290
1291 if (smem_len) {
1292 dump_data = iwl_fw_error_next_data(dump_data);
1293 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1294 dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
1295 dump_mem = (void *)dump_data->data;
1296 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
1297 dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
1298 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
1299 dump_mem->data, smem_len);
1300 }
1301
1302 if (sram2_len) {
1303 dump_data = iwl_fw_error_next_data(dump_data);
1304 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1305 dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
1306 dump_mem = (void *)dump_data->data;
1307 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1308 dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
1309 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
1310 dump_mem->data, sram2_len);
1311 }
1312
1313 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1314 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
1315 dump_data = iwl_fw_error_next_data(dump_data);
1316 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1317 dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
1318 sizeof(*dump_mem));
1319 dump_mem = (void *)dump_data->data;
1320 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1321 dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
1322 iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
1323 dump_mem->data, IWL8260_ICCM_LEN);
1324 }
1325
1326 /* Dump fw's virtual image */
1327 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
1328 u32 i;
1329
1330 for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
1331 struct iwl_fw_error_dump_paging *paging;
1332 struct page *pages =
1333 mvm->fw_paging_db[i].fw_paging_block;
1334
1335 dump_data = iwl_fw_error_next_data(dump_data);
1336 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
1337 dump_data->len = cpu_to_le32(sizeof(*paging) +
1338 PAGING_BLOCK_SIZE);
1339 paging = (void *)dump_data->data;
1340 paging->index = cpu_to_le32(i);
1341 memcpy(paging->data, page_address(pages),
1342 PAGING_BLOCK_SIZE);
1343 }
1344 }
1345
1346dump_trans_data:
1347 fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
1348 mvm->fw_dump_trig);
1349 fw_error_dump->op_mode_len = file_len;
1350 if (fw_error_dump->trans_ptr)
1351 file_len += fw_error_dump->trans_ptr->len;
1352 dump_file->file_len = cpu_to_le32(file_len);
1353
1354 dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
1355 GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
1356
1357 mvm->fw_dump_trig = NULL;
1358 clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
1359}
1360
1361struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
1362 .trig_desc = {
1363 .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
1364 },
1365};
1366
1367static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1368{
1369	/* clear the D3 reconfig flag; we only need it to avoid dumping a
1370	 * firmware coredump on reconfiguration, and we shouldn't do that
1371	 * on the D3->D0 transition
1372 */
1373 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1374 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1375 iwl_mvm_fw_error_dump(mvm);
1376 }
1377
1378 /* cleanup all stale references (scan, roc), but keep the
1379 * ucode_down ref until reconfig is complete
1380 */
1381 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1382
1383 iwl_trans_stop_device(mvm->trans);
1384
1385 mvm->scan_status = 0;
1386 mvm->ps_disabled = false;
1387 mvm->calibrating = false;
1388
1389 /* just in case one was running */
1390 ieee80211_remain_on_channel_expired(mvm->hw);
1391
1392 /*
1393 * cleanup all interfaces, even inactive ones, as some might have
1394 * gone down during the HW restart
1395 */
1396 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1397
1398 mvm->p2p_device_vif = NULL;
1399 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1400
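	/* Reset the PHY contexts and clear all cached coex / station-drain
	 * state; it will be rebuilt as part of the reconfiguration.
	 */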
1401 iwl_mvm_reset_phy_ctxts(mvm);
1402 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1403 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1404 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1405 memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
1406 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1407 memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
1408 memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
1409 memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
1410
1411 ieee80211_wake_queues(mvm->hw);
1412
1413 /* clear any stale d0i3 state */
1414 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1415
1416 mvm->vif_count = 0;
1417 mvm->rx_ba_sessions = 0;
1418 mvm->fw_dbg_conf = FW_DBG_INVALID;
1419
1420 /* keep statistics ticking */
1421 iwl_mvm_accu_radio_stats(mvm);
1422}
1423
1424int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1425{
1426 int ret;
1427
1428 lockdep_assert_held(&mvm->mutex);
1429
1430 /* Clean up some internal and mac80211 state on restart */
1431 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1432 iwl_mvm_restart_cleanup(mvm);
1433
1434 ret = iwl_mvm_up(mvm);
1435
1436 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1437 /* Something went wrong - we need to finish some cleanup
1438 * that normally iwl_mvm_mac_restart_complete() below
1439 * would do.
1440 */
1441 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1442 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1443 }
1444
1445 return ret;
1446}
1447
1448static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1449{
1450 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1451 int ret;
1452
1453 /* Some hw restart cleanups must not hold the mutex */
1454 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1455 /*
1456 * Make sure we are out of d0i3. This is needed
1457 * to make sure the reference accounting is correct
1458 * (and there is no stale d0i3_exit_work).
1459 */
1460 wait_event_timeout(mvm->d0i3_exit_waitq,
1461 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1462 &mvm->status),
1463 HZ);
1464 }
1465
1466 mutex_lock(&mvm->mutex);
1467 ret = __iwl_mvm_mac_start(mvm);
1468 mutex_unlock(&mvm->mutex);
1469
1470 return ret;
1471}
1472
1473static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1474{
1475 int ret;
1476
1477 mutex_lock(&mvm->mutex);
1478
1479 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1480 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1481 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1482 if (ret)
1483 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1484 ret);
1485
1486 /* allow transport/FW low power modes */
1487 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1488
1489 /*
1490 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1491 * of packets the FW sent out, so we must reconnect.
1492 */
1493 iwl_mvm_teardown_tdls_peers(mvm);
1494
1495 mutex_unlock(&mvm->mutex);
1496}
1497
1498static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1499{
1500 if (!iwl_mvm_is_d0i3_supported(mvm))
1501 return;
1502
1503 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
1504 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
1505 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1506 &mvm->status),
1507 HZ))
1508 WARN_ONCE(1, "D0i3 exit on resume timed out\n");
1509}
1510
1511static void
1512iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1513 enum ieee80211_reconfig_type reconfig_type)
1514{
1515 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1516
1517 switch (reconfig_type) {
1518 case IEEE80211_RECONFIG_TYPE_RESTART:
1519 iwl_mvm_restart_complete(mvm);
1520 break;
1521 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1522 iwl_mvm_resume_complete(mvm);
1523 break;
1524 }
1525}
1526
1527void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1528{
1529 lockdep_assert_held(&mvm->mutex);
1530
1531	/* firmware counters are obviously reset now; clear the accumulated
1532	 * radio statistics too so we don't keep partial accounting.
1533 */
1534 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1535
1536 /*
1537 * Disallow low power states when the FW is down by taking
1538	 * the UCODE_DOWN ref. In case of an ongoing hw restart the
1539 * ref is already taken, so don't take it again.
1540 */
1541 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1542 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1543
1544 /* async_handlers_wk is now blocked */
1545
1546 /*
1547 * The work item could be running or queued if the
1548 * ROC time event stops just as we get here.
1549 */
1550 flush_work(&mvm->roc_done_wk);
1551
1552 iwl_trans_stop_device(mvm->trans);
1553
1554 iwl_mvm_async_handlers_purge(mvm);
1555 /* async_handlers_list is empty and will stay empty: HW is stopped */
1556
1557 /* the fw is stopped, the aux sta is dead: clean up driver state */
1558 iwl_mvm_del_aux_sta(mvm);
1559
1560 /*
1561 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1562 * won't be called in this case).
1563	 * But make sure to clean up interfaces that went down before or while
1564	 * the HW restart was requested.
1565 */
1566 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1567 ieee80211_iterate_interfaces(mvm->hw, 0,
1568 iwl_mvm_cleanup_iterator, mvm);
1569
1570 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1571 * make sure there's nothing left there and warn if any is found.
1572 */
1573 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1574 int i;
1575
1576 for (i = 0; i < mvm->max_scans; i++) {
1577 if (WARN_ONCE(mvm->scan_uid_status[i],
1578 "UMAC scan UID %d status was not cleaned\n",
1579 i))
1580 mvm->scan_uid_status[i] = 0;
1581 }
1582 }
1583
1584 mvm->ucode_loaded = false;
1585}
1586
1587static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1588{
1589 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1590
1591 flush_work(&mvm->d0i3_exit_work);
1592 flush_work(&mvm->async_handlers_wk);
1593 cancel_delayed_work_sync(&mvm->fw_dump_wk);
1594 iwl_mvm_free_fw_dump_desc(mvm);
1595
1596 mutex_lock(&mvm->mutex);
1597 __iwl_mvm_mac_stop(mvm);
1598 mutex_unlock(&mvm->mutex);
1599
1600 /*
1601 * The worker might have been waiting for the mutex, let it run and
1602 * discover that its list is now empty.
1603 */
1604 cancel_work_sync(&mvm->async_handlers_wk);
1605}
1606
1607static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1608{
1609 u16 i;
1610
1611 lockdep_assert_held(&mvm->mutex);
1612
1613 for (i = 0; i < NUM_PHY_CTX; i++)
1614 if (!mvm->phy_ctxts[i].ref)
1615 return &mvm->phy_ctxts[i];
1616
1617 IWL_ERR(mvm, "No available PHY context\n");
1618 return NULL;
1619}
1620
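/* Send a per-MAC TX power restriction to the firmware. The limit is passed
 * in units of 1/8 dBm (hence the 8 * tx_power below); IWL_DEV_MAX_TX_POWER
 * is used when no restriction was requested.
 */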
1621static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1622 s16 tx_power)
1623{
1624 struct iwl_dev_tx_power_cmd cmd = {
1625 .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1626 .v2.mac_context_id =
1627 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1628 .v2.pwr_restriction = cpu_to_le16(8 * tx_power),
1629 };
1630 int len = sizeof(cmd);
1631
1632 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1633 cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1634
1635 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
1636 len = sizeof(cmd.v2);
1637
1638 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1639}
1640
1641static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1642 struct ieee80211_vif *vif)
1643{
1644 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1645 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1646 int ret;
1647
1648 mvmvif->mvm = mvm;
1649
1650 /*
1651 * make sure D0i3 exit is completed, otherwise a target access
1652 * during tx queue configuration could be done when still in
1653 * D0i3 state.
1654 */
1655 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1656 if (ret)
1657 return ret;
1658
1659 /*
1660 * Not much to do here. The stack will not allow interface
1661 * types or combinations that we didn't advertise, so we
1662 * don't really have to check the types.
1663 */
1664
1665 mutex_lock(&mvm->mutex);
1666
1667 /* make sure that beacon statistics don't go backwards with FW reset */
1668 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1669 mvmvif->beacon_stats.accu_num_beacons +=
1670 mvmvif->beacon_stats.num_beacons;
1671
1672 /* Allocate resources for the MAC context, and add it to the fw */
1673 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1674 if (ret)
1675 goto out_unlock;
1676
1677 /* Counting number of interfaces is needed for legacy PM */
1678 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1679 mvm->vif_count++;
1680
1681 /*
1682 * The AP binding flow can be done only after the beacon
1683 * template is configured (which happens only in the mac80211
1684 * start_ap() flow), and adding the broadcast station can happen
1685 * only after the binding.
1686 * In addition, since modifying the MAC before adding a bcast
1687 * station is not allowed by the FW, delay the adding of MAC context to
1688 * the point where we can also add the bcast station.
1689 * In short: there's not much we can do at this point, other than
1690 * allocating resources :)
1691 */
1692 if (vif->type == NL80211_IFTYPE_AP ||
1693 vif->type == NL80211_IFTYPE_ADHOC) {
1694 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1695 if (ret) {
1696 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1697 goto out_release;
1698 }
1699
1700 iwl_mvm_vif_dbgfs_register(mvm, vif);
1701 goto out_unlock;
1702 }
1703
1704 mvmvif->features |= hw->netdev_features;
1705
1706 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1707 if (ret)
1708 goto out_release;
1709
1710 ret = iwl_mvm_power_update_mac(mvm);
1711 if (ret)
1712 goto out_remove_mac;
1713
1714 /* beacon filtering */
1715 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1716 if (ret)
1717 goto out_remove_mac;
1718
1719 if (!mvm->bf_allowed_vif &&
1720 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1721 mvm->bf_allowed_vif = mvmvif;
1722 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1723 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1724 }
1725
1726 /*
1727 * P2P_DEVICE interface does not have a channel context assigned to it,
1728 * so a dedicated PHY context is allocated to it and the corresponding
1729 * MAC context is bound to it at this stage.
1730 */
1731 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1732
1733 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1734 if (!mvmvif->phy_ctxt) {
1735 ret = -ENOSPC;
1736 goto out_free_bf;
1737 }
1738
1739 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1740 ret = iwl_mvm_binding_add_vif(mvm, vif);
1741 if (ret)
1742 goto out_unref_phy;
1743
1744 ret = iwl_mvm_add_bcast_sta(mvm, vif);
1745 if (ret)
1746 goto out_unbind;
1747
1748 /* Save a pointer to p2p device vif, so it can later be used to
1749 * update the p2p device MAC when a GO is started/stopped */
1750 mvm->p2p_device_vif = vif;
1751 }
1752
1753 iwl_mvm_vif_dbgfs_register(mvm, vif);
1754 goto out_unlock;
1755
1756 out_unbind:
1757 iwl_mvm_binding_remove_vif(mvm, vif);
1758 out_unref_phy:
1759 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1760 out_free_bf:
1761 if (mvm->bf_allowed_vif == mvmvif) {
1762 mvm->bf_allowed_vif = NULL;
1763 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1764 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1765 }
1766 out_remove_mac:
1767 mvmvif->phy_ctxt = NULL;
1768 iwl_mvm_mac_ctxt_remove(mvm, vif);
1769 out_release:
1770 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1771 mvm->vif_count--;
1772
1773 iwl_mvm_mac_ctxt_release(mvm, vif);
1774 out_unlock:
1775 mutex_unlock(&mvm->mutex);
1776
1777 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1778
1779 return ret;
1780}
1781
1782static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1783 struct ieee80211_vif *vif)
1784{
1785 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1786
1787 if (tfd_msk) {
1788 /*
1789 * mac80211 first removes all the stations of the vif and
1790 * then removes the vif. When it removes a station it also
1791 * flushes the AMPDU session. So by now, all the AMPDU sessions
1792 * of all the stations of this vif are closed, and the queues
1793 * of these AMPDU sessions are properly closed.
1794 * We still need to take care of the shared queues of the vif.
1795 * Flush them here.
1796 */
1797 mutex_lock(&mvm->mutex);
1798 iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
1799 mutex_unlock(&mvm->mutex);
1800
1801 /*
1802 * There are transports that buffer a few frames in the host.
1803 * For these, the flush above isn't enough since while we were
1804 * flushing, the transport might have sent more frames to the
1805 * device. To solve this, wait here until the transport is
1806 * empty. Technically, this could have replaced the flush
1807 * above, but flush is much faster than draining. So flush
1808 * first, and drain to make sure we have no frames in the
1809 * transport anymore.
1810 * If a station still had frames on the shared queues, it is
1811 * already marked as draining, so to complete the draining, we
1812 * just need to wait until the transport is empty.
1813 */
1814 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1815 }
1816
1817 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1818 /*
1819 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1820 * We assume here that all the packets sent to the OFFCHANNEL
1821 * queue are sent in ROC session.
1822 */
1823 flush_work(&mvm->roc_done_wk);
1824 } else {
1825 /*
1826 * By now, all the AC queues are empty. The AGG queues are
1827 * empty too. We already got all the Tx responses for all the
1828		 * packets in the queues. The drain work may have been
1829 * triggered. Flush it.
1830 */
1831 flush_work(&mvm->sta_drained_wk);
1832 }
1833}
1834
1835static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1836 struct ieee80211_vif *vif)
1837{
1838 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1839 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1840
1841 iwl_mvm_prepare_mac_removal(mvm, vif);
1842
1843 mutex_lock(&mvm->mutex);
1844
1845 if (mvm->bf_allowed_vif == mvmvif) {
1846 mvm->bf_allowed_vif = NULL;
1847 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1848 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1849 }
1850
1851 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1852
1853 /*
1854	 * For AP/GO interfaces, the teardown of the resources allocated to the
1855	 * interface is handled as part of the stop_ap flow.
1856 */
1857 if (vif->type == NL80211_IFTYPE_AP ||
1858 vif->type == NL80211_IFTYPE_ADHOC) {
1859#ifdef CONFIG_NL80211_TESTMODE
1860 if (vif == mvm->noa_vif) {
1861 mvm->noa_vif = NULL;
1862 mvm->noa_duration = 0;
1863 }
1864#endif
1865 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1866 goto out_release;
1867 }
1868
1869 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1870 mvm->p2p_device_vif = NULL;
1871 iwl_mvm_rm_bcast_sta(mvm, vif);
1872 iwl_mvm_binding_remove_vif(mvm, vif);
1873 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1874 mvmvif->phy_ctxt = NULL;
1875 }
1876
1877 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1878 mvm->vif_count--;
1879
1880 iwl_mvm_power_update_mac(mvm);
1881 iwl_mvm_mac_ctxt_remove(mvm, vif);
1882
1883out_release:
1884 iwl_mvm_mac_ctxt_release(mvm, vif);
1885 mutex_unlock(&mvm->mutex);
1886}
1887
1888static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1889{
1890 return 0;
1891}
1892
1893struct iwl_mvm_mc_iter_data {
1894 struct iwl_mvm *mvm;
1895 int port_id;
1896};
1897
1898static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1899 struct ieee80211_vif *vif)
1900{
1901 struct iwl_mvm_mc_iter_data *data = _data;
1902 struct iwl_mvm *mvm = data->mvm;
1903 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1904 int ret, len;
1905
1906 /* if we don't have free ports, mcast frames will be dropped */
1907 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1908 return;
1909
1910 if (vif->type != NL80211_IFTYPE_STATION ||
1911 !vif->bss_conf.assoc)
1912 return;
1913
1914 cmd->port_id = data->port_id++;
1915 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1916 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1917
1918 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1919 if (ret)
1920 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1921}
1922
1923static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1924{
1925 struct iwl_mvm_mc_iter_data iter_data = {
1926 .mvm = mvm,
1927 };
1928
1929 lockdep_assert_held(&mvm->mutex);
1930
1931 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1932 return;
1933
1934 ieee80211_iterate_active_interfaces_atomic(
1935 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1936 iwl_mvm_mc_iface_iterator, &iter_data);
1937}
1938
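/* mac80211's prepare_multicast() returns an opaque u64 cookie; here it
 * carries the pointer to the freshly allocated MCAST_FILTER_CMD, which is
 * handed back to us in iwl_mvm_configure_filter() via the multicast argument.
 */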
1939static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1940 struct netdev_hw_addr_list *mc_list)
1941{
1942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1943 struct iwl_mcast_filter_cmd *cmd;
1944 struct netdev_hw_addr *addr;
1945 int addr_count;
1946 bool pass_all;
1947 int len;
1948
1949 addr_count = netdev_hw_addr_list_count(mc_list);
1950 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1951 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1952 if (pass_all)
1953 addr_count = 0;
1954
1955 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1956 cmd = kzalloc(len, GFP_ATOMIC);
1957 if (!cmd)
1958 return 0;
1959
1960 if (pass_all) {
1961 cmd->pass_all = 1;
1962 return (u64)(unsigned long)cmd;
1963 }
1964
1965 netdev_hw_addr_list_for_each(addr, mc_list) {
1966 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1967 cmd->count, addr->addr);
1968 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1969 addr->addr, ETH_ALEN);
1970 cmd->count++;
1971 }
1972
1973 return (u64)(unsigned long)cmd;
1974}
1975
1976static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1977 unsigned int changed_flags,
1978 unsigned int *total_flags,
1979 u64 multicast)
1980{
1981 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1982 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1983
1984 mutex_lock(&mvm->mutex);
1985
1986 /* replace previous configuration */
1987 kfree(mvm->mcast_filter_cmd);
1988 mvm->mcast_filter_cmd = cmd;
1989
1990 if (!cmd)
1991 goto out;
1992
1993 iwl_mvm_recalc_multicast(mvm);
1994out:
1995 mutex_unlock(&mvm->mutex);
1996 *total_flags = 0;
1997}
1998
1999static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
2000 struct ieee80211_vif *vif,
2001 unsigned int filter_flags,
2002 unsigned int changed_flags)
2003{
2004 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2005
2006	/* We only support filtering probe requests */
2007 if (!(changed_flags & FIF_PROBE_REQ))
2008 return;
2009
2010 /* Supported only for p2p client interfaces */
2011 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
2012 !vif->p2p)
2013 return;
2014
2015 mutex_lock(&mvm->mutex);
2016 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2017 mutex_unlock(&mvm->mutex);
2018}
2019
2020#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
2021struct iwl_bcast_iter_data {
2022 struct iwl_mvm *mvm;
2023 struct iwl_bcast_filter_cmd *cmd;
2024 u8 current_filter;
2025};
2026
2027static void
2028iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
2029 const struct iwl_fw_bcast_filter *in_filter,
2030 struct iwl_fw_bcast_filter *out_filter)
2031{
2032 struct iwl_fw_bcast_filter_attr *attr;
2033 int i;
2034
2035 memcpy(out_filter, in_filter, sizeof(*out_filter));
2036
2037 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
2038 attr = &out_filter->attrs[i];
2039
2040 if (!attr->mask)
2041 break;
2042
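		/* The reserved1 field doubles as a driver-internal magic that
		 * selects which per-vif value to patch into this attribute
		 * (the ARP IP address or the tail of the MAC address); it is
		 * cleared again below before the command reaches the firmware.
		 */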
2043 switch (attr->reserved1) {
2044 case cpu_to_le16(BC_FILTER_MAGIC_IP):
2045 if (vif->bss_conf.arp_addr_cnt != 1) {
2046 attr->mask = 0;
2047 continue;
2048 }
2049
2050 attr->val = vif->bss_conf.arp_addr_list[0];
2051 break;
2052 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
2053 attr->val = *(__be32 *)&vif->addr[2];
2054 break;
2055 default:
2056 break;
2057 }
2058 attr->reserved1 = 0;
2059 out_filter->num_attrs++;
2060 }
2061}
2062
2063static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
2064 struct ieee80211_vif *vif)
2065{
2066 struct iwl_bcast_iter_data *data = _data;
2067 struct iwl_mvm *mvm = data->mvm;
2068 struct iwl_bcast_filter_cmd *cmd = data->cmd;
2069 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2070 struct iwl_fw_bcast_mac *bcast_mac;
2071 int i;
2072
2073 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
2074 return;
2075
2076 bcast_mac = &cmd->macs[mvmvif->id];
2077
2078 /*
2079 * enable filtering only for associated stations, but not for P2P
2080 * Clients
2081 */
2082 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
2083 !vif->bss_conf.assoc)
2084 return;
2085
2086 bcast_mac->default_discard = 1;
2087
2088 /* copy all configured filters */
2089 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
2090 /*
2091 * Make sure we don't exceed our filters limit.
2092		 * If there is still a valid filter to be configured,
2093 * be on the safe side and just allow bcast for this mac.
2094 */
2095 if (WARN_ON_ONCE(data->current_filter >=
2096 ARRAY_SIZE(cmd->filters))) {
2097 bcast_mac->default_discard = 0;
2098 bcast_mac->attached_filters = 0;
2099 break;
2100 }
2101
2102 iwl_mvm_set_bcast_filter(vif,
2103 &mvm->bcast_filters[i],
2104 &cmd->filters[data->current_filter]);
2105
2106 /* skip current filter if it contains no attributes */
2107 if (!cmd->filters[data->current_filter].num_attrs)
2108 continue;
2109
2110 /* attach the filter to current mac */
2111 bcast_mac->attached_filters |=
2112 cpu_to_le16(BIT(data->current_filter));
2113
2114 data->current_filter++;
2115 }
2116}
2117
2118bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
2119 struct iwl_bcast_filter_cmd *cmd)
2120{
2121 struct iwl_bcast_iter_data iter_data = {
2122 .mvm = mvm,
2123 .cmd = cmd,
2124 };
2125
2126 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
2127 return false;
2128
2129 memset(cmd, 0, sizeof(*cmd));
2130 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
2131 cmd->max_macs = ARRAY_SIZE(cmd->macs);
2132
2133#ifdef CONFIG_IWLWIFI_DEBUGFS
2134 /* use debugfs filters/macs if override is configured */
2135 if (mvm->dbgfs_bcast_filtering.override) {
2136 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
2137 sizeof(cmd->filters));
2138 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
2139 sizeof(cmd->macs));
2140 return true;
2141 }
2142#endif
2143
2144 /* if no filters are configured, do nothing */
2145 if (!mvm->bcast_filters)
2146 return false;
2147
2148 /* configure and attach these filters for each associated sta vif */
2149 ieee80211_iterate_active_interfaces(
2150 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2151 iwl_mvm_bcast_filter_iterator, &iter_data);
2152
2153 return true;
2154}
2155static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2156 struct ieee80211_vif *vif)
2157{
2158 struct iwl_bcast_filter_cmd cmd;
2159
2160 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
2161 return 0;
2162
2163 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
2164 return 0;
2165
2166 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
2167 sizeof(cmd), &cmd);
2168}
2169#else
2170static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2171 struct ieee80211_vif *vif)
2172{
2173 return 0;
2174}
2175#endif
2176
2177static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2178 struct ieee80211_vif *vif,
2179 struct ieee80211_bss_conf *bss_conf,
2180 u32 changes)
2181{
2182 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2183 int ret;
2184
2185 /*
2186 * Re-calculate the tsf id, as the master-slave relations depend on the
2187 * beacon interval, which was not known when the station interface was
2188 * added.
2189 */
2190 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
2191 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2192
2193 /*
2194 * If we're not associated yet, take the (new) BSSID before associating
2195 * so the firmware knows. If we're already associated, then use the old
2196 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2197 * branch for disassociation below.
2198 */
2199 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2200 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2201
2202 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
2203 if (ret)
2204 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2205
2206 /* after sending it once, adopt mac80211 data */
2207 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2208 mvmvif->associated = bss_conf->assoc;
2209
2210 if (changes & BSS_CHANGED_ASSOC) {
2211 if (bss_conf->assoc) {
2212 /* clear statistics to get clean beacon counter */
2213 iwl_mvm_request_statistics(mvm, true);
2214 memset(&mvmvif->beacon_stats, 0,
2215 sizeof(mvmvif->beacon_stats));
2216
2217 /* add quota for this interface */
2218 ret = iwl_mvm_update_quotas(mvm, true, NULL);
2219 if (ret) {
2220 IWL_ERR(mvm, "failed to update quotas\n");
2221 return;
2222 }
2223
2224 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2225 &mvm->status)) {
2226 /*
2227 * If we're restarting then the firmware will
2228 * obviously have lost synchronisation with
2229 * the AP. It will attempt to synchronise by
2230 * itself, but we can make it more reliable by
2231 * scheduling a session protection time event.
2232 *
2233 * The firmware needs to receive a beacon to
2234 * catch up with synchronisation, use 110% of
2235 * the beacon interval.
2236 *
2237 * Set a large maximum delay to allow for more
2238 * than a single interface.
2239 */
2240 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2241 iwl_mvm_protect_session(mvm, vif, dur, dur,
2242 5 * dur, false);
2243 }
2244
2245 iwl_mvm_sf_update(mvm, vif, false);
2246 iwl_mvm_power_vif_assoc(mvm, vif);
2247 if (vif->p2p) {
2248 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
2249 iwl_mvm_update_smps(mvm, vif,
2250 IWL_MVM_SMPS_REQ_PROT,
2251 IEEE80211_SMPS_DYNAMIC);
2252 }
2253 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2254 /*
2255 * If update fails - SF might be running in associated
2256 * mode while disassociated - which is forbidden.
2257 */
2258 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
2259 "Failed to update SF upon disassociation\n");
2260
2261 /* remove AP station now that the MAC is unassoc */
2262 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
2263 if (ret)
2264 IWL_ERR(mvm, "failed to remove AP station\n");
2265
2266 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2267 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
2268 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
2269 /* remove quota for this interface */
2270 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2271 if (ret)
2272 IWL_ERR(mvm, "failed to update quotas\n");
2273
2274 if (vif->p2p)
2275 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
2276
2277 /* this will take the cleared BSSID from bss_conf */
2278 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2279 if (ret)
2280 IWL_ERR(mvm,
2281 "failed to update MAC %pM (clear after unassoc)\n",
2282 vif->addr);
2283 }
2284
2285 iwl_mvm_recalc_multicast(mvm);
2286 iwl_mvm_configure_bcast_filter(mvm, vif);
2287
2288 /* reset rssi values */
2289 mvmvif->bf_data.ave_beacon_signal = 0;
2290
2291 iwl_mvm_bt_coex_vif_change(mvm);
2292 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2293 IEEE80211_SMPS_AUTOMATIC);
2294 } else if (changes & BSS_CHANGED_BEACON_INFO) {
2295 /*
2296 * We received a beacon _after_ association so
2297 * remove the session protection.
2298 */
2299 iwl_mvm_remove_time_event(mvm, mvmvif,
2300 &mvmvif->time_event_data);
2301 }
2302
2303 if (changes & BSS_CHANGED_BEACON_INFO) {
2304 iwl_mvm_sf_update(mvm, vif, false);
2305 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2306 }
2307
2308 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
2309 ret = iwl_mvm_power_update_mac(mvm);
2310 if (ret)
2311 IWL_ERR(mvm, "failed to update power mode\n");
2312 }
2313
2314 if (changes & BSS_CHANGED_TXPOWER) {
2315 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2316 bss_conf->txpower);
2317 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2318 }
2319
2320 if (changes & BSS_CHANGED_CQM) {
2321 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2322 /* reset cqm events tracking */
2323 mvmvif->bf_data.last_cqm_event = 0;
2324 if (mvmvif->bf_data.bf_enabled) {
2325 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2326 if (ret)
2327 IWL_ERR(mvm,
2328 "failed to update CQM thresholds\n");
2329 }
2330 }
2331
2332 if (changes & BSS_CHANGED_ARP_FILTER) {
2333 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2334 iwl_mvm_configure_bcast_filter(mvm, vif);
2335 }
2336}
2337
2338static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2339 struct ieee80211_vif *vif)
2340{
2341 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2342 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2343 int ret;
2344
2345 /*
2346 * iwl_mvm_mac_ctxt_add() might read directly from the device
2347 * (the system time), so make sure it is available.
2348 */
2349 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2350 if (ret)
2351 return ret;
2352
2353 mutex_lock(&mvm->mutex);
2354
2355 /* Send the beacon template */
2356 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2357 if (ret)
2358 goto out_unlock;
2359
2360 /*
2361 * Re-calculate the tsf id, as the master-slave relations depend on the
2362 * beacon interval, which was not known when the AP interface was added.
2363 */
2364 if (vif->type == NL80211_IFTYPE_AP)
2365 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2366
2367 mvmvif->ap_assoc_sta_count = 0;
2368
2369 /* Add the mac context */
2370 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2371 if (ret)
2372 goto out_unlock;
2373
2374 /* Perform the binding */
2375 ret = iwl_mvm_binding_add_vif(mvm, vif);
2376 if (ret)
2377 goto out_remove;
2378
2379 /* Send the bcast station. At this stage the TBTT and DTIM time events
2380 * are added and applied to the scheduler */
2381 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2382 if (ret)
2383 goto out_unbind;
2384
2385 /* must be set before quota calculations */
2386 mvmvif->ap_ibss_active = true;
2387
2388	/* power update needs to be done before quotas */
2389 iwl_mvm_power_update_mac(mvm);
2390
2391 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2392 if (ret)
2393 goto out_quota_failed;
2394
2395 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2396 if (vif->p2p && mvm->p2p_device_vif)
2397 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2398
2399 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2400
2401 iwl_mvm_bt_coex_vif_change(mvm);
2402
2403 /* we don't support TDLS during DCM */
2404 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2405 iwl_mvm_teardown_tdls_peers(mvm);
2406
2407 goto out_unlock;
2408
2409out_quota_failed:
2410 iwl_mvm_power_update_mac(mvm);
2411 mvmvif->ap_ibss_active = false;
2412 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2413out_unbind:
2414 iwl_mvm_binding_remove_vif(mvm, vif);
2415out_remove:
2416 iwl_mvm_mac_ctxt_remove(mvm, vif);
2417out_unlock:
2418 mutex_unlock(&mvm->mutex);
2419 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2420 return ret;
2421}
2422
2423static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2424 struct ieee80211_vif *vif)
2425{
2426 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2427 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2428
2429 iwl_mvm_prepare_mac_removal(mvm, vif);
2430
2431 mutex_lock(&mvm->mutex);
2432
2433 /* Handle AP stop while in CSA */
2434 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2435 iwl_mvm_remove_time_event(mvm, mvmvif,
2436 &mvmvif->time_event_data);
2437 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2438 mvmvif->csa_countdown = false;
2439 }
2440
2441 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2442 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2443 mvm->csa_tx_block_bcn_timeout = 0;
2444 }
2445
2446 mvmvif->ap_ibss_active = false;
2447 mvm->ap_last_beacon_gp2 = 0;
2448
2449 iwl_mvm_bt_coex_vif_change(mvm);
2450
2451 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2452
2453 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2454 if (vif->p2p && mvm->p2p_device_vif)
2455 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2456
2457 iwl_mvm_update_quotas(mvm, false, NULL);
2458 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2459 iwl_mvm_binding_remove_vif(mvm, vif);
2460
2461 iwl_mvm_power_update_mac(mvm);
2462
2463 iwl_mvm_mac_ctxt_remove(mvm, vif);
2464
2465 mutex_unlock(&mvm->mutex);
2466}
2467
2468static void
2469iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2470 struct ieee80211_vif *vif,
2471 struct ieee80211_bss_conf *bss_conf,
2472 u32 changes)
2473{
2474 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2475
2476 /* Changes will be applied when the AP/IBSS is started */
2477 if (!mvmvif->ap_ibss_active)
2478 return;
2479
2480 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2481 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2482 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2483 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2484
2485 /* Need to send a new beacon template to the FW */
2486 if (changes & BSS_CHANGED_BEACON &&
2487 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2488 IWL_WARN(mvm, "Failed updating beacon data\n");
2489
2490 if (changes & BSS_CHANGED_TXPOWER) {
2491 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2492 bss_conf->txpower);
2493 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2494 }
2495
2496}
2497
2498static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2499 struct ieee80211_vif *vif,
2500 struct ieee80211_bss_conf *bss_conf,
2501 u32 changes)
2502{
2503 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2504
2505 /*
2506 * iwl_mvm_bss_info_changed_station() might call
2507 * iwl_mvm_protect_session(), which reads directly from
2508 * the device (the system time), so make sure it is available.
2509 */
2510 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2511 return;
2512
2513 mutex_lock(&mvm->mutex);
2514
2515 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2516 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2517
2518 switch (vif->type) {
2519 case NL80211_IFTYPE_STATION:
2520 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2521 break;
2522 case NL80211_IFTYPE_AP:
2523 case NL80211_IFTYPE_ADHOC:
2524 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2525 break;
2526 default:
2527 /* shouldn't happen */
2528 WARN_ON_ONCE(1);
2529 }
2530
2531 mutex_unlock(&mvm->mutex);
2532 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2533}
2534
2535static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2536 struct ieee80211_vif *vif,
2537 struct ieee80211_scan_request *hw_req)
2538{
2539 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2540 int ret;
2541
2542 if (hw_req->req.n_channels == 0 ||
2543 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2544 return -EINVAL;
2545
2546 mutex_lock(&mvm->mutex);
2547 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2548 mutex_unlock(&mvm->mutex);
2549
2550 return ret;
2551}
2552
2553static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2554 struct ieee80211_vif *vif)
2555{
2556 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2557
2558 mutex_lock(&mvm->mutex);
2559
2560 /* Due to a race condition, it's possible that mac80211 asks
2561 * us to stop a hw_scan when it's already stopped. This can
2562 * happen, for instance, if we stopped the scan ourselves,
2563 * called ieee80211_scan_completed() and the userspace called
2564	 * cancel scan before ieee80211_scan_work() could run.
2565 * To handle that, simply return if the scan is not running.
2566 */
2567 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2568 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2569
2570 mutex_unlock(&mvm->mutex);
2571}
2572
2573static void
2574iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2575 struct ieee80211_sta *sta, u16 tids,
2576 int num_frames,
2577 enum ieee80211_frame_release_type reason,
2578 bool more_data)
2579{
2580 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2581
2582 /* Called when we need to transmit (a) frame(s) from mac80211 */
2583
2584 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2585 tids, more_data, false);
2586}
2587
2588static void
2589iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2590 struct ieee80211_sta *sta, u16 tids,
2591 int num_frames,
2592 enum ieee80211_frame_release_type reason,
2593 bool more_data)
2594{
2595 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2596
2597 /* Called when we need to transmit (a) frame(s) from agg queue */
2598
2599 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2600 tids, more_data, true);
2601}
2602
2603static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2604 struct ieee80211_vif *vif,
2605 enum sta_notify_cmd cmd,
2606 struct ieee80211_sta *sta)
2607{
2608 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2609 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2610 unsigned long txqs = 0, tids = 0;
2611 int tid;
2612
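	/* Collect the TX queues used by this station and the TIDs that still
	 * have queued frames, so they can be frozen / marked as buffered below.
	 */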
2613 spin_lock_bh(&mvmsta->lock);
2614 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2615 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2616
2617 if (tid_data->state != IWL_AGG_ON &&
2618 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2619 continue;
2620
2621 __set_bit(tid_data->txq_id, &txqs);
2622
2623 if (iwl_mvm_tid_queued(tid_data) == 0)
2624 continue;
2625
2626 __set_bit(tid, &tids);
2627 }
2628
2629 switch (cmd) {
2630 case STA_NOTIFY_SLEEP:
2631 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2632 ieee80211_sta_block_awake(hw, sta, true);
2633
2634 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2635 ieee80211_sta_set_buffered(sta, tid, true);
2636
2637 if (txqs)
2638 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2639 /*
2640 * The fw updates the STA to be asleep. Tx packets on the Tx
2641 * queues to this station will not be transmitted. The fw will
2642 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2643 */
2644 break;
2645 case STA_NOTIFY_AWAKE:
2646 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2647 break;
2648
2649 if (txqs)
2650 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2651 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2652 break;
2653 default:
2654 break;
2655 }
2656 spin_unlock_bh(&mvmsta->lock);
2657}
2658
2659static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2660 struct ieee80211_vif *vif,
2661 struct ieee80211_sta *sta)
2662{
2663 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2664 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2665 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2666
2667 /*
2668 * This is called before mac80211 does RCU synchronisation,
2669 * so here we already invalidate our internal RCU-protected
2670 * station pointer. The rest of the code will thus no longer
2671 * be able to find the station this way, and we don't rely
2672 * on further RCU synchronisation after the sta_state()
2673 * callback deleted the station.
2674 */
2675 mutex_lock(&mvm->mutex);
2676 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2677 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2678 ERR_PTR(-ENOENT));
2679
2680 if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
2681 mvmvif->ap_assoc_sta_count--;
2682 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2683 }
2684
2685 mutex_unlock(&mvm->mutex);
2686}
2687
2688static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2689 const u8 *bssid)
2690{
2691 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2692 return;
2693
2694 if (iwlwifi_mod_params.uapsd_disable) {
2695 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2696 return;
2697 }
2698
2699 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2700}
2701
2702static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2703 struct ieee80211_vif *vif,
2704 struct ieee80211_sta *sta,
2705 enum ieee80211_sta_state old_state,
2706 enum ieee80211_sta_state new_state)
2707{
2708 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2709 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2710 int ret;
2711
2712 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2713 sta->addr, old_state, new_state);
2714
2715 /* this would be a mac80211 bug ... but don't crash */
2716 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2717 return -EINVAL;
2718
2719 /* if a STA is being removed, reuse its ID */
2720 flush_work(&mvm->sta_drained_wk);
2721
2722 mutex_lock(&mvm->mutex);
2723 if (old_state == IEEE80211_STA_NOTEXIST &&
2724 new_state == IEEE80211_STA_NONE) {
2725 /*
2726 * Firmware bug - it'll crash if the beacon interval is less
2727 * than 16. We can't avoid connecting at all, so refuse the
2728 * station state change, this will cause mac80211 to abandon
2729 * attempts to connect to this AP, and eventually wpa_s will
2730 * blacklist the AP...
2731 */
2732 if (vif->type == NL80211_IFTYPE_STATION &&
2733 vif->bss_conf.beacon_int < 16) {
2734 IWL_ERR(mvm,
2735 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2736 sta->addr, vif->bss_conf.beacon_int);
2737 ret = -EINVAL;
2738 goto out_unlock;
2739 }
2740
2741 if (sta->tdls &&
2742 (vif->p2p ||
2743 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2744 IWL_MVM_TDLS_STA_COUNT ||
2745 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2746 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2747 ret = -EBUSY;
2748 goto out_unlock;
2749 }
2750
2751 ret = iwl_mvm_add_sta(mvm, vif, sta);
2752 if (sta->tdls && ret == 0)
2753 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2754 } else if (old_state == IEEE80211_STA_NONE &&
2755 new_state == IEEE80211_STA_AUTH) {
2756 /*
2757 * EBS may be disabled due to previous failures reported by FW.
2758		 * Reset the EBS status here, assuming the environment has changed.
2759 */
2760 mvm->last_ebs_successful = true;
2761 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2762 ret = 0;
2763 } else if (old_state == IEEE80211_STA_AUTH &&
2764 new_state == IEEE80211_STA_ASSOC) {
2765 ret = iwl_mvm_update_sta(mvm, vif, sta);
2766 if (ret == 0)
2767 iwl_mvm_rs_rate_init(mvm, sta,
2768 mvmvif->phy_ctxt->channel->band,
2769 true);
2770 } else if (old_state == IEEE80211_STA_ASSOC &&
2771 new_state == IEEE80211_STA_AUTHORIZED) {
2772
2773 /* we don't support TDLS during DCM */
2774 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2775 iwl_mvm_teardown_tdls_peers(mvm);
2776
2777 /* enable beacon filtering */
2778 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2779 ret = 0;
2780 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2781 new_state == IEEE80211_STA_ASSOC) {
2782 /* disable beacon filtering */
2783 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2784 ret = 0;
2785 } else if (old_state == IEEE80211_STA_ASSOC &&
2786 new_state == IEEE80211_STA_AUTH) {
2787 ret = 0;
2788 } else if (old_state == IEEE80211_STA_AUTH &&
2789 new_state == IEEE80211_STA_NONE) {
2790 ret = 0;
2791 } else if (old_state == IEEE80211_STA_NONE &&
2792 new_state == IEEE80211_STA_NOTEXIST) {
2793 ret = iwl_mvm_rm_sta(mvm, vif, sta);
2794 if (sta->tdls)
2795 iwl_mvm_recalc_tdls_state(mvm, vif, false);
2796 } else {
2797 ret = -EIO;
2798 }
2799 out_unlock:
2800 mutex_unlock(&mvm->mutex);
2801
2802 if (sta->tdls && ret == 0) {
2803 if (old_state == IEEE80211_STA_NOTEXIST &&
2804 new_state == IEEE80211_STA_NONE)
2805 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2806 else if (old_state == IEEE80211_STA_NONE &&
2807 new_state == IEEE80211_STA_NOTEXIST)
2808 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2809 }
2810
2811 return ret;
2812}
2813
2814static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2815{
2816 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2817
2818 mvm->rts_threshold = value;
2819
2820 return 0;
2821}
2822
2823static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2824 struct ieee80211_vif *vif,
2825 struct ieee80211_sta *sta, u32 changed)
2826{
2827 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2828
2829 if (vif->type == NL80211_IFTYPE_STATION &&
2830 changed & IEEE80211_RC_NSS_CHANGED)
2831 iwl_mvm_sf_update(mvm, vif, false);
2832}
2833
2834static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2835 struct ieee80211_vif *vif, u16 ac,
2836 const struct ieee80211_tx_queue_params *params)
2837{
2838 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2839 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2840
2841 mvmvif->queue_params[ac] = *params;
2842
2843 /*
2844	 * No need to update right away, we'll get BSS_CHANGED_QOS.
2845	 * The exception is the P2P_DEVICE interface, which needs an immediate update.
2846 */
2847 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2848 int ret;
2849
2850 mutex_lock(&mvm->mutex);
2851 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2852 mutex_unlock(&mvm->mutex);
2853 return ret;
2854 }
2855 return 0;
2856}
2857
2858static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2859 struct ieee80211_vif *vif)
2860{
2861 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2862 u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
2863 200 + vif->bss_conf.beacon_int);
2864 u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
2865 100 + vif->bss_conf.beacon_int);
2866
2867 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2868 return;
2869
2870 /*
2871 * iwl_mvm_protect_session() reads directly from the device
2872 * (the system time), so make sure it is available.
2873 */
2874 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2875 return;
2876
2877 mutex_lock(&mvm->mutex);
2878 /* Try really hard to protect the session and hear a beacon */
2879 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2880 mutex_unlock(&mvm->mutex);
2881
2882 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2883}
2884
2885static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2886 struct ieee80211_vif *vif,
2887 struct cfg80211_sched_scan_request *req,
2888 struct ieee80211_scan_ies *ies)
2889{
2890 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2891
2892 int ret;
2893
2894 mutex_lock(&mvm->mutex);
2895
2896 if (!vif->bss_conf.idle) {
2897 ret = -EBUSY;
2898 goto out;
2899 }
2900
2901 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2902
2903out:
2904 mutex_unlock(&mvm->mutex);
2905 return ret;
2906}
2907
2908static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2909 struct ieee80211_vif *vif)
2910{
2911 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2912 int ret;
2913
2914 mutex_lock(&mvm->mutex);
2915
2916 /* Due to a race condition, it's possible that mac80211 asks
2917 * us to stop a sched_scan when it's already stopped. This
2918 * can happen, for instance, if we stopped the scan ourselves,
2919 * called ieee80211_sched_scan_stopped() and the userspace called
2920	 * stop sched scan before ieee80211_sched_scan_stopped_work()
2921 * could run. To handle this, simply return if the scan is
2922 * not running.
2923 */
2924 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2925 mutex_unlock(&mvm->mutex);
2926 return 0;
2927 }
2928
2929 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2930 mutex_unlock(&mvm->mutex);
2931 iwl_mvm_wait_for_async_handlers(mvm);
2932
2933 return ret;
2934}
2935
2936static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2937 enum set_key_cmd cmd,
2938 struct ieee80211_vif *vif,
2939 struct ieee80211_sta *sta,
2940 struct ieee80211_key_conf *key)
2941{
2942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2943 int ret;
2944
2945 if (iwlwifi_mod_params.sw_crypto) {
2946 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2947 return -EOPNOTSUPP;
2948 }
2949
2950 switch (key->cipher) {
2951 case WLAN_CIPHER_SUITE_TKIP:
2952 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2953 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2954 break;
2955 case WLAN_CIPHER_SUITE_CCMP:
2956 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2957 break;
2958 case WLAN_CIPHER_SUITE_AES_CMAC:
2959 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
2960 break;
2961 case WLAN_CIPHER_SUITE_WEP40:
2962 case WLAN_CIPHER_SUITE_WEP104:
2963 /* For non-client mode, only use WEP keys for TX as we probably
2964 * don't have a station yet anyway and would then have to keep
2965 * track of the keys, linking them to each of the clients/peers
2966 * as they appear. For now, don't do that, for performance WEP
2967 * offload doesn't really matter much, but we need it for some
2968 * other offload features in client mode.
2969 */
2970 if (vif->type != NL80211_IFTYPE_STATION)
2971 return 0;
2972 break;
2973 default:
2974 /* currently FW supports only one optional cipher scheme */
2975 if (hw->n_cipher_schemes &&
2976 hw->cipher_schemes->cipher == key->cipher)
2977 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2978 else
2979 return -EOPNOTSUPP;
2980 }
2981
2982 mutex_lock(&mvm->mutex);
2983
2984 switch (cmd) {
2985 case SET_KEY:
2986 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2987 vif->type == NL80211_IFTYPE_AP) && !sta) {
2988 /*
2989 * GTK on AP interface is a TX-only key, return 0;
2990 * on IBSS they're per-station and because we're lazy
2991 * we don't support them for RX, so do the same.
2992 */
2993 ret = 0;
2994 key->hw_key_idx = STA_KEY_IDX_INVALID;
2995 break;
2996 }
2997
2998 /* During FW restart, in order to restore the state as it was,
2999 * don't try to reprogram keys we previously failed for.
3000 */
3001 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3002 key->hw_key_idx == STA_KEY_IDX_INVALID) {
3003 IWL_DEBUG_MAC80211(mvm,
3004 "skip invalid idx key programming during restart\n");
3005 ret = 0;
3006 break;
3007 }
3008
3009 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
3010 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
3011 test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
3012 &mvm->status));
3013 if (ret) {
3014 IWL_WARN(mvm, "set key failed\n");
3015 /*
3016 * can't add key for RX, but we don't need it
3017 * in the device for TX so still return 0
3018 */
3019 key->hw_key_idx = STA_KEY_IDX_INVALID;
3020 ret = 0;
3021 }
3022
3023 break;
3024 case DISABLE_KEY:
3025 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
3026 ret = 0;
3027 break;
3028 }
3029
3030 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
3031 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
3032 break;
3033 default:
3034 ret = -EINVAL;
3035 }
3036
3037 mutex_unlock(&mvm->mutex);
3038 return ret;
3039}
3040
3041static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
3042 struct ieee80211_vif *vif,
3043 struct ieee80211_key_conf *keyconf,
3044 struct ieee80211_sta *sta,
3045 u32 iv32, u16 *phase1key)
3046{
3047 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3048
3049 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
3050 return;
3051
3052 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
3053}
3054
3055
3056static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
3057 struct iwl_rx_packet *pkt, void *data)
3058{
3059 struct iwl_mvm *mvm =
3060 container_of(notif_wait, struct iwl_mvm, notif_wait);
3061 struct iwl_hs20_roc_res *resp;
3062 int resp_len = iwl_rx_packet_payload_len(pkt);
3063 struct iwl_mvm_time_event_data *te_data = data;
3064
3065 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3066 return true;
3067
3068 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3069 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3070 return true;
3071 }
3072
3073 resp = (void *)pkt->data;
3074
3075 IWL_DEBUG_TE(mvm,
3076		     "Aux ROC: Received response from ucode: status=%d uid=%d\n",
3077 resp->status, resp->event_unique_id);
3078
3079 te_data->uid = le32_to_cpu(resp->event_unique_id);
3080 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3081 te_data->uid);
3082
3083 spin_lock_bh(&mvm->time_event_lock);
3084 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3085 spin_unlock_bh(&mvm->time_event_lock);
3086
3087 return true;
3088}
3089
3090#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
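/*
 * The max delay above is in milliseconds; it is converted to time units
 * (TUs of 1024 usec) with MSEC_TO_TU() when the HOT_SPOT_CMD is built below.
 */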
3091static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3092 struct ieee80211_channel *channel,
3093 struct ieee80211_vif *vif,
3094 int duration)
3095{
3096 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3097 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3098 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3099 static const u16 time_event_response[] = { HOT_SPOT_CMD };
3100 struct iwl_notification_wait wait_time_event;
3101 struct iwl_hs20_roc_req aux_roc_req = {
3102 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3103 .id_and_color =
3104 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3105 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3106 /* Set the channel info data */
3107 .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
3108 PHY_BAND_24 : PHY_BAND_5,
3109 .channel_info.channel = channel->hw_value,
3110 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
3111 /* Set the time and duration */
3112 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3113 .apply_time_max_delay =
3114 cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)),
3115 .duration = cpu_to_le32(MSEC_TO_TU(duration)),
3116 };
3117
3118 /* Set the node address */
3119 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3120
3121 lockdep_assert_held(&mvm->mutex);
3122
3123 spin_lock_bh(&mvm->time_event_lock);
3124
3125 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3126 spin_unlock_bh(&mvm->time_event_lock);
3127 return -EIO;
3128 }
3129
3130 te_data->vif = vif;
3131 te_data->duration = duration;
3132 te_data->id = HOT_SPOT_CMD;
3133
3134 spin_unlock_bh(&mvm->time_event_lock);
3135
3136 /*
3137 * Use a notification wait, which really just processes the
3138 * command response and doesn't wait for anything, in order
3139 * to be able to process the response and get the UID inside
3140 * the RX path. Using CMD_WANT_SKB doesn't work because it
3141 * stores the buffer and then wakes up this thread, by which
3142 * time another notification (that the time event started)
3143	 * might already have been processed unsuccessfully.
3144 */
3145 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3146 time_event_response,
3147 ARRAY_SIZE(time_event_response),
3148 iwl_mvm_rx_aux_roc, te_data);
3149
3150 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3151 &aux_roc_req);
3152
3153 if (res) {
3154 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3155 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3156 goto out_clear_te;
3157 }
3158
3159 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3160 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3161 /* should never fail */
3162 WARN_ON_ONCE(res);
3163
3164 if (res) {
3165 out_clear_te:
3166 spin_lock_bh(&mvm->time_event_lock);
3167 iwl_mvm_te_clear_data(mvm, te_data);
3168 spin_unlock_bh(&mvm->time_event_lock);
3169 }
3170
3171 return res;
3172}
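/*
 * Summary of the aux-ROC flow above: register a notification waiter for
 * HOT_SPOT_CMD, send the command, and let iwl_mvm_rx_aux_roc() record the
 * time event UID from the firmware response; on failure the time event data
 * is cleared again under time_event_lock.
 */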
3173
3174static int iwl_mvm_roc(struct ieee80211_hw *hw,
3175 struct ieee80211_vif *vif,
3176 struct ieee80211_channel *channel,
3177 int duration,
3178 enum ieee80211_roc_type type)
3179{
3180 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3181 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3182 struct cfg80211_chan_def chandef;
3183 struct iwl_mvm_phy_ctxt *phy_ctxt;
3184 int ret, i;
3185
3186 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3187 duration, type);
3188
3189 flush_work(&mvm->roc_done_wk);
3190
3191 mutex_lock(&mvm->mutex);
3192
3193 switch (vif->type) {
3194 case NL80211_IFTYPE_STATION:
3195 if (fw_has_capa(&mvm->fw->ucode_capa,
3196 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3197 /* Use aux roc framework (HS20) */
3198 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3199 vif, duration);
3200 goto out_unlock;
3201 }
3202 IWL_ERR(mvm, "hotspot not supported\n");
3203 ret = -EINVAL;
3204 goto out_unlock;
3205 case NL80211_IFTYPE_P2P_DEVICE:
3206 /* handle below */
3207 break;
3208 default:
3209 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3210 ret = -EINVAL;
3211 goto out_unlock;
3212 }
3213
3214 for (i = 0; i < NUM_PHY_CTX; i++) {
3215 phy_ctxt = &mvm->phy_ctxts[i];
3216 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3217 continue;
3218
3219 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3220 /*
3221 * Unbind the P2P_DEVICE from the current PHY context,
3222 * and if the PHY context is not used remove it.
3223 */
3224 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3225 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3226 goto out_unlock;
3227
3228 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3229
3230 /* Bind the P2P_DEVICE to the current PHY Context */
3231 mvmvif->phy_ctxt = phy_ctxt;
3232
3233 ret = iwl_mvm_binding_add_vif(mvm, vif);
3234 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3235 goto out_unlock;
3236
3237 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3238 goto schedule_time_event;
3239 }
3240 }
3241
3242 /* Need to update the PHY context only if the ROC channel changed */
3243 if (channel == mvmvif->phy_ctxt->channel)
3244 goto schedule_time_event;
3245
3246 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3247
3248 /*
3249 * Change the PHY context configuration as it is currently referenced
3250 * only by the P2P Device MAC
3251 */
3252 if (mvmvif->phy_ctxt->ref == 1) {
3253 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3254 &chandef, 1, 1);
3255 if (ret)
3256 goto out_unlock;
3257 } else {
3258 /*
3259 * The PHY context is shared with other MACs. Need to remove the
3260		 * P2P Device from the binding, allocate a new PHY context and
3261 * create a new binding
3262 */
3263 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3264 if (!phy_ctxt) {
3265 ret = -ENOSPC;
3266 goto out_unlock;
3267 }
3268
3269 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3270 1, 1);
3271 if (ret) {
3272 IWL_ERR(mvm, "Failed to change PHY context\n");
3273 goto out_unlock;
3274 }
3275
3276 /* Unbind the P2P_DEVICE from the current PHY context */
3277 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3278 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3279 goto out_unlock;
3280
3281 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3282
3283 /* Bind the P2P_DEVICE to the new allocated PHY context */
3284 mvmvif->phy_ctxt = phy_ctxt;
3285
3286 ret = iwl_mvm_binding_add_vif(mvm, vif);
3287 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3288 goto out_unlock;
3289
3290 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3291 }
3292
3293schedule_time_event:
3294 /* Schedule the time events */
3295 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3296
3297out_unlock:
3298 mutex_unlock(&mvm->mutex);
3299 IWL_DEBUG_MAC80211(mvm, "leave\n");
3300 return ret;
3301}
3302
3303static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3304{
3305 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3306
3307 IWL_DEBUG_MAC80211(mvm, "enter\n");
3308
3309 mutex_lock(&mvm->mutex);
3310 iwl_mvm_stop_roc(mvm);
3311 mutex_unlock(&mvm->mutex);
3312
3313 IWL_DEBUG_MAC80211(mvm, "leave\n");
3314 return 0;
3315}
3316
3317static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3318 struct ieee80211_chanctx_conf *ctx)
3319{
3320 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3321 struct iwl_mvm_phy_ctxt *phy_ctxt;
3322 int ret;
3323
3324 lockdep_assert_held(&mvm->mutex);
3325
3326 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3327
3328 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3329 if (!phy_ctxt) {
3330 ret = -ENOSPC;
3331 goto out;
3332 }
3333
3334 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3335 ctx->rx_chains_static,
3336 ctx->rx_chains_dynamic);
3337 if (ret) {
3338 IWL_ERR(mvm, "Failed to add PHY context\n");
3339 goto out;
3340 }
3341
3342 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3343 *phy_ctxt_id = phy_ctxt->id;
3344out:
3345 return ret;
3346}
3347
3348static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3349 struct ieee80211_chanctx_conf *ctx)
3350{
3351 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3352 int ret;
3353
3354 mutex_lock(&mvm->mutex);
3355 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3356 mutex_unlock(&mvm->mutex);
3357
3358 return ret;
3359}
3360
3361static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3362 struct ieee80211_chanctx_conf *ctx)
3363{
3364 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3365 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3366
3367 lockdep_assert_held(&mvm->mutex);
3368
3369 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3370}
3371
3372static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3373 struct ieee80211_chanctx_conf *ctx)
3374{
3375 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3376
3377 mutex_lock(&mvm->mutex);
3378 __iwl_mvm_remove_chanctx(mvm, ctx);
3379 mutex_unlock(&mvm->mutex);
3380}
3381
3382static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3383 struct ieee80211_chanctx_conf *ctx,
3384 u32 changed)
3385{
3386 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3387 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3388 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3389
3390 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3391 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3392 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3393 IEEE80211_CHANCTX_CHANGE_RADAR |
3394 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3395 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3396 phy_ctxt->ref, changed))
3397 return;
3398
3399 mutex_lock(&mvm->mutex);
3400 iwl_mvm_bt_coex_vif_change(mvm);
3401 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3402 ctx->rx_chains_static,
3403 ctx->rx_chains_dynamic);
3404 mutex_unlock(&mvm->mutex);
3405}
3406
3407static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3408 struct ieee80211_vif *vif,
3409 struct ieee80211_chanctx_conf *ctx,
3410 bool switching_chanctx)
3411{
3412 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3413 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3414 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3415 int ret;
3416
3417 lockdep_assert_held(&mvm->mutex);
3418
3419 mvmvif->phy_ctxt = phy_ctxt;
3420
3421 switch (vif->type) {
3422 case NL80211_IFTYPE_AP:
3423 /* only needed if we're switching chanctx (i.e. during CSA) */
3424 if (switching_chanctx) {
3425 mvmvif->ap_ibss_active = true;
3426 break;
3427 }
3428 case NL80211_IFTYPE_ADHOC:
3429 /*
3430 * The AP binding flow is handled as part of the start_ap flow
3431 * (in bss_info_changed), similarly for IBSS.
3432 */
3433 ret = 0;
3434 goto out;
3435 case NL80211_IFTYPE_STATION:
3436 break;
3437 case NL80211_IFTYPE_MONITOR:
3438 /* always disable PS when a monitor interface is active */
3439 mvmvif->ps_disabled = true;
3440 break;
3441 default:
3442 ret = -EINVAL;
3443 goto out;
3444 }
3445
3446 ret = iwl_mvm_binding_add_vif(mvm, vif);
3447 if (ret)
3448 goto out;
3449
3450 /*
3451 * Power state must be updated before quotas,
3452 * otherwise fw will complain.
3453 */
3454 iwl_mvm_power_update_mac(mvm);
3455
3456 /* Setting the quota at this stage is only required for monitor
3457 * interfaces. For the other types, the bss_info changed flow
3458 * will handle quota settings.
3459 */
3460 if (vif->type == NL80211_IFTYPE_MONITOR) {
3461 mvmvif->monitor_active = true;
3462 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3463 if (ret)
3464 goto out_remove_binding;
3465 }
3466
3467 /* Handle binding during CSA */
3468 if (vif->type == NL80211_IFTYPE_AP) {
3469 iwl_mvm_update_quotas(mvm, false, NULL);
3470 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3471 }
3472
3473 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3474 u32 duration = 2 * vif->bss_conf.beacon_int;
3475
3476 /* iwl_mvm_protect_session() reads directly from the
3477 * device (the system time), so make sure it is
3478 * available.
3479 */
3480 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3481 if (ret)
3482 goto out_remove_binding;
3483
3484 /* Protect the session to make sure we hear the first
3485 * beacon on the new channel.
3486 */
3487 iwl_mvm_protect_session(mvm, vif, duration, duration,
3488 vif->bss_conf.beacon_int / 2,
3489 true);
3490
3491 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3492
3493 iwl_mvm_update_quotas(mvm, false, NULL);
3494 }
3495
3496 goto out;
3497
3498out_remove_binding:
3499 iwl_mvm_binding_remove_vif(mvm, vif);
3500 iwl_mvm_power_update_mac(mvm);
3501out:
3502 if (ret)
3503 mvmvif->phy_ctxt = NULL;
3504 return ret;
3505}
3506static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3507 struct ieee80211_vif *vif,
3508 struct ieee80211_chanctx_conf *ctx)
3509{
3510 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3511 int ret;
3512
3513 mutex_lock(&mvm->mutex);
3514 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3515 mutex_unlock(&mvm->mutex);
3516
3517 return ret;
3518}
3519
3520static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3521 struct ieee80211_vif *vif,
3522 struct ieee80211_chanctx_conf *ctx,
3523 bool switching_chanctx)
3524{
3525 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3526 struct ieee80211_vif *disabled_vif = NULL;
3527
3528 lockdep_assert_held(&mvm->mutex);
3529
3530 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3531
3532 switch (vif->type) {
3533 case NL80211_IFTYPE_ADHOC:
3534 goto out;
3535 case NL80211_IFTYPE_MONITOR:
3536 mvmvif->monitor_active = false;
3537 mvmvif->ps_disabled = false;
3538 break;
3539 case NL80211_IFTYPE_AP:
3540 /* This part is triggered only during CSA */
3541 if (!switching_chanctx || !mvmvif->ap_ibss_active)
3542 goto out;
3543
3544 mvmvif->csa_countdown = false;
3545
3546 /* Set CS bit on all the stations */
3547 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3548
3549 /* Save blocked iface, the timeout is set on the next beacon */
3550 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3551
3552 mvmvif->ap_ibss_active = false;
3553 break;
3554 case NL80211_IFTYPE_STATION:
3555 if (!switching_chanctx)
3556 break;
3557
3558 disabled_vif = vif;
3559
3560 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3561 break;
3562 default:
3563 break;
3564 }
3565
3566 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3567 iwl_mvm_binding_remove_vif(mvm, vif);
3568
3569out:
3570 mvmvif->phy_ctxt = NULL;
3571 iwl_mvm_power_update_mac(mvm);
3572}
3573
3574static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3575 struct ieee80211_vif *vif,
3576 struct ieee80211_chanctx_conf *ctx)
3577{
3578 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3579
3580 mutex_lock(&mvm->mutex);
3581 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3582 mutex_unlock(&mvm->mutex);
3583}
3584
3585static int
3586iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3587 struct ieee80211_vif_chanctx_switch *vifs)
3588{
3589 int ret;
3590
3591 mutex_lock(&mvm->mutex);
3592 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3593 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3594
3595 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3596 if (ret) {
3597 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3598 goto out_reassign;
3599 }
3600
3601 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3602 true);
3603 if (ret) {
3604 IWL_ERR(mvm,
3605 "failed to assign new_ctx during channel switch\n");
3606 goto out_remove;
3607 }
3608
3609 /* we don't support TDLS during DCM - can be caused by channel switch */
3610 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3611 iwl_mvm_teardown_tdls_peers(mvm);
3612
3613 goto out;
3614
3615out_remove:
3616 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3617
3618out_reassign:
3619 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3620 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3621 goto out_restart;
3622 }
3623
3624 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3625 true)) {
3626 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3627 goto out_restart;
3628 }
3629
3630 goto out;
3631
3632out_restart:
3633 /* things keep failing, better restart the hw */
3634 iwl_mvm_nic_restart(mvm, false);
3635
3636out:
3637 mutex_unlock(&mvm->mutex);
3638
3639 return ret;
3640}
3641
3642static int
3643iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3644 struct ieee80211_vif_chanctx_switch *vifs)
3645{
3646 int ret;
3647
3648 mutex_lock(&mvm->mutex);
3649 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3650
3651 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3652 true);
3653 if (ret) {
3654 IWL_ERR(mvm,
3655 "failed to assign new_ctx during channel switch\n");
3656 goto out_reassign;
3657 }
3658
3659 goto out;
3660
3661out_reassign:
3662 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3663 true)) {
3664 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3665 goto out_restart;
3666 }
3667
3668 goto out;
3669
3670out_restart:
3671 /* things keep failing, better restart the hw */
3672 iwl_mvm_nic_restart(mvm, false);
3673
3674out:
3675 mutex_unlock(&mvm->mutex);
3676
3677 return ret;
3678}
3679
3680static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3681 struct ieee80211_vif_chanctx_switch *vifs,
3682 int n_vifs,
3683 enum ieee80211_chanctx_switch_mode mode)
3684{
3685 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3686 int ret;
3687
3688	/* we only support a single vif right now */
3689 if (n_vifs > 1)
3690 return -EOPNOTSUPP;
3691
3692 switch (mode) {
3693 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3694 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3695 break;
3696 case CHANCTX_SWMODE_REASSIGN_VIF:
3697 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3698 break;
3699 default:
3700 ret = -EOPNOTSUPP;
3701 break;
3702 }
3703
3704 return ret;
3705}
3706
3707static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3708 struct ieee80211_sta *sta,
3709 bool set)
3710{
3711 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3712 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3713
3714 if (!mvm_sta || !mvm_sta->vif) {
3715 IWL_ERR(mvm, "Station is not associated to a vif\n");
3716 return -EINVAL;
3717 }
3718
3719 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3720}
3721
3722#ifdef CONFIG_NL80211_TESTMODE
3723static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3724 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3725 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3726 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3727};
3728
3729static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3730 struct ieee80211_vif *vif,
3731 void *data, int len)
3732{
3733 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3734 int err;
3735 u32 noa_duration;
3736
3737 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3738 if (err)
3739 return err;
3740
3741 if (!tb[IWL_MVM_TM_ATTR_CMD])
3742 return -EINVAL;
3743
3744 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3745 case IWL_MVM_TM_CMD_SET_NOA:
3746 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3747 !vif->bss_conf.enable_beacon ||
3748 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3749 return -EINVAL;
3750
3751 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3752 if (noa_duration >= vif->bss_conf.beacon_int)
3753 return -EINVAL;
3754
3755 mvm->noa_duration = noa_duration;
3756 mvm->noa_vif = vif;
3757
3758 return iwl_mvm_update_quotas(mvm, false, NULL);
3759 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3760 /* must be associated client vif - ignore authorized */
3761 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3762 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3763 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3764 return -EINVAL;
3765
3766 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3767 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3768 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3769 }
3770
3771 return -EOPNOTSUPP;
3772}
3773
3774static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3775 struct ieee80211_vif *vif,
3776 void *data, int len)
3777{
3778 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3779 int err;
3780
3781 mutex_lock(&mvm->mutex);
3782 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3783 mutex_unlock(&mvm->mutex);
3784
3785 return err;
3786}
3787#endif
3788
3789static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3790 struct ieee80211_vif *vif,
3791 struct ieee80211_channel_switch *chsw)
3792{
3793 /* By implementing this operation, we prevent mac80211 from
3794 * starting its own channel switch timer, so that we can call
3795 * ieee80211_chswitch_done() ourselves at the right time
3796 * (which is when the absence time event starts).
3797 */
3798
3799 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3800 "dummy channel switch op\n");
3801}
3802
3803static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3804 struct ieee80211_vif *vif,
3805 struct ieee80211_channel_switch *chsw)
3806{
3807 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3808 struct ieee80211_vif *csa_vif;
3809 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3810 u32 apply_time;
3811 int ret;
3812
3813 mutex_lock(&mvm->mutex);
3814
3815 mvmvif->csa_failed = false;
3816
3817 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3818 chsw->chandef.center_freq1);
3819
3820 iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3821
3822 switch (vif->type) {
3823 case NL80211_IFTYPE_AP:
3824 csa_vif =
3825 rcu_dereference_protected(mvm->csa_vif,
3826 lockdep_is_held(&mvm->mutex));
3827 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3828 "Another CSA is already in progress")) {
3829 ret = -EBUSY;
3830 goto out_unlock;
3831 }
3832
3833 rcu_assign_pointer(mvm->csa_vif, vif);
3834
3835 if (WARN_ONCE(mvmvif->csa_countdown,
3836 "Previous CSA countdown didn't complete")) {
3837 ret = -EBUSY;
3838 goto out_unlock;
3839 }
3840
3841 break;
3842 case NL80211_IFTYPE_STATION:
3843		/* Schedule the time event to start a bit before beacon 1,
3844		 * to make sure we're already on the new channel when the
3845 * GO/AP arrives.
3846 */
3847 apply_time = chsw->device_timestamp +
3848 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
3849 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
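		/*
		 * For example, with beacon_int = 100 TU and chsw->count = 2,
		 * the event is scheduled (100 * 1 - 10) * 1024 = 92160 usec
		 * after the device timestamp of the CSA frame, i.e. 10 TU
		 * before the expected "beacon 1" TBTT on the new channel.
		 */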
3850
3851 if (chsw->block_tx)
3852 iwl_mvm_csa_client_absent(mvm, vif);
3853
3854 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3855 apply_time);
3856 if (mvmvif->bf_data.bf_enabled) {
3857 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3858 if (ret)
3859 goto out_unlock;
3860 }
3861
3862 break;
3863 default:
3864 break;
3865 }
3866
3867 mvmvif->ps_disabled = true;
3868
3869 ret = iwl_mvm_power_update_ps(mvm);
3870 if (ret)
3871 goto out_unlock;
3872
3873 /* we won't be on this channel any longer */
3874 iwl_mvm_teardown_tdls_peers(mvm);
3875
3876out_unlock:
3877 mutex_unlock(&mvm->mutex);
3878
3879 return ret;
3880}
3881
3882static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3883 struct ieee80211_vif *vif)
3884{
3885 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3886 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3887 int ret;
3888
3889 mutex_lock(&mvm->mutex);
3890
3891 if (mvmvif->csa_failed) {
3892 mvmvif->csa_failed = false;
3893 ret = -EIO;
3894 goto out_unlock;
3895 }
3896
3897 if (vif->type == NL80211_IFTYPE_STATION) {
3898 struct iwl_mvm_sta *mvmsta;
3899
3900 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3901 mvmvif->ap_sta_id);
3902
3903 if (WARN_ON(!mvmsta)) {
3904 ret = -EIO;
3905 goto out_unlock;
3906 }
3907
3908 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3909
3910 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3911
3912 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3913 if (ret)
3914 goto out_unlock;
3915
3916 iwl_mvm_stop_session_protection(mvm, vif);
3917 }
3918
3919 mvmvif->ps_disabled = false;
3920
3921 ret = iwl_mvm_power_update_ps(mvm);
3922
3923out_unlock:
3924 mutex_unlock(&mvm->mutex);
3925
3926 return ret;
3927}
3928
3929static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3930 struct ieee80211_vif *vif, u32 queues, bool drop)
3931{
3932 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3933 struct iwl_mvm_vif *mvmvif;
3934 struct iwl_mvm_sta *mvmsta;
3935 struct ieee80211_sta *sta;
3936 int i;
3937 u32 msk = 0;
3938
3939 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3940 return;
3941
3942 mutex_lock(&mvm->mutex);
3943 mvmvif = iwl_mvm_vif_from_mac80211(vif);
3944
3945 /* flush the AP-station and all TDLS peers */
3946 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3947 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3948 lockdep_is_held(&mvm->mutex));
3949 if (IS_ERR_OR_NULL(sta))
3950 continue;
3951
3952 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3953 if (mvmsta->vif != vif)
3954 continue;
3955
3956 /* make sure only TDLS peers or the AP are flushed */
3957 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3958
3959 msk |= mvmsta->tfd_queue_msk;
3960 }
3961
3962 if (drop) {
3963 if (iwl_mvm_flush_tx_path(mvm, msk, 0))
3964 IWL_ERR(mvm, "flush request fail\n");
3965 mutex_unlock(&mvm->mutex);
3966 } else {
3967 mutex_unlock(&mvm->mutex);
3968
3969 /* this can take a while, and we may need/want other operations
3970 * to succeed while doing this, so do it without the mutex held
3971 */
3972 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3973 }
3974}
3975
3976static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3977 struct survey_info *survey)
3978{
3979 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3980 int ret;
3981
3982 memset(survey, 0, sizeof(*survey));
3983
3984 /* only support global statistics right now */
3985 if (idx != 0)
3986 return -ENOENT;
3987
3988 if (fw_has_capa(&mvm->fw->ucode_capa,
3989 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3990 return -ENOENT;
3991
3992 mutex_lock(&mvm->mutex);
3993
3994 if (mvm->ucode_loaded) {
3995 ret = iwl_mvm_request_statistics(mvm, false);
3996 if (ret)
3997 goto out;
3998 }
3999
4000 survey->filled = SURVEY_INFO_TIME |
4001 SURVEY_INFO_TIME_RX |
4002 SURVEY_INFO_TIME_TX |
4003 SURVEY_INFO_TIME_SCAN;
4004 survey->time = mvm->accu_radio_stats.on_time_rf +
4005 mvm->radio_stats.on_time_rf;
4006 do_div(survey->time, USEC_PER_MSEC);
4007
4008 survey->time_rx = mvm->accu_radio_stats.rx_time +
4009 mvm->radio_stats.rx_time;
4010 do_div(survey->time_rx, USEC_PER_MSEC);
4011
4012 survey->time_tx = mvm->accu_radio_stats.tx_time +
4013 mvm->radio_stats.tx_time;
4014 do_div(survey->time_tx, USEC_PER_MSEC);
4015
4016 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
4017 mvm->radio_stats.on_time_scan;
4018 do_div(survey->time_scan, USEC_PER_MSEC);
4019
4020 ret = 0;
4021 out:
4022 mutex_unlock(&mvm->mutex);
4023 return ret;
4024}
4025
4026static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4027 struct ieee80211_vif *vif,
4028 struct ieee80211_sta *sta,
4029 struct station_info *sinfo)
4030{
4031 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4032 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4033 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4034
4035 if (fw_has_capa(&mvm->fw->ucode_capa,
4036 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4037 return;
4038
4039	/* if beacon filtering isn't on, mac80211 does it anyway */
4040 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4041 return;
4042
4043 if (!vif->bss_conf.assoc)
4044 return;
4045
4046 mutex_lock(&mvm->mutex);
4047
4048 if (mvmvif->ap_sta_id != mvmsta->sta_id)
4049 goto unlock;
4050
4051 if (iwl_mvm_request_statistics(mvm, false))
4052 goto unlock;
4053
4054 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4055 mvmvif->beacon_stats.accu_num_beacons;
4056 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
4057 if (mvmvif->beacon_stats.avg_signal) {
4058 /* firmware only reports a value after RXing a few beacons */
4059 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4060 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4061 }
4062 unlock:
4063 mutex_unlock(&mvm->mutex);
4064}
4065
4066static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
4067 struct ieee80211_vif *vif,
4068 const struct ieee80211_event *event)
4069{
4070#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
4071 do { \
4072 if ((_cnt) && --(_cnt)) \
4073 break; \
4074 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
4075 } while (0)
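	/*
	 * The helper above decrements the per-trigger stop counter and only
	 * calls iwl_mvm_fw_dbg_collect_trig() when that counter is unset (0)
	 * or has just counted down to zero.
	 */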
4076
4077 struct iwl_fw_dbg_trigger_tlv *trig;
4078 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
4079
4080 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4081 return;
4082
4083 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4084 trig_mlme = (void *)trig->data;
4085 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4086 return;
4087
4088 if (event->u.mlme.data == ASSOC_EVENT) {
4089 if (event->u.mlme.status == MLME_DENIED)
4090 CHECK_MLME_TRIGGER(mvm, trig, buf,
4091 trig_mlme->stop_assoc_denied,
4092 "DENIED ASSOC: reason %d",
4093 event->u.mlme.reason);
4094 else if (event->u.mlme.status == MLME_TIMEOUT)
4095 CHECK_MLME_TRIGGER(mvm, trig, buf,
4096 trig_mlme->stop_assoc_timeout,
4097 "ASSOC TIMEOUT");
4098 } else if (event->u.mlme.data == AUTH_EVENT) {
4099 if (event->u.mlme.status == MLME_DENIED)
4100 CHECK_MLME_TRIGGER(mvm, trig, buf,
4101 trig_mlme->stop_auth_denied,
4102 "DENIED AUTH: reason %d",
4103 event->u.mlme.reason);
4104 else if (event->u.mlme.status == MLME_TIMEOUT)
4105 CHECK_MLME_TRIGGER(mvm, trig, buf,
4106 trig_mlme->stop_auth_timeout,
4107 "AUTH TIMEOUT");
4108 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4109 CHECK_MLME_TRIGGER(mvm, trig, buf,
4110 trig_mlme->stop_rx_deauth,
4111 "DEAUTH RX %d", event->u.mlme.reason);
4112 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4113 CHECK_MLME_TRIGGER(mvm, trig, buf,
4114 trig_mlme->stop_tx_deauth,
4115 "DEAUTH TX %d", event->u.mlme.reason);
4116 }
4117#undef CHECK_MLME_TRIGGER
4118}
4119
4120static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4121 struct ieee80211_vif *vif,
4122 const struct ieee80211_event *event)
4123{
4124 struct iwl_fw_dbg_trigger_tlv *trig;
4125 struct iwl_fw_dbg_trigger_ba *ba_trig;
4126
4127 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4128 return;
4129
4130 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4131 ba_trig = (void *)trig->data;
4132 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4133 return;
4134
4135 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4136 return;
4137
4138 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4139 "BAR received from %pM, tid %d, ssn %d",
4140 event->u.ba.sta->addr, event->u.ba.tid,
4141 event->u.ba.ssn);
4142}
4143
4144static void
4145iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4146 struct ieee80211_vif *vif,
4147 const struct ieee80211_event *event)
4148{
4149 struct iwl_fw_dbg_trigger_tlv *trig;
4150 struct iwl_fw_dbg_trigger_ba *ba_trig;
4151
4152 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4153 return;
4154
4155 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4156 ba_trig = (void *)trig->data;
4157 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4158 return;
4159
4160 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4161 return;
4162
4163 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4164 "Frame from %pM timed out, tid %d",
4165 event->u.ba.sta->addr, event->u.ba.tid);
4166}
4167
4168static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4169 struct ieee80211_vif *vif,
4170 const struct ieee80211_event *event)
4171{
4172 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4173
4174 switch (event->type) {
4175 case MLME_EVENT:
4176 iwl_mvm_event_mlme_callback(mvm, vif, event);
4177 break;
4178 case BAR_RX_EVENT:
4179 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4180 break;
4181 case BA_FRAME_TIMEOUT:
4182 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4183 break;
4184 default:
4185 break;
4186 }
4187}
4188
4189const struct ieee80211_ops iwl_mvm_hw_ops = {
4190 .tx = iwl_mvm_mac_tx,
4191 .ampdu_action = iwl_mvm_mac_ampdu_action,
4192 .start = iwl_mvm_mac_start,
4193 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
4194 .stop = iwl_mvm_mac_stop,
4195 .add_interface = iwl_mvm_mac_add_interface,
4196 .remove_interface = iwl_mvm_mac_remove_interface,
4197 .config = iwl_mvm_mac_config,
4198 .prepare_multicast = iwl_mvm_prepare_multicast,
4199 .configure_filter = iwl_mvm_configure_filter,
4200 .config_iface_filter = iwl_mvm_config_iface_filter,
4201 .bss_info_changed = iwl_mvm_bss_info_changed,
4202 .hw_scan = iwl_mvm_mac_hw_scan,
4203 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4204 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4205 .sta_state = iwl_mvm_mac_sta_state,
4206 .sta_notify = iwl_mvm_mac_sta_notify,
4207 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4208 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4209 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4210 .sta_rc_update = iwl_mvm_sta_rc_update,
4211 .conf_tx = iwl_mvm_mac_conf_tx,
4212 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4213 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4214 .flush = iwl_mvm_mac_flush,
4215 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4216 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4217 .set_key = iwl_mvm_mac_set_key,
4218 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4219 .remain_on_channel = iwl_mvm_roc,
4220 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
4221 .add_chanctx = iwl_mvm_add_chanctx,
4222 .remove_chanctx = iwl_mvm_remove_chanctx,
4223 .change_chanctx = iwl_mvm_change_chanctx,
4224 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4225 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4226 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4227
4228 .start_ap = iwl_mvm_start_ap_ibss,
4229 .stop_ap = iwl_mvm_stop_ap_ibss,
4230 .join_ibss = iwl_mvm_start_ap_ibss,
4231 .leave_ibss = iwl_mvm_stop_ap_ibss,
4232
4233 .set_tim = iwl_mvm_set_tim,
4234
4235 .channel_switch = iwl_mvm_channel_switch,
4236 .pre_channel_switch = iwl_mvm_pre_channel_switch,
4237 .post_channel_switch = iwl_mvm_post_channel_switch,
4238
4239 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4240 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4241 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4242
4243 .event_callback = iwl_mvm_mac_event_callback,
4244
4245 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4246
4247#ifdef CONFIG_PM_SLEEP
4248 /* look at d3.c */
4249 .suspend = iwl_mvm_suspend,
4250 .resume = iwl_mvm_resume,
4251 .set_wakeup = iwl_mvm_set_wakeup,
4252 .set_rekey_data = iwl_mvm_set_rekey_data,
4253#if IS_ENABLED(CONFIG_IPV6)
4254 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4255#endif
4256 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4257#endif
4258 .get_survey = iwl_mvm_mac_get_survey,
4259 .sta_statistics = iwl_mvm_mac_sta_statistics,
4260};
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
deleted file mode 100644
index 4bde2d027dcd..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ /dev/null
@@ -1,1535 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __IWL_MVM_H__
67#define __IWL_MVM_H__
68
69#include <linux/list.h>
70#include <linux/spinlock.h>
71#include <linux/leds.h>
72#include <linux/in6.h>
73
74#include "iwl-op-mode.h"
75#include "iwl-trans.h"
76#include "iwl-notif-wait.h"
77#include "iwl-eeprom-parse.h"
78#include "iwl-fw-file.h"
79#include "iwl-config.h"
80#include "sta.h"
81#include "fw-api.h"
82#include "constants.h"
83#include "tof.h"
84
85#define IWL_MVM_MAX_ADDRESSES 5
86/* RSSI offset for WkP */
87#define IWL_RSSI_OFFSET 50
88#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
89/* A TimeUnit is 1024 microseconds */
90#define MSEC_TO_TU(_msec) (_msec*1000/1024)
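/* For example, MSEC_TO_TU(200) = 200 * 1000 / 1024 = 195 TU (integer division). */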
91
92/* For GO, this value represents the number of TUs before CSA "beacon
93 * 0" TBTT when the CSA time-event needs to be scheduled to start. It
94 * must be big enough to ensure that we switch in time.
95 */
96#define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40
97
98/* For client, this value represents the number of TUs before CSA
99 * "beacon 1" TBTT, instead. This is because we don't know when the
100 * GO/AP will be in the new channel, so we switch early enough.
101 */
102#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10
103
104/*
105 * This value (in TUs) is used to fine tune the CSA NoA end time which should
106 * be just before "beacon 0" TBTT.
107 */
108#define IWL_MVM_CHANNEL_SWITCH_MARGIN 4
109
110/*
111 * Number of beacons to transmit on a new channel until we unblock tx to
112 * the stations, even if we didn't identify them on a new channel
113 */
114#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
115
116extern const struct ieee80211_ops iwl_mvm_hw_ops;
117
118/**
119 * struct iwl_mvm_mod_params - module parameters for iwlmvm
120 * @init_dbg: if true, the NIC won't be stopped if the INIT fw asserts.
121 *	We will still register with mac80211 so that testmode works. The NIC
122 *	must not be brought up after the INIT fw asserts. This is useful for
123 *	debugging the INIT fw with proprietary tools over testmode.
124 * @tfd_q_hang_detect: enables the detection of hung transmit queues
125 * @power_scheme: one of enum iwl_power_scheme
126 */
127struct iwl_mvm_mod_params {
128 bool init_dbg;
129 bool tfd_q_hang_detect;
130 int power_scheme;
131};
132extern struct iwl_mvm_mod_params iwlmvm_mod_params;
133
134/**
135 * struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump
136 *
137 * @op_mode_ptr: pointer to the buffer coming from the mvm op_mode
138 * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
139 * transport's data.
140 * @trans_len: length of the valid data in trans_ptr
141 * @op_mode_len: length of the valid data in op_mode_ptr
142 */
143struct iwl_mvm_dump_ptrs {
144 struct iwl_trans_dump_data *trans_ptr;
145 void *op_mode_ptr;
146 u32 op_mode_len;
147};
148
149/**
150 * struct iwl_mvm_dump_desc - describes the dump
151 * @len: length of trig_desc->data
152 * @trig_desc: the description of the dump
153 */
154struct iwl_mvm_dump_desc {
155 size_t len;
156 /* must be last */
157 struct iwl_fw_error_dump_trigger_desc trig_desc;
158};
159
160extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert;
161
162struct iwl_mvm_phy_ctxt {
163 u16 id;
164 u16 color;
165 u32 ref;
166
167 /*
168 * TODO: This should probably be removed. Currently here only for rate
169 * scaling algorithm
170 */
171 struct ieee80211_channel *channel;
172};
173
174struct iwl_mvm_time_event_data {
175 struct ieee80211_vif *vif;
176 struct list_head list;
177 unsigned long end_jiffies;
178 u32 duration;
179 bool running;
180 u32 uid;
181
182 /*
183 * The access to the 'id' field must be done when the
184	 * mvm->time_event_lock is held, as its value is used to indicate
185 * if the te is in the time event list or not (when id == TE_MAX)
186 */
187 u32 id;
188};
189
190 /* Power management */
191
192/**
193 * enum iwl_power_scheme
194 * @IWL_POWER_LEVEL_CAM - Continuously Active Mode
195 * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default)
196 * @IWL_POWER_LEVEL_LP - Low Power
197 */
198enum iwl_power_scheme {
199 IWL_POWER_SCHEME_CAM = 1,
200 IWL_POWER_SCHEME_BPS,
201 IWL_POWER_SCHEME_LP
202};
203
204#define IWL_CONN_MAX_LISTEN_INTERVAL 10
205#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
206
207#ifdef CONFIG_IWLWIFI_DEBUGFS
208enum iwl_dbgfs_pm_mask {
209 MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
210 MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
211 MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
212 MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
213 MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
214 MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
215 MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
216 MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
217 MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
218 MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10),
219};
220
221struct iwl_dbgfs_pm {
222 u16 keep_alive_seconds;
223 u32 rx_data_timeout;
224 u32 tx_data_timeout;
225 bool skip_over_dtim;
226 u8 skip_dtim_periods;
227 bool lprx_ena;
228 u32 lprx_rssi_threshold;
229 bool snooze_ena;
230 bool uapsd_misbehaving;
231 bool use_ps_poll;
232 int mask;
233};
234
235/* beacon filtering */
236
237enum iwl_dbgfs_bf_mask {
238 MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
239 MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
240 MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
241 MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
242 MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
243 MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
244 MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
245 MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
246 MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
247 MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
248 MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
249};
250
251struct iwl_dbgfs_bf {
252 u32 bf_energy_delta;
253 u32 bf_roaming_energy_delta;
254 u32 bf_roaming_state;
255 u32 bf_temp_threshold;
256 u32 bf_temp_fast_filter;
257 u32 bf_temp_slow_filter;
258 u32 bf_enable_beacon_filter;
259 u32 bf_debug_flag;
260 u32 bf_escape_timer;
261 u32 ba_escape_timer;
262 u32 ba_enable_beacon_abort;
263 int mask;
264};
265#endif
266
267enum iwl_mvm_smps_type_request {
268 IWL_MVM_SMPS_REQ_BT_COEX,
269 IWL_MVM_SMPS_REQ_TT,
270 IWL_MVM_SMPS_REQ_PROT,
271 NUM_IWL_MVM_SMPS_REQ,
272};
273
274enum iwl_mvm_ref_type {
275 IWL_MVM_REF_UCODE_DOWN,
276 IWL_MVM_REF_SCAN,
277 IWL_MVM_REF_ROC,
278 IWL_MVM_REF_ROC_AUX,
279 IWL_MVM_REF_P2P_CLIENT,
280 IWL_MVM_REF_AP_IBSS,
281 IWL_MVM_REF_USER,
282 IWL_MVM_REF_TX,
283 IWL_MVM_REF_TX_AGG,
284 IWL_MVM_REF_ADD_IF,
285 IWL_MVM_REF_START_AP,
286 IWL_MVM_REF_BSS_CHANGED,
287 IWL_MVM_REF_PREPARE_TX,
288 IWL_MVM_REF_PROTECT_TDLS,
289 IWL_MVM_REF_CHECK_CTKILL,
290 IWL_MVM_REF_PRPH_READ,
291 IWL_MVM_REF_PRPH_WRITE,
292 IWL_MVM_REF_NMI,
293 IWL_MVM_REF_TM_CMD,
294 IWL_MVM_REF_EXIT_WORK,
295 IWL_MVM_REF_PROTECT_CSA,
296 IWL_MVM_REF_FW_DBG_COLLECT,
297
298 /* update debugfs.c when changing this */
299
300 IWL_MVM_REF_COUNT,
301};
302
303enum iwl_bt_force_ant_mode {
304 BT_FORCE_ANT_DIS = 0,
305 BT_FORCE_ANT_AUTO,
306 BT_FORCE_ANT_BT,
307 BT_FORCE_ANT_WIFI,
308
309 BT_FORCE_ANT_MAX,
310};
311
312/**
313* struct iwl_mvm_vif_bf_data - beacon filtering related data
314* @bf_enabled: indicates if beacon filtering is enabled
315* @ba_enabled: indicates if beacon abort is enabled
316* @ave_beacon_signal: average beacon signal
317* @last_cqm_event: rssi of the last cqm event
318* @bt_coex_min_thold: minimum threshold for BT coex
319* @bt_coex_max_thold: maximum threshold for BT coex
320* @last_bt_coex_event: rssi of the last BT coex event
321*/
322struct iwl_mvm_vif_bf_data {
323 bool bf_enabled;
324 bool ba_enabled;
325 int ave_beacon_signal;
326 int last_cqm_event;
327 int bt_coex_min_thold;
328 int bt_coex_max_thold;
329 int last_bt_coex_event;
330};
331
332/**
333 * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
334 * @id: between 0 and 3
335 * @color: to solve races upon MAC addition and removal
336 * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
337 * @bssid: BSSID for this (client) interface
338 * @associated: indicates that we're currently associated, used only for
339 * managing the firmware state in iwl_mvm_bss_info_changed_station()
340 * @ap_assoc_sta_count: count of stations associated to us - valid only
341 * if VIF type is AP
342 * @uploaded: indicates the MAC context has been added to the device
343 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
344 * should get quota etc.
345 * @pm_enabled: indicates if MAC power management is allowed
346 * @monitor_active: indicates that monitor context is configured, and that the
347 * interface should get quota etc.
348 * @low_latency: indicates that this interface is in low-latency mode
349 * (VMACLowLatencyMode)
350 * @ps_disabled: indicates that this interface requires PS to be disabled
351 * @queue_params: QoS params for this MAC
352 * @bcast_sta: station used for broadcast packets. Used by the following
353 * vifs: P2P_DEVICE, GO and AP.
354 * @beacon_skb: the skb used to hold the AP/GO beacon template
355 * @smps_requests: the SMPS requests of different parts of the driver,
356 * combined on update to yield the overall request to mac80211.
357 * @beacon_stats: beacon statistics, containing the # of received beacons,
358 * # of received beacons accumulated over FW restart, and the current
359 * average signal of beacons retrieved from the firmware
360 * @csa_failed: CSA failed to schedule time event, report an error later
361 * @features: hw features active for this vif
362 */
363struct iwl_mvm_vif {
364 struct iwl_mvm *mvm;
365 u16 id;
366 u16 color;
367 u8 ap_sta_id;
368
369 u8 bssid[ETH_ALEN];
370 bool associated;
371 u8 ap_assoc_sta_count;
372
373 bool uploaded;
374 bool ap_ibss_active;
375 bool pm_enabled;
376 bool monitor_active;
377 bool low_latency;
378 bool ps_disabled;
379 struct iwl_mvm_vif_bf_data bf_data;
380
381 struct {
382 u32 num_beacons, accu_num_beacons;
383 u8 avg_signal;
384 } beacon_stats;
385
386 u32 ap_beacon_time;
387
388 enum iwl_tsf_id tsf_id;
389
390 /*
391 * QoS data from mac80211, need to store this here
392 * as mac80211 has a separate callback but we need
393 * to have the data for the MAC context
394 */
395 struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
396 struct iwl_mvm_time_event_data time_event_data;
397 struct iwl_mvm_time_event_data hs_time_event_data;
398
399 struct iwl_mvm_int_sta bcast_sta;
400
401 /*
402 * Assigned while mac80211 has the interface in a channel context,
403 * or, for P2P Device, while it exists.
404 */
405 struct iwl_mvm_phy_ctxt *phy_ctxt;
406
407#ifdef CONFIG_PM_SLEEP
408 /* WoWLAN GTK rekey data */
409 struct {
410 u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
411 __le64 replay_ctr;
412 bool valid;
413 } rekey_data;
414
415 int tx_key_idx;
416
417 bool seqno_valid;
418 u16 seqno;
419#endif
420
421#if IS_ENABLED(CONFIG_IPV6)
422 /* IPv6 addresses for WoWLAN */
423 struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
424 int num_target_ipv6_addrs;
425#endif
426
427#ifdef CONFIG_IWLWIFI_DEBUGFS
428 struct dentry *dbgfs_dir;
429 struct dentry *dbgfs_slink;
430 struct iwl_dbgfs_pm dbgfs_pm;
431 struct iwl_dbgfs_bf dbgfs_bf;
432 struct iwl_mac_power_cmd mac_pwr_cmd;
433#endif
434
435 enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
436
437 /* FW identified misbehaving AP */
438 u8 uapsd_misbehaving_bssid[ETH_ALEN];
439
440 /* Indicates that CSA countdown may be started */
441 bool csa_countdown;
442 bool csa_failed;
443
444 /* TCP Checksum Offload */
445 netdev_features_t features;
446};
447
448static inline struct iwl_mvm_vif *
449iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
450{
451 return (void *)vif->drv_priv;
452}
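/*
 * Typical use, as seen throughout mac80211.c above:
 *
 *	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 */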
453
454extern const u8 tid_to_mac80211_ac[];
455
456#define IWL_MVM_SCAN_STOPPING_SHIFT 8
457
458enum iwl_scan_status {
459 IWL_MVM_SCAN_REGULAR = BIT(0),
460 IWL_MVM_SCAN_SCHED = BIT(1),
461 IWL_MVM_SCAN_NETDETECT = BIT(2),
462
463 IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
464 IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
465 IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
466
467 IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR |
468 IWL_MVM_SCAN_STOPPING_REGULAR,
469 IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED |
470 IWL_MVM_SCAN_STOPPING_SCHED,
471 IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
472 IWL_MVM_SCAN_STOPPING_NETDETECT,
473
474 IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
475 IWL_MVM_SCAN_MASK = 0xff,
476};
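/*
 * Note that each "stopping" bit is the matching running-scan bit shifted left
 * by IWL_MVM_SCAN_STOPPING_SHIFT, e.g. IWL_MVM_SCAN_SCHED (BIT(1)) pairs with
 * IWL_MVM_SCAN_STOPPING_SCHED (BIT(9)).
 */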
477
478/**
479 * struct iwl_nvm_section - describes an NVM section in memory.
480 *
481 * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
482 * and saved for later use by the driver. Not all NVM sections are saved
483 * this way, only the needed ones.
484 */
485struct iwl_nvm_section {
486 u16 length;
487 const u8 *data;
488};
489
490/**
491 * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
492 * @ct_kill_exit: worker to exit thermal kill
493 * @dynamic_smps: Is dynamic SMPS enabled due to thermal throttling?
494 * @tx_backoff: The current thermal throttling tx backoff in uSec.
495 * @min_backoff: The minimal tx backoff due to power restrictions
496 * @params: Parameters to configure the thermal throttling algorithm.
497 * @throttle: Is thermal throttling active?
498 */
499struct iwl_mvm_tt_mgmt {
500 struct delayed_work ct_kill_exit;
501 bool dynamic_smps;
502 u32 tx_backoff;
503 u32 min_backoff;
504 struct iwl_tt_params params;
505 bool throttle;
506};
507
508#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
509
510struct iwl_mvm_frame_stats {
511 u32 legacy_frames;
512 u32 ht_frames;
513 u32 vht_frames;
514 u32 bw_20_frames;
515 u32 bw_40_frames;
516 u32 bw_80_frames;
517 u32 bw_160_frames;
518 u32 sgi_frames;
519 u32 ngi_frames;
520 u32 siso_frames;
521 u32 mimo2_frames;
522 u32 agg_frames;
523 u32 ampdu_count;
524 u32 success_frames;
525 u32 fail_frames;
526 u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
527 int last_frame_idx;
528};
529
530enum {
531 D0I3_DEFER_WAKEUP,
532 D0I3_PENDING_WAKEUP,
533};
534
535#define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff
536#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100
537#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200
538
539enum iwl_mvm_tdls_cs_state {
540 IWL_MVM_TDLS_SW_IDLE = 0,
541 IWL_MVM_TDLS_SW_REQ_SENT,
542 IWL_MVM_TDLS_SW_RESP_RCVD,
543 IWL_MVM_TDLS_SW_REQ_RCVD,
544 IWL_MVM_TDLS_SW_ACTIVE,
545};
546
547struct iwl_mvm_shared_mem_cfg {
548 u32 shared_mem_addr;
549 u32 shared_mem_size;
550 u32 sample_buff_addr;
551 u32 sample_buff_size;
552 u32 txfifo_addr;
553 u32 txfifo_size[TX_FIFO_MAX_NUM];
554 u32 rxfifo_size[RX_FIFO_MAX_NUM];
555 u32 page_buff_addr;
556 u32 page_buff_size;
557};
558
559struct iwl_mvm {
560 /* for logger access */
561 struct device *dev;
562
563 struct iwl_trans *trans;
564 const struct iwl_fw *fw;
565 const struct iwl_cfg *cfg;
566 struct iwl_phy_db *phy_db;
567 struct ieee80211_hw *hw;
568
569 /* for protecting access to iwl_mvm */
570 struct mutex mutex;
571 struct list_head async_handlers_list;
572 spinlock_t async_handlers_lock;
573 struct work_struct async_handlers_wk;
574
575 struct work_struct roc_done_wk;
576
577 unsigned long status;
578
579 /*
580 * for beacon filtering -
581 * currently only one interface can be supported
582 */
583 struct iwl_mvm_vif *bf_allowed_vif;
584
585 enum iwl_ucode_type cur_ucode;
586 bool ucode_loaded;
587 bool calibrating;
588 u32 error_event_table;
589 u32 log_event_table;
590 u32 umac_error_event_table;
591 bool support_umac_log;
592 struct iwl_sf_region sf_space;
593
594 u32 ampdu_ref;
595
596 struct iwl_notif_wait_data notif_wait;
597
598 struct mvm_statistics_rx rx_stats;
599
600 struct {
601 u64 rx_time;
602 u64 tx_time;
603 u64 on_time_rf;
604 u64 on_time_scan;
605 } radio_stats, accu_radio_stats;
606
607 struct {
608 /* Map to HW queue */
609 u32 hw_queue_to_mac80211;
610 u8 hw_queue_refcount;
611 bool setup_reserved;
612 u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
613 } queue_info[IWL_MAX_HW_QUEUES];
614 spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
615 atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
616
617 const char *nvm_file_name;
618 struct iwl_nvm_data *nvm_data;
619 /* NVM sections */
620 struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
621
622 /* Paging section */
623 struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
624 u16 num_of_paging_blk;
625 u16 num_of_pages_in_last_blk;
626
627 /* EEPROM MAC addresses */
628 struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
629
630 /* data related to data path */
631 struct iwl_rx_phy_info last_phy_info;
632 struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
633 struct work_struct sta_drained_wk;
634 unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
635 atomic_t pending_frames[IWL_MVM_STATION_COUNT];
636 u32 tfd_drained[IWL_MVM_STATION_COUNT];
637 u8 rx_ba_sessions;
638
639 /* configured by mac80211 */
640 u32 rts_threshold;
641
642 /* Scan status, cmd (pre-allocated) and auxiliary station */
643 unsigned int scan_status;
644 void *scan_cmd;
645 struct iwl_mcast_filter_cmd *mcast_filter_cmd;
646
647 /* max number of simultaneous scans the FW supports */
648 unsigned int max_scans;
649
650 /* UMAC scan tracking */
651 u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
652
653 /* rx chain antennas set through debugfs for the scan command */
654 u8 scan_rx_ant;
655
656#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
657 /* broadcast filters to configure for each associated station */
658 const struct iwl_fw_bcast_filter *bcast_filters;
659#ifdef CONFIG_IWLWIFI_DEBUGFS
660 struct {
661 bool override;
662 struct iwl_bcast_filter_cmd cmd;
663 } dbgfs_bcast_filtering;
664#endif
665#endif
666
667 /* Internal station */
668 struct iwl_mvm_int_sta aux_sta;
669
670 bool last_ebs_successful;
671
672 u8 scan_last_antenna_idx; /* to toggle TX between antennas */
673 u8 mgmt_last_antenna_idx;
674
675 /* last smart fifo state that was successfully sent to firmware */
676 enum iwl_sf_state sf_state;
677
678#ifdef CONFIG_IWLWIFI_DEBUGFS
679 struct dentry *debugfs_dir;
680 u32 dbgfs_sram_offset, dbgfs_sram_len;
681 u32 dbgfs_prph_reg_addr;
682 bool disable_power_off;
683 bool disable_power_off_d3;
684
685 bool scan_iter_notif_enabled;
686
687 struct debugfs_blob_wrapper nvm_hw_blob;
688 struct debugfs_blob_wrapper nvm_sw_blob;
689 struct debugfs_blob_wrapper nvm_calib_blob;
690 struct debugfs_blob_wrapper nvm_prod_blob;
691 struct debugfs_blob_wrapper nvm_phy_sku_blob;
692
693 struct iwl_mvm_frame_stats drv_rx_stats;
694 spinlock_t drv_stats_lock;
695 u16 dbgfs_rx_phyinfo;
696#endif
697
698 struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
699
700 struct list_head time_event_list;
701 spinlock_t time_event_lock;
702
703 /*
704	 * A bitmap indicating which key-table indices are in use. The
705	 * firmware can hold at most 16 keys; reflect this fact.
706 */
707 unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
708 u8 fw_key_deleted[STA_KEY_MAX_NUM];
709
710 /* references taken by the driver and spinlock protecting them */
711 spinlock_t refs_lock;
712 u8 refs[IWL_MVM_REF_COUNT];
713
714 u8 vif_count;
715
716 /* -1 for always, 0 for never, >0 for that many times */
717 s8 restart_fw;
718 u8 fw_dbg_conf;
719 struct delayed_work fw_dump_wk;
720 struct iwl_mvm_dump_desc *fw_dump_desc;
721 struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
722
723#ifdef CONFIG_IWLWIFI_LEDS
724 struct led_classdev led;
725#endif
726
727 struct ieee80211_vif *p2p_device_vif;
728
729#ifdef CONFIG_PM_SLEEP
730 struct wiphy_wowlan_support wowlan;
731 int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
732
733 /* sched scan settings for net detect */
734 struct cfg80211_sched_scan_request *nd_config;
735 struct ieee80211_scan_ies nd_ies;
736 struct cfg80211_match_set *nd_match_sets;
737 int n_nd_match_sets;
738 struct ieee80211_channel **nd_channels;
739 int n_nd_channels;
740 bool net_detect;
741#ifdef CONFIG_IWLWIFI_DEBUGFS
742 bool d3_wake_sysassert;
743 bool d3_test_active;
744 bool store_d3_resume_sram;
745 void *d3_resume_sram;
746 u32 d3_test_pme_ptr;
747 struct ieee80211_vif *keep_vif;
748 u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
749#endif
750#endif
751
752 /* d0i3 */
753 u8 d0i3_ap_sta_id;
754 bool d0i3_offloading;
755 struct work_struct d0i3_exit_work;
756 struct sk_buff_head d0i3_tx;
757 /* protect d0i3_suspend_flags */
758 struct mutex d0i3_suspend_mutex;
759 unsigned long d0i3_suspend_flags;
760 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
761 spinlock_t d0i3_tx_lock;
762 wait_queue_head_t d0i3_exit_waitq;
763
764 /* BT-Coex */
765 u8 bt_ack_kill_msk[NUM_PHY_CTX];
766 u8 bt_cts_kill_msk[NUM_PHY_CTX];
767
768 struct iwl_bt_coex_profile_notif_old last_bt_notif_old;
769 struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old;
770 struct iwl_bt_coex_profile_notif last_bt_notif;
771 struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
772
773 u32 last_ant_isol;
774 u8 last_corun_lut;
775 u8 bt_tx_prio;
776 enum iwl_bt_force_ant_mode bt_force_ant_mode;
777
778 /* Aux ROC */
779 struct list_head aux_roc_te_list;
780
781 /* Thermal Throttling and CTkill */
782 struct iwl_mvm_tt_mgmt thermal_throttle;
783 s32 temperature; /* Celsius */
784 /*
785	 * Debug option to set the NIC temperature. This option makes the
786	 * driver treat this value as the actual NIC temperature and ignore
787	 * the real temperature that is received from the fw.
788 */
789 bool temperature_test; /* Debug test temperature is enabled */
790
791 struct iwl_time_quota_cmd last_quota_cmd;
792
793#ifdef CONFIG_NL80211_TESTMODE
794 u32 noa_duration;
795 struct ieee80211_vif *noa_vif;
796#endif
797
798 /* Tx queues */
799 u8 aux_queue;
800 u8 first_agg_queue;
801 u8 last_agg_queue;
802
803 /* Indicate if device power save is allowed */
804 u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
805
806 struct ieee80211_vif __rcu *csa_vif;
807 struct ieee80211_vif __rcu *csa_tx_blocked_vif;
808 u8 csa_tx_block_bcn_timeout;
809
810 /* system time of last beacon (for AP/GO interface) */
811 u32 ap_last_beacon_gp2;
812
813 bool lar_regdom_set;
814 enum iwl_mcc_source mcc_src;
815
816 u8 low_latency_agg_frame_limit;
817
818 /* TDLS channel switch data */
819 struct {
820 struct delayed_work dwork;
821 enum iwl_mvm_tdls_cs_state state;
822
823 /*
824 * Current cs sta - might be different from periodic cs peer
825 * station. Value is meaningless when the cs-state is idle.
826 */
827 u8 cur_sta_id;
828
829 /* TDLS periodic channel-switch peer */
830 struct {
831 u8 sta_id;
832 u8 op_class;
833 bool initiator; /* are we the link initiator */
834 struct cfg80211_chan_def chandef;
835 struct sk_buff *skb; /* ch sw template */
836 u32 ch_sw_tm_ie;
837
838 /* timestamp of last ch-sw request sent (GP2 time) */
839 u32 sent_timestamp;
840 } peer;
841 } tdls_cs;
842
843 struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
844
845 u32 ciphers[6];
846 struct iwl_mvm_tof_data tof_data;
847};
848
849/* Extract MVM priv from op_mode and _hw */
850#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \
851 ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)
852
853#define IWL_MAC80211_GET_MVM(_hw) \
854 IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
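/*
 * Editor's illustration (a hedged sketch, not part of the original header):
 * a typical mac80211 callback recovers the driver context through the
 * accessor above. The function name below is hypothetical; only the
 * IWL_MAC80211_GET_MVM() macro comes from this file.
 */
static inline struct iwl_mvm *example_mvm_from_hw(struct ieee80211_hw *hw)
{
	/* hw->priv holds the iwl_op_mode; the macro unwraps it to iwl_mvm */
	return IWL_MAC80211_GET_MVM(hw);
}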
855
856enum iwl_mvm_status {
857 IWL_MVM_STATUS_HW_RFKILL,
858 IWL_MVM_STATUS_HW_CTKILL,
859 IWL_MVM_STATUS_ROC_RUNNING,
860 IWL_MVM_STATUS_IN_HW_RESTART,
861 IWL_MVM_STATUS_IN_D0I3,
862 IWL_MVM_STATUS_ROC_AUX_RUNNING,
863 IWL_MVM_STATUS_D3_RECONFIG,
864 IWL_MVM_STATUS_DUMPING_FW_LOG,
865};
866
867static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
868{
869 return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
870 test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
871}
872
873static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
874{
875 return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
876}
877
878/* Must be called with rcu_read_lock() held; the lock can only be
879 * released once the returned mvmsta is no longer needed.
880 */
881static inline struct iwl_mvm_sta *
882iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
883{
884 struct ieee80211_sta *sta;
885
886 if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
887 return NULL;
888
889 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
890
891 /* This can happen if the station has been removed right now */
892 if (IS_ERR_OR_NULL(sta))
893 return NULL;
894
895 return iwl_mvm_sta_from_mac80211(sta);
896}
897
898static inline struct iwl_mvm_sta *
899iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
900{
901 struct ieee80211_sta *sta;
902
903 if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
904 return NULL;
905
906 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
907 lockdep_is_held(&mvm->mutex));
908
909 /* This can happen if the station has been removed right now */
910 if (IS_ERR_OR_NULL(sta))
911 return NULL;
912
913 return iwl_mvm_sta_from_mac80211(sta);
914}
915
916static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
917{
918 return mvm->trans->cfg->d0i3 &&
919 mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
920 !iwlwifi_mod_params.d0i3_disable &&
921 fw_has_capa(&mvm->fw->ucode_capa,
922 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
923}
924
925static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
926{
927 return fw_has_capa(&mvm->fw->ucode_capa,
928 IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
929}
930
931static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
932{
933 bool nvm_lar = mvm->nvm_data->lar_enabled;
934 bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
935 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
936
937 if (iwlwifi_mod_params.lar_disable)
938 return false;
939
940 /*
941 * Enable LAR only if it is supported by the FW (TLV) &&
942 * enabled in the NVM
943 */
944 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000)
945 return nvm_lar && tlv_lar;
946 else
947 return tlv_lar;
948}
949
950static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
951{
952 return fw_has_api(&mvm->fw->ucode_capa,
953 IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
954 fw_has_capa(&mvm->fw->ucode_capa,
955 IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
956}
957
958static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
959{
960 return fw_has_capa(&mvm->fw->ucode_capa,
961 IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
962 IWL_MVM_BT_COEX_CORUNNING;
963}
964
965static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
966{
967 return fw_has_capa(&mvm->fw->ucode_capa,
968 IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
969 IWL_MVM_BT_COEX_RRC;
970}
971
972static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
973{
974 return fw_has_capa(&mvm->fw->ucode_capa,
975 IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
976}
977
978static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
979{
980 /* firmware flag isn't defined yet */
981 return false;
982}
983
984extern const u8 iwl_mvm_ac_to_tx_fifo[];
985
986struct iwl_rate_info {
987 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
988 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
989 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
990 u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
991 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
992};
993
994void __iwl_mvm_mac_stop(struct iwl_mvm *mvm);
995int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
996
997/******************
998 * MVM Methods
999 ******************/
1000/* uCode */
1001int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
1002
1003/* Utils */
1004int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
1005 enum ieee80211_band band);
1006void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
1007 enum ieee80211_band band,
1008 struct ieee80211_tx_rate *r);
1009u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
1010void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
1011u8 first_antenna(u8 mask);
1012u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
1013
1014/* Tx / Host Commands */
1015int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
1016 struct iwl_host_cmd *cmd);
1017int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
1018 u32 flags, u16 len, const void *data);
1019int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
1020 struct iwl_host_cmd *cmd,
1021 u32 *status);
1022int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
1023 u16 len, const void *data,
1024 u32 *status);
1025int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
1026 struct ieee80211_sta *sta);
1027int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
1028void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
1029 struct iwl_tx_cmd *tx_cmd,
1030 struct ieee80211_tx_info *info, u8 sta_id);
1031void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
1032 struct ieee80211_tx_info *info,
1033 struct ieee80211_sta *sta, __le16 fc);
1034#ifdef CONFIG_IWLWIFI_DEBUG
1035const char *iwl_mvm_get_tx_fail_reason(u32 status);
1036#else
1037static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
1038#endif
1039int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
1040void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
1041
1042static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
1043 struct iwl_tx_cmd *tx_cmd)
1044{
1045 struct ieee80211_key_conf *keyconf = info->control.hw_key;
1046
1047 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
1048 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
1049 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1050 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
1051}
1052
1053static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
1054{
1055 flush_work(&mvm->async_handlers_wk);
1056}
1057
1058/* Statistics */
1059void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
1060 struct iwl_rx_packet *pkt);
1061void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
1062 struct iwl_rx_cmd_buffer *rxb);
1063int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
1064void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
1065
1066/* NVM */
1067int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
1068int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
1069
1070static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
1071{
1072 return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ?
1073 mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant :
1074 mvm->fw->valid_tx_ant;
1075}
1076
1077static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
1078{
1079 return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ?
1080 mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant :
1081 mvm->fw->valid_rx_ant;
1082}
1083
1084static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
1085{
1086 u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
1087 FW_PHY_CFG_RX_CHAIN);
1088 u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
1089 u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
1090
1091 phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
1092 valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
1093
1094 return mvm->fw->phy_config & phy_config;
1095}
1096
1097int iwl_mvm_up(struct iwl_mvm *mvm);
1098int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
1099
1100int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
1101bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1102 struct iwl_bcast_filter_cmd *cmd);
1103
1104/*
1105 * FW notifications / CMD responses handlers
1106 * Convention: iwl_mvm_rx_<NAME OF THE CMD>
1107 */
1108void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1109void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
1110 struct iwl_rx_cmd_buffer *rxb);
1111void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1112void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1113void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
1114 struct iwl_rx_cmd_buffer *rxb);
1115void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1116void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1117 struct iwl_rx_cmd_buffer *rxb);
1118void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1119 struct iwl_rx_cmd_buffer *rxb);
1120void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
1121 struct iwl_rx_cmd_buffer *rxb);
1122
1123/* MVM PHY */
1124int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
1125 struct cfg80211_chan_def *chandef,
1126 u8 chains_static, u8 chains_dynamic);
1127int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
1128 struct cfg80211_chan_def *chandef,
1129 u8 chains_static, u8 chains_dynamic);
1130void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
1131 struct iwl_mvm_phy_ctxt *ctxt);
1132void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
1133 struct iwl_mvm_phy_ctxt *ctxt);
1134int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
1135u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
1136u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
1137
1138/* MAC (virtual interface) programming */
1139int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1140void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1141int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1142int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1143 bool force_assoc_off, const u8 *bssid_override);
1144int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1145u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
1146int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
1147 struct ieee80211_vif *vif);
1148void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
1149 struct iwl_rx_cmd_buffer *rxb);
1150void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
1151 struct iwl_rx_cmd_buffer *rxb);
1152void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
1153 struct ieee80211_vif *vif);
1154unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
1155 struct ieee80211_vif *exclude_vif);
1156/* Bindings */
1157int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1158int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1159
1160/* Quota management */
1161int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
1162 struct ieee80211_vif *disabled_vif);
1163
1164/* Scanning */
1165int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1166 struct cfg80211_scan_request *req,
1167 struct ieee80211_scan_ies *ies);
1168int iwl_mvm_scan_size(struct iwl_mvm *mvm);
1169int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
1170int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
1171void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
1172
1173/* Scheduled scan */
1174void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
1175 struct iwl_rx_cmd_buffer *rxb);
1176void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1177 struct iwl_rx_cmd_buffer *rxb);
1178int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1179 struct ieee80211_vif *vif,
1180 struct cfg80211_sched_scan_request *req,
1181 struct ieee80211_scan_ies *ies,
1182 int type);
1183void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
1184 struct iwl_rx_cmd_buffer *rxb);
1185
1186/* UMAC scan */
1187int iwl_mvm_config_scan(struct iwl_mvm *mvm);
1188void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1189 struct iwl_rx_cmd_buffer *rxb);
1190void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1191 struct iwl_rx_cmd_buffer *rxb);
1192
1193/* MVM debugfs */
1194#ifdef CONFIG_IWLWIFI_DEBUGFS
1195int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
1196void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1197void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1198#else
1199static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
1200 struct dentry *dbgfs_dir)
1201{
1202 return 0;
1203}
1204static inline void
1205iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1206{
1207}
1208static inline void
1209iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1210{
1211}
1212#endif /* CONFIG_IWLWIFI_DEBUGFS */
1213
1214/* rate scaling */
1215int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
1216void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
1217int rs_pretty_print_rate(char *buf, const u32 rate);
1218void rs_update_last_rssi(struct iwl_mvm *mvm,
1219 struct iwl_lq_sta *lq_sta,
1220 struct ieee80211_rx_status *rx_status);
1221
1222/* power management */
1223int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
1224int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
1225int iwl_mvm_power_update_ps(struct iwl_mvm *mvm);
1226int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1227 char *buf, int bufsz);
1228
1229void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1230void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
1231 struct iwl_rx_cmd_buffer *rxb);
1232
1233#ifdef CONFIG_IWLWIFI_LEDS
1234int iwl_mvm_leds_init(struct iwl_mvm *mvm);
1235void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
1236#else
1237static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
1238{
1239 return 0;
1240}
1241static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
1242{
1243}
1244#endif
1245
1246/* D3 (WoWLAN, NetDetect) */
1247int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
1248int iwl_mvm_resume(struct ieee80211_hw *hw);
1249void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
1250void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
1251 struct ieee80211_vif *vif,
1252 struct cfg80211_gtk_rekey_data *data);
1253void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
1254 struct ieee80211_vif *vif,
1255 struct inet6_dev *idev);
1256void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
1257 struct ieee80211_vif *vif, int idx);
1258extern const struct file_operations iwl_dbgfs_d3_test_ops;
1259#ifdef CONFIG_PM_SLEEP
1260void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
1261 struct ieee80211_vif *vif);
1262#else
1263static inline void
1264iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1265{
1266}
1267#endif
1268void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
1269 struct iwl_wowlan_config_cmd *cmd);
1270int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
1271 struct ieee80211_vif *vif,
1272 bool disable_offloading,
1273 u32 cmd_flags);
1274
1275/* D0i3 */
1276void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
1277void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
1278int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
1279bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
1280void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
1281int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
1282int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
1283int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
1284
1285/* BT Coex */
1286int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
1287void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
1288 struct iwl_rx_cmd_buffer *rxb);
1289void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1290 enum ieee80211_rssi_event_data);
1291void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
1292u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
1293 struct ieee80211_sta *sta);
1294bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
1295 struct ieee80211_sta *sta);
1296bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
1297bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
1298bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
1299 enum ieee80211_band band);
1300u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
1301 struct ieee80211_tx_info *info, u8 ac);
1302
1303bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
1304void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
1305int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
1306void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
1307 struct iwl_rx_cmd_buffer *rxb);
1308void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1309 enum ieee80211_rssi_event_data);
1310u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
1311 struct ieee80211_sta *sta);
1312bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
1313 struct ieee80211_sta *sta);
1314bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
1315 enum ieee80211_band band);
1316void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
1317 struct iwl_rx_cmd_buffer *rxb);
1318
1319/* beacon filtering */
1320#ifdef CONFIG_IWLWIFI_DEBUGFS
1321void
1322iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
1323 struct iwl_beacon_filter_cmd *cmd);
1324#else
1325static inline void
1326iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
1327 struct iwl_beacon_filter_cmd *cmd)
1328{}
1329#endif
1330int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
1331 struct ieee80211_vif *vif,
1332 bool enable, u32 flags);
1333int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
1334 struct ieee80211_vif *vif,
1335 u32 flags);
1336int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
1337 struct ieee80211_vif *vif,
1338 u32 flags);
1339/* SMPS */
1340void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1341 enum iwl_mvm_smps_type_request req_type,
1342 enum ieee80211_smps_mode smps_request);
1343bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
1344
1345/* Low latency */
1346int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1347 bool value);
1348/* get SystemLowLatencyMode - only needed for beacon threshold? */
1349bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
1350/* get VMACLowLatencyMode */
1351static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
1352{
1353 /*
1354 * should this consider associated/active/... state?
1355 *
1356 * Normally low-latency should only be active on interfaces
1357 * that are active, but at least with debugfs it can also be
1358	 * enabled on interfaces that aren't active. However, when
1359	 * interfaces aren't active they aren't added to the
1360	 * binding, so this has no real impact. For now, just return
1361 * the current desired low-latency state.
1362 */
1363
1364 return mvmvif->low_latency;
1365}
1366
1367/* hw scheduler queue config */
1368void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
1369 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
1370 unsigned int wdg_timeout);
1371/*
1372 * Disable a TXQ.
1373 * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
1374 */
1375void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
1376 u8 tid, u8 flags);
1377int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
1378
1379static inline
1380void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
1381 u8 fifo, u16 ssn, unsigned int wdg_timeout)
1382{
1383 struct iwl_trans_txq_scd_cfg cfg = {
1384 .fifo = fifo,
1385 .tid = IWL_MAX_TID_COUNT,
1386 .aggregate = false,
1387 .frame_limit = IWL_FRAME_LIMIT,
1388 };
1389
1390 iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
1391}
1392
1393static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
1394 int mac80211_queue, int fifo,
1395 int sta_id, int tid, int frame_limit,
1396 u16 ssn, unsigned int wdg_timeout)
1397{
1398 struct iwl_trans_txq_scd_cfg cfg = {
1399 .fifo = fifo,
1400 .sta_id = sta_id,
1401 .tid = tid,
1402 .frame_limit = frame_limit,
1403 .aggregate = true,
1404 };
1405
1406 iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
1407}
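/*
 * Editor's note, an illustrative sketch (the fifo constant and timeout
 * below are hypothetical placeholders, not taken from this header): a
 * caller would typically bring up a best-effort AC queue with
 *
 *	iwl_mvm_enable_ac_txq(mvm, queue, mac80211_queue,
 *			      IWL_MVM_TX_FIFO_BE, ssn, wdg_timeout);
 *
 * where the fifo argument names the HW FIFO the queue is mapped to.
 */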
1408
1409/* Thermal management and CT-kill */
1410void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
1411void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
1412void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
1413 struct iwl_rx_cmd_buffer *rxb);
1414void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
1415void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
1416void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
1417void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
1418int iwl_mvm_get_temp(struct iwl_mvm *mvm);
1419
1420/* Location Aware Regulatory */
1421struct iwl_mcc_update_resp *
1422iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
1423 enum iwl_mcc_source src_id);
1424int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
1425void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
1426 struct iwl_rx_cmd_buffer *rxb);
1427struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
1428 const char *alpha2,
1429 enum iwl_mcc_source src_id,
1430 bool *changed);
1431struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
1432 bool *changed);
1433int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
1434void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
1435
1436/* smart fifo */
1437int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1438 bool added_vif);
1439
1440/* TDLS */
1441
1442/*
1443 * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present.
1444 * This TID is marked as used vs the AP and all connected TDLS peers.
1445 */
1446#define IWL_MVM_TDLS_FW_TID 4
1447
1448int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1449void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
1450void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1451 bool sta_added);
1452void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
1453 struct ieee80211_vif *vif);
1454int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
1455 struct ieee80211_vif *vif,
1456 struct ieee80211_sta *sta, u8 oper_class,
1457 struct cfg80211_chan_def *chandef,
1458 struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
1459void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
1460 struct ieee80211_vif *vif,
1461 struct ieee80211_tdls_ch_sw_params *params);
1462void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
1463 struct ieee80211_vif *vif,
1464 struct ieee80211_sta *sta);
1465void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1466void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
1467
1468struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
1469
1470void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
1471void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
1472
1473int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
1474int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
1475 const char *str, size_t len,
1476 struct iwl_fw_dbg_trigger_tlv *trigger);
1477int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
1478 struct iwl_mvm_dump_desc *desc,
1479 struct iwl_fw_dbg_trigger_tlv *trigger);
1480void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
1481int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
1482 struct iwl_fw_dbg_trigger_tlv *trigger,
1483 const char *fmt, ...) __printf(3, 4);
1484unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1485 struct ieee80211_vif *vif,
1486 bool tdls, bool cmd_q);
1487void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1488 const char *errmsg);
1489static inline bool
1490iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
1491 struct ieee80211_vif *vif)
1492{
1493 u32 trig_vif = le32_to_cpu(trig->vif_type);
1494
1495 return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
1496}
1497
1498static inline bool
1499iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm,
1500 struct iwl_fw_dbg_trigger_tlv *trig)
1501{
1502 return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
1503 (mvm->fw_dbg_conf == FW_DBG_INVALID ||
1504 (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids))));
1505}
1506
1507static inline bool
1508iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm,
1509 struct ieee80211_vif *vif,
1510 struct iwl_fw_dbg_trigger_tlv *trig)
1511{
1512 if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif))
1513 return false;
1514
1515 return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig);
1516}
1517
1518static inline void
1519iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm,
1520 struct ieee80211_vif *vif,
1521 enum iwl_fw_dbg_trigger trig)
1522{
1523 struct iwl_fw_dbg_trigger_tlv *trigger;
1524
1525 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig))
1526 return;
1527
1528 trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig);
1529 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
1530 return;
1531
1532 iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
1533}
1534
1535#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
deleted file mode 100644
index 2ee0f6fe56a1..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ /dev/null
@@ -1,864 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/firmware.h>
66#include <linux/rtnetlink.h>
67#include <linux/pci.h>
68#include <linux/acpi.h>
69#include "iwl-trans.h"
70#include "iwl-csr.h"
71#include "mvm.h"
72#include "iwl-eeprom-parse.h"
73#include "iwl-eeprom-read.h"
74#include "iwl-nvm-parse.h"
75#include "iwl-prph.h"
76
77/* Default NVM size to read */
78#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
79#define IWL_MAX_NVM_SECTION_SIZE 0x1b58
80#define IWL_MAX_NVM_8000_SECTION_SIZE 0x1ffc
81
82#define NVM_WRITE_OPCODE 1
83#define NVM_READ_OPCODE 0
84
85/* load nvm chunk response */
86enum {
87 READ_NVM_CHUNK_SUCCEED = 0,
88 READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
89};
90
91/*
92 * prepare the NVM host command w/ the pointers to the nvm buffer
93 * and send it to fw
94 */
95static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
96 u16 offset, u16 length, const u8 *data)
97{
98 struct iwl_nvm_access_cmd nvm_access_cmd = {
99 .offset = cpu_to_le16(offset),
100 .length = cpu_to_le16(length),
101 .type = cpu_to_le16(section),
102 .op_code = NVM_WRITE_OPCODE,
103 };
104 struct iwl_host_cmd cmd = {
105 .id = NVM_ACCESS_CMD,
106 .len = { sizeof(struct iwl_nvm_access_cmd), length },
107 .flags = CMD_SEND_IN_RFKILL,
108 .data = { &nvm_access_cmd, data },
109 /* data may come from vmalloc, so use _DUP */
110 .dataflags = { 0, IWL_HCMD_DFL_DUP },
111 };
112
113 return iwl_mvm_send_cmd(mvm, &cmd);
114}
115
116static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
117 u16 offset, u16 length, u8 *data)
118{
119 struct iwl_nvm_access_cmd nvm_access_cmd = {
120 .offset = cpu_to_le16(offset),
121 .length = cpu_to_le16(length),
122 .type = cpu_to_le16(section),
123 .op_code = NVM_READ_OPCODE,
124 };
125 struct iwl_nvm_access_resp *nvm_resp;
126 struct iwl_rx_packet *pkt;
127 struct iwl_host_cmd cmd = {
128 .id = NVM_ACCESS_CMD,
129 .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
130 .data = { &nvm_access_cmd, },
131 };
132 int ret, bytes_read, offset_read;
133 u8 *resp_data;
134
135 cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
136
137 ret = iwl_mvm_send_cmd(mvm, &cmd);
138 if (ret)
139 return ret;
140
141 pkt = cmd.resp_pkt;
142
143 /* Extract NVM response */
144 nvm_resp = (void *)pkt->data;
145 ret = le16_to_cpu(nvm_resp->status);
146 bytes_read = le16_to_cpu(nvm_resp->length);
147 offset_read = le16_to_cpu(nvm_resp->offset);
148 resp_data = nvm_resp->data;
149 if (ret) {
150 if ((offset != 0) &&
151 (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
152 /*
153 * meaning of NOT_VALID_ADDRESS:
154			 * the driver tried to read a chunk from an address that is
155			 * a multiple of 2K and got an error since that address is
156			 * empty. Meaning of (offset != 0): the driver already
157			 * read valid data from another chunk, so this case
158 * is not an error.
159 */
160 IWL_DEBUG_EEPROM(mvm->trans->dev,
161					 "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
162 offset);
163 ret = 0;
164 } else {
165 IWL_DEBUG_EEPROM(mvm->trans->dev,
166 "NVM access command failed with status %d (device: %s)\n",
167 ret, mvm->cfg->name);
168 ret = -EIO;
169 }
170 goto exit;
171 }
172
173 if (offset_read != offset) {
174 IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
175 offset_read);
176 ret = -EINVAL;
177 goto exit;
178 }
179
180 /* Write data to NVM */
181 memcpy(data + offset, resp_data, bytes_read);
182 ret = bytes_read;
183
184exit:
185 iwl_free_resp(&cmd);
186 return ret;
187}
188
189static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
190 const u8 *data, u16 length)
191{
192 int offset = 0;
193
194 /* copy data in chunks of 2k (and remainder if any) */
195
196 while (offset < length) {
197 int chunk_size, ret;
198
199 chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
200 length - offset);
201
202 ret = iwl_nvm_write_chunk(mvm, section, offset,
203 chunk_size, data + offset);
204 if (ret < 0)
205 return ret;
206
207 offset += chunk_size;
208 }
209
210 return 0;
211}
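/*
 * Editor's worked example (hypothetical sizes): with
 * IWL_NVM_DEFAULT_CHUNK_SIZE = 2048 bytes, a 5000-byte section is written
 * as three NVM_ACCESS_CMD chunks of 2048, 2048 and 904 bytes at offsets
 * 0, 2048 and 4096.
 */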
212
213/*
214 * Reads an NVM section completely.
215 * NICs prior to the 7000 family don't have a real NVM, but just read
216 * section 0, which is the EEPROM. Because EEPROM reads are not bounded
217 * by the uCode, we need to manually check in this case that we don't
218 * overflow and try to read more than the EEPROM size.
219 * For 7000 family NICs, we supply the maximal size we can read, and
220 * the uCode fills the response with as much data as it can,
221 * without overflowing, so no check is needed.
222 */
223static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
224 u8 *data, u32 size_read)
225{
226 u16 length, offset = 0;
227 int ret;
228
229 /* Set nvm section read length */
230 length = IWL_NVM_DEFAULT_CHUNK_SIZE;
231
232 ret = length;
233
234 /* Read the NVM until exhausted (reading less than requested) */
235 while (ret == length) {
236 /* Check no memory assumptions fail and cause an overflow */
237 if ((size_read + offset + length) >
238 mvm->cfg->base_params->eeprom_size) {
239 IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
240 return -ENOBUFS;
241 }
242
243 ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
244 if (ret < 0) {
245 IWL_DEBUG_EEPROM(mvm->trans->dev,
246 "Cannot read NVM from section %d offset %d, length %d\n",
247 section, offset, length);
248 return ret;
249 }
250 offset += ret;
251 }
252
253 IWL_DEBUG_EEPROM(mvm->trans->dev,
254 "NVM section %d read completed\n", section);
255 return offset;
256}
257
258static struct iwl_nvm_data *
259iwl_parse_nvm_sections(struct iwl_mvm *mvm)
260{
261 struct iwl_nvm_section *sections = mvm->nvm_sections;
262 const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
263 bool lar_enabled;
264 u32 mac_addr0, mac_addr1;
265
266 /* Checking for required sections */
267 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
268 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
269 !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
270 IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
271 return NULL;
272 }
273 } else {
274 /* SW and REGULATORY sections are mandatory */
275 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
276 !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
277 IWL_ERR(mvm,
278 "Can't parse empty family 8000 OTP/NVM sections\n");
279 return NULL;
280 }
281 /* MAC_OVERRIDE or at least HW section must exist */
282 if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
283 !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
284 IWL_ERR(mvm,
285 "Can't parse mac_address, empty sections\n");
286 return NULL;
287 }
288
289 /* PHY_SKU section is mandatory in B0 */
290 if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
291 IWL_ERR(mvm,
292 "Can't parse phy_sku in B0, empty sections\n");
293 return NULL;
294 }
295 }
296
297 if (WARN_ON(!mvm->cfg))
298 return NULL;
299
300 /* read the mac address from WFMP registers */
301 mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
302 mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);
303
304 hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
305 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
306 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
307 regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
308 mac_override =
309 (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
310 phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
311
312 lar_enabled = !iwlwifi_mod_params.lar_disable &&
313 fw_has_capa(&mvm->fw->ucode_capa,
314 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
315
316 return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
317 regulatory, mac_override, phy_sku,
318 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
319 lar_enabled, mac_addr0, mac_addr1,
320 mvm->trans->hw_id);
321}
322
323#define MAX_NVM_FILE_LEN 16384
324
325/*
326 * Reads external NVM from a file into mvm->nvm_sections
327 *
328 * HOW TO CREATE THE NVM FILE FORMAT:
329 * ------------------------------
330 * 1. create hex file, format:
331 * 3800 -> header
332 * 0000 -> header
333 * 5a40 -> data
334 *
335 * rev - 6 bit (word1)
336 * len - 10 bit (word1)
337 * id - 4 bit (word2)
338 * rsv - 12 bit (word2)
339 *
340 * 2. byte-swap each 16-bit word (flip the two 8-bit halves per line) to get the right NVM file format
341 *
342 * 3. create binary file from the hex file
343 *
344 * 4. save as "iNVM_xxx.bin" under /lib/firmware
345 */
346static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
347{
348 int ret, section_size;
349 u16 section_id;
350 const struct firmware *fw_entry;
351 const struct {
352 __le16 word1;
353 __le16 word2;
354 u8 data[];
355 } *file_sec;
356 const u8 *eof, *temp;
357 int max_section_size;
358 const __le32 *dword_buff;
359
360#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
361#define NVM_WORD2_ID(x) (x >> 12)
362#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
363#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
364#define NVM_HEADER_0 (0x2A504C54)
365#define NVM_HEADER_1 (0x4E564D2A)
366#define NVM_HEADER_SIZE (4 * sizeof(u32))
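	/*
	 * Editor's worked example (hypothetical values): for a non-family-8000
	 * section header with word1 = 0x0004 and word2 = 0x1000,
	 * NVM_WORD1_LEN(0x0004) = 8 * 4 = 32, so section_size = 2 * 32 = 64
	 * bytes, and NVM_WORD2_ID(0x1000) = 0x1000 >> 12 = section id 1.
	 */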
367
368 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
369
370 /* Maximal size depends on HW family and step */
371 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
372 max_section_size = IWL_MAX_NVM_SECTION_SIZE;
373 else
374 max_section_size = IWL_MAX_NVM_8000_SECTION_SIZE;
375
376 /*
377 * Obtain NVM image via request_firmware. Since we already used
378 * request_firmware_nowait() for the firmware binary load and only
379 * get here after that, we assume the NVM request can be satisfied
380 * synchronously.
381 */
382 ret = request_firmware(&fw_entry, mvm->nvm_file_name,
383 mvm->trans->dev);
384 if (ret) {
385 IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
386 mvm->nvm_file_name, ret);
387 return ret;
388 }
389
390 IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
391 mvm->nvm_file_name, fw_entry->size);
392
393 if (fw_entry->size > MAX_NVM_FILE_LEN) {
394 IWL_ERR(mvm, "NVM file too large\n");
395 ret = -EINVAL;
396 goto out;
397 }
398
399 eof = fw_entry->data + fw_entry->size;
400 dword_buff = (__le32 *)fw_entry->data;
401
402	/* Some NVM files contain a header.
403	 * The header is identified by the following two dwords:
404 * dword[0] = 0x2A504C54
405 * dword[1] = 0x4E564D2A
406 *
407 * This header must be skipped when providing the NVM data to the FW.
408 */
409 if (fw_entry->size > NVM_HEADER_SIZE &&
410 dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
411 dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
412 file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
413 IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
414 IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
415 le32_to_cpu(dword_buff[3]));
416
417 /* nvm file validation, dword_buff[2] holds the file version */
418 if ((CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
419 le32_to_cpu(dword_buff[2]) < 0xE4A) ||
420 (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP &&
421 le32_to_cpu(dword_buff[2]) >= 0xE4A)) {
422 ret = -EFAULT;
423 goto out;
424 }
425 } else {
426 file_sec = (void *)fw_entry->data;
427 }
428
429 while (true) {
430 if (file_sec->data > eof) {
431 IWL_ERR(mvm,
432 "ERROR - NVM file too short for section header\n");
433 ret = -EINVAL;
434 break;
435 }
436
437 /* check for EOF marker */
438 if (!file_sec->word1 && !file_sec->word2) {
439 ret = 0;
440 break;
441 }
442
443 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
444 section_size =
445 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
446 section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
447 } else {
448 section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
449 le16_to_cpu(file_sec->word2));
450 section_id = NVM_WORD1_ID_FAMILY_8000(
451 le16_to_cpu(file_sec->word1));
452 }
453
454 if (section_size > max_section_size) {
455 IWL_ERR(mvm, "ERROR - section too large (%d)\n",
456 section_size);
457 ret = -EINVAL;
458 break;
459 }
460
461 if (!section_size) {
462 IWL_ERR(mvm, "ERROR - section empty\n");
463 ret = -EINVAL;
464 break;
465 }
466
467 if (file_sec->data + section_size > eof) {
468 IWL_ERR(mvm,
469 "ERROR - NVM file too short for section (%d bytes)\n",
470 section_size);
471 ret = -EINVAL;
472 break;
473 }
474
475 if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
476 "Invalid NVM section ID %d\n", section_id)) {
477 ret = -EINVAL;
478 break;
479 }
480
481 temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
482 if (!temp) {
483 ret = -ENOMEM;
484 break;
485 }
486 kfree(mvm->nvm_sections[section_id].data);
487 mvm->nvm_sections[section_id].data = temp;
488 mvm->nvm_sections[section_id].length = section_size;
489
490 /* advance to the next section */
491 file_sec = (void *)(file_sec->data + section_size);
492 }
493out:
494 release_firmware(fw_entry);
495 return ret;
496}
497
498/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
499int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
500{
501 int i, ret = 0;
502 struct iwl_nvm_section *sections = mvm->nvm_sections;
503
504	IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
505
506 for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
507 if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
508 continue;
509 ret = iwl_nvm_write_section(mvm, i, sections[i].data,
510 sections[i].length);
511 if (ret < 0) {
512 IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
513 break;
514 }
515 }
516 return ret;
517}
518
519int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
520{
521 int ret, section;
522 u32 size_read = 0;
523 u8 *nvm_buffer, *temp;
524 const char *nvm_file_B = mvm->cfg->default_nvm_file_B_step;
525 const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;
526
527 if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
528 return -EINVAL;
529
530 /* load NVM values from nic */
531 if (read_nvm_from_nic) {
532 /* Read From FW NVM */
533 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
534
535 nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
536 GFP_KERNEL);
537 if (!nvm_buffer)
538 return -ENOMEM;
539 for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
540 /* we override the constness for initial read */
541 ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
542 size_read);
543 if (ret < 0)
544 continue;
545 size_read += ret;
546 temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
547 if (!temp) {
548 ret = -ENOMEM;
549 break;
550 }
551 mvm->nvm_sections[section].data = temp;
552 mvm->nvm_sections[section].length = ret;
553
554#ifdef CONFIG_IWLWIFI_DEBUGFS
555 switch (section) {
556 case NVM_SECTION_TYPE_SW:
557 mvm->nvm_sw_blob.data = temp;
558 mvm->nvm_sw_blob.size = ret;
559 break;
560 case NVM_SECTION_TYPE_CALIBRATION:
561 mvm->nvm_calib_blob.data = temp;
562 mvm->nvm_calib_blob.size = ret;
563 break;
564 case NVM_SECTION_TYPE_PRODUCTION:
565 mvm->nvm_prod_blob.data = temp;
566 mvm->nvm_prod_blob.size = ret;
567 break;
568 case NVM_SECTION_TYPE_PHY_SKU:
569 mvm->nvm_phy_sku_blob.data = temp;
570 mvm->nvm_phy_sku_blob.size = ret;
571 break;
572 default:
573 if (section == mvm->cfg->nvm_hw_section_num) {
574 mvm->nvm_hw_blob.data = temp;
575 mvm->nvm_hw_blob.size = ret;
576 break;
577 }
578 }
579#endif
580 }
581 if (!size_read)
582 IWL_ERR(mvm, "OTP is blank\n");
583 kfree(nvm_buffer);
584 }
585
586 /* Only if PNVM selected in the mod param - load external NVM */
587 if (mvm->nvm_file_name) {
588 /* read External NVM file from the mod param */
589 ret = iwl_mvm_read_external_nvm(mvm);
590 if (ret) {
591 /* choose the nvm_file name according to the
592 * HW step
593 */
594 if (CSR_HW_REV_STEP(mvm->trans->hw_rev) ==
595 SILICON_B_STEP)
596 mvm->nvm_file_name = nvm_file_B;
597 else
598 mvm->nvm_file_name = nvm_file_C;
599
600 if (ret == -EFAULT && mvm->nvm_file_name) {
601				/* in case reading the nvm file failed, try again */
602 ret = iwl_mvm_read_external_nvm(mvm);
603 if (ret)
604 return ret;
605 } else {
606 return ret;
607 }
608 }
609 }
610
611 /* parse the relevant nvm sections */
612 mvm->nvm_data = iwl_parse_nvm_sections(mvm);
613 if (!mvm->nvm_data)
614 return -ENODATA;
615 IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
616 mvm->nvm_data->nvm_version);
617
618 return 0;
619}
620
621struct iwl_mcc_update_resp *
622iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
623 enum iwl_mcc_source src_id)
624{
625 struct iwl_mcc_update_cmd mcc_update_cmd = {
626 .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
627 .source_id = (u8)src_id,
628 };
629 struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
630 struct iwl_rx_packet *pkt;
631 struct iwl_host_cmd cmd = {
632 .id = MCC_UPDATE_CMD,
633 .flags = CMD_WANT_SKB,
634 .data = { &mcc_update_cmd },
635 };
636
637 int ret;
638 u32 status;
639 int resp_len, n_channels;
640 u16 mcc;
641
642 if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
643 return ERR_PTR(-EOPNOTSUPP);
644
645 cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
646
647 IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
648 alpha2[0], alpha2[1], src_id);
649
650 ret = iwl_mvm_send_cmd(mvm, &cmd);
651 if (ret)
652 return ERR_PTR(ret);
653
654 pkt = cmd.resp_pkt;
655
656 /* Extract MCC response */
657 mcc_resp = (void *)pkt->data;
658 status = le32_to_cpu(mcc_resp->status);
659
660 mcc = le16_to_cpu(mcc_resp->mcc);
661
662 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
663 if (mcc == 0) {
664 mcc = 0x3030; /* "00" - world */
665 mcc_resp->mcc = cpu_to_le16(mcc);
666 }
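	/*
	 * Editor's note (illustrative): the MCC packs two ASCII characters
	 * into a u16, e.g. 0x5553 decodes to 'U','S'; the workaround above
	 * maps the firmware's 0x0000 to 0x3030, i.e. "00" (world domain).
	 */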
667
668 n_channels = __le32_to_cpu(mcc_resp->n_channels);
669 IWL_DEBUG_LAR(mvm,
670 "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
671 status, mcc, mcc >> 8, mcc & 0xff,
672 !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
673
674 resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
675 resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
676 if (!resp_cp) {
677 ret = -ENOMEM;
678 goto exit;
679 }
680
681 ret = 0;
682exit:
683 iwl_free_resp(&cmd);
684 if (ret)
685 return ERR_PTR(ret);
686 return resp_cp;
687}
688
689#ifdef CONFIG_ACPI
690#define WRD_METHOD "WRDD"
691#define WRDD_WIFI (0x07)
692#define WRDD_WIGIG (0x10)
693
694static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
695{
696 union acpi_object *mcc_pkg, *domain_type, *mcc_value;
697 u32 i;
698
699 if (wrdd->type != ACPI_TYPE_PACKAGE ||
700 wrdd->package.count < 2 ||
701 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
702 wrdd->package.elements[0].integer.value != 0) {
703 IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
704 return 0;
705 }
706
707 for (i = 1 ; i < wrdd->package.count ; ++i) {
708 mcc_pkg = &wrdd->package.elements[i];
709
710 if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
711 mcc_pkg->package.count < 2 ||
712 mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
713 mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
714 mcc_pkg = NULL;
715 continue;
716 }
717
718 domain_type = &mcc_pkg->package.elements[0];
719 if (domain_type->integer.value == WRDD_WIFI)
720 break;
721
722 mcc_pkg = NULL;
723 }
724
725 if (mcc_pkg) {
726 mcc_value = &mcc_pkg->package.elements[1];
727 return mcc_value->integer.value;
728 }
729
730 return 0;
731}
732
733static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
734{
735 acpi_handle root_handle;
736 acpi_handle handle;
737 struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
738 acpi_status status;
739 u32 mcc_val;
740 struct pci_dev *pdev = to_pci_dev(mvm->dev);
741
742 root_handle = ACPI_HANDLE(&pdev->dev);
743 if (!root_handle) {
744 IWL_DEBUG_LAR(mvm,
745 "Could not retrieve root port ACPI handle\n");
746 return -ENOENT;
747 }
748
749 /* Get the method's handle */
750 status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
751 if (ACPI_FAILURE(status)) {
752 IWL_DEBUG_LAR(mvm, "WRD method not found\n");
753 return -ENOENT;
754 }
755
756 /* Call WRDD with no arguments */
757 status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
758 if (ACPI_FAILURE(status)) {
759		IWL_DEBUG_LAR(mvm, "WRDD invocation failed (0x%x)\n", status);
760 return -ENOENT;
761 }
762
763 mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
764 kfree(wrdd.pointer);
765 if (!mcc_val)
766 return -ENOENT;
767
768 mcc[0] = (mcc_val >> 8) & 0xff;
769 mcc[1] = mcc_val & 0xff;
770 mcc[2] = '\0';
771 return 0;
772}
773#else /* CONFIG_ACPI */
774static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
775{
776 return -ENOENT;
777}
778#endif
779
780int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
781{
782 bool tlv_lar;
783 bool nvm_lar;
784 int retval;
785 struct ieee80211_regdomain *regd;
786 char mcc[3];
787
788 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
789 tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
790 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
791 nvm_lar = mvm->nvm_data->lar_enabled;
792 if (tlv_lar != nvm_lar)
793 IWL_INFO(mvm,
794				 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM = %s)\n",
795 tlv_lar ? "enabled" : "disabled",
796 nvm_lar ? "enabled" : "disabled");
797 }
798
799 if (!iwl_mvm_is_lar_supported(mvm))
800 return 0;
801
802 /*
803 * try to replay the last set MCC to FW. If it doesn't exist,
804 * queue an update to cfg80211 to retrieve the default alpha2 from FW.
805 */
806 retval = iwl_mvm_init_fw_regd(mvm);
807 if (retval != -ENOENT)
808 return retval;
809
810 /*
811	 * Driver regulatory hint for the initial update; this also informs the
812	 * firmware that we support wifi location updates.
813 * Disallow scans that might crash the FW while the LAR regdomain
814 * is not set.
815 */
816 mvm->lar_regdom_set = false;
817
818 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
819 if (IS_ERR_OR_NULL(regd))
820 return -EIO;
821
822 if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
823 !iwl_mvm_get_bios_mcc(mvm, mcc)) {
824 kfree(regd);
825 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
826 MCC_SOURCE_BIOS, NULL);
827 if (IS_ERR_OR_NULL(regd))
828 return -EIO;
829 }
830
831 retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
832 kfree(regd);
833 return retval;
834}
835
836void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
837 struct iwl_rx_cmd_buffer *rxb)
838{
839 struct iwl_rx_packet *pkt = rxb_addr(rxb);
840 struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
841 enum iwl_mcc_source src;
842 char mcc[3];
843 struct ieee80211_regdomain *regd;
844
845 lockdep_assert_held(&mvm->mutex);
846
847 if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
848 return;
849
850 mcc[0] = notif->mcc >> 8;
851 mcc[1] = notif->mcc & 0xff;
852 mcc[2] = '\0';
853 src = notif->source_id;
854
855 IWL_DEBUG_LAR(mvm,
856 "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
857 mcc, src);
858 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
859 if (IS_ERR_OR_NULL(regd))
860 return;
861
862 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
863 kfree(regd);
864}
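
As an aside on the MCC handling above (both iwl_mvm_get_bios_mcc() and iwl_mvm_rx_chub_update_mcc() unpack a 16-bit value into a two-character alpha2 string): a minimal standalone sketch of the same unpacking follows. The value is made up and the snippet is illustration only, not part of the driver.

#include <stdio.h>

int main(void)
{
	unsigned int mcc_val = ('U' << 8) | 'S';	/* hypothetical value, 0x5553 */
	char mcc[3];

	mcc[0] = (mcc_val >> 8) & 0xff;	/* high byte: first character */
	mcc[1] = mcc_val & 0xff;	/* low byte: second character */
	mcc[2] = '\0';
	printf("%s\n", mcc);		/* prints "US" */
	return 0;
}
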
diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c
deleted file mode 100644
index 68b0169c8892..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/offloading.c
+++ /dev/null
@@ -1,217 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <net/ipv6.h>
66#include <net/addrconf.h>
67#include "mvm.h"
68
69void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
70 struct iwl_wowlan_config_cmd *cmd)
71{
72 int i;
73
74 /*
75 * For QoS counters, we store the one to use next, so subtract 0x10
76 * since the uCode will add 0x10 *before* using the value while we
77 * increment after using the value (i.e. store the next value to use).
78 */
79 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
80 u16 seq = mvm_ap_sta->tid_data[i].seq_number;
81 seq -= 0x10;
82 cmd->qos_seq[i] = cpu_to_le16(seq);
83 }
84}
85
86int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
87 struct ieee80211_vif *vif,
88 bool disable_offloading,
89 u32 cmd_flags)
90{
91 union {
92 struct iwl_proto_offload_cmd_v1 v1;
93 struct iwl_proto_offload_cmd_v2 v2;
94 struct iwl_proto_offload_cmd_v3_small v3s;
95 struct iwl_proto_offload_cmd_v3_large v3l;
96 } cmd = {};
97 struct iwl_host_cmd hcmd = {
98 .id = PROT_OFFLOAD_CONFIG_CMD,
99 .flags = cmd_flags,
100 .data[0] = &cmd,
101 .dataflags[0] = IWL_HCMD_DFL_DUP,
102 };
103 struct iwl_proto_offload_cmd_common *common;
104 u32 enabled = 0, size;
105 u32 capa_flags = mvm->fw->ucode_capa.flags;
106#if IS_ENABLED(CONFIG_IPV6)
107 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
108 int i;
109
110 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
111 capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
112 struct iwl_ns_config *nsc;
113 struct iwl_targ_addr *addrs;
114 int n_nsc, n_addrs;
115 int c;
116
117 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
118 nsc = cmd.v3s.ns_config;
119 n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
120 addrs = cmd.v3s.targ_addrs;
121 n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
122 } else {
123 nsc = cmd.v3l.ns_config;
124 n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
125 addrs = cmd.v3l.targ_addrs;
126 n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
127 }
128
129 if (mvmvif->num_target_ipv6_addrs)
130 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
131
132 /*
133 * For each address we have (and that will fit) fill a target
134 * address struct and combine for NS offload structs with the
135 * solicited node addresses.
136 */
137 for (i = 0, c = 0;
138 i < mvmvif->num_target_ipv6_addrs &&
139 i < n_addrs && c < n_nsc; i++) {
140 struct in6_addr solicited_addr;
141 int j;
142
143 addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
144 &solicited_addr);
145 for (j = 0; j < c; j++)
146 if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
147 &solicited_addr) == 0)
148 break;
149 if (j == c)
150 c++;
151 addrs[i].addr = mvmvif->target_ipv6_addrs[i];
152 addrs[i].config_num = cpu_to_le32(j);
153 nsc[j].dest_ipv6_addr = solicited_addr;
154 memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
155 }
156
157 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
158 cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
159 else
160 cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
161 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
162 if (mvmvif->num_target_ipv6_addrs) {
163 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
164 memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
165 }
166
167 BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
168 sizeof(mvmvif->target_ipv6_addrs[0]));
169
170 for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
171 IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
172 memcpy(cmd.v2.target_ipv6_addr[i],
173 &mvmvif->target_ipv6_addrs[i],
174 sizeof(cmd.v2.target_ipv6_addr[i]));
175 } else {
176 if (mvmvif->num_target_ipv6_addrs) {
177 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
178 memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
179 }
180
181 BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
182 sizeof(mvmvif->target_ipv6_addrs[0]));
183
184 for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
185 IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
186 memcpy(cmd.v1.target_ipv6_addr[i],
187 &mvmvif->target_ipv6_addrs[i],
188 sizeof(cmd.v1.target_ipv6_addr[i]));
189 }
190#endif
191
192 if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
193 common = &cmd.v3s.common;
194 size = sizeof(cmd.v3s);
195 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
196 common = &cmd.v3l.common;
197 size = sizeof(cmd.v3l);
198 } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
199 common = &cmd.v2.common;
200 size = sizeof(cmd.v2);
201 } else {
202 common = &cmd.v1.common;
203 size = sizeof(cmd.v1);
204 }
205
206 if (vif->bss_conf.arp_addr_cnt) {
207 enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
208 common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
209 memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
210 }
211
212 if (!disable_offloading)
213 common->enabled = cpu_to_le32(enabled);
214
215 hcmd.len[0] = size;
216 return iwl_mvm_send_cmd(mvm, &hcmd);
217}
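
A side note on iwl_mvm_set_wowlan_qos_seq() above and its counterpart in iwl_mvm_d0i3_enable_tx() in ops.c below: the driver stores the next QoS sequence number to use, while the uCode adds 0x10 before using a value handed to it, so the handoff subtracts 0x10 and the exit path adds it back. A minimal sketch of that round trip, with a made-up value, for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t next_seq = 0x0120;		/* driver: next seq to use */
	uint16_t handed_off = next_seq - 0x10;	/* value placed in the wowlan cmd */
	uint16_t restored = handed_off + 0x10;	/* added back on d0i3 exit */

	/* restored equals next_seq again */
	printf("handed off 0x%03x, restored 0x%03x\n", handed_off, restored);
	return 0;
}
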
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
deleted file mode 100644
index 13c97f665ba8..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ /dev/null
@@ -1,1434 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/module.h>
66#include <linux/vmalloc.h>
67#include <net/mac80211.h>
68
69#include "iwl-notif-wait.h"
70#include "iwl-trans.h"
71#include "iwl-op-mode.h"
72#include "iwl-fw.h"
73#include "iwl-debug.h"
74#include "iwl-drv.h"
75#include "iwl-modparams.h"
76#include "mvm.h"
77#include "iwl-phy-db.h"
78#include "iwl-eeprom-parse.h"
79#include "iwl-csr.h"
80#include "iwl-io.h"
81#include "iwl-prph.h"
82#include "rs.h"
83#include "fw-api-scan.h"
84#include "time-event.h"
85
86#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
87MODULE_DESCRIPTION(DRV_DESCRIPTION);
88MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
89MODULE_LICENSE("GPL");
90
91static const struct iwl_op_mode_ops iwl_mvm_ops;
92static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
93
94struct iwl_mvm_mod_params iwlmvm_mod_params = {
95 .power_scheme = IWL_POWER_SCHEME_BPS,
96 .tfd_q_hang_detect = true
97 /* rest of fields are 0 by default */
98};
99
100module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
101MODULE_PARM_DESC(init_dbg,
102		 "set to true to debug an ASSERT in INIT fw (default: false)");
103module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
104MODULE_PARM_DESC(power_scheme,
105 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
106module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
107 bool, S_IRUGO);
108MODULE_PARM_DESC(tfd_q_hang_detect,
109		 "TFD queues hang detection (default: true)");
110
111/*
112 * module init and exit functions
113 */
114static int __init iwl_mvm_init(void)
115{
116 int ret;
117
118 ret = iwl_mvm_rate_control_register();
119 if (ret) {
120 pr_err("Unable to register rate control algorithm: %d\n", ret);
121 return ret;
122 }
123
124 ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
125
126 if (ret) {
127 pr_err("Unable to register MVM op_mode: %d\n", ret);
128 iwl_mvm_rate_control_unregister();
129 }
130
131 return ret;
132}
133module_init(iwl_mvm_init);
134
135static void __exit iwl_mvm_exit(void)
136{
137 iwl_opmode_deregister("iwlmvm");
138 iwl_mvm_rate_control_unregister();
139}
140module_exit(iwl_mvm_exit);
141
142static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
143{
144 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
145 u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
146 u32 reg_val = 0;
147 u32 phy_config = iwl_mvm_get_phy_config(mvm);
148
149 radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
150 FW_PHY_CFG_RADIO_TYPE_POS;
151 radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
152 FW_PHY_CFG_RADIO_STEP_POS;
153 radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
154 FW_PHY_CFG_RADIO_DASH_POS;
155
156 /* SKU control */
157 reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
158 CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
159 reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
160 CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
161
162 /* radio configuration */
163 reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
164 reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
165 reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
166
167 WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
168 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
169
170 /*
171 * TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and
172 * shouldn't be set to any non-zero value. The same is supposed to be
173 * true of the other HW, but unsetting them (such as the 7260) causes
174 * automatic tests to fail on seemingly unrelated errors. Need to
175 * further investigate this, but for now we'll separate cases.
176 */
177 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
178 reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
179
180 iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
181 CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
182 CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
183 CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
184 CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
185 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
186 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
187 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
188 reg_val);
189
190 IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
191 radio_cfg_step, radio_cfg_dash);
192
193 /*
194 * W/A : NIC is stuck in a reset state after Early PCIe power off
195	 * (PCIe power is lost before PERST# is asserted), causing the ME FW
196	 * to lose ownership and be unable to obtain it back.
197 */
198 if (!mvm->trans->cfg->apmg_not_supported)
199 iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
200 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
201 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
202}
203
204struct iwl_rx_handlers {
205 u16 cmd_id;
206 bool async;
207 void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
208};
209
210#define RX_HANDLER(_cmd_id, _fn, _async) \
211 { .cmd_id = _cmd_id , .fn = _fn , .async = _async }
212#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \
213 { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
214
215/*
216 * Handlers for fw notifications
217 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME)
218 * This list should be in order of frequency for performance purposes.
219 *
220 * The handler can be SYNC - this means that it will be called in the Rx path
221 * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
222 * only in this case!), it should be set as ASYNC. In that case, it will be
223 * called from a worker with mvm->mutex held.
224 */
225static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
226 RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
227 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
228
229 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
230 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
231 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
232 RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
233 iwl_mvm_rx_ant_coupling_notif, true),
234
235 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
236 RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
237
238 RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
239
240 RX_HANDLER(SCAN_ITERATION_COMPLETE,
241 iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
242 RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
243 iwl_mvm_rx_lmac_scan_complete_notif, true),
244 RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
245 false),
246 RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
247 true),
248 RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
249 iwl_mvm_rx_umac_scan_iter_complete_notif, false),
250
251 RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
252
253 RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
254 false),
255
256 RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
257 RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
258 iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
259 RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
260 RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
261 iwl_mvm_temp_notif, true),
262
263 RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
264 true),
265 RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
266 RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
267
268};
269#undef RX_HANDLER
270#undef RX_HANDLER_GRP
271#define CMD(x) [x] = #x
272
273static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = {
274 CMD(MVM_ALIVE),
275 CMD(REPLY_ERROR),
276 CMD(ECHO_CMD),
277 CMD(INIT_COMPLETE_NOTIF),
278 CMD(PHY_CONTEXT_CMD),
279 CMD(MGMT_MCAST_KEY),
280 CMD(TX_CMD),
281 CMD(TXPATH_FLUSH),
282 CMD(SHARED_MEM_CFG),
283 CMD(MAC_CONTEXT_CMD),
284 CMD(TIME_EVENT_CMD),
285 CMD(TIME_EVENT_NOTIFICATION),
286 CMD(BINDING_CONTEXT_CMD),
287 CMD(TIME_QUOTA_CMD),
288 CMD(NON_QOS_TX_COUNTER_CMD),
289 CMD(DC2DC_CONFIG_CMD),
290 CMD(NVM_ACCESS_CMD),
291 CMD(PHY_CONFIGURATION_CMD),
292 CMD(CALIB_RES_NOTIF_PHY_DB),
293 CMD(SET_CALIB_DEFAULT_CMD),
294 CMD(FW_PAGING_BLOCK_CMD),
295 CMD(ADD_STA_KEY),
296 CMD(ADD_STA),
297 CMD(FW_GET_ITEM_CMD),
298 CMD(REMOVE_STA),
299 CMD(LQ_CMD),
300 CMD(SCAN_OFFLOAD_CONFIG_CMD),
301 CMD(MATCH_FOUND_NOTIFICATION),
302 CMD(SCAN_OFFLOAD_REQUEST_CMD),
303 CMD(SCAN_OFFLOAD_ABORT_CMD),
304 CMD(HOT_SPOT_CMD),
305 CMD(SCAN_OFFLOAD_COMPLETE),
306 CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
307 CMD(SCAN_ITERATION_COMPLETE),
308 CMD(POWER_TABLE_CMD),
309 CMD(WEP_KEY),
310 CMD(REPLY_RX_PHY_CMD),
311 CMD(REPLY_RX_MPDU_CMD),
312 CMD(BEACON_NOTIFICATION),
313 CMD(BEACON_TEMPLATE_CMD),
314 CMD(STATISTICS_CMD),
315 CMD(STATISTICS_NOTIFICATION),
316 CMD(EOSP_NOTIFICATION),
317 CMD(REDUCE_TX_POWER_CMD),
318 CMD(TX_ANT_CONFIGURATION_CMD),
319 CMD(D3_CONFIG_CMD),
320 CMD(D0I3_END_CMD),
321 CMD(PROT_OFFLOAD_CONFIG_CMD),
322 CMD(OFFLOADS_QUERY_CMD),
323 CMD(REMOTE_WAKE_CONFIG_CMD),
324 CMD(WOWLAN_PATTERNS),
325 CMD(WOWLAN_CONFIGURATION),
326 CMD(WOWLAN_TSC_RSC_PARAM),
327 CMD(WOWLAN_TKIP_PARAM),
328 CMD(WOWLAN_KEK_KCK_MATERIAL),
329 CMD(WOWLAN_GET_STATUSES),
330 CMD(WOWLAN_TX_POWER_PER_DB),
331 CMD(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
332 CMD(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD),
333 CMD(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD),
334 CMD(CARD_STATE_NOTIFICATION),
335 CMD(MISSED_BEACONS_NOTIFICATION),
336 CMD(BT_COEX_PRIO_TABLE),
337 CMD(BT_COEX_PROT_ENV),
338 CMD(BT_PROFILE_NOTIFICATION),
339 CMD(BT_CONFIG),
340 CMD(MCAST_FILTER_CMD),
341 CMD(BCAST_FILTER_CMD),
342 CMD(REPLY_SF_CFG_CMD),
343 CMD(REPLY_BEACON_FILTERING_CMD),
344 CMD(CMD_DTS_MEASUREMENT_TRIGGER),
345 CMD(DTS_MEASUREMENT_NOTIFICATION),
346 CMD(REPLY_THERMAL_MNG_BACKOFF),
347 CMD(MAC_PM_POWER_TABLE),
348 CMD(LTR_CONFIG),
349 CMD(BT_COEX_CI),
350 CMD(BT_COEX_UPDATE_SW_BOOST),
351 CMD(BT_COEX_UPDATE_CORUN_LUT),
352 CMD(BT_COEX_UPDATE_REDUCED_TXP),
353 CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
354 CMD(ANTENNA_COUPLING_NOTIFICATION),
355 CMD(SCD_QUEUE_CFG),
356 CMD(SCAN_CFG_CMD),
357 CMD(SCAN_REQ_UMAC),
358 CMD(SCAN_ABORT_UMAC),
359 CMD(SCAN_COMPLETE_UMAC),
360 CMD(TDLS_CHANNEL_SWITCH_CMD),
361 CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
362 CMD(TDLS_CONFIG_CMD),
363 CMD(MCC_UPDATE_CMD),
364 CMD(SCAN_ITERATION_COMPLETE_UMAC),
365};
366#undef CMD
367
368/* this forward declaration avoids having to export the function */
369static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
370static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
371
372static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
373{
374 const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
375
376 if (!pwr_tx_backoff)
377 return 0;
378
379 while (pwr_tx_backoff->pwr) {
380 if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
381 return pwr_tx_backoff->backoff;
382
383 pwr_tx_backoff++;
384 }
385
386 return 0;
387}
388
389static void iwl_mvm_fw_error_dump_wk(struct work_struct *work);
390
391static struct iwl_op_mode *
392iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
393 const struct iwl_fw *fw, struct dentry *dbgfs_dir)
394{
395 struct ieee80211_hw *hw;
396 struct iwl_op_mode *op_mode;
397 struct iwl_mvm *mvm;
398 struct iwl_trans_config trans_cfg = {};
399 static const u8 no_reclaim_cmds[] = {
400 TX_CMD,
401 };
402 int err, scan_size;
403 u32 min_backoff;
404
405 /*
406 * We use IWL_MVM_STATION_COUNT to check the validity of the station
407 * index all over the driver - check that its value corresponds to the
408 * array size.
409 */
410 BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
411
412 /********************************
413 * 1. Allocating and configuring HW data
414 ********************************/
415 hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
416 sizeof(struct iwl_mvm),
417 &iwl_mvm_hw_ops);
418 if (!hw)
419 return NULL;
420
421 if (cfg->max_rx_agg_size)
422 hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
423
424 if (cfg->max_tx_agg_size)
425 hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
426
427 op_mode = hw->priv;
428
429 mvm = IWL_OP_MODE_GET_MVM(op_mode);
430 mvm->dev = trans->dev;
431 mvm->trans = trans;
432 mvm->cfg = cfg;
433 mvm->fw = fw;
434 mvm->hw = hw;
435
436 if (iwl_mvm_has_new_rx_api(mvm)) {
437 op_mode->ops = &iwl_mvm_ops_mq;
438 } else {
439 op_mode->ops = &iwl_mvm_ops;
440
441 if (WARN_ON(trans->num_rx_queues > 1))
442 goto out_free;
443 }
444
445 mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
446
447 mvm->aux_queue = 15;
448 mvm->first_agg_queue = 16;
449 mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
450 if (mvm->cfg->base_params->num_of_queues == 16) {
451 mvm->aux_queue = 11;
452 mvm->first_agg_queue = 12;
453 }
454 mvm->sf_state = SF_UNINIT;
455 mvm->low_latency_agg_frame_limit = 6;
456 mvm->cur_ucode = IWL_UCODE_INIT;
457
458 mutex_init(&mvm->mutex);
459 mutex_init(&mvm->d0i3_suspend_mutex);
460 spin_lock_init(&mvm->async_handlers_lock);
461 INIT_LIST_HEAD(&mvm->time_event_list);
462 INIT_LIST_HEAD(&mvm->aux_roc_te_list);
463 INIT_LIST_HEAD(&mvm->async_handlers_list);
464 spin_lock_init(&mvm->time_event_lock);
465 spin_lock_init(&mvm->queue_info_lock);
466
467 INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
468 INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
469 INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
470 INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
471 INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
472 INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
473
474 spin_lock_init(&mvm->d0i3_tx_lock);
475 spin_lock_init(&mvm->refs_lock);
476 skb_queue_head_init(&mvm->d0i3_tx);
477 init_waitqueue_head(&mvm->d0i3_exit_waitq);
478
479 SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
480
481 /*
482 * Populate the state variables that the transport layer needs
483 * to know about.
484 */
485 trans_cfg.op_mode = op_mode;
486 trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
487 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
488 trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
489 trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
490 IWL_UCODE_TLV_API_WIDE_CMD_HDR);
491
492 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
493 trans_cfg.bc_table_dword = true;
494
495 trans_cfg.command_names = iwl_mvm_cmd_strings;
496
497 trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
498 trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
499 trans_cfg.scd_set_active = true;
500
501 trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
502
503 /* Set a short watchdog for the command queue */
504 trans_cfg.cmd_q_wdg_timeout =
505 iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
506
507 snprintf(mvm->hw->wiphy->fw_version,
508 sizeof(mvm->hw->wiphy->fw_version),
509 "%s", fw->fw_version);
510
511 /* Configure transport layer */
512 iwl_trans_configure(mvm->trans, &trans_cfg);
513
514 trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
515 trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
516 trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
517 trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
518 memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
519 sizeof(trans->dbg_conf_tlv));
520 trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
521
522 /* set up notification wait support */
523 iwl_notification_wait_init(&mvm->notif_wait);
524
525 /* Init phy db */
526 mvm->phy_db = iwl_phy_db_init(trans);
527 if (!mvm->phy_db) {
528 IWL_ERR(mvm, "Cannot init phy_db\n");
529 goto out_free;
530 }
531
532 IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
533 mvm->cfg->name, mvm->trans->hw_rev);
534
535 min_backoff = calc_min_backoff(trans, cfg);
536 iwl_mvm_tt_initialize(mvm, min_backoff);
537
538 if (iwlwifi_mod_params.nvm_file)
539 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
540 else
541 IWL_DEBUG_EEPROM(mvm->trans->dev,
542 "working without external nvm file\n");
543
544 if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
545 "not allowing power-up and not having nvm_file\n"))
546 goto out_free;
547
548 /*
549	 * Even if the NVM exists in nvm_file, the driver should read the NVM
550	 * again from the NIC, because there might be entries that exist in the
551	 * OTP and not in the file.
552	 * For NICs with no_power_up_nic_in_init: rely completely on nvm_file.
553 */
554 if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
555 err = iwl_nvm_init(mvm, false);
556 if (err)
557 goto out_free;
558 } else {
559 err = iwl_trans_start_hw(mvm->trans);
560 if (err)
561 goto out_free;
562
563 mutex_lock(&mvm->mutex);
564 err = iwl_run_init_mvm_ucode(mvm, true);
565 if (!err || !iwlmvm_mod_params.init_dbg)
566 iwl_trans_stop_device(trans);
567 mutex_unlock(&mvm->mutex);
568 /* returns 0 if successful, 1 if success but in rfkill */
569 if (err < 0 && !iwlmvm_mod_params.init_dbg) {
570 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
571 goto out_free;
572 }
573 }
574
575 scan_size = iwl_mvm_scan_size(mvm);
576
577 mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
578 if (!mvm->scan_cmd)
579 goto out_free;
580
581 /* Set EBS as successful as long as not stated otherwise by the FW. */
582 mvm->last_ebs_successful = true;
583
584 err = iwl_mvm_mac_setup_register(mvm);
585 if (err)
586 goto out_free;
587
588 err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
589 if (err)
590 goto out_unregister;
591
592 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
593
594 /* rpm starts with a taken ref. only set the appropriate bit here. */
595 mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;
596
597 iwl_mvm_tof_init(mvm);
598
599 return op_mode;
600
601 out_unregister:
602 ieee80211_unregister_hw(mvm->hw);
603 iwl_mvm_leds_exit(mvm);
604 out_free:
605 flush_delayed_work(&mvm->fw_dump_wk);
606 iwl_phy_db_free(mvm->phy_db);
607 kfree(mvm->scan_cmd);
608 if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
609 iwl_trans_op_mode_leave(trans);
610 ieee80211_free_hw(mvm->hw);
611 return NULL;
612}
613
614static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
615{
616 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
617 int i;
618
619 iwl_mvm_leds_exit(mvm);
620
621 iwl_mvm_tt_exit(mvm);
622
623 ieee80211_unregister_hw(mvm->hw);
624
625 kfree(mvm->scan_cmd);
626 kfree(mvm->mcast_filter_cmd);
627 mvm->mcast_filter_cmd = NULL;
628
629#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
630 kfree(mvm->d3_resume_sram);
631 if (mvm->nd_config) {
632 kfree(mvm->nd_config->match_sets);
633 kfree(mvm->nd_config->scan_plans);
634 kfree(mvm->nd_config);
635 mvm->nd_config = NULL;
636 }
637#endif
638
639 iwl_trans_op_mode_leave(mvm->trans);
640
641 iwl_phy_db_free(mvm->phy_db);
642 mvm->phy_db = NULL;
643
644 iwl_free_nvm_data(mvm->nvm_data);
645 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
646 kfree(mvm->nvm_sections[i].data);
647
648 iwl_mvm_tof_clean(mvm);
649
650 ieee80211_free_hw(mvm->hw);
651}
652
653struct iwl_async_handler_entry {
654 struct list_head list;
655 struct iwl_rx_cmd_buffer rxb;
656 void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
657};
658
659void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
660{
661 struct iwl_async_handler_entry *entry, *tmp;
662
663 spin_lock_bh(&mvm->async_handlers_lock);
664 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
665 iwl_free_rxb(&entry->rxb);
666 list_del(&entry->list);
667 kfree(entry);
668 }
669 spin_unlock_bh(&mvm->async_handlers_lock);
670}
671
672static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
673{
674 struct iwl_mvm *mvm =
675 container_of(wk, struct iwl_mvm, async_handlers_wk);
676 struct iwl_async_handler_entry *entry, *tmp;
677 struct list_head local_list;
678
679 INIT_LIST_HEAD(&local_list);
680
681 /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
682 mutex_lock(&mvm->mutex);
683
684 /*
685 * Sync with Rx path with a lock. Remove all the entries from this list,
686 * add them to a local one (lock free), and then handle them.
687 */
688 spin_lock_bh(&mvm->async_handlers_lock);
689 list_splice_init(&mvm->async_handlers_list, &local_list);
690 spin_unlock_bh(&mvm->async_handlers_lock);
691
692 list_for_each_entry_safe(entry, tmp, &local_list, list) {
693 entry->fn(mvm, &entry->rxb);
694 iwl_free_rxb(&entry->rxb);
695 list_del(&entry->list);
696 kfree(entry);
697 }
698 mutex_unlock(&mvm->mutex);
699}
700
701static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
702 struct iwl_rx_packet *pkt)
703{
704 struct iwl_fw_dbg_trigger_tlv *trig;
705 struct iwl_fw_dbg_trigger_cmd *cmds_trig;
706 int i;
707
708 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
709 return;
710
711 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
712 cmds_trig = (void *)trig->data;
713
714 if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
715 return;
716
717 for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
718 /* don't collect on CMD 0 */
719 if (!cmds_trig->cmds[i].cmd_id)
720 break;
721
722 if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
723 cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
724 continue;
725
726 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
727 "CMD 0x%02x.%02x received",
728 pkt->hdr.group_id, pkt->hdr.cmd);
729 break;
730 }
731}
732
733static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
734 struct iwl_rx_cmd_buffer *rxb,
735 struct iwl_rx_packet *pkt)
736{
737 int i;
738
739 iwl_mvm_rx_check_trigger(mvm, pkt);
740
741 /*
742 * Do the notification wait before RX handlers so
743 * even if the RX handler consumes the RXB we have
744 * access to it in the notification wait entry.
745 */
746 iwl_notification_wait_notify(&mvm->notif_wait, pkt);
747
748 for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
749 const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
750 struct iwl_async_handler_entry *entry;
751
752 if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
753 continue;
754
755 if (!rx_h->async) {
756 rx_h->fn(mvm, rxb);
757 return;
758 }
759
760 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
761 /* we can't do much... */
762 if (!entry)
763 return;
764
765 entry->rxb._page = rxb_steal_page(rxb);
766 entry->rxb._offset = rxb->_offset;
767 entry->rxb._rx_page_order = rxb->_rx_page_order;
768 entry->fn = rx_h->fn;
769 spin_lock(&mvm->async_handlers_lock);
770 list_add_tail(&entry->list, &mvm->async_handlers_list);
771 spin_unlock(&mvm->async_handlers_lock);
772 schedule_work(&mvm->async_handlers_wk);
773 break;
774 }
775}
776
777static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
778 struct napi_struct *napi,
779 struct iwl_rx_cmd_buffer *rxb)
780{
781 struct iwl_rx_packet *pkt = rxb_addr(rxb);
782 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
783
784 if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
785 iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
786 else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
787 iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
788 else
789 iwl_mvm_rx_common(mvm, rxb, pkt);
790}
791
792static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
793 struct napi_struct *napi,
794 struct iwl_rx_cmd_buffer *rxb)
795{
796 struct iwl_rx_packet *pkt = rxb_addr(rxb);
797 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
798
799 if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
800 iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
801 else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
802 iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
803 else
804 iwl_mvm_rx_common(mvm, rxb, pkt);
805}
806
807static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
808{
809 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
810 unsigned long mq;
811 int q;
812
813 spin_lock_bh(&mvm->queue_info_lock);
814 mq = mvm->queue_info[queue].hw_queue_to_mac80211;
815 spin_unlock_bh(&mvm->queue_info_lock);
816
817 if (WARN_ON_ONCE(!mq))
818 return;
819
820 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
821 if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
822 IWL_DEBUG_TX_QUEUES(mvm,
823 "queue %d (mac80211 %d) already stopped\n",
824 queue, q);
825 continue;
826 }
827
828 ieee80211_stop_queue(mvm->hw, q);
829 }
830}
831
832static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
833{
834 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
835 unsigned long mq;
836 int q;
837
838 spin_lock_bh(&mvm->queue_info_lock);
839 mq = mvm->queue_info[queue].hw_queue_to_mac80211;
840 spin_unlock_bh(&mvm->queue_info_lock);
841
842 if (WARN_ON_ONCE(!mq))
843 return;
844
845 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
846 if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
847 IWL_DEBUG_TX_QUEUES(mvm,
848 "queue %d (mac80211 %d) still stopped\n",
849 queue, q);
850 continue;
851 }
852
853 ieee80211_wake_queue(mvm->hw, q);
854 }
855}
856
857void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
858{
859 if (state)
860 set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
861 else
862 clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
863
864 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
865}
866
867static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
868{
869 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
870 bool calibrating = ACCESS_ONCE(mvm->calibrating);
871
872 if (state)
873 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
874 else
875 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
876
877 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
878
879 /* iwl_run_init_mvm_ucode is waiting for results, abort it */
880 if (calibrating)
881 iwl_abort_notification_waits(&mvm->notif_wait);
882
883 /*
884 * Stop the device if we run OPERATIONAL firmware or if we are in the
885 * middle of the calibrations.
886 */
887 return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
888}
889
890static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
891{
892 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
893 struct ieee80211_tx_info *info;
894
895 info = IEEE80211_SKB_CB(skb);
896 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
897 ieee80211_free_txskb(mvm->hw, skb);
898}
899
900struct iwl_mvm_reprobe {
901 struct device *dev;
902 struct work_struct work;
903};
904
905static void iwl_mvm_reprobe_wk(struct work_struct *wk)
906{
907 struct iwl_mvm_reprobe *reprobe;
908
909 reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
910 if (device_reprobe(reprobe->dev))
911 dev_err(reprobe->dev, "reprobe failed!\n");
912 kfree(reprobe);
913 module_put(THIS_MODULE);
914}
915
916static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
917{
918 struct iwl_mvm *mvm =
919 container_of(work, struct iwl_mvm, fw_dump_wk.work);
920
921 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
922 return;
923
924 mutex_lock(&mvm->mutex);
925
926 /* stop recording */
927 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
928 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
929 } else {
930 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
931		/* wait before collecting the data until the DBGC stops */
932 udelay(100);
933 }
934
935 iwl_mvm_fw_error_dump(mvm);
936
937 /* start recording again if the firmware is not crashed */
938 WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
939 mvm->fw->dbg_dest_tlv &&
940 iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
941
942 mutex_unlock(&mvm->mutex);
943
944 iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
945}
946
947void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
948{
949 iwl_abort_notification_waits(&mvm->notif_wait);
950
951 /*
952 * This is a bit racy, but worst case we tell mac80211 about
953 * a stopped/aborted scan when that was already done which
954 * is not a problem. It is necessary to abort any os scan
955 * here because mac80211 requires having the scan cleared
956 * before restarting.
957 * We'll reset the scan_status to NONE in restart cleanup in
958 * the next start() call from mac80211. If restart isn't called
959 * (no fw restart) scan status will stay busy.
960 */
961 iwl_mvm_report_scan_aborted(mvm);
962
963 /*
964 * If we're restarting already, don't cycle restarts.
965 * If INIT fw asserted, it will likely fail again.
966 * If WoWLAN fw asserted, don't restart either, mac80211
967 * can't recover this since we're already half suspended.
968 */
969 if (!mvm->restart_fw && fw_error) {
970 iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
971 NULL);
972 } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
973 &mvm->status)) {
974 struct iwl_mvm_reprobe *reprobe;
975
976 IWL_ERR(mvm,
977 "Firmware error during reconfiguration - reprobe!\n");
978
979 /*
980 * get a module reference to avoid doing this while unloading
981 * anyway and to avoid scheduling a work with code that's
982 * being removed.
983 */
984 if (!try_module_get(THIS_MODULE)) {
985 IWL_ERR(mvm, "Module is being unloaded - abort\n");
986 return;
987 }
988
989 reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
990 if (!reprobe) {
991 module_put(THIS_MODULE);
992 return;
993 }
994 reprobe->dev = mvm->trans->dev;
995 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
996 schedule_work(&reprobe->work);
997 } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
998 /* don't let the transport/FW power down */
999 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1000
1001 if (fw_error && mvm->restart_fw > 0)
1002 mvm->restart_fw--;
1003 ieee80211_restart_hw(mvm->hw);
1004 }
1005}
1006
1007static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
1008{
1009 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1010
1011 iwl_mvm_dump_nic_error_log(mvm);
1012
1013 iwl_mvm_nic_restart(mvm, true);
1014}
1015
1016static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
1017{
1018 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1019
1020 WARN_ON(1);
1021 iwl_mvm_nic_restart(mvm, true);
1022}
1023
1024struct iwl_d0i3_iter_data {
1025 struct iwl_mvm *mvm;
1026 u8 ap_sta_id;
1027 u8 vif_count;
1028 u8 offloading_tid;
1029 bool disable_offloading;
1030};
1031
1032static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
1033 struct ieee80211_vif *vif,
1034 struct iwl_d0i3_iter_data *iter_data)
1035{
1036 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1037 struct ieee80211_sta *ap_sta;
1038 struct iwl_mvm_sta *mvmsta;
1039 u32 available_tids = 0;
1040 u8 tid;
1041
1042 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
1043 mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
1044 return false;
1045
1046 ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
1047 if (IS_ERR_OR_NULL(ap_sta))
1048 return false;
1049
1050 mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
1051 spin_lock_bh(&mvmsta->lock);
1052 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1053 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1054
1055 /*
1056 * in case of pending tx packets, don't use this tid
1057 * for offloading in order to prevent reuse of the same
1058 * qos seq counters.
1059 */
1060 if (iwl_mvm_tid_queued(tid_data))
1061 continue;
1062
1063 if (tid_data->state != IWL_AGG_OFF)
1064 continue;
1065
1066 available_tids |= BIT(tid);
1067 }
1068 spin_unlock_bh(&mvmsta->lock);
1069
1070 /*
1071 * disallow protocol offloading if we have no available tid
1072 * (with no pending frames and no active aggregation,
1073 * as we don't handle "holes" properly - the scheduler needs the
1074 * frame's seq number and TFD index to match)
1075 */
1076 if (!available_tids)
1077 return true;
1078
1079 /* for simplicity, just use the first available tid */
1080 iter_data->offloading_tid = ffs(available_tids) - 1;
1081 return false;
1082}
1083
1084static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
1085 struct ieee80211_vif *vif)
1086{
1087 struct iwl_d0i3_iter_data *data = _data;
1088 struct iwl_mvm *mvm = data->mvm;
1089 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1090 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
1091
1092 IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
1093 if (vif->type != NL80211_IFTYPE_STATION ||
1094 !vif->bss_conf.assoc)
1095 return;
1096
1097 /*
1098 * in case of pending tx packets or active aggregations,
1099 * avoid offloading features in order to prevent reuse of
1100 * the same qos seq counters.
1101 */
1102 if (iwl_mvm_disallow_offloading(mvm, vif, data))
1103 data->disable_offloading = true;
1104
1105 iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
1106 iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags);
1107
1108 /*
1109 * on init/association, mvm already configures POWER_TABLE_CMD
1110 * and REPLY_MCAST_FILTER_CMD, so currently don't
1111 * reconfigure them (we might want to use different
1112 * params later on, though).
1113 */
1114 data->ap_sta_id = mvmvif->ap_sta_id;
1115 data->vif_count++;
1116}
1117
1118static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
1119 struct iwl_wowlan_config_cmd *cmd,
1120 struct iwl_d0i3_iter_data *iter_data)
1121{
1122 struct ieee80211_sta *ap_sta;
1123 struct iwl_mvm_sta *mvm_ap_sta;
1124
1125 if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
1126 return;
1127
1128 rcu_read_lock();
1129
1130 ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
1131 if (IS_ERR_OR_NULL(ap_sta))
1132 goto out;
1133
1134 mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
1135 cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
1136 cmd->offloading_tid = iter_data->offloading_tid;
1137
1138 /*
1139 * The d0i3 uCode takes care of the nonqos counters,
1140 * so configure only the qos seq ones.
1141 */
1142 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
1143out:
1144 rcu_read_unlock();
1145}
1146
1147int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
1148{
1149 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1150 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
1151 int ret;
1152 struct iwl_d0i3_iter_data d0i3_iter_data = {
1153 .mvm = mvm,
1154 };
1155 struct iwl_wowlan_config_cmd wowlan_config_cmd = {
1156 .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
1157 IWL_WOWLAN_WAKEUP_BEACON_MISS |
1158 IWL_WOWLAN_WAKEUP_LINK_CHANGE |
1159 IWL_WOWLAN_WAKEUP_BCN_FILTERING),
1160 };
1161 struct iwl_d3_manager_config d3_cfg_cmd = {
1162 .min_sleep_time = cpu_to_le32(1000),
1163 .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
1164 };
1165
1166 IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
1167
1168 set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1169
1170 /*
1171 * iwl_mvm_ref_sync takes a reference before checking the flag.
1172	 * So, by checking that there is no held reference, we prevent a state
1173	 * in which iwl_mvm_ref_sync continues successfully while we
1174	 * configure the firmware to enter d0i3.
1175 */
1176 if (iwl_mvm_ref_taken(mvm)) {
1177 IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
1178 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1179 wake_up(&mvm->d0i3_exit_waitq);
1180 return 1;
1181 }
1182
1183 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1184 IEEE80211_IFACE_ITER_NORMAL,
1185 iwl_mvm_enter_d0i3_iterator,
1186 &d0i3_iter_data);
1187 if (d0i3_iter_data.vif_count == 1) {
1188 mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
1189 mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
1190 } else {
1191 WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
1192 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1193 mvm->d0i3_offloading = false;
1194 }
1195
1196 /* make sure we have no running tx while configuring the seqno */
1197 synchronize_net();
1198
1199	/* send the wowlan configuration only if needed */
1200 if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
1201 iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
1202 &d0i3_iter_data);
1203
1204 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
1205 sizeof(wowlan_config_cmd),
1206 &wowlan_config_cmd);
1207 if (ret)
1208 return ret;
1209 }
1210
1211 return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
1212 flags | CMD_MAKE_TRANS_IDLE,
1213 sizeof(d3_cfg_cmd), &d3_cfg_cmd);
1214}
1215
1216static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
1217 struct ieee80211_vif *vif)
1218{
1219 struct iwl_mvm *mvm = _data;
1220 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
1221
1222 IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
1223 if (vif->type != NL80211_IFTYPE_STATION ||
1224 !vif->bss_conf.assoc)
1225 return;
1226
1227 iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
1228}
1229
1230struct iwl_mvm_wakeup_reason_iter_data {
1231 struct iwl_mvm *mvm;
1232 u32 wakeup_reasons;
1233};
1234
1235static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac,
1236 struct ieee80211_vif *vif)
1237{
1238 struct iwl_mvm_wakeup_reason_iter_data *data = _data;
1239 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1240
1241 if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
1242 data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) {
1243 if (data->wakeup_reasons &
1244 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
1245 iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
1246 else
1247 ieee80211_beacon_loss(vif);
1248 }
1249}
1250
1251void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
1252{
1253 struct ieee80211_sta *sta = NULL;
1254 struct iwl_mvm_sta *mvm_ap_sta;
1255 int i;
1256 bool wake_queues = false;
1257
1258 lockdep_assert_held(&mvm->mutex);
1259
1260 spin_lock_bh(&mvm->d0i3_tx_lock);
1261
1262 if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
1263 goto out;
1264
1265 IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
1266
1267 /* get the sta in order to update seq numbers and re-enqueue skbs */
1268 sta = rcu_dereference_protected(
1269 mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
1270 lockdep_is_held(&mvm->mutex));
1271
1272 if (IS_ERR_OR_NULL(sta)) {
1273 sta = NULL;
1274 goto out;
1275 }
1276
1277 if (mvm->d0i3_offloading && qos_seq) {
1278 /* update qos seq numbers if offloading was enabled */
1279 mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
1280 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1281 u16 seq = le16_to_cpu(qos_seq[i]);
1282 /* firmware stores last-used one, we store next one */
1283 seq += 0x10;
1284 mvm_ap_sta->tid_data[i].seq_number = seq;
1285 }
1286 }
1287out:
1288 /* re-enqueue (or drop) all packets */
1289 while (!skb_queue_empty(&mvm->d0i3_tx)) {
1290 struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
1291
1292 if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
1293 ieee80211_free_txskb(mvm->hw, skb);
1294
1295 /* if the skb_queue is not empty, we need to wake queues */
1296 wake_queues = true;
1297 }
1298 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1299 wake_up(&mvm->d0i3_exit_waitq);
1300 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1301 if (wake_queues)
1302 ieee80211_wake_queues(mvm->hw);
1303
1304 spin_unlock_bh(&mvm->d0i3_tx_lock);
1305}
1306
1307static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1308{
1309 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
1310 struct iwl_host_cmd get_status_cmd = {
1311 .id = WOWLAN_GET_STATUSES,
1312 .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
1313 };
1314 struct iwl_wowlan_status *status;
1315 int ret;
1316 u32 handled_reasons, wakeup_reasons = 0;
1317 __le16 *qos_seq = NULL;
1318
1319 mutex_lock(&mvm->mutex);
1320 ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
1321 if (ret)
1322 goto out;
1323
1324 if (!get_status_cmd.resp_pkt)
1325 goto out;
1326
1327 status = (void *)get_status_cmd.resp_pkt->data;
1328 wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
1329 qos_seq = status->qos_seq_ctr;
1330
1331 IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
1332
1333 handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1334 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
1335 if (wakeup_reasons & handled_reasons) {
1336 struct iwl_mvm_wakeup_reason_iter_data data = {
1337 .mvm = mvm,
1338 .wakeup_reasons = wakeup_reasons,
1339 };
1340
1341 ieee80211_iterate_active_interfaces(
1342 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1343 iwl_mvm_d0i3_wakeup_reason_iter, &data);
1344 }
1345out:
1346 iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
1347
1348 IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
1349 wakeup_reasons);
1350
1351 /* qos_seq might point inside resp_pkt, so free it only now */
1352 if (get_status_cmd.resp_pkt)
1353 iwl_free_resp(&get_status_cmd);
1354
1355 /* the FW might have updated the regdomain */
1356 iwl_mvm_update_changed_regdom(mvm);
1357
1358 iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
1359 mutex_unlock(&mvm->mutex);
1360}
1361
1362int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
1363{
1364 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
1365 CMD_WAKE_UP_TRANS;
1366 int ret;
1367
1368 IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
1369
1370 mutex_lock(&mvm->d0i3_suspend_mutex);
1371 if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
1372 IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
1373 __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
1374 mutex_unlock(&mvm->d0i3_suspend_mutex);
1375 return 0;
1376 }
1377 mutex_unlock(&mvm->d0i3_suspend_mutex);
1378
1379 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
1380 if (ret)
1381 goto out;
1382
1383 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1384 IEEE80211_IFACE_ITER_NORMAL,
1385 iwl_mvm_exit_d0i3_iterator,
1386 mvm);
1387out:
1388 schedule_work(&mvm->d0i3_exit_work);
1389 return ret;
1390}
1391
1392int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
1393{
1394 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1395
1396 iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
1397 return _iwl_mvm_exit_d0i3(mvm);
1398}
1399
1400#define IWL_MVM_COMMON_OPS \
1401 /* these could be differentiated */ \
1402 .queue_full = iwl_mvm_stop_sw_queue, \
1403 .queue_not_full = iwl_mvm_wake_sw_queue, \
1404 .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
1405 .free_skb = iwl_mvm_free_skb, \
1406 .nic_error = iwl_mvm_nic_error, \
1407 .cmd_queue_full = iwl_mvm_cmd_queue_full, \
1408 .nic_config = iwl_mvm_nic_config, \
1409 .enter_d0i3 = iwl_mvm_enter_d0i3, \
1410 .exit_d0i3 = iwl_mvm_exit_d0i3, \
1411 /* as we only register one, these MUST be common! */ \
1412 .start = iwl_op_mode_mvm_start, \
1413 .stop = iwl_op_mode_mvm_stop
1414
1415static const struct iwl_op_mode_ops iwl_mvm_ops = {
1416 IWL_MVM_COMMON_OPS,
1417 .rx = iwl_mvm_rx,
1418};
1419
1420static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
1421 struct napi_struct *napi,
1422 struct iwl_rx_cmd_buffer *rxb,
1423 unsigned int queue)
1424{
1425 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1426
1427 iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
1428}
1429
1430static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
1431 IWL_MVM_COMMON_OPS,
1432 .rx = iwl_mvm_rx_mq,
1433 .rx_rss = iwl_mvm_rx_mq_rss,
1434};
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
deleted file mode 100644
index e68a475e3071..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ /dev/null
@@ -1,295 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <net/mac80211.h>
67#include "fw-api.h"
68#include "mvm.h"
69
70/* Maps the driver specific channel width definition to the fw values */
71u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
72{
73 switch (chandef->width) {
74 case NL80211_CHAN_WIDTH_20_NOHT:
75 case NL80211_CHAN_WIDTH_20:
76 return PHY_VHT_CHANNEL_MODE20;
77 case NL80211_CHAN_WIDTH_40:
78 return PHY_VHT_CHANNEL_MODE40;
79 case NL80211_CHAN_WIDTH_80:
80 return PHY_VHT_CHANNEL_MODE80;
81 case NL80211_CHAN_WIDTH_160:
82 return PHY_VHT_CHANNEL_MODE160;
83 default:
84 WARN(1, "Invalid channel width=%u", chandef->width);
85 return PHY_VHT_CHANNEL_MODE20;
86 }
87}
88
89/*
90 * Maps the driver specific control channel position (relative to the center
 91 * freq) definitions to the fw values
92 */
93u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
94{
95 switch (chandef->chan->center_freq - chandef->center_freq1) {
96 case -70:
97 return PHY_VHT_CTRL_POS_4_BELOW;
98 case -50:
99 return PHY_VHT_CTRL_POS_3_BELOW;
100 case -30:
101 return PHY_VHT_CTRL_POS_2_BELOW;
102 case -10:
103 return PHY_VHT_CTRL_POS_1_BELOW;
104 case 10:
105 return PHY_VHT_CTRL_POS_1_ABOVE;
106 case 30:
107 return PHY_VHT_CTRL_POS_2_ABOVE;
108 case 50:
109 return PHY_VHT_CTRL_POS_3_ABOVE;
110 case 70:
111 return PHY_VHT_CTRL_POS_4_ABOVE;
112 default:
113 WARN(1, "Invalid channel definition");
114 case 0:
115 /*
116 * The FW is expected to check the control channel position only
117 * when in HT/VHT and the channel width is not 20MHz. Return
118 * this value as the default one.
119 */
120 return PHY_VHT_CTRL_POS_1_BELOW;
121 }
122}
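/*
 * Editorial example (not part of the original file): for a hypothetical
 * 80 MHz chandef with the control channel at 5180 MHz and
 * center_freq1 = 5210 MHz, the offset is 5180 - 5210 = -30, so the
 * function above returns PHY_VHT_CTRL_POS_2_BELOW.
 */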
123
124/*
125 * Construct the generic fields of the PHY context command
126 */
127static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
128 struct iwl_phy_context_cmd *cmd,
129 u32 action, u32 apply_time)
130{
131 memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
132
133 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
134 ctxt->color));
135 cmd->action = cpu_to_le32(action);
136 cmd->apply_time = cpu_to_le32(apply_time);
137}
138
139/*
140 * Add the phy configuration to the PHY context command
141 */
142static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
143 struct iwl_phy_context_cmd *cmd,
144 struct cfg80211_chan_def *chandef,
145 u8 chains_static, u8 chains_dynamic)
146{
147 u8 active_cnt, idle_cnt;
148
149 /* Set the channel info data */
150 cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
151 PHY_BAND_24 : PHY_BAND_5);
152
153 cmd->ci.channel = chandef->chan->hw_value;
154 cmd->ci.width = iwl_mvm_get_channel_width(chandef);
155 cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
156
 157	/* Set the rx chains */
158 idle_cnt = chains_static;
159 active_cnt = chains_dynamic;
160
 161	/* In scenarios where we only ever use single-stream rates,
162 * i.e. legacy 11b/g/a associations, single-stream APs or even
163 * static SMPS, enable both chains to get diversity, improving
164 * the case where we're far enough from the AP that attenuation
165 * between the two antennas is sufficiently different to impact
166 * performance.
167 */
168 if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
169 idle_cnt = 2;
170 active_cnt = 2;
171 }
172
173 cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
174 PHY_RX_CHAIN_VALID_POS);
175 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
176 cmd->rxchain_info |= cpu_to_le32(active_cnt <<
177 PHY_RX_CHAIN_MIMO_CNT_POS);
178#ifdef CONFIG_IWLWIFI_DEBUGFS
179 if (unlikely(mvm->dbgfs_rx_phyinfo))
180 cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
181#endif
182
183 cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
184}
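/*
 * Editorial sketch (not part of the original file): how rxchain_info ends
 * up composed for a hypothetical case where the valid RX antenna mask is
 * 0x3 (ANT_AB) and both idle_cnt and active_cnt are bumped to 2:
 *
 *	rxchain_info = cpu_to_le32(0x3 << PHY_RX_CHAIN_VALID_POS |
 *				   2 << PHY_RX_CHAIN_CNT_POS |
 *				   2 << PHY_RX_CHAIN_MIMO_CNT_POS);
 */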
185
186/*
 187 * Send a command to apply the current phy configuration. The command is sent
 188 * only if something in the configuration has changed: either this is the
 189 * first time the phy configuration is applied, or the phy configuration
 190 * has changed since the previous apply.
191 */
192static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
193 struct iwl_mvm_phy_ctxt *ctxt,
194 struct cfg80211_chan_def *chandef,
195 u8 chains_static, u8 chains_dynamic,
196 u32 action, u32 apply_time)
197{
198 struct iwl_phy_context_cmd cmd;
199 int ret;
200
201 /* Set the command header fields */
202 iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
203
204 /* Set the command data */
205 iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
206 chains_static, chains_dynamic);
207
208 ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
209 sizeof(struct iwl_phy_context_cmd),
210 &cmd);
211 if (ret)
212 IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
213 return ret;
214}
215
216/*
217 * Send a command to add a PHY context based on the current HW configuration.
218 */
219int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
220 struct cfg80211_chan_def *chandef,
221 u8 chains_static, u8 chains_dynamic)
222{
223 WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
224 ctxt->ref);
225 lockdep_assert_held(&mvm->mutex);
226
227 ctxt->channel = chandef->chan;
228
229 return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
230 chains_static, chains_dynamic,
231 FW_CTXT_ACTION_ADD, 0);
232}
233
234/*
235 * Update the number of references to the given PHY context. This is valid only
236 * in case the PHY context was already created, i.e., its reference count > 0.
237 */
238void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
239{
240 lockdep_assert_held(&mvm->mutex);
241 ctxt->ref++;
242}
243
244/*
245 * Send a command to modify the PHY context based on the current HW
246 * configuration. Note that the function does not check that the configuration
247 * changed.
248 */
249int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
250 struct cfg80211_chan_def *chandef,
251 u8 chains_static, u8 chains_dynamic)
252{
253 lockdep_assert_held(&mvm->mutex);
254
255 ctxt->channel = chandef->chan;
256 return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
257 chains_static, chains_dynamic,
258 FW_CTXT_ACTION_MODIFY, 0);
259}
260
261void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
262{
263 lockdep_assert_held(&mvm->mutex);
264
265 if (WARN_ON_ONCE(!ctxt))
266 return;
267
268 ctxt->ref--;
269}
270
271static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
272 struct ieee80211_vif *vif)
273{
274 unsigned long *data = _data;
275 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
276
277 if (!mvmvif->phy_ctxt)
278 return;
279
280 if (vif->type == NL80211_IFTYPE_STATION ||
281 vif->type == NL80211_IFTYPE_AP)
282 __set_bit(mvmvif->phy_ctxt->id, data);
283}
284
285int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm)
286{
287 unsigned long phy_ctxt_counter = 0;
288
289 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
290 IEEE80211_IFACE_ITER_NORMAL,
291 iwl_mvm_binding_iterator,
292 &phy_ctxt_counter);
293
294 return hweight8(phy_ctxt_counter);
295}
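/*
 * Editorial example (not part of the original file): if a station vif and
 * an AP vif both sit on the PHY context with id 0, only bit 0 of the
 * bitmap is set and iwl_mvm_phy_ctx_count() returns hweight8(0x1) == 1;
 * another station or AP vif on a PHY context with id 1 would make it
 * hweight8(0x3) == 2.
 */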
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
deleted file mode 100644
index bed9696ee410..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ /dev/null
@@ -1,1040 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67
68#include <linux/kernel.h>
69#include <linux/module.h>
70#include <linux/slab.h>
71#include <linux/etherdevice.h>
72
73#include <net/mac80211.h>
74
75#include "iwl-debug.h"
76#include "mvm.h"
77#include "iwl-modparams.h"
78#include "fw-api-power.h"
79
80#define POWER_KEEP_ALIVE_PERIOD_SEC 25
81
82static
83int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
84 struct iwl_beacon_filter_cmd *cmd,
85 u32 flags)
86{
87 IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
88 le32_to_cpu(cmd->ba_enable_beacon_abort));
89 IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
90 le32_to_cpu(cmd->ba_escape_timer));
91 IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
92 le32_to_cpu(cmd->bf_debug_flag));
93 IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
94 le32_to_cpu(cmd->bf_enable_beacon_filter));
95 IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
96 le32_to_cpu(cmd->bf_energy_delta));
97 IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
98 le32_to_cpu(cmd->bf_escape_timer));
99 IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
100 le32_to_cpu(cmd->bf_roaming_energy_delta));
101 IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
102 le32_to_cpu(cmd->bf_roaming_state));
103 IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
104 le32_to_cpu(cmd->bf_temp_threshold));
105 IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
106 le32_to_cpu(cmd->bf_temp_fast_filter));
107 IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
108 le32_to_cpu(cmd->bf_temp_slow_filter));
109
110 return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
111 sizeof(struct iwl_beacon_filter_cmd), cmd);
112}
113
114static
115void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
116 struct ieee80211_vif *vif,
117 struct iwl_beacon_filter_cmd *cmd,
118 bool d0i3)
119{
120 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
121
122 if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
123 cmd->bf_energy_delta =
124 cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
125 /* fw uses an absolute value for this */
126 cmd->bf_roaming_state =
127 cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
128 }
129 cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
130}
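/*
 * Editorial example (not part of the original file): for a hypothetical
 * CQM configuration of cqm_rssi_thold = -70 dBm and cqm_rssi_hyst = 3 dB
 * (and not in d0i3), the code above sets bf_energy_delta = 3 and
 * bf_roaming_state = 70, since the firmware expects the roaming threshold
 * as an absolute value.
 */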
131
132static void iwl_mvm_power_log(struct iwl_mvm *mvm,
133 struct iwl_mac_power_cmd *cmd)
134{
135 IWL_DEBUG_POWER(mvm,
136 "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
137 cmd->id_and_color, iwlmvm_mod_params.power_scheme,
138 le16_to_cpu(cmd->flags));
139 IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
140 le16_to_cpu(cmd->keep_alive_seconds));
141
142 if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
143 IWL_DEBUG_POWER(mvm, "Disable power management\n");
144 return;
145 }
146
147 IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
148 le32_to_cpu(cmd->rx_data_timeout));
149 IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
150 le32_to_cpu(cmd->tx_data_timeout));
151 if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
152 IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
153 cmd->skip_dtim_periods);
154 if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
155 IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
156 cmd->lprx_rssi_threshold);
157 if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
158 IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
159 IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
160 le32_to_cpu(cmd->rx_data_timeout_uapsd));
161 IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
162 le32_to_cpu(cmd->tx_data_timeout_uapsd));
163 IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
164 IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
165 IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
166 }
167}
168
169static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
170 struct ieee80211_vif *vif,
171 struct iwl_mac_power_cmd *cmd)
172{
173 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
174 enum ieee80211_ac_numbers ac;
175 bool tid_found = false;
176
177 for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
178 if (!mvmvif->queue_params[ac].uapsd)
179 continue;
180
181 if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
182 cmd->flags |=
183 cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
184
185 cmd->uapsd_ac_flags |= BIT(ac);
186
187 /* QNDP TID - the highest TID with no admission control */
188 if (!tid_found && !mvmvif->queue_params[ac].acm) {
189 tid_found = true;
190 switch (ac) {
191 case IEEE80211_AC_VO:
192 cmd->qndp_tid = 6;
193 break;
194 case IEEE80211_AC_VI:
195 cmd->qndp_tid = 5;
196 break;
197 case IEEE80211_AC_BE:
198 cmd->qndp_tid = 0;
199 break;
200 case IEEE80211_AC_BK:
201 cmd->qndp_tid = 1;
202 break;
203 }
204 }
205 }
206
207 if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
208#ifdef CONFIG_IWLWIFI_DEBUGFS
209 /* set advanced pm flag with no uapsd ACs to enable ps-poll */
210 if (mvmvif->dbgfs_pm.use_ps_poll)
211 cmd->flags |=
212 cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
213#endif
214 return;
215 }
216
217 cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
218
219 if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
220 BIT(IEEE80211_AC_VI) |
221 BIT(IEEE80211_AC_BE) |
222 BIT(IEEE80211_AC_BK))) {
223 cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
224 cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
225 cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
226 cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
227 cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
228 }
229
230 cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
231
232 if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
233 cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
234 cmd->rx_data_timeout_uapsd =
235 cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
236 cmd->tx_data_timeout_uapsd =
237 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
238 } else {
239 cmd->rx_data_timeout_uapsd =
240 cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
241 cmd->tx_data_timeout_uapsd =
242 cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
243 }
244
245 if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
246 cmd->heavy_tx_thld_packets =
247 IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
248 cmd->heavy_rx_thld_packets =
249 IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
250 } else {
251 cmd->heavy_tx_thld_packets =
252 IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
253 cmd->heavy_rx_thld_packets =
254 IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
255 }
256 cmd->heavy_tx_thld_percentage =
257 IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
258 cmd->heavy_rx_thld_percentage =
259 IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
260}
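/*
 * Editorial example (not part of the original file): if only the VI queue
 * is uAPSD-enabled and has no admission control, the loop above sets
 * uapsd_ac_flags = BIT(IEEE80211_AC_VI) and qndp_tid = 5; snooze is only
 * enabled when all four ACs are uAPSD-enabled.
 */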
261
262static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
263 struct ieee80211_vif *vif)
264{
265 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
266
267 if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
268 ETH_ALEN))
269 return false;
270
271 if (vif->p2p &&
272 !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
273 return false;
274 /*
 275	 * Avoid using uAPSD if the P2P client is associated with a GO that uses
 276	 * opportunistic power save. This is due to a current FW limitation.
277 */
278 if (vif->p2p &&
279 (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
280 IEEE80211_P2P_OPPPS_ENABLE_BIT))
281 return false;
282
283 /*
 284	 * Avoid using uAPSD if the client is in DCM (more than one channel in
 285	 * use) - it causes a low latency issue with Miracast
286 */
287 if (iwl_mvm_phy_ctx_count(mvm) >= 2)
288 return false;
289
290 return true;
291}
292
293static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
294{
295 struct ieee80211_chanctx_conf *chanctx_conf;
296 struct ieee80211_channel *chan;
297 bool radar_detect = false;
298
299 rcu_read_lock();
300 chanctx_conf = rcu_dereference(vif->chanctx_conf);
301 WARN_ON(!chanctx_conf);
302 if (chanctx_conf) {
303 chan = chanctx_conf->def.chan;
304 radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
305 }
306 rcu_read_unlock();
307
308 return radar_detect;
309}
310
311static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
312 struct ieee80211_vif *vif,
313 struct iwl_mac_power_cmd *cmd,
314 bool host_awake)
315{
316 int dtimper = vif->bss_conf.dtim_period ?: 1;
317 int skip;
318
319 /* disable, in case we're supposed to override */
320 cmd->skip_dtim_periods = 0;
321 cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
322
323 if (iwl_mvm_power_is_radar(vif))
324 return;
325
326 if (dtimper >= 10)
327 return;
328
329 /* TODO: check that multicast wake lock is off */
330
331 if (host_awake) {
332 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
333 return;
334 skip = 2;
335 } else {
336 int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
337
338 if (WARN_ON(!dtimper_tu))
339 return;
340 /* configure skip over dtim up to 306TU - 314 msec */
341 skip = max_t(u8, 1, 306 / dtimper_tu);
342 }
343
344 /* the firmware really expects "look at every X DTIMs", so add 1 */
345 cmd->skip_dtim_periods = 1 + skip;
346 cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
347}
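/*
 * Editorial example (not part of the original file): with the host asleep,
 * a hypothetical DTIM period of 2 and a beacon interval of 100 TU give
 * dtimper_tu = 200, so skip = max(1, 306 / 200) = 1 and
 * skip_dtim_periods = 2, i.e. the firmware wakes for every 2nd DTIM.
 */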
348
349static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
350 struct ieee80211_vif *vif,
351 struct iwl_mac_power_cmd *cmd,
352 bool host_awake)
353{
354 int dtimper, bi;
355 int keep_alive;
356 struct iwl_mvm_vif *mvmvif __maybe_unused =
357 iwl_mvm_vif_from_mac80211(vif);
358
359 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
360 mvmvif->color));
361 dtimper = vif->bss_conf.dtim_period;
362 bi = vif->bss_conf.beacon_int;
363
364 /*
365 * Regardless of power management state the driver must set
366 * keep alive period. FW will use it for sending keep alive NDPs
367 * immediately after association. Check that keep alive period
368 * is at least 3 * DTIM
369 */
370 keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
371 USEC_PER_SEC);
372 keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
373 cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
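	/*
	 * Editorial example (not part of the original file): for a
	 * hypothetical DTIM period of 3 and a beacon interval of 100 TU,
	 * 3 * 3 * 100 = 900 TU = 921600 usec, so DIV_ROUND_UP() yields
	 * 1 second and the 25 second POWER_KEEP_ALIVE_PERIOD_SEC floor is
	 * what actually gets sent.
	 */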
374
375 if (mvm->ps_disabled)
376 return;
377
378 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
379
380 if (!vif->bss_conf.ps || !mvmvif->pm_enabled)
381 return;
382
383 if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
384 (!fw_has_capa(&mvm->fw->ucode_capa,
385 IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
386 !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE))
387 return;
388
389 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
390
391 if (vif->bss_conf.beacon_rate &&
392 (vif->bss_conf.beacon_rate->bitrate == 10 ||
393 vif->bss_conf.beacon_rate->bitrate == 60)) {
394 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
395 cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
396 }
397
398 iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
399
400 if (!host_awake) {
401 cmd->rx_data_timeout =
402 cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
403 cmd->tx_data_timeout =
404 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
405 } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
406 fw_has_capa(&mvm->fw->ucode_capa,
407 IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) {
408 cmd->tx_data_timeout =
409 cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT);
410 cmd->rx_data_timeout =
411 cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT);
412 } else {
413 cmd->rx_data_timeout =
414 cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
415 cmd->tx_data_timeout =
416 cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
417 }
418
419 if (iwl_mvm_power_allow_uapsd(mvm, vif))
420 iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
421
422#ifdef CONFIG_IWLWIFI_DEBUGFS
423 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
424 cmd->keep_alive_seconds =
425 cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
426 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
427 if (mvmvif->dbgfs_pm.skip_over_dtim)
428 cmd->flags |=
429 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
430 else
431 cmd->flags &=
432 cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
433 }
434 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
435 cmd->rx_data_timeout =
436 cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
437 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
438 cmd->tx_data_timeout =
439 cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
440 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
441 cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
442 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
443 if (mvmvif->dbgfs_pm.lprx_ena)
444 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
445 else
446 cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
447 }
448 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
449 cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
450 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
451 if (mvmvif->dbgfs_pm.snooze_ena)
452 cmd->flags |=
453 cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
454 else
455 cmd->flags &=
456 cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
457 }
458 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
459 u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
460 if (mvmvif->dbgfs_pm.uapsd_misbehaving)
461 cmd->flags |= cpu_to_le16(flag);
462 else
463 cmd->flags &= cpu_to_le16(flag);
464 }
465#endif /* CONFIG_IWLWIFI_DEBUGFS */
466}
467
468static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
469 struct ieee80211_vif *vif)
470{
471 struct iwl_mac_power_cmd cmd = {};
472
473 iwl_mvm_power_build_cmd(mvm, vif, &cmd,
474 mvm->cur_ucode != IWL_UCODE_WOWLAN);
475 iwl_mvm_power_log(mvm, &cmd);
476#ifdef CONFIG_IWLWIFI_DEBUGFS
477 memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
478#endif
479
480 return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
481 sizeof(cmd), &cmd);
482}
483
484int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
485{
486 struct iwl_device_power_cmd cmd = {
487 .flags = 0,
488 };
489
490 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
491 mvm->ps_disabled = true;
492
493 if (!mvm->ps_disabled)
494 cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
495
496#ifdef CONFIG_IWLWIFI_DEBUGFS
497 if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
498 mvm->disable_power_off)
499 cmd.flags &=
500 cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
501#endif
502 IWL_DEBUG_POWER(mvm,
503 "Sending device power command with flags = 0x%X\n",
504 cmd.flags);
505
506 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
507 &cmd);
508}
509
510void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
511{
512 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
513
514 if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
515 ETH_ALEN))
516 eth_zero_addr(mvmvif->uapsd_misbehaving_bssid);
517}
518
519static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
520 struct ieee80211_vif *vif)
521{
522 u8 *ap_sta_id = _data;
523 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
524
 525	/* The ap_sta_id is not expected to change during the current association,
 526	 * so no explicit protection is needed
527 */
528 if (mvmvif->ap_sta_id == *ap_sta_id)
529 memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
530 ETH_ALEN);
531}
532
533void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
534 struct iwl_rx_cmd_buffer *rxb)
535{
536 struct iwl_rx_packet *pkt = rxb_addr(rxb);
537 struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
538 u8 ap_sta_id = le32_to_cpu(notif->sta_id);
539
540 ieee80211_iterate_active_interfaces_atomic(
541 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
542 iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
543}
544
545struct iwl_power_vifs {
546 struct iwl_mvm *mvm;
547 struct ieee80211_vif *bf_vif;
548 struct ieee80211_vif *bss_vif;
549 struct ieee80211_vif *p2p_vif;
550 struct ieee80211_vif *ap_vif;
551 struct ieee80211_vif *monitor_vif;
552 bool p2p_active;
553 bool bss_active;
554 bool ap_active;
555 bool monitor_active;
556};
557
558static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac,
559 struct ieee80211_vif *vif)
560{
561 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
562
563 mvmvif->pm_enabled = false;
564}
565
566static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac,
567 struct ieee80211_vif *vif)
568{
569 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
570 bool *disable_ps = _data;
571
572 if (mvmvif->phy_ctxt)
573 if (mvmvif->phy_ctxt->id < MAX_PHYS)
574 *disable_ps |= mvmvif->ps_disabled;
575}
576
577static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
578 struct ieee80211_vif *vif)
579{
580 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
581 struct iwl_power_vifs *power_iterator = _data;
582
583 switch (ieee80211_vif_type_p2p(vif)) {
584 case NL80211_IFTYPE_P2P_DEVICE:
585 break;
586
587 case NL80211_IFTYPE_P2P_GO:
588 case NL80211_IFTYPE_AP:
589 /* only a single MAC of the same type */
590 WARN_ON(power_iterator->ap_vif);
591 power_iterator->ap_vif = vif;
592 if (mvmvif->phy_ctxt)
593 if (mvmvif->phy_ctxt->id < MAX_PHYS)
594 power_iterator->ap_active = true;
595 break;
596
597 case NL80211_IFTYPE_MONITOR:
598 /* only a single MAC of the same type */
599 WARN_ON(power_iterator->monitor_vif);
600 power_iterator->monitor_vif = vif;
601 if (mvmvif->phy_ctxt)
602 if (mvmvif->phy_ctxt->id < MAX_PHYS)
603 power_iterator->monitor_active = true;
604 break;
605
606 case NL80211_IFTYPE_P2P_CLIENT:
607 /* only a single MAC of the same type */
608 WARN_ON(power_iterator->p2p_vif);
609 power_iterator->p2p_vif = vif;
610 if (mvmvif->phy_ctxt)
611 if (mvmvif->phy_ctxt->id < MAX_PHYS)
612 power_iterator->p2p_active = true;
613 break;
614
615 case NL80211_IFTYPE_STATION:
616 /* only a single MAC of the same type */
617 WARN_ON(power_iterator->bss_vif);
618 power_iterator->bss_vif = vif;
619 if (mvmvif->phy_ctxt)
620 if (mvmvif->phy_ctxt->id < MAX_PHYS)
621 power_iterator->bss_active = true;
622
623 if (mvmvif->bf_data.bf_enabled &&
624 !WARN_ON(power_iterator->bf_vif))
625 power_iterator->bf_vif = vif;
626
627 break;
628
629 default:
630 break;
631 }
632}
633
634static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
635 struct iwl_power_vifs *vifs)
636{
637 struct iwl_mvm_vif *bss_mvmvif = NULL;
638 struct iwl_mvm_vif *p2p_mvmvif = NULL;
639 struct iwl_mvm_vif *ap_mvmvif = NULL;
640 bool client_same_channel = false;
641 bool ap_same_channel = false;
642
643 lockdep_assert_held(&mvm->mutex);
644
 645	/* set pm_enabled to false on all vifs */
646 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
647 IEEE80211_IFACE_ITER_NORMAL,
648 iwl_mvm_power_disable_pm_iterator,
649 NULL);
650
651 if (vifs->bss_vif)
652 bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
653
654 if (vifs->p2p_vif)
655 p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
656
657 if (vifs->ap_vif)
658 ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
659
660 /* don't allow PM if any TDLS stations exist */
661 if (iwl_mvm_tdls_sta_count(mvm, NULL))
662 return;
663
664 /* enable PM on bss if bss stand alone */
665 if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
666 bss_mvmvif->pm_enabled = true;
667 return;
668 }
669
670 /* enable PM on p2p if p2p stand alone */
671 if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
672 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
673 p2p_mvmvif->pm_enabled = true;
674 return;
675 }
676
677 if (vifs->bss_active && vifs->p2p_active)
678 client_same_channel = (bss_mvmvif->phy_ctxt->id ==
679 p2p_mvmvif->phy_ctxt->id);
680 if (vifs->bss_active && vifs->ap_active)
681 ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
682 ap_mvmvif->phy_ctxt->id);
683
684 /* clients are not stand alone: enable PM if DCM */
685 if (!(client_same_channel || ap_same_channel) &&
686 (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
687 if (vifs->bss_active)
688 bss_mvmvif->pm_enabled = true;
689 if (vifs->p2p_active &&
690 (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
691 p2p_mvmvif->pm_enabled = true;
692 return;
693 }
694
695 /*
696 * There is only one channel in the system and there are only
697 * bss and p2p clients that share it
698 */
699 if (client_same_channel && !vifs->ap_active &&
700 (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
 701		/* they share the same channel */
702 bss_mvmvif->pm_enabled = true;
703 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
704 p2p_mvmvif->pm_enabled = true;
705 }
706}
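/*
 * Editorial summary (not part of the original file) of the decision above:
 * any TDLS station keeps PM off everywhere; a standalone BSS gets PM; a
 * standalone P2P client gets PM only with the P2P_PM firmware flag; with
 * BSS and P2P/AP on different channels PM depends on the BSS_P2P_PS_DCM
 * flag, and with BSS and P2P client on the same channel (no AP) on the
 * BSS_P2P_PS_SCM flag.
 */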
707
708#ifdef CONFIG_IWLWIFI_DEBUGFS
709int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
710 struct ieee80211_vif *vif, char *buf,
711 int bufsz)
712{
713 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
714 struct iwl_mac_power_cmd cmd = {};
715 int pos = 0;
716
717 mutex_lock(&mvm->mutex);
718 memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
719 mutex_unlock(&mvm->mutex);
720
721 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
722 iwlmvm_mod_params.power_scheme);
723 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
724 le16_to_cpu(cmd.flags));
725 pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
726 le16_to_cpu(cmd.keep_alive_seconds));
727
728 if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)))
729 return pos;
730
731 pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
732 (cmd.flags &
733 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
734 pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
735 cmd.skip_dtim_periods);
736 if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
737 pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
738 le32_to_cpu(cmd.rx_data_timeout));
739 pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
740 le32_to_cpu(cmd.tx_data_timeout));
741 }
742 if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
743 pos += scnprintf(buf+pos, bufsz-pos,
744 "lprx_rssi_threshold = %d\n",
745 cmd.lprx_rssi_threshold);
746
747 if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
748 return pos;
749
750 pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n",
751 le32_to_cpu(cmd.rx_data_timeout_uapsd));
752 pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n",
753 le32_to_cpu(cmd.tx_data_timeout_uapsd));
754 pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid);
755 pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n",
756 cmd.uapsd_ac_flags);
757 pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n",
758 cmd.uapsd_max_sp);
759 pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n",
760 cmd.heavy_tx_thld_packets);
761 pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n",
762 cmd.heavy_rx_thld_packets);
763 pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n",
764 cmd.heavy_tx_thld_percentage);
765 pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n",
766 cmd.heavy_rx_thld_percentage);
767 pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n",
768 (cmd.flags &
769 cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ?
770 1 : 0);
771
772 if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)))
773 return pos;
774
775 pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n",
776 cmd.snooze_interval);
777 pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n",
778 cmd.snooze_window);
779
780 return pos;
781}
782
783void
784iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
785 struct iwl_beacon_filter_cmd *cmd)
786{
787 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
788 struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
789
790 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
791 cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
792 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
793 cmd->bf_roaming_energy_delta =
794 cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
795 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
796 cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
797 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
798 cmd->bf_temp_threshold =
799 cpu_to_le32(dbgfs_bf->bf_temp_threshold);
800 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
801 cmd->bf_temp_fast_filter =
802 cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
803 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
804 cmd->bf_temp_slow_filter =
805 cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
806 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
807 cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
808 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
809 cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
810 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
811 cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
812 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
813 cmd->ba_enable_beacon_abort =
814 cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
815}
816#endif
817
818static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
819 struct ieee80211_vif *vif,
820 struct iwl_beacon_filter_cmd *cmd,
821 u32 cmd_flags,
822 bool d0i3)
823{
824 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
825 int ret;
826
827 if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
828 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
829 return 0;
830
831 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
832 if (!d0i3)
833 iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
834 ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
835
836 /* don't change bf_enabled in case of temporary d0i3 configuration */
837 if (!ret && !d0i3)
838 mvmvif->bf_data.bf_enabled = true;
839
840 return ret;
841}
842
843int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
844 struct ieee80211_vif *vif,
845 u32 flags)
846{
847 struct iwl_beacon_filter_cmd cmd = {
848 IWL_BF_CMD_CONFIG_DEFAULTS,
849 .bf_enable_beacon_filter = cpu_to_le32(1),
850 };
851
852 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
853}
854
855static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
856 struct ieee80211_vif *vif,
857 bool enable)
858{
859 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
860 struct iwl_beacon_filter_cmd cmd = {
861 IWL_BF_CMD_CONFIG_DEFAULTS,
862 .bf_enable_beacon_filter = cpu_to_le32(1),
863 };
864
865 if (!mvmvif->bf_data.bf_enabled)
866 return 0;
867
868 if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
869 cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
870
871 mvmvif->bf_data.ba_enabled = enable;
872 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
873}
874
875int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
876 struct ieee80211_vif *vif,
877 u32 flags)
878{
879 struct iwl_beacon_filter_cmd cmd = {};
880 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
881 int ret;
882
883 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
884 return 0;
885
886 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
887
888 if (!ret)
889 mvmvif->bf_data.bf_enabled = false;
890
891 return ret;
892}
893
894static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
895{
896 bool disable_ps;
897 int ret;
898
899 /* disable PS if CAM */
900 disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
901 /* ...or if any of the vifs require PS to be off */
902 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
903 IEEE80211_IFACE_ITER_NORMAL,
904 iwl_mvm_power_ps_disabled_iterator,
905 &disable_ps);
906
907 /* update device power state if it has changed */
908 if (mvm->ps_disabled != disable_ps) {
909 bool old_ps_disabled = mvm->ps_disabled;
910
911 mvm->ps_disabled = disable_ps;
912 ret = iwl_mvm_power_update_device(mvm);
913 if (ret) {
914 mvm->ps_disabled = old_ps_disabled;
915 return ret;
916 }
917 }
918
919 return 0;
920}
921
922static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
923 struct iwl_power_vifs *vifs)
924{
925 struct iwl_mvm_vif *mvmvif;
926 bool ba_enable;
927
928 if (!vifs->bf_vif)
929 return 0;
930
931 mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif);
932
933 ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
934 !vifs->bf_vif->bss_conf.ps ||
935 iwl_mvm_vif_low_latency(mvmvif));
936
937 return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable);
938}
939
940int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
941{
942 struct iwl_power_vifs vifs = {
943 .mvm = mvm,
944 };
945 int ret;
946
947 lockdep_assert_held(&mvm->mutex);
948
949 /* get vifs info */
950 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
951 IEEE80211_IFACE_ITER_NORMAL,
952 iwl_mvm_power_get_vifs_iterator, &vifs);
953
954 ret = iwl_mvm_power_set_ps(mvm);
955 if (ret)
956 return ret;
957
958 return iwl_mvm_power_set_ba(mvm, &vifs);
959}
960
961int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
962{
963 struct iwl_power_vifs vifs = {
964 .mvm = mvm,
965 };
966 int ret;
967
968 lockdep_assert_held(&mvm->mutex);
969
970 /* get vifs info */
971 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
972 IEEE80211_IFACE_ITER_NORMAL,
973 iwl_mvm_power_get_vifs_iterator, &vifs);
974
975 iwl_mvm_power_set_pm(mvm, &vifs);
976
977 ret = iwl_mvm_power_set_ps(mvm);
978 if (ret)
979 return ret;
980
981 if (vifs.bss_vif) {
982 ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
983 if (ret)
984 return ret;
985 }
986
987 if (vifs.p2p_vif) {
988 ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
989 if (ret)
990 return ret;
991 }
992
993 return iwl_mvm_power_set_ba(mvm, &vifs);
994}
995
996int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
997 struct ieee80211_vif *vif,
998 bool enable, u32 flags)
999{
1000 int ret;
1001 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1002 struct iwl_mac_power_cmd cmd = {};
1003
1004 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
1005 return 0;
1006
1007 if (!vif->bss_conf.assoc)
1008 return 0;
1009
1010 iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable);
1011
1012 iwl_mvm_power_log(mvm, &cmd);
1013#ifdef CONFIG_IWLWIFI_DEBUGFS
1014 memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
1015#endif
1016 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
1017 sizeof(cmd), &cmd);
1018 if (ret)
1019 return ret;
1020
1021 /* configure beacon filtering */
1022 if (mvmvif != mvm->bf_allowed_vif)
1023 return 0;
1024
1025 if (enable) {
1026 struct iwl_beacon_filter_cmd cmd_bf = {
1027 IWL_BF_CMD_CONFIG_D0I3,
1028 .bf_enable_beacon_filter = cpu_to_le32(1),
1029 };
1030 ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
1031 flags, true);
1032 } else {
1033 if (mvmvif->bf_data.bf_enabled)
1034 ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
1035 else
1036 ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
1037 }
1038
1039 return ret;
1040}
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
deleted file mode 100644
index 509a66d05245..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ /dev/null
@@ -1,328 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <net/mac80211.h>
67#include "fw-api.h"
68#include "mvm.h"
69
70#define QUOTA_100 IWL_MVM_MAX_QUOTA
71#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
72
73struct iwl_mvm_quota_iterator_data {
74 int n_interfaces[MAX_BINDINGS];
75 int colors[MAX_BINDINGS];
76 int low_latency[MAX_BINDINGS];
77 int n_low_latency_bindings;
78 struct ieee80211_vif *disabled_vif;
79};
80
81static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
82 struct ieee80211_vif *vif)
83{
84 struct iwl_mvm_quota_iterator_data *data = _data;
85 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
86 u16 id;
87
88 /* skip disabled interfaces here immediately */
89 if (vif == data->disabled_vif)
90 return;
91
92 if (!mvmvif->phy_ctxt)
93 return;
94
95 /* currently, PHY ID == binding ID */
96 id = mvmvif->phy_ctxt->id;
97
98 /* need at least one binding per PHY */
99 BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
100
101 if (WARN_ON_ONCE(id >= MAX_BINDINGS))
102 return;
103
104 switch (vif->type) {
105 case NL80211_IFTYPE_STATION:
106 if (vif->bss_conf.assoc)
107 break;
108 return;
109 case NL80211_IFTYPE_AP:
110 case NL80211_IFTYPE_ADHOC:
111 if (mvmvif->ap_ibss_active)
112 break;
113 return;
114 case NL80211_IFTYPE_MONITOR:
115 if (mvmvif->monitor_active)
116 break;
117 return;
118 case NL80211_IFTYPE_P2P_DEVICE:
119 return;
120 default:
121 WARN_ON_ONCE(1);
122 return;
123 }
124
125 if (data->colors[id] < 0)
126 data->colors[id] = mvmvif->phy_ctxt->color;
127 else
128 WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
129
130 data->n_interfaces[id]++;
131
132 if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
133 data->n_low_latency_bindings++;
134 data->low_latency[id] = true;
135 }
136}
137
138static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
139 struct iwl_time_quota_cmd *cmd)
140{
141#ifdef CONFIG_NL80211_TESTMODE
142 struct iwl_mvm_vif *mvmvif;
143 int i, phy_id = -1, beacon_int = 0;
144
145 if (!mvm->noa_duration || !mvm->noa_vif)
146 return;
147
148 mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
149 if (!mvmvif->ap_ibss_active)
150 return;
151
152 phy_id = mvmvif->phy_ctxt->id;
153 beacon_int = mvm->noa_vif->bss_conf.beacon_int;
154
155 for (i = 0; i < MAX_BINDINGS; i++) {
156 u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color);
157 u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
158 u32 quota = le32_to_cpu(cmd->quotas[i].quota);
159
160 if (id != phy_id)
161 continue;
162
163 quota *= (beacon_int - mvm->noa_duration);
164 quota /= beacon_int;
165
166 IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n",
167 le32_to_cpu(cmd->quotas[i].quota), quota);
168
169 cmd->quotas[i].quota = cpu_to_le32(quota);
170 }
171#endif
172}
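/*
 * Editorial example (not part of the original file): for a hypothetical
 * NoA duration of 25 TU, a beacon interval of 100 TU and a binding quota
 * of 128 fragments, the loop above scales the quota down to
 * 128 * (100 - 25) / 100 = 96 fragments.
 */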
173
174int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
175 bool force_update,
176 struct ieee80211_vif *disabled_vif)
177{
178 struct iwl_time_quota_cmd cmd = {};
179 int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat;
180 struct iwl_mvm_quota_iterator_data data = {
181 .n_interfaces = {},
182 .colors = { -1, -1, -1, -1 },
183 .disabled_vif = disabled_vif,
184 };
185 struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd;
186 bool send = false;
187
188 lockdep_assert_held(&mvm->mutex);
189
190 /* update all upon completion */
191 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
192 return 0;
193
194 /* iterator data above must match */
195 BUILD_BUG_ON(MAX_BINDINGS != 4);
196
197 ieee80211_iterate_active_interfaces_atomic(
198 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
199 iwl_mvm_quota_iterator, &data);
200
201 /*
202 * The FW's scheduling session consists of
203 * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
204 * equally between all the bindings that require quota
205 */
206 num_active_macs = 0;
207 for (i = 0; i < MAX_BINDINGS; i++) {
208 cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
209 num_active_macs += data.n_interfaces[i];
210 }
211
212 n_non_lowlat = num_active_macs;
213
214 if (data.n_low_latency_bindings == 1) {
215 for (i = 0; i < MAX_BINDINGS; i++) {
216 if (data.low_latency[i]) {
217 n_non_lowlat -= data.n_interfaces[i];
218 break;
219 }
220 }
221 }
222
223 if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
224 /*
 225		 * Reserve quota for the low latency binding in case
226 * there are several data bindings but only a single
227 * low latency one. Split the rest of the quota equally
228 * between the other data interfaces.
229 */
230 quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
231 quota_rem = QUOTA_100 - n_non_lowlat * quota -
232 QUOTA_LOWLAT_MIN;
233 IWL_DEBUG_QUOTA(mvm,
234 "quota: low-latency binding active, remaining quota per other binding: %d\n",
235 quota);
236 } else if (num_active_macs) {
237 /*
238 * There are 0 or more than 1 low latency bindings, or all the
239 * data interfaces belong to the single low latency binding.
240 * Split the quota equally between the data interfaces.
241 */
242 quota = QUOTA_100 / num_active_macs;
243 quota_rem = QUOTA_100 % num_active_macs;
244 IWL_DEBUG_QUOTA(mvm,
245 "quota: splitting evenly per binding: %d\n",
246 quota);
247 } else {
248 /* values don't really matter - won't be used */
249 quota = 0;
250 quota_rem = 0;
251 }
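	/*
	 * Editorial example (not part of the original file): with three
	 * active MACs spread over two bindings and no low-latency binding,
	 * quota = QUOTA_100 / 3 and quota_rem = QUOTA_100 % 3; the binding
	 * carrying two of the MACs then gets 2 * quota below, and the
	 * remainder goes to the first binding with a non-zero quota.
	 */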
252
253 for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
254 if (data.colors[i] < 0)
255 continue;
256
257 cmd.quotas[idx].id_and_color =
258 cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
259
260 if (data.n_interfaces[i] <= 0)
261 cmd.quotas[idx].quota = cpu_to_le32(0);
262 else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
263 data.low_latency[i])
264 /*
265 * There is more than one binding, but only one of the
266 * bindings is in low latency. For this case, allocate
267 * the minimal required quota for the low latency
268 * binding.
269 */
270 cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
271 else
272 cmd.quotas[idx].quota =
273 cpu_to_le32(quota * data.n_interfaces[i]);
274
275 WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
276 "Binding=%d, quota=%u > max=%u\n",
277 idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
278
279 cmd.quotas[idx].max_duration = cpu_to_le32(0);
280
281 idx++;
282 }
283
284 /* Give the remainder of the session to the first data binding */
285 for (i = 0; i < MAX_BINDINGS; i++) {
286 if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
287 le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
288 IWL_DEBUG_QUOTA(mvm,
289 "quota: giving remainder of %d to binding %d\n",
290 quota_rem, i);
291 break;
292 }
293 }
294
295 iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
296
 297	/* decide whether to send and check non-zero quota on all valid bindings */
298 for (i = 0; i < MAX_BINDINGS; i++) {
299 if (cmd.quotas[i].id_and_color != last->quotas[i].id_and_color)
300 send = true;
301 if (cmd.quotas[i].max_duration != last->quotas[i].max_duration)
302 send = true;
303 if (abs((int)le32_to_cpu(cmd.quotas[i].quota) -
304 (int)le32_to_cpu(last->quotas[i].quota))
305 > IWL_MVM_QUOTA_THRESHOLD)
306 send = true;
307 if (cmd.quotas[i].id_and_color == cpu_to_le32(FW_CTXT_INVALID))
308 continue;
309 WARN_ONCE(cmd.quotas[i].quota == 0,
310 "zero quota on binding %d\n", i);
311 }
312
313 if (!send && !force_update) {
314 /* don't send a practically unchanged command, the firmware has
315 * to re-initialize a lot of state and that can have an adverse
316 * impact on it
317 */
318 return 0;
319 }
320
321 err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
322
323 if (err)
324 IWL_ERR(mvm, "Failed to send quota: %d\n", err);
325 else
326 mvm->last_quota_cmd = cmd;
327 return err;
328}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
deleted file mode 100644
index d1ad10391b47..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ /dev/null
@@ -1,3983 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called LICENSE.
21 *
22 * Contact Information:
23 * Intel Linux Wireless <ilw@linux.intel.com>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 *****************************************************************************/
27#include <linux/kernel.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37#include "rs.h"
38#include "fw-api.h"
39#include "sta.h"
40#include "iwl-op-mode.h"
41#include "mvm.h"
42#include "debugfs.h"
43
44#define RS_NAME "iwl-mvm-rs"
45
46#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
47
48/* Calculations of success ratio are done in fixed point where 12800 is 100%.
 49 * Use this macro when dealing with threshold consts set as a percentage.
50 */
51#define RS_PERCENT(x) (128 * x)
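/*
 * Editorial example (not part of the original file): RS_PERCENT(50) == 6400,
 * i.e. 50% on the fixed-point scale where 12800 is 100%.
 */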
52
53static u8 rs_ht_to_legacy[] = {
54 [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
55 [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
56 [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
57 [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
58 [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
59 [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
60 [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
61 [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
62 [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
63 [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
64};
65
66static const u8 ant_toggle_lookup[] = {
67 [ANT_NONE] = ANT_NONE,
68 [ANT_A] = ANT_B,
69 [ANT_B] = ANT_C,
70 [ANT_AB] = ANT_BC,
71 [ANT_C] = ANT_A,
72 [ANT_AC] = ANT_AB,
73 [ANT_BC] = ANT_AC,
74 [ANT_ABC] = ANT_ABC,
75};
76
77#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
78 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
79 IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
80 IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
81 IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
82 IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
83 IWL_RATE_##rp##M_INDEX, \
84 IWL_RATE_##rn##M_INDEX }
85
86#define IWL_DECLARE_MCS_RATE(s) \
87 [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \
88 IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
89 IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
90 IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
91 IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
92 IWL_RATE_INVM_INDEX, \
93 IWL_RATE_INVM_INDEX }
94
95/*
96 * Parameter order:
97 * rate, ht rate, prev rate, next rate
98 *
99 * If there isn't a valid next or previous rate then INV is used which
100 * maps to IWL_RATE_INVALID
101 *
102 */
103static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
104 IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */
105 IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
 106	IWL_DECLARE_RATE_INFO(5, INV, 2, 11),   /* 5.5mbps */
107 IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
108 IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */
109 IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */
110 IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */
111 IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */
112 IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */
113 IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */
114 IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */
115 IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */
116 IWL_DECLARE_MCS_RATE(7), /* MCS 7 */
117 IWL_DECLARE_MCS_RATE(8), /* MCS 8 */
118 IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
119};
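/* Illustration only, not part of the driver: the 12 Mbps entry above,
 * IWL_DECLARE_RATE_INFO(12, 1, 11, 18), expands through the macro to roughly
 *
 *   [IWL_RATE_12M_INDEX] = { IWL_RATE_12M_PLCP,
 *                            IWL_RATE_HT_SISO_MCS_1_PLCP,
 *                            IWL_RATE_HT_MIMO2_MCS_1_PLCP,
 *                            IWL_RATE_VHT_SISO_MCS_1_PLCP,
 *                            IWL_RATE_VHT_MIMO2_MCS_1_PLCP,
 *                            IWL_RATE_11M_INDEX,     (previous rate)
 *                            IWL_RATE_18M_INDEX },   (next rate)
 *
 * i.e. each row carries the legacy PLCP code, the matching HT/VHT MCS PLCP
 * codes, and the previous/next legacy rate indices used for rate walking.
 */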
120
121enum rs_action {
122 RS_ACTION_STAY = 0,
123 RS_ACTION_DOWNSCALE = -1,
124 RS_ACTION_UPSCALE = 1,
125};
126
127enum rs_column_mode {
128 RS_INVALID = 0,
129 RS_LEGACY,
130 RS_SISO,
131 RS_MIMO2,
132};
133
134#define MAX_NEXT_COLUMNS 7
135#define MAX_COLUMN_CHECKS 3
136
137struct rs_tx_column;
138
139typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
140 struct ieee80211_sta *sta,
141 struct rs_rate *rate,
142 const struct rs_tx_column *next_col);
143
144struct rs_tx_column {
145 enum rs_column_mode mode;
146 u8 ant;
147 bool sgi;
148 enum rs_column next_columns[MAX_NEXT_COLUMNS];
149 allow_column_func_t checks[MAX_COLUMN_CHECKS];
150};
151
152static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
153 struct rs_rate *rate,
154 const struct rs_tx_column *next_col)
155{
156 return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
157}
158
159static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
160 struct rs_rate *rate,
161 const struct rs_tx_column *next_col)
162{
163 struct iwl_mvm_sta *mvmsta;
164 struct iwl_mvm_vif *mvmvif;
165
166 if (!sta->ht_cap.ht_supported)
167 return false;
168
169 if (sta->smps_mode == IEEE80211_SMPS_STATIC)
170 return false;
171
172 if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
173 return false;
174
175 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
176 return false;
177
178 mvmsta = iwl_mvm_sta_from_mac80211(sta);
179 mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
180
181 if (mvm->nvm_data->sku_cap_mimo_disabled)
182 return false;
183
184 return true;
185}
186
187static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
188 struct rs_rate *rate,
189 const struct rs_tx_column *next_col)
190{
191 if (!sta->ht_cap.ht_supported)
192 return false;
193
194 return true;
195}
196
197static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
198 struct rs_rate *rate,
199 const struct rs_tx_column *next_col)
200{
201 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
202 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
203
204 if (is_ht20(rate) && (ht_cap->cap &
205 IEEE80211_HT_CAP_SGI_20))
206 return true;
207 if (is_ht40(rate) && (ht_cap->cap &
208 IEEE80211_HT_CAP_SGI_40))
209 return true;
210 if (is_ht80(rate) && (vht_cap->cap &
211 IEEE80211_VHT_CAP_SHORT_GI_80))
212 return true;
213
214 return false;
215}
216
217static const struct rs_tx_column rs_tx_columns[] = {
218 [RS_COLUMN_LEGACY_ANT_A] = {
219 .mode = RS_LEGACY,
220 .ant = ANT_A,
221 .next_columns = {
222 RS_COLUMN_LEGACY_ANT_B,
223 RS_COLUMN_SISO_ANT_A,
224 RS_COLUMN_MIMO2,
225 RS_COLUMN_INVALID,
226 RS_COLUMN_INVALID,
227 RS_COLUMN_INVALID,
228 RS_COLUMN_INVALID,
229 },
230 .checks = {
231 rs_ant_allow,
232 },
233 },
234 [RS_COLUMN_LEGACY_ANT_B] = {
235 .mode = RS_LEGACY,
236 .ant = ANT_B,
237 .next_columns = {
238 RS_COLUMN_LEGACY_ANT_A,
239 RS_COLUMN_SISO_ANT_B,
240 RS_COLUMN_MIMO2,
241 RS_COLUMN_INVALID,
242 RS_COLUMN_INVALID,
243 RS_COLUMN_INVALID,
244 RS_COLUMN_INVALID,
245 },
246 .checks = {
247 rs_ant_allow,
248 },
249 },
250 [RS_COLUMN_SISO_ANT_A] = {
251 .mode = RS_SISO,
252 .ant = ANT_A,
253 .next_columns = {
254 RS_COLUMN_SISO_ANT_B,
255 RS_COLUMN_MIMO2,
256 RS_COLUMN_SISO_ANT_A_SGI,
257 RS_COLUMN_LEGACY_ANT_A,
258 RS_COLUMN_LEGACY_ANT_B,
259 RS_COLUMN_INVALID,
260 RS_COLUMN_INVALID,
261 },
262 .checks = {
263 rs_siso_allow,
264 rs_ant_allow,
265 },
266 },
267 [RS_COLUMN_SISO_ANT_B] = {
268 .mode = RS_SISO,
269 .ant = ANT_B,
270 .next_columns = {
271 RS_COLUMN_SISO_ANT_A,
272 RS_COLUMN_MIMO2,
273 RS_COLUMN_SISO_ANT_B_SGI,
274 RS_COLUMN_LEGACY_ANT_A,
275 RS_COLUMN_LEGACY_ANT_B,
276 RS_COLUMN_INVALID,
277 RS_COLUMN_INVALID,
278 },
279 .checks = {
280 rs_siso_allow,
281 rs_ant_allow,
282 },
283 },
284 [RS_COLUMN_SISO_ANT_A_SGI] = {
285 .mode = RS_SISO,
286 .ant = ANT_A,
287 .sgi = true,
288 .next_columns = {
289 RS_COLUMN_SISO_ANT_B_SGI,
290 RS_COLUMN_MIMO2_SGI,
291 RS_COLUMN_SISO_ANT_A,
292 RS_COLUMN_LEGACY_ANT_A,
293 RS_COLUMN_LEGACY_ANT_B,
294 RS_COLUMN_INVALID,
295 RS_COLUMN_INVALID,
296 },
297 .checks = {
298 rs_siso_allow,
299 rs_ant_allow,
300 rs_sgi_allow,
301 },
302 },
303 [RS_COLUMN_SISO_ANT_B_SGI] = {
304 .mode = RS_SISO,
305 .ant = ANT_B,
306 .sgi = true,
307 .next_columns = {
308 RS_COLUMN_SISO_ANT_A_SGI,
309 RS_COLUMN_MIMO2_SGI,
310 RS_COLUMN_SISO_ANT_B,
311 RS_COLUMN_LEGACY_ANT_A,
312 RS_COLUMN_LEGACY_ANT_B,
313 RS_COLUMN_INVALID,
314 RS_COLUMN_INVALID,
315 },
316 .checks = {
317 rs_siso_allow,
318 rs_ant_allow,
319 rs_sgi_allow,
320 },
321 },
322 [RS_COLUMN_MIMO2] = {
323 .mode = RS_MIMO2,
324 .ant = ANT_AB,
325 .next_columns = {
326 RS_COLUMN_SISO_ANT_A,
327 RS_COLUMN_MIMO2_SGI,
328 RS_COLUMN_LEGACY_ANT_A,
329 RS_COLUMN_LEGACY_ANT_B,
330 RS_COLUMN_INVALID,
331 RS_COLUMN_INVALID,
332 RS_COLUMN_INVALID,
333 },
334 .checks = {
335 rs_mimo_allow,
336 },
337 },
338 [RS_COLUMN_MIMO2_SGI] = {
339 .mode = RS_MIMO2,
340 .ant = ANT_AB,
341 .sgi = true,
342 .next_columns = {
343 RS_COLUMN_SISO_ANT_A_SGI,
344 RS_COLUMN_MIMO2,
345 RS_COLUMN_LEGACY_ANT_A,
346 RS_COLUMN_LEGACY_ANT_B,
347 RS_COLUMN_INVALID,
348 RS_COLUMN_INVALID,
349 RS_COLUMN_INVALID,
350 },
351 .checks = {
352 rs_mimo_allow,
353 rs_sgi_allow,
354 },
355 },
356};
357
358static inline u8 rs_extract_rate(u32 rate_n_flags)
359{
360 /* also works for HT because bits 7:6 are zero there */
361 return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK);
362}
363
364static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
365{
366 int idx = 0;
367
368 if (rate_n_flags & RATE_MCS_HT_MSK) {
369 idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
370 idx += IWL_RATE_MCS_0_INDEX;
371
 372		/* skip 9M, not supported in HT */
373 if (idx >= IWL_RATE_9M_INDEX)
374 idx += 1;
375 if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
376 return idx;
377 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
378 idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
379 idx += IWL_RATE_MCS_0_INDEX;
380
 381		/* skip 9M, not supported in VHT */
382 if (idx >= IWL_RATE_9M_INDEX)
383 idx++;
384 if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
385 return idx;
386 } else {
387 /* legacy rate format, search for match in table */
388
389 u8 legacy_rate = rs_extract_rate(rate_n_flags);
390 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
391 if (iwl_rates[idx].plcp == legacy_rate)
392 return idx;
393 }
394
395 return IWL_RATE_INVALID;
396}
397
398static void rs_rate_scale_perform(struct iwl_mvm *mvm,
399 struct ieee80211_sta *sta,
400 struct iwl_lq_sta *lq_sta,
401 int tid);
402static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
403 struct ieee80211_sta *sta,
404 struct iwl_lq_sta *lq_sta,
405 const struct rs_rate *initial_rate);
406static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
407
408/**
409 * The following tables contain the expected throughput metrics for all rates
410 *
411 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
412 *
413 * where invalid entries are zeros.
414 *
415 * CCK rates are only valid in legacy table and will only be used in G
416 * (2.4 GHz) band.
417 */
418
419static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
420 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
421};
422
423/* Expected TpT tables. 4 indexes:
424 * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
425 */
426static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
427 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
428 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
429 {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
430 {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
431};
432
433static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
434 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
435 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
436 {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
437 {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
438};
439
440static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
441 {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
442 {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
443 {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
444 {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
445};
446
447static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
448 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
449 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
450 {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
451 {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
452};
453
454static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
455 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
456 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
457 {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
458 {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
459};
460
461static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
462 {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
463 {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
464 {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
465 {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
466};
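/* Minimal sketch, not part of the driver: a hypothetical helper showing how
 * the four rows of each table above are indexed.  rs_get_expected_tpt_table()
 * below open-codes the same selection with an if/else chain.
 */
static inline const u16 *rs_example_pick_tpt_row(
			const u16 (*tbl)[IWL_RATE_COUNT], bool sgi, bool agg)
{
	/* row 0: NGI, row 1: SGI, row 2: AGG+NGI, row 3: AGG+SGI */
	return tbl[(agg ? 2 : 0) + (sgi ? 1 : 0)];
}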
467
468/* mbps, mcs */
469static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
470 { "1", "BPSK DSSS"},
471 { "2", "QPSK DSSS"},
472 {"5.5", "BPSK CCK"},
473 { "11", "QPSK CCK"},
474 { "6", "BPSK 1/2"},
475 { "9", "BPSK 1/2"},
476 { "12", "QPSK 1/2"},
477 { "18", "QPSK 3/4"},
478 { "24", "16QAM 1/2"},
479 { "36", "16QAM 3/4"},
480 { "48", "64QAM 2/3"},
481 { "54", "64QAM 3/4"},
482 { "60", "64QAM 5/6"},
483};
484
485#define MCS_INDEX_PER_STREAM (8)
486
487static const char *rs_pretty_ant(u8 ant)
488{
489 static const char * const ant_name[] = {
490 [ANT_NONE] = "None",
491 [ANT_A] = "A",
492 [ANT_B] = "B",
493 [ANT_AB] = "AB",
494 [ANT_C] = "C",
495 [ANT_AC] = "AC",
496 [ANT_BC] = "BC",
497 [ANT_ABC] = "ABC",
498 };
499
500 if (ant > ANT_ABC)
501 return "UNKNOWN";
502
503 return ant_name[ant];
504}
505
506static const char *rs_pretty_lq_type(enum iwl_table_type type)
507{
508 static const char * const lq_types[] = {
509 [LQ_NONE] = "NONE",
510 [LQ_LEGACY_A] = "LEGACY_A",
511 [LQ_LEGACY_G] = "LEGACY_G",
512 [LQ_HT_SISO] = "HT SISO",
513 [LQ_HT_MIMO2] = "HT MIMO",
514 [LQ_VHT_SISO] = "VHT SISO",
515 [LQ_VHT_MIMO2] = "VHT MIMO",
516 };
517
518 if (type < LQ_NONE || type >= LQ_MAX)
519 return "UNKNOWN";
520
521 return lq_types[type];
522}
523
524static char *rs_pretty_rate(const struct rs_rate *rate)
525{
526 static char buf[40];
527 static const char * const legacy_rates[] = {
528 [IWL_RATE_1M_INDEX] = "1M",
529 [IWL_RATE_2M_INDEX] = "2M",
530 [IWL_RATE_5M_INDEX] = "5.5M",
531 [IWL_RATE_11M_INDEX] = "11M",
532 [IWL_RATE_6M_INDEX] = "6M",
533 [IWL_RATE_9M_INDEX] = "9M",
534 [IWL_RATE_12M_INDEX] = "12M",
535 [IWL_RATE_18M_INDEX] = "18M",
536 [IWL_RATE_24M_INDEX] = "24M",
537 [IWL_RATE_36M_INDEX] = "36M",
538 [IWL_RATE_48M_INDEX] = "48M",
539 [IWL_RATE_54M_INDEX] = "54M",
540 };
541 static const char *const ht_vht_rates[] = {
542 [IWL_RATE_MCS_0_INDEX] = "MCS0",
543 [IWL_RATE_MCS_1_INDEX] = "MCS1",
544 [IWL_RATE_MCS_2_INDEX] = "MCS2",
545 [IWL_RATE_MCS_3_INDEX] = "MCS3",
546 [IWL_RATE_MCS_4_INDEX] = "MCS4",
547 [IWL_RATE_MCS_5_INDEX] = "MCS5",
548 [IWL_RATE_MCS_6_INDEX] = "MCS6",
549 [IWL_RATE_MCS_7_INDEX] = "MCS7",
550 [IWL_RATE_MCS_8_INDEX] = "MCS8",
551 [IWL_RATE_MCS_9_INDEX] = "MCS9",
552 };
553 const char *rate_str;
554
555 if (is_type_legacy(rate->type))
556 rate_str = legacy_rates[rate->index];
557 else if (is_type_ht(rate->type) || is_type_vht(rate->type))
558 rate_str = ht_vht_rates[rate->index];
559 else
560 rate_str = "BAD_RATE";
561
562 sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
563 rs_pretty_ant(rate->ant), rate_str);
564 return buf;
565}
566
567static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
568 const char *prefix)
569{
570 IWL_DEBUG_RATE(mvm,
571 "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
572 prefix, rs_pretty_rate(rate), rate->bw,
573 rate->sgi, rate->ldpc, rate->stbc);
574}
575
576static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
577{
578 window->data = 0;
579 window->success_counter = 0;
580 window->success_ratio = IWL_INVALID_VALUE;
581 window->counter = 0;
582 window->average_tpt = IWL_INVALID_VALUE;
583}
584
585static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
586 struct iwl_scale_tbl_info *tbl)
587{
588 int i;
589
590 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
591 for (i = 0; i < IWL_RATE_COUNT; i++)
592 rs_rate_scale_clear_window(&tbl->win[i]);
593
594 for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
595 rs_rate_scale_clear_window(&tbl->tpc_win[i]);
596}
597
598static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
599{
600 return (ant_type & valid_antenna) == ant_type;
601}
602
603static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
604 struct iwl_lq_sta *lq_data, u8 tid,
605 struct ieee80211_sta *sta)
606{
607 int ret = -EAGAIN;
608
609 IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
610 sta->addr, tid);
611 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
612 if (ret == -EAGAIN) {
613 /*
 614		 * The driver and mac80211 are out of sync; this might be
 615		 * caused by reloading the firmware. Stop the Tx BA session
 616		 * here.
 617		 */
 618		IWL_ERR(mvm, "Failed to start Tx agg on tid: %d\n",
619 tid);
620 ieee80211_stop_tx_ba_session(sta, tid);
621 }
622 return ret;
623}
624
625static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid,
626 struct iwl_lq_sta *lq_data,
627 struct ieee80211_sta *sta)
628{
629 if (tid < IWL_MAX_TID_COUNT)
630 rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta);
631 else
632 IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
633 tid, IWL_MAX_TID_COUNT);
634}
635
636static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
637{
638 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
639 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
640 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
641}
642
643/*
 644 * Static helper to get the expected throughput from an iwl_scale_tbl_info;
 645 * it wraps a NULL pointer check on the expected_tpt table.
646 */
647static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
648{
649 if (tbl->expected_tpt)
650 return tbl->expected_tpt[rs_index];
651 return 0;
652}
653
654/**
655 * rs_collect_tx_data - Update the success/failure sliding window
656 *
657 * We keep a sliding window of the last 62 packets transmitted
658 * at this rate. window->data contains the bitmask of successful
659 * packets.
660 */
661static int _rs_collect_tx_data(struct iwl_mvm *mvm,
662 struct iwl_scale_tbl_info *tbl,
663 int scale_index, int attempts, int successes,
664 struct iwl_rate_scale_data *window)
665{
666 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
667 s32 fail_count, tpt;
668
669 /* Get expected throughput */
670 tpt = get_expected_tpt(tbl, scale_index);
671
672 /*
673 * Keep track of only the latest 62 tx frame attempts in this rate's
674 * history window; anything older isn't really relevant any more.
675 * If we have filled up the sliding window, drop the oldest attempt;
676 * if the oldest attempt (highest bit in bitmap) shows "success",
677 * subtract "1" from the success counter (this is the main reason
678 * we keep these bitmaps!).
679 */
680 while (attempts > 0) {
681 if (window->counter >= IWL_RATE_MAX_WINDOW) {
682 /* remove earliest */
683 window->counter = IWL_RATE_MAX_WINDOW - 1;
684
685 if (window->data & mask) {
686 window->data &= ~mask;
687 window->success_counter--;
688 }
689 }
690
691 /* Increment frames-attempted counter */
692 window->counter++;
693
694 /* Shift bitmap by one frame to throw away oldest history */
695 window->data <<= 1;
696
697 /* Mark the most recent #successes attempts as successful */
698 if (successes > 0) {
699 window->success_counter++;
700 window->data |= 0x1;
701 successes--;
702 }
703
704 attempts--;
705 }
706
707 /* Calculate current success ratio, avoid divide-by-0! */
708 if (window->counter > 0)
709 window->success_ratio = 128 * (100 * window->success_counter)
710 / window->counter;
711 else
712 window->success_ratio = IWL_INVALID_VALUE;
713
714 fail_count = window->counter - window->success_counter;
715
716 /* Calculate average throughput, if we have enough history. */
717 if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) ||
718 (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH))
719 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
720 else
721 window->average_tpt = IWL_INVALID_VALUE;
722
723 return 0;
724}
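/* Worked example, not part of the driver, of the fixed-point arithmetic
 * above.  With success_ratio = 6400 (50%) and an expected tpt of 100,
 * average_tpt = (6400 * 100 + 64) / 128 = 5000; at a 100% success ratio
 * (12800) it would be exactly 100 * tpt.  Throughput is therefore tracked at
 * 100x the table units, which is why callers such as rs_get_next_column()
 * divide last_tpt by 100 before comparing it against the expected-throughput
 * tables.
 */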
725
726static int rs_collect_tx_data(struct iwl_mvm *mvm,
727 struct iwl_lq_sta *lq_sta,
728 struct iwl_scale_tbl_info *tbl,
729 int scale_index, int attempts, int successes,
730 u8 reduced_txp)
731{
732 struct iwl_rate_scale_data *window = NULL;
733 int ret;
734
735 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
736 return -EINVAL;
737
738 if (tbl->column != RS_COLUMN_INVALID) {
739 struct lq_sta_pers *pers = &lq_sta->pers;
740
741 pers->tx_stats[tbl->column][scale_index].total += attempts;
742 pers->tx_stats[tbl->column][scale_index].success += successes;
743 }
744
745 /* Select window for current tx bit rate */
746 window = &(tbl->win[scale_index]);
747
748 ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
749 window);
750 if (ret)
751 return ret;
752
753 if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
754 return -EINVAL;
755
756 window = &tbl->tpc_win[reduced_txp];
757 return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
758 window);
759}
760
761/* Convert rs_rate object into ucode rate bitmask */
762static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
763 struct rs_rate *rate)
764{
765 u32 ucode_rate = 0;
766 int index = rate->index;
767
768 ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
769 RATE_MCS_ANT_ABC_MSK);
770
771 if (is_legacy(rate)) {
772 ucode_rate |= iwl_rates[index].plcp;
773 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
774 ucode_rate |= RATE_MCS_CCK_MSK;
775 return ucode_rate;
776 }
777
778 if (is_ht(rate)) {
779 if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
780 IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
781 index = IWL_LAST_HT_RATE;
782 }
783 ucode_rate |= RATE_MCS_HT_MSK;
784
785 if (is_ht_siso(rate))
786 ucode_rate |= iwl_rates[index].plcp_ht_siso;
787 else if (is_ht_mimo2(rate))
788 ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
789 else
790 WARN_ON_ONCE(1);
791 } else if (is_vht(rate)) {
792 if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
793 IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
794 index = IWL_LAST_VHT_RATE;
795 }
796 ucode_rate |= RATE_MCS_VHT_MSK;
797 if (is_vht_siso(rate))
798 ucode_rate |= iwl_rates[index].plcp_vht_siso;
799 else if (is_vht_mimo2(rate))
800 ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
801 else
802 WARN_ON_ONCE(1);
803
804 } else {
805 IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
806 }
807
808 if (is_siso(rate) && rate->stbc) {
809 /* To enable STBC we need to set both a flag and ANT_AB */
810 ucode_rate |= RATE_MCS_ANT_AB_MSK;
811 ucode_rate |= RATE_MCS_VHT_STBC_MSK;
812 }
813
814 ucode_rate |= rate->bw;
815 if (rate->sgi)
816 ucode_rate |= RATE_MCS_SGI_MSK;
817 if (rate->ldpc)
818 ucode_rate |= RATE_MCS_LDPC_MSK;
819
820 return ucode_rate;
821}
822
823/* Convert a ucode rate into an rs_rate object */
824static int rs_rate_from_ucode_rate(const u32 ucode_rate,
825 enum ieee80211_band band,
826 struct rs_rate *rate)
827{
828 u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
829 u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
830 u8 nss;
831
832 memset(rate, 0, sizeof(*rate));
833 rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);
834
835 if (rate->index == IWL_RATE_INVALID)
836 return -EINVAL;
837
838 rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
839
840 /* Legacy */
841 if (!(ucode_rate & RATE_MCS_HT_MSK) &&
842 !(ucode_rate & RATE_MCS_VHT_MSK)) {
843 if (num_of_ant == 1) {
844 if (band == IEEE80211_BAND_5GHZ)
845 rate->type = LQ_LEGACY_A;
846 else
847 rate->type = LQ_LEGACY_G;
848 }
849
850 return 0;
851 }
852
853 /* HT or VHT */
854 if (ucode_rate & RATE_MCS_SGI_MSK)
855 rate->sgi = true;
856 if (ucode_rate & RATE_MCS_LDPC_MSK)
857 rate->ldpc = true;
858 if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
859 rate->stbc = true;
860 if (ucode_rate & RATE_MCS_BF_MSK)
861 rate->bfer = true;
862
863 rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
864
865 if (ucode_rate & RATE_MCS_HT_MSK) {
866 nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
867 RATE_HT_MCS_NSS_POS) + 1;
868
869 if (nss == 1) {
870 rate->type = LQ_HT_SISO;
871 WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
872 "stbc %d bfer %d",
873 rate->stbc, rate->bfer);
874 } else if (nss == 2) {
875 rate->type = LQ_HT_MIMO2;
876 WARN_ON_ONCE(num_of_ant != 2);
877 } else {
878 WARN_ON_ONCE(1);
879 }
880 } else if (ucode_rate & RATE_MCS_VHT_MSK) {
881 nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
882 RATE_VHT_MCS_NSS_POS) + 1;
883
884 if (nss == 1) {
885 rate->type = LQ_VHT_SISO;
886 WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
887 "stbc %d bfer %d",
888 rate->stbc, rate->bfer);
889 } else if (nss == 2) {
890 rate->type = LQ_VHT_MIMO2;
891 WARN_ON_ONCE(num_of_ant != 2);
892 } else {
893 WARN_ON_ONCE(1);
894 }
895 }
896
897 WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160);
898 WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
899 !is_vht(rate));
900
901 return 0;
902}
903
904/* switch to another antenna/antennas and return 1 */
905/* if no other valid antenna found, return 0 */
906static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
907{
908 u8 new_ant_type;
909
910 if (!rate->ant || rate->ant > ANT_ABC)
911 return 0;
912
913 if (!rs_is_valid_ant(valid_ant, rate->ant))
914 return 0;
915
916 new_ant_type = ant_toggle_lookup[rate->ant];
917
918 while ((new_ant_type != rate->ant) &&
919 !rs_is_valid_ant(valid_ant, new_ant_type))
920 new_ant_type = ant_toggle_lookup[new_ant_type];
921
922 if (new_ant_type == rate->ant)
923 return 0;
924
925 rate->ant = new_ant_type;
926
927 return 1;
928}
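/* Worked example, not part of the driver.  With valid_ant == ANT_AB and a
 * rate currently on ANT_A, ant_toggle_lookup[] gives ANT_A -> ANT_B, which
 * is valid, so the rate toggles to ANT_B.  Starting from ANT_B the walk is
 * ANT_B -> ANT_C (not valid for this chip) -> ANT_A, so it toggles back to
 * ANT_A.  If the walk returns to the original antenna without finding a
 * different valid one, the function returns 0 and leaves the rate unchanged.
 */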
929
930static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
931 struct rs_rate *rate)
932{
933 if (is_legacy(rate))
934 return lq_sta->active_legacy_rate;
935 else if (is_siso(rate))
936 return lq_sta->active_siso_rate;
937 else if (is_mimo2(rate))
938 return lq_sta->active_mimo2_rate;
939
940 WARN_ON_ONCE(1);
941 return 0;
942}
943
944static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
945 int rate_type)
946{
947 u8 high = IWL_RATE_INVALID;
948 u8 low = IWL_RATE_INVALID;
949
 950	/* For 802.11a or HT/VHT, walk to the next literally adjacent rate
 951	 * in the rate table */
952 if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
953 int i;
954 u32 mask;
955
956 /* Find the previous rate that is in the rate mask */
957 i = index - 1;
958 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
959 if (rate_mask & mask) {
960 low = i;
961 break;
962 }
963 }
964
965 /* Find the next rate that is in the rate mask */
966 i = index + 1;
967 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
968 if (rate_mask & mask) {
969 high = i;
970 break;
971 }
972 }
973
974 return (high << 8) | low;
975 }
976
977 low = index;
978 while (low != IWL_RATE_INVALID) {
979 low = iwl_rates[low].prev_rs;
980 if (low == IWL_RATE_INVALID)
981 break;
982 if (rate_mask & (1 << low))
983 break;
984 }
985
986 high = index;
987 while (high != IWL_RATE_INVALID) {
988 high = iwl_rates[high].next_rs;
989 if (high == IWL_RATE_INVALID)
990 break;
991 if (rate_mask & (1 << high))
992 break;
993 }
994
995 return (high << 8) | low;
996}
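/* Illustration only, not part of the driver.  Callers unpack the packed
 * return value as:
 *
 *   u16 high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate_type);
 *   u8 low  = high_low & 0xff;
 *   u8 high = (high_low >> 8) & 0xff;
 *
 * as done in rs_get_lower_rate_in_column() and rs_get_best_rate().  A byte
 * equal to IWL_RATE_INVALID means there is no valid adjacent rate in that
 * direction.
 */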
997
998static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
999 struct rs_rate *rate)
1000{
1001 return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
1002}
1003
1004/* Get the next supported lower rate in the current column.
1005 * Return true if bottom rate in the current column was reached
1006 */
1007static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
1008 struct rs_rate *rate)
1009{
1010 u8 low;
1011 u16 high_low;
1012 u16 rate_mask;
1013 struct iwl_mvm *mvm = lq_sta->pers.drv;
1014
1015 rate_mask = rs_get_supported_rates(lq_sta, rate);
1016 high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
1017 rate->type);
1018 low = high_low & 0xff;
1019
1020 /* Bottom rate of column reached */
1021 if (low == IWL_RATE_INVALID)
1022 return true;
1023
1024 rate->index = low;
1025 return false;
1026}
1027
1028/* Get the next rate to use following a column downgrade */
1029static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
1030 struct rs_rate *rate)
1031{
1032 struct iwl_mvm *mvm = lq_sta->pers.drv;
1033
1034 if (is_legacy(rate)) {
1035 /* No column to downgrade from Legacy */
1036 return;
1037 } else if (is_siso(rate)) {
1038 /* Downgrade to Legacy if we were in SISO */
1039 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1040 rate->type = LQ_LEGACY_A;
1041 else
1042 rate->type = LQ_LEGACY_G;
1043
1044 rate->bw = RATE_MCS_CHAN_WIDTH_20;
1045
1046 WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
1047 rate->index > IWL_RATE_MCS_9_INDEX);
1048
1049 rate->index = rs_ht_to_legacy[rate->index];
1050 rate->ldpc = false;
1051 } else {
1052 /* Downgrade to SISO with same MCS if in MIMO */
1053 rate->type = is_vht_mimo2(rate) ?
1054 LQ_VHT_SISO : LQ_HT_SISO;
1055 }
1056
1057 if (num_of_ant(rate->ant) > 1)
1058 rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
1059
1060 /* Relevant in both switching to SISO or Legacy */
1061 rate->sgi = false;
1062
1063 if (!rs_rate_supported(lq_sta, rate))
1064 rs_get_lower_rate_in_column(lq_sta, rate);
1065}
1066
1067/* Check if both rates are identical
1068 * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
1069 * with a rate indicating STBC/BFER and ANT_AB.
1070 */
1071static inline bool rs_rate_equal(struct rs_rate *a,
1072 struct rs_rate *b,
1073 bool allow_ant_mismatch)
1074
1075{
1076 bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
1077 (a->bfer == b->bfer);
1078
1079 if (allow_ant_mismatch) {
1080 if (a->stbc || a->bfer) {
1081 WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
1082 a->stbc, a->bfer, a->ant);
1083 ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
1084 } else if (b->stbc || b->bfer) {
1085 WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
1086 b->stbc, b->bfer, b->ant);
1087 ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
1088 }
1089 }
1090
1091 return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
1092 (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
1093}
1094
1095/* Check if both rates share the same column */
1096static inline bool rs_rate_column_match(struct rs_rate *a,
1097 struct rs_rate *b)
1098{
1099 bool ant_match;
1100
1101 if (a->stbc || a->bfer)
1102 ant_match = (b->ant == ANT_A || b->ant == ANT_B);
1103 else
1104 ant_match = (a->ant == b->ant);
1105
1106 return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi)
1107 && ant_match;
1108}
1109
1110static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
1111{
1112 if (is_legacy(rate)) {
1113 if (rate->ant == ANT_A)
1114 return RS_COLUMN_LEGACY_ANT_A;
1115
1116 if (rate->ant == ANT_B)
1117 return RS_COLUMN_LEGACY_ANT_B;
1118
1119 goto err;
1120 }
1121
1122 if (is_siso(rate)) {
1123 if (rate->ant == ANT_A || rate->stbc || rate->bfer)
1124 return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
1125 RS_COLUMN_SISO_ANT_A;
1126
1127 if (rate->ant == ANT_B)
1128 return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
1129 RS_COLUMN_SISO_ANT_B;
1130
1131 goto err;
1132 }
1133
1134 if (is_mimo(rate))
1135 return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
1136
1137err:
1138 return RS_COLUMN_INVALID;
1139}
1140
1141static u8 rs_get_tid(struct ieee80211_hdr *hdr)
1142{
1143 u8 tid = IWL_MAX_TID_COUNT;
1144
1145 if (ieee80211_is_data_qos(hdr->frame_control)) {
1146 u8 *qc = ieee80211_get_qos_ctl(hdr);
1147 tid = qc[0] & 0xf;
1148 }
1149
1150 if (unlikely(tid > IWL_MAX_TID_COUNT))
1151 tid = IWL_MAX_TID_COUNT;
1152
1153 return tid;
1154}
1155
1156void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1157 int tid, struct ieee80211_tx_info *info)
1158{
1159 int legacy_success;
1160 int retries;
1161 int i;
1162 struct iwl_lq_cmd *table;
1163 u32 lq_hwrate;
1164 struct rs_rate lq_rate, tx_resp_rate;
1165 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
1166 u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
1167 u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
1168 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1169 struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
1170 bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
1171 IWL_UCODE_TLV_API_LQ_SS_PARAMS);
1172
1173 /* Treat uninitialized rate scaling data same as non-existing. */
1174 if (!lq_sta) {
1175 IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
1176 return;
1177 } else if (!lq_sta->pers.drv) {
1178 IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
1179 return;
1180 }
1181
1182 /* This packet was aggregated but doesn't carry status info */
1183 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
1184 !(info->flags & IEEE80211_TX_STAT_AMPDU))
1185 return;
1186
1187 rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
1188
1189#ifdef CONFIG_MAC80211_DEBUGFS
 1190	/* Disable the last-tx check if we are debugging with a fixed rate,
 1191	 * but still update the tx stats */
1192 if (lq_sta->pers.dbg_fixed_rate) {
1193 int index = tx_resp_rate.index;
1194 enum rs_column column;
1195 int attempts, success;
1196
1197 column = rs_get_column_from_rate(&tx_resp_rate);
1198 if (WARN_ONCE(column == RS_COLUMN_INVALID,
1199 "Can't map rate 0x%x to column",
1200 tx_resp_hwrate))
1201 return;
1202
1203 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1204 attempts = info->status.ampdu_len;
1205 success = info->status.ampdu_ack_len;
1206 } else {
1207 attempts = info->status.rates[0].count;
1208 success = !!(info->flags & IEEE80211_TX_STAT_ACK);
1209 }
1210
1211 lq_sta->pers.tx_stats[column][index].total += attempts;
1212 lq_sta->pers.tx_stats[column][index].success += success;
1213
1214 IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
1215 tx_resp_hwrate, success, attempts);
1216 return;
1217 }
1218#endif
1219
1220 if (time_after(jiffies,
1221 (unsigned long)(lq_sta->last_tx +
1222 (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
1223 int t;
1224
1225 IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
1226 for (t = 0; t < IWL_MAX_TID_COUNT; t++)
1227 ieee80211_stop_tx_ba_session(sta, t);
1228
1229 iwl_mvm_rs_rate_init(mvm, sta, info->band, false);
1230 return;
1231 }
1232 lq_sta->last_tx = jiffies;
1233
1234 /* Ignore this Tx frame response if its initial rate doesn't match
1235 * that of latest Link Quality command. There may be stragglers
1236 * from a previous Link Quality command, but we're no longer interested
1237 * in those; they're either from the "active" mode while we're trying
1238 * to check "search" mode, or a prior "search" mode after we've moved
1239 * to a new "search" mode (which might become the new "active" mode).
1240 */
1241 table = &lq_sta->lq;
1242 lq_hwrate = le32_to_cpu(table->rs_table[0]);
1243 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
1244
1245 /* Here we actually compare this rate to the latest LQ command */
1246 if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
1247 IWL_DEBUG_RATE(mvm,
1248 "initial tx resp rate 0x%x does not match 0x%x\n",
1249 tx_resp_hwrate, lq_hwrate);
1250
1251 /*
1252 * Since rates mis-match, the last LQ command may have failed.
1253 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
 1254		 * the driver.
1255 */
1256 lq_sta->missed_rate_counter++;
1257 if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
1258 lq_sta->missed_rate_counter = 0;
1259 IWL_DEBUG_RATE(mvm,
1260 "Too many rates mismatch. Send sync LQ. rs_state %d\n",
1261 lq_sta->rs_state);
1262 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
1263 }
1264 /* Regardless, ignore this status info for outdated rate */
1265 return;
1266 } else
1267 /* Rate did match, so reset the missed_rate_counter */
1268 lq_sta->missed_rate_counter = 0;
1269
1270 if (!lq_sta->search_better_tbl) {
1271 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1272 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1273 } else {
1274 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1275 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1276 }
1277
1278 if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
1279 IWL_DEBUG_RATE(mvm,
1280 "Neither active nor search matches tx rate\n");
1281 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1282 rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
1283 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1284 rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
1285 rs_dump_rate(mvm, &lq_rate, "ACTUAL");
1286
1287 /*
1288 * no matching table found, let's by-pass the data collection
1289 * and continue to perform rate scale to find the rate table
1290 */
1291 rs_stay_in_table(lq_sta, true);
1292 goto done;
1293 }
1294
1295 /*
1296 * Updating the frame history depends on whether packets were
1297 * aggregated.
1298 *
1299 * For aggregation, all packets were transmitted at the same rate, the
1300 * first index into rate scale table.
1301 */
1302 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1303 /* ampdu_ack_len = 0 marks no BA was received. In this case
1304 * treat it as a single frame loss as we don't want the success
1305 * ratio to dip too quickly because a BA wasn't received
1306 */
1307 if (info->status.ampdu_ack_len == 0)
1308 info->status.ampdu_len = 1;
1309
1310 rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index,
1311 info->status.ampdu_len,
1312 info->status.ampdu_ack_len,
1313 reduced_txp);
1314
1315 /* Update success/fail counts if not searching for new mode */
1316 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
1317 lq_sta->total_success += info->status.ampdu_ack_len;
1318 lq_sta->total_failed += (info->status.ampdu_len -
1319 info->status.ampdu_ack_len);
1320 }
1321 } else {
 1322		/* For legacy, update frame history for each Tx retry. */
1323 retries = info->status.rates[0].count - 1;
1324 /* HW doesn't send more than 15 retries */
1325 retries = min(retries, 15);
1326
1327 /* The last transmission may have been successful */
1328 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
1329 /* Collect data for each rate used during failed TX attempts */
1330 for (i = 0; i <= retries; ++i) {
1331 lq_hwrate = le32_to_cpu(table->rs_table[i]);
1332 rs_rate_from_ucode_rate(lq_hwrate, info->band,
1333 &lq_rate);
1334 /*
1335 * Only collect stats if retried rate is in the same RS
1336 * table as active/search.
1337 */
1338 if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
1339 tmp_tbl = curr_tbl;
1340 else if (rs_rate_column_match(&lq_rate,
1341 &other_tbl->rate))
1342 tmp_tbl = other_tbl;
1343 else
1344 continue;
1345
1346 rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index,
1347 1, i < retries ? 0 : legacy_success,
1348 reduced_txp);
1349 }
1350
1351 /* Update success/fail counts if not searching for new mode */
1352 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
1353 lq_sta->total_success += legacy_success;
1354 lq_sta->total_failed += retries + (1 - legacy_success);
1355 }
1356 }
1357 /* The last TX rate is cached in lq_sta; it's set in if/else above */
1358 lq_sta->last_rate_n_flags = lq_hwrate;
1359 IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
1360done:
1361 /* See if there's a better rate or modulation mode to try. */
1362 if (sta->supp_rates[info->band])
1363 rs_rate_scale_perform(mvm, sta, lq_sta, tid);
1364}
1365
1366/*
1367 * mac80211 sends us Tx status
1368 */
1369static void rs_mac80211_tx_status(void *mvm_r,
1370 struct ieee80211_supported_band *sband,
1371 struct ieee80211_sta *sta, void *priv_sta,
1372 struct sk_buff *skb)
1373{
1374 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1375 struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
1376 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1377 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1378
1379 if (!iwl_mvm_sta_from_mac80211(sta)->vif)
1380 return;
1381
1382 if (!ieee80211_is_data(hdr->frame_control) ||
1383 info->flags & IEEE80211_TX_CTL_NO_ACK)
1384 return;
1385
1386 iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info);
1387}
1388
1389/*
1390 * Begin a period of staying with a selected modulation mode.
1391 * Set "stay_in_tbl" flag to prevent any mode switches.
1392 * Set frame tx success limits according to legacy vs. high-throughput,
1393 * and reset overall (spanning all rates) tx success history statistics.
 1394 * These control how long we stay using the same modulation mode before
1395 * searching for a new mode.
1396 */
1397static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
1398 struct iwl_lq_sta *lq_sta)
1399{
1400 IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
1401 lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
1402 if (is_legacy) {
1403 lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT;
1404 lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT;
1405 lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT;
1406 } else {
1407 lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT;
1408 lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT;
1409 lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT;
1410 }
1411 lq_sta->table_count = 0;
1412 lq_sta->total_failed = 0;
1413 lq_sta->total_success = 0;
1414 lq_sta->flush_timer = jiffies;
1415 lq_sta->visited_columns = 0;
1416}
1417
1418static inline int rs_get_max_rate_from_mask(unsigned long rate_mask)
1419{
1420 if (rate_mask)
1421 return find_last_bit(&rate_mask, BITS_PER_LONG);
1422 return IWL_RATE_INVALID;
1423}
1424
1425static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
1426 const struct rs_tx_column *column)
1427{
1428 switch (column->mode) {
1429 case RS_LEGACY:
1430 return lq_sta->max_legacy_rate_idx;
1431 case RS_SISO:
1432 return lq_sta->max_siso_rate_idx;
1433 case RS_MIMO2:
1434 return lq_sta->max_mimo2_rate_idx;
1435 default:
1436 WARN_ON_ONCE(1);
1437 }
1438
1439 return lq_sta->max_legacy_rate_idx;
1440}
1441
1442static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1443 const struct rs_tx_column *column,
1444 u32 bw)
1445{
1446 /* Used to choose among HT tables */
1447 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1448
1449 if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
1450 column->mode != RS_SISO &&
1451 column->mode != RS_MIMO2))
1452 return expected_tpt_legacy;
1453
1454 /* Legacy rates have only one table */
1455 if (column->mode == RS_LEGACY)
1456 return expected_tpt_legacy;
1457
1458 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1459 /* Choose among many HT tables depending on number of streams
1460 * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
1461 * status */
1462 if (column->mode == RS_SISO) {
1463 switch (bw) {
1464 case RATE_MCS_CHAN_WIDTH_20:
1465 ht_tbl_pointer = expected_tpt_siso_20MHz;
1466 break;
1467 case RATE_MCS_CHAN_WIDTH_40:
1468 ht_tbl_pointer = expected_tpt_siso_40MHz;
1469 break;
1470 case RATE_MCS_CHAN_WIDTH_80:
1471 ht_tbl_pointer = expected_tpt_siso_80MHz;
1472 break;
1473 default:
1474 WARN_ON_ONCE(1);
1475 }
1476 } else if (column->mode == RS_MIMO2) {
1477 switch (bw) {
1478 case RATE_MCS_CHAN_WIDTH_20:
1479 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1480 break;
1481 case RATE_MCS_CHAN_WIDTH_40:
1482 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1483 break;
1484 case RATE_MCS_CHAN_WIDTH_80:
1485 ht_tbl_pointer = expected_tpt_mimo2_80MHz;
1486 break;
1487 default:
1488 WARN_ON_ONCE(1);
1489 }
1490 } else {
1491 WARN_ON_ONCE(1);
1492 }
1493
1494 if (!column->sgi && !lq_sta->is_agg) /* Normal */
1495 return ht_tbl_pointer[0];
1496 else if (column->sgi && !lq_sta->is_agg) /* SGI */
1497 return ht_tbl_pointer[1];
1498 else if (!column->sgi && lq_sta->is_agg) /* AGG */
1499 return ht_tbl_pointer[2];
1500 else /* AGG+SGI */
1501 return ht_tbl_pointer[3];
1502}
1503
1504static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1505 struct iwl_scale_tbl_info *tbl)
1506{
1507 struct rs_rate *rate = &tbl->rate;
1508 const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
1509
1510 tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
1511}
1512
1513static s32 rs_get_best_rate(struct iwl_mvm *mvm,
1514 struct iwl_lq_sta *lq_sta,
1515 struct iwl_scale_tbl_info *tbl, /* "search" */
1516 unsigned long rate_mask, s8 index)
1517{
1518 struct iwl_scale_tbl_info *active_tbl =
1519 &(lq_sta->lq_info[lq_sta->active_tbl]);
1520 s32 success_ratio = active_tbl->win[index].success_ratio;
1521 u16 expected_current_tpt = active_tbl->expected_tpt[index];
1522 const u16 *tpt_tbl = tbl->expected_tpt;
1523 u16 high_low;
1524 u32 target_tpt;
1525 int rate_idx;
1526
1527 if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
1528 target_tpt = 100 * expected_current_tpt;
1529 IWL_DEBUG_RATE(mvm,
1530 "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
1531 success_ratio, target_tpt);
1532 } else {
1533 target_tpt = lq_sta->last_tpt;
1534 IWL_DEBUG_RATE(mvm,
1535 "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
1536 success_ratio, target_tpt);
1537 }
1538
1539 rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
1540
1541 while (rate_idx != IWL_RATE_INVALID) {
1542 if (target_tpt < (100 * tpt_tbl[rate_idx]))
1543 break;
1544
1545 high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
1546 tbl->rate.type);
1547
1548 rate_idx = (high_low >> 8) & 0xff;
1549 }
1550
1551 IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n",
1552 rate_idx, target_tpt,
1553 rate_idx != IWL_RATE_INVALID ?
1554 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
1555
1556 return rate_idx;
1557}
1558
1559static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
1560{
1561 if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
1562 return RATE_MCS_CHAN_WIDTH_80;
1563 else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
1564 return RATE_MCS_CHAN_WIDTH_40;
1565
1566 return RATE_MCS_CHAN_WIDTH_20;
1567}
1568
1569/*
1570 * Check whether we should continue using same modulation mode, or
1571 * begin search for a new mode, based on:
1572 * 1) # tx successes or failures while using this mode
1573 * 2) # times calling this function
1574 * 3) elapsed time in this mode (not used, for now)
1575 */
1576static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1577{
1578 struct iwl_scale_tbl_info *tbl;
1579 int active_tbl;
1580 int flush_interval_passed = 0;
1581 struct iwl_mvm *mvm;
1582
1583 mvm = lq_sta->pers.drv;
1584 active_tbl = lq_sta->active_tbl;
1585
1586 tbl = &(lq_sta->lq_info[active_tbl]);
1587
1588 /* If we've been disallowing search, see if we should now allow it */
1589 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
1590 /* Elapsed time using current modulation mode */
1591 if (lq_sta->flush_timer)
1592 flush_interval_passed =
1593 time_after(jiffies,
1594 (unsigned long)(lq_sta->flush_timer +
1595 (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ)));
1596
1597 /*
1598 * Check if we should allow search for new modulation mode.
1599 * If many frames have failed or succeeded, or we've used
1600 * this same modulation for a long time, allow search, and
1601 * reset history stats that keep track of whether we should
1602 * allow a new search. Also (below) reset all bitmaps and
1603 * stats in active history.
1604 */
1605 if (force_search ||
1606 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1607 (lq_sta->total_success > lq_sta->max_success_limit) ||
1608 ((!lq_sta->search_better_tbl) &&
1609 (lq_sta->flush_timer) && (flush_interval_passed))) {
1610 IWL_DEBUG_RATE(mvm,
1611 "LQ: stay is expired %d %d %d\n",
1612 lq_sta->total_failed,
1613 lq_sta->total_success,
1614 flush_interval_passed);
1615
1616 /* Allow search for new mode */
1617 lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
1618 IWL_DEBUG_RATE(mvm,
1619 "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
1620 lq_sta->total_failed = 0;
1621 lq_sta->total_success = 0;
1622 lq_sta->flush_timer = 0;
1623 /* mark the current column as visited */
1624 lq_sta->visited_columns = BIT(tbl->column);
1625 /*
1626 * Else if we've used this modulation mode enough repetitions
1627 * (regardless of elapsed time or success/failure), reset
1628 * history bitmaps and rate-specific stats for all rates in
1629 * active table.
1630 */
1631 } else {
1632 lq_sta->table_count++;
1633 if (lq_sta->table_count >=
1634 lq_sta->table_count_limit) {
1635 lq_sta->table_count = 0;
1636
1637 IWL_DEBUG_RATE(mvm,
1638 "LQ: stay in table clear win\n");
1639 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1640 }
1641 }
1642
1643 /* If transitioning to allow "search", reset all history
1644 * bitmaps and stats in active table (this will become the new
1645 * "search" table). */
1646 if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
1647 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1648 }
1649 }
1650}
1651
1652/*
1653 * setup rate table in uCode
1654 */
1655static void rs_update_rate_tbl(struct iwl_mvm *mvm,
1656 struct ieee80211_sta *sta,
1657 struct iwl_lq_sta *lq_sta,
1658 struct iwl_scale_tbl_info *tbl)
1659{
1660 rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
1661 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
1662}
1663
1664static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
1665 struct ieee80211_sta *sta,
1666 struct iwl_lq_sta *lq_sta,
1667 struct iwl_scale_tbl_info *tbl,
1668 enum rs_action scale_action)
1669{
1670 if (sta->bandwidth != IEEE80211_STA_RX_BW_80)
1671 return false;
1672
1673 if (!is_vht_siso(&tbl->rate))
1674 return false;
1675
1676 if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
1677 (tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
1678 (scale_action == RS_ACTION_DOWNSCALE)) {
1679 tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
1680 tbl->rate.index = IWL_RATE_MCS_4_INDEX;
1681 IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n");
1682 goto tweaked;
1683 }
1684
1685 /* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is
1686 * sustainable, i.e. we're past the test window. We can't go back
 1687	 * if MCS5 has only just been tested, as that always happens right
 1688	 * after switching to 20Mhz MCS4 because the rate stats are cleared.
1689 */
1690 if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
1691 (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
1692 (scale_action == RS_ACTION_STAY)) ||
1693 ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
1694 (scale_action == RS_ACTION_UPSCALE)))) {
1695 tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
1696 tbl->rate.index = IWL_RATE_MCS_1_INDEX;
1697 IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n");
1698 goto tweaked;
1699 }
1700
1701 return false;
1702
1703tweaked:
1704 rs_set_expected_tpt_table(lq_sta, tbl);
1705 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1706 return true;
1707}
1708
1709static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1710 struct iwl_lq_sta *lq_sta,
1711 struct ieee80211_sta *sta,
1712 struct iwl_scale_tbl_info *tbl)
1713{
1714 int i, j, max_rate;
1715 enum rs_column next_col_id;
1716 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
1717 const struct rs_tx_column *next_col;
1718 allow_column_func_t allow_func;
1719 u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm);
1720 const u16 *expected_tpt_tbl;
1721 u16 tpt, max_expected_tpt;
1722
1723 for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
1724 next_col_id = curr_col->next_columns[i];
1725
1726 if (next_col_id == RS_COLUMN_INVALID)
1727 continue;
1728
1729 if (lq_sta->visited_columns & BIT(next_col_id)) {
1730 IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
1731 next_col_id);
1732 continue;
1733 }
1734
1735 next_col = &rs_tx_columns[next_col_id];
1736
1737 if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
1738 IWL_DEBUG_RATE(mvm,
1739 "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
1740 next_col_id, valid_ants, next_col->ant);
1741 continue;
1742 }
1743
1744 for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
1745 allow_func = next_col->checks[j];
1746 if (allow_func && !allow_func(mvm, sta, &tbl->rate,
1747 next_col))
1748 break;
1749 }
1750
1751 if (j != MAX_COLUMN_CHECKS) {
1752 IWL_DEBUG_RATE(mvm,
1753 "Skip column %d: not allowed (check %d failed)\n",
1754 next_col_id, j);
1755
1756 continue;
1757 }
1758
1759 tpt = lq_sta->last_tpt / 100;
1760 expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
1761 rs_bw_from_sta_bw(sta));
1762 if (WARN_ON_ONCE(!expected_tpt_tbl))
1763 continue;
1764
1765 max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
1766 if (max_rate == IWL_RATE_INVALID) {
1767 IWL_DEBUG_RATE(mvm,
1768 "Skip column %d: no rate is allowed in this column\n",
1769 next_col_id);
1770 continue;
1771 }
1772
1773 max_expected_tpt = expected_tpt_tbl[max_rate];
1774 if (tpt >= max_expected_tpt) {
1775 IWL_DEBUG_RATE(mvm,
1776 "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
1777 next_col_id, max_expected_tpt, tpt);
1778 continue;
1779 }
1780
1781 IWL_DEBUG_RATE(mvm,
1782 "Found potential column %d. Max expected %d current %d\n",
1783 next_col_id, max_expected_tpt, tpt);
1784 break;
1785 }
1786
1787 if (i == MAX_NEXT_COLUMNS)
1788 return RS_COLUMN_INVALID;
1789
1790 return next_col_id;
1791}
1792
1793static int rs_switch_to_column(struct iwl_mvm *mvm,
1794 struct iwl_lq_sta *lq_sta,
1795 struct ieee80211_sta *sta,
1796 enum rs_column col_id)
1797{
1798 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1799 struct iwl_scale_tbl_info *search_tbl =
1800 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1801 struct rs_rate *rate = &search_tbl->rate;
1802 const struct rs_tx_column *column = &rs_tx_columns[col_id];
1803 const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
1804 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1805 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1806 unsigned long rate_mask = 0;
1807 u32 rate_idx = 0;
1808
1809 memcpy(search_tbl, tbl, sz);
1810
1811 rate->sgi = column->sgi;
1812 rate->ant = column->ant;
1813
1814 if (column->mode == RS_LEGACY) {
1815 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1816 rate->type = LQ_LEGACY_A;
1817 else
1818 rate->type = LQ_LEGACY_G;
1819
1820 rate->bw = RATE_MCS_CHAN_WIDTH_20;
1821 rate->ldpc = false;
1822 rate_mask = lq_sta->active_legacy_rate;
1823 } else if (column->mode == RS_SISO) {
1824 rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
1825 rate_mask = lq_sta->active_siso_rate;
1826 } else if (column->mode == RS_MIMO2) {
1827 rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
1828 rate_mask = lq_sta->active_mimo2_rate;
1829 } else {
1830 WARN_ON_ONCE("Bad column mode");
1831 }
1832
1833 if (column->mode != RS_LEGACY) {
1834 rate->bw = rs_bw_from_sta_bw(sta);
1835 rate->ldpc = lq_sta->ldpc;
1836 }
1837
1838 search_tbl->column = col_id;
1839 rs_set_expected_tpt_table(lq_sta, search_tbl);
1840
1841 lq_sta->visited_columns |= BIT(col_id);
1842
1843 /* Get the best matching rate if we're changing modes. e.g.
1844 * SISO->MIMO, LEGACY->SISO, MIMO->SISO
1845 */
1846 if (curr_column->mode != column->mode) {
1847 rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
1848 rate_mask, rate->index);
1849
1850 if ((rate_idx == IWL_RATE_INVALID) ||
1851 !(BIT(rate_idx) & rate_mask)) {
1852 IWL_DEBUG_RATE(mvm,
1853 "can not switch with index %d"
1854 " rate mask %lx\n",
1855 rate_idx, rate_mask);
1856
1857 goto err;
1858 }
1859
1860 rate->index = rate_idx;
1861 }
1862
1863 IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
1864 col_id, rate->index);
1865
1866 return 0;
1867
1868err:
1869 rate->type = LQ_NONE;
1870 return -1;
1871}
1872
1873static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
1874 struct iwl_scale_tbl_info *tbl,
1875 s32 sr, int low, int high,
1876 int current_tpt,
1877 int low_tpt, int high_tpt)
1878{
1879 enum rs_action action = RS_ACTION_STAY;
1880
1881 if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) ||
1882 (current_tpt == 0)) {
1883 IWL_DEBUG_RATE(mvm,
1884 "Decrease rate because of low SR\n");
1885 return RS_ACTION_DOWNSCALE;
1886 }
1887
1888 if ((low_tpt == IWL_INVALID_VALUE) &&
1889 (high_tpt == IWL_INVALID_VALUE) &&
1890 (high != IWL_RATE_INVALID)) {
1891 IWL_DEBUG_RATE(mvm,
1892 "No data about high/low rates. Increase rate\n");
1893 return RS_ACTION_UPSCALE;
1894 }
1895
1896 if ((high_tpt == IWL_INVALID_VALUE) &&
1897 (high != IWL_RATE_INVALID) &&
1898 (low_tpt != IWL_INVALID_VALUE) &&
1899 (low_tpt < current_tpt)) {
1900 IWL_DEBUG_RATE(mvm,
1901 "No data about high rate and low rate is worse. Increase rate\n");
1902 return RS_ACTION_UPSCALE;
1903 }
1904
1905 if ((high_tpt != IWL_INVALID_VALUE) &&
1906 (high_tpt > current_tpt)) {
1907 IWL_DEBUG_RATE(mvm,
1908 "Higher rate is better. Increate rate\n");
1909 return RS_ACTION_UPSCALE;
1910 }
1911
1912 if ((low_tpt != IWL_INVALID_VALUE) &&
1913 (high_tpt != IWL_INVALID_VALUE) &&
1914 (low_tpt < current_tpt) &&
1915 (high_tpt < current_tpt)) {
1916 IWL_DEBUG_RATE(mvm,
1917 "Both high and low are worse. Maintain rate\n");
1918 return RS_ACTION_STAY;
1919 }
1920
1921 if ((low_tpt != IWL_INVALID_VALUE) &&
1922 (low_tpt > current_tpt)) {
1923 IWL_DEBUG_RATE(mvm,
1924 "Lower rate is better\n");
1925 action = RS_ACTION_DOWNSCALE;
1926 goto out;
1927 }
1928
1929 if ((low_tpt == IWL_INVALID_VALUE) &&
1930 (low != IWL_RATE_INVALID)) {
1931 IWL_DEBUG_RATE(mvm,
1932 "No data about lower rate\n");
1933 action = RS_ACTION_DOWNSCALE;
1934 goto out;
1935 }
1936
1937 IWL_DEBUG_RATE(mvm, "Maintain rate\n");
1938
1939out:
1940 if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
1941 if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
1942 IWL_DEBUG_RATE(mvm,
1943 "SR is above NO DECREASE. Avoid downscale\n");
1944 action = RS_ACTION_STAY;
1945 } else if (current_tpt > (100 * tbl->expected_tpt[low])) {
1946 IWL_DEBUG_RATE(mvm,
1947 "Current TPT is higher than max expected in low rate. Avoid downscale\n");
1948 action = RS_ACTION_STAY;
1949 } else {
1950 IWL_DEBUG_RATE(mvm, "Decrease rate\n");
1951 }
1952 }
1953
1954 return action;
1955}
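/* Summary of the decision logic above, not part of the driver:
 *
 *   - SR at or below FORCE_DECREASE, or no throughput yet       -> downscale
 *   - no data for either neighbour and a higher rate exists     -> upscale
 *   - a higher rate exists with no data, lower is worse         -> upscale
 *   - higher rate measured better than current                  -> upscale
 *   - both neighbours measured worse than current               -> stay
 *   - lower rate measured better than current                   -> downscale
 *   - no data about the lower rate                              -> downscale
 *   - otherwise                                                 -> stay
 *
 * A downscale (when a lower rate exists) is then cancelled if SR is at or
 * above NO_DECREASE, or if the current throughput already beats the lower
 * rate's expected throughput.
 */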
1956
1957static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1958 struct iwl_lq_sta *lq_sta)
1959{
1960 /* Our chip supports Tx STBC and the peer is an HT/VHT STA which
1961 * supports STBC of at least 1*SS
1962 */
1963 if (!lq_sta->stbc_capable)
1964 return false;
1965
1966 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
1967 return false;
1968
1969 return true;
1970}
1971
1972static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
1973 int *weaker, int *stronger)
1974{
1975 *weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP;
1976 if (*weaker > TPC_MAX_REDUCTION)
1977 *weaker = TPC_INVALID;
1978
1979 *stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP;
1980 if (*stronger < 0)
1981 *stronger = TPC_INVALID;
1982}
1983
1984static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1985 struct rs_rate *rate, enum ieee80211_band band)
1986{
1987 int index = rate->index;
1988 bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
1989 bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
1990 !vif->bss_conf.ps);
1991
1992 IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
1993 cam, sta_ps_disabled);
1994 /*
1995 * allow TPC only if power management is enabled, or BT coex
1996 * activity grade allows it and we are on 2.4 GHz.
1997 */
1998 if ((cam || sta_ps_disabled) &&
1999 !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
2000 return false;
2001
2002 IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
2003 if (is_legacy(rate))
2004 return index == IWL_RATE_54M_INDEX;
2005 if (is_ht(rate))
2006 return index == IWL_RATE_MCS_7_INDEX;
2007 if (is_vht(rate))
2008 return index == IWL_RATE_MCS_7_INDEX ||
2009 index == IWL_RATE_MCS_8_INDEX ||
2010 index == IWL_RATE_MCS_9_INDEX;
2011
2012 WARN_ON_ONCE(1);
2013 return false;
2014}
2015
2016enum tpc_action {
2017 TPC_ACTION_STAY,
2018 TPC_ACTION_DECREASE,
2019 TPC_ACTION_INCREASE,
2020 TPC_ACTION_NO_RESTRICTION,
2021};
2022
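/*
 * Decide the next Tx power control (TPC) step: reduce Tx power further
 * (decrease), restore some Tx power (increase), drop all restrictions,
 * or stay, based on the success ratio and on the throughput measured at
 * the current, weaker and stronger power reduction steps.
 */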
2023static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
2024 s32 sr, int weak, int strong,
2025 int current_tpt,
2026 int weak_tpt, int strong_tpt)
2027{
2028 /* stay until we have valid tpt */
2029 if (current_tpt == IWL_INVALID_VALUE) {
2030 IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
2031 return TPC_ACTION_STAY;
2032 }
2033
2034 /* Too many failures, increase txp */
2035 if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) ||
2036 current_tpt == 0) {
2037 IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
2038 return TPC_ACTION_NO_RESTRICTION;
2039 }
2040
2041 /* try decreasing first if applicable */
2042 if (weak != TPC_INVALID) {
2043 if (weak_tpt == IWL_INVALID_VALUE &&
2044 (strong_tpt == IWL_INVALID_VALUE ||
2045 current_tpt >= strong_tpt)) {
2046 IWL_DEBUG_RATE(mvm,
2047 "no weak txp measurement. decrease txp\n");
2048 return TPC_ACTION_DECREASE;
2049 }
2050
2051 if (weak_tpt > current_tpt) {
2052 IWL_DEBUG_RATE(mvm,
2053 "lower txp has better tpt. decrease txp\n");
2054 return TPC_ACTION_DECREASE;
2055 }
2056 }
2057
2058 /* next, increase if needed */
2059 if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
2060 strong != TPC_INVALID) {
2061 if (weak_tpt == IWL_INVALID_VALUE &&
2062 strong_tpt != IWL_INVALID_VALUE &&
2063 current_tpt < strong_tpt) {
2064 IWL_DEBUG_RATE(mvm,
2065 "higher txp has better tpt. increase txp\n");
2066 return TPC_ACTION_INCREASE;
2067 }
2068
2069 if (weak_tpt < current_tpt &&
2070 (strong_tpt == IWL_INVALID_VALUE ||
2071 strong_tpt > current_tpt)) {
2072 IWL_DEBUG_RATE(mvm,
2073 "lower txp has worse tpt. increase txp\n");
2074 return TPC_ACTION_INCREASE;
2075 }
2076 }
2077
2078 IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
2079 return TPC_ACTION_STAY;
2080}
2081
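/*
 * Run the Tx power control algorithm for the current table: gather the
 * per-step statistics from tbl->tpc_win, pick an action through
 * rs_get_tpc_action() and update lq.reduced_tpc accordingly. Returns
 * true when the LQ command needs to be re-sent to the firmware.
 */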
2082static bool rs_tpc_perform(struct iwl_mvm *mvm,
2083 struct ieee80211_sta *sta,
2084 struct iwl_lq_sta *lq_sta,
2085 struct iwl_scale_tbl_info *tbl)
2086{
2087 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2088 struct ieee80211_vif *vif = mvm_sta->vif;
2089 struct ieee80211_chanctx_conf *chanctx_conf;
2090 enum ieee80211_band band;
2091 struct iwl_rate_scale_data *window;
2092 struct rs_rate *rate = &tbl->rate;
2093 enum tpc_action action;
2094 s32 sr;
2095 u8 cur = lq_sta->lq.reduced_tpc;
2096 int current_tpt;
2097 int weak, strong;
2098 int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
2099
2100#ifdef CONFIG_MAC80211_DEBUGFS
2101 if (lq_sta->pers.dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
2102 IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
2103 lq_sta->pers.dbg_fixed_txp_reduction);
2104 lq_sta->lq.reduced_tpc = lq_sta->pers.dbg_fixed_txp_reduction;
2105 return cur != lq_sta->pers.dbg_fixed_txp_reduction;
2106 }
2107#endif
2108
2109 rcu_read_lock();
2110 chanctx_conf = rcu_dereference(vif->chanctx_conf);
2111 if (WARN_ON(!chanctx_conf))
2112 band = IEEE80211_NUM_BANDS;
2113 else
2114 band = chanctx_conf->def.chan->band;
2115 rcu_read_unlock();
2116
2117 if (!rs_tpc_allowed(mvm, vif, rate, band)) {
2118 IWL_DEBUG_RATE(mvm,
2119 "tpc is not allowed. remove txp restrictions\n");
2120 lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
2121 return cur != TPC_NO_REDUCTION;
2122 }
2123
2124 rs_get_adjacent_txp(mvm, cur, &weak, &strong);
2125
2126 /* Collect measured throughputs for current and adjacent rates */
2127 window = tbl->tpc_win;
2128 sr = window[cur].success_ratio;
2129 current_tpt = window[cur].average_tpt;
2130 if (weak != TPC_INVALID)
2131 weak_tpt = window[weak].average_tpt;
2132 if (strong != TPC_INVALID)
2133 strong_tpt = window[strong].average_tpt;
2134
2135 IWL_DEBUG_RATE(mvm,
2136 "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
2137 cur, current_tpt, sr, weak, strong,
2138 weak_tpt, strong_tpt);
2139
2140 action = rs_get_tpc_action(mvm, sr, weak, strong,
2141 current_tpt, weak_tpt, strong_tpt);
2142
2143 /* override actions if we are on the edge */
2144 if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
2145 IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
2146 action = TPC_ACTION_STAY;
2147 } else if (strong == TPC_INVALID &&
2148 (action == TPC_ACTION_INCREASE ||
2149 action == TPC_ACTION_NO_RESTRICTION)) {
2150 IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
2151 action = TPC_ACTION_STAY;
2152 }
2153
2154 switch (action) {
2155 case TPC_ACTION_DECREASE:
2156 lq_sta->lq.reduced_tpc = weak;
2157 return true;
2158 case TPC_ACTION_INCREASE:
2159 lq_sta->lq.reduced_tpc = strong;
2160 return true;
2161 case TPC_ACTION_NO_RESTRICTION:
2162 lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
2163 return true;
2164 case TPC_ACTION_STAY:
2165 /* do nothing */
2166 break;
2167 }
2168 return false;
2169}
2170
2171/*
2172 * Do rate scaling and search for new modulation mode.
2173 */
2174static void rs_rate_scale_perform(struct iwl_mvm *mvm,
2175 struct ieee80211_sta *sta,
2176 struct iwl_lq_sta *lq_sta,
2177 int tid)
2178{
2179 int low = IWL_RATE_INVALID;
2180 int high = IWL_RATE_INVALID;
2181 int index;
2182 struct iwl_rate_scale_data *window = NULL;
2183 int current_tpt = IWL_INVALID_VALUE;
2184 int low_tpt = IWL_INVALID_VALUE;
2185 int high_tpt = IWL_INVALID_VALUE;
2186 u32 fail_count;
2187 enum rs_action scale_action = RS_ACTION_STAY;
2188 u16 rate_mask;
2189 u8 update_lq = 0;
2190 struct iwl_scale_tbl_info *tbl, *tbl1;
2191 u8 active_tbl = 0;
2192 u8 done_search = 0;
2193 u16 high_low;
2194 s32 sr;
2195 u8 prev_agg = lq_sta->is_agg;
2196 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
2197 struct iwl_mvm_tid_data *tid_data;
2198 struct rs_rate *rate;
2199
2200 lq_sta->is_agg = !!sta_priv->agg_tids;
2201
2202 /*
2203 * Select rate-scale / modulation-mode table to work with in
2204 * the rest of this function: "search" if searching for better
2205 * modulation mode, or "active" if doing rate scaling within a mode.
2206 */
2207 if (!lq_sta->search_better_tbl)
2208 active_tbl = lq_sta->active_tbl;
2209 else
2210 active_tbl = 1 - lq_sta->active_tbl;
2211
2212 tbl = &(lq_sta->lq_info[active_tbl]);
2213 rate = &tbl->rate;
2214
2215 if (prev_agg != lq_sta->is_agg) {
2216 IWL_DEBUG_RATE(mvm,
2217 "Aggregation changed: prev %d current %d. Update expected TPT table\n",
2218 prev_agg, lq_sta->is_agg);
2219 rs_set_expected_tpt_table(lq_sta, tbl);
2220 rs_rate_scale_clear_tbl_windows(mvm, tbl);
2221 }
2222
2223 /* current tx rate */
2224 index = rate->index;
2225
2226 /* rates available for this association, and for modulation mode */
2227 rate_mask = rs_get_supported_rates(lq_sta, rate);
2228
2229 if (!(BIT(index) & rate_mask)) {
2230 IWL_ERR(mvm, "Current Rate is not valid\n");
2231 if (lq_sta->search_better_tbl) {
2232 /* revert to active table if search table is not valid*/
2233 rate->type = LQ_NONE;
2234 lq_sta->search_better_tbl = 0;
2235 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2236 rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
2237 }
2238 return;
2239 }
2240
2241 /* Get expected throughput table and history window for current rate */
2242 if (!tbl->expected_tpt) {
2243 IWL_ERR(mvm, "tbl->expected_tpt is NULL\n");
2244 return;
2245 }
2246
2247 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
2248 window = &(tbl->win[index]);
2249
2250 /*
2251 * If there is not enough history to calculate actual average
2252 * throughput, keep analyzing results of more tx frames, without
2253 * changing rate or mode (bypass most of the rest of this function).
2254 * Set up new rate table in uCode only if old rate is not supported
2255 * in current association (use new rate found above).
2256 */
2257 fail_count = window->counter - window->success_counter;
2258 if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
2259 (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
2260 IWL_DEBUG_RATE(mvm,
2261 "%s: Test Window: succ %d total %d\n",
2262 rs_pretty_rate(rate),
2263 window->success_counter, window->counter);
2264
2265 /* Can't calculate this yet; not enough history */
2266 window->average_tpt = IWL_INVALID_VALUE;
2267
2268 /* Should we stay with this modulation mode,
2269 * or search for a new one? */
2270 rs_stay_in_table(lq_sta, false);
2271
2272 return;
2273 }
2274
2275 /* If we are searching for better modulation mode, check success. */
2276 if (lq_sta->search_better_tbl) {
2277 /* If good success, continue using the "search" mode;
2278 * no need to send new link quality command, since we're
2279 * continuing to use the setup that we've been trying. */
2280 if (window->average_tpt > lq_sta->last_tpt) {
2281 IWL_DEBUG_RATE(mvm,
2282 "SWITCHING TO NEW TABLE SR: %d "
2283 "cur-tpt %d old-tpt %d\n",
2284 window->success_ratio,
2285 window->average_tpt,
2286 lq_sta->last_tpt);
2287
2288 /* Swap tables; "search" becomes "active" */
2289 lq_sta->active_tbl = active_tbl;
2290 current_tpt = window->average_tpt;
2291 /* Else poor success; go back to mode in "active" table */
2292 } else {
2293 IWL_DEBUG_RATE(mvm,
2294 "GOING BACK TO THE OLD TABLE: SR %d "
2295 "cur-tpt %d old-tpt %d\n",
2296 window->success_ratio,
2297 window->average_tpt,
2298 lq_sta->last_tpt);
2299
2300 /* Nullify "search" table */
2301 rate->type = LQ_NONE;
2302
2303 /* Revert to "active" table */
2304 active_tbl = lq_sta->active_tbl;
2305 tbl = &(lq_sta->lq_info[active_tbl]);
2306
2307 /* Revert to "active" rate and throughput info */
2308 index = tbl->rate.index;
2309 current_tpt = lq_sta->last_tpt;
2310
2311 /* Need to set up a new rate table in uCode */
2312 update_lq = 1;
2313 }
2314
2315 /* Either way, we've made a decision; modulation mode
2316 * search is done, allow rate adjustment next time. */
2317 lq_sta->search_better_tbl = 0;
2318 done_search = 1; /* Don't switch modes below! */
2319 goto lq_update;
2320 }
2321
2322 /* (Else) not in search of better modulation mode, try for better
2323 * starting rate, while staying in this mode. */
2324 high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
2325 low = high_low & 0xff;
2326 high = (high_low >> 8) & 0xff;
2327
2328 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
2329
2330 sr = window->success_ratio;
2331
2332 /* Collect measured throughputs for current and adjacent rates */
2333 current_tpt = window->average_tpt;
2334 if (low != IWL_RATE_INVALID)
2335 low_tpt = tbl->win[low].average_tpt;
2336 if (high != IWL_RATE_INVALID)
2337 high_tpt = tbl->win[high].average_tpt;
2338
2339 IWL_DEBUG_RATE(mvm,
2340 "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
2341 rs_pretty_rate(rate), current_tpt, sr,
2342 low, high, low_tpt, high_tpt);
2343
2344 scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
2345 current_tpt, low_tpt, high_tpt);
2346
2347 /* Force a search in case BT doesn't like us being in MIMO */
2348 if (is_mimo(rate) &&
2349 !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
2350 IWL_DEBUG_RATE(mvm,
2351 "BT Coex forbids MIMO. Search for new config\n");
2352 rs_stay_in_table(lq_sta, true);
2353 goto lq_update;
2354 }
2355
2356 switch (scale_action) {
2357 case RS_ACTION_DOWNSCALE:
2358 /* Decrease starting rate, update uCode's rate table */
2359 if (low != IWL_RATE_INVALID) {
2360 update_lq = 1;
2361 index = low;
2362 } else {
2363 IWL_DEBUG_RATE(mvm,
2364 "At the bottom rate. Can't decrease\n");
2365 }
2366
2367 break;
2368 case RS_ACTION_UPSCALE:
2369 /* Increase starting rate, update uCode's rate table */
2370 if (high != IWL_RATE_INVALID) {
2371 update_lq = 1;
2372 index = high;
2373 } else {
2374 IWL_DEBUG_RATE(mvm,
2375 "At the top rate. Can't increase\n");
2376 }
2377
2378 break;
2379 case RS_ACTION_STAY:
2380 /* No change */
2381 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
2382 update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
2383 break;
2384 default:
2385 break;
2386 }
2387
2388lq_update:
2389 /* Replace uCode's rate table for the destination station. */
2390 if (update_lq) {
2391 tbl->rate.index = index;
2392 if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
2393 rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
2394 rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
2395 }
2396
2397 rs_stay_in_table(lq_sta, false);
2398
2399 /*
2400 * Search for new modulation mode if we're:
2401 * 1) Not changing rates right now
2402 * 2) Not just finishing up a search
2403 * 3) Allowing a new search
2404 */
2405 if (!update_lq && !done_search &&
2406 lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
2407 && window->counter) {
2408 enum rs_column next_column;
2409
2410 /* Save current throughput to compare with "search" throughput*/
2411 lq_sta->last_tpt = current_tpt;
2412
2413 IWL_DEBUG_RATE(mvm,
2414 "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
2415 update_lq, done_search, lq_sta->rs_state,
2416 window->counter);
2417
2418 next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
2419 if (next_column != RS_COLUMN_INVALID) {
2420 int ret = rs_switch_to_column(mvm, lq_sta, sta,
2421 next_column);
2422 if (!ret)
2423 lq_sta->search_better_tbl = 1;
2424 } else {
2425 IWL_DEBUG_RATE(mvm,
2426 "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
2427 lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
2428 }
2429
2430 /* If new "search" mode was selected, set up in uCode table */
2431 if (lq_sta->search_better_tbl) {
2432 /* Access the "search" table, clear its history. */
2433 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2434 rs_rate_scale_clear_tbl_windows(mvm, tbl);
2435
2436 /* Use new "search" start rate */
2437 index = tbl->rate.index;
2438
2439 rs_dump_rate(mvm, &tbl->rate,
2440 "Switch to SEARCH TABLE:");
2441 rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
2442 } else {
2443 done_search = 1;
2444 }
2445 }
2446
2447 if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
2448 /* If the "active" (non-search) mode was legacy,
2449 * and we've tried switching antennas,
2450 * but we haven't been able to try HT modes (not available),
2451 * stay with best antenna legacy modulation for a while
2452 * before next round of mode comparisons. */
2453 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2454 if (is_legacy(&tbl1->rate)) {
2455 IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
2456
2457 if (tid != IWL_MAX_TID_COUNT) {
2458 tid_data = &sta_priv->tid_data[tid];
2459 if (tid_data->state != IWL_AGG_OFF) {
2460 IWL_DEBUG_RATE(mvm,
2461 "Stop aggregation on tid %d\n",
2462 tid);
2463 ieee80211_stop_tx_ba_session(sta, tid);
2464 }
2465 }
2466 rs_set_stay_in_table(mvm, 1, lq_sta);
2467 } else {
2468 /* If we're in an HT mode, and all 3 mode switch actions
2469 * have been tried and compared, stay in this best modulation
2470 * mode for a while before next round of mode comparisons. */
2471 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2472 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2473 (tid != IWL_MAX_TID_COUNT)) {
2474 tid_data = &sta_priv->tid_data[tid];
2475 if (tid_data->state == IWL_AGG_OFF) {
2476 IWL_DEBUG_RATE(mvm,
2477 "try to aggregate tid %d\n",
2478 tid);
2479 rs_tl_turn_on_agg(mvm, tid,
2480 lq_sta, sta);
2481 }
2482 }
2483 rs_set_stay_in_table(mvm, 0, lq_sta);
2484 }
2485 }
2486}
2487
2488struct rs_init_rate_info {
2489 s8 rssi;
2490 u8 rate_idx;
2491};
2492
2493static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
2494 { -60, IWL_RATE_54M_INDEX },
2495 { -64, IWL_RATE_48M_INDEX },
2496 { -68, IWL_RATE_36M_INDEX },
2497 { -80, IWL_RATE_24M_INDEX },
2498 { -84, IWL_RATE_18M_INDEX },
2499 { -85, IWL_RATE_12M_INDEX },
2500 { -86, IWL_RATE_11M_INDEX },
2501 { -88, IWL_RATE_5M_INDEX },
2502 { -90, IWL_RATE_2M_INDEX },
2503 { S8_MIN, IWL_RATE_1M_INDEX },
2504};
2505
2506static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
2507 { -60, IWL_RATE_54M_INDEX },
2508 { -64, IWL_RATE_48M_INDEX },
2509 { -72, IWL_RATE_36M_INDEX },
2510 { -80, IWL_RATE_24M_INDEX },
2511 { -84, IWL_RATE_18M_INDEX },
2512 { -85, IWL_RATE_12M_INDEX },
2513 { -87, IWL_RATE_9M_INDEX },
2514 { S8_MIN, IWL_RATE_6M_INDEX },
2515};
2516
2517static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
2518 { -60, IWL_RATE_MCS_7_INDEX },
2519 { -64, IWL_RATE_MCS_6_INDEX },
2520 { -68, IWL_RATE_MCS_5_INDEX },
2521 { -72, IWL_RATE_MCS_4_INDEX },
2522 { -80, IWL_RATE_MCS_3_INDEX },
2523 { -84, IWL_RATE_MCS_2_INDEX },
2524 { -85, IWL_RATE_MCS_1_INDEX },
2525 { S8_MIN, IWL_RATE_MCS_0_INDEX},
2526};
2527
2528static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
2529 { -60, IWL_RATE_MCS_8_INDEX },
2530 { -64, IWL_RATE_MCS_7_INDEX },
2531 { -68, IWL_RATE_MCS_6_INDEX },
2532 { -72, IWL_RATE_MCS_5_INDEX },
2533 { -80, IWL_RATE_MCS_4_INDEX },
2534 { -84, IWL_RATE_MCS_3_INDEX },
2535 { -85, IWL_RATE_MCS_2_INDEX },
2536 { -87, IWL_RATE_MCS_1_INDEX },
2537 { S8_MIN, IWL_RATE_MCS_0_INDEX},
2538};
2539
2540static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
2541 { -60, IWL_RATE_MCS_9_INDEX },
2542 { -64, IWL_RATE_MCS_8_INDEX },
2543 { -68, IWL_RATE_MCS_7_INDEX },
2544 { -72, IWL_RATE_MCS_6_INDEX },
2545 { -80, IWL_RATE_MCS_5_INDEX },
2546 { -84, IWL_RATE_MCS_4_INDEX },
2547 { -85, IWL_RATE_MCS_3_INDEX },
2548 { -87, IWL_RATE_MCS_2_INDEX },
2549 { -88, IWL_RATE_MCS_1_INDEX },
2550 { S8_MIN, IWL_RATE_MCS_0_INDEX },
2551};
2552
2553/* Init the optimal rate based on STA caps
2554 * This combined with rssi is used to report the last tx rate
2555 * to userspace when we haven't transmitted enough frames.
2556 */
2557static void rs_init_optimal_rate(struct iwl_mvm *mvm,
2558 struct ieee80211_sta *sta,
2559 struct iwl_lq_sta *lq_sta)
2560{
2561 struct rs_rate *rate = &lq_sta->optimal_rate;
2562
2563 if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
2564 rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
2565 else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
2566 rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
2567 else if (lq_sta->band == IEEE80211_BAND_5GHZ)
2568 rate->type = LQ_LEGACY_A;
2569 else
2570 rate->type = LQ_LEGACY_G;
2571
2572 rate->bw = rs_bw_from_sta_bw(sta);
2573 rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
2574
2575 /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
2576
2577 if (is_mimo(rate)) {
2578 lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
2579 } else if (is_siso(rate)) {
2580 lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
2581 } else {
2582 lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
2583
2584 if (lq_sta->band == IEEE80211_BAND_5GHZ) {
2585 lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
2586 lq_sta->optimal_nentries =
2587 ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
2588 } else {
2589 lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
2590 lq_sta->optimal_nentries =
2591 ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
2592 }
2593 }
2594
2595 if (is_vht(rate)) {
2596 if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
2597 lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
2598 lq_sta->optimal_nentries =
2599 ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
2600 } else {
2601 lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
2602 lq_sta->optimal_nentries =
2603 ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
2604 }
2605 } else if (is_ht(rate)) {
2606 lq_sta->optimal_rates = rs_optimal_rates_ht;
2607 lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
2608 }
2609}
2610
2611/* Compute the optimal rate index based on RSSI */
2612static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
2613 struct iwl_lq_sta *lq_sta)
2614{
2615 struct rs_rate *rate = &lq_sta->optimal_rate;
2616 int i;
2617
2618 rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
2619 BITS_PER_LONG);
2620
2621 for (i = 0; i < lq_sta->optimal_nentries; i++) {
2622 int rate_idx = lq_sta->optimal_rates[i].rate_idx;
2623
2624 if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
2625 (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
2626 rate->index = rate_idx;
2627 break;
2628 }
2629 }
2630
2631 return rate;
2632}
2633
2634/* Choose an initial legacy rate and antenna to use based on the RSSI
2635 * of last Rx
2636 */
2637static void rs_get_initial_rate(struct iwl_mvm *mvm,
2638 struct iwl_lq_sta *lq_sta,
2639 enum ieee80211_band band,
2640 struct rs_rate *rate)
2641{
2642 int i, nentries;
2643 s8 best_rssi = S8_MIN;
2644 u8 best_ant = ANT_NONE;
2645 u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
2646 const struct rs_init_rate_info *initial_rates;
2647
2648 for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
2649 if (!(lq_sta->pers.chains & BIT(i)))
2650 continue;
2651
2652 if (lq_sta->pers.chain_signal[i] > best_rssi) {
2653 best_rssi = lq_sta->pers.chain_signal[i];
2654 best_ant = BIT(i);
2655 }
2656 }
2657
2658 IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n",
2659 rs_pretty_ant(best_ant), best_rssi);
2660
2661 if (best_ant != ANT_A && best_ant != ANT_B)
2662 rate->ant = first_antenna(valid_tx_ant);
2663 else
2664 rate->ant = best_ant;
2665
2666 rate->sgi = false;
2667 rate->ldpc = false;
2668 rate->bw = RATE_MCS_CHAN_WIDTH_20;
2669
2670 rate->index = find_first_bit(&lq_sta->active_legacy_rate,
2671 BITS_PER_LONG);
2672
2673 if (band == IEEE80211_BAND_5GHZ) {
2674 rate->type = LQ_LEGACY_A;
2675 initial_rates = rs_optimal_rates_5ghz_legacy;
2676 nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
2677 } else {
2678 rate->type = LQ_LEGACY_G;
2679 initial_rates = rs_optimal_rates_24ghz_legacy;
2680 nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
2681 }
2682
2683 if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) {
2684 for (i = 0; i < nentries; i++) {
2685 int rate_idx = initial_rates[i].rate_idx;
2686 if ((best_rssi >= initial_rates[i].rssi) &&
2687 (BIT(rate_idx) & lq_sta->active_legacy_rate)) {
2688 rate->index = rate_idx;
2689 break;
2690 }
2691 }
2692 }
2693
2694 IWL_DEBUG_RATE(mvm, "rate_idx %d ANT %s\n", rate->index,
2695 rs_pretty_ant(rate->ant));
2696}
2697
2698/* Save info about RSSI of last Rx */
2699void rs_update_last_rssi(struct iwl_mvm *mvm,
2700 struct iwl_lq_sta *lq_sta,
2701 struct ieee80211_rx_status *rx_status)
2702{
2703 int i;
2704
2705 lq_sta->pers.chains = rx_status->chains;
2706 lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
2707 lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
2708 lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
2709 lq_sta->pers.last_rssi = S8_MIN;
2710
2711 for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
2712 if (!(lq_sta->pers.chains & BIT(i)))
2713 continue;
2714
2715 if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
2716 lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
2717 }
2718}
2719
2720/**
2721 * rs_initialize_lq - Initialize a station's hardware rate table
2722 *
2723 * The uCode's station table contains a table of fallback rates
2724 * for automatic fallback during transmission.
2725 *
2726 * NOTE: This sets up a default set of values. These will be replaced later
2727 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
2728 * rc80211_simple.
2729 *
2730 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2731 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2732 * which requires station table entry to exist).
2733 */
2734static void rs_initialize_lq(struct iwl_mvm *mvm,
2735 struct ieee80211_sta *sta,
2736 struct iwl_lq_sta *lq_sta,
2737 enum ieee80211_band band,
2738 bool init)
2739{
2740 struct iwl_scale_tbl_info *tbl;
2741 struct rs_rate *rate;
2742 u8 active_tbl = 0;
2743
2744 if (!sta || !lq_sta)
2745 return;
2746
2747 if (!lq_sta->search_better_tbl)
2748 active_tbl = lq_sta->active_tbl;
2749 else
2750 active_tbl = 1 - lq_sta->active_tbl;
2751
2752 tbl = &(lq_sta->lq_info[active_tbl]);
2753 rate = &tbl->rate;
2754
2755 rs_get_initial_rate(mvm, lq_sta, band, rate);
2756 rs_init_optimal_rate(mvm, sta, lq_sta);
2757
2758 WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
2759 if (rate->ant == ANT_A)
2760 tbl->column = RS_COLUMN_LEGACY_ANT_A;
2761 else
2762 tbl->column = RS_COLUMN_LEGACY_ANT_B;
2763
2764 rs_set_expected_tpt_table(lq_sta, tbl);
2765 rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
2766 /* TODO restore station should remember the lq cmd */
2767 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
2768}
2769
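/*
 * mac80211 rate-control get_rate callback. The actual Tx rate fallback
 * is handled by the firmware through the LQ table, so report the last
 * used rate here, or an RSSI-based optimal rate while rate scaling has
 * not converged yet (RS_STATE_STAY_IN_COLUMN not reached).
 */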
2770static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2771 struct ieee80211_tx_rate_control *txrc)
2772{
2773 struct sk_buff *skb = txrc->skb;
2774 struct iwl_op_mode *op_mode __maybe_unused =
2775 (struct iwl_op_mode *)mvm_r;
2776 struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
2777 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2778 struct iwl_lq_sta *lq_sta = mvm_sta;
2779 struct rs_rate *optimal_rate;
2780 u32 last_ucode_rate;
2781
2782 if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
2783 /* if vif isn't initialized mvm doesn't know about
2784 * this station, so don't do anything with it
2785 */
2786 sta = NULL;
2787 mvm_sta = NULL;
2788 }
2789
2790 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
2791
2792 /* Treat uninitialized rate scaling data the same as non-existent. */
2793 if (lq_sta && !lq_sta->pers.drv) {
2794 IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
2795 mvm_sta = NULL;
2796 }
2797
2798 /* Send management frames and NO_ACK data using lowest rate. */
2799 if (rate_control_send_low(sta, mvm_sta, txrc))
2800 return;
2801
2802 iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
2803 info->band, &info->control.rates[0]);
2804 info->control.rates[0].count = 1;
2805
2806 /* Report the optimal rate based on rssi and STA caps if we haven't
2807 * converged yet (too little traffic) or are exploring other modulations
2808 */
2809 if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
2810 optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
2811 last_ucode_rate = ucode_rate_from_rs_rate(mvm,
2812 optimal_rate);
2813 iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
2814 &txrc->reported_rate);
2815 }
2816}
2817
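/*
 * mac80211 rate-control alloc_sta callback. The rate scaling state is
 * embedded in struct iwl_mvm_sta, so only the persistent part is
 * initialized here and a pointer to it is returned.
 */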
2818static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
2819 gfp_t gfp)
2820{
2821 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
2822 struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
2823 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2824 struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
2825
2826 IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
2827
2828 lq_sta->pers.drv = mvm;
2829#ifdef CONFIG_MAC80211_DEBUGFS
2830 lq_sta->pers.dbg_fixed_rate = 0;
2831 lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
2832 lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
2833#endif
2834 lq_sta->pers.chains = 0;
2835 memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
2836 lq_sta->pers.last_rssi = S8_MIN;
2837
2838 return &sta_priv->lq_sta;
2839}
2840
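/*
 * The VHT rx_mcs_map encodes the supported MCS set with 2 bits per
 * spatial stream; extract the field for the requested NSS and map it
 * to the highest supported rate index (MCS 7/8/9), or -1 if that NSS
 * is not supported at all.
 */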
2841static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
2842 int nss)
2843{
2844 u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
2845 (0x3 << (2 * (nss - 1)));
2846 rx_mcs >>= (2 * (nss - 1));
2847
2848 if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
2849 return IWL_RATE_MCS_7_INDEX;
2850 else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
2851 return IWL_RATE_MCS_8_INDEX;
2852 else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
2853 return IWL_RATE_MCS_9_INDEX;
2854
2855 WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
2856 return -1;
2857}
2858
2859static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
2860 struct ieee80211_sta_vht_cap *vht_cap,
2861 struct iwl_lq_sta *lq_sta)
2862{
2863 int i;
2864 int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
2865
2866 if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
2867 for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
2868 if (i == IWL_RATE_9M_INDEX)
2869 continue;
2870
2871 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
2872 if (i == IWL_RATE_MCS_9_INDEX &&
2873 sta->bandwidth == IEEE80211_STA_RX_BW_20)
2874 continue;
2875
2876 lq_sta->active_siso_rate |= BIT(i);
2877 }
2878 }
2879
2880 if (sta->rx_nss < 2)
2881 return;
2882
2883 highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
2884 if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
2885 for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
2886 if (i == IWL_RATE_9M_INDEX)
2887 continue;
2888
2889 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
2890 if (i == IWL_RATE_MCS_9_INDEX &&
2891 sta->bandwidth == IEEE80211_STA_RX_BW_20)
2892 continue;
2893
2894 lq_sta->active_mimo2_rate |= BIT(i);
2895 }
2896 }
2897}
2898
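/*
 * Build the active SISO/MIMO2 rate masks from the peer's HT MCS
 * rx_mask, and enable LDPC/STBC only when both our HW configuration
 * and the peer capabilities support them.
 */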
2899static void rs_ht_init(struct iwl_mvm *mvm,
2900 struct ieee80211_sta *sta,
2901 struct iwl_lq_sta *lq_sta,
2902 struct ieee80211_sta_ht_cap *ht_cap)
2903{
2904 /* active_siso_rate mask includes 9 MBits (bit 5),
2905 * and CCK (bits 0-3), supp_rates[] does not;
2906 * shift to convert format, force 9 MBits off.
2907 */
2908 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2909 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2910 lq_sta->active_siso_rate &= ~((u16)0x2);
2911 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2912
2913 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2914 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2915 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2916 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2917
2918 if (mvm->cfg->ht_params->ldpc &&
2919 (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
2920 lq_sta->ldpc = true;
2921
2922 if (mvm->cfg->ht_params->stbc &&
2923 (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
2924 (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
2925 lq_sta->stbc_capable = true;
2926
2927 lq_sta->is_vht = false;
2928}
2929
2930static void rs_vht_init(struct iwl_mvm *mvm,
2931 struct ieee80211_sta *sta,
2932 struct iwl_lq_sta *lq_sta,
2933 struct ieee80211_sta_vht_cap *vht_cap)
2934{
2935 rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
2936
2937 if (mvm->cfg->ht_params->ldpc &&
2938 (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
2939 lq_sta->ldpc = true;
2940
2941 if (mvm->cfg->ht_params->stbc &&
2942 (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
2943 (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
2944 lq_sta->stbc_capable = true;
2945
2946 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
2947 (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
2948 (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
2949 lq_sta->bfer_capable = true;
2950
2951 lq_sta->is_vht = true;
2952}
2953
2954#ifdef CONFIG_IWLWIFI_DEBUGFS
2955static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
2956{
2957 spin_lock_bh(&mvm->drv_stats_lock);
2958 memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
2959 spin_unlock_bh(&mvm->drv_stats_lock);
2960}
2961
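/*
 * Account a received frame in the driver Rx statistics exposed via
 * debugfs: bandwidth, legacy/HT/VHT, NSS, guard interval and a short
 * history of the last Rx rates.
 */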
2962void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
2963{
2964 u8 nss = 0, mcs = 0;
2965
2966 spin_lock(&mvm->drv_stats_lock);
2967
2968 if (agg)
2969 mvm->drv_rx_stats.agg_frames++;
2970
2971 mvm->drv_rx_stats.success_frames++;
2972
2973 switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
2974 case RATE_MCS_CHAN_WIDTH_20:
2975 mvm->drv_rx_stats.bw_20_frames++;
2976 break;
2977 case RATE_MCS_CHAN_WIDTH_40:
2978 mvm->drv_rx_stats.bw_40_frames++;
2979 break;
2980 case RATE_MCS_CHAN_WIDTH_80:
2981 mvm->drv_rx_stats.bw_80_frames++;
2982 break;
2983 default:
2984 WARN_ONCE(1, "bad BW. rate 0x%x", rate);
2985 }
2986
2987 if (rate & RATE_MCS_HT_MSK) {
2988 mvm->drv_rx_stats.ht_frames++;
2989 mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
2990 nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
2991 } else if (rate & RATE_MCS_VHT_MSK) {
2992 mvm->drv_rx_stats.vht_frames++;
2993 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
2994 nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
2995 RATE_VHT_MCS_NSS_POS) + 1;
2996 } else {
2997 mvm->drv_rx_stats.legacy_frames++;
2998 }
2999
3000 if (nss == 1)
3001 mvm->drv_rx_stats.siso_frames++;
3002 else if (nss == 2)
3003 mvm->drv_rx_stats.mimo2_frames++;
3004
3005 if (rate & RATE_MCS_SGI_MSK)
3006 mvm->drv_rx_stats.sgi_frames++;
3007 else
3008 mvm->drv_rx_stats.ngi_frames++;
3009
3010 mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate;
3011 mvm->drv_rx_stats.last_frame_idx =
3012 (mvm->drv_rx_stats.last_frame_idx + 1) %
3013 ARRAY_SIZE(mvm->drv_rx_stats.last_rates);
3014
3015 spin_unlock(&mvm->drv_stats_lock);
3016}
3017#endif
3018
3019/*
3020 * Called after adding a new station to initialize rate scaling
3021 */
3022void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3023 enum ieee80211_band band, bool init)
3024{
3025 int i, j;
3026 struct ieee80211_hw *hw = mvm->hw;
3027 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
3028 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
3029 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
3030 struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
3031 struct ieee80211_supported_band *sband;
3032 unsigned long supp; /* must be unsigned long for for_each_set_bit */
3033
3034 /* clear all non-persistent lq data */
3035 memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
3036
3037 sband = hw->wiphy->bands[band];
3038
3039 lq_sta->lq.sta_id = sta_priv->sta_id;
3040
3041 for (j = 0; j < LQ_SIZE; j++)
3042 rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
3043
3044 lq_sta->flush_timer = 0;
3045 lq_sta->last_tx = jiffies;
3046
3047 IWL_DEBUG_RATE(mvm,
3048 "LQ: *** rate scale station global init for station %d ***\n",
3049 sta_priv->sta_id);
3050 /* TODO: what is a good starting rate for STA? About middle? Maybe not
3051 * the lowest or the highest rate.. Could consider using RSSI from
3052 * previous packets? Need to have IEEE 802.1X auth succeed immediately
3053 * after assoc.. */
3054
3055 lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX;
3056 lq_sta->band = sband->band;
3057 /*
3058 * active legacy rates as per supported rates bitmap
3059 */
3060 supp = sta->supp_rates[sband->band];
3061 lq_sta->active_legacy_rate = 0;
3062 for_each_set_bit(i, &supp, BITS_PER_LONG)
3063 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
3064
3065 /* TODO: should probably account for rx_highest for both HT/VHT */
3066 if (!vht_cap || !vht_cap->vht_supported)
3067 rs_ht_init(mvm, sta, lq_sta, ht_cap);
3068 else
3069 rs_vht_init(mvm, sta, lq_sta, vht_cap);
3070
3071 lq_sta->max_legacy_rate_idx =
3072 rs_get_max_rate_from_mask(lq_sta->active_legacy_rate);
3073 lq_sta->max_siso_rate_idx =
3074 rs_get_max_rate_from_mask(lq_sta->active_siso_rate);
3075 lq_sta->max_mimo2_rate_idx =
3076 rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate);
3077
3078 IWL_DEBUG_RATE(mvm,
3079 "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n",
3080 lq_sta->active_legacy_rate,
3081 lq_sta->active_siso_rate,
3082 lq_sta->active_mimo2_rate,
3083 lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable,
3084 lq_sta->bfer_capable);
3085 IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
3086 lq_sta->max_legacy_rate_idx,
3087 lq_sta->max_siso_rate_idx,
3088 lq_sta->max_mimo2_rate_idx);
3089
3090 /* These values will be overridden later */
3091 lq_sta->lq.single_stream_ant_msk =
3092 first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
3093 lq_sta->lq.dual_stream_ant_msk = ANT_AB;
3094
3095 /* as default allow aggregation for all tids */
3096 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
3097 lq_sta->is_agg = 0;
3098#ifdef CONFIG_IWLWIFI_DEBUGFS
3099 iwl_mvm_reset_frame_stats(mvm);
3100#endif
3101 rs_initialize_lq(mvm, sta, lq_sta, band, init);
3102}
3103
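/*
 * mac80211 rate-control rate_update callback: something changed for
 * this station, so stop all ongoing aggregation sessions and
 * re-initialize rate scaling from scratch.
 */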
3104static void rs_rate_update(void *mvm_r,
3105 struct ieee80211_supported_band *sband,
3106 struct cfg80211_chan_def *chandef,
3107 struct ieee80211_sta *sta, void *priv_sta,
3108 u32 changed)
3109{
3110 u8 tid;
3111 struct iwl_op_mode *op_mode =
3112 (struct iwl_op_mode *)mvm_r;
3113 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
3114
3115 if (!iwl_mvm_sta_from_mac80211(sta)->vif)
3116 return;
3117
3118 /* Stop any ongoing aggregations as rs starts off assuming no agg */
3119 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
3120 ieee80211_stop_tx_ba_session(sta, tid);
3121
3122 iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
3123}
3124
3125#ifdef CONFIG_MAC80211_DEBUGFS
3126static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
3127 struct iwl_lq_cmd *lq_cmd,
3128 enum ieee80211_band band,
3129 u32 ucode_rate)
3130{
3131 struct rs_rate rate;
3132 int i;
3133 int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
3134 __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
3135 u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
3136
3137 for (i = 0; i < num_rates; i++)
3138 lq_cmd->rs_table[i] = ucode_rate_le32;
3139
3140 rs_rate_from_ucode_rate(ucode_rate, band, &rate);
3141
3142 if (is_mimo(&rate))
3143 lq_cmd->mimo_delim = num_rates - 1;
3144 else
3145 lq_cmd->mimo_delim = 0;
3146
3147 lq_cmd->reduced_tpc = 0;
3148
3149 if (num_of_ant(ant) == 1)
3150 lq_cmd->single_stream_ant_msk = ant;
3151
3152 lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3153}
3154#endif /* CONFIG_MAC80211_DEBUGFS */
3155
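/*
 * Fill part of the LQ rate table for a single column: write up to
 * num_rates entries, each repeated num_retries times, walking down the
 * rates of the column starting at *rs_table_index. When toggle_ant is
 * set the antenna is toggled between consecutive entries.
 */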
3156static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
3157 struct iwl_lq_sta *lq_sta,
3158 struct rs_rate *rate,
3159 __le32 *rs_table, int *rs_table_index,
3160 int num_rates, int num_retries,
3161 u8 valid_tx_ant, bool toggle_ant)
3162{
3163 int i, j;
3164 __le32 ucode_rate;
3165 bool bottom_reached = false;
3166 int prev_rate_idx = rate->index;
3167 int end = LINK_QUAL_MAX_RETRY_NUM;
3168 int index = *rs_table_index;
3169
3170 for (i = 0; i < num_rates && index < end; i++) {
3171 for (j = 0; j < num_retries && index < end; j++, index++) {
3172 ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm,
3173 rate));
3174 rs_table[index] = ucode_rate;
3175 if (toggle_ant)
3176 rs_toggle_antenna(valid_tx_ant, rate);
3177 }
3178
3179 prev_rate_idx = rate->index;
3180 bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
3181 if (bottom_reached && !is_legacy(rate))
3182 break;
3183 }
3184
3185 if (!bottom_reached && !is_legacy(rate))
3186 rate->index = prev_rate_idx;
3187
3188 *rs_table_index = index;
3189}
3190
3191/* Building the rate table is non-trivial. When we're in MIMO2/VHT/80Mhz/SGI
3192 * column the rate table should look like this:
3193 *
3194 * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
3195 * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
3196 * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
3197 * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
3198 * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
3199 * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
3200 * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
3201 * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
3202 * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
3203 * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
3204 * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
3205 * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
3206 * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
3207 * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
3208 * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
3209 * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
3210 */
3211static void rs_build_rates_table(struct iwl_mvm *mvm,
3212 struct ieee80211_sta *sta,
3213 struct iwl_lq_sta *lq_sta,
3214 const struct rs_rate *initial_rate)
3215{
3216 struct rs_rate rate;
3217 int num_rates, num_retries, index = 0;
3218 u8 valid_tx_ant = 0;
3219 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
3220 bool toggle_ant = false;
3221
3222 memcpy(&rate, initial_rate, sizeof(rate));
3223
3224 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
3225
3226 /* TODO: remove old API when min FW API hits 14 */
3227 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
3228 rs_stbc_allow(mvm, sta, lq_sta))
3229 rate.stbc = true;
3230
3231 if (is_siso(&rate)) {
3232 num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES;
3233 num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
3234 } else if (is_mimo(&rate)) {
3235 num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES;
3236 num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
3237 } else {
3238 num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES;
3239 num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES;
3240 toggle_ant = true;
3241 }
3242
3243 rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
3244 num_rates, num_retries, valid_tx_ant,
3245 toggle_ant);
3246
3247 rs_get_lower_rate_down_column(lq_sta, &rate);
3248
3249 if (is_siso(&rate)) {
3250 num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES;
3251 num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES;
3252 lq_cmd->mimo_delim = index;
3253 } else if (is_legacy(&rate)) {
3254 num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
3255 num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
3256 } else {
3257 WARN_ON_ONCE(1);
3258 }
3259
3260 toggle_ant = true;
3261
3262 rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
3263 num_rates, num_retries, valid_tx_ant,
3264 toggle_ant);
3265
3266 rs_get_lower_rate_down_column(lq_sta, &rate);
3267
3268 num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
3269 num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
3270
3271 rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
3272 num_rates, num_retries, valid_tx_ant,
3273 toggle_ant);
3274
3275}
3276
3277struct rs_bfer_active_iter_data {
3278 struct ieee80211_sta *exclude_sta;
3279 struct iwl_mvm_sta *bfer_mvmsta;
3280};
3281
3282static void rs_bfer_active_iter(void *_data,
3283 struct ieee80211_sta *sta)
3284{
3285 struct rs_bfer_active_iter_data *data = _data;
3286 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3287 struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
3288 u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
3289
3290 if (sta == data->exclude_sta)
3291 return;
3292
3293 /* The current sta has BFER allowed */
3294 if (ss_params & LQ_SS_BFER_ALLOWED) {
3295 WARN_ON_ONCE(data->bfer_mvmsta != NULL);
3296
3297 data->bfer_mvmsta = mvmsta;
3298 }
3299}
3300
3301static int rs_bfer_priority(struct iwl_mvm_sta *sta)
3302{
3303 int prio = -1;
3304 enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif);
3305
3306 switch (viftype) {
3307 case NL80211_IFTYPE_AP:
3308 case NL80211_IFTYPE_P2P_GO:
3309 prio = 3;
3310 break;
3311 case NL80211_IFTYPE_P2P_CLIENT:
3312 prio = 2;
3313 break;
3314 case NL80211_IFTYPE_STATION:
3315 prio = 1;
3316 break;
3317 default:
3318 WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id);
3319 prio = -1;
3320 }
3321
3322 return prio;
3323}
3324
3325/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */
3326static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1,
3327 struct iwl_mvm_sta *sta2)
3328{
3329 int prio1 = rs_bfer_priority(sta1);
3330 int prio2 = rs_bfer_priority(sta2);
3331
3332 if (prio1 > prio2)
3333 return 1;
3334 if (prio1 < prio2)
3335 return -1;
3336 return 0;
3337}
3338
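/*
 * Fill the single stream (SS) Tx parameters of the LQ command: decide
 * whether STBC and/or beamformee (BFER) may be used for this station.
 * Only one station may have BFER enabled at a time, so if a lower
 * priority station currently owns it, BFER is revoked from it and
 * granted to this station instead.
 */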
3339static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
3340 struct ieee80211_sta *sta,
3341 struct iwl_lq_sta *lq_sta,
3342 const struct rs_rate *initial_rate)
3343{
3344 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
3345 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3346 struct rs_bfer_active_iter_data data = {
3347 .exclude_sta = sta,
3348 .bfer_mvmsta = NULL,
3349 };
3350 struct iwl_mvm_sta *bfer_mvmsta = NULL;
3351 u32 ss_params = LQ_SS_PARAMS_VALID;
3352
3353 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
3354 goto out;
3355
3356#ifdef CONFIG_MAC80211_DEBUGFS
3357 /* Check if forcing the decision is configured.
3358 * Note that SISO is forced by not allowing STBC or BFER
3359 */
3360 if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC)
3361 ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
3362 else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER)
3363 ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
3364
3365 if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) {
3366 IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
3367 lq_sta->pers.ss_force);
3368 goto out;
3369 }
3370#endif
3371
3372 if (lq_sta->stbc_capable)
3373 ss_params |= LQ_SS_STBC_1SS_ALLOWED;
3374
3375 if (!lq_sta->bfer_capable)
3376 goto out;
3377
3378 ieee80211_iterate_stations_atomic(mvm->hw,
3379 rs_bfer_active_iter,
3380 &data);
3381 bfer_mvmsta = data.bfer_mvmsta;
3382
3383 /* This code is safe as it doesn't run concurrently for different
3384 * stations. This is guaranteed by the fact that calls to
3385 * ieee80211_tx_status wouldn't run concurrently for a single HW.
3386 */
3387 if (!bfer_mvmsta) {
3388 IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n");
3389
3390 ss_params |= LQ_SS_BFER_ALLOWED;
3391 goto out;
3392 }
3393
3394 IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n",
3395 bfer_mvmsta->sta_id);
3396
3397 /* Disallow BFER on another STA if active and we're a higher priority */
3398 if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
3399 struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
3400 u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
3401
3402 bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
3403 bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
3404 iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
3405
3406 ss_params |= LQ_SS_BFER_ALLOWED;
3407 IWL_DEBUG_RATE(mvm,
3408 "Lower priority BFER sta found (%d). Switch BFER\n",
3409 bfer_mvmsta->sta_id);
3410 }
3411out:
3412 lq_cmd->ss_params = cpu_to_le32(ss_params);
3413}
3414
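/*
 * Build the complete LINK_QUALITY command for this station: the rate
 * fallback table, the aggregation limits and, when supported by the
 * firmware, the single stream parameters.
 */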
3415static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
3416 struct ieee80211_sta *sta,
3417 struct iwl_lq_sta *lq_sta,
3418 const struct rs_rate *initial_rate)
3419{
3420 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
3421 struct iwl_mvm_sta *mvmsta;
3422 struct iwl_mvm_vif *mvmvif;
3423
3424 lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START;
3425 lq_cmd->agg_time_limit =
3426 cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT);
3427
3428#ifdef CONFIG_MAC80211_DEBUGFS
3429 if (lq_sta->pers.dbg_fixed_rate) {
3430 rs_build_rates_table_from_fixed(mvm, lq_cmd,
3431 lq_sta->band,
3432 lq_sta->pers.dbg_fixed_rate);
3433 return;
3434 }
3435#endif
3436 if (WARN_ON_ONCE(!sta || !initial_rate))
3437 return;
3438
3439 rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
3440
3441 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
3442 rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
3443
3444 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3445 mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
3446
3447 if (num_of_ant(initial_rate->ant) == 1)
3448 lq_cmd->single_stream_ant_msk = initial_rate->ant;
3449
3450 lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
3451
3452 /*
3453 * In case of low latency, tell the firmware to leave a frame in the
3454 * Tx Fifo so that it can start a transaction in the same TxOP. This
3455 * basically allows the firmware to send bursts.
3456 */
3457 if (iwl_mvm_vif_low_latency(mvmvif)) {
3458 lq_cmd->agg_frame_cnt_limit--;
3459
3460 if (mvm->low_latency_agg_frame_limit)
3461 lq_cmd->agg_frame_cnt_limit =
3462 min(lq_cmd->agg_frame_cnt_limit,
3463 mvm->low_latency_agg_frame_limit);
3464 }
3465
3466 if (mvmsta->vif->p2p)
3467 lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK;
3468
3469 lq_cmd->agg_time_limit =
3470 cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
3471}
3472
3473static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
3474{
3475 return hw->priv;
3476}
3477/* rate scale requires a free function to be implemented */
3478static void rs_free(void *mvm_rate)
3479{
3480 return;
3481}
3482
3483static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
3484 void *mvm_sta)
3485{
3486 struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
3487 struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
3488
3489 IWL_DEBUG_RATE(mvm, "enter\n");
3490 IWL_DEBUG_RATE(mvm, "leave\n");
3491}
3492
3493#ifdef CONFIG_MAC80211_DEBUGFS
3494int rs_pretty_print_rate(char *buf, const u32 rate)
3495{
3496
3497 char *type, *bw;
3498 u8 mcs = 0, nss = 0;
3499 u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
3500
3501 if (!(rate & RATE_MCS_HT_MSK) &&
3502 !(rate & RATE_MCS_VHT_MSK)) {
3503 int index = iwl_hwrate_to_plcp_idx(rate);
3504
3505 return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
3506 rs_pretty_ant(ant),
3507 index == IWL_RATE_INVALID ? "BAD" :
3508 iwl_rate_mcs[index].mbps);
3509 }
3510
3511 if (rate & RATE_MCS_VHT_MSK) {
3512 type = "VHT";
3513 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
3514 nss = ((rate & RATE_VHT_MCS_NSS_MSK)
3515 >> RATE_VHT_MCS_NSS_POS) + 1;
3516 } else if (rate & RATE_MCS_HT_MSK) {
3517 type = "HT";
3518 mcs = rate & RATE_HT_MCS_INDEX_MSK;
3519 } else {
3520 type = "Unknown"; /* shouldn't happen */
3521 }
3522
3523 switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
3524 case RATE_MCS_CHAN_WIDTH_20:
3525 bw = "20Mhz";
3526 break;
3527 case RATE_MCS_CHAN_WIDTH_40:
3528 bw = "40Mhz";
3529 break;
3530 case RATE_MCS_CHAN_WIDTH_80:
3531 bw = "80Mhz";
3532 break;
3533 case RATE_MCS_CHAN_WIDTH_160:
3534 bw = "160Mhz";
3535 break;
3536 default:
3537 bw = "BAD BW";
3538 }
3539
3540 return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
3541 type, rs_pretty_ant(ant), bw, mcs, nss,
3542 (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
3543 (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
3544 (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
3545 (rate & RATE_MCS_BF_MSK) ? "BF " : "",
3546 (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
3547}
3548
3549/**
3550 * Program the device to use a fixed rate for frame transmission.
3551 * This is for debugging/testing only;
3552 * once the device starts using a fixed rate, the module needs to be
3553 * reloaded to bring back normal operation.
3554 */
3555static void rs_program_fix_rate(struct iwl_mvm *mvm,
3556 struct iwl_lq_sta *lq_sta)
3557{
3558 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
3559 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
3560 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
3561
3562 IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
3563 lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate);
3564
3565 if (lq_sta->pers.dbg_fixed_rate) {
3566 rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
3567 iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
3568 }
3569}
3570
3571static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
3572 const char __user *user_buf, size_t count, loff_t *ppos)
3573{
3574 struct iwl_lq_sta *lq_sta = file->private_data;
3575 struct iwl_mvm *mvm;
3576 char buf[64];
3577 size_t buf_size;
3578 u32 parsed_rate;
3579
3580 mvm = lq_sta->pers.drv;
3581 memset(buf, 0, sizeof(buf));
3582 buf_size = min(count, sizeof(buf) - 1);
3583 if (copy_from_user(buf, user_buf, buf_size))
3584 return -EFAULT;
3585
3586 if (sscanf(buf, "%x", &parsed_rate) == 1)
3587 lq_sta->pers.dbg_fixed_rate = parsed_rate;
3588 else
3589 lq_sta->pers.dbg_fixed_rate = 0;
3590
3591 rs_program_fix_rate(mvm, lq_sta);
3592
3593 return count;
3594}
3595
3596static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3597 char __user *user_buf, size_t count, loff_t *ppos)
3598{
3599 char *buff;
3600 int desc = 0;
3601 int i = 0;
3602 ssize_t ret;
3603
3604 struct iwl_lq_sta *lq_sta = file->private_data;
3605 struct iwl_mvm *mvm;
3606 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
3607 struct rs_rate *rate = &tbl->rate;
3608 u32 ss_params;
3609 mvm = lq_sta->pers.drv;
3610 buff = kmalloc(2048, GFP_KERNEL);
3611 if (!buff)
3612 return -ENOMEM;
3613
3614 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
3615 desc += sprintf(buff+desc, "failed=%d success=%d rate=0x%lX\n",
3616 lq_sta->total_failed, lq_sta->total_success,
3617 lq_sta->active_legacy_rate);
3618 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3619 lq_sta->pers.dbg_fixed_rate);
3620 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3621 (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
3622 (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
3623 (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
3624 desc += sprintf(buff+desc, "lq type %s\n",
3625 (is_legacy(rate)) ? "legacy" :
3626 is_vht(rate) ? "VHT" : "HT");
3627 if (!is_legacy(rate)) {
3628 desc += sprintf(buff + desc, " %s",
3629 (is_siso(rate)) ? "SISO" : "MIMO2");
3630 desc += sprintf(buff + desc, " %s",
3631 (is_ht20(rate)) ? "20MHz" :
3632 (is_ht40(rate)) ? "40MHz" :
3633 (is_ht80(rate)) ? "80Mhz" : "BAD BW");
3634 desc += sprintf(buff + desc, " %s %s %s\n",
3635 (rate->sgi) ? "SGI" : "NGI",
3636 (rate->ldpc) ? "LDPC" : "BCC",
3637 (lq_sta->is_agg) ? "AGG on" : "");
3638 }
3639 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
3640 lq_sta->last_rate_n_flags);
3641 desc += sprintf(buff+desc,
3642 "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
3643 lq_sta->lq.flags,
3644 lq_sta->lq.mimo_delim,
3645 lq_sta->lq.single_stream_ant_msk,
3646 lq_sta->lq.dual_stream_ant_msk);
3647
3648 desc += sprintf(buff+desc,
3649 "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
3650 le16_to_cpu(lq_sta->lq.agg_time_limit),
3651 lq_sta->lq.agg_disable_start_th,
3652 lq_sta->lq.agg_frame_cnt_limit);
3653
3654 desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
3655 ss_params = le32_to_cpu(lq_sta->lq.ss_params);
3656 desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
3657 (ss_params & LQ_SS_PARAMS_VALID) ?
3658 "VALID" : "INVALID",
3659 (ss_params & LQ_SS_BFER_ALLOWED) ?
3660 ", BFER" : "",
3661 (ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
3662 ", STBC" : "",
3663 (ss_params & LQ_SS_FORCE) ?
3664 ", FORCE" : "");
3665 desc += sprintf(buff+desc,
3666 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
3667 lq_sta->lq.initial_rate_index[0],
3668 lq_sta->lq.initial_rate_index[1],
3669 lq_sta->lq.initial_rate_index[2],
3670 lq_sta->lq.initial_rate_index[3]);
3671
3672 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3673 u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
3674
3675 desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
3676 desc += rs_pretty_print_rate(buff+desc, r);
3677 }
3678
3679 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3680 kfree(buff);
3681 return ret;
3682}
3683
3684static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
3685 .write = rs_sta_dbgfs_scale_table_write,
3686 .read = rs_sta_dbgfs_scale_table_read,
3687 .open = simple_open,
3688 .llseek = default_llseek,
3689};
3690static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
3691 char __user *user_buf, size_t count, loff_t *ppos)
3692{
3693 char *buff;
3694 int desc = 0;
3695 int i, j;
3696 ssize_t ret;
3697 struct iwl_scale_tbl_info *tbl;
3698 struct rs_rate *rate;
3699 struct iwl_lq_sta *lq_sta = file->private_data;
3700
3701 buff = kmalloc(1024, GFP_KERNEL);
3702 if (!buff)
3703 return -ENOMEM;
3704
3705 for (i = 0; i < LQ_SIZE; i++) {
3706 tbl = &(lq_sta->lq_info[i]);
3707 rate = &tbl->rate;
3708 desc += sprintf(buff+desc,
3709 "%s type=%d SGI=%d BW=%s DUP=0\n"
3710 "index=%d\n",
3711 lq_sta->active_tbl == i ? "*" : "x",
3712 rate->type,
3713 rate->sgi,
3714 is_ht20(rate) ? "20Mhz" :
3715 is_ht40(rate) ? "40Mhz" :
3716 is_ht80(rate) ? "80Mhz" : "ERR",
3717 rate->index);
3718 for (j = 0; j < IWL_RATE_COUNT; j++) {
3719 desc += sprintf(buff+desc,
3720 "counter=%d success=%d %%=%d\n",
3721 tbl->win[j].counter,
3722 tbl->win[j].success_counter,
3723 tbl->win[j].success_ratio);
3724 }
3725 }
3726 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3727 kfree(buff);
3728 return ret;
3729}
3730
3731static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
3732 .read = rs_sta_dbgfs_stats_table_read,
3733 .open = simple_open,
3734 .llseek = default_llseek,
3735};
3736
3737static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
3738 char __user *user_buf,
3739 size_t count, loff_t *ppos)
3740{
3741 static const char * const column_name[] = {
3742 [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
3743 [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
3744 [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
3745 [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
3746 [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
3747 [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
3748 [RS_COLUMN_MIMO2] = "MIMO2",
3749 [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
3750 };
3751
3752 static const char * const rate_name[] = {
3753 [IWL_RATE_1M_INDEX] = "1M",
3754 [IWL_RATE_2M_INDEX] = "2M",
3755 [IWL_RATE_5M_INDEX] = "5.5M",
3756 [IWL_RATE_11M_INDEX] = "11M",
3757 [IWL_RATE_6M_INDEX] = "6M|MCS0",
3758 [IWL_RATE_9M_INDEX] = "9M",
3759 [IWL_RATE_12M_INDEX] = "12M|MCS1",
3760 [IWL_RATE_18M_INDEX] = "18M|MCS2",
3761 [IWL_RATE_24M_INDEX] = "24M|MCS3",
3762 [IWL_RATE_36M_INDEX] = "36M|MCS4",
3763 [IWL_RATE_48M_INDEX] = "48M|MCS5",
3764 [IWL_RATE_54M_INDEX] = "54M|MCS6",
3765 [IWL_RATE_MCS_7_INDEX] = "MCS7",
3766 [IWL_RATE_MCS_8_INDEX] = "MCS8",
3767 [IWL_RATE_MCS_9_INDEX] = "MCS9",
3768 };
3769
3770 char *buff, *pos, *endpos;
3771 int col, rate;
3772 ssize_t ret;
3773 struct iwl_lq_sta *lq_sta = file->private_data;
3774 struct rs_rate_stats *stats;
3775 static const size_t bufsz = 1024;
3776
3777 buff = kmalloc(bufsz, GFP_KERNEL);
3778 if (!buff)
3779 return -ENOMEM;
3780
3781 pos = buff;
3782 endpos = pos + bufsz;
3783
3784 pos += scnprintf(pos, endpos - pos, "COLUMN,");
3785 for (rate = 0; rate < IWL_RATE_COUNT; rate++)
3786 pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
3787 pos += scnprintf(pos, endpos - pos, "\n");
3788
3789 for (col = 0; col < RS_COLUMN_COUNT; col++) {
3790 pos += scnprintf(pos, endpos - pos,
3791 "%s,", column_name[col]);
3792
3793 for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
3794 stats = &(lq_sta->pers.tx_stats[col][rate]);
3795 pos += scnprintf(pos, endpos - pos,
3796 "%llu/%llu,",
3797 stats->success,
3798 stats->total);
3799 }
3800 pos += scnprintf(pos, endpos - pos, "\n");
3801 }
3802
3803 ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
3804 kfree(buff);
3805 return ret;
3806}
3807
3808static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
3809 const char __user *user_buf,
3810 size_t count, loff_t *ppos)
3811{
3812 struct iwl_lq_sta *lq_sta = file->private_data;
3813 memset(lq_sta->pers.tx_stats, 0, sizeof(lq_sta->pers.tx_stats));
3814
3815 return count;
3816}
3817
3818static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
3819 .read = rs_sta_dbgfs_drv_tx_stats_read,
3820 .write = rs_sta_dbgfs_drv_tx_stats_write,
3821 .open = simple_open,
3822 .llseek = default_llseek,
3823};
3824
3825static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
3826 char __user *user_buf,
3827 size_t count, loff_t *ppos)
3828{
3829 struct iwl_lq_sta *lq_sta = file->private_data;
3830 char buf[12];
3831 int bufsz = sizeof(buf);
3832 int pos = 0;
3833 static const char * const ss_force_name[] = {
3834 [RS_SS_FORCE_NONE] = "none",
3835 [RS_SS_FORCE_STBC] = "stbc",
3836 [RS_SS_FORCE_BFER] = "bfer",
3837 [RS_SS_FORCE_SISO] = "siso",
3838 };
3839
3840 pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
3841 ss_force_name[lq_sta->pers.ss_force]);
3842 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
3843}
3844
3845static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
3846 size_t count, loff_t *ppos)
3847{
3848 struct iwl_mvm *mvm = lq_sta->pers.drv;
3849 int ret = 0;
3850
3851 if (!strncmp("none", buf, 4)) {
3852 lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
3853 } else if (!strncmp("siso", buf, 4)) {
3854 lq_sta->pers.ss_force = RS_SS_FORCE_SISO;
3855 } else if (!strncmp("stbc", buf, 4)) {
3856 if (lq_sta->stbc_capable) {
3857 lq_sta->pers.ss_force = RS_SS_FORCE_STBC;
3858 } else {
3859 IWL_ERR(mvm,
3860 "can't force STBC. peer doesn't support\n");
3861 ret = -EINVAL;
3862 }
3863 } else if (!strncmp("bfer", buf, 4)) {
3864 if (lq_sta->bfer_capable) {
3865 lq_sta->pers.ss_force = RS_SS_FORCE_BFER;
3866 } else {
3867 IWL_ERR(mvm,
3868 "can't force BFER. peer doesn't support\n");
3869 ret = -EINVAL;
3870 }
3871 } else {
3872 IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n");
3873 ret = -EINVAL;
3874 }
3875 return ret ?: count;
3876}
3877
3878#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
3879 _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
3880#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \
3881 if (!debugfs_create_file(#name, mode, parent, lq_sta, \
3882 &iwl_dbgfs_##name##_ops)) \
3883 goto err; \
3884 } while (0)
3885
3886MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
3887
3888static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
3889{
3890 struct iwl_lq_sta *lq_sta = priv_sta;
3891 struct iwl_mvm_sta *mvmsta;
3892
3893 mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
3894
3895 if (!mvmsta->vif)
3896 return;
3897
3898 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
3899 lq_sta, &rs_sta_dbgfs_scale_table_ops);
3900 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
3901 lq_sta, &rs_sta_dbgfs_stats_table_ops);
3902 debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
3903 lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
3904 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
3905 &lq_sta->tx_agg_tid_en);
3906 debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
3907 &lq_sta->pers.dbg_fixed_txp_reduction);
3908
3909 MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, S_IRUSR | S_IWUSR);
3910 return;
3911err:
3912 IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
3913}
3914
3915static void rs_remove_debugfs(void *mvm, void *mvm_sta)
3916{
3917}
3918#endif
3919
3920/*
3921 * Initialization of rate scaling information is done by driver after
3922 * the station is added. Since mac80211 calls this function before a
3923 * station is added, we ignore it.
3924 */
3925static void rs_rate_init_stub(void *mvm_r,
3926 struct ieee80211_supported_band *sband,
3927 struct cfg80211_chan_def *chandef,
3928 struct ieee80211_sta *sta, void *mvm_sta)
3929{
3930}
3931
3932static const struct rate_control_ops rs_mvm_ops = {
3933 .name = RS_NAME,
3934 .tx_status = rs_mac80211_tx_status,
3935 .get_rate = rs_get_rate,
3936 .rate_init = rs_rate_init_stub,
3937 .alloc = rs_alloc,
3938 .free = rs_free,
3939 .alloc_sta = rs_alloc_sta,
3940 .free_sta = rs_free_sta,
3941 .rate_update = rs_rate_update,
3942#ifdef CONFIG_MAC80211_DEBUGFS
3943 .add_sta_debugfs = rs_add_debugfs,
3944 .remove_sta_debugfs = rs_remove_debugfs,
3945#endif
3946};
3947
3948int iwl_mvm_rate_control_register(void)
3949{
3950 return ieee80211_rate_control_register(&rs_mvm_ops);
3951}
3952
3953void iwl_mvm_rate_control_unregister(void)
3954{
3955 ieee80211_rate_control_unregister(&rs_mvm_ops);
3956}
3957
3958/**
3959 * iwl_mvm_tx_protection - Gets the LQ command, changes it to enable/disable
3960 * Tx protection according to this request and previous requests,
3961 * and sends the LQ command.
3962 * @mvmsta: The station
3963 * @enable: Enable Tx protection?
3964 */
3965int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
3966 bool enable)
3967{
3968 struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
3969
3970 lockdep_assert_held(&mvm->mutex);
3971
3972 if (enable) {
3973 if (mvmsta->tx_protection == 0)
3974 lq->flags |= LQ_FLAG_USE_RTS_MSK;
3975 mvmsta->tx_protection++;
3976 } else {
3977 mvmsta->tx_protection--;
3978 if (mvmsta->tx_protection == 0)
3979 lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
3980 }
3981
3982 return iwl_mvm_send_lq_cmd(mvm, lq, false);
3983}
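/*
 * Illustrative usage sketch (not part of the original rs.c): a hypothetical
 * caller that wants RTS/CTS protection while an aggregation session is open
 * can bump the refcount on start and drop it on stop.  iwl_mvm_tx_protection()
 * only touches the LQ flags when the counter crosses zero, so nested
 * enable/disable pairs stay balanced.  example_agg_start_protection() is an
 * assumed name used only for illustration, not a driver API.
 */
static int example_agg_start_protection(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta)
{
	int ret;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_tx_protection(mvm, mvmsta, true);	/* 0 -> 1 sets RTS */
	mutex_unlock(&mvm->mutex);
	return ret;
}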
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
deleted file mode 100644
index 81314ad9ebe0..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ /dev/null
@@ -1,392 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2015 Intel Mobile Communications GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called LICENSE.
21 *
22 * Contact Information:
23 * Intel Linux Wireless <ilw@linux.intel.com>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 *****************************************************************************/
27
28#ifndef __rs_h__
29#define __rs_h__
30
31#include <net/mac80211.h>
32
33#include "iwl-config.h"
34
35#include "fw-api.h"
36#include "iwl-trans.h"
37
38struct iwl_rs_rate_info {
39 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
40 u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
41 u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
42 u8 plcp_vht_siso;
43 u8 plcp_vht_mimo2;
44 u8 prev_rs; /* previous rate used in rs algo */
45 u8 next_rs; /* next rate used in rs algo */
46};
47
48#define IWL_RATE_60M_PLCP 3
49
50enum {
51 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
52 IWL_RATE_INVALID = IWL_RATE_COUNT,
53};
54
55#define LINK_QUAL_MAX_RETRY_NUM 16
56
57enum {
58 IWL_RATE_6M_INDEX_TABLE = 0,
59 IWL_RATE_9M_INDEX_TABLE,
60 IWL_RATE_12M_INDEX_TABLE,
61 IWL_RATE_18M_INDEX_TABLE,
62 IWL_RATE_24M_INDEX_TABLE,
63 IWL_RATE_36M_INDEX_TABLE,
64 IWL_RATE_48M_INDEX_TABLE,
65 IWL_RATE_54M_INDEX_TABLE,
66 IWL_RATE_1M_INDEX_TABLE,
67 IWL_RATE_2M_INDEX_TABLE,
68 IWL_RATE_5M_INDEX_TABLE,
69 IWL_RATE_11M_INDEX_TABLE,
70 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
71};
72
73/* #define vs. enum to keep from defaulting to 'large integer' */
74#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
75#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
76#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
77#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
78#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
79#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
80#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
81#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
82#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
83#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
84#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
85#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
86#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
87
88
89/* uCode API values for HT/VHT bit rates */
90enum {
91 IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
92 IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
93 IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
94 IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
95 IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
96 IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
97 IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
98 IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
99 IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
100 IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
101 IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
102 IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
103 IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
104 IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
105 IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
106 IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
107 IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
108 IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
109 IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
110 IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
111 IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
112 IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
113 IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
114 IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
115 IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
116 IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
117 IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
118 IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
119 IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
120 IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
121 IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
122 IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
123 IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
124 IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
125 IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
126 IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
127 IWL_RATE_HT_SISO_MCS_INV_PLCP,
128 IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
129 IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
130 IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
131 IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
132 IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
133 IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
134 IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
135};
136
137#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
138
139#define IWL_INVALID_VALUE -1
140
141#define TPC_MAX_REDUCTION 15
142#define TPC_NO_REDUCTION 0
143#define TPC_INVALID 0xff
144
145#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
146#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
147#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
148
149#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
150
151/* load per tid defines for A-MPDU activation */
152#define IWL_AGG_TPT_THREHOLD 0
153#define IWL_AGG_ALL_TID 0xff
154
155enum iwl_table_type {
156 LQ_NONE,
157 LQ_LEGACY_G, /* legacy types */
158 LQ_LEGACY_A,
159 LQ_HT_SISO, /* HT types */
160 LQ_HT_MIMO2,
161 LQ_VHT_SISO, /* VHT types */
162 LQ_VHT_MIMO2,
163 LQ_MAX,
164};
165
166struct rs_rate {
167 int index;
168 enum iwl_table_type type;
169 u8 ant;
170 u32 bw;
171 bool sgi;
172 bool ldpc;
173 bool stbc;
174 bool bfer;
175};
176
177
178#define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \
179 ((type) == LQ_LEGACY_A))
180#define is_type_ht_siso(type) ((type) == LQ_HT_SISO)
181#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
182#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
183#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
184#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
185#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
186#define is_type_mimo(type) (is_type_mimo2(type))
187#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
188#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
189#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
190#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
191
192#define is_legacy(rate) is_type_legacy((rate)->type)
193#define is_ht_siso(rate) is_type_ht_siso((rate)->type)
194#define is_ht_mimo2(rate) is_type_ht_mimo2((rate)->type)
195#define is_vht_siso(rate) is_type_vht_siso((rate)->type)
196#define is_vht_mimo2(rate) is_type_vht_mimo2((rate)->type)
197#define is_siso(rate) is_type_siso((rate)->type)
198#define is_mimo2(rate) is_type_mimo2((rate)->type)
199#define is_mimo(rate) is_type_mimo((rate)->type)
200#define is_ht(rate) is_type_ht((rate)->type)
201#define is_vht(rate) is_type_vht((rate)->type)
202#define is_a_band(rate) is_type_a_band((rate)->type)
203#define is_g_band(rate) is_type_g_band((rate)->type)
204
205#define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20)
206#define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40)
207#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
208
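/*
 * Illustrative helper (not part of the original header): the predicates above
 * all take a struct rs_rate pointer, so composing them is direct.
 * RATE_MCS_CHAN_WIDTH_80 comes from the fw-api headers included above.
 * example_is_vht80_siso() is an assumed name used only for illustration.
 */
static inline bool example_is_vht80_siso(const struct rs_rate *rate)
{
	return is_vht_siso(rate) && is_ht80(rate);
}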
209#define IWL_MAX_MCS_DISPLAY_SIZE 12
210
211struct iwl_rate_mcs_info {
212 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
213 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
214};
215
216/**
217 * struct iwl_rate_scale_data -- tx success history for one rate
218 */
219struct iwl_rate_scale_data {
220 u64 data; /* bitmap of successful frames */
221 s32 success_counter; /* number of frames successful */
222 s32 success_ratio; /* per-cent * 128 */
223 s32 counter; /* number of frames attempted */
224 s32 average_tpt; /* success ratio * expected throughput */
225};
226
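/*
 * Illustrative helper (not in the original header): success_ratio above is
 * stored as percent * 128, so converting a window back to plain percent is a
 * single division.  example_success_percent() is an assumed name.
 */
static inline s32 example_success_percent(const struct iwl_rate_scale_data *win)
{
	return win->success_ratio / 128;
}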
227/* Possible Tx columns
228 * Tx Column = a combo of legacy/siso/mimo x antenna x SGI
229 */
230enum rs_column {
231 RS_COLUMN_LEGACY_ANT_A = 0,
232 RS_COLUMN_LEGACY_ANT_B,
233 RS_COLUMN_SISO_ANT_A,
234 RS_COLUMN_SISO_ANT_B,
235 RS_COLUMN_SISO_ANT_A_SGI,
236 RS_COLUMN_SISO_ANT_B_SGI,
237 RS_COLUMN_MIMO2,
238 RS_COLUMN_MIMO2_SGI,
239
240 RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
241 RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
242 RS_COLUMN_INVALID,
243};
244
245enum rs_ss_force_opt {
246 RS_SS_FORCE_NONE = 0,
247 RS_SS_FORCE_STBC,
248 RS_SS_FORCE_BFER,
249 RS_SS_FORCE_SISO,
250};
251
252/* Packet stats per rate */
253struct rs_rate_stats {
254 u64 success;
255 u64 total;
256};
257
258/**
259 * struct iwl_scale_tbl_info -- tx params and success history for all rates
260 *
261 * There are two of these in struct iwl_lq_sta,
262 * one for "active", and one for "search".
263 */
264struct iwl_scale_tbl_info {
265 struct rs_rate rate;
266 enum rs_column column;
267 const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
268 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
269 /* per txpower-reduction history */
270 struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
271};
272
273enum {
274 RS_STATE_SEARCH_CYCLE_STARTED,
275 RS_STATE_SEARCH_CYCLE_ENDED,
276 RS_STATE_STAY_IN_COLUMN,
277};
278
279/**
280 * struct iwl_lq_sta -- driver's rate scaling private structure
281 *
282 * Pointer to this gets passed back and forth between driver and mac80211.
283 */
284struct iwl_lq_sta {
285 u8 active_tbl; /* index of active table, range 0-1 */
286 u8 rs_state; /* RS_STATE_* */
287 u8 search_better_tbl; /* 1: currently trying alternate mode */
288 s32 last_tpt;
289
290 /* The following determine when to search for a new mode */
291 u32 table_count_limit;
292 u32 max_failure_limit; /* # failed frames before new search */
293 u32 max_success_limit; /* # successful frames before new search */
294 u32 table_count;
295 u32 total_failed; /* total failed frames, any/all rates */
296 u32 total_success; /* total successful frames, any/all rates */
297 u64 flush_timer; /* time staying in mode before new search */
298
299 u32 visited_columns; /* Bitmask marking which Tx columns were
300 * explored during a search cycle
301 */
302 u64 last_tx;
303 bool is_vht;
304 bool ldpc; /* LDPC Rx is supported by the STA */
305 bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */
306	bool bfer_capable;      /* Remote supports beamformee and we can act as BFer */
307
308 enum ieee80211_band band;
309
310 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
311 unsigned long active_legacy_rate;
312 unsigned long active_siso_rate;
313 unsigned long active_mimo2_rate;
314
315 /* Highest rate per Tx mode */
316 u8 max_legacy_rate_idx;
317 u8 max_siso_rate_idx;
318 u8 max_mimo2_rate_idx;
319
320 /* Optimal rate based on RSSI and STA caps.
321 * Used only to reflect link speed to userspace.
322 */
323 struct rs_rate optimal_rate;
324 unsigned long optimal_rate_mask;
325 const struct rs_init_rate_info *optimal_rates;
326 int optimal_nentries;
327
328 u8 missed_rate_counter;
329
330 struct iwl_lq_cmd lq;
331 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
332 u8 tx_agg_tid_en;
333
334 /* last tx rate_n_flags */
335 u32 last_rate_n_flags;
336 /* packets destined for this STA are aggregated */
337 u8 is_agg;
338
339 /* tx power reduce for this sta */
340 int tpc_reduce;
341
342 /* persistent fields - initialized only once - keep last! */
343 struct lq_sta_pers {
344#ifdef CONFIG_MAC80211_DEBUGFS
345 u32 dbg_fixed_rate;
346 u8 dbg_fixed_txp_reduction;
347
348 /* force STBC/BFER/SISO for testing */
349 enum rs_ss_force_opt ss_force;
350#endif
351 u8 chains;
352 s8 chain_signal[IEEE80211_MAX_CHAINS];
353 s8 last_rssi;
354 struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
355 struct iwl_mvm *drv;
356 } pers;
357};
358
359/* Initialize station's rate scaling information after adding station */
360void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
361 enum ieee80211_band band, bool init);
362
363/* Notify RS about Tx status */
364void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
365 int tid, struct ieee80211_tx_info *info);
366
367/**
368 * iwl_mvm_rate_control_register - Register the rate control algorithm callbacks
369 *
370 * Since the rate control algorithm is hardware specific, there is no need
371 * or reason to place it as a standalone module. The driver can call
372 * iwl_mvm_rate_control_register in order to register the rate control
373 * callbacks with the mac80211 subsystem. This should be performed prior to
374 * calling ieee80211_register_hw().
375 *
376 */
377int iwl_mvm_rate_control_register(void);
378
379/**
380 * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks
381 *
382 * This should be called after calling ieee80211_unregister_hw, but before
383 * the driver is unloaded.
384 */
385void iwl_mvm_rate_control_unregister(void);
386
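/*
 * Hedged sketch of the ordering described above (not from the original
 * header): a hypothetical init path registers the rate control ops before
 * registering the hw with mac80211 and unwinds on failure.
 * example_register_order() is an assumed name, not a driver function.
 */
static inline int example_register_order(struct ieee80211_hw *hw)
{
	int ret = iwl_mvm_rate_control_register();

	if (ret)
		return ret;

	ret = ieee80211_register_hw(hw);
	if (ret)
		iwl_mvm_rate_control_unregister();
	return ret;
}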
387struct iwl_mvm_sta;
388
389int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
390 bool enable);
391
392#endif /* __rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
deleted file mode 100644
index 5b58f5320e8d..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ /dev/null
@@ -1,612 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64#include <linux/skbuff.h>
65#include "iwl-trans.h"
66#include "mvm.h"
67#include "fw-api.h"
68
69/*
70 * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
71 *
72 * Copies the phy information into mvm->last_phy_info; it will be used when
73 * the actual data arrives from the fw in the next packet.
74 */
75void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
76{
77 struct iwl_rx_packet *pkt = rxb_addr(rxb);
78
79 memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
80 mvm->ampdu_ref++;
81
82#ifdef CONFIG_IWLWIFI_DEBUGFS
83 if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
84 spin_lock(&mvm->drv_stats_lock);
85 mvm->drv_rx_stats.ampdu_count++;
86 spin_unlock(&mvm->drv_stats_lock);
87 }
88#endif
89}
90
91/*
92 * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
93 *
94 * Adds the rxb to a new skb and gives it to mac80211
95 */
96static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
97 struct napi_struct *napi,
98 struct sk_buff *skb,
99 struct ieee80211_hdr *hdr, u16 len,
100 u32 ampdu_status, u8 crypt_len,
101 struct iwl_rx_cmd_buffer *rxb)
102{
103 unsigned int hdrlen, fraglen;
104
105 /* If frame is small enough to fit in skb->head, pull it completely.
106 * If not, only pull ieee80211_hdr (including crypto if present, and
107 * an additional 8 bytes for SNAP/ethertype, see below) so that
108 * splice() or TCP coalesce are more efficient.
109 *
110	 * Since, in addition, ieee80211_data_to_8023() always pulls in at
111 * least 8 bytes (possibly more for mesh) we can do the same here
112 * to save the cost of doing it later. That still doesn't pull in
113 * the actual IP header since the typical case has a SNAP header.
114 * If the latter changes (there are efforts in the standards group
115 * to do so) we should revisit this and ieee80211_data_to_8023().
116 */
117 hdrlen = (len <= skb_tailroom(skb)) ? len :
118 sizeof(*hdr) + crypt_len + 8;
119
120 memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
121 fraglen = len - hdrlen;
122
123 if (fraglen) {
124 int offset = (void *)hdr + hdrlen -
125 rxb_addr(rxb) + rxb_offset(rxb);
126
127 skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
128 fraglen, rxb->truesize);
129 }
130
131 ieee80211_rx_napi(mvm->hw, skb, napi);
132}
133
134/*
135 * iwl_mvm_get_signal_strength - use new rx PHY INFO API
136 * values are reported by the fw as positive values - need to negate
137 * to obtain their dBm. Account for missing antennas by replacing 0
138 * values by S8_MIN (-128 dBm): practically no power and below any real signal.
139 */
140static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
141 struct iwl_rx_phy_info *phy_info,
142 struct ieee80211_rx_status *rx_status)
143{
144 int energy_a, energy_b, energy_c, max_energy;
145 u32 val;
146
147 val =
148 le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
149 energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
150 IWL_RX_INFO_ENERGY_ANT_A_POS;
151 energy_a = energy_a ? -energy_a : S8_MIN;
152 energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
153 IWL_RX_INFO_ENERGY_ANT_B_POS;
154 energy_b = energy_b ? -energy_b : S8_MIN;
155 energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
156 IWL_RX_INFO_ENERGY_ANT_C_POS;
157 energy_c = energy_c ? -energy_c : S8_MIN;
158 max_energy = max(energy_a, energy_b);
159 max_energy = max(max_energy, energy_c);
160
161 IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
162 energy_a, energy_b, energy_c, max_energy);
163
164 rx_status->signal = max_energy;
165 rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
166 RX_RES_PHY_FLAGS_ANTENNA)
167 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
168 rx_status->chain_signal[0] = energy_a;
169 rx_status->chain_signal[1] = energy_b;
170 rx_status->chain_signal[2] = energy_c;
171}
172
173/*
174 * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format
175 * @mvm: the mvm object
176 * @hdr: 80211 header
177 * @stats: status in mac80211's format
178 * @rx_pkt_status: status coming from fw
179 *
180 * returns a non-zero value if the packet should be dropped
181 */
182static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
183 struct ieee80211_hdr *hdr,
184 struct ieee80211_rx_status *stats,
185 u32 rx_pkt_status,
186 u8 *crypt_len)
187{
188 if (!ieee80211_has_protected(hdr->frame_control) ||
189 (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
190 RX_MPDU_RES_STATUS_SEC_NO_ENC)
191 return 0;
192
193 /* packet was encrypted with unknown alg */
194 if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
195 RX_MPDU_RES_STATUS_SEC_ENC_ERR)
196 return 0;
197
198 switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
199 case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
200 /* alg is CCM: check MIC only */
201 if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
202 return -1;
203
204 stats->flag |= RX_FLAG_DECRYPTED;
205 *crypt_len = IEEE80211_CCMP_HDR_LEN;
206 return 0;
207
208 case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
209 /* Don't drop the frame and decrypt it in SW */
210 if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
211 return 0;
212 *crypt_len = IEEE80211_TKIP_IV_LEN;
213 /* fall through if TTAK OK */
214
215 case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
216 if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
217 return -1;
218
219 stats->flag |= RX_FLAG_DECRYPTED;
220 if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
221 RX_MPDU_RES_STATUS_SEC_WEP_ENC)
222 *crypt_len = IEEE80211_WEP_IV_LEN;
223 return 0;
224
225 case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
226 if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
227 return -1;
228 stats->flag |= RX_FLAG_DECRYPTED;
229 return 0;
230
231 default:
232 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
233 }
234
235 return 0;
236}
237
238static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
239 struct sk_buff *skb,
240 u32 status)
241{
242 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
243 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
244
245 if (mvmvif->features & NETIF_F_RXCSUM &&
246 status & RX_MPDU_RES_STATUS_CSUM_DONE &&
247 status & RX_MPDU_RES_STATUS_CSUM_OK)
248 skb->ip_summed = CHECKSUM_UNNECESSARY;
249}
250
251/*
252 * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
253 *
254 * Handles the actual data of the Rx packet from the fw
255 */
256void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
257 struct iwl_rx_cmd_buffer *rxb)
258{
259 struct ieee80211_hdr *hdr;
260 struct ieee80211_rx_status *rx_status;
261 struct iwl_rx_packet *pkt = rxb_addr(rxb);
262 struct iwl_rx_phy_info *phy_info;
263 struct iwl_rx_mpdu_res_start *rx_res;
264 struct ieee80211_sta *sta;
265 struct sk_buff *skb;
266 u32 len;
267 u32 ampdu_status;
268 u32 rate_n_flags;
269 u32 rx_pkt_status;
270 u8 crypt_len = 0;
271
272 phy_info = &mvm->last_phy_info;
273 rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
274 hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
275 len = le16_to_cpu(rx_res->byte_count);
276 rx_pkt_status = le32_to_cpup((__le32 *)
277 (pkt->data + sizeof(*rx_res) + len));
278
279	/* Don't use dev_alloc_skb(); we'll have enough headroom once
280 * ieee80211_hdr pulled.
281 */
282 skb = alloc_skb(128, GFP_ATOMIC);
283 if (!skb) {
284 IWL_ERR(mvm, "alloc_skb failed\n");
285 return;
286 }
287
288 rx_status = IEEE80211_SKB_RXCB(skb);
289
290 /*
291	 * drop the packet if HW decryption of it failed
292 */
293 if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
294 &crypt_len)) {
295 IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
296 rx_pkt_status);
297 kfree_skb(skb);
298 return;
299 }
300
301 /*
302 * Keep packets with CRC errors (and with overrun) for monitor mode
303 * (otherwise the firmware discards them) but mark them as bad.
304 */
305 if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
306 !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
307 IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
308 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
309 }
310
311 /* This will be used in several places later */
312 rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
313
314 /* rx_status carries information about the packet to mac80211 */
315 rx_status->mactime = le64_to_cpu(phy_info->timestamp);
316 rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
317 rx_status->band =
318 (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
319 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
320 rx_status->freq =
321 ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
322 rx_status->band);
323 /*
324 * TSF as indicated by the fw is at INA time, but mac80211 expects the
325 * TSF at the beginning of the MPDU.
326 */
327 /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/
328
329 iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
330
331 IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
332 (unsigned long long)rx_status->mactime);
333
334 rcu_read_lock();
335 /*
336 * We have tx blocked stations (with CS bit). If we heard frames from
337 * a blocked station on a new channel we can TX to it again.
338 */
339 if (unlikely(mvm->csa_tx_block_bcn_timeout)) {
340 sta = ieee80211_find_sta(
341 rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2);
342 if (sta)
343 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
344 }
345
346 /* This is fine since we don't support multiple AP interfaces */
347 sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
348 if (sta) {
349 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
350
351 rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
352
353 if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
354 ieee80211_is_beacon(hdr->frame_control)) {
355 struct iwl_fw_dbg_trigger_tlv *trig;
356 struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
357 bool trig_check;
358 s32 rssi;
359
360 trig = iwl_fw_dbg_get_trigger(mvm->fw,
361 FW_DBG_TRIGGER_RSSI);
362 rssi_trig = (void *)trig->data;
363 rssi = le32_to_cpu(rssi_trig->rssi);
364
365 trig_check =
366 iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
367 trig);
368 if (trig_check && rx_status->signal < rssi)
369 iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
370 }
371 }
372
373 if (sta && ieee80211_is_data(hdr->frame_control))
374 iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
375
376 rcu_read_unlock();
377
378 /* set the preamble flag if appropriate */
379 if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
380 rx_status->flag |= RX_FLAG_SHORTPRE;
381
382 if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
383 /*
384 * We know which subframes of an A-MPDU belong
385 * together since we get a single PHY response
386 * from the firmware for all of them
387 */
388 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
389 rx_status->ampdu_reference = mvm->ampdu_ref;
390 }
391
392 /* Set up the HT phy flags */
393 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
394 case RATE_MCS_CHAN_WIDTH_20:
395 break;
396 case RATE_MCS_CHAN_WIDTH_40:
397 rx_status->flag |= RX_FLAG_40MHZ;
398 break;
399 case RATE_MCS_CHAN_WIDTH_80:
400 rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
401 break;
402 case RATE_MCS_CHAN_WIDTH_160:
403 rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
404 break;
405 }
406 if (rate_n_flags & RATE_MCS_SGI_MSK)
407 rx_status->flag |= RX_FLAG_SHORT_GI;
408 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
409 rx_status->flag |= RX_FLAG_HT_GF;
410 if (rate_n_flags & RATE_MCS_LDPC_MSK)
411 rx_status->flag |= RX_FLAG_LDPC;
412 if (rate_n_flags & RATE_MCS_HT_MSK) {
413 u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
414 RATE_MCS_STBC_POS;
415 rx_status->flag |= RX_FLAG_HT;
416 rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
417 rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
418 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
419 u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
420 RATE_MCS_STBC_POS;
421 rx_status->vht_nss =
422 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
423 RATE_VHT_MCS_NSS_POS) + 1;
424 rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
425 rx_status->flag |= RX_FLAG_VHT;
426 rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
427 if (rate_n_flags & RATE_MCS_BF_MSK)
428 rx_status->vht_flag |= RX_VHT_FLAG_BF;
429 } else {
430 rx_status->rate_idx =
431 iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
432 rx_status->band);
433 }
434
435#ifdef CONFIG_IWLWIFI_DEBUGFS
436 iwl_mvm_update_frame_stats(mvm, rate_n_flags,
437 rx_status->flag & RX_FLAG_AMPDU_DETAILS);
438#endif
439 iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
440 crypt_len, rxb);
441}
442
443static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
444 struct mvm_statistics_rx *rx_stats)
445{
446 lockdep_assert_held(&mvm->mutex);
447
448 mvm->rx_stats = *rx_stats;
449}
450
451struct iwl_mvm_stat_data {
452 struct iwl_mvm *mvm;
453 __le32 mac_id;
454 u8 beacon_filter_average_energy;
455 struct mvm_statistics_general_v8 *general;
456};
457
458static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
459 struct ieee80211_vif *vif)
460{
461 struct iwl_mvm_stat_data *data = _data;
462 struct iwl_mvm *mvm = data->mvm;
463 int sig = -data->beacon_filter_average_energy;
464 int last_event;
465 int thold = vif->bss_conf.cqm_rssi_thold;
466 int hyst = vif->bss_conf.cqm_rssi_hyst;
467 u16 id = le32_to_cpu(data->mac_id);
468 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
469
470 /* This doesn't need the MAC ID check since it's not taking the
471 * data copied into the "data" struct, but rather the data from
472 * the notification directly.
473 */
474 if (data->general) {
475 mvmvif->beacon_stats.num_beacons =
476 le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
477 mvmvif->beacon_stats.avg_signal =
478 -data->general->beacon_average_energy[mvmvif->id];
479 }
480
481 if (mvmvif->id != id)
482 return;
483
484 if (vif->type != NL80211_IFTYPE_STATION)
485 return;
486
487 if (sig == 0) {
488 IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
489 return;
490 }
491
492 mvmvif->bf_data.ave_beacon_signal = sig;
493
494 /* BT Coex */
495 if (mvmvif->bf_data.bt_coex_min_thold !=
496 mvmvif->bf_data.bt_coex_max_thold) {
497 last_event = mvmvif->bf_data.last_bt_coex_event;
498 if (sig > mvmvif->bf_data.bt_coex_max_thold &&
499 (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
500 last_event == 0)) {
501 mvmvif->bf_data.last_bt_coex_event = sig;
502 IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
503 sig);
504 iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
505 } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
506 (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
507 last_event == 0)) {
508 mvmvif->bf_data.last_bt_coex_event = sig;
509 IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
510 sig);
511 iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
512 }
513 }
514
515 if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
516 return;
517
518 /* CQM Notification */
519 last_event = mvmvif->bf_data.last_cqm_event;
520 if (thold && sig < thold && (last_event == 0 ||
521 sig < last_event - hyst)) {
522 mvmvif->bf_data.last_cqm_event = sig;
523 IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
524 sig);
525 ieee80211_cqm_rssi_notify(
526 vif,
527 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
528 GFP_KERNEL);
529 } else if (sig > thold &&
530 (last_event == 0 || sig > last_event + hyst)) {
531 mvmvif->bf_data.last_cqm_event = sig;
532 IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
533 sig);
534 ieee80211_cqm_rssi_notify(
535 vif,
536 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
537 GFP_KERNEL);
538 }
539}
540
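/*
 * Illustrative predicate (not in the original file) mirroring the CQM
 * hysteresis test in iwl_mvm_stat_iterator() above: a LOW event fires only
 * when the signal is below the threshold and has moved by at least the
 * hysteresis since the last reported event (or no event was reported yet).
 * example_cqm_low_event() is an assumed name used only for illustration.
 */
static inline bool example_cqm_low_event(int sig, int thold, int hyst,
					 int last_event)
{
	return thold && sig < thold &&
	       (last_event == 0 || sig < last_event - hyst);
}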
541static inline void
542iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
543{
544 struct iwl_fw_dbg_trigger_tlv *trig;
545 struct iwl_fw_dbg_trigger_stats *trig_stats;
546 u32 trig_offset, trig_thold;
547
548 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
549 return;
550
551 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
552 trig_stats = (void *)trig->data;
553
554 if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
555 return;
556
557 trig_offset = le32_to_cpu(trig_stats->stop_offset);
558 trig_thold = le32_to_cpu(trig_stats->stop_threshold);
559
560 if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
561 return;
562
563 if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
564 return;
565
566 iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
567}
568
569void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
570 struct iwl_rx_packet *pkt)
571{
572 struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
573 struct iwl_mvm_stat_data data = {
574 .mvm = mvm,
575 };
576 u32 temperature;
577
578 if (iwl_rx_packet_payload_len(pkt) != sizeof(*stats))
579 goto invalid;
580
581 temperature = le32_to_cpu(stats->general.radio_temperature);
582 data.mac_id = stats->rx.general.mac_id;
583 data.beacon_filter_average_energy =
584 stats->general.beacon_filter_average_energy;
585
586 iwl_mvm_update_rx_statistics(mvm, &stats->rx);
587
588 mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
589 mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
590 mvm->radio_stats.on_time_rf =
591 le64_to_cpu(stats->general.on_time_rf);
592 mvm->radio_stats.on_time_scan =
593 le64_to_cpu(stats->general.on_time_scan);
594
595 data.general = &stats->general;
596
597 iwl_mvm_rx_stats_check_trigger(mvm, pkt);
598
599 ieee80211_iterate_active_interfaces(mvm->hw,
600 IEEE80211_IFACE_ITER_NORMAL,
601 iwl_mvm_stat_iterator,
602 &data);
603 return;
604 invalid:
605 IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
606 iwl_rx_packet_payload_len(pkt));
607}
608
609void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
610{
611 iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
612}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
deleted file mode 100644
index d6e0c1b5c20c..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ /dev/null
@@ -1,1552 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/etherdevice.h>
67#include <net/mac80211.h>
68
69#include "mvm.h"
70#include "fw-api-scan.h"
71
72#define IWL_DENSE_EBS_SCAN_RATIO 5
73#define IWL_SPARSE_EBS_SCAN_RATIO 1
74
75enum iwl_mvm_scan_type {
76 IWL_SCAN_TYPE_UNASSOC,
77 IWL_SCAN_TYPE_WILD,
78 IWL_SCAN_TYPE_MILD,
79 IWL_SCAN_TYPE_FRAGMENTED,
80};
81
82enum iwl_mvm_traffic_load {
83 IWL_MVM_TRAFFIC_LOW,
84 IWL_MVM_TRAFFIC_MEDIUM,
85 IWL_MVM_TRAFFIC_HIGH,
86};
87
88struct iwl_mvm_scan_timing_params {
89 u32 dwell_active;
90 u32 dwell_passive;
91 u32 dwell_fragmented;
92 u32 suspend_time;
93 u32 max_out_time;
94};
95
96static struct iwl_mvm_scan_timing_params scan_timing[] = {
97 [IWL_SCAN_TYPE_UNASSOC] = {
98 .dwell_active = 10,
99 .dwell_passive = 110,
100 .dwell_fragmented = 44,
101 .suspend_time = 0,
102 .max_out_time = 0,
103 },
104 [IWL_SCAN_TYPE_WILD] = {
105 .dwell_active = 10,
106 .dwell_passive = 110,
107 .dwell_fragmented = 44,
108 .suspend_time = 30,
109 .max_out_time = 120,
110 },
111 [IWL_SCAN_TYPE_MILD] = {
112 .dwell_active = 10,
113 .dwell_passive = 110,
114 .dwell_fragmented = 44,
115 .suspend_time = 120,
116 .max_out_time = 120,
117 },
118 [IWL_SCAN_TYPE_FRAGMENTED] = {
119 .dwell_active = 10,
120 .dwell_passive = 110,
121 .dwell_fragmented = 44,
122 .suspend_time = 95,
123 .max_out_time = 44,
124 },
125};
126
127struct iwl_mvm_scan_params {
128 enum iwl_mvm_scan_type type;
129 u32 n_channels;
130 u16 delay;
131 int n_ssids;
132 struct cfg80211_ssid *ssids;
133 struct ieee80211_channel **channels;
134 u32 flags;
135 u8 *mac_addr;
136 u8 *mac_addr_mask;
137 bool no_cck;
138 bool pass_all;
139 int n_match_sets;
140 struct iwl_scan_probe_req preq;
141 struct cfg80211_match_set *match_sets;
142 int n_scan_plans;
143 struct cfg80211_sched_scan_plan *scan_plans;
144};
145
146static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
147{
148 if (mvm->scan_rx_ant != ANT_NONE)
149 return mvm->scan_rx_ant;
150 return iwl_mvm_get_valid_rx_ant(mvm);
151}
152
153static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
154{
155 u16 rx_chain;
156 u8 rx_ant;
157
158 rx_ant = iwl_mvm_scan_rx_ant(mvm);
159 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
160 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
161 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
162 rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
163 return cpu_to_le16(rx_chain);
164}
165
166static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
167{
168 if (band == IEEE80211_BAND_2GHZ)
169 return cpu_to_le32(PHY_BAND_24);
170 else
171 return cpu_to_le32(PHY_BAND_5);
172}
173
174static inline __le32
175iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
176 bool no_cck)
177{
178 u32 tx_ant;
179
180 mvm->scan_last_antenna_idx =
181 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
182 mvm->scan_last_antenna_idx);
183 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
184
185 if (band == IEEE80211_BAND_2GHZ && !no_cck)
186 return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
187 tx_ant);
188 else
189 return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
190}
191
192static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
193 struct ieee80211_vif *vif)
194{
195 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
196 int *global_cnt = data;
197
198 if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
199 mvmvif->phy_ctxt->id < MAX_PHYS)
200 *global_cnt += 1;
201}
202
203static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
204{
205 return IWL_MVM_TRAFFIC_LOW;
206}
207
208static enum
209iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
210 struct ieee80211_vif *vif,
211 struct iwl_mvm_scan_params *params)
212{
213 int global_cnt = 0;
214 enum iwl_mvm_traffic_load load;
215 bool low_latency;
216
217 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
218 IEEE80211_IFACE_ITER_NORMAL,
219 iwl_mvm_scan_condition_iterator,
220 &global_cnt);
221 if (!global_cnt)
222 return IWL_SCAN_TYPE_UNASSOC;
223
224 load = iwl_mvm_get_traffic_load(mvm);
225 low_latency = iwl_mvm_low_latency(mvm);
226
227 if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
228 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
229 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
230 return IWL_SCAN_TYPE_FRAGMENTED;
231
232 if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
233 return IWL_SCAN_TYPE_MILD;
234
235 return IWL_SCAN_TYPE_WILD;
236}
237
238static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
239{
240 /* require rrm scan whenever the fw supports it */
241 return fw_has_capa(&mvm->fw->ucode_capa,
242 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
243}
244
245static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
246{
247 int max_probe_len;
248
249 max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
250
251 /* we create the 802.11 header and SSID element */
252 max_probe_len -= 24 + 2;
253
254	/* DS parameter set element is added on the 2.4 GHz band if required */
255 if (iwl_mvm_rrm_scan_needed(mvm))
256 max_probe_len -= 3;
257
258 return max_probe_len;
259}
260
261int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
262{
263 int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
264
265 /* TODO: [BUG] This function should return the maximum allowed size of
266	 * scan IEs; however, the LMAC scan api contains both 2 GHz and 5 GHz IEs
267 * in the same command. So the correct implementation of this function
268 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
269 * command has only 512 bytes and it would leave us with about 240
270 * bytes for scan IEs, which is clearly not enough. So meanwhile
271 * we will report an incorrect value. This may result in a failure to
272 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
273	 * functions with -ENOBUFS, if a large enough probe is provided.
274 */
275 return max_ie_len;
276}
277
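/*
 * Sketch only (not in the original file): the TODO above says that once the
 * LMAC command layout is taken into account, the per-band IE room would
 * simply be half of the fw command room.  example_per_band_ie_room() is an
 * assumed name illustrating that computation, not an implemented fix.
 */
static inline int example_per_band_ie_room(struct iwl_mvm *mvm)
{
	return iwl_mvm_max_scan_ie_fw_cmd_room(mvm) / 2;
}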
278static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
279 int num_res, u8 *buf, size_t buf_size)
280{
281 int i;
282 u8 *pos = buf, *end = buf + buf_size;
283
284 for (i = 0; pos < end && i < num_res; i++)
285 pos += snprintf(pos, end - pos, " %u", res[i].channel);
286
287 /* terminate the string in case the buffer was too short */
288 *(buf + buf_size - 1) = '\0';
289
290 return buf;
291}
292
293void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
294 struct iwl_rx_cmd_buffer *rxb)
295{
296 struct iwl_rx_packet *pkt = rxb_addr(rxb);
297 struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
298 u8 buf[256];
299
300 IWL_DEBUG_SCAN(mvm,
301 "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
302 notif->status, notif->scanned_channels,
303 iwl_mvm_dump_channel_list(notif->results,
304 notif->scanned_channels, buf,
305 sizeof(buf)));
306}
307
308void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
309 struct iwl_rx_cmd_buffer *rxb)
310{
311 IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
312 ieee80211_sched_scan_results(mvm->hw);
313}
314
315static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
316{
317 switch (status) {
318 case IWL_SCAN_EBS_SUCCESS:
319 return "successful";
320 case IWL_SCAN_EBS_INACTIVE:
321 return "inactive";
322 case IWL_SCAN_EBS_FAILED:
323 case IWL_SCAN_EBS_CHAN_NOT_FOUND:
324 default:
325 return "failed";
326 }
327}
328
329void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
330 struct iwl_rx_cmd_buffer *rxb)
331{
332 struct iwl_rx_packet *pkt = rxb_addr(rxb);
333 struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
334 bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
335
336 /* scan status must be locked for proper checking */
337 lockdep_assert_held(&mvm->mutex);
338
339 /* We first check if we were stopping a scan, in which case we
340 * just clear the stopping flag. Then we check if it was a
341 * firmware initiated stop, in which case we need to inform
342 * mac80211.
343 * Note that we can have a stopping and a running scan
344 * simultaneously, but we can't have two different types of
345 * scans stopping or running at the same time (since LMAC
346 * doesn't support it).
347 */
348
349 if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
350 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
351
352 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
353 aborted ? "aborted" : "completed",
354 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
355 IWL_DEBUG_SCAN(mvm,
356 "Last line %d, Last iteration %d, Time after last iteration %d\n",
357 scan_notif->last_schedule_line,
358 scan_notif->last_schedule_iteration,
359 __le32_to_cpu(scan_notif->time_after_last_iter));
360
361 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
362 } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
363 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
364 aborted ? "aborted" : "completed",
365 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
366
367 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
368 } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
369 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
370
371 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
372 aborted ? "aborted" : "completed",
373 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
374 IWL_DEBUG_SCAN(mvm,
375 "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
376 scan_notif->last_schedule_line,
377 scan_notif->last_schedule_iteration,
378 __le32_to_cpu(scan_notif->time_after_last_iter));
379
380 mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
381 ieee80211_sched_scan_stopped(mvm->hw);
382 } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
383 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
384 aborted ? "aborted" : "completed",
385 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
386
387 mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
388 ieee80211_scan_completed(mvm->hw,
389 scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
390 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
391 }
392
393 mvm->last_ebs_successful =
394 scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
395 scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
396}
397
398static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
399{
400 int i;
401
402 for (i = 0; i < PROBE_OPTION_MAX; i++) {
403 if (!ssid_list[i].len)
404 break;
405 if (ssid_list[i].len == ssid_len &&
406		    !memcmp(ssid_list[i].ssid, ssid, ssid_len))
407 return i;
408 }
409 return -1;
410}
411
412/* We insert the SSIDs in an inverted order, because the FW will
413 * invert it back.
414 */
415static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
416 struct iwl_ssid_ie *ssids,
417 u32 *ssid_bitmap)
418{
419 int i, j;
420 int index;
421
422 /*
423 * copy SSIDs from match list.
424	 * iwl_mvm_config_sched_scan_profiles() uses the order of these ssids to
425	 * configure the match list.
426 */
427 for (i = 0, j = params->n_match_sets - 1;
428 j >= 0 && i < PROBE_OPTION_MAX;
429 i++, j--) {
430 /* skip empty SSID matchsets */
431 if (!params->match_sets[j].ssid.ssid_len)
432 continue;
433 ssids[i].id = WLAN_EID_SSID;
434 ssids[i].len = params->match_sets[j].ssid.ssid_len;
435 memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
436 ssids[i].len);
437 }
438
439 /* add SSIDs from scan SSID list */
440 *ssid_bitmap = 0;
441 for (j = params->n_ssids - 1;
442 j >= 0 && i < PROBE_OPTION_MAX;
443 i++, j--) {
444 index = iwl_ssid_exist(params->ssids[j].ssid,
445 params->ssids[j].ssid_len,
446 ssids);
447 if (index < 0) {
448 ssids[i].id = WLAN_EID_SSID;
449 ssids[i].len = params->ssids[j].ssid_len;
450 memcpy(ssids[i].ssid, params->ssids[j].ssid,
451 ssids[i].len);
452 *ssid_bitmap |= BIT(i);
453 } else {
454 *ssid_bitmap |= BIT(index);
455 }
456 }
457}
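/*
 * Worked example for the ordering above, with hypothetical SSIDs: given
 * match sets {M0, M1} and one scan SSID S0 that is not in the match list,
 * direct_scan ends up as [M1, M0, S0] and *ssid_bitmap is BIT(2). Only
 * the scan SSIDs contribute bits to the bitmap; match-set entries are
 * referenced through the profile configuration instead.
 */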
458
459static int
460iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
461 struct cfg80211_sched_scan_request *req)
462{
463 struct iwl_scan_offload_profile *profile;
464 struct iwl_scan_offload_profile_cfg *profile_cfg;
465 struct iwl_scan_offload_blacklist *blacklist;
466 struct iwl_host_cmd cmd = {
467 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
468 .len[1] = sizeof(*profile_cfg),
469 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
470 .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
471 };
472 int blacklist_len;
473 int i;
474 int ret;
475
476 if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
477 return -EIO;
478
479 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
480 blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
481 else
482 blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
483
484 blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
485 if (!blacklist)
486 return -ENOMEM;
487
488 profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
489 if (!profile_cfg) {
490 ret = -ENOMEM;
491 goto free_blacklist;
492 }
493
494 cmd.data[0] = blacklist;
495 cmd.len[0] = sizeof(*blacklist) * blacklist_len;
496 cmd.data[1] = profile_cfg;
497
498 /* No blacklist configuration */
499
500 profile_cfg->num_profiles = req->n_match_sets;
501 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
502 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
503 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
504 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
505 profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
506
507 for (i = 0; i < req->n_match_sets; i++) {
508 profile = &profile_cfg->profiles[i];
509 profile->ssid_index = i;
510 /* Support any cipher and auth algorithm */
511 profile->unicast_cipher = 0xff;
512 profile->auth_alg = 0xff;
513 profile->network_type = IWL_NETWORK_TYPE_ANY;
514 profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
515 profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
516 }
517
518 IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
519
520 ret = iwl_mvm_send_cmd(mvm, &cmd);
521 kfree(profile_cfg);
522free_blacklist:
523 kfree(blacklist);
524
525 return ret;
526}
527
528static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
529 struct cfg80211_sched_scan_request *req)
530{
531 if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
532 IWL_DEBUG_SCAN(mvm,
533 "Sending scheduled scan with filtering, n_match_sets %d\n",
534 req->n_match_sets);
535 return false;
536 }
537
538 IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
539 return true;
540}
541
542static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
543{
544 int ret;
545 struct iwl_host_cmd cmd = {
546 .id = SCAN_OFFLOAD_ABORT_CMD,
547 };
548 u32 status;
549
550 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
551 if (ret)
552 return ret;
553
554 if (status != CAN_ABORT_STATUS) {
555 /*
556 * The scan abort will return 1 for success or
557 * 2 for "failure". A failure condition can be
558		 * due to simply not being in an active scan, which
559		 * can occur if we send the scan abort before the
560		 * microcode has notified us that a scan has completed.
561 */
562 IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
563 ret = -ENOENT;
564 }
565
566 return ret;
567}
568
569static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
570 struct iwl_scan_req_tx_cmd *tx_cmd,
571 bool no_cck)
572{
573 tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
574 TX_CMD_FLG_BT_DIS);
575 tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
576 IEEE80211_BAND_2GHZ,
577 no_cck);
578 tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
579
580 tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
581 TX_CMD_FLG_BT_DIS);
582 tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
583 IEEE80211_BAND_5GHZ,
584 no_cck);
585 tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
586}
587
588static void
589iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
590 struct ieee80211_channel **channels,
591 int n_channels, u32 ssid_bitmap,
592 struct iwl_scan_req_lmac *cmd)
593{
594 struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
595 int i;
596
597 for (i = 0; i < n_channels; i++) {
598 channel_cfg[i].channel_num =
599 cpu_to_le16(channels[i]->hw_value);
600 channel_cfg[i].iter_count = cpu_to_le16(1);
601 channel_cfg[i].iter_interval = 0;
602 channel_cfg[i].flags =
603 cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
604 ssid_bitmap);
605 }
606}
607
608static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
609 size_t len, u8 *const pos)
610{
611 static const u8 before_ds_params[] = {
612 WLAN_EID_SSID,
613 WLAN_EID_SUPP_RATES,
614 WLAN_EID_REQUEST,
615 WLAN_EID_EXT_SUPP_RATES,
616 };
617 size_t offs;
618 u8 *newpos = pos;
619
620 if (!iwl_mvm_rrm_scan_needed(mvm)) {
621 memcpy(newpos, ies, len);
622 return newpos + len;
623 }
624
625 offs = ieee80211_ie_split(ies, len,
626 before_ds_params,
627 ARRAY_SIZE(before_ds_params),
628 0);
629
630 memcpy(newpos, ies, offs);
631 newpos += offs;
632
633 /* Add a placeholder for DS Parameter Set element */
634 *newpos++ = WLAN_EID_DS_PARAMS;
635 *newpos++ = 1;
636 *newpos++ = 0;
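	/* The three bytes above form an empty DS Parameter Set element
	 * (WLAN_EID_DS_PARAMS, length 1, channel 0); a placeholder channel
	 * of 0 is written here and is presumably replaced with the real
	 * channel number later, per scanned channel.
	 */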
637
638 memcpy(newpos, ies + offs, len - offs);
639 newpos += len - offs;
640
641 return newpos;
642}
643
644static void
645iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
646 struct ieee80211_scan_ies *ies,
647 struct iwl_mvm_scan_params *params)
648{
649 struct ieee80211_mgmt *frame = (void *)params->preq.buf;
650 u8 *pos, *newpos;
651 const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
652 params->mac_addr : NULL;
653
654 /*
655 * Unfortunately, right now the offload scan doesn't support randomising
656 * within the firmware, so until the firmware API is ready we implement
657	 * it in the driver. This means the address is only randomised when the
658	 * scan is (re)started, not on every scan iteration, but that helps a bit.
659 */
660 if (mac_addr)
661 get_random_mask_addr(frame->sa, mac_addr,
662 params->mac_addr_mask);
663 else
664 memcpy(frame->sa, vif->addr, ETH_ALEN);
665
666 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
667 eth_broadcast_addr(frame->da);
668 eth_broadcast_addr(frame->bssid);
669 frame->seq_ctrl = 0;
670
671 pos = frame->u.probe_req.variable;
672 *pos++ = WLAN_EID_SSID;
673 *pos++ = 0;
674
675 params->preq.mac_header.offset = 0;
676 params->preq.mac_header.len = cpu_to_le16(24 + 2);
677
678 /* Insert ds parameter set element on 2.4 GHz band */
679 newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
680 ies->ies[IEEE80211_BAND_2GHZ],
681 ies->len[IEEE80211_BAND_2GHZ],
682 pos);
683 params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
684 params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
685 pos = newpos;
686
687 memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
688 ies->len[IEEE80211_BAND_5GHZ]);
689 params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
690 params->preq.band_data[1].len =
691 cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
692 pos += ies->len[IEEE80211_BAND_5GHZ];
693
694 memcpy(pos, ies->common_ies, ies->common_ie_len);
695 params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
696 params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
697}
698
699static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
700 enum iwl_scan_priority_ext prio)
701{
702 if (fw_has_api(&mvm->fw->ucode_capa,
703 IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
704 return cpu_to_le32(prio);
705
706 if (prio <= IWL_SCAN_PRIORITY_EXT_2)
707 return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
708
709 if (prio <= IWL_SCAN_PRIORITY_EXT_4)
710 return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
711
712 return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
713}
714
715static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
716 struct iwl_scan_req_lmac *cmd,
717 struct iwl_mvm_scan_params *params)
718{
719 cmd->active_dwell = scan_timing[params->type].dwell_active;
720 cmd->passive_dwell = scan_timing[params->type].dwell_passive;
721 cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
722 cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
723 cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
724 cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
725}
726
727static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
728 struct ieee80211_scan_ies *ies,
729 int n_channels)
730{
731 return ((n_ssids <= PROBE_OPTION_MAX) &&
732		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
733 (ies->common_ie_len +
734 ies->len[NL80211_BAND_2GHZ] +
735 ies->len[NL80211_BAND_5GHZ] <=
736 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
737}
738
739static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
740 struct ieee80211_vif *vif)
741{
742 const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
743
744	/* We can only use EBS if:
745	 *  1. the feature is supported;
746	 *  2. the last EBS was successful;
747	 *  3. it's not a p2p find operation.
748	 * Note: single scan EBS API support is not checked here.
749	 */
750 return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
751 mvm->last_ebs_successful &&
752 vif->type != NL80211_IFTYPE_P2P_DEVICE);
753}
754
755static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
756 struct iwl_mvm_scan_params *params)
757{
758 int flags = 0;
759
760 if (params->n_ssids == 0)
761 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
762
763 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
764 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
765
766 if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
767 flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
768
769 if (iwl_mvm_rrm_scan_needed(mvm))
770 flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
771
772 if (params->pass_all)
773 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
774 else
775 flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
776
777#ifdef CONFIG_IWLWIFI_DEBUGFS
778 if (mvm->scan_iter_notif_enabled)
779 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
780#endif
781
782 return flags;
783}
784
785static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
786 struct iwl_mvm_scan_params *params)
787{
788 struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
789 struct iwl_scan_probe_req *preq =
790 (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
791 mvm->fw->ucode_capa.n_scan_channels);
792 u32 ssid_bitmap = 0;
793 int i;
794
795 lockdep_assert_held(&mvm->mutex);
796
797 memset(cmd, 0, ksize(cmd));
798
799 if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
800 return -EINVAL;
801
802 iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
803
804 cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
805 cmd->iter_num = cpu_to_le32(1);
806 cmd->n_channels = (u8)params->n_channels;
807
808 cmd->delay = cpu_to_le32(params->delay);
809
810 cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params));
811
812 cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
813 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
814 MAC_FILTER_IN_BEACON);
815 iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
816 iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
817
818 /* this API uses bits 1-20 instead of 0-19 */
819 ssid_bitmap <<= 1;
820
821 for (i = 0; i < params->n_scan_plans; i++) {
822 struct cfg80211_sched_scan_plan *scan_plan =
823 &params->scan_plans[i];
824
825 cmd->schedule[i].delay =
826 cpu_to_le16(scan_plan->interval);
827 cmd->schedule[i].iterations = scan_plan->iterations;
828 cmd->schedule[i].full_scan_mul = 1;
829 }
830
831	/*
832	 * If the number of iterations of the last scan plan is set to
833	 * zero, it should run infinitely. However, the last plan does not
834	 * always have zero iterations: a regular scan, for example, is
835	 * requested as a single scan plan with a single iteration.
836	 */
837 if (!cmd->schedule[i - 1].iterations)
838 cmd->schedule[i - 1].iterations = 0xff;
839
840 if (iwl_mvm_scan_use_ebs(mvm, vif)) {
841 cmd->channel_opt[0].flags =
842 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
843 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
844 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
845 cmd->channel_opt[0].non_ebs_ratio =
846 cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
847 cmd->channel_opt[1].flags =
848 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
849 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
850 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
851 cmd->channel_opt[1].non_ebs_ratio =
852 cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
853 }
854
855 iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
856 params->n_channels, ssid_bitmap, cmd);
857
858 *preq = params->preq;
859
860 return 0;
861}
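/*
 * Layout of the LMAC scan command built above: a fixed struct
 * iwl_scan_req_lmac header, then n_scan_channels channel configs, then
 * the probe request template. Both the preq pointer above and
 * iwl_mvm_scan_size() below rely on this by skipping
 * sizeof(struct iwl_scan_channel_cfg_lmac) * n_scan_channels bytes
 * past cmd->data.
 */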
862
863static int rate_to_scan_rate_flag(unsigned int rate)
864{
865 static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
866 [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
867 [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
868 [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
869 [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
870 [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
871 [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
872 [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
873 [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
874 [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
875 [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
876 [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
877 [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
878 };
879
880 return rate_to_scan_rate[rate];
881}
882
883static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
884{
885 struct ieee80211_supported_band *band;
886 unsigned int rates = 0;
887 int i;
888
889 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
890 for (i = 0; i < band->n_bitrates; i++)
891 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
892 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
893 for (i = 0; i < band->n_bitrates; i++)
894 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
895
896 /* Set both basic rates and supported rates */
897 rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
898
899 return cpu_to_le32(rates);
900}
901
902int iwl_mvm_config_scan(struct iwl_mvm *mvm)
903{
904 struct iwl_scan_config *scan_config;
905 struct ieee80211_supported_band *band;
906 int num_channels =
907 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
908 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
909 int ret, i, j = 0, cmd_size;
910 struct iwl_host_cmd cmd = {
911 .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
912 };
913
914 if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
915 return -ENOBUFS;
916
917 cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
918
919 scan_config = kzalloc(cmd_size, GFP_KERNEL);
920 if (!scan_config)
921 return -ENOMEM;
922
923 scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
924 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
925 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
926 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
927 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
928 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
929 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
930					 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
931 SCAN_CONFIG_N_CHANNELS(num_channels));
932 scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
933 scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
934 scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
935 scan_config->out_of_channel_time = cpu_to_le32(170);
936 scan_config->suspend_time = cpu_to_le32(30);
937 scan_config->dwell_active = 20;
938 scan_config->dwell_passive = 110;
939 scan_config->dwell_fragmented = 20;
940
941 memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
942
943 scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
944 scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
945 IWL_CHANNEL_FLAG_ACCURATE_EBS |
946 IWL_CHANNEL_FLAG_EBS_ADD |
947 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
948
949 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
950 for (i = 0; i < band->n_channels; i++, j++)
951 scan_config->channel_array[j] = band->channels[i].hw_value;
952 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
953 for (i = 0; i < band->n_channels; i++, j++)
954 scan_config->channel_array[j] = band->channels[i].hw_value;
955
956 cmd.data[0] = scan_config;
957 cmd.len[0] = cmd_size;
958 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
959
960 IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
961
962 ret = iwl_mvm_send_cmd(mvm, &cmd);
963
964 kfree(scan_config);
965 return ret;
966}
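/*
 * Size note for the config command above: channel_array[] stores one
 * byte (the channel's hw_value) per scan channel, which is why cmd_size
 * only adds n_scan_channels on top of sizeof(*scan_config).
 */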
967
968static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
969{
970 int i;
971
972 for (i = 0; i < mvm->max_scans; i++)
973 if (mvm->scan_uid_status[i] == status)
974 return i;
975
976 return -ENOENT;
977}
978
979static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
980{
981 return params->n_scan_plans == 1 &&
982 params->scan_plans[0].iterations == 1;
983}
984
985static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
986 struct iwl_scan_req_umac *cmd,
987 struct iwl_mvm_scan_params *params)
988{
989 cmd->active_dwell = scan_timing[params->type].dwell_active;
990 cmd->passive_dwell = scan_timing[params->type].dwell_passive;
991 cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
992 cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
993 cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
994 cmd->scan_priority =
995 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
996
997 if (iwl_mvm_is_regular_scan(params))
998 cmd->ooc_priority =
999 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1000 else
1001 cmd->ooc_priority =
1002 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
1003}
1004
1005static void
1006iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1007 struct ieee80211_channel **channels,
1008 int n_channels, u32 ssid_bitmap,
1009 struct iwl_scan_req_umac *cmd)
1010{
1011 struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
1012 int i;
1013
1014 for (i = 0; i < n_channels; i++) {
1015 channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
1016 channel_cfg[i].channel_num = channels[i]->hw_value;
1017 channel_cfg[i].iter_count = 1;
1018 channel_cfg[i].iter_interval = 0;
1019 }
1020}
1021
1022static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
1023 struct iwl_mvm_scan_params *params)
1024{
1025 int flags = 0;
1026
1027 if (params->n_ssids == 0)
1028		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
1029
1030 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
1031 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
1032
1033 if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
1034 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
1035
1036 if (iwl_mvm_rrm_scan_needed(mvm))
1037 flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
1038
1039 if (params->pass_all)
1040 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
1041 else
1042 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
1043
1044 if (!iwl_mvm_is_regular_scan(params))
1045 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
1046
1047#ifdef CONFIG_IWLWIFI_DEBUGFS
1048 if (mvm->scan_iter_notif_enabled)
1049 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1050#endif
1051 return flags;
1052}
1053
1054static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1055 struct iwl_mvm_scan_params *params,
1056 int type)
1057{
1058 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
1059 struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
1060 sizeof(struct iwl_scan_channel_cfg_umac) *
1061 mvm->fw->ucode_capa.n_scan_channels;
1062 int uid, i;
1063 u32 ssid_bitmap = 0;
1064
1065 lockdep_assert_held(&mvm->mutex);
1066
1067 if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
1068 return -EINVAL;
1069
1070 uid = iwl_mvm_scan_uid_by_status(mvm, 0);
1071 if (uid < 0)
1072 return uid;
1073
1074 memset(cmd, 0, ksize(cmd));
1075
1076 iwl_mvm_scan_umac_dwell(mvm, cmd, params);
1077
1078 mvm->scan_uid_status[uid] = type;
1079
1080 cmd->uid = cpu_to_le32(uid);
1081 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
1082
1083 if (type == IWL_MVM_SCAN_SCHED)
1084 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
1085
1086 if (iwl_mvm_scan_use_ebs(mvm, vif))
1087 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1088 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1089 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1090
1091 cmd->n_channels = params->n_channels;
1092
1093 iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
1094
1095 iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
1096 params->n_channels, ssid_bitmap, cmd);
1097
1098 for (i = 0; i < params->n_scan_plans; i++) {
1099 struct cfg80211_sched_scan_plan *scan_plan =
1100 &params->scan_plans[i];
1101
1102 sec_part->schedule[i].iter_count = scan_plan->iterations;
1103 sec_part->schedule[i].interval =
1104 cpu_to_le16(scan_plan->interval);
1105 }
1106
1107	/*
1108	 * If the number of iterations of the last scan plan is set to
1109	 * zero, it should run infinitely. However, the last plan does not
1110	 * always have zero iterations: a regular scan, for example, is
1111	 * requested as a single scan plan with a single iteration.
1112	 */
1113 if (!sec_part->schedule[i - 1].iter_count)
1114 sec_part->schedule[i - 1].iter_count = 0xff;
1115
1116 sec_part->delay = cpu_to_le16(params->delay);
1117 sec_part->preq = params->preq;
1118
1119 return 0;
1120}
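/*
 * Layout of the UMAC scan command built above: a fixed struct
 * iwl_scan_req_umac header, then n_scan_channels channel configs, then
 * the tail (schedules, delay and the probe request). The sec_part
 * pointer above and iwl_mvm_scan_size() below both skip
 * sizeof(struct iwl_scan_channel_cfg_umac) * n_scan_channels bytes
 * past cmd->data for that reason.
 */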
1121
1122static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
1123{
1124 return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
1125}
1126
1127static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1128{
1129 /* This looks a bit arbitrary, but the idea is that if we run
1130 * out of possible simultaneous scans and the userspace is
1131 * trying to run a scan type that is already running, we
1132 * return -EBUSY. But if the userspace wants to start a
1133 * different type of scan, we stop the opposite type to make
1134 * space for the new request. The reason is backwards
1135 * compatibility with old wpa_supplicant that wouldn't stop a
1136 * scheduled scan before starting a normal scan.
1137 */
1138
1139 if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
1140 return 0;
1141
1142	/* Use a switch, even though this is a bitmask, so that more
1143	 * than one bit set will fall into the default case and we will warn.
1144 */
1145 switch (type) {
1146 case IWL_MVM_SCAN_REGULAR:
1147 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
1148 return -EBUSY;
1149 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
1150 case IWL_MVM_SCAN_SCHED:
1151 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
1152 return -EBUSY;
1153		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
1154 case IWL_MVM_SCAN_NETDETECT:
1155 /* No need to stop anything for net-detect since the
1156 * firmware is restarted anyway. This way, any sched
1157 * scans that were running will be restarted when we
1158 * resume.
1159 */
1160 return 0;
1161 default:
1162 WARN_ON(1);
1163 break;
1164 }
1165
1166 return -EIO;
1167}
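/*
 * Minimal sketch of the slot policy above, kept out of the build; it is
 * hypothetical, ignores the stopping bits and the net-detect case, and
 * only restates the decision in one place for readability.
 */
#if 0
static int example_scan_slot_policy(struct iwl_mvm *mvm, int type)
{
	if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
		return 0;		/* a slot is free: just start */

	if (mvm->scan_status & type)
		return -EBUSY;		/* same type is already running */

	return 1;			/* caller stops the opposite type */
}
#endif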
1168
1169int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1170 struct cfg80211_scan_request *req,
1171 struct ieee80211_scan_ies *ies)
1172{
1173 struct iwl_host_cmd hcmd = {
1174 .len = { iwl_mvm_scan_size(mvm), },
1175 .data = { mvm->scan_cmd, },
1176 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1177 };
1178 struct iwl_mvm_scan_params params = {};
1179 int ret;
1180 struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
1181
1182 lockdep_assert_held(&mvm->mutex);
1183
1184 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
1185 IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
1186 return -EBUSY;
1187 }
1188
1189 ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
1190 if (ret)
1191 return ret;
1192
1193 /* we should have failed registration if scan_cmd was NULL */
1194 if (WARN_ON(!mvm->scan_cmd))
1195 return -ENOMEM;
1196
1197 if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
1198 return -ENOBUFS;
1199
1200 params.n_ssids = req->n_ssids;
1201 params.flags = req->flags;
1202 params.n_channels = req->n_channels;
1203 params.delay = 0;
1204 params.ssids = req->ssids;
1205 params.channels = req->channels;
1206 params.mac_addr = req->mac_addr;
1207 params.mac_addr_mask = req->mac_addr_mask;
1208 params.no_cck = req->no_cck;
1209 params.pass_all = true;
1210 params.n_match_sets = 0;
1211 params.match_sets = NULL;
1212
1213 params.scan_plans = &scan_plan;
1214 params.n_scan_plans = 1;
1215
1216 params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
1217
1218 iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
1219
1220 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1221 hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
1222 ret = iwl_mvm_scan_umac(mvm, vif, &params,
1223 IWL_MVM_SCAN_REGULAR);
1224 } else {
1225 hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
1226 ret = iwl_mvm_scan_lmac(mvm, vif, &params);
1227 }
1228
1229 if (ret)
1230 return ret;
1231
1232 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1233 if (ret) {
1234 /* If the scan failed, it usually means that the FW was unable
1235 * to allocate the time events. Warn on it, but maybe we
1236 * should try to send the command again with different params.
1237 */
1238 IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
1239 return ret;
1240 }
1241
1242 IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
1243 mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
1244 iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
1245
1246 return 0;
1247}
1248
1249int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1250 struct ieee80211_vif *vif,
1251 struct cfg80211_sched_scan_request *req,
1252 struct ieee80211_scan_ies *ies,
1253 int type)
1254{
1255 struct iwl_host_cmd hcmd = {
1256 .len = { iwl_mvm_scan_size(mvm), },
1257 .data = { mvm->scan_cmd, },
1258 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1259 };
1260 struct iwl_mvm_scan_params params = {};
1261 int ret;
1262
1263 lockdep_assert_held(&mvm->mutex);
1264
1265 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
1266 IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
1267 return -EBUSY;
1268 }
1269
1270 ret = iwl_mvm_check_running_scans(mvm, type);
1271 if (ret)
1272 return ret;
1273
1274 /* we should have failed registration if scan_cmd was NULL */
1275 if (WARN_ON(!mvm->scan_cmd))
1276 return -ENOMEM;
1277
1278 if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
1279 return -ENOBUFS;
1280
1281 params.n_ssids = req->n_ssids;
1282 params.flags = req->flags;
1283 params.n_channels = req->n_channels;
1284 params.ssids = req->ssids;
1285 params.channels = req->channels;
1286 params.mac_addr = req->mac_addr;
1287 params.mac_addr_mask = req->mac_addr_mask;
1288 params.no_cck = false;
1289 params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
1290 params.n_match_sets = req->n_match_sets;
1291 params.match_sets = req->match_sets;
1292 if (!req->n_scan_plans)
1293 return -EINVAL;
1294
1295 params.n_scan_plans = req->n_scan_plans;
1296 params.scan_plans = req->scan_plans;
1297
1298 params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
1299
1300 /* In theory, LMAC scans can handle a 32-bit delay, but since
1301 * waiting for over 18 hours to start the scan is a bit silly
1302 * and to keep it aligned with UMAC scans (which only support
1303 * 16-bit delays), trim it down to 16-bits.
1304	 * 16-bit delays), trim it down to 16 bits.
1305 if (req->delay > U16_MAX) {
1306 IWL_DEBUG_SCAN(mvm,
1307 "delay value is > 16-bits, set to max possible\n");
1308 params.delay = U16_MAX;
1309 } else {
1310 params.delay = req->delay;
1311 }
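	/*
	 * Equivalent one-liner for the clamp above (illustrative sketch,
	 * kept out of the build): it would drop the debug message that
	 * the open-coded version prints.
	 */
#if 0
	params.delay = min_t(u32, req->delay, U16_MAX);
#endif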
1312
1313 ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
1314 if (ret)
1315 return ret;
1316
1317 iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
1318
1319 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1320 hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
1321 ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
1322 } else {
1323 hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
1324 ret = iwl_mvm_scan_lmac(mvm, vif, &params);
1325 }
1326
1327 if (ret)
1328 return ret;
1329
1330 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1331 if (!ret) {
1332 IWL_DEBUG_SCAN(mvm,
1333 "Sched scan request was sent successfully\n");
1334 mvm->scan_status |= type;
1335 } else {
1336 /* If the scan failed, it usually means that the FW was unable
1337 * to allocate the time events. Warn on it, but maybe we
1338 * should try to send the command again with different params.
1339 */
1340 IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
1341 }
1342
1343 return ret;
1344}
1345
1346void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1347 struct iwl_rx_cmd_buffer *rxb)
1348{
1349 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1350 struct iwl_umac_scan_complete *notif = (void *)pkt->data;
1351 u32 uid = __le32_to_cpu(notif->uid);
1352 bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
1353
1354 if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
1355 return;
1356
1357 /* if the scan is already stopping, we don't need to notify mac80211 */
1358 if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
1359 ieee80211_scan_completed(mvm->hw, aborted);
1360 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1361 } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
1362 ieee80211_sched_scan_stopped(mvm->hw);
1363 }
1364
1365 mvm->scan_status &= ~mvm->scan_uid_status[uid];
1366 IWL_DEBUG_SCAN(mvm,
1367 "Scan completed, uid %u type %u, status %s, EBS status %s\n",
1368 uid, mvm->scan_uid_status[uid],
1369 notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
1370 "completed" : "aborted",
1371 iwl_mvm_ebs_status_str(notif->ebs_status));
1372 IWL_DEBUG_SCAN(mvm,
1373 "Last line %d, Last iteration %d, Time from last iteration %d\n",
1374 notif->last_schedule, notif->last_iter,
1375 __le32_to_cpu(notif->time_from_last_iter));
1376
1377 if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
1378 notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
1379 mvm->last_ebs_successful = false;
1380
1381 mvm->scan_uid_status[uid] = 0;
1382}
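/*
 * Note on the ordering in the handler above: mvm->scan_uid_status[uid] is
 * cleared only at the end, since it is still needed to derive which bits
 * to clear from mvm->scan_status and for the debug prints.
 */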
1383
1384void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1385 struct iwl_rx_cmd_buffer *rxb)
1386{
1387 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1388 struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
1389 u8 buf[256];
1390
1391 IWL_DEBUG_SCAN(mvm,
1392 "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
1393 notif->status, notif->scanned_channels,
1394 iwl_mvm_dump_channel_list(notif->results,
1395 notif->scanned_channels, buf,
1396 sizeof(buf)));
1397}
1398
1399static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
1400{
1401 struct iwl_umac_scan_abort cmd = {};
1402 int uid, ret;
1403
1404 lockdep_assert_held(&mvm->mutex);
1405
1406 /* We should always get a valid index here, because we already
1407 * checked that this type of scan was running in the generic
1408 * code.
1409 */
1410 uid = iwl_mvm_scan_uid_by_status(mvm, type);
1411 if (WARN_ON_ONCE(uid < 0))
1412 return uid;
1413
1414 cmd.uid = cpu_to_le32(uid);
1415
1416 IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
1417
1418 ret = iwl_mvm_send_cmd_pdu(mvm,
1419 iwl_cmd_id(SCAN_ABORT_UMAC,
1420 IWL_ALWAYS_LONG_GROUP, 0),
1421 0, sizeof(cmd), &cmd);
1422 if (!ret)
1423 mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
1424
1425 return ret;
1426}
1427
1428static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1429{
1430 struct iwl_notification_wait wait_scan_done;
1431 static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
1432 SCAN_OFFLOAD_COMPLETE, };
1433 int ret;
1434
1435 lockdep_assert_held(&mvm->mutex);
1436
1437 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
1438 scan_done_notif,
1439 ARRAY_SIZE(scan_done_notif),
1440 NULL, NULL);
1441
1442 IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
1443
1444 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1445 ret = iwl_mvm_umac_scan_abort(mvm, type);
1446 else
1447 ret = iwl_mvm_lmac_scan_abort(mvm);
1448
1449 if (ret) {
1450 IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
1451 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
1452 return ret;
1453 }
1454
1455 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
1456
1457 return ret;
1458}
1459
1460int iwl_mvm_scan_size(struct iwl_mvm *mvm)
1461{
1462 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1463 return sizeof(struct iwl_scan_req_umac) +
1464 sizeof(struct iwl_scan_channel_cfg_umac) *
1465 mvm->fw->ucode_capa.n_scan_channels +
1466 sizeof(struct iwl_scan_req_umac_tail);
1467
1468 return sizeof(struct iwl_scan_req_lmac) +
1469 sizeof(struct iwl_scan_channel_cfg_lmac) *
1470 mvm->fw->ucode_capa.n_scan_channels +
1471 sizeof(struct iwl_scan_probe_req);
1472}
1473
1474/*
1475 * This function is used in the nic restart flow to inform mac80211 about
1476 * scans that were aborted by the restart flow or by an assert.
1477 */
1478void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
1479{
1480 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1481 int uid, i;
1482
1483 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
1484 if (uid >= 0) {
1485 ieee80211_scan_completed(mvm->hw, true);
1486 mvm->scan_uid_status[uid] = 0;
1487 }
1488 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
1489 if (uid >= 0 && !mvm->restart_fw) {
1490 ieee80211_sched_scan_stopped(mvm->hw);
1491 mvm->scan_uid_status[uid] = 0;
1492 }
1493
1494 /* We shouldn't have any UIDs still set. Loop over all the
1495 * UIDs to make sure there's nothing left there and warn if
1496 * any is found.
1497 */
1498 for (i = 0; i < mvm->max_scans; i++) {
1499 if (WARN_ONCE(mvm->scan_uid_status[i],
1500 "UMAC scan UID %d status was not cleaned\n",
1501 i))
1502 mvm->scan_uid_status[i] = 0;
1503 }
1504 } else {
1505 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
1506 ieee80211_scan_completed(mvm->hw, true);
1507
1508 /* Sched scan will be restarted by mac80211 in
1509 * restart_hw, so do not report if FW is about to be
1510 * restarted.
1511 */
1512 if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
1513 ieee80211_sched_scan_stopped(mvm->hw);
1514 }
1515}
1516
1517int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
1518{
1519 int ret;
1520
1521 if (!(mvm->scan_status & type))
1522 return 0;
1523
1524 if (iwl_mvm_is_radio_killed(mvm)) {
1525 ret = 0;
1526 goto out;
1527 }
1528
1529 ret = iwl_mvm_scan_stop_wait(mvm, type);
1530 if (!ret)
1531 mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
1532out:
1533 /* Clear the scan status so the next scan requests will
1534 * succeed and mark the scan as stopping, so that the Rx
1535 * handler doesn't do anything, as the scan was stopped from
1536 * above.
1537 */
1538 mvm->scan_status &= ~type;
1539
1540 if (type == IWL_MVM_SCAN_REGULAR) {
1541 /* Since the rx handler won't do anything now, we have
1542 * to release the scan reference here.
1543 */
1544 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1545 if (notify)
1546 ieee80211_scan_completed(mvm->hw, true);
1547 } else if (notify) {
1548 ieee80211_sched_scan_stopped(mvm->hw);
1549 }
1550
1551 return ret;
1552}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
deleted file mode 100644
index b0f59fdd287c..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ /dev/null
@@ -1,340 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include "mvm.h"
66
67/* For counting bound interfaces */
68struct iwl_mvm_active_iface_iterator_data {
69 struct ieee80211_vif *ignore_vif;
70 u8 sta_vif_ap_sta_id;
71 enum iwl_sf_state sta_vif_state;
72 int num_active_macs;
73};
74
75/*
76 * Count bound interfaces which are not p2p, besides data->ignore_vif.
77 * If a bound vif of type station exists, its AP sta_id and SF state are
78 * recorded in data.
78 */
79static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
80 struct ieee80211_vif *vif)
81{
82 struct iwl_mvm_active_iface_iterator_data *data = _data;
83 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
84
85 if (vif == data->ignore_vif || !mvmvif->phy_ctxt ||
86 vif->type == NL80211_IFTYPE_P2P_DEVICE)
87 return;
88
89 data->num_active_macs++;
90
91 if (vif->type == NL80211_IFTYPE_STATION) {
92 data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
93 if (vif->bss_conf.assoc)
94 data->sta_vif_state = SF_FULL_ON;
95 else
96 data->sta_vif_state = SF_INIT_OFF;
97 }
98}
99
100/*
101 * Aging and idle timeouts for the different possible scenarios
102 * in default configuration
103 */
104static const
105__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
106 {
107 cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
108 cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
109 },
110 {
111 cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
112 cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
113 },
114 {
115 cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
116 cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
117 },
118 {
119 cpu_to_le32(SF_BA_AGING_TIMER_DEF),
120 cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
121 },
122 {
123 cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
124 cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
125 },
126};
127
128/*
129 * Aging and idle timeouts for the different possible scenarios
130 * in single BSS MAC configuration.
131 */
132static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
133 {
134 cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER),
135 cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER)
136 },
137 {
138 cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER),
139 cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER)
140 },
141 {
142 cpu_to_le32(SF_MCAST_AGING_TIMER),
143 cpu_to_le32(SF_MCAST_IDLE_TIMER)
144 },
145 {
146 cpu_to_le32(SF_BA_AGING_TIMER),
147 cpu_to_le32(SF_BA_IDLE_TIMER)
148 },
149 {
150 cpu_to_le32(SF_TX_RE_AGING_TIMER),
151 cpu_to_le32(SF_TX_RE_IDLE_TIMER)
152 },
153};
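/*
 * Both timeout tables above are indexed [scenario][timeout type], with
 * the aging timer first and the idle timer second in each pair. The
 * BUILD_BUG_ON() checks in iwl_mvm_fill_sf_command() below ensure their
 * size matches SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES entries.
 */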
154
155static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
156 struct iwl_sf_cfg_cmd *sf_cmd,
157 struct ieee80211_sta *sta)
158{
159 int i, j, watermark;
160
161 sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
162
163 /*
164 * If we are in association flow - check antenna configuration
165 * capabilities of the AP station, and choose the watermark accordingly.
166 */
167 if (sta) {
168 if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
169 switch (sta->rx_nss) {
170 case 1:
171 watermark = SF_W_MARK_SISO;
172 break;
173 case 2:
174 watermark = SF_W_MARK_MIMO2;
175 break;
176 default:
177 watermark = SF_W_MARK_MIMO3;
178 break;
179 }
180 } else {
181 watermark = SF_W_MARK_LEGACY;
182 }
183	} else {
184		/* default watermark value for unassociated mode. */
185		watermark = SF_W_MARK_MIMO2;
186 }
187 sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
188
189 for (i = 0; i < SF_NUM_SCENARIO; i++) {
190 for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
191 sf_cmd->long_delay_timeouts[i][j] =
192 cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
193 }
194 }
195
196 if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
197 BUILD_BUG_ON(sizeof(sf_full_timeout) !=
198 sizeof(__le32) * SF_NUM_SCENARIO *
199 SF_NUM_TIMEOUT_TYPES);
200
201 memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
202 sizeof(sf_full_timeout));
203 } else {
204 BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
205 sizeof(__le32) * SF_NUM_SCENARIO *
206 SF_NUM_TIMEOUT_TYPES);
207
208 memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
209 sizeof(sf_full_timeout_def));
210 }
211
212}
213
214static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
215 enum iwl_sf_state new_state)
216{
217 struct iwl_sf_cfg_cmd sf_cmd = {
218 .state = cpu_to_le32(SF_FULL_ON),
219 };
220 struct ieee80211_sta *sta;
221 int ret = 0;
222
223 if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
224 sf_cmd.state = cpu_to_le32(new_state);
225
226 if (mvm->cfg->disable_dummy_notification)
227 sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
228
229 /*
230 * If an associated AP sta changed its antenna configuration, the state
231 * will remain FULL_ON but SF parameters need to be reconsidered.
232 */
233 if (new_state != SF_FULL_ON && mvm->sf_state == new_state)
234 return 0;
235
236 switch (new_state) {
237 case SF_UNINIT:
238 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
239 iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
240 break;
241 case SF_FULL_ON:
242 if (sta_id == IWL_MVM_STATION_COUNT) {
243 IWL_ERR(mvm,
244 "No station: Cannot switch SF to FULL_ON\n");
245 return -EINVAL;
246 }
247 rcu_read_lock();
248 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
249 if (IS_ERR_OR_NULL(sta)) {
250 IWL_ERR(mvm, "Invalid station id\n");
251 rcu_read_unlock();
252 return -EINVAL;
253 }
254 iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
255 rcu_read_unlock();
256 break;
257 case SF_INIT_OFF:
258 iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
259 break;
260 default:
261 WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
262 new_state);
263 return -EINVAL;
264 }
265
266 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC,
267 sizeof(sf_cmd), &sf_cmd);
268 if (!ret)
269 mvm->sf_state = new_state;
270
271 return ret;
272}
273
274/*
275 * Update Smart fifo:
276 * Count bound interfaces that are not to be removed, ignoring p2p devices,
277 * and set new state accordingly.
278 */
279int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
280 bool remove_vif)
281{
282 enum iwl_sf_state new_state;
283 u8 sta_id = IWL_MVM_STATION_COUNT;
284 struct iwl_mvm_vif *mvmvif = NULL;
285 struct iwl_mvm_active_iface_iterator_data data = {
286 .ignore_vif = changed_vif,
287 .sta_vif_state = SF_UNINIT,
288 .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
289 };
290
291 /*
292 * Ignore the call if we are in HW Restart flow, or if the handled
293 * vif is a p2p device.
294 */
295 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
296 (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE))
297 return 0;
298
299 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
300 IEEE80211_IFACE_ITER_NORMAL,
301 iwl_mvm_bound_iface_iterator,
302 &data);
303
304 /* If changed_vif exists and is not to be removed, add to the count */
305 if (changed_vif && !remove_vif)
306 data.num_active_macs++;
307
308 switch (data.num_active_macs) {
309 case 0:
310 /* If there are no active macs - change state to SF_INIT_OFF */
311 new_state = SF_INIT_OFF;
312 break;
313 case 1:
314 if (remove_vif) {
315 /* The one active mac left is of type station
316 * and we filled the relevant data during iteration
317 */
318 new_state = data.sta_vif_state;
319 sta_id = data.sta_vif_ap_sta_id;
320 } else {
321 if (WARN_ON(!changed_vif))
322 return -EINVAL;
323 if (changed_vif->type != NL80211_IFTYPE_STATION) {
324 new_state = SF_UNINIT;
325 } else if (changed_vif->bss_conf.assoc &&
326 changed_vif->bss_conf.dtim_period) {
327 mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
328 sta_id = mvmvif->ap_sta_id;
329 new_state = SF_FULL_ON;
330 } else {
331 new_state = SF_INIT_OFF;
332 }
333 }
334 break;
335 default:
336 /* If there are multiple active macs - change to SF_UNINIT */
337 new_state = SF_UNINIT;
338 }
339 return iwl_mvm_sf_config(mvm, sta_id, new_state);
340}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
deleted file mode 100644
index 300a249486e4..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ /dev/null
@@ -1,1810 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <net/mac80211.h>
66
67#include "mvm.h"
68#include "sta.h"
69#include "rs.h"
70
71static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
72 enum nl80211_iftype iftype)
73{
74 int sta_id;
75 u32 reserved_ids = 0;
76
77 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
78 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
79
80 lockdep_assert_held(&mvm->mutex);
81
82 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
83 if (iftype != NL80211_IFTYPE_STATION)
84 reserved_ids = BIT(0);
85
86 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
87 for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
88 if (BIT(sta_id) & reserved_ids)
89 continue;
90
91 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
92 lockdep_is_held(&mvm->mutex)))
93 return sta_id;
94 }
95 return IWL_MVM_STATION_COUNT;
96}
97
98/* send station add/update command to firmware */
99int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
100 bool update)
101{
102 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
103 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
104 .sta_id = mvm_sta->sta_id,
105 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
106 .add_modify = update ? 1 : 0,
107 .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
108 STA_FLG_MIMO_EN_MSK),
109 };
110 int ret;
111 u32 status;
112 u32 agg_size = 0, mpdu_dens = 0;
113
114 if (!update) {
115 add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
116 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
117 }
118
119 switch (sta->bandwidth) {
120 case IEEE80211_STA_RX_BW_160:
121 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
122 /* fall through */
123 case IEEE80211_STA_RX_BW_80:
124 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
125 /* fall through */
126 case IEEE80211_STA_RX_BW_40:
127 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
128 /* fall through */
129 case IEEE80211_STA_RX_BW_20:
130 if (sta->ht_cap.ht_supported)
131 add_sta_cmd.station_flags |=
132 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
133 break;
134 }
135
136 switch (sta->rx_nss) {
137 case 1:
138 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
139 break;
140 case 2:
141 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
142 break;
143 case 3 ... 8:
144 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
145 break;
146 }
147
148 switch (sta->smps_mode) {
149 case IEEE80211_SMPS_AUTOMATIC:
150 case IEEE80211_SMPS_NUM_MODES:
151 WARN_ON(1);
152 break;
153 case IEEE80211_SMPS_STATIC:
154 /* override NSS */
155 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
156 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
157 break;
158 case IEEE80211_SMPS_DYNAMIC:
159 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
160 break;
161 case IEEE80211_SMPS_OFF:
162 /* nothing */
163 break;
164 }
165
166 if (sta->ht_cap.ht_supported) {
167 add_sta_cmd.station_flags_msk |=
168 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
169 STA_FLG_AGG_MPDU_DENS_MSK);
170
171 mpdu_dens = sta->ht_cap.ampdu_density;
172 }
173
174 if (sta->vht_cap.vht_supported) {
175 agg_size = sta->vht_cap.cap &
176 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
177 agg_size >>=
178 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
179 } else if (sta->ht_cap.ht_supported) {
180 agg_size = sta->ht_cap.ampdu_factor;
181 }
182
183 add_sta_cmd.station_flags |=
184 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
185 add_sta_cmd.station_flags |=
186 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
187
188 status = ADD_STA_SUCCESS;
189 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
190 &add_sta_cmd, &status);
191 if (ret)
192 return ret;
193
194 switch (status) {
195 case ADD_STA_SUCCESS:
196 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
197 break;
198 default:
199 ret = -EIO;
200 IWL_ERR(mvm, "ADD_STA failed\n");
201 break;
202 }
203
204 return ret;
205}
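/*
 * Minimal sketch (hypothetical, kept out of the build) of what the
 * bandwidth fall-through above accumulates: each wider bandwidth also
 * sets the FAT flags of all narrower ones, down to 20 MHz for HT
 * stations.
 */
#if 0
static u32 example_fat_flags(enum ieee80211_sta_rx_bandwidth bw, bool ht)
{
	u32 flags = 0;

	if (bw >= IEEE80211_STA_RX_BW_160)
		flags |= STA_FLG_FAT_EN_160MHZ;
	if (bw >= IEEE80211_STA_RX_BW_80)
		flags |= STA_FLG_FAT_EN_80MHZ;
	if (bw >= IEEE80211_STA_RX_BW_40)
		flags |= STA_FLG_FAT_EN_40MHZ;
	if (ht)
		flags |= STA_FLG_FAT_EN_20MHZ;

	return flags;
}
#endif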
206
207static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
208 struct ieee80211_sta *sta)
209{
210 unsigned long used_hw_queues;
211 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
212 unsigned int wdg_timeout =
213 iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
214 u32 ac;
215
216 lockdep_assert_held(&mvm->mutex);
217
218 used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
219
220 /* Find available queues, and allocate them to the ACs */
221 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
222 u8 queue = find_first_zero_bit(&used_hw_queues,
223 mvm->first_agg_queue);
224
225 if (queue >= mvm->first_agg_queue) {
226 IWL_ERR(mvm, "Failed to allocate STA queue\n");
227 return -EBUSY;
228 }
229
230 __set_bit(queue, &used_hw_queues);
231 mvmsta->hw_queue[ac] = queue;
232 }
233
234 /* Found a place for all queues - enable them */
235 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
236 iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
237 mvmsta->hw_queue[ac],
238 iwl_mvm_ac_to_tx_fifo[ac], 0,
239 wdg_timeout);
240 mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
241 }
242
243 return 0;
244}
245
246static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
247 struct ieee80211_sta *sta)
248{
249 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
250 unsigned long sta_msk;
251 int i;
252
253 lockdep_assert_held(&mvm->mutex);
254
255 /* disable the TDLS STA-specific queues */
256 sta_msk = mvmsta->tfd_queue_msk;
257 for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
258 iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
259}
260
261int iwl_mvm_add_sta(struct iwl_mvm *mvm,
262 struct ieee80211_vif *vif,
263 struct ieee80211_sta *sta)
264{
265 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
266 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
267 int i, ret, sta_id;
268
269 lockdep_assert_held(&mvm->mutex);
270
271 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
272 sta_id = iwl_mvm_find_free_sta_id(mvm,
273 ieee80211_vif_type_p2p(vif));
274 else
275 sta_id = mvm_sta->sta_id;
276
277 if (sta_id == IWL_MVM_STATION_COUNT)
278 return -ENOSPC;
279
280 if (vif->type == NL80211_IFTYPE_AP) {
281 mvmvif->ap_assoc_sta_count++;
282 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
283 }
284
285 spin_lock_init(&mvm_sta->lock);
286
287 mvm_sta->sta_id = sta_id;
288 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
289 mvmvif->color);
290 mvm_sta->vif = vif;
291 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
292 mvm_sta->tx_protection = 0;
293 mvm_sta->tt_tx_protection = false;
294
295 /* HW restart, don't assume the memory has been zeroed */
296 atomic_set(&mvm->pending_frames[sta_id], 0);
297 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
298 mvm_sta->tfd_queue_msk = 0;
299
300 /* allocate new queues for a TDLS station */
301 if (sta->tdls) {
302 ret = iwl_mvm_tdls_sta_init(mvm, sta);
303 if (ret)
304 return ret;
305 } else {
306 for (i = 0; i < IEEE80211_NUM_ACS; i++)
307 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
308 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
309 }
310
311 /* for HW restart - reset everything but the sequence number */
312 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
313 u16 seq = mvm_sta->tid_data[i].seq_number;
314 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
315 mvm_sta->tid_data[i].seq_number = seq;
316 }
317 mvm_sta->agg_tids = 0;
318
319 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
320 if (ret)
321 goto err;
322
323 if (vif->type == NL80211_IFTYPE_STATION) {
324 if (!sta->tdls) {
325 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
326 mvmvif->ap_sta_id = sta_id;
327 } else {
328 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
329 }
330 }
331
332 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
333
334 return 0;
335
336err:
337 iwl_mvm_tdls_sta_deinit(mvm, sta);
338 return ret;
339}
340
341int iwl_mvm_update_sta(struct iwl_mvm *mvm,
342 struct ieee80211_vif *vif,
343 struct ieee80211_sta *sta)
344{
345 return iwl_mvm_sta_send_to_fw(mvm, sta, true);
346}
347
348int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
349 bool drain)
350{
351 struct iwl_mvm_add_sta_cmd cmd = {};
352 int ret;
353 u32 status;
354
355 lockdep_assert_held(&mvm->mutex);
356
357 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
358 cmd.sta_id = mvmsta->sta_id;
359 cmd.add_modify = STA_MODE_MODIFY;
360 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
361 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
362
363 status = ADD_STA_SUCCESS;
364 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
365 &cmd, &status);
366 if (ret)
367 return ret;
368
369 switch (status) {
370 case ADD_STA_SUCCESS:
371		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
372 mvmsta->sta_id);
373 break;
374 default:
375 ret = -EIO;
376 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
377 mvmsta->sta_id);
378 break;
379 }
380
381 return ret;
382}
383
384/*
385 * Remove a station from the FW table. Before sending the command to remove
386 * the station validate that the station is indeed known to the driver (sanity
387 * only).
388 */
389static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
390{
391 struct ieee80211_sta *sta;
392 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
393 .sta_id = sta_id,
394 };
395 int ret;
396
397 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
398 lockdep_is_held(&mvm->mutex));
399
400 /* Note: internal stations are marked as error values */
401 if (!sta) {
402 IWL_ERR(mvm, "Invalid station id\n");
403 return -EINVAL;
404 }
405
406 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
407 sizeof(rm_sta_cmd), &rm_sta_cmd);
408 if (ret) {
409 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
410 return ret;
411 }
412
413 return 0;
414}
415
416void iwl_mvm_sta_drained_wk(struct work_struct *wk)
417{
418 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
419 u8 sta_id;
420
421 /*
422	 * The mutex is needed because of the SYNC cmd, but not only that: if
423	 * this work ran concurrently with iwl_mvm_rm_sta, it could run before
424	 * iwl_mvm_rm_sta marks the station as busy and then exit. iwl_mvm_rm_sta
425	 * would subsequently mark the station as busy, and nobody would ever
426	 * clean it up later.
427 */
428 mutex_lock(&mvm->mutex);
429
430 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
431 int ret;
432 struct ieee80211_sta *sta =
433 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
434 lockdep_is_held(&mvm->mutex));
435
436 /*
437 * This station is in use or RCU-removed; the latter happens in
438 * managed mode, where mac80211 removes the station before we
439 * can remove it from firmware (we can only do that after the
440 * MAC is marked unassociated), and possibly while the deauth
441 * frame to disconnect from the AP is still queued. Then, the
442 * station pointer is -ENOENT when the last skb is reclaimed.
443 */
444 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
445 continue;
446
447 if (PTR_ERR(sta) == -EINVAL) {
448 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
449 sta_id);
450 continue;
451 }
452
453 if (!sta) {
454 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
455 sta_id);
456 continue;
457 }
458
459 WARN_ON(PTR_ERR(sta) != -EBUSY);
460		/* This station was removed and we waited until it got drained;
461		 * we can now proceed and remove it.
462 */
463 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
464 if (ret) {
465 IWL_ERR(mvm,
466 "Couldn't remove sta %d after it was drained\n",
467 sta_id);
468 continue;
469 }
470 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
471 clear_bit(sta_id, mvm->sta_drained);
472
473 if (mvm->tfd_drained[sta_id]) {
474 unsigned long i, msk = mvm->tfd_drained[sta_id];
475
476 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
477 iwl_mvm_disable_txq(mvm, i, i,
478 IWL_MAX_TID_COUNT, 0);
479
480 mvm->tfd_drained[sta_id] = 0;
481 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
482 sta_id, msk);
483 }
484 }
485
486 mutex_unlock(&mvm->mutex);
487}
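
/*
 * A minimal sketch of how the reclaim path is expected to hand a fully
 * drained station to the worker above; the actual hook lives in the
 * Tx-response code (not shown in this file), so the helper name below is
 * only illustrative.
 */
static void __maybe_unused iwl_mvm_sketch_sta_drained(struct iwl_mvm *mvm,
						      u8 sta_id)
{
	/* mark the station for removal and kick the drained worker */
	set_bit(sta_id, mvm->sta_drained);
	schedule_work(&mvm->sta_drained_wk);
}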
488
489int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
490 struct ieee80211_vif *vif,
491 struct ieee80211_sta *sta)
492{
493 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
494 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
495 int ret;
496
497 lockdep_assert_held(&mvm->mutex);
498
499 if (vif->type == NL80211_IFTYPE_STATION &&
500 mvmvif->ap_sta_id == mvm_sta->sta_id) {
501 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
502 if (ret)
503 return ret;
504 /* flush its queues here since we are freeing mvm_sta */
505 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
506 if (ret)
507 return ret;
508 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
509 mvm_sta->tfd_queue_msk);
510 if (ret)
511 return ret;
512 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
513
514 /* if we are associated - we can't remove the AP STA now */
515 if (vif->bss_conf.assoc)
516 return ret;
517
518 /* unassoc - go ahead - remove the AP STA now */
519 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
520
521 /* clear d0i3_ap_sta_id if no longer relevant */
522 if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
523 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
524 }
525
526 /*
527 * This shouldn't happen - the TDLS channel switch should be canceled
528 * before the STA is removed.
529 */
530 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
531 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
532 cancel_delayed_work(&mvm->tdls_cs.dwork);
533 }
534
535 /*
536 * Make sure that the tx response code sees the station as -EBUSY and
537 * calls the drain worker.
538 */
539 spin_lock_bh(&mvm_sta->lock);
540 /*
541 * There are frames pending on the AC queues for this station.
542 * We need to wait until all the frames are drained...
543 */
544 if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
545 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
546 ERR_PTR(-EBUSY));
547 spin_unlock_bh(&mvm_sta->lock);
548
549 /* disable TDLS sta queues on drain complete */
550 if (sta->tdls) {
551 mvm->tfd_drained[mvm_sta->sta_id] =
552 mvm_sta->tfd_queue_msk;
553 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
554 mvm_sta->sta_id);
555 }
556
557 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
558 } else {
559 spin_unlock_bh(&mvm_sta->lock);
560
561 if (sta->tdls)
562 iwl_mvm_tdls_sta_deinit(mvm, sta);
563
564 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
565 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
566 }
567
568 return ret;
569}
570
571int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
572 struct ieee80211_vif *vif,
573 u8 sta_id)
574{
575 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
576
577 lockdep_assert_held(&mvm->mutex);
578
579 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
580 return ret;
581}
582
583static int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
584 struct iwl_mvm_int_sta *sta,
585 u32 qmask, enum nl80211_iftype iftype)
586{
587 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
588 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
589 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
590 return -ENOSPC;
591 }
592
593 sta->tfd_queue_msk = qmask;
594
595 /* put a non-NULL value so iterating over the stations won't stop */
596 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
597 return 0;
598}
599
600static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
601 struct iwl_mvm_int_sta *sta)
602{
603 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
604 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
605 sta->sta_id = IWL_MVM_STATION_COUNT;
606}
607
608static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
609 struct iwl_mvm_int_sta *sta,
610 const u8 *addr,
611 u16 mac_id, u16 color)
612{
613 struct iwl_mvm_add_sta_cmd cmd;
614 int ret;
615 u32 status;
616
617 lockdep_assert_held(&mvm->mutex);
618
619 memset(&cmd, 0, sizeof(cmd));
620 cmd.sta_id = sta->sta_id;
621 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
622 color));
623
624 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
625
626 if (addr)
627 memcpy(cmd.addr, addr, ETH_ALEN);
628
629 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
630 &cmd, &status);
631 if (ret)
632 return ret;
633
634 switch (status) {
635 case ADD_STA_SUCCESS:
636 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
637 return 0;
638 default:
639 ret = -EIO;
640 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
641 status);
642 break;
643 }
644 return ret;
645}
646
647int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
648{
649 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
650 mvm->cfg->base_params->wd_timeout :
651 IWL_WATCHDOG_DISABLED;
652 int ret;
653
654 lockdep_assert_held(&mvm->mutex);
655
656 /* Map Aux queue to fifo - needs to happen before adding Aux station */
657 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
658 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
659
660 /* Allocate aux station and assign to it the aux queue */
661 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
662 NL80211_IFTYPE_UNSPECIFIED);
663 if (ret)
664 return ret;
665
666 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
667 MAC_INDEX_AUX, 0);
668
669 if (ret)
670 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
671 return ret;
672}
673
674void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
675{
676 lockdep_assert_held(&mvm->mutex);
677
678 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
679}
680
681/*
682 * Send the add station command for the vif's broadcast station.
683 * Assumes that the station was already allocated.
684 *
685 * @mvm: the mvm component
686 * @vif: the interface to which the broadcast station is added
687 * The broadcast station used is the vif's %bcast_sta.
688 */
689int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
690{
691 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
692 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
693 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
694 const u8 *baddr = _baddr;
695
696 lockdep_assert_held(&mvm->mutex);
697
698 if (vif->type == NL80211_IFTYPE_ADHOC)
699 baddr = vif->bss_conf.bssid;
700
701 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
702 return -ENOSPC;
703
704 return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
705 mvmvif->id, mvmvif->color);
706}
707
708/* Send the FW a request to remove the station from its internal data
709 * structures, but DO NOT remove the entry from the local data structures. */
710int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
711{
712 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
713 int ret;
714
715 lockdep_assert_held(&mvm->mutex);
716
717 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
718 if (ret)
719 IWL_WARN(mvm, "Failed sending remove station\n");
720 return ret;
721}
722
723int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
724{
725 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
726 u32 qmask;
727
728 lockdep_assert_held(&mvm->mutex);
729
730 qmask = iwl_mvm_mac_get_queues_mask(vif);
731
732 /*
733 * The firmware defines the TFD queue mask to only be relevant
734 * for *unicast* queues, so the multicast (CAB) queue shouldn't
735 * be included.
736 */
737 if (vif->type == NL80211_IFTYPE_AP)
738 qmask &= ~BIT(vif->cab_queue);
739
740 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
741 ieee80211_vif_type_p2p(vif));
742}
743
744/* Allocate a new station entry for the broadcast station to the given vif,
745 * and send it to the FW.
746 * Note that each P2P mac should have its own broadcast station.
747 *
748 * @mvm: the mvm component
749 * @vif: the interface to which the broadcast station is added
750 * The broadcast station allocated is the vif's %bcast_sta. */
751int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
752{
753 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
754 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
755 int ret;
756
757 lockdep_assert_held(&mvm->mutex);
758
759 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
760 if (ret)
761 return ret;
762
763 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
764
765 if (ret)
766 iwl_mvm_dealloc_int_sta(mvm, bsta);
767
768 return ret;
769}
770
771void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
772{
773 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
774
775 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
776}
777
778/*
779 * Send the FW a request to remove the station from its internal data
780 * structures, and in addition remove it from the local data structure.
781 */
782int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
783{
784 int ret;
785
786 lockdep_assert_held(&mvm->mutex);
787
788 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
789
790 iwl_mvm_dealloc_bcast_sta(mvm, vif);
791
792 return ret;
793}
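
/*
 * A minimal usage sketch of the broadcast-station helpers above over the
 * lifetime of an AP/GO MAC; the caller name is hypothetical and error
 * handling is trimmed.
 */
static int __maybe_unused
iwl_mvm_sketch_bcast_sta_lifecycle(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif)
{
	int ret;

	/* on AP/GO start: allocate the internal station and push it to the fw */
	ret = iwl_mvm_add_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	/* ... AP/GO is running ... */

	/* on AP/GO stop: remove it from the fw and free the local entry */
	return iwl_mvm_rm_bcast_sta(mvm, vif);
}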
794
795#define IWL_MAX_RX_BA_SESSIONS 16
796
797int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
798 int tid, u16 ssn, bool start)
799{
800 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
801 struct iwl_mvm_add_sta_cmd cmd = {};
802 int ret;
803 u32 status;
804
805 lockdep_assert_held(&mvm->mutex);
806
807 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
808 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
809 return -ENOSPC;
810 }
811
812 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
813 cmd.sta_id = mvm_sta->sta_id;
814 cmd.add_modify = STA_MODE_MODIFY;
815 if (start) {
816 cmd.add_immediate_ba_tid = (u8) tid;
817 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
818 } else {
819 cmd.remove_immediate_ba_tid = (u8) tid;
820 }
821 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
822 STA_MODIFY_REMOVE_BA_TID;
823
824 status = ADD_STA_SUCCESS;
825 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
826 &cmd, &status);
827 if (ret)
828 return ret;
829
830 switch (status) {
831 case ADD_STA_SUCCESS:
832 IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
833 start ? "start" : "stopp");
834 break;
835 case ADD_STA_IMMEDIATE_BA_FAILURE:
836 IWL_WARN(mvm, "RX BA Session refused by fw\n");
837 ret = -ENOSPC;
838 break;
839 default:
840 ret = -EIO;
841 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
842 start ? "start" : "stopp", status);
843 break;
844 }
845
846 if (!ret) {
847 if (start)
848 mvm->rx_ba_sessions++;
849 else if (mvm->rx_ba_sessions > 0)
850 /* check that restart flow didn't zero the counter */
851 mvm->rx_ba_sessions--;
852 }
853
854 return ret;
855}
856
857static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
858 int tid, u8 queue, bool start)
859{
860 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
861 struct iwl_mvm_add_sta_cmd cmd = {};
862 int ret;
863 u32 status;
864
865 lockdep_assert_held(&mvm->mutex);
866
867 if (start) {
868 mvm_sta->tfd_queue_msk |= BIT(queue);
869 mvm_sta->tid_disable_agg &= ~BIT(tid);
870 } else {
871 mvm_sta->tfd_queue_msk &= ~BIT(queue);
872 mvm_sta->tid_disable_agg |= BIT(tid);
873 }
874
875 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
876 cmd.sta_id = mvm_sta->sta_id;
877 cmd.add_modify = STA_MODE_MODIFY;
878 cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
879 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
880 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
881
882 status = ADD_STA_SUCCESS;
883 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
884 &cmd, &status);
885 if (ret)
886 return ret;
887
888 switch (status) {
889 case ADD_STA_SUCCESS:
890 break;
891 default:
892 ret = -EIO;
893 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
894 start ? "start" : "stopp", status);
895 break;
896 }
897
898 return ret;
899}
900
901const u8 tid_to_mac80211_ac[] = {
902 IEEE80211_AC_BE,
903 IEEE80211_AC_BK,
904 IEEE80211_AC_BK,
905 IEEE80211_AC_BE,
906 IEEE80211_AC_VI,
907 IEEE80211_AC_VI,
908 IEEE80211_AC_VO,
909 IEEE80211_AC_VO,
910};
911
912static const u8 tid_to_ucode_ac[] = {
913 AC_BE,
914 AC_BK,
915 AC_BK,
916 AC_BE,
917 AC_VI,
918 AC_VI,
919 AC_VO,
920 AC_VO,
921};
922
923int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
924 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
925{
926 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
927 struct iwl_mvm_tid_data *tid_data;
928 int txq_id;
929 int ret;
930
931 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
932 return -EINVAL;
933
934 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
935 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
936 mvmsta->tid_data[tid].state);
937 return -ENXIO;
938 }
939
940 lockdep_assert_held(&mvm->mutex);
941
942 spin_lock_bh(&mvmsta->lock);
943
944 /* possible race condition - we entered D0i3 while starting agg */
945 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
946 spin_unlock_bh(&mvmsta->lock);
947 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
948 return -EIO;
949 }
950
951 spin_lock_bh(&mvm->queue_info_lock);
952
953 txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
954 mvm->last_agg_queue);
955 if (txq_id < 0) {
956 ret = txq_id;
957 spin_unlock_bh(&mvm->queue_info_lock);
958 IWL_ERR(mvm, "Failed to allocate agg queue\n");
959 goto release_locks;
960 }
961 mvm->queue_info[txq_id].setup_reserved = true;
962 spin_unlock_bh(&mvm->queue_info_lock);
963
964 tid_data = &mvmsta->tid_data[tid];
965 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
966 tid_data->txq_id = txq_id;
967 *ssn = tid_data->ssn;
968
969 IWL_DEBUG_TX_QUEUES(mvm,
970 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
971 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
972 tid_data->next_reclaimed);
973
974 if (tid_data->ssn == tid_data->next_reclaimed) {
975 tid_data->state = IWL_AGG_STARTING;
976 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
977 } else {
978 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
979 }
980
981 ret = 0;
982
983release_locks:
984 spin_unlock_bh(&mvmsta->lock);
985
986 return ret;
987}
988
989int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
990 struct ieee80211_sta *sta, u16 tid, u8 buf_size)
991{
992 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
993 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
994 unsigned int wdg_timeout =
995 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
996 int queue, fifo, ret;
997 u16 ssn;
998
999 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
1000 != IWL_MAX_TID_COUNT);
1001
1002 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
1003
1004 spin_lock_bh(&mvmsta->lock);
1005 ssn = tid_data->ssn;
1006 queue = tid_data->txq_id;
1007 tid_data->state = IWL_AGG_ON;
1008 mvmsta->agg_tids |= BIT(tid);
1009 tid_data->ssn = 0xffff;
1010 spin_unlock_bh(&mvmsta->lock);
1011
1012 fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
1013
1014 iwl_mvm_enable_agg_txq(mvm, queue,
1015 vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
1016 mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
1017
1018 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1019 if (ret)
1020 return -EIO;
1021
1022 /* No need to mark as reserved */
1023 spin_lock_bh(&mvm->queue_info_lock);
1024 mvm->queue_info[queue].setup_reserved = false;
1025 spin_unlock_bh(&mvm->queue_info_lock);
1026
1027 /*
1028 * Even though in theory the peer could have different
1029 * aggregation reorder buffer sizes for different sessions,
1030 * our ucode doesn't allow for that and has a global limit
1031 * for each station. Therefore, use the minimum of all the
1032 * aggregation sessions and our default value.
1033 */
1034 mvmsta->max_agg_bufsize =
1035 min(mvmsta->max_agg_bufsize, buf_size);
1036 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
1037
1038 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
1039 sta->addr, tid);
1040
1041 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
1042}
1043
1044int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1045 struct ieee80211_sta *sta, u16 tid)
1046{
1047 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1048 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1049 u16 txq_id;
1050 int err;
1051
1052
1053 /*
1054 * If mac80211 is cleaning its state, then say that we finished since
1055 * our state has been cleared anyway.
1056 */
1057 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1058 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1059 return 0;
1060 }
1061
1062 spin_lock_bh(&mvmsta->lock);
1063
1064 txq_id = tid_data->txq_id;
1065
1066 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
1067 mvmsta->sta_id, tid, txq_id, tid_data->state);
1068
1069 mvmsta->agg_tids &= ~BIT(tid);
1070
1071 /* No need to mark as reserved anymore */
1072 spin_lock_bh(&mvm->queue_info_lock);
1073 mvm->queue_info[txq_id].setup_reserved = false;
1074 spin_unlock_bh(&mvm->queue_info_lock);
1075
1076 switch (tid_data->state) {
1077 case IWL_AGG_ON:
1078 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1079
1080 IWL_DEBUG_TX_QUEUES(mvm,
1081 "ssn = %d, next_recl = %d\n",
1082 tid_data->ssn, tid_data->next_reclaimed);
1083
1084 /* There are still packets for this RA / TID in the HW */
1085 if (tid_data->ssn != tid_data->next_reclaimed) {
1086 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
1087 err = 0;
1088 break;
1089 }
1090
1091 tid_data->ssn = 0xffff;
1092 tid_data->state = IWL_AGG_OFF;
1093 spin_unlock_bh(&mvmsta->lock);
1094
1095 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1096
1097 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1098
1099 iwl_mvm_disable_txq(mvm, txq_id,
1100 vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
1101 0);
1102 return 0;
1103 case IWL_AGG_STARTING:
1104 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1105 /*
1106 * The agg session has been stopped before it was set up. This
1107 * can happen when the AddBA timer times out for example.
1108 */
1109
1110 /* No barriers since we are under mutex */
1111 lockdep_assert_held(&mvm->mutex);
1112
1113 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1114 tid_data->state = IWL_AGG_OFF;
1115 err = 0;
1116 break;
1117 default:
1118 IWL_ERR(mvm,
1119 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
1120 mvmsta->sta_id, tid, tid_data->state);
1121 IWL_ERR(mvm,
1122 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
1123 err = -EINVAL;
1124 }
1125
1126 spin_unlock_bh(&mvmsta->lock);
1127
1128 return err;
1129}
1130
1131int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1132 struct ieee80211_sta *sta, u16 tid)
1133{
1134 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1135 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1136 u16 txq_id;
1137 enum iwl_mvm_agg_state old_state;
1138
1139 /*
1140 * First set the agg state to OFF to avoid calling
1141 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
1142 */
1143 spin_lock_bh(&mvmsta->lock);
1144 txq_id = tid_data->txq_id;
1145 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
1146 mvmsta->sta_id, tid, txq_id, tid_data->state);
1147 old_state = tid_data->state;
1148 tid_data->state = IWL_AGG_OFF;
1149 mvmsta->agg_tids &= ~BIT(tid);
1150 spin_unlock_bh(&mvmsta->lock);
1151
1152 /* No need to mark as reserved */
1153 spin_lock_bh(&mvm->queue_info_lock);
1154 mvm->queue_info[txq_id].setup_reserved = false;
1155 spin_unlock_bh(&mvm->queue_info_lock);
1156
1157 if (old_state >= IWL_AGG_ON) {
1158 iwl_mvm_drain_sta(mvm, mvmsta, true);
1159 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
1160 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
1161 iwl_trans_wait_tx_queue_empty(mvm->trans,
1162 mvmsta->tfd_queue_msk);
1163 iwl_mvm_drain_sta(mvm, mvmsta, false);
1164
1165 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1166
1167 iwl_mvm_disable_txq(mvm, tid_data->txq_id,
1168 vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
1169 0);
1170 }
1171
1172 return 0;
1173}
1174
1175static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
1176{
1177 int i, max = -1, max_offs = -1;
1178
1179 lockdep_assert_held(&mvm->mutex);
1180
1181 /* Pick the unused key offset with the highest 'deleted'
1182 * counter. Every time a key is deleted, all the counters
1183 * are incremented and the one that was just deleted is
1184 * reset to zero. Thus, the highest counter is the one
1185 * that was deleted longest ago. Pick that one.
1186 */
1187 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
1188 if (test_bit(i, mvm->fw_key_table))
1189 continue;
1190 if (mvm->fw_key_deleted[i] > max) {
1191 max = mvm->fw_key_deleted[i];
1192 max_offs = i;
1193 }
1194 }
1195
1196 if (max_offs < 0)
1197 return STA_KEY_IDX_INVALID;
1198
1199 __set_bit(max_offs, mvm->fw_key_table);
1200
1201 return max_offs;
1202}
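
/*
 * A worked example of the eviction policy above: with three offsets,
 * fw_key_deleted = {4, 0, 7} and offset 1 still in use, offset 2 is picked
 * because its key was deleted longest ago (highest counter). On every key
 * deletion all counters are incremented and the freshly deleted offset is
 * reset to 0, which preserves this ordering.
 */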
1203
1204static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
1205 struct ieee80211_sta *sta)
1206{
1207 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1208
1209 if (sta) {
1210 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1211
1212 return mvm_sta->sta_id;
1213 }
1214
1215 /*
1216 * The device expects GTKs for station interfaces to be
1217 * installed as GTKs for the AP station. If we have no
1218 * station ID, then use AP's station ID.
1219 */
1220 if (vif->type == NL80211_IFTYPE_STATION &&
1221 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
1222 return mvmvif->ap_sta_id;
1223
1224 return IWL_MVM_STATION_COUNT;
1225}
1226
1227static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1228 struct iwl_mvm_sta *mvm_sta,
1229 struct ieee80211_key_conf *keyconf, bool mcast,
1230 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
1231{
1232 struct iwl_mvm_add_sta_key_cmd cmd = {};
1233 __le16 key_flags;
1234 int ret;
1235 u32 status;
1236 u16 keyidx;
1237 int i;
1238 u8 sta_id = mvm_sta->sta_id;
1239
1240 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
1241 STA_KEY_FLG_KEYID_MSK;
1242 key_flags = cpu_to_le16(keyidx);
1243 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
1244
1245 switch (keyconf->cipher) {
1246 case WLAN_CIPHER_SUITE_TKIP:
1247 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
1248 cmd.tkip_rx_tsc_byte2 = tkip_iv32;
1249 for (i = 0; i < 5; i++)
1250 cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
1251 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1252 break;
1253 case WLAN_CIPHER_SUITE_CCMP:
1254 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
1255 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1256 break;
1257 case WLAN_CIPHER_SUITE_WEP104:
1258 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
1259 /* fall through */
1260 case WLAN_CIPHER_SUITE_WEP40:
1261 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
1262 memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
1263 break;
1264 default:
1265 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
1266 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1267 }
1268
1269 if (mcast)
1270 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1271
1272 cmd.key_offset = keyconf->hw_key_idx;
1273 cmd.key_flags = key_flags;
1274 cmd.sta_id = sta_id;
1275
1276 status = ADD_STA_SUCCESS;
1277 if (cmd_flags & CMD_ASYNC)
1278 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
1279 sizeof(cmd), &cmd);
1280 else
1281 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1282 &cmd, &status);
1283
1284 switch (status) {
1285 case ADD_STA_SUCCESS:
1286 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
1287 break;
1288 default:
1289 ret = -EIO;
1290 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
1291 break;
1292 }
1293
1294 return ret;
1295}
1296
1297static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
1298 struct ieee80211_key_conf *keyconf,
1299 u8 sta_id, bool remove_key)
1300{
1301 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
1302
1303 /* verify the key details match the required command's expectations */
1304 if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
1305 (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
1306 (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
1307 return -EINVAL;
1308
1309 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
1310 igtk_cmd.sta_id = cpu_to_le32(sta_id);
1311
1312 if (remove_key) {
1313 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
1314 } else {
1315 struct ieee80211_key_seq seq;
1316 const u8 *pn;
1317
1318 memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
1319 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1320 pn = seq.aes_cmac.pn;
1321 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
1322 ((u64) pn[4] << 8) |
1323 ((u64) pn[3] << 16) |
1324 ((u64) pn[2] << 24) |
1325 ((u64) pn[1] << 32) |
1326 ((u64) pn[0] << 40));
1327 }
1328
1329 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
1330 remove_key ? "removing" : "installing",
1331 igtk_cmd.sta_id);
1332
1333 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
1334 sizeof(igtk_cmd), &igtk_cmd);
1335}
1336
1337
1338static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
1339 struct ieee80211_vif *vif,
1340 struct ieee80211_sta *sta)
1341{
1342 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1343
1344 if (sta)
1345 return sta->addr;
1346
1347 if (vif->type == NL80211_IFTYPE_STATION &&
1348 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1349 u8 sta_id = mvmvif->ap_sta_id;
1350 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1351 lockdep_is_held(&mvm->mutex));
1352 return sta->addr;
1353 }
1354
1355
1356 return NULL;
1357}
1358
1359static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1360 struct ieee80211_vif *vif,
1361 struct ieee80211_sta *sta,
1362 struct ieee80211_key_conf *keyconf,
1363 bool mcast)
1364{
1365 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1366 int ret;
1367 const u8 *addr;
1368 struct ieee80211_key_seq seq;
1369 u16 p1k[5];
1370
1371 switch (keyconf->cipher) {
1372 case WLAN_CIPHER_SUITE_TKIP:
1373 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
1374 /* get phase 1 key from mac80211 */
1375 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1376 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1377 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1378 seq.tkip.iv32, p1k, 0);
1379 break;
1380 case WLAN_CIPHER_SUITE_CCMP:
1381 case WLAN_CIPHER_SUITE_WEP40:
1382 case WLAN_CIPHER_SUITE_WEP104:
1383 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1384 0, NULL, 0);
1385 break;
1386 default:
1387 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1388 0, NULL, 0);
1389 }
1390
1391 return ret;
1392}
1393
1394static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
1395 struct ieee80211_key_conf *keyconf,
1396 bool mcast)
1397{
1398 struct iwl_mvm_add_sta_key_cmd cmd = {};
1399 __le16 key_flags;
1400 int ret;
1401 u32 status;
1402
1403 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
1404 STA_KEY_FLG_KEYID_MSK);
1405 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
1406 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
1407
1408 if (mcast)
1409 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1410
1411 cmd.key_flags = key_flags;
1412 cmd.key_offset = keyconf->hw_key_idx;
1413 cmd.sta_id = sta_id;
1414
1415 status = ADD_STA_SUCCESS;
1416 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1417 &cmd, &status);
1418
1419 switch (status) {
1420 case ADD_STA_SUCCESS:
1421 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
1422 break;
1423 default:
1424 ret = -EIO;
1425 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
1426 break;
1427 }
1428
1429 return ret;
1430}
1431
1432int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1433 struct ieee80211_vif *vif,
1434 struct ieee80211_sta *sta,
1435 struct ieee80211_key_conf *keyconf,
1436 bool have_key_offset)
1437{
1438 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1439 u8 sta_id;
1440 int ret;
1441 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
1442
1443 lockdep_assert_held(&mvm->mutex);
1444
1445 /* Get the station id from the mvm local station table */
1446 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
1447 if (sta_id == IWL_MVM_STATION_COUNT) {
1448 IWL_ERR(mvm, "Failed to find station id\n");
1449 return -EINVAL;
1450 }
1451
1452 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
1453 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
1454 goto end;
1455 }
1456
1457 /*
1458 * It is possible that the 'sta' parameter is NULL, and thus
1459 * there is a need to retrieve the sta from the local station table.
1460 */
1461 if (!sta) {
1462 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1463 lockdep_is_held(&mvm->mutex));
1464 if (IS_ERR_OR_NULL(sta)) {
1465 IWL_ERR(mvm, "Invalid station id\n");
1466 return -EINVAL;
1467 }
1468 }
1469
1470 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
1471 return -EINVAL;
1472
1473 if (!have_key_offset) {
1474 /*
1475 * The D3 firmware hardcodes the PTK offset to 0, so we have to
1476 * configure it there. As a result, this workaround exists to
1477 * let the caller set the key offset (hw_key_idx), see d3.c.
1478 */
1479 keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
1480 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
1481 return -ENOSPC;
1482 }
1483
1484 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
1485 if (ret) {
1486 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
1487 goto end;
1488 }
1489
1490 /*
1491 * For WEP, the same key is used for multicast and unicast. Upload it
1492 * again, using the same key offset, and now pointing the other one
1493 * to the same key slot (offset).
1494 * If this fails, remove the original as well.
1495 */
1496 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
1497 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
1498 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
1499 if (ret) {
1500 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
1501 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
1502 }
1503 }
1504
1505end:
1506 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
1507 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
1508 sta ? sta->addr : zero_addr, ret);
1509 return ret;
1510}
1511
1512int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1513 struct ieee80211_vif *vif,
1514 struct ieee80211_sta *sta,
1515 struct ieee80211_key_conf *keyconf)
1516{
1517 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1518 u8 sta_id;
1519 int ret, i;
1520
1521 lockdep_assert_held(&mvm->mutex);
1522
1523 /* Get the station id from the mvm local station table */
1524 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
1525
1526 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
1527 keyconf->keyidx, sta_id);
1528
1529 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
1530 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
1531
1532 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
1533 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
1534 keyconf->hw_key_idx);
1535 return -ENOENT;
1536 }
1537
1538 /* track which key was deleted last */
1539 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
1540 if (mvm->fw_key_deleted[i] < U8_MAX)
1541 mvm->fw_key_deleted[i]++;
1542 }
1543 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
1544
1545 if (sta_id == IWL_MVM_STATION_COUNT) {
1546 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
1547 return 0;
1548 }
1549
1550 /*
1551 * It is possible that the 'sta' parameter is NULL, and thus
1552 * there is a need to retrieve the sta from the local station table,
1553 * for example when a GTK is removed (where the sta_id will then be
1554 * the AP ID, and no station was passed by mac80211.)
1555 */
1556 if (!sta) {
1557 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1558 lockdep_is_held(&mvm->mutex));
1559 if (!sta) {
1560 IWL_ERR(mvm, "Invalid station id\n");
1561 return -EINVAL;
1562 }
1563 }
1564
1565 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
1566 return -EINVAL;
1567
1568 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
1569 if (ret)
1570 return ret;
1571
1572 /* delete WEP key twice to get rid of (now useless) offset */
1573 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
1574 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
1575 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
1576
1577 return ret;
1578}
1579
1580void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
1581 struct ieee80211_vif *vif,
1582 struct ieee80211_key_conf *keyconf,
1583 struct ieee80211_sta *sta, u32 iv32,
1584 u16 *phase1key)
1585{
1586 struct iwl_mvm_sta *mvm_sta;
1587 u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
1588 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1589
1590 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
1591 return;
1592
1593 rcu_read_lock();
1594
1595 if (!sta) {
1596 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1597 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
1598 rcu_read_unlock();
1599 return;
1600 }
1601 }
1602
1603 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1604 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1605 iv32, phase1key, CMD_ASYNC);
1606 rcu_read_unlock();
1607}
1608
1609void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1610 struct ieee80211_sta *sta)
1611{
1612 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1613 struct iwl_mvm_add_sta_cmd cmd = {
1614 .add_modify = STA_MODE_MODIFY,
1615 .sta_id = mvmsta->sta_id,
1616 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
1617 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1618 };
1619 int ret;
1620
1621 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1622 if (ret)
1623 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1624}
1625
1626void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1627 struct ieee80211_sta *sta,
1628 enum ieee80211_frame_release_type reason,
1629 u16 cnt, u16 tids, bool more_data,
1630 bool agg)
1631{
1632 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1633 struct iwl_mvm_add_sta_cmd cmd = {
1634 .add_modify = STA_MODE_MODIFY,
1635 .sta_id = mvmsta->sta_id,
1636 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
1637 .sleep_tx_count = cpu_to_le16(cnt),
1638 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1639 };
1640 int tid, ret;
1641 unsigned long _tids = tids;
1642
1643 /* convert TIDs to ACs - we don't support TSPEC so that's OK
1644 * Note that this field is reserved and unused by firmware not
1645 * supporting GO uAPSD, so it's safe to always do this.
1646 */
1647 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
1648 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
1649
1650	/* If we're releasing frames from aggregation queues then check if all
1651	 * the queues combined that we're releasing frames from have
1652 * - more frames than the service period, in which case more_data
1653 * needs to be set
1654 * - fewer than 'cnt' frames, in which case we need to adjust the
1655 * firmware command (but do that unconditionally)
1656 */
1657 if (agg) {
1658 int remaining = cnt;
1659
1660 spin_lock_bh(&mvmsta->lock);
1661 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
1662 struct iwl_mvm_tid_data *tid_data;
1663 u16 n_queued;
1664
1665 tid_data = &mvmsta->tid_data[tid];
1666 if (WARN(tid_data->state != IWL_AGG_ON &&
1667 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
1668 "TID %d state is %d\n",
1669 tid, tid_data->state)) {
1670 spin_unlock_bh(&mvmsta->lock);
1671 ieee80211_sta_eosp(sta);
1672 return;
1673 }
1674
1675 n_queued = iwl_mvm_tid_queued(tid_data);
1676 if (n_queued > remaining) {
1677 more_data = true;
1678 remaining = 0;
1679 break;
1680 }
1681 remaining -= n_queued;
1682 }
1683 spin_unlock_bh(&mvmsta->lock);
1684
1685 cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
1686 if (WARN_ON(cnt - remaining == 0)) {
1687 ieee80211_sta_eosp(sta);
1688 return;
1689 }
1690 }
1691
1692 /* Note: this is ignored by firmware not supporting GO uAPSD */
1693 if (more_data)
1694 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
1695
1696 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
1697 mvmsta->next_status_eosp = true;
1698 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
1699 } else {
1700 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
1701 }
1702
1703 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1704 if (ret)
1705 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1706}
1707
1708void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
1709 struct iwl_rx_cmd_buffer *rxb)
1710{
1711 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1712 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
1713 struct ieee80211_sta *sta;
1714 u32 sta_id = le32_to_cpu(notif->sta_id);
1715
1716 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
1717 return;
1718
1719 rcu_read_lock();
1720 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1721 if (!IS_ERR_OR_NULL(sta))
1722 ieee80211_sta_eosp(sta);
1723 rcu_read_unlock();
1724}
1725
1726void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
1727 struct iwl_mvm_sta *mvmsta, bool disable)
1728{
1729 struct iwl_mvm_add_sta_cmd cmd = {
1730 .add_modify = STA_MODE_MODIFY,
1731 .sta_id = mvmsta->sta_id,
1732 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
1733 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
1734 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1735 };
1736 int ret;
1737
1738 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1739 if (ret)
1740 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1741}
1742
1743void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
1744 struct ieee80211_sta *sta,
1745 bool disable)
1746{
1747 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1748
1749 spin_lock_bh(&mvm_sta->lock);
1750
1751 if (mvm_sta->disable_tx == disable) {
1752 spin_unlock_bh(&mvm_sta->lock);
1753 return;
1754 }
1755
1756 mvm_sta->disable_tx = disable;
1757
1758 /*
1759 * Tell mac80211 to start/stop queuing tx for this station,
1760 * but don't stop queuing if there are still pending frames
1761 * for this station.
1762 */
1763 if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
1764 ieee80211_sta_block_awake(mvm->hw, sta, disable);
1765
1766 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
1767
1768 spin_unlock_bh(&mvm_sta->lock);
1769}
1770
1771void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
1772 struct iwl_mvm_vif *mvmvif,
1773 bool disable)
1774{
1775 struct ieee80211_sta *sta;
1776 struct iwl_mvm_sta *mvm_sta;
1777 int i;
1778
1779 lockdep_assert_held(&mvm->mutex);
1780
1781 /* Block/unblock all the stations of the given mvmvif */
1782 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
1783 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
1784 lockdep_is_held(&mvm->mutex));
1785 if (IS_ERR_OR_NULL(sta))
1786 continue;
1787
1788 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1789 if (mvm_sta->mac_id_n_color !=
1790 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
1791 continue;
1792
1793 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
1794 }
1795}
1796
1797void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1798{
1799 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1800 struct iwl_mvm_sta *mvmsta;
1801
1802 rcu_read_lock();
1803
1804 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
1805
1806 if (!WARN_ON(!mvmsta))
1807 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
1808
1809 rcu_read_unlock();
1810}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
deleted file mode 100644
index eedb215eba3f..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ /dev/null
@@ -1,426 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __sta_h__
67#define __sta_h__
68
69#include <linux/spinlock.h>
70#include <net/mac80211.h>
71#include <linux/wait.h>
72
73#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
74#include "fw-api.h" /* IWL_MVM_STATION_COUNT */
75#include "rs.h"
76
77struct iwl_mvm;
78struct iwl_mvm_vif;
79
80/**
81 * DOC: station table - introduction
82 *
83 * The station table is a list of data structures that represent the stations.
84 * In STA/P2P client mode, the driver will hold one station for the AP/ GO.
85 * In GO/AP mode, the driver will have as many stations as associated clients.
86 * All these stations are reflected in the fw's station table. The driver
87 * keeps the fw's station table up to date with the ADD_STA command. Stations
88 * can be removed by the REMOVE_STA command.
89 *
90 * All the data related to a station is held in the structure %iwl_mvm_sta
91 * which is embedded in mac80211's %ieee80211_sta (in the drv_priv area).
92 * This data includes the index of the station in the fw, per tid information
93 * (sequence numbers, Block-ack state machine, etc...). The stations are
94 * created and deleted by the %sta_state callback from %ieee80211_ops.
95 *
96 * The driver holds a map, %fw_id_to_mac_id, that allows fetching an
97 * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
98 * station index. That way, the driver is able to get the TID-related data in
99 * O(1) in time-sensitive paths (Tx / Tx response / BA notification). These
100 * paths are triggered by the fw, and the driver needs to get a pointer to the
101 * %ieee80211_sta structure. This map helps to get that pointer quickly.
102 */
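
/*
 * A minimal sketch of the O(1) lookup described above: resolve a fw station
 * index to the driver's per-station data under RCU. The helper name is made
 * up for illustration; the driver provides an equivalent,
 * %iwl_mvm_sta_from_staid_rcu.
 *
 *	static struct iwl_mvm_sta *
 *	sketch_sta_from_fw_id(struct iwl_mvm *mvm, u8 sta_id)
 *	{
 *		struct ieee80211_sta *sta;
 *
 *		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 *		if (IS_ERR_OR_NULL(sta))
 *			return NULL;	// internal station or already removed
 *		return iwl_mvm_sta_from_mac80211(sta);
 *	}
 */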
103
104/**
105 * DOC: station table - locking
106 *
107 * As stated before, the station is created / deleted by mac80211's %sta_state
108 * callback from %ieee80211_ops which can sleep. The next paragraph explains
109 * the locking of a single station, the next ones relate to the station
110 * table.
111 *
112 * The station holds the sequence number per tid. So this data needs to be
113 * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack
114 * information (the state machine and the logic that checks if the queues
115 * were drained), so it also needs to be accessible from the Tx response flow.
116 * In short, the station needs to be accessed from sleepable context as well as
117 * from tasklets, so the station itself needs a spinlock.
118 *
119 * The writers of %fw_id_to_mac_id map are serialized by the global mutex of
120 * the mvm op_mode. This is possible since %sta_state can sleep.
121 * The pointers in this map are RCU protected, hence we won't replace the
122 * station while we have Tx / Tx response / BA notification running.
123 *
124 * If a station is deleted while it still has packets in its A-MPDU queues,
125 * then the reclaim flow will notice that there is no station in the map for
126 * sta_id and it will dump the responses.
127 */
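
/*
 * A minimal sketch of the locking pattern described above: writers update the
 * map under the op_mode mutex, while readers in the Tx / Tx-response paths
 * use RCU plus the per-station spinlock.
 *
 *	// writer (sleepable context, e.g. the sta_state callback)
 *	lockdep_assert_held(&mvm->mutex);
 *	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
 *
 *	// reader (softIRQ, e.g. Tx response)
 *	rcu_read_lock();
 *	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 *	if (!IS_ERR_OR_NULL(sta)) {
 *		mvmsta = iwl_mvm_sta_from_mac80211(sta);
 *		spin_lock_bh(&mvmsta->lock);
 *		// ... access mvmsta->tid_data[tid] ...
 *		spin_unlock_bh(&mvmsta->lock);
 *	}
 *	rcu_read_unlock();
 */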
128
129/**
130 * DOC: station table - internal stations
131 *
132 * The FW needs a few internal stations that are not reflected in
133 * mac80211, such as the broadcast station in AP / GO mode, or the AUX sta
134 * used for scanning and for the P2P device (during the GO negotiation).
135 * For these kinds of stations we have the %iwl_mvm_int_sta struct, which
136 * holds the data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
137 * Usually the data for these stations is static, so no locking is required,
138 * and no TID data is kept, as it is not needed either.
139 * One thing to note is that these stations have an ID in the fw, but not
140 * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id,
141 * we fill ERR_PTR(-EINVAL) in this mapping, and any dereference of
142 * pointers from this mapping needs to check that the value is not an error
143 * or NULL.
144 *
145 * Currently there is only one auxiliary station for scanning, initialized
146 * on init.
147 */
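
/*
 * A minimal example of the ERR_PTR convention described above: any walk over
 * %fw_id_to_mac_id must skip both NULL and error values, since internal
 * stations are "reserved" with ERR_PTR(-EINVAL).
 *
 *	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
 *		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
 *						lockdep_is_held(&mvm->mutex));
 *		if (IS_ERR_OR_NULL(sta))
 *			continue;	// free slot or internal station
 *		// ... a real mac80211 station ...
 *	}
 */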
148
149/**
150 * DOC: station table - AP Station in STA mode
151 *
152 * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
153 * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
154 * %fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
155 * the AP station from the fw before setting the MAC context as unassociated.
156 * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
157 * removed by mac80211, but the station won't be removed in the fw until the
158 * VIF is set as unassociated. Then, %ap_sta_id will be invalidated.
159 */
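
/*
 * The check enforcing this ordering in %iwl_mvm_rm_sta (sta.c) boils down to
 * the sketch below (error handling omitted).
 *
 *	if (vif->bss_conf.assoc)
 *		return ret;	// still associated: keep the AP STA in the fw
 *
 *	// unassociated: the AP STA can really be removed now
 *	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
 */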
160
161/**
162 * DOC: station table - Drain vs. Flush
163 *
164 * Flush means that all the frames in the SCD queue are dumped regardless of the
165 * station to which they were sent. We do that when we disassociate and before
166 * we remove the STA of the AP. The flush can be done synchronously against the
167 * fw.
168 * Drain means that the fw will drop all the frames sent to a specific station.
169 * This is useful when a client (if we are IBSS / GO or AP) disassociates. In
170 * that case, we need to drain all the frames for that client from the AC queues
171 * that are shared with the other clients. Only then, we can remove the STA in
172 * the fw. In order to do so, we track the non-AMPDU packets for each station.
173 * If mac80211 removes a STA and if it still has non-AMPDU packets pending in
174 * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all
175 * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped
176 * (we know about it with its Tx response), we remove the station in fw and set
177 * it as %NULL in %fw_id_to_mac_id: this is the purpose of
178 * %iwl_mvm_sta_drained_wk.
179 */
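
/*
 * A minimal sketch of the drain-then-flush sequence described above, as used
 * for the AP station in %iwl_mvm_rm_sta (sta.c); error handling is omitted.
 *
 *	iwl_mvm_drain_sta(mvm, mvm_sta, true);	// fw drops this STA's frames
 *	iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
 *	iwl_trans_wait_tx_queue_empty(mvm->trans, mvm_sta->tfd_queue_msk);
 *	iwl_mvm_drain_sta(mvm, mvm_sta, false);	// stop draining again
 */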
180
181/**
182 * DOC: station table - fw restart
183 *
184 * When the fw asserts, or we have any other issue that requires resetting the
185 * driver, we require mac80211 to reconfigure the driver. Since the private
186 * data of the stations is embedded in mac80211's %ieee80211_sta, that data will
187 * not be zeroed and needs to be reinitialized manually.
188 * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and that will hint us
189 * that we must not allocate a new sta_id but reuse the previous one. This
190 * means that the stations being re-added after the reset will have the same
191 * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id
192 * map, since the stations aren't in the fw any more. Internal stations that
193 * are not added by mac80211 will be re-added in the init flow that is called
194 * after the restart: mac80211 calls %iwl_mvm_mac_start, which calls
195 * %iwl_mvm_up.
196 */
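
/*
 * A minimal sketch of the sta_id reuse rule above, taken from
 * %iwl_mvm_add_sta (sta.c).
 *
 *	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
 *		sta_id = iwl_mvm_find_free_sta_id(mvm,
 *						  ieee80211_vif_type_p2p(vif));
 *	else
 *		sta_id = mvm_sta->sta_id;	// keep the pre-restart slot
 */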
197
198/**
199 * DOC: AP mode - PS
200 *
201 * When a station is asleep, the fw will set it as "asleep". All frames on
202 * shared queues (i.e. non-aggregation queues) to that station will be dropped
203 * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
204 *
205 * AMPDUs are in a separate queue that is stopped by the fw. We just need to
206 * let mac80211 know when there are frames in these queues so that it can
207 * properly handle trigger frames.
208 *
209 * When a trigger frame is received, mac80211 tells the driver to send frames
210 * from the AMPDU queues or sends frames to non-aggregation queues itself,
211 * depending on which ACs are delivery-enabled and what TID has frames to
212 * transmit. (Note that mac80211 has all the knowledge since all the non-agg
213 * frames are buffered / filtered, and the driver tells mac80211 about agg
214 * frames). The driver needs to tell the fw to let frames out even if the
215 * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
216 *
217 * When we receive a frame from that station with PM bit unset, the driver
218 * needs to let the fw know that this station isn't asleep any more. This is
219 * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the
220 * station's wakeup.
221 *
222 * For a GO, the Service Period might be cut short due to an absence period
223 * of the GO. In this (and all other cases) the firmware notifies us with the
224 * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
225 * already sent to the device will be rejected again.
226 *
227 * See also "AP support for powersaving clients" in mac80211.h.
228 */
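
/*
 * A minimal sketch of the two driver-side hooks described above; both are
 * implemented in sta.c.
 *
 *	// trigger frame received: release 'cnt' frames for the listed TIDs
 *	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, cnt, tids,
 *					  more_data, agg);
 *
 *	// frame received with the PM bit clear: the station woke up
 *	iwl_mvm_sta_modify_ps_wake(mvm, sta);
 */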
229
230/**
231 * enum iwl_mvm_agg_state
232 *
233 * The state machine of the BA agreement establishment / tear down.
234 * These states relate to a specific RA / TID.
235 *
236 * @IWL_AGG_OFF: aggregation is not used
237 * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
238 * @IWL_AGG_ON: aggregation session is up
239 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
240 * HW queue to be empty of packets for this RA / TID.
241 * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
242 * HW queue to be empty of packets for this RA / TID.
243 */
244enum iwl_mvm_agg_state {
245 IWL_AGG_OFF = 0,
246 IWL_AGG_STARTING,
247 IWL_AGG_ON,
248 IWL_EMPTYING_HW_QUEUE_ADDBA,
249 IWL_EMPTYING_HW_QUEUE_DELBA,
250};
251
252/**
253 * struct iwl_mvm_tid_data - holds the states for each RA / TID
254 * @seq_number: the next WiFi sequence number to use
255 * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
256 * This is basically (last acked packet++).
257 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
258 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
259 * @reduced_tpc: Reduced tx power. Holds the data between the
260 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
261 * @state: state of the BA agreement establishment / tear down.
262 * @txq_id: Tx queue used by the BA session
263 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
264 * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
265 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
266 * we are ready to finish the Tx AGG stop / start flow.
267 * @tx_time: medium time consumed by this A-MPDU
268 */
269struct iwl_mvm_tid_data {
270 u16 seq_number;
271 u16 next_reclaimed;
272 /* The rest is Tx AGG related */
273 u32 rate_n_flags;
274 u8 reduced_tpc;
275 enum iwl_mvm_agg_state state;
276 u16 txq_id;
277 u16 ssn;
278 u16 tx_time;
279};
280
281static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
282{
283 return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
284 tid_data->next_reclaimed);
285}
286
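ieee80211_sn_sub() subtracts modulo the 12-bit sequence-number space, so this
helper stays correct across wraparound: for example, if %seq_number corresponds
to SN 5 while %next_reclaimed is 4090, (5 - 4090) & 0xfff = 11, i.e. 11 frames
are still queued.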
287/**
288 * struct iwl_mvm_sta - representation of a station in the driver
289 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
290 * @tfd_queue_msk: the tfd queues used by the station
291 * @hw_queue: per-AC mapping of the TFD queues used by station
292 * @mac_id_n_color: the MAC context this station is linked to
293 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
294 * tid.
295 * @max_agg_bufsize: the maximal size of the AGG buffer for this station
296 * @bt_reduced_txpower: is reduced tx power enabled for this station
297 * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
298 * we need to signal the EOSP
299 * @lock: lock to protect the whole struct. Since %tid_data is accessed from
300 * the Tx and the Tx response flows, it needs a spinlock (sketched below).
301 * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
302 * @tx_protection: reference counter for controlling the Tx protection.
303 * @tt_tx_protection: is Tx protection enabled due to thermal throttling?
304 * @disable_tx: is tx to this STA disabled?
305 * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
306 *
307 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
308 * in the structure for use by the driver. This structure is placed in that
309 * space.
310 *
311 */
312struct iwl_mvm_sta {
313 u32 sta_id;
314 u32 tfd_queue_msk;
315 u8 hw_queue[IEEE80211_NUM_ACS];
316 u32 mac_id_n_color;
317 u16 tid_disable_agg;
318 u8 max_agg_bufsize;
319 bool bt_reduced_txpower;
320 bool next_status_eosp;
321 spinlock_t lock;
322 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
323 struct iwl_lq_sta lq_sta;
324 struct ieee80211_vif *vif;
325
326 /* Temporary, until the new TLC will control the Tx protection */
327 s8 tx_protection;
328 bool tt_tx_protection;
329
330 bool disable_tx;
331 u8 agg_tids;
332};
333
334static inline struct iwl_mvm_sta *
335iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
336{
337 return (void *)sta->drv_priv;
338}
339
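As the @lock documentation above requires, %tid_data must only be touched under
the station spinlock; a minimal illustrative pattern (the helper below is
hypothetical and exists only to show the locking rule):

	static bool iwl_mvm_sta_tid_has_queued(struct ieee80211_sta *sta, u8 tid)
	{
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		bool queued;

		spin_lock_bh(&mvmsta->lock);
		queued = iwl_mvm_tid_queued(&mvmsta->tid_data[tid]) != 0;
		spin_unlock_bh(&mvmsta->lock);

		return queued;
	}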
340/**
341 * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
342 * broadcast)
343 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
344 * @tfd_queue_msk: the tfd queues used by the station
345 */
346struct iwl_mvm_int_sta {
347 u32 sta_id;
348 u32 tfd_queue_msk;
349};
350
351int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
352 bool update);
353int iwl_mvm_add_sta(struct iwl_mvm *mvm,
354 struct ieee80211_vif *vif,
355 struct ieee80211_sta *sta);
356int iwl_mvm_update_sta(struct iwl_mvm *mvm,
357 struct ieee80211_vif *vif,
358 struct ieee80211_sta *sta);
359int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
360 struct ieee80211_vif *vif,
361 struct ieee80211_sta *sta);
362int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
363 struct ieee80211_vif *vif,
364 u8 sta_id);
365int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
366 struct ieee80211_vif *vif,
367 struct ieee80211_sta *sta,
368 struct ieee80211_key_conf *key,
369 bool have_key_offset);
370int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
371 struct ieee80211_vif *vif,
372 struct ieee80211_sta *sta,
373 struct ieee80211_key_conf *keyconf);
374
375void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
376 struct ieee80211_vif *vif,
377 struct ieee80211_key_conf *keyconf,
378 struct ieee80211_sta *sta, u32 iv32,
379 u16 *phase1key);
380
381void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
382 struct iwl_rx_cmd_buffer *rxb);
383
384/* AMPDU */
385int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
386 int tid, u16 ssn, bool start);
387int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
388 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
389int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
390 struct ieee80211_sta *sta, u16 tid, u8 buf_size);
391int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
392 struct ieee80211_sta *sta, u16 tid);
393int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
394 struct ieee80211_sta *sta, u16 tid);
395
396int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
397void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
398
399int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
400int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
401int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
402int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
403int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
404void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
405
406void iwl_mvm_sta_drained_wk(struct work_struct *wk);
407void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
408 struct ieee80211_sta *sta);
409void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
410 struct ieee80211_sta *sta,
411 enum ieee80211_frame_release_type reason,
412 u16 cnt, u16 tids, bool more_data,
413 bool agg);
414int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
415 bool drain);
416void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
417 struct iwl_mvm_sta *mvmsta, bool disable);
418void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
419 struct ieee80211_sta *sta,
420 bool disable);
421void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
422 struct iwl_mvm_vif *mvmvif,
423 bool disable);
424void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
425
426#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c
deleted file mode 100644
index fe2fa5650443..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/tdls.c
+++ /dev/null
@@ -1,732 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2014 Intel Mobile Communications GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2014 Intel Mobile Communications GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/etherdevice.h>
65#include "mvm.h"
66#include "time-event.h"
67#include "iwl-io.h"
68#include "iwl-prph.h"
69
70#define TU_TO_US(x) ((x) * 1024)
71#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
72
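(For scale: 1 TU is 1024 microseconds, so TU_TO_MS(2 * 100) - a two-beacon DTIM
at the common 100 TU beacon interval - evaluates to 204 ms with the integer
division above; this is the magnitude of the delays computed from these macros
below.)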
73void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
74{
75 struct ieee80211_sta *sta;
76 struct iwl_mvm_sta *mvmsta;
77 int i;
78
79 lockdep_assert_held(&mvm->mutex);
80
81 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
82 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
83 lockdep_is_held(&mvm->mutex));
84 if (!sta || IS_ERR(sta) || !sta->tdls)
85 continue;
86
87 mvmsta = iwl_mvm_sta_from_mac80211(sta);
88 ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
89 NL80211_TDLS_TEARDOWN,
90 WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
91 GFP_KERNEL);
92 }
93}
94
95int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
96{
97 struct ieee80211_sta *sta;
98 struct iwl_mvm_sta *mvmsta;
99 int count = 0;
100 int i;
101
102 lockdep_assert_held(&mvm->mutex);
103
104 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
105 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
106 lockdep_is_held(&mvm->mutex));
107 if (!sta || IS_ERR(sta) || !sta->tdls)
108 continue;
109
110 if (vif) {
111 mvmsta = iwl_mvm_sta_from_mac80211(sta);
112 if (mvmsta->vif != vif)
113 continue;
114 }
115
116 count++;
117 }
118
119 return count;
120}
121
122static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
123{
124 struct iwl_rx_packet *pkt;
125 struct iwl_tdls_config_res *resp;
126 struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
127 struct iwl_host_cmd cmd = {
128 .id = TDLS_CONFIG_CMD,
129 .flags = CMD_WANT_SKB,
130 .data = { &tdls_cfg_cmd, },
131 .len = { sizeof(struct iwl_tdls_config_cmd), },
132 };
133 struct ieee80211_sta *sta;
134 int ret, i, cnt;
135 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
136
137 lockdep_assert_held(&mvm->mutex);
138
139 tdls_cfg_cmd.id_and_color =
140 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
141 tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
142 tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
143
144 /* for now the Tx cmd is empty and unused */
145
146 /* populate TDLS peer data */
147 cnt = 0;
148 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
149 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
150 lockdep_is_held(&mvm->mutex));
151 if (IS_ERR_OR_NULL(sta) || !sta->tdls)
152 continue;
153
154 tdls_cfg_cmd.sta_info[cnt].sta_id = i;
155 tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
156 IWL_MVM_TDLS_FW_TID;
157 tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
158 tdls_cfg_cmd.sta_info[cnt].is_initiator =
159 cpu_to_le32(sta->tdls_initiator ? 1 : 0);
160
161 cnt++;
162 }
163
164 tdls_cfg_cmd.tdls_peer_count = cnt;
165 IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
166
167 ret = iwl_mvm_send_cmd(mvm, &cmd);
168 if (WARN_ON_ONCE(ret))
169 return;
170
171 pkt = cmd.resp_pkt;
172
173 WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
174
175 /* we don't really care about the response at this point */
176
177 iwl_free_resp(&cmd);
178}
179
180void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
181 bool sta_added)
182{
183 int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
184
185 /* when the first peer joins, send a power update first */
186 if (tdls_sta_cnt == 1 && sta_added)
187 iwl_mvm_power_update_mac(mvm);
188
189 /* configure the FW with TDLS peer info */
190 iwl_mvm_tdls_config(mvm, vif);
191
192 /* when the last peer leaves, send a power update last */
193 if (tdls_sta_cnt == 0 && !sta_added)
194 iwl_mvm_power_update_mac(mvm);
195}
196
197void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
198 struct ieee80211_vif *vif)
199{
200 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
201 u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
202
203 /*
204 * iwl_mvm_protect_session() reads directly from the device
205 * (the system time), so make sure it is available.
206 */
207 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
208 return;
209
210 mutex_lock(&mvm->mutex);
211 /* Protect the session to hear the TDLS setup response on the channel */
212 iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
213 mutex_unlock(&mvm->mutex);
214
215 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
216}
217
218static const char *
219iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
220{
221 switch (state) {
222 case IWL_MVM_TDLS_SW_IDLE:
223 return "IDLE";
224 case IWL_MVM_TDLS_SW_REQ_SENT:
225 return "REQ SENT";
226 case IWL_MVM_TDLS_SW_RESP_RCVD:
227 return "RESP RECEIVED";
228 case IWL_MVM_TDLS_SW_REQ_RCVD:
229 return "REQ RECEIVED";
230 case IWL_MVM_TDLS_SW_ACTIVE:
231 return "ACTIVE";
232 }
233
234 return NULL;
235}
236
237static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
238 enum iwl_mvm_tdls_cs_state state)
239{
240 if (mvm->tdls_cs.state == state)
241 return;
242
243 IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
244 iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
245 iwl_mvm_tdls_cs_state_str(state));
246 mvm->tdls_cs.state = state;
247
248 /* we only send requests to our switching peer - update sent time */
249 if (state == IWL_MVM_TDLS_SW_REQ_SENT)
250 mvm->tdls_cs.peer.sent_timestamp =
251 iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
252
253 if (state == IWL_MVM_TDLS_SW_IDLE)
254 mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
255}
256
257void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
258{
259 struct iwl_rx_packet *pkt = rxb_addr(rxb);
260 struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
261 struct ieee80211_sta *sta;
262 unsigned int delay;
263 struct iwl_mvm_sta *mvmsta;
264 struct ieee80211_vif *vif;
265 u32 sta_id = le32_to_cpu(notif->sta_id);
266
267 lockdep_assert_held(&mvm->mutex);
268
269	/* a zero status means the fw failed to perform the channel switch */
270 if (!le32_to_cpu(notif->status)) {
271 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
272 return;
273 }
274
275 if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
276 return;
277
278 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
279 lockdep_is_held(&mvm->mutex));
280 /* the station may not be here, but if it is, it must be a TDLS peer */
281 if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
282 return;
283
284 mvmsta = iwl_mvm_sta_from_mac80211(sta);
285 vif = mvmsta->vif;
286
287 /*
288 * Update state and possibly switch again after this is over (DTIM).
289 * Also convert TU to msec.
290 */
291 delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
292 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
293 msecs_to_jiffies(delay));
294
295 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
296}
297
298static int
299iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
300 enum iwl_tdls_channel_switch_type type,
301 const u8 *peer, bool peer_initiator, u32 timestamp)
302{
303 bool same_peer = false;
304 int ret = 0;
305
306 /* get the existing peer if it's there */
307 if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
308 mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
309 struct ieee80211_sta *sta = rcu_dereference_protected(
310 mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
311 lockdep_is_held(&mvm->mutex));
312 if (!IS_ERR_OR_NULL(sta))
313 same_peer = ether_addr_equal(peer, sta->addr);
314 }
315
316 switch (mvm->tdls_cs.state) {
317 case IWL_MVM_TDLS_SW_IDLE:
318 /*
319 * might be a spurious packet from the peer after the switch is
320 * already done
321 */
322 if (type == TDLS_MOVE_CH)
323 ret = -EINVAL;
324 break;
325 case IWL_MVM_TDLS_SW_REQ_SENT:
326 /* only allow requests from the same peer */
327 if (!same_peer)
328 ret = -EBUSY;
329 else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
330 !peer_initiator)
331 /*
332 * We received a ch-switch request while an outgoing
333 * one is pending. Allow it if the peer is the link
334 * initiator.
335 */
336 ret = -EBUSY;
337 else if (type == TDLS_SEND_CHAN_SW_REQ)
338 /* wait for idle before sending another request */
339 ret = -EBUSY;
340 else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
341 /* we got a stale response - ignore it */
342 ret = -EINVAL;
343 break;
344 case IWL_MVM_TDLS_SW_RESP_RCVD:
345 /*
346 * we are waiting for the FW to give an "active" notification,
347 * so ignore requests in the meantime
348 */
349 ret = -EBUSY;
350 break;
351 case IWL_MVM_TDLS_SW_REQ_RCVD:
352 /* as above, allow the link initiator to proceed */
353 if (type == TDLS_SEND_CHAN_SW_REQ) {
354 if (!same_peer)
355 ret = -EBUSY;
356 else if (peer_initiator) /* they are the initiator */
357 ret = -EBUSY;
358 } else if (type == TDLS_MOVE_CH) {
359 ret = -EINVAL;
360 }
361 break;
362 case IWL_MVM_TDLS_SW_ACTIVE:
363 /*
364 * the only valid request when active is a request to return
365 * to the base channel by the current off-channel peer
366 */
367 if (type != TDLS_MOVE_CH || !same_peer)
368 ret = -EBUSY;
369 break;
370 }
371
372 if (ret)
373 IWL_DEBUG_TDLS(mvm,
374 "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
375 type, mvm->tdls_cs.state, peer, same_peer,
376 peer_initiator);
377
378 return ret;
379}
380
381static int
382iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
383 struct ieee80211_vif *vif,
384 enum iwl_tdls_channel_switch_type type,
385 const u8 *peer, bool peer_initiator,
386 u8 oper_class,
387 struct cfg80211_chan_def *chandef,
388 u32 timestamp, u16 switch_time,
389 u16 switch_timeout, struct sk_buff *skb,
390 u32 ch_sw_tm_ie)
391{
392 struct ieee80211_sta *sta;
393 struct iwl_mvm_sta *mvmsta;
394 struct ieee80211_tx_info *info;
395 struct ieee80211_hdr *hdr;
396 struct iwl_tdls_channel_switch_cmd cmd = {0};
397 int ret;
398
399 lockdep_assert_held(&mvm->mutex);
400
401 ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
402 timestamp);
403 if (ret)
404 return ret;
405
406 if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
407 ret = -EINVAL;
408 goto out;
409 }
410
411 cmd.switch_type = type;
412 cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
413 cmd.timing.switch_time = cpu_to_le32(switch_time);
414 cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
415
416 rcu_read_lock();
417 sta = ieee80211_find_sta(vif, peer);
418 if (!sta) {
419 rcu_read_unlock();
420 ret = -ENOENT;
421 goto out;
422 }
423 mvmsta = iwl_mvm_sta_from_mac80211(sta);
424 cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
425
426 if (!chandef) {
427 if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
428 mvm->tdls_cs.peer.chandef.chan) {
429 /* actually moving to the channel */
430 chandef = &mvm->tdls_cs.peer.chandef;
431 } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
432 type == TDLS_MOVE_CH) {
433 /* we need to return to base channel */
434 struct ieee80211_chanctx_conf *chanctx =
435 rcu_dereference(vif->chanctx_conf);
436
437 if (WARN_ON_ONCE(!chanctx)) {
438 rcu_read_unlock();
439 goto out;
440 }
441
442 chandef = &chanctx->def;
443 }
444 }
445
446 if (chandef) {
447 cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
448 PHY_BAND_24 : PHY_BAND_5);
449 cmd.ci.channel = chandef->chan->hw_value;
450 cmd.ci.width = iwl_mvm_get_channel_width(chandef);
451 cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
452 }
453
454 /* keep quota calculation simple for now - 50% of DTIM for TDLS */
455 cmd.timing.max_offchan_duration =
456 cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
457 vif->bss_conf.beacon_int) / 2);
458
459 /* Switch time is the first element in the switch-timing IE. */
460 cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
461
462 info = IEEE80211_SKB_CB(skb);
463 hdr = (void *)skb->data;
464 if (info->control.hw_key) {
465 if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
466 rcu_read_unlock();
467 ret = -EINVAL;
468 goto out;
469 }
470 iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
471 }
472
473 iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
474 mvmsta->sta_id);
475
476 iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
477 hdr->frame_control);
478 rcu_read_unlock();
479
480 memcpy(cmd.frame.data, skb->data, skb->len);
481
482 ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
483 sizeof(cmd), &cmd);
484 if (ret) {
485 IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
486 ret);
487 goto out;
488 }
489
490 /* channel switch has started, update state */
491 if (type != TDLS_MOVE_CH) {
492 mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
493 iwl_mvm_tdls_update_cs_state(mvm,
494 type == TDLS_SEND_CHAN_SW_REQ ?
495 IWL_MVM_TDLS_SW_REQ_SENT :
496 IWL_MVM_TDLS_SW_REQ_RCVD);
497 } else {
498 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
499 }
500
501out:
502
503 /* channel switch failed - we are idle */
504 if (ret)
505 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
506
507 return ret;
508}
509
510void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
511{
512 struct iwl_mvm *mvm;
513 struct ieee80211_sta *sta;
514 struct iwl_mvm_sta *mvmsta;
515 struct ieee80211_vif *vif;
516 unsigned int delay;
517 int ret;
518
519 mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
520 mutex_lock(&mvm->mutex);
521
522 /* called after an active channel switch has finished or timed-out */
523 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
524
525 /* station might be gone, in that case do nothing */
526 if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
527 goto out;
528
529 sta = rcu_dereference_protected(
530 mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
531 lockdep_is_held(&mvm->mutex));
532 /* the station may not be here, but if it is, it must be a TDLS peer */
533 if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
534 goto out;
535
536 mvmsta = iwl_mvm_sta_from_mac80211(sta);
537 vif = mvmsta->vif;
538 ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
539 TDLS_SEND_CHAN_SW_REQ,
540 sta->addr,
541 mvm->tdls_cs.peer.initiator,
542 mvm->tdls_cs.peer.op_class,
543 &mvm->tdls_cs.peer.chandef,
544 0, 0, 0,
545 mvm->tdls_cs.peer.skb,
546 mvm->tdls_cs.peer.ch_sw_tm_ie);
547 if (ret)
548 IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
549
550 /* retry after a DTIM if we failed sending now */
551 delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
552 queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
553 msecs_to_jiffies(delay));
554out:
555 mutex_unlock(&mvm->mutex);
556}
557
558int
559iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
560 struct ieee80211_vif *vif,
561 struct ieee80211_sta *sta, u8 oper_class,
562 struct cfg80211_chan_def *chandef,
563 struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
564{
565 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
566 struct iwl_mvm_sta *mvmsta;
567 unsigned int delay;
568 int ret;
569
570 mutex_lock(&mvm->mutex);
571
572 IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
573 sta->addr, chandef->chan->center_freq, chandef->width);
574
575 /* we only support a single peer for channel switching */
576 if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
577 IWL_DEBUG_TDLS(mvm,
578 "Existing peer. Can't start switch with %pM\n",
579 sta->addr);
580 ret = -EBUSY;
581 goto out;
582 }
583
584 ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
585 TDLS_SEND_CHAN_SW_REQ,
586 sta->addr, sta->tdls_initiator,
587 oper_class, chandef, 0, 0, 0,
588 tmpl_skb, ch_sw_tm_ie);
589 if (ret)
590 goto out;
591
592 /*
593 * Mark the peer as "in tdls switch" for this vif. We only allow a
594 * single such peer per vif.
595 */
596 mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
597 if (!mvm->tdls_cs.peer.skb) {
598 ret = -ENOMEM;
599 goto out;
600 }
601
602 mvmsta = iwl_mvm_sta_from_mac80211(sta);
603 mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
604 mvm->tdls_cs.peer.chandef = *chandef;
605 mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
606 mvm->tdls_cs.peer.op_class = oper_class;
607 mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
608
609 /*
610 * Wait for 2 DTIM periods before attempting the next switch. The next
611 * switch will be made sooner if the current one completes before that.
612 */
613 delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
614 vif->bss_conf.beacon_int);
615 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
616 msecs_to_jiffies(delay));
617
618out:
619 mutex_unlock(&mvm->mutex);
620 return ret;
621}
622
623void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
624 struct ieee80211_vif *vif,
625 struct ieee80211_sta *sta)
626{
627 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
628 struct ieee80211_sta *cur_sta;
629 bool wait_for_phy = false;
630
631 mutex_lock(&mvm->mutex);
632
633 IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
634
635 /* we only support a single peer for channel switching */
636 if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
637 IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
638 goto out;
639 }
640
641 cur_sta = rcu_dereference_protected(
642 mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
643 lockdep_is_held(&mvm->mutex));
644 /* make sure it's the same peer */
645 if (cur_sta != sta)
646 goto out;
647
648 /*
649 * If we're currently in a switch because of the now canceled peer,
650 * wait a DTIM here to make sure the phy is back on the base channel.
651 * We can't otherwise force it.
652 */
653 if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
654 mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
655 wait_for_phy = true;
656
657 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
658 dev_kfree_skb(mvm->tdls_cs.peer.skb);
659 mvm->tdls_cs.peer.skb = NULL;
660
661out:
662 mutex_unlock(&mvm->mutex);
663
664 /* make sure the phy is on the base channel */
665 if (wait_for_phy)
666 msleep(TU_TO_MS(vif->bss_conf.dtim_period *
667 vif->bss_conf.beacon_int));
668
669 /* flush the channel switch state */
670 flush_delayed_work(&mvm->tdls_cs.dwork);
671
672 IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
673}
674
675void
676iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
677 struct ieee80211_vif *vif,
678 struct ieee80211_tdls_ch_sw_params *params)
679{
680 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
681 enum iwl_tdls_channel_switch_type type;
682 unsigned int delay;
683 const char *action_str =
684 params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
685 "REQ" : "RESP";
686
687 mutex_lock(&mvm->mutex);
688
689 IWL_DEBUG_TDLS(mvm,
690 "Received TDLS ch switch action %s from %pM status %d\n",
691 action_str, params->sta->addr, params->status);
692
693 /*
694 * we got a non-zero status from a peer we were switching to - move to
695 * the idle state and retry later
696 */
697 if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
698 params->status != 0 &&
699 mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
700 mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
701 struct ieee80211_sta *cur_sta;
702
703 /* make sure it's the same peer */
704 cur_sta = rcu_dereference_protected(
705 mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
706 lockdep_is_held(&mvm->mutex));
707 if (cur_sta == params->sta) {
708 iwl_mvm_tdls_update_cs_state(mvm,
709 IWL_MVM_TDLS_SW_IDLE);
710 goto retry;
711 }
712 }
713
714 type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
715 TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
716
717 iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
718 params->sta->tdls_initiator, 0,
719 params->chandef, params->timestamp,
720 params->switch_time,
721 params->switch_timeout,
722 params->tmpl_skb,
723 params->ch_sw_tm_ie);
724
725retry:
726 /* register a timeout in case we don't succeed in switching */
727 delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
728 1024 / 1000;
729 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
730 msecs_to_jiffies(delay));
731 mutex_unlock(&mvm->mutex);
732}
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
deleted file mode 100644
index 79ab6beb6b26..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/testmode.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __IWL_MVM_TESTMODE_H__
67#define __IWL_MVM_TESTMODE_H__
68
69/**
70 * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
71 * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
72 * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
73 * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
74 * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
75 */
76enum iwl_mvm_testmode_attrs {
77 IWL_MVM_TM_ATTR_UNSPEC,
78 IWL_MVM_TM_ATTR_CMD,
79 IWL_MVM_TM_ATTR_NOA_DURATION,
80 IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
81
82 /* keep last */
83 NUM_IWL_MVM_TM_ATTRS,
84 IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
85};
86
87/**
88 * enum iwl_mvm_testmode_commands - MVM testmode commands
89 * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
90 * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
91 */
92enum iwl_mvm_testmode_commands {
93 IWL_MVM_TM_CMD_SET_NOA,
94 IWL_MVM_TM_CMD_SET_BEACON_FILTER,
95};
96
97#endif /* __IWL_MVM_TESTMODE_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
deleted file mode 100644
index 7530eb23035d..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ /dev/null
@@ -1,872 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/jiffies.h>
67#include <net/mac80211.h>
68
69#include "iwl-notif-wait.h"
70#include "iwl-trans.h"
71#include "fw-api.h"
72#include "time-event.h"
73#include "mvm.h"
74#include "iwl-io.h"
75#include "iwl-prph.h"
76
77/*
78 * For the high priority TE use a time event type that has similar priority to
79 * the FW's action scan priority.
80 */
81#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
82#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
83
84void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
85 struct iwl_mvm_time_event_data *te_data)
86{
87 lockdep_assert_held(&mvm->time_event_lock);
88
89 if (!te_data->vif)
90 return;
91
92 list_del(&te_data->list);
93 te_data->running = false;
94 te_data->uid = 0;
95 te_data->id = TE_MAX;
96 te_data->vif = NULL;
97}
98
99void iwl_mvm_roc_done_wk(struct work_struct *wk)
100{
101 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
102 u32 queues = 0;
103
104 /*
105 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
106 * This will cause the TX path to drop offchannel transmissions.
107 * That would also be done by mac80211, but it is racy, in particular
108 * in the case that the time event actually completed in the firmware
109 * (which is handled in iwl_mvm_te_handle_notif).
110 */
111 if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
112 queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
113 iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
114 }
115 if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
116 queues |= BIT(mvm->aux_queue);
117 iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
118 }
119
120 synchronize_net();
121
122 /*
123 * Flush the offchannel queue -- this is called when the time
124 * event finishes or is canceled, so that frames queued for it
125 * won't get stuck on the queue and be transmitted in the next
126 * time event.
127 * We have to send the command asynchronously since this cannot
128 * be under the mutex for locking reasons, but that's not an
129 * issue as it will have to complete before the next command is
130 * executed, and a new time event means a new command.
131 */
132 iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
133}
134
135static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
136{
137 /*
138 * Of course, our status bit is just as racy as mac80211, so in
139 * addition, fire off the work struct which will drop all frames
140 * from the hardware queues that made it through the race. First
141 * it will of course synchronize the TX path to make sure that
142 * any *new* TX will be rejected.
143 */
144 schedule_work(&mvm->roc_done_wk);
145}
146
147static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
148{
149 struct ieee80211_vif *csa_vif;
150
151 rcu_read_lock();
152
153 csa_vif = rcu_dereference(mvm->csa_vif);
154 if (!csa_vif || !csa_vif->csa_active)
155 goto out_unlock;
156
157 IWL_DEBUG_TE(mvm, "CSA NOA started\n");
158
159 /*
160 * CSA NoA is started but we still have beacons to
161 * transmit on the current channel.
162 * So we just do nothing here and the switch
163 * will be performed on the last TBTT.
164 */
165 if (!ieee80211_csa_is_complete(csa_vif)) {
166 IWL_WARN(mvm, "CSA NOA started too early\n");
167 goto out_unlock;
168 }
169
170 ieee80211_csa_finish(csa_vif);
171
172 rcu_read_unlock();
173
174 RCU_INIT_POINTER(mvm->csa_vif, NULL);
175
176 return;
177
178out_unlock:
179 rcu_read_unlock();
180}
181
182static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
183 struct ieee80211_vif *vif,
184 const char *errmsg)
185{
186 if (vif->type != NL80211_IFTYPE_STATION)
187 return false;
188 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
189 return false;
190 if (errmsg)
191 IWL_ERR(mvm, "%s\n", errmsg);
192
193 iwl_mvm_connection_loss(mvm, vif, errmsg);
194 return true;
195}
196
197static void
198iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
199 struct iwl_mvm_time_event_data *te_data,
200 struct iwl_time_event_notif *notif)
201{
202 struct ieee80211_vif *vif = te_data->vif;
203 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
204
205 if (!notif->status)
206 IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
207
208 switch (te_data->vif->type) {
209 case NL80211_IFTYPE_AP:
210 if (!notif->status)
211 mvmvif->csa_failed = true;
212 iwl_mvm_csa_noa_start(mvm);
213 break;
214 case NL80211_IFTYPE_STATION:
215 if (!notif->status) {
216 iwl_mvm_connection_loss(mvm, vif,
217 "CSA TE failed to start");
218 break;
219 }
220 iwl_mvm_csa_client_absent(mvm, te_data->vif);
221 ieee80211_chswitch_done(te_data->vif, true);
222 break;
223 default:
224 /* should never happen */
225 WARN_ON_ONCE(1);
226 break;
227 }
228
229 /* we don't need it anymore */
230 iwl_mvm_te_clear_data(mvm, te_data);
231}
232
233static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
234 struct iwl_time_event_notif *notif,
235 struct iwl_mvm_time_event_data *te_data)
236{
237 struct iwl_fw_dbg_trigger_tlv *trig;
238 struct iwl_fw_dbg_trigger_time_event *te_trig;
239 int i;
240
241 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
242 return;
243
244 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
245 te_trig = (void *)trig->data;
246
247 if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig))
248 return;
249
250 for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
251 u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
252 u32 trig_action_bitmap =
253 le32_to_cpu(te_trig->time_events[i].action_bitmap);
254 u32 trig_status_bitmap =
255 le32_to_cpu(te_trig->time_events[i].status_bitmap);
256
257 if (trig_te_id != te_data->id ||
258 !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
259 !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
260 continue;
261
262 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
263 "Time event %d Action 0x%x received status: %d",
264 te_data->id,
265 le32_to_cpu(notif->action),
266 le32_to_cpu(notif->status));
267 break;
268 }
269}
270
271/*
272 * Handles a FW notification for an event that is known to the driver.
273 *
274 * @mvm: the mvm component
275 * @te_data: the time event data
276 * @notif: the notification data corresponding to the time event data.
277 */
278static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
279 struct iwl_mvm_time_event_data *te_data,
280 struct iwl_time_event_notif *notif)
281{
282 lockdep_assert_held(&mvm->time_event_lock);
283
284 IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
285 le32_to_cpu(notif->unique_id),
286 le32_to_cpu(notif->action));
287
288 iwl_mvm_te_check_trigger(mvm, notif, te_data);
289
290 /*
291 * The FW sends the start/end time event notifications even for events
292 * that it fails to schedule. This is indicated in the status field of
293 * the notification. This happens in cases where the scheduler cannot
294 * find a schedule that can handle the event (for example requesting
295 * P2P Device discoverability while there are other higher priority
296 * events in the system).
297 */
298 if (!le32_to_cpu(notif->status)) {
299 const char *msg;
300
301 if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
302 msg = "Time Event start notification failure";
303 else
304 msg = "Time Event end notification failure";
305
306 IWL_DEBUG_TE(mvm, "%s\n", msg);
307
308 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
309 iwl_mvm_te_clear_data(mvm, te_data);
310 return;
311 }
312 }
313
314 if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
315 IWL_DEBUG_TE(mvm,
316 "TE ended - current time %lu, estimated end %lu\n",
317 jiffies, te_data->end_jiffies);
318
319 switch (te_data->vif->type) {
320 case NL80211_IFTYPE_P2P_DEVICE:
321 ieee80211_remain_on_channel_expired(mvm->hw);
322 iwl_mvm_roc_finished(mvm);
323 break;
324 case NL80211_IFTYPE_STATION:
325 /*
326 * By now, we should have finished association
327 * and know the dtim period.
328 */
329 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
330 "No association and the time event is over already...");
331 break;
332 default:
333 break;
334 }
335
336 iwl_mvm_te_clear_data(mvm, te_data);
337 } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
338 te_data->running = true;
339 te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
340
341 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
342 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
343 iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
344 ieee80211_ready_on_channel(mvm->hw);
345 } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
346 iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
347 }
348 } else {
349 IWL_WARN(mvm, "Got TE with unknown action\n");
350 }
351}
352
353/*
354 * Handle an Aux ROC time event
355 */
356static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
357 struct iwl_time_event_notif *notif)
358{
359 struct iwl_mvm_time_event_data *te_data, *tmp;
360 bool aux_roc_te = false;
361
362 list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
363 if (le32_to_cpu(notif->unique_id) == te_data->uid) {
364 aux_roc_te = true;
365 break;
366 }
367 }
368	if (!aux_roc_te) /* Not an Aux ROC time event */
369 return -EINVAL;
370
371 iwl_mvm_te_check_trigger(mvm, notif, te_data);
372
373 if (!le32_to_cpu(notif->status)) {
374 IWL_DEBUG_TE(mvm,
375 "ERROR: Aux ROC Time Event %s notification failure\n",
376 (le32_to_cpu(notif->action) &
377 TE_V2_NOTIF_HOST_EVENT_START) ? "start" : "end");
378 return -EINVAL;
379 }
380
381 IWL_DEBUG_TE(mvm,
382 "Aux ROC time event notification - UID = 0x%x action %d\n",
383 le32_to_cpu(notif->unique_id),
384 le32_to_cpu(notif->action));
385
386 if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
387 /* End TE, notify mac80211 */
388 ieee80211_remain_on_channel_expired(mvm->hw);
389 iwl_mvm_roc_finished(mvm); /* flush aux queue */
390 list_del(&te_data->list); /* remove from list */
391 te_data->running = false;
392 te_data->vif = NULL;
393 te_data->uid = 0;
394 te_data->id = TE_MAX;
395 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
396 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
397 te_data->running = true;
398 iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
399 ieee80211_ready_on_channel(mvm->hw); /* Start TE */
400 } else {
401 IWL_DEBUG_TE(mvm,
402 "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
403 le32_to_cpu(notif->action));
404 return -EINVAL;
405 }
406
407 return 0;
408}
409
410/*
411 * The Rx handler for time event notifications
412 */
413void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
414 struct iwl_rx_cmd_buffer *rxb)
415{
416 struct iwl_rx_packet *pkt = rxb_addr(rxb);
417 struct iwl_time_event_notif *notif = (void *)pkt->data;
418 struct iwl_mvm_time_event_data *te_data, *tmp;
419
420 IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
421 le32_to_cpu(notif->unique_id),
422 le32_to_cpu(notif->action));
423
424 spin_lock_bh(&mvm->time_event_lock);
425	/* first check whether this is an Aux ROC time event notification */
426 if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
427 goto unlock;
428
429 list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
430 if (le32_to_cpu(notif->unique_id) == te_data->uid)
431 iwl_mvm_te_handle_notif(mvm, te_data, notif);
432 }
433unlock:
434 spin_unlock_bh(&mvm->time_event_lock);
435}
436
437static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
438 struct iwl_rx_packet *pkt, void *data)
439{
440 struct iwl_mvm *mvm =
441 container_of(notif_wait, struct iwl_mvm, notif_wait);
442 struct iwl_mvm_time_event_data *te_data = data;
443 struct iwl_time_event_notif *resp;
444 int resp_len = iwl_rx_packet_payload_len(pkt);
445
446 if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
447 return true;
448
449 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
450 IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
451 return true;
452 }
453
454 resp = (void *)pkt->data;
455
456 /* te_data->uid is already set in the TIME_EVENT_CMD response */
457 if (le32_to_cpu(resp->unique_id) != te_data->uid)
458 return false;
459
460 IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
461 te_data->uid);
462 if (!resp->status)
463 IWL_ERR(mvm,
464 "TIME_EVENT_NOTIFICATION received but not executed\n");
465
466 return true;
467}
468
469static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
470 struct iwl_rx_packet *pkt, void *data)
471{
472 struct iwl_mvm *mvm =
473 container_of(notif_wait, struct iwl_mvm, notif_wait);
474 struct iwl_mvm_time_event_data *te_data = data;
475 struct iwl_time_event_resp *resp;
476 int resp_len = iwl_rx_packet_payload_len(pkt);
477
478 if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
479 return true;
480
481 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
482 IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
483 return true;
484 }
485
486 resp = (void *)pkt->data;
487
488 /* we should never get a response to another TIME_EVENT_CMD here */
489 if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
490 return false;
491
492 te_data->uid = le32_to_cpu(resp->unique_id);
493 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
494 te_data->uid);
495 return true;
496}
497
498static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
499 struct ieee80211_vif *vif,
500 struct iwl_mvm_time_event_data *te_data,
501 struct iwl_time_event_cmd *te_cmd)
502{
503 static const u16 time_event_response[] = { TIME_EVENT_CMD };
504 struct iwl_notification_wait wait_time_event;
505 int ret;
506
507 lockdep_assert_held(&mvm->mutex);
508
509 IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
510 le32_to_cpu(te_cmd->duration));
511
512 spin_lock_bh(&mvm->time_event_lock);
513 if (WARN_ON(te_data->id != TE_MAX)) {
514 spin_unlock_bh(&mvm->time_event_lock);
515 return -EIO;
516 }
517 te_data->vif = vif;
518 te_data->duration = le32_to_cpu(te_cmd->duration);
519 te_data->id = le32_to_cpu(te_cmd->id);
520 list_add_tail(&te_data->list, &mvm->time_event_list);
521 spin_unlock_bh(&mvm->time_event_lock);
522
523 /*
524 * Use a notification wait, which really just processes the
525 * command response and doesn't wait for anything, in order
526 * to be able to process the response and get the UID inside
527 * the RX path. Using CMD_WANT_SKB doesn't work because it
528 * stores the buffer and then wakes up this thread, by which
529 * time another notification (that the time event started)
530 * might already be processed unsuccessfully.
531 */
532 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
533 time_event_response,
534 ARRAY_SIZE(time_event_response),
535 iwl_mvm_time_event_response, te_data);
536
537 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
538 sizeof(*te_cmd), te_cmd);
539 if (ret) {
540 IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
541 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
542 goto out_clear_te;
543 }
544
545 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
546 ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
547 /* should never fail */
548 WARN_ON_ONCE(ret);
549
550 if (ret) {
551 out_clear_te:
552 spin_lock_bh(&mvm->time_event_lock);
553 iwl_mvm_te_clear_data(mvm, te_data);
554 spin_unlock_bh(&mvm->time_event_lock);
555 }
556 return ret;
557}
558
559void iwl_mvm_protect_session(struct iwl_mvm *mvm,
560 struct ieee80211_vif *vif,
561 u32 duration, u32 min_duration,
562 u32 max_delay, bool wait_for_notif)
563{
564 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
565 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
566 const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
567 struct iwl_notification_wait wait_te_notif;
568 struct iwl_time_event_cmd time_cmd = {};
569
570 lockdep_assert_held(&mvm->mutex);
571
572 if (te_data->running &&
573 time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
574 IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
575 jiffies_to_msecs(te_data->end_jiffies - jiffies));
576 return;
577 }
578
579 if (te_data->running) {
580 IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
581 te_data->uid,
582 jiffies_to_msecs(te_data->end_jiffies - jiffies));
583 /*
584 * We don't have enough time, so
585 * cancel the current TE and issue a new one.
586 * Of course it would be better to remove the old one only
587 * when the new one is added, but we don't care if we are off
588 * channel for a bit. All we need to do, is not to return
589 * before we actually begin to be on the channel.
590 */
591 iwl_mvm_stop_session_protection(mvm, vif);
592 }
593
594 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
595 time_cmd.id_and_color =
596 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
597 time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
598
599 time_cmd.apply_time = cpu_to_le32(0);
600
601 time_cmd.max_frags = TE_V2_FRAG_NONE;
602 time_cmd.max_delay = cpu_to_le32(max_delay);
603	/* TODO: why do we need to set interval = bi if it is not periodic? */
604 time_cmd.interval = cpu_to_le32(1);
605 time_cmd.duration = cpu_to_le32(duration);
606 time_cmd.repeat = 1;
607 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
608 TE_V2_NOTIF_HOST_EVENT_END |
609 T2_V2_START_IMMEDIATELY);
610
611 if (!wait_for_notif) {
612 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
613 return;
614 }
615
616 /*
617 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
618 * right after we send the time event
619 */
620 iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
621 te_notif_response,
622 ARRAY_SIZE(te_notif_response),
623 iwl_mvm_te_notif, te_data);
624
625 /* If TE was sent OK - wait for the notification that started */
626 if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
627 IWL_ERR(mvm, "Failed to add TE to protect session\n");
628 iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
629 } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
630 TU_TO_JIFFIES(max_delay))) {
631 IWL_ERR(mvm, "Failed to protect session until TE\n");
632 }
633}
634
635static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
636 struct iwl_mvm_time_event_data *te_data,
637 u32 *uid)
638{
639 u32 id;
640
641 /*
642 * It is possible that by the time we got to this point the time
643 * event was already removed.
644 */
645 spin_lock_bh(&mvm->time_event_lock);
646
647 /* Save time event uid before clearing its data */
648 *uid = te_data->uid;
649 id = te_data->id;
650
651 /*
652 * The clear_data function handles time events that were already removed
653 */
654 iwl_mvm_te_clear_data(mvm, te_data);
655 spin_unlock_bh(&mvm->time_event_lock);
656
657 /*
658 * It is possible that by the time we try to remove it, the time event
659	 * has already ended and been removed. In such a case there is no need to
660 * send a removal command.
661 */
662 if (id == TE_MAX) {
663 IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
664 return false;
665 }
666
667 return true;
668}
669
670/*
671 * Explicit request to remove an aux ROC time event. The removal of a time
672 * event needs to be synchronized with the flow of a time event's end
673 * notification, which also removes the time event from the op mode
674 * data structures.
675 */
676static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
677 struct iwl_mvm_vif *mvmvif,
678 struct iwl_mvm_time_event_data *te_data)
679{
680 struct iwl_hs20_roc_req aux_cmd = {};
681 u32 uid;
682 int ret;
683
684 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
685 return;
686
687 aux_cmd.event_unique_id = cpu_to_le32(uid);
688 aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
689 aux_cmd.id_and_color =
690 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
691 IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
692 le32_to_cpu(aux_cmd.event_unique_id));
693 ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
694 sizeof(aux_cmd), &aux_cmd);
695
696 if (WARN_ON(ret))
697 return;
698}
699
700/*
701 * Explicit request to remove a time event. The removal of a time event needs to
702 * be synchronized with the flow of a time event's end notification, which also
703 * removes the time event from the op mode data structures.
704 */
705void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
706 struct iwl_mvm_vif *mvmvif,
707 struct iwl_mvm_time_event_data *te_data)
708{
709 struct iwl_time_event_cmd time_cmd = {};
710 u32 uid;
711 int ret;
712
713 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
714 return;
715
716 /* When we remove a TE, the UID is to be set in the id field */
717 time_cmd.id = cpu_to_le32(uid);
718 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
719 time_cmd.id_and_color =
720 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
721
722 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
723 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
724 sizeof(time_cmd), &time_cmd);
725 if (WARN_ON(ret))
726 return;
727}
728
729void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
730 struct ieee80211_vif *vif)
731{
732 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
733 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
734
735 lockdep_assert_held(&mvm->mutex);
736 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
737}
738
739int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
740 int duration, enum ieee80211_roc_type type)
741{
742 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
743 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
744 struct iwl_time_event_cmd time_cmd = {};
745
746 lockdep_assert_held(&mvm->mutex);
747 if (te_data->running) {
748 IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
749 return -EBUSY;
750 }
751
752 /*
753 * Flush the done work, just in case it's still pending, so that
754 * the work it does can complete and we can accept new frames.
755 */
756 flush_work(&mvm->roc_done_wk);
757
758 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
759 time_cmd.id_and_color =
760 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
761
762 switch (type) {
763 case IEEE80211_ROC_TYPE_NORMAL:
764 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
765 break;
766 case IEEE80211_ROC_TYPE_MGMT_TX:
767 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
768 break;
769 default:
770 WARN_ONCE(1, "Got an invalid ROC type\n");
771 return -EINVAL;
772 }
773
774 time_cmd.apply_time = cpu_to_le32(0);
775 time_cmd.interval = cpu_to_le32(1);
776
777 /*
778 * The P2P Device TEs can have lower priority than other events
779	 * that are being scheduled by the driver/fw, and thus they might not be
780	 * scheduled. To improve the chances of them being scheduled, allow them
781 * to be fragmented, and in addition allow them to be delayed.
782 */
783 time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
784 time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
785 time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
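	/*
	 * Editor's note (not part of the original file): a worked example of
	 * the three values above for a typical duration of 500 ms, assuming
	 * MSEC_TO_TU() converts at 1 TU = 1024 usec and that
	 * TE_V2_FRAG_ENDLESS is larger than any realistic fragment count:
	 * MSEC_TO_TU(500) ~= 488 TU, so max_frags = 488 / 50 = 9,
	 * max_delay = MSEC_TO_TU(250) ~= 244 TU and duration ~= 488 TU.
	 */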
786 time_cmd.repeat = 1;
787 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
788 TE_V2_NOTIF_HOST_EVENT_END |
789 T2_V2_START_IMMEDIATELY);
790
791 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
792}
793
794void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
795{
796 struct iwl_mvm_vif *mvmvif = NULL;
797 struct iwl_mvm_time_event_data *te_data;
798 bool is_p2p = false;
799
800 lockdep_assert_held(&mvm->mutex);
801
802 spin_lock_bh(&mvm->time_event_lock);
803
804 /*
805 * Iterate over the list of time events and find the time event that is
806 * associated with a P2P_DEVICE interface.
807 * This assumes that a P2P_DEVICE interface can have only a single time
808	 * event at any given time and this time event corresponds to a ROC
809 * request
810 */
811 list_for_each_entry(te_data, &mvm->time_event_list, list) {
812 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
813 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
814 is_p2p = true;
815 goto remove_te;
816 }
817 }
818
819	/* There can be at most one AUX ROC time event; we just use the
820	 * list to simplify/unify the code. Remove it if it exists.
821 */
822 te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
823 struct iwl_mvm_time_event_data,
824 list);
825 if (te_data)
826 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
827
828remove_te:
829 spin_unlock_bh(&mvm->time_event_lock);
830
831 if (!mvmvif) {
832 IWL_WARN(mvm, "No remain on channel event\n");
833 return;
834 }
835
836 if (is_p2p)
837 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
838 else
839 iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
840
841 iwl_mvm_roc_finished(mvm);
842}
843
844int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
845 struct ieee80211_vif *vif,
846 u32 duration, u32 apply_time)
847{
848 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
849 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
850 struct iwl_time_event_cmd time_cmd = {};
851
852 lockdep_assert_held(&mvm->mutex);
853
854 if (te_data->running) {
855 IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
856 return -EBUSY;
857 }
858
859 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
860 time_cmd.id_and_color =
861 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
862 time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
863 time_cmd.apply_time = cpu_to_le32(apply_time);
864 time_cmd.max_frags = TE_V2_FRAG_NONE;
865 time_cmd.duration = cpu_to_le32(duration);
866 time_cmd.repeat = 1;
867 time_cmd.interval = cpu_to_le32(1);
868 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
869 TE_V2_ABSENCE);
870
871 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
872}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
deleted file mode 100644
index cbdf8e52a5f1..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ /dev/null
@@ -1,249 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __time_event_h__
67#define __time_event_h__
68
69#include "fw-api.h"
70
71#include "mvm.h"
72
73/**
74 * DOC: Time Events - what are they?
75 *
76 * Time Events are a fw feature that allows the driver to control the presence
77 * of the device on the channel. Since the fw supports multiple channels
78 * concurrently, the fw may choose to jump to another channel at any time.
79 * In order to make sure that the fw is on a specific channel at a certain time
80 * and for a certain duration, the driver needs to issue a time event.
81 *
82 * The simplest example is for BSS association. The driver issues a time event,
83 * waits for it to start, and only then tells mac80211 that we can start the
84 * association. This way, we make sure that the association will be done
85 * smoothly and won't be interrupted by channel switch decided within the fw.
86 */
87
88 /**
89 * DOC: The flow against the fw
90 *
91 * When the driver needs to make sure we are in a certain channel, at a certain
92 * time and for a certain duration, it sends a Time Event. The flow against the
93 * fw goes like this:
94 * 1) Driver sends a TIME_EVENT_CMD to the fw
95 * 2) Driver gets the response for that command. This response contains the
96 * Unique ID (UID) of the event.
97 * 3) The fw sends notification when the event starts.
98 *
99 * Of course the API provides various options that allow the driver to control
100 * the parameters of the flow:
101 * What is the duration of the event?
102 * What is the start time of the event?
103 * Is there an end-time for the event?
104 * How much can the event be delayed?
105 * Can the event be split?
106 * If yes what is the maximal number of chunks?
107 * etc...
108 */
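A condensed sketch of the three-step flow above, added by the editor for illustration only and not part of the original header; it relies solely on names that appear in time-event.c (iwl_mvm_time_event_send_add() is the static helper there that sends the command and waits for the response carrying the UID):

static int example_te_flow(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			   struct iwl_mvm_time_event_data *te_data,
			   struct iwl_time_event_cmd *cmd)
{
	int ret;

	/* 1) send TIME_EVENT_CMD to the fw */
	ret = iwl_mvm_time_event_send_add(mvm, vif, te_data, cmd);
	if (ret)
		return ret;

	/* 2) the response to the command carried the Unique ID (UID);
	 *    it is stored in te_data->uid */

	/* 3) the fw sends TIME_EVENT_NOTIFICATION when the event starts;
	 *    iwl_mvm_protect_session() in time-event.c shows how to wait for
	 *    it with iwl_init_notification_wait()/iwl_wait_notification() */
	return 0;
}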
109
110/**
111 * DOC: Abstraction to the driver
112 *
113 * In order to simplify the use of time events to the rest of the driver,
114 * we abstract the use of time events. This component provides the functions
115 * needed by the driver.
116 */
117
118#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
119#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
120
121/**
122 * iwl_mvm_protect_session - start / extend the session protection.
123 * @mvm: the mvm component
124 * @vif: the virtual interface for which the session is issued
125 * @duration: the duration of the session in TU.
126 * @min_duration: will start a new session if the current session will end
127 * in less than min_duration.
128 * @max_delay: maximum delay before starting the time event (in TU)
129 * @wait_for_notif: true if it is required that a time event notification be
130 * waited for (that the time event has been scheduled before returning)
131 *
132 * This function can be used to start a session protection, which means that the
133 * fw will stay on the channel for %duration TU. This function
134 * can block (sleep) until the session starts. This function can also be used
135 * to extend a currently running session.
136 * This function is meant to be used for BSS association for example, where we
137 * want to make sure that the fw stays on the channel during the association.
138 */
139void iwl_mvm_protect_session(struct iwl_mvm *mvm,
140 struct ieee80211_vif *vif,
141 u32 duration, u32 min_duration,
142 u32 max_delay, bool wait_for_notif);
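A hypothetical caller sketch added by the editor (not part of the original header), showing how this function and the constants above might be used around association; the 500 TU max_delay is an illustrative value and MSEC_TO_TU() is assumed to be the ms-to-TU helper used in time-event.c:

static void example_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	mutex_lock(&mvm->mutex);
	/* stay on channel for ~500 ms worth of TU, renew only if less than
	 * ~400 ms worth of TU remain, allow up to 500 TU of scheduling delay
	 * and don't block waiting for the start notification */
	iwl_mvm_protect_session(mvm, vif,
				MSEC_TO_TU(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS),
				MSEC_TO_TU(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS),
				500, false);
	mutex_unlock(&mvm->mutex);

	/* once the flow that needed protection is done, it would be released
	 * (also under mvm->mutex) with:
	 *	iwl_mvm_stop_session_protection(mvm, vif);
	 */
}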
143
144/**
145 * iwl_mvm_stop_session_protection - cancel the session protection.
146 * @mvm: the mvm component
147 * @vif: the virtual interface for which the session is issued
148 *
149 * This function cancels the session protection, which is an act of good
150 * citizenship. If it is not needed any more it should be canceled because
151 * the other bindings wait for the medium during that time.
152 * This function doesn't sleep.
153 */
154void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
155 struct ieee80211_vif *vif);
156
157/*
158 * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
159 */
160void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
161 struct iwl_rx_cmd_buffer *rxb);
162
163/**
164 * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
165 * @mvm: the mvm component
166 * @vif: the virtual interface for which the roc is requested. It is assumed
167 * that the vif type is NL80211_IFTYPE_P2P_DEVICE
168 * @duration: the requested duration in milliseconds for the fw to be on the
169 * channel that is bound to the vif.
170 * @type: the remain on channel request type
171 *
172 * This function can be used to issue a remain on channel session,
173 * which means that the fw will stay on the channel for the requested %duration
174 * milliseconds. The function is async, meaning that it only issues the ROC
175 * request but does not wait for it to start. Once the FW is ready to serve the
176 * ROC request, it will issue a notification to the driver that it is on the
177 * requested channel. Once the FW completes the ROC request it will issue
178 * another notification to the driver.
179 */
180int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
181 int duration, enum ieee80211_roc_type type);
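A hypothetical op-mode sketch added by the editor (not part of the original header) of the ROC start/stop pair; the 300 ms duration is an illustrative value:

static int example_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	mutex_lock(&mvm->mutex);
	/* async: this only issues the request, the fw will notify the driver
	 * when it is actually on the requested channel */
	ret = iwl_mvm_start_p2p_roc(mvm, vif, 300, IEEE80211_ROC_TYPE_NORMAL);
	mutex_unlock(&mvm->mutex);
	if (ret)
		return ret;

	/* ... later, to cancel the ongoing ROC session (also async) ... */
	mutex_lock(&mvm->mutex);
	iwl_mvm_stop_roc(mvm);
	mutex_unlock(&mvm->mutex);
	return 0;
}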
182
183/**
184 * iwl_mvm_stop_roc - stop remain on channel functionality
185 * @mvm: the mvm component
186 *
187 * This function can be used to cancel an ongoing ROC session.
188 * The function is async, it will instruct the FW to stop serving the ROC
189 * session, but will not wait for the actual stopping of the session.
190 */
191void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
192
193/**
194 * iwl_mvm_remove_time_event - general function to clean up a time event
195 * @mvm: the mvm component
196 * @vif: the vif to which the time event belongs
197 * @te_data: the time event data that corresponds to that time event
198 *
199 * This function can be used to cancel a time event regardless of its type.
200 * It is useful for cleaning up time events running before removing an
201 * interface.
202 */
203void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
204 struct iwl_mvm_vif *mvmvif,
205 struct iwl_mvm_time_event_data *te_data);
206
207/**
208 * iwl_mvm_te_clear_data - remove time event from list
209 * @mvm: the mvm component
210 * @te_data: the time event data to remove
211 *
212 * This function is mostly internal; it is made available here only
213 * for firmware restart purposes.
214 */
215void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
216 struct iwl_mvm_time_event_data *te_data);
217
218void iwl_mvm_roc_done_wk(struct work_struct *wk);
219
220/**
221 * iwl_mvm_schedule_csa_period - request channel switch absence period
222 * @mvm: the mvm component
223 * @vif: the virtual interface for which the channel switch is issued
224 * @duration: the duration of the NoA in TU.
225 * @apply_time: NoA start time in GP2.
226 *
227 * This function is used to schedule a NoA time event as part of
228 * the channel switch flow.
229 */
230int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
231 struct ieee80211_vif *vif,
232 u32 duration, u32 apply_time);
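A minimal hypothetical example added by the editor (not part of the original header); the 100 TU absence is an illustrative value, and the caller is assumed to hold mvm->mutex and to have computed the GP2 apply time elsewhere:

static int example_schedule_csa(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				u32 apply_time_gp2)
{
	/* request a NoA of 100 TU starting at the given GP2 time */
	return iwl_mvm_schedule_csa_period(mvm, vif, 100, apply_time_gp2);
}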
233
234/**
235 * iwl_mvm_te_scheduled - check if the fw received the TE cmd
236 * @te_data: the time event data that corresponds to that time event
237 *
238 * This function returns true iff this TE is added to the fw.
239 */
240static inline bool
241iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
242{
243 if (!te_data)
244 return false;
245
246 return !!te_data->uid;
247}
248
249#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c
deleted file mode 100644
index 4007f1d421dd..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/tof.c
+++ /dev/null
@@ -1,306 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Deutschland GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2015 Intel Deutschland GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include "mvm.h"
64#include "fw-api-tof.h"
65
66#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
67
68void iwl_mvm_tof_init(struct iwl_mvm *mvm)
69{
70 struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
71
72 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
73 return;
74
75 memset(tof_data, 0, sizeof(*tof_data));
76
77 tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
78
79#ifdef CONFIG_IWLWIFI_DEBUGFS
80 if (IWL_MVM_TOF_IS_RESPONDER) {
81 tof_data->responder_cfg.sub_grp_cmd_id =
82 cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
83 tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
84 }
85#endif
86
87 tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
88 tof_data->range_req.req_timeout = 1;
89 tof_data->range_req.initiator = 1;
90 tof_data->range_req.report_policy = 3;
91
92 tof_data->range_req_ext.sub_grp_cmd_id =
93 cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
94
95 mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
96}
97
98void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
99{
100 struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
101
102 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
103 return;
104
105 memset(tof_data, 0, sizeof(*tof_data));
106 mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
107}
108
109static void iwl_tof_iterator(void *_data, u8 *mac,
110 struct ieee80211_vif *vif)
111{
112 bool *enabled = _data;
113
114 /* non bss vif exists */
115 if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
116 *enabled = false;
117}
118
119int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
120{
121 struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
122	bool enabled = true; /* assume ToF is allowed until the iterator finds a non-BSS vif */
123
124 lockdep_assert_held(&mvm->mutex);
125
126 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
127 return -EINVAL;
128
129 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
130 IEEE80211_IFACE_ITER_NORMAL,
131 iwl_tof_iterator, &enabled);
132 if (!enabled) {
133 IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
134 return -EINVAL;
135 }
136
137 mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
138 return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
139 IWL_ALWAYS_LONG_GROUP, 0),
140 0, sizeof(*cmd), cmd);
141}
142
143int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
144{
145 struct iwl_tof_range_abort_cmd cmd = {
146 .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
147 .request_id = id,
148 };
149
150 lockdep_assert_held(&mvm->mutex);
151
152 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
153 return -EINVAL;
154
155 if (id != mvm->tof_data.active_range_request) {
156 IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
157 id, mvm->tof_data.active_range_request);
158 return -EINVAL;
159 }
160
161 /* after abort is sent there's no active request anymore */
162 mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
163
164 return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
165 IWL_ALWAYS_LONG_GROUP, 0),
166 0, sizeof(cmd), &cmd);
167}
168
169#ifdef CONFIG_IWLWIFI_DEBUGFS
170int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
171 struct ieee80211_vif *vif)
172{
173 struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
174 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
175
176 lockdep_assert_held(&mvm->mutex);
177
178 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
179 return -EINVAL;
180
181 if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
182 !mvmvif->ap_ibss_active) {
183 IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
184 return -EIO;
185 }
186
187 cmd->sta_id = mvmvif->bcast_sta.sta_id;
188 memcpy(cmd->bssid, vif->addr, ETH_ALEN);
189 return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
190 IWL_ALWAYS_LONG_GROUP, 0),
191 0, sizeof(*cmd), cmd);
192}
193#endif
194
195int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
196 struct ieee80211_vif *vif)
197{
198 struct iwl_host_cmd cmd = {
199 .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
200 .len = { sizeof(mvm->tof_data.range_req), },
201 /* no copy because of the command size */
202 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
203 };
204
205 lockdep_assert_held(&mvm->mutex);
206
207 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
208 return -EINVAL;
209
210 if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
211 IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
212 return -EIO;
213 }
214
215 /* nesting of range requests is not supported in FW */
216 if (mvm->tof_data.active_range_request !=
217 IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
218 IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
219 mvm->tof_data.active_range_request);
220 return -EIO;
221 }
222
223 mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
224
225 cmd.data[0] = &mvm->tof_data.range_req;
226 return iwl_mvm_send_cmd(mvm, &cmd);
227}
228
229int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
230 struct ieee80211_vif *vif)
231{
232 lockdep_assert_held(&mvm->mutex);
233
234 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
235 return -EINVAL;
236
237 if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
238 IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
239 return -EIO;
240 }
241
242 return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
243 IWL_ALWAYS_LONG_GROUP, 0),
244 0, sizeof(mvm->tof_data.range_req_ext),
245 &mvm->tof_data.range_req_ext);
246}
247
248static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
249{
250 struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
251
252 if (resp->request_id != mvm->tof_data.active_range_request) {
253 IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
254 resp->request_id, mvm->tof_data.active_range_request);
255 return -EIO;
256 }
257
258 memcpy(&mvm->tof_data.range_resp, resp,
259 sizeof(struct iwl_tof_range_rsp_ntfy));
260 mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
261
262 return 0;
263}
264
265static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
266{
267 struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
268
269 IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
270 return 0;
271}
272
273static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
274{
275 struct iwl_tof_neighbor_report *report =
276 (struct iwl_tof_neighbor_report *)data;
277
278 IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
279 report->bssid, report->request_token, report->status);
280 return 0;
281}
282
283void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
284 struct iwl_rx_cmd_buffer *rxb)
285{
286 struct iwl_rx_packet *pkt = rxb_addr(rxb);
287 struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
288
289 lockdep_assert_held(&mvm->mutex);
290
291 switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
292 case TOF_RANGE_RESPONSE_NOTIF:
293 iwl_mvm_tof_range_resp(mvm, resp->data);
294 break;
295 case TOF_MCSI_DEBUG_NOTIF:
296 iwl_mvm_tof_mcsi_notif(mvm, resp->data);
297 break;
298 case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
299 iwl_mvm_tof_nb_report_notif(mvm, resp->data);
300 break;
301 default:
302 IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
303 resp->sub_grp_cmd_id);
304 break;
305 }
306}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h
deleted file mode 100644
index 9beebc33cb8d..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/tof.h
+++ /dev/null
@@ -1,94 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Deutschland GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2015 Intel Deutschland GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __tof_h__
64#define __tof_h__
65
66#include "fw-api-tof.h"
67
68struct iwl_mvm_tof_data {
69 struct iwl_tof_config_cmd tof_cfg;
70 struct iwl_tof_range_req_cmd range_req;
71 struct iwl_tof_range_req_ext_cmd range_req_ext;
72#ifdef CONFIG_IWLWIFI_DEBUGFS
73 struct iwl_tof_responder_config_cmd responder_cfg;
74#endif
75 struct iwl_tof_range_rsp_ntfy range_resp;
76 u8 last_abort_id;
77 u16 active_range_request;
78};
79
80void iwl_mvm_tof_init(struct iwl_mvm *mvm);
81void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
82int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
83int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
84int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
85 struct ieee80211_vif *vif);
86void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
87 struct iwl_rx_cmd_buffer *rxb);
88int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
89 struct ieee80211_vif *vif);
90#ifdef CONFIG_IWLWIFI_DEBUGFS
91int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
92 struct ieee80211_vif *vif);
93#endif
94#endif /* __tof_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
deleted file mode 100644
index cadfc0460597..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ /dev/null
@@ -1,460 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * Copyright(c) 2015 Intel Deutschland GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66
67#include "mvm.h"
68
69#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
70
71static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
72{
73 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
74 u32 duration = tt->params.ct_kill_duration;
75
76 if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
77 return;
78
79 IWL_ERR(mvm, "Enter CT Kill\n");
80 iwl_mvm_set_hw_ctkill_state(mvm, true);
81
82 tt->throttle = false;
83 tt->dynamic_smps = false;
84
85 /* Don't schedule an exit work if we're in test mode, since
86 * the temperature will not change unless we manually set it
87 * again (or disable testing).
88 */
89 if (!mvm->temperature_test)
90 schedule_delayed_work(&tt->ct_kill_exit,
91 round_jiffies_relative(duration * HZ));
92}
93
94static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
95{
96 if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
97 return;
98
99 IWL_ERR(mvm, "Exit CT Kill\n");
100 iwl_mvm_set_hw_ctkill_state(mvm, false);
101}
102
103void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
104{
105 /* ignore the notification if we are in test mode */
106 if (mvm->temperature_test)
107 return;
108
109 if (mvm->temperature == temp)
110 return;
111
112 mvm->temperature = temp;
113 iwl_mvm_tt_handler(mvm);
114}
115
116static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
117 struct iwl_rx_packet *pkt)
118{
119 struct iwl_dts_measurement_notif *notif;
120 int len = iwl_rx_packet_payload_len(pkt);
121 int temp;
122
123 if (WARN_ON_ONCE(len != sizeof(*notif))) {
124 IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
125 return -EINVAL;
126 }
127
128 notif = (void *)pkt->data;
129
130 temp = le32_to_cpu(notif->temp);
131
132 /* shouldn't be negative, but since it's s32, make sure it isn't */
133 if (WARN_ON_ONCE(temp < 0))
134 temp = 0;
135
136 IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp);
137
138 return temp;
139}
140
141static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
142 struct iwl_rx_packet *pkt, void *data)
143{
144 struct iwl_mvm *mvm =
145 container_of(notif_wait, struct iwl_mvm, notif_wait);
146 int *temp = data;
147 int ret;
148
149 ret = iwl_mvm_temp_notif_parse(mvm, pkt);
150 if (ret < 0)
151 return true;
152
153 *temp = ret;
154
155 return true;
156}
157
158void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
159{
160 struct iwl_rx_packet *pkt = rxb_addr(rxb);
161 int temp;
162
163 /* the notification is handled synchronously in ctkill, so skip here */
164 if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
165 return;
166
167 temp = iwl_mvm_temp_notif_parse(mvm, pkt);
168 if (temp < 0)
169 return;
170
171 iwl_mvm_tt_temp_changed(mvm, temp);
172}
173
174static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
175{
176 struct iwl_dts_measurement_cmd cmd = {
177 .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
178 };
179 struct iwl_ext_dts_measurement_cmd extcmd = {
180 .control_mode = cpu_to_le32(DTS_AUTOMATIC),
181 };
182 u32 cmdid;
183
184 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
185 cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
186 PHY_OPS_GROUP, 0);
187 else
188 cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
189
190 if (!fw_has_capa(&mvm->fw->ucode_capa,
191 IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
192 return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd);
193
194 return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
195}
196
197int iwl_mvm_get_temp(struct iwl_mvm *mvm)
198{
199 struct iwl_notification_wait wait_temp_notif;
200 static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
201 DTS_MEASUREMENT_NOTIF_WIDE) };
202 int ret, temp;
203
204 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
205 temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
206
207 lockdep_assert_held(&mvm->mutex);
208
209 iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
210 temp_notif, ARRAY_SIZE(temp_notif),
211 iwl_mvm_temp_notif_wait, &temp);
212
213 ret = iwl_mvm_get_temp_cmd(mvm);
214 if (ret) {
215 IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret);
216 iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif);
217 return ret;
218 }
219
220 ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
221 IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
222 if (ret) {
223 IWL_ERR(mvm, "Getting the temperature timed out\n");
224 return ret;
225 }
226
227 return temp;
228}
229
230static void check_exit_ctkill(struct work_struct *work)
231{
232 struct iwl_mvm_tt_mgmt *tt;
233 struct iwl_mvm *mvm;
234 u32 duration;
235 s32 temp;
236
237 tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
238 mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
239
240 duration = tt->params.ct_kill_duration;
241
242 mutex_lock(&mvm->mutex);
243
244 if (__iwl_mvm_mac_start(mvm))
245 goto reschedule;
246
247 /* make sure the device is available for direct read/writes */
248 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) {
249 __iwl_mvm_mac_stop(mvm);
250 goto reschedule;
251 }
252
253 temp = iwl_mvm_get_temp(mvm);
254
255 iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL);
256
257 __iwl_mvm_mac_stop(mvm);
258
259 if (temp < 0)
260 goto reschedule;
261
262 IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
263
264 if (temp <= tt->params.ct_kill_exit) {
265 mutex_unlock(&mvm->mutex);
266 iwl_mvm_exit_ctkill(mvm);
267 return;
268 }
269
270reschedule:
271 mutex_unlock(&mvm->mutex);
272 schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
273 round_jiffies(duration * HZ));
274}
275
276static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
277 struct ieee80211_vif *vif)
278{
279 struct iwl_mvm *mvm = _data;
280 enum ieee80211_smps_mode smps_mode;
281
282 lockdep_assert_held(&mvm->mutex);
283
284 if (mvm->thermal_throttle.dynamic_smps)
285 smps_mode = IEEE80211_SMPS_DYNAMIC;
286 else
287 smps_mode = IEEE80211_SMPS_AUTOMATIC;
288
289 if (vif->type != NL80211_IFTYPE_STATION)
290 return;
291
292 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode);
293}
294
295static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
296{
297 struct ieee80211_sta *sta;
298 struct iwl_mvm_sta *mvmsta;
299 int i, err;
300
301 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
302 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
303 lockdep_is_held(&mvm->mutex));
304 if (IS_ERR_OR_NULL(sta))
305 continue;
306 mvmsta = iwl_mvm_sta_from_mac80211(sta);
307 if (enable == mvmsta->tt_tx_protection)
308 continue;
309 err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
310 if (err) {
311 IWL_ERR(mvm, "Failed to %s Tx protection\n",
312 enable ? "enable" : "disable");
313 } else {
314 IWL_DEBUG_TEMP(mvm, "%s Tx protection\n",
315 enable ? "Enable" : "Disable");
316 mvmsta->tt_tx_protection = enable;
317 }
318 }
319}
320
321void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
322{
323 struct iwl_host_cmd cmd = {
324 .id = REPLY_THERMAL_MNG_BACKOFF,
325 .len = { sizeof(u32), },
326 .data = { &backoff, },
327 };
328
329 backoff = max(backoff, mvm->thermal_throttle.min_backoff);
330
331 if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
332 IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
333 backoff);
334 mvm->thermal_throttle.tx_backoff = backoff;
335 } else {
336 IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n");
337 }
338}
339
340void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
341{
342 struct iwl_tt_params *params = &mvm->thermal_throttle.params;
343 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
344 s32 temperature = mvm->temperature;
345 bool throttle_enable = false;
346 int i;
347 u32 tx_backoff;
348
349 IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature);
350
351 if (params->support_ct_kill && temperature >= params->ct_kill_entry) {
352 iwl_mvm_enter_ctkill(mvm);
353 return;
354 }
355
356 if (params->support_ct_kill &&
357 temperature <= params->ct_kill_exit) {
358 iwl_mvm_exit_ctkill(mvm);
359 return;
360 }
361
362 if (params->support_dynamic_smps) {
363 if (!tt->dynamic_smps &&
364 temperature >= params->dynamic_smps_entry) {
365 IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n");
366 tt->dynamic_smps = true;
367 ieee80211_iterate_active_interfaces_atomic(
368 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
369 iwl_mvm_tt_smps_iterator, mvm);
370 throttle_enable = true;
371 } else if (tt->dynamic_smps &&
372 temperature <= params->dynamic_smps_exit) {
373 IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
374 tt->dynamic_smps = false;
375 ieee80211_iterate_active_interfaces_atomic(
376 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
377 iwl_mvm_tt_smps_iterator, mvm);
378 }
379 }
380
381 if (params->support_tx_protection) {
382 if (temperature >= params->tx_protection_entry) {
383 iwl_mvm_tt_tx_protection(mvm, true);
384 throttle_enable = true;
385 } else if (temperature <= params->tx_protection_exit) {
386 iwl_mvm_tt_tx_protection(mvm, false);
387 }
388 }
389
390 if (params->support_tx_backoff) {
391 tx_backoff = tt->min_backoff;
392 for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
393 if (temperature < params->tx_backoff[i].temperature)
394 break;
395 tx_backoff = max(tt->min_backoff,
396 params->tx_backoff[i].backoff);
397 }
398 if (tx_backoff != tt->min_backoff)
399 throttle_enable = true;
400 if (tt->tx_backoff != tx_backoff)
401 iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
402 }
403
404 if (!tt->throttle && throttle_enable) {
405 IWL_WARN(mvm,
406 "Due to high temperature thermal throttling initiated\n");
407 tt->throttle = true;
408 } else if (tt->throttle && !tt->dynamic_smps &&
409 tt->tx_backoff == tt->min_backoff &&
410 temperature <= params->tx_protection_exit) {
411 IWL_WARN(mvm,
412 "Temperature is back to normal thermal throttling stopped\n");
413 tt->throttle = false;
414 }
415}
416
417static const struct iwl_tt_params iwl_mvm_default_tt_params = {
418 .ct_kill_entry = 118,
419 .ct_kill_exit = 96,
420 .ct_kill_duration = 5,
421 .dynamic_smps_entry = 114,
422 .dynamic_smps_exit = 110,
423 .tx_protection_entry = 114,
424 .tx_protection_exit = 108,
425 .tx_backoff = {
426 {.temperature = 112, .backoff = 200},
427 {.temperature = 113, .backoff = 600},
428 {.temperature = 114, .backoff = 1200},
429 {.temperature = 115, .backoff = 2000},
430 {.temperature = 116, .backoff = 4000},
431 {.temperature = 117, .backoff = 10000},
432 },
433 .support_ct_kill = true,
434 .support_dynamic_smps = true,
435 .support_tx_protection = true,
436 .support_tx_backoff = true,
437};
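/*
 * Editor's note (not part of the original file): reading the defaults above
 * together with iwl_mvm_tt_handler(), the driver enters CT-kill at 118 and
 * re-checks roughly every 5 seconds (ct_kill_duration) until the temperature
 * drops back to 96; dynamic SMPS and Tx protection both engage around 114,
 * and the Tx backoff value sent in REPLY_THERMAL_MNG_BACKOFF grows from 200
 * at 112 up to 10000 at 117.
 */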
438
439void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
440{
441 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
442
443 IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
444
445 if (mvm->cfg->thermal_params)
446 tt->params = *mvm->cfg->thermal_params;
447 else
448 tt->params = iwl_mvm_default_tt_params;
449
450 tt->throttle = false;
451 tt->dynamic_smps = false;
452 tt->min_backoff = min_backoff;
453 INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
454}
455
456void iwl_mvm_tt_exit(struct iwl_mvm *mvm)
457{
458 cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
459 IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
460}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
deleted file mode 100644
index c652a66be803..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ /dev/null
@@ -1,1115 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/ieee80211.h>
66#include <linux/etherdevice.h>
67
68#include "iwl-trans.h"
69#include "iwl-eeprom-parse.h"
70#include "mvm.h"
71#include "sta.h"
72
73static void
74iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
75 u16 tid, u16 ssn)
76{
77 struct iwl_fw_dbg_trigger_tlv *trig;
78 struct iwl_fw_dbg_trigger_ba *ba_trig;
79
80 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
81 return;
82
83 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
84 ba_trig = (void *)trig->data;
85
86 if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
87 return;
88
89 if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
90 return;
91
92 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
93 "BAR sent to %pM, tid %d, ssn %d",
94 addr, tid, ssn);
95}
96
97/*
98 * Sets most of the Tx cmd's fields
99 */
100void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
101 struct iwl_tx_cmd *tx_cmd,
102 struct ieee80211_tx_info *info, u8 sta_id)
103{
104 struct ieee80211_hdr *hdr = (void *)skb->data;
105 __le16 fc = hdr->frame_control;
106 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
107 u32 len = skb->len + FCS_LEN;
108 u8 ac;
109
110 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
111 tx_flags |= TX_CMD_FLG_ACK;
112 else
113 tx_flags &= ~TX_CMD_FLG_ACK;
114
115 if (ieee80211_is_probe_resp(fc))
116 tx_flags |= TX_CMD_FLG_TSF;
117
118 if (ieee80211_has_morefrags(fc))
119 tx_flags |= TX_CMD_FLG_MORE_FRAG;
120
121 if (ieee80211_is_data_qos(fc)) {
122 u8 *qc = ieee80211_get_qos_ctl(hdr);
123 tx_cmd->tid_tspec = qc[0] & 0xf;
124 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
125 } else if (ieee80211_is_back_req(fc)) {
126 struct ieee80211_bar *bar = (void *)skb->data;
127 u16 control = le16_to_cpu(bar->control);
128 u16 ssn = le16_to_cpu(bar->start_seq_num);
129
130 tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
131 tx_cmd->tid_tspec = (control &
132 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
133 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
134 WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
135 iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
136 ssn);
137 } else {
138 tx_cmd->tid_tspec = IWL_TID_NON_QOS;
139 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
140 tx_flags |= TX_CMD_FLG_SEQ_CTL;
141 else
142 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
143 }
144
145	/* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
146 if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
147 ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
148 else
149 ac = tid_to_mac80211_ac[0];
150
151 tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
152 TX_CMD_FLG_BT_PRIO_POS;
153
154 if (ieee80211_is_mgmt(fc)) {
155 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
156 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
157 else if (ieee80211_is_action(fc))
158 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
159 else
160 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
161
162		/* The spec allows Action frames in A-MPDU; we don't
163		 * support that.
164		 */
165 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
166 } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
167 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
168 } else {
169 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
170 }
171
172 if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
173 !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
174 tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
175
176 if (fw_has_capa(&mvm->fw->ucode_capa,
177 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
178 ieee80211_action_contains_tpc(skb))
179 tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
180
181 tx_cmd->tx_flags = cpu_to_le32(tx_flags);
182 /* Total # bytes to be transmitted */
183 tx_cmd->len = cpu_to_le16((u16)skb->len);
184 tx_cmd->next_frame_len = 0;
185 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
186 tx_cmd->sta_id = sta_id;
187}
188
189/*
190 * Sets the fields in the Tx cmd that are rate related
191 */
192void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
193 struct ieee80211_tx_info *info,
194 struct ieee80211_sta *sta, __le16 fc)
195{
196 u32 rate_flags;
197 int rate_idx;
198 u8 rate_plcp;
199
200 /* Set retry limit on RTS packets */
201 tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
202
203	/* Set retry limit on DATA packets and Probe Responses */
204 if (ieee80211_is_probe_resp(fc)) {
205 tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
206 tx_cmd->rts_retry_limit =
207 min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
208 } else if (ieee80211_is_back_req(fc)) {
209 tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
210 } else {
211 tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
212 }
213
214 /*
215 * for data packets, rate info comes from the table inside the fw. This
216 * table is controlled by LINK_QUALITY commands
217 */
218
219 if (ieee80211_is_data(fc) && sta) {
220 tx_cmd->initial_rate_index = 0;
221 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
222 return;
223 } else if (ieee80211_is_back_req(fc)) {
224 tx_cmd->tx_flags |=
225 cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
226 }
227
228 /* HT rate doesn't make sense for a non data frame */
229 WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
230 "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
231 info->control.rates[0].flags,
232 info->control.rates[0].idx,
233 le16_to_cpu(fc));
234
235 rate_idx = info->control.rates[0].idx;
236 /* if the rate isn't a well known legacy rate, take the lowest one */
237 if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
238 rate_idx = rate_lowest_index(
239 &mvm->nvm_data->bands[info->band], sta);
240
241 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
242 if (info->band == IEEE80211_BAND_5GHZ)
243 rate_idx += IWL_FIRST_OFDM_RATE;
244
245 /* For 2.4 GHZ band, check that there is no need to remap */
246 BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
247
248 /* Get PLCP rate for tx_cmd->rate_n_flags */
249 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
250
251 mvm->mgmt_last_antenna_idx =
252 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
253 mvm->mgmt_last_antenna_idx);
254
255 if (info->band == IEEE80211_BAND_2GHZ &&
256 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
257 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
258 else
259 rate_flags =
260 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
261
262 /* Set CCK flag as needed */
263 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
264 rate_flags |= RATE_MCS_CCK_MSK;
265
266 /* Set the rate in the TX cmd */
267 tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
268}
269
270/*
271 * Sets the fields in the Tx cmd that are crypto related
272 */
273static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
274 struct ieee80211_tx_info *info,
275 struct iwl_tx_cmd *tx_cmd,
276 struct sk_buff *skb_frag,
277 int hdrlen)
278{
279 struct ieee80211_key_conf *keyconf = info->control.hw_key;
280 u8 *crypto_hdr = skb_frag->data + hdrlen;
281 u64 pn;
282
283 switch (keyconf->cipher) {
284 case WLAN_CIPHER_SUITE_CCMP:
285 case WLAN_CIPHER_SUITE_CCMP_256:
286 iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
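		/*
		 * The assignments below build the 8-byte CCMP header:
		 * PN0/PN1 in bytes 0-1, byte 2 reserved, byte 3 carries the
		 * ExtIV bit (0x20) plus the key index, and PN2..PN5 fill
		 * bytes 4-7.
		 */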
287 pn = atomic64_inc_return(&keyconf->tx_pn);
288 crypto_hdr[0] = pn;
289 crypto_hdr[2] = 0;
290 crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
291 crypto_hdr[1] = pn >> 8;
292 crypto_hdr[4] = pn >> 16;
293 crypto_hdr[5] = pn >> 24;
294 crypto_hdr[6] = pn >> 32;
295 crypto_hdr[7] = pn >> 40;
296 break;
297
298 case WLAN_CIPHER_SUITE_TKIP:
299 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
300 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
301 break;
302
303 case WLAN_CIPHER_SUITE_WEP104:
304 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
305 /* fall through */
306 case WLAN_CIPHER_SUITE_WEP40:
307 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
308 ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
309 TX_CMD_SEC_WEP_KEY_IDX_MSK);
310
311 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
312 break;
313 default:
314 tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
315 }
316}
317
318/*
319 * Allocates and sets the Tx cmd the driver data pointers in the skb
320 */
321static struct iwl_device_cmd *
322iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
323 int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
324{
325 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
326 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
327 struct iwl_device_cmd *dev_cmd;
328 struct iwl_tx_cmd *tx_cmd;
329
330 dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
331
332 if (unlikely(!dev_cmd))
333 return NULL;
334
335 memset(dev_cmd, 0, sizeof(*dev_cmd));
336 dev_cmd->hdr.cmd = TX_CMD;
337 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
338
339 if (info->control.hw_key)
340 iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
341
342 iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
343
344 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
345
346 memset(&info->status, 0, sizeof(info->status));
347
348 info->driver_data[0] = NULL;
349 info->driver_data[1] = dev_cmd;
350
351 return dev_cmd;
352}
353
354int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
355{
356 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
357 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
358 struct iwl_device_cmd *dev_cmd;
359 struct iwl_tx_cmd *tx_cmd;
360 u8 sta_id;
361 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
362
363 if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
364 return -1;
365
366 if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
367 (!info->control.vif ||
368 info->hw_queue != info->control.vif->cab_queue)))
369 return -1;
370
371 /*
372	 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be sent
373	 * from 2 different types of vifs, P2P & STATION. P2P uses the
374	 * offchannel queue. STATION (HS2.0) uses the auxiliary context of the
375	 * FW, and hence needs to be sent on the aux queue.
376 */
377 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
378 info->control.vif->type == NL80211_IFTYPE_STATION)
379 IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
380
381 /*
382 * If the interface on which the frame is sent is the P2P_DEVICE
383 * or an AP/GO interface use the broadcast station associated
384 * with it; otherwise if the interface is a managed interface
385 * use the AP station associated with it for multicast traffic
386	 * (this is not possible for unicast packets as TDLS discovery
387	 * responses are sent without a station entry); otherwise use the
388 * AUX station.
389 */
390 sta_id = mvm->aux_sta.sta_id;
391 if (info->control.vif) {
392 struct iwl_mvm_vif *mvmvif =
393 iwl_mvm_vif_from_mac80211(info->control.vif);
394
395 if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
396 info->control.vif->type == NL80211_IFTYPE_AP)
397 sta_id = mvmvif->bcast_sta.sta_id;
398 else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
399 is_multicast_ether_addr(hdr->addr1)) {
400 u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
401
402 if (ap_sta_id != IWL_MVM_STATION_COUNT)
403 sta_id = ap_sta_id;
404 }
405 }
406
407 IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
408
409 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
410 if (!dev_cmd)
411 return -1;
412
413 /* From now on, we cannot access info->control */
414 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
415
416 /* Copy MAC header from skb into command buffer */
417 memcpy(tx_cmd->hdr, hdr, hdrlen);
418
419 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
420 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
421 return -1;
422 }
423
424 return 0;
425}
426
427/*
428 * Prepares the Tx command for the frame and sends it to the transport layer
429 */
430int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
431 struct ieee80211_sta *sta)
432{
433 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
434 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
435 struct iwl_mvm_sta *mvmsta;
436 struct iwl_device_cmd *dev_cmd;
437 struct iwl_tx_cmd *tx_cmd;
438 __le16 fc;
439 u16 seq_number = 0;
440 u8 tid = IWL_MAX_TID_COUNT;
441 u8 txq_id = info->hw_queue;
442 bool is_data_qos = false, is_ampdu = false;
443 int hdrlen;
444
445 mvmsta = iwl_mvm_sta_from_mac80211(sta);
446 fc = hdr->frame_control;
447 hdrlen = ieee80211_hdrlen(fc);
448
449 if (WARN_ON_ONCE(!mvmsta))
450 return -1;
451
452 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
453 return -1;
454
455 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
456 if (!dev_cmd)
457 goto drop;
458
459 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
460 /* From now on, we cannot access info->control */
461
462 /*
463 * we handle that entirely ourselves -- for uAPSD the firmware
464 * will always send a notification, and for PS-Poll responses
465 * we'll notify mac80211 when getting frame status
466 */
467 info->flags &= ~IEEE80211_TX_STATUS_EOSP;
468
469 spin_lock(&mvmsta->lock);
470
471 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
472 u8 *qc = NULL;
473 qc = ieee80211_get_qos_ctl(hdr);
474 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
475 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
476 goto drop_unlock_sta;
477
478 seq_number = mvmsta->tid_data[tid].seq_number;
479 seq_number &= IEEE80211_SCTL_SEQ;
480 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
481 hdr->seq_ctrl |= cpu_to_le16(seq_number);
482 is_data_qos = true;
483 is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
484 }
485
486 /* Copy MAC header from skb into command buffer */
487 memcpy(tx_cmd->hdr, hdr, hdrlen);
488
489 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
490
491 if (sta->tdls) {
492 /* default to TID 0 for non-QoS packets */
493 u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
494
495 txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
496 }
497
498 if (is_ampdu) {
499 if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
500 goto drop_unlock_sta;
501 txq_id = mvmsta->tid_data[tid].txq_id;
502 }
503
504 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
505 tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
506
507 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
508 goto drop_unlock_sta;
509
510 if (is_data_qos && !ieee80211_has_morefrags(fc))
511 mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
512
513 spin_unlock(&mvmsta->lock);
514
515 if (txq_id < mvm->first_agg_queue)
516 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
517
518 return 0;
519
520drop_unlock_sta:
521 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
522 spin_unlock(&mvmsta->lock);
523drop:
524 return -1;
525}
526
527static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
528 struct ieee80211_sta *sta, u8 tid)
529{
530 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
531 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
532 struct ieee80211_vif *vif = mvmsta->vif;
533
534 lockdep_assert_held(&mvmsta->lock);
535
536 if ((tid_data->state == IWL_AGG_ON ||
537 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
538 iwl_mvm_tid_queued(tid_data) == 0) {
539 /*
540 * Now that this aggregation queue is empty tell mac80211 so it
541 * knows we no longer have frames buffered for the station on
542 * this TID (for the TIM bitmap calculation.)
543 */
544 ieee80211_sta_set_buffered(sta, tid, false);
545 }
546
547 if (tid_data->ssn != tid_data->next_reclaimed)
548 return;
549
550 switch (tid_data->state) {
551 case IWL_EMPTYING_HW_QUEUE_ADDBA:
552 IWL_DEBUG_TX_QUEUES(mvm,
553 "Can continue addBA flow ssn = next_recl = %d\n",
554 tid_data->next_reclaimed);
555 tid_data->state = IWL_AGG_STARTING;
556 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
557 break;
558
559 case IWL_EMPTYING_HW_QUEUE_DELBA:
560 IWL_DEBUG_TX_QUEUES(mvm,
561 "Can continue DELBA flow ssn = next_recl = %d\n",
562 tid_data->next_reclaimed);
563 iwl_mvm_disable_txq(mvm, tid_data->txq_id,
564 vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
565 CMD_ASYNC);
566 tid_data->state = IWL_AGG_OFF;
567 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
568 break;
569
570 default:
571 break;
572 }
573}
574
575#ifdef CONFIG_IWLWIFI_DEBUG
576const char *iwl_mvm_get_tx_fail_reason(u32 status)
577{
578#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
579#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
580
581 switch (status & TX_STATUS_MSK) {
582 case TX_STATUS_SUCCESS:
583 return "SUCCESS";
584 TX_STATUS_POSTPONE(DELAY);
585 TX_STATUS_POSTPONE(FEW_BYTES);
586 TX_STATUS_POSTPONE(BT_PRIO);
587 TX_STATUS_POSTPONE(QUIET_PERIOD);
588 TX_STATUS_POSTPONE(CALC_TTAK);
589 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
590 TX_STATUS_FAIL(SHORT_LIMIT);
591 TX_STATUS_FAIL(LONG_LIMIT);
592 TX_STATUS_FAIL(UNDERRUN);
593 TX_STATUS_FAIL(DRAIN_FLOW);
594 TX_STATUS_FAIL(RFKILL_FLUSH);
595 TX_STATUS_FAIL(LIFE_EXPIRE);
596 TX_STATUS_FAIL(DEST_PS);
597 TX_STATUS_FAIL(HOST_ABORTED);
598 TX_STATUS_FAIL(BT_RETRY);
599 TX_STATUS_FAIL(STA_INVALID);
600 TX_STATUS_FAIL(FRAG_DROPPED);
601 TX_STATUS_FAIL(TID_DISABLE);
602 TX_STATUS_FAIL(FIFO_FLUSHED);
603 TX_STATUS_FAIL(SMALL_CF_POLL);
604 TX_STATUS_FAIL(FW_DROP);
605 TX_STATUS_FAIL(STA_COLOR_MISMATCH);
606 }
607
608 return "UNKNOWN";
609
610#undef TX_STATUS_FAIL
611#undef TX_STATUS_POSTPONE
612}
613#endif /* CONFIG_IWLWIFI_DEBUG */
614
615void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
616 enum ieee80211_band band,
617 struct ieee80211_tx_rate *r)
618{
619 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
620 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
621 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
622 case RATE_MCS_CHAN_WIDTH_20:
623 break;
624 case RATE_MCS_CHAN_WIDTH_40:
625 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
626 break;
627 case RATE_MCS_CHAN_WIDTH_80:
628 r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
629 break;
630 case RATE_MCS_CHAN_WIDTH_160:
631 r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
632 break;
633 }
634 if (rate_n_flags & RATE_MCS_SGI_MSK)
635 r->flags |= IEEE80211_TX_RC_SHORT_GI;
636 if (rate_n_flags & RATE_MCS_HT_MSK) {
637 r->flags |= IEEE80211_TX_RC_MCS;
638 r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
639 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
640 ieee80211_rate_set_vht(
641 r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
642 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
643 RATE_VHT_MCS_NSS_POS) + 1);
644 r->flags |= IEEE80211_TX_RC_VHT_MCS;
645 } else {
646 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
647 band);
648 }
649}
650
651/**
652 * translate ucode response to mac80211 tx status control values
653 */
654static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
655 struct ieee80211_tx_info *info)
656{
657 struct ieee80211_tx_rate *r = &info->status.rates[0];
658
659 info->status.antenna =
660 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
661 iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
662}
663
664static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
665 struct iwl_rx_packet *pkt)
666{
667 struct ieee80211_sta *sta;
668 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
669 int txq_id = SEQ_TO_QUEUE(sequence);
670 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
671 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
672 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
673 u32 status = le16_to_cpu(tx_resp->status.status);
674 u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
675 struct iwl_mvm_sta *mvmsta;
676 struct sk_buff_head skbs;
677 u8 skb_freed = 0;
678 u16 next_reclaimed, seq_ctl;
679
680 __skb_queue_head_init(&skbs);
681
682 seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
683
684 /* we can free until ssn % q.n_bd not inclusive */
685 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
686
687 while (!skb_queue_empty(&skbs)) {
688 struct sk_buff *skb = __skb_dequeue(&skbs);
689 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
690
691 skb_freed++;
692
693 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
694
695 memset(&info->status, 0, sizeof(info->status));
696
697 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
698
699 /* inform mac80211 about what happened with the frame */
700 switch (status & TX_STATUS_MSK) {
701 case TX_STATUS_SUCCESS:
702 case TX_STATUS_DIRECT_DONE:
703 info->flags |= IEEE80211_TX_STAT_ACK;
704 break;
705 case TX_STATUS_FAIL_DEST_PS:
706 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
707 break;
708 default:
709 break;
710 }
711
712 info->status.rates[0].count = tx_resp->failure_frame + 1;
713 iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
714 info);
715 info->status.status_driver_data[1] =
716 (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
717
718 /* Single frame failure in an AMPDU queue => send BAR */
719 if (txq_id >= mvm->first_agg_queue &&
720 !(info->flags & IEEE80211_TX_STAT_ACK) &&
721 !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
722 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
723
724 /* W/A FW bug: seq_ctl is wrong when the status isn't success */
725 if (status != TX_STATUS_SUCCESS) {
726 struct ieee80211_hdr *hdr = (void *)skb->data;
727 seq_ctl = le16_to_cpu(hdr->seq_ctrl);
728 }
729
730 /*
731 * TODO: this is not accurate if we are freeing more than one
732 * packet.
733 */
734 info->status.tx_time =
735 le16_to_cpu(tx_resp->wireless_media_time);
736 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
737 info->status.status_driver_data[0] =
738 (void *)(uintptr_t)tx_resp->reduced_tpc;
739
740 ieee80211_tx_status(mvm->hw, skb);
741 }
742
743 if (txq_id >= mvm->first_agg_queue) {
744 /* If this is an aggregation queue, we use the ssn since:
745 * ssn = wifi seq_num % 256.
746 * The seq_ctl is the sequence control of the packet to which
747 * this Tx response relates. But if there is a hole in the
748 * bitmap of the BA we received, this Tx response may allow to
749 * reclaim the hole and all the subsequent packets that were
750 * already acked. In that case, seq_ctl != ssn, and the next
751 * packet to be reclaimed will be ssn and not seq_ctl. In that
752 * case, several packets will be reclaimed even if
753 * frame_count = 1.
754 *
755		 * The ssn is the index (% 256) of the latest packet that has
756		 * been treated (acked / dropped) + 1.
757 */
758 next_reclaimed = ssn;
759 } else {
760 /* The next packet to be reclaimed is the one after this one */
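		/* e.g. seq_ctl 0x0120 (SN 0x12, frag 0) + 0x10 yields SN 0x13 */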
761 next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
762 }
763
764 IWL_DEBUG_TX_REPLY(mvm,
765 "TXQ %d status %s (0x%08x)\n",
766 txq_id, iwl_mvm_get_tx_fail_reason(status), status);
767
768 IWL_DEBUG_TX_REPLY(mvm,
769 "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
770 le32_to_cpu(tx_resp->initial_rate),
771 tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
772 ssn, next_reclaimed, seq_ctl);
773
774 rcu_read_lock();
775
776 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
777 /*
778 * sta can't be NULL otherwise it'd mean that the sta has been freed in
779 * the firmware while we still have packets for it in the Tx queues.
780 */
781 if (WARN_ON_ONCE(!sta))
782 goto out;
783
784 if (!IS_ERR(sta)) {
785 mvmsta = iwl_mvm_sta_from_mac80211(sta);
786
787 if (tid != IWL_TID_NON_QOS) {
788 struct iwl_mvm_tid_data *tid_data =
789 &mvmsta->tid_data[tid];
790
791 spin_lock_bh(&mvmsta->lock);
792 tid_data->next_reclaimed = next_reclaimed;
793 IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
794 next_reclaimed);
795 iwl_mvm_check_ratid_empty(mvm, sta, tid);
796 spin_unlock_bh(&mvmsta->lock);
797 }
798
799 if (mvmsta->next_status_eosp) {
800 mvmsta->next_status_eosp = false;
801 ieee80211_sta_eosp(sta);
802 }
803 } else {
804 mvmsta = NULL;
805 }
806
807 /*
808 * If the txq is not an AMPDU queue, there is no chance we freed
809 * several skbs. Check that out...
810 */
811 if (txq_id >= mvm->first_agg_queue)
812 goto out;
813
814 /* We can't free more than one frame at once on a shared queue */
815 WARN_ON(skb_freed > 1);
816
817	/* If we still have frames for this STA, there's nothing to do here */
818 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
819 goto out;
820
821 if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
822
823 /*
824 * If there are no pending frames for this STA and
825 * the tx to this station is not disabled, notify
826 * mac80211 that this station can now wake up in its
827 * STA table.
828 * If mvmsta is not NULL, sta is valid.
829 */
830
831 spin_lock_bh(&mvmsta->lock);
832
833 if (!mvmsta->disable_tx)
834 ieee80211_sta_block_awake(mvm->hw, sta, false);
835
836 spin_unlock_bh(&mvmsta->lock);
837 }
838
839 if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
840 /*
841 * We are draining and this was the last packet - pre_rcu_remove
842 * has been called already. We might be after the
843 * synchronize_net already.
844 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
845 */
846 set_bit(sta_id, mvm->sta_drained);
847 schedule_work(&mvm->sta_drained_wk);
848 }
849
850out:
851 rcu_read_unlock();
852}
853
854#ifdef CONFIG_IWLWIFI_DEBUG
855#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
856static const char *iwl_get_agg_tx_status(u16 status)
857{
858 switch (status & AGG_TX_STATE_STATUS_MSK) {
859 AGG_TX_STATE_(TRANSMITTED);
860 AGG_TX_STATE_(UNDERRUN);
861 AGG_TX_STATE_(BT_PRIO);
862 AGG_TX_STATE_(FEW_BYTES);
863 AGG_TX_STATE_(ABORT);
864 AGG_TX_STATE_(LAST_SENT_TTL);
865 AGG_TX_STATE_(LAST_SENT_TRY_CNT);
866 AGG_TX_STATE_(LAST_SENT_BT_KILL);
867 AGG_TX_STATE_(SCD_QUERY);
868 AGG_TX_STATE_(TEST_BAD_CRC32);
869 AGG_TX_STATE_(RESPONSE);
870 AGG_TX_STATE_(DUMP_TX);
871 AGG_TX_STATE_(DELAY_TX);
872 }
873
874 return "UNKNOWN";
875}
876
877static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
878 struct iwl_rx_packet *pkt)
879{
880 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
881 struct agg_tx_status *frame_status = &tx_resp->status;
882 int i;
883
884 for (i = 0; i < tx_resp->frame_count; i++) {
885 u16 fstatus = le16_to_cpu(frame_status[i].status);
886
887 IWL_DEBUG_TX_REPLY(mvm,
888 "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
889 iwl_get_agg_tx_status(fstatus),
890 fstatus & AGG_TX_STATE_STATUS_MSK,
891 (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
892 AGG_TX_STATE_TRY_CNT_POS,
893 le16_to_cpu(frame_status[i].sequence));
894 }
895}
896#else
897static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
898 struct iwl_rx_packet *pkt)
899{}
900#endif /* CONFIG_IWLWIFI_DEBUG */
901
902static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
903 struct iwl_rx_packet *pkt)
904{
905 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
906 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
907 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
908 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
909 struct ieee80211_sta *sta;
910
911 if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
912 return;
913
914 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
915 return;
916
917 iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
918
919 rcu_read_lock();
920
921 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
922
923 if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
924 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
925 mvmsta->tid_data[tid].rate_n_flags =
926 le32_to_cpu(tx_resp->initial_rate);
927 mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
928 mvmsta->tid_data[tid].tx_time =
929 le16_to_cpu(tx_resp->wireless_media_time);
930 }
931
932 rcu_read_unlock();
933}
934
935void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
936{
937 struct iwl_rx_packet *pkt = rxb_addr(rxb);
938 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
939
940 if (tx_resp->frame_count == 1)
941 iwl_mvm_rx_tx_cmd_single(mvm, pkt);
942 else
943 iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
944}
945
946static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
947 struct iwl_mvm_ba_notif *ba_notif,
948 struct iwl_mvm_tid_data *tid_data)
949{
950 info->flags |= IEEE80211_TX_STAT_AMPDU;
951 info->status.ampdu_ack_len = ba_notif->txed_2_done;
952 info->status.ampdu_len = ba_notif->txed;
953 iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
954 info);
955 /* TODO: not accounted if the whole A-MPDU failed */
956 info->status.tx_time = tid_data->tx_time;
957 info->status.status_driver_data[0] =
958 (void *)(uintptr_t)tid_data->reduced_tpc;
959 info->status.status_driver_data[1] =
960 (void *)(uintptr_t)tid_data->rate_n_flags;
961}
962
963void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
964{
965 struct iwl_rx_packet *pkt = rxb_addr(rxb);
966 struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
967 struct sk_buff_head reclaimed_skbs;
968 struct iwl_mvm_tid_data *tid_data;
969 struct ieee80211_sta *sta;
970 struct iwl_mvm_sta *mvmsta;
971 struct sk_buff *skb;
972 int sta_id, tid, freed;
973 /* "flow" corresponds to Tx queue */
974 u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
975 /* "ssn" is start of block-ack Tx window, corresponds to index
976 * (in Tx queue's circular buffer) of first TFD/frame in window */
977 u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
978
979 sta_id = ba_notif->sta_id;
980 tid = ba_notif->tid;
981
982 if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
983 tid >= IWL_MAX_TID_COUNT,
984 "sta_id %d tid %d", sta_id, tid))
985 return;
986
987 rcu_read_lock();
988
989 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
990
991	/* Reclaiming frames for a station that has been deleted? */
992 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
993 rcu_read_unlock();
994 return;
995 }
996
997 mvmsta = iwl_mvm_sta_from_mac80211(sta);
998 tid_data = &mvmsta->tid_data[tid];
999
1000 if (tid_data->txq_id != scd_flow) {
1001 IWL_ERR(mvm,
1002 "invalid BA notification: Q %d, tid %d, flow %d\n",
1003 tid_data->txq_id, tid, scd_flow);
1004 rcu_read_unlock();
1005 return;
1006 }
1007
1008 spin_lock_bh(&mvmsta->lock);
1009
1010 __skb_queue_head_init(&reclaimed_skbs);
1011
1012 /*
1013 * Release all TFDs before the SSN, i.e. all TFDs in front of
1014 * block-ack window (we assume that they've been successfully
1015 * transmitted ... if not, it's too late anyway).
1016 */
1017 iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
1018 &reclaimed_skbs);
1019
1020 IWL_DEBUG_TX_REPLY(mvm,
1021 "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
1022 (u8 *)&ba_notif->sta_addr_lo32,
1023 ba_notif->sta_id);
1024 IWL_DEBUG_TX_REPLY(mvm,
1025 "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
1026 ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
1027 (unsigned long long)le64_to_cpu(ba_notif->bitmap),
1028 scd_flow, ba_resp_scd_ssn, ba_notif->txed,
1029 ba_notif->txed_2_done);
1030
1031 tid_data->next_reclaimed = ba_resp_scd_ssn;
1032
1033 iwl_mvm_check_ratid_empty(mvm, sta, tid);
1034
1035 freed = 0;
1036
1037 skb_queue_walk(&reclaimed_skbs, skb) {
1038 struct ieee80211_hdr *hdr = (void *)skb->data;
1039 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1040
1041 if (ieee80211_is_data_qos(hdr->frame_control))
1042 freed++;
1043 else
1044 WARN_ON_ONCE(1);
1045
1046 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1047
1048 memset(&info->status, 0, sizeof(info->status));
1049		/* The packet was transmitted successfully; failures come as
1050		 * single frames because, before failing a frame, the firmware
1051		 * transmits it without aggregation at least once.
1052		 */
1053 info->flags |= IEEE80211_TX_STAT_ACK;
1054
1055 /* this is the first skb we deliver in this batch */
1056 /* put the rate scaling data there */
1057 if (freed == 1)
1058 iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
1059 }
1060
1061 spin_unlock_bh(&mvmsta->lock);
1062
1063	/* We got a BA notif with 0 acked frames or the scd_ssn didn't progress,
1064	 * which is possible (i.e. the first MPDU in the aggregation wasn't
1065	 * acked). It's still important to update RS about sent vs. acked.
1066	 */
1067 if (skb_queue_empty(&reclaimed_skbs)) {
1068 struct ieee80211_tx_info ba_info = {};
1069 struct ieee80211_chanctx_conf *chanctx_conf = NULL;
1070
1071 if (mvmsta->vif)
1072 chanctx_conf =
1073 rcu_dereference(mvmsta->vif->chanctx_conf);
1074
1075 if (WARN_ON_ONCE(!chanctx_conf))
1076 goto out;
1077
1078 ba_info.band = chanctx_conf->def.chan->band;
1079 iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
1080
1081 IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
1082 iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info);
1083 }
1084
1085out:
1086 rcu_read_unlock();
1087
1088 while (!skb_queue_empty(&reclaimed_skbs)) {
1089 skb = __skb_dequeue(&reclaimed_skbs);
1090 ieee80211_tx_status(mvm->hw, skb);
1091 }
1092}
1093
1094/*
1095 * Note that there are transports that buffer frames before they reach
1096 * the firmware. This means that after flush_tx_path is called, the
1097 * queue might not be empty. The race-free way to handle this is to:
1098 * 1) set the station as draining
1099 * 2) flush the Tx path
1100 * 3) wait for the transport queues to be empty
1101 */
1102int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
1103{
1104 int ret;
1105 struct iwl_tx_path_flush_cmd flush_cmd = {
1106 .queues_ctl = cpu_to_le32(tfd_msk),
1107 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
1108 };
1109
1110 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
1111 sizeof(flush_cmd), &flush_cmd);
1112 if (ret)
1113 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
1114 return ret;
1115}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
deleted file mode 100644
index ad0f16909e2e..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ /dev/null
@@ -1,1083 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright (C) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66#include <net/mac80211.h>
67
68#include "iwl-debug.h"
69#include "iwl-io.h"
70#include "iwl-prph.h"
71
72#include "mvm.h"
73#include "fw-api-rs.h"
74
75/*
76 * Will return 0 even if the cmd failed when RFKILL is asserted unless
77 * CMD_WANT_SKB is set in cmd->flags.
78 */
79int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
80{
81 int ret;
82
83#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
84 if (WARN_ON(mvm->d3_test_active))
85 return -EIO;
86#endif
87
88 /*
89 * Synchronous commands from this op-mode must hold
90 * the mutex, this ensures we don't try to send two
91 * (or more) synchronous commands at a time.
92 */
93 if (!(cmd->flags & CMD_ASYNC))
94 lockdep_assert_held(&mvm->mutex);
95
96 ret = iwl_trans_send_cmd(mvm->trans, cmd);
97
98 /*
99 * If the caller wants the SKB, then don't hide any problems, the
100 * caller might access the response buffer which will be NULL if
101 * the command failed.
102 */
103 if (cmd->flags & CMD_WANT_SKB)
104 return ret;
105
106 /* Silently ignore failures if RFKILL is asserted */
107 if (!ret || ret == -ERFKILL)
108 return 0;
109 return ret;
110}
111
112int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
113 u32 flags, u16 len, const void *data)
114{
115 struct iwl_host_cmd cmd = {
116 .id = id,
117 .len = { len, },
118 .data = { data, },
119 .flags = flags,
120 };
121
122 return iwl_mvm_send_cmd(mvm, &cmd);
123}
124
125/*
126 * We assume that the caller set the status to the success value
127 */
128int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
129 u32 *status)
130{
131 struct iwl_rx_packet *pkt;
132 struct iwl_cmd_response *resp;
133 int ret, resp_len;
134
135 lockdep_assert_held(&mvm->mutex);
136
137#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
138 if (WARN_ON(mvm->d3_test_active))
139 return -EIO;
140#endif
141
142 /*
143 * Only synchronous commands can wait for status,
144 * we use WANT_SKB so the caller can't.
145 */
146 if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
147 "cmd flags %x", cmd->flags))
148 return -EINVAL;
149
150 cmd->flags |= CMD_WANT_SKB;
151
152 ret = iwl_trans_send_cmd(mvm->trans, cmd);
153 if (ret == -ERFKILL) {
154 /*
155 * The command failed because of RFKILL, don't update
156 * the status, leave it as success and return 0.
157 */
158 return 0;
159 } else if (ret) {
160 return ret;
161 }
162
163 pkt = cmd->resp_pkt;
164 /* Can happen if RFKILL is asserted */
165 if (!pkt) {
166 ret = 0;
167 goto out_free_resp;
168 }
169
170 resp_len = iwl_rx_packet_payload_len(pkt);
171 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
172 ret = -EIO;
173 goto out_free_resp;
174 }
175
176 resp = (void *)pkt->data;
177 *status = le32_to_cpu(resp->status);
178 out_free_resp:
179 iwl_free_resp(cmd);
180 return ret;
181}
182
183/*
184 * We assume that the caller set the status to the success value
185 */
186int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
187 const void *data, u32 *status)
188{
189 struct iwl_host_cmd cmd = {
190 .id = id,
191 .len = { len, },
192 .data = { data, },
193 };
194
195 return iwl_mvm_send_cmd_status(mvm, &cmd, status);
196}
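
/*
 * Illustrative sketch (not part of the original file): typical usage of the
 * status helpers above, where the caller pre-sets the status to the success
 * value before sending.  ADD_STA / ADD_STA_SUCCESS and iwl_mvm_add_sta_cmd
 * are used here only as an assumed example command/status pair.
 */
static int iwl_mvm_example_cmd_with_status(struct iwl_mvm *mvm,
					   const struct iwl_mvm_add_sta_cmd *cmd)
{
	u32 status = ADD_STA_SUCCESS;	/* pre-set to the success value */
	int ret;

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
					  cmd, &status);
	if (ret)
		return ret;

	/* status is only updated if the firmware actually answered */
	return status == ADD_STA_SUCCESS ? 0 : -EIO;
}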
197
198#define IWL_DECLARE_RATE_INFO(r) \
199 [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
200
201/*
202 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
203 */
204static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
205 IWL_DECLARE_RATE_INFO(1),
206 IWL_DECLARE_RATE_INFO(2),
207 IWL_DECLARE_RATE_INFO(5),
208 IWL_DECLARE_RATE_INFO(11),
209 IWL_DECLARE_RATE_INFO(6),
210 IWL_DECLARE_RATE_INFO(9),
211 IWL_DECLARE_RATE_INFO(12),
212 IWL_DECLARE_RATE_INFO(18),
213 IWL_DECLARE_RATE_INFO(24),
214 IWL_DECLARE_RATE_INFO(36),
215 IWL_DECLARE_RATE_INFO(48),
216 IWL_DECLARE_RATE_INFO(54),
217};
218
219int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
220 enum ieee80211_band band)
221{
222 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
223 int idx;
224 int band_offset = 0;
225
226 /* Legacy rate format, search for match in table */
227 if (band == IEEE80211_BAND_5GHZ)
228 band_offset = IWL_FIRST_OFDM_RATE;
229 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
230 if (fw_rate_idx_to_plcp[idx] == rate)
231 return idx - band_offset;
232
233 return -1;
234}
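/* e.g. on the 5 GHz band, a 6 Mbps PLCP maps back to mac80211 index 0 */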
235
236u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
237{
238 /* Get PLCP rate for tx_cmd->rate_n_flags */
239 return fw_rate_idx_to_plcp[rate_idx];
240}
241
242void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
243{
244 struct iwl_rx_packet *pkt = rxb_addr(rxb);
245 struct iwl_error_resp *err_resp = (void *)pkt->data;
246
247 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
248 le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
249 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
250 le16_to_cpu(err_resp->bad_cmd_seq_num),
251 le32_to_cpu(err_resp->error_service));
252 IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
253 le64_to_cpu(err_resp->timestamp));
254}
255
256/*
257 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
258 * The parameter should also be a combination of ANT_[ABC].
259 */
260u8 first_antenna(u8 mask)
261{
262 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
263 if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
264 return BIT(0);
265 return BIT(ffs(mask) - 1);
266}
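/* e.g. first_antenna(ANT_B | ANT_C) returns ANT_B, i.e. BIT(1) */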
267
268/*
269 * Toggles between TX antennas to send the probe request on.
270 * Receives the bitmask of valid TX antennas and the *index* used
271 * for the last TX, and returns the next valid *index* to use.
272 * In order to set it in the tx_cmd, must do BIT(idx).
273 */
274u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
275{
276 u8 ind = last_idx;
277 int i;
278
279 for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
280 ind = (ind + 1) % RATE_MCS_ANT_NUM;
281 if (valid & BIT(ind))
282 return ind;
283 }
284
285 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
286 return last_idx;
287}
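
/*
 * Typical usage (as in iwl_mvm_set_tx_cmd_rate() in tx.c): keep the returned
 * index in mvm->mgmt_last_antenna_idx and put BIT(idx) << RATE_MCS_ANT_POS
 * into rate_n_flags.
 */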
288
289static const struct {
290 const char *name;
291 u8 num;
292} advanced_lookup[] = {
293 { "NMI_INTERRUPT_WDG", 0x34 },
294 { "SYSASSERT", 0x35 },
295 { "UCODE_VERSION_MISMATCH", 0x37 },
296 { "BAD_COMMAND", 0x38 },
297 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
298 { "FATAL_ERROR", 0x3D },
299 { "NMI_TRM_HW_ERR", 0x46 },
300 { "NMI_INTERRUPT_TRM", 0x4C },
301 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
302 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
303 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
304 { "NMI_INTERRUPT_HOST", 0x66 },
305 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
306 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
307 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
308 { "ADVANCED_SYSASSERT", 0 },
309};
310
311static const char *desc_lookup(u32 num)
312{
313 int i;
314
315 for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
316 if (advanced_lookup[i].num == num)
317 return advanced_lookup[i].name;
318
319 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
320 return advanced_lookup[i].name;
321}
322
323/*
324 * Note: This structure is read from the device with IO accesses,
325 * and the reading already does the endian conversion. As it is
326 * read with u32-sized accesses, any members with a different size
327 * need to be ordered correctly though!
328 */
329struct iwl_error_event_table_v1 {
330 u32 valid; /* (nonzero) valid, (0) log is empty */
331 u32 error_id; /* type of error */
332 u32 pc; /* program counter */
333 u32 blink1; /* branch link */
334 u32 blink2; /* branch link */
335 u32 ilink1; /* interrupt link */
336 u32 ilink2; /* interrupt link */
337 u32 data1; /* error-specific data */
338 u32 data2; /* error-specific data */
339 u32 data3; /* error-specific data */
340 u32 bcon_time; /* beacon timer */
341 u32 tsf_low; /* network timestamp function timer */
342 u32 tsf_hi; /* network timestamp function timer */
343 u32 gp1; /* GP1 timer register */
344 u32 gp2; /* GP2 timer register */
345 u32 gp3; /* GP3 timer register */
346 u32 ucode_ver; /* uCode version */
347 u32 hw_ver; /* HW Silicon version */
348 u32 brd_ver; /* HW board version */
349 u32 log_pc; /* log program counter */
350 u32 frame_ptr; /* frame pointer */
351 u32 stack_ptr; /* stack pointer */
352 u32 hcmd; /* last host command header */
353 u32 isr0; /* isr status register LMPM_NIC_ISR0:
354 * rxtx_flag */
355 u32 isr1; /* isr status register LMPM_NIC_ISR1:
356 * host_flag */
357 u32 isr2; /* isr status register LMPM_NIC_ISR2:
358 * enc_flag */
359 u32 isr3; /* isr status register LMPM_NIC_ISR3:
360 * time_flag */
361 u32 isr4; /* isr status register LMPM_NIC_ISR4:
362 * wico interrupt */
363 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
364 u32 wait_event; /* wait event() caller address */
365 u32 l2p_control; /* L2pControlField */
366 u32 l2p_duration; /* L2pDurationField */
367 u32 l2p_mhvalid; /* L2pMhValidBits */
368 u32 l2p_addr_match; /* L2pAddrMatchStat */
369	u32 lmpm_pmg_sel;	/* indicates which clocks are turned on
370				 * (LMPM_PMG_SEL) */
371	u32 u_timestamp;	/* indicates the date and time of the
372				 * compilation */
373 u32 flow_handler; /* FH read/write pointers, RX credit */
374} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
375
376struct iwl_error_event_table {
377 u32 valid; /* (nonzero) valid, (0) log is empty */
378 u32 error_id; /* type of error */
379 u32 pc; /* program counter */
380 u32 blink1; /* branch link */
381 u32 blink2; /* branch link */
382 u32 ilink1; /* interrupt link */
383 u32 ilink2; /* interrupt link */
384 u32 data1; /* error-specific data */
385 u32 data2; /* error-specific data */
386 u32 data3; /* error-specific data */
387 u32 bcon_time; /* beacon timer */
388 u32 tsf_low; /* network timestamp function timer */
389 u32 tsf_hi; /* network timestamp function timer */
390 u32 gp1; /* GP1 timer register */
391 u32 gp2; /* GP2 timer register */
392 u32 gp3; /* GP3 timer register */
393 u32 major; /* uCode version major */
394 u32 minor; /* uCode version minor */
395 u32 hw_ver; /* HW Silicon version */
396 u32 brd_ver; /* HW board version */
397 u32 log_pc; /* log program counter */
398 u32 frame_ptr; /* frame pointer */
399 u32 stack_ptr; /* stack pointer */
400 u32 hcmd; /* last host command header */
401 u32 isr0; /* isr status register LMPM_NIC_ISR0:
402 * rxtx_flag */
403 u32 isr1; /* isr status register LMPM_NIC_ISR1:
404 * host_flag */
405 u32 isr2; /* isr status register LMPM_NIC_ISR2:
406 * enc_flag */
407 u32 isr3; /* isr status register LMPM_NIC_ISR3:
408 * time_flag */
409 u32 isr4; /* isr status register LMPM_NIC_ISR4:
410 * wico interrupt */
411 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
412 u32 wait_event; /* wait event() caller address */
413 u32 l2p_control; /* L2pControlField */
414 u32 l2p_duration; /* L2pDurationField */
415 u32 l2p_mhvalid; /* L2pMhValidBits */
416 u32 l2p_addr_match; /* L2pAddrMatchStat */
417	u32 lmpm_pmg_sel;	/* indicates which clocks are turned on
418				 * (LMPM_PMG_SEL) */
419	u32 u_timestamp;	/* indicates the date and time of the
420				 * compilation */
421 u32 flow_handler; /* FH read/write pointers, RX credit */
422} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;
423
424/*
425 * UMAC error struct - relevant starting from family 8000 chip.
426 * Note: This structure is read from the device with IO accesses,
427 * and the reading already does the endian conversion. As it is
428 * read with u32-sized accesses, any members with a different size
429 * need to be ordered correctly though!
430 */
431struct iwl_umac_error_event_table {
432 u32 valid; /* (nonzero) valid, (0) log is empty */
433 u32 error_id; /* type of error */
434 u32 blink1; /* branch link */
435 u32 blink2; /* branch link */
436 u32 ilink1; /* interrupt link */
437 u32 ilink2; /* interrupt link */
438 u32 data1; /* error-specific data */
439 u32 data2; /* error-specific data */
440 u32 data3; /* error-specific data */
441 u32 umac_major;
442 u32 umac_minor;
443 u32 frame_pointer; /* core register 27*/
444 u32 stack_pointer; /* core register 28 */
445 u32 cmd_header; /* latest host cmd sent to UMAC */
446 u32 nic_isr_pref; /* ISR status register */
447} __packed;
448
449#define ERROR_START_OFFSET (1 * sizeof(u32))
450#define ERROR_ELEM_SIZE (7 * sizeof(u32))
451
452static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
453{
454 struct iwl_trans *trans = mvm->trans;
455 struct iwl_umac_error_event_table table;
456 u32 base;
457
458 base = mvm->umac_error_event_table;
459
460 if (base < 0x800000) {
461 IWL_ERR(mvm,
462 "Not valid error log pointer 0x%08X for %s uCode\n",
463 base,
464 (mvm->cur_ucode == IWL_UCODE_INIT)
465 ? "Init" : "RT");
466 return;
467 }
468
469 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
470
471 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
472 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
473 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
474 mvm->status, table.valid);
475 }
476
477 IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
478 desc_lookup(table.error_id));
479 IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
480 IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
481 IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
482 IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
483 IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
484 IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
485 IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
486 IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
487 IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
488 IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
489 IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
490 IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
491 IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
492}
493
494static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
495{
496 struct iwl_trans *trans = mvm->trans;
497 struct iwl_error_event_table_v1 table;
498 u32 base;
499
500 base = mvm->error_event_table;
501 if (mvm->cur_ucode == IWL_UCODE_INIT) {
502 if (!base)
503 base = mvm->fw->init_errlog_ptr;
504 } else {
505 if (!base)
506 base = mvm->fw->inst_errlog_ptr;
507 }
508
509 if (base < 0x800000) {
510 IWL_ERR(mvm,
511 "Not valid error log pointer 0x%08X for %s uCode\n",
512 base,
513 (mvm->cur_ucode == IWL_UCODE_INIT)
514 ? "Init" : "RT");
515 return;
516 }
517
518 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
519
520 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
521 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
522 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
523 mvm->status, table.valid);
524 }
525
526 /* Do not change this output - scripts rely on it */
527
528 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
529
530 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
531 table.data1, table.data2, table.data3,
532 table.blink1, table.blink2, table.ilink1,
533 table.ilink2, table.bcon_time, table.gp1,
534 table.gp2, table.gp3, table.ucode_ver, 0,
535 table.hw_ver, table.brd_ver);
536 IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
537 desc_lookup(table.error_id));
538 IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
539 IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
540 IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
541 IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
542 IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
543 IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
544 IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
545 IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
546 IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
547 IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
548 IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
549 IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
550 IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
551 IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
552 IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
553 IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
554 IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
555 IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
556 IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
557 IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
558 IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
559 IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
560 IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
561 IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
562 IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
563 IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
564 IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
565 IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
566 IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
567 IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
568 IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
569 IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
570
571 if (mvm->support_umac_log)
572 iwl_mvm_dump_umac_error_log(mvm);
573}
574
575void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
576{
577 struct iwl_trans *trans = mvm->trans;
578 struct iwl_error_event_table table;
579 u32 base;
580
581 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
582 iwl_mvm_dump_nic_error_log_old(mvm);
583 return;
584 }
585
586 base = mvm->error_event_table;
587 if (mvm->cur_ucode == IWL_UCODE_INIT) {
588 if (!base)
589 base = mvm->fw->init_errlog_ptr;
590 } else {
591 if (!base)
592 base = mvm->fw->inst_errlog_ptr;
593 }
594
595 if (base < 0x800000) {
596 IWL_ERR(mvm,
597 "Not valid error log pointer 0x%08X for %s uCode\n",
598 base,
599 (mvm->cur_ucode == IWL_UCODE_INIT)
600 ? "Init" : "RT");
601 return;
602 }
603
604 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
605
606 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
607 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
608 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
609 mvm->status, table.valid);
610 }
611
612 /* Do not change this output - scripts rely on it */
613
614 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
615
616 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
617 table.data1, table.data2, table.data3,
618 table.blink1, table.blink2, table.ilink1,
619 table.ilink2, table.bcon_time, table.gp1,
620 table.gp2, table.gp3, table.major,
621 table.minor, table.hw_ver, table.brd_ver);
622 IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
623 desc_lookup(table.error_id));
624 IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
625 IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
626 IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
627 IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
628 IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
629 IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
630 IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
631 IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
632 IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
633 IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
634 IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
635 IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
636 IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
637 IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
638 IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
639 IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
640 IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
641 IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
642 IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
643 IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
644 IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
645 IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
646 IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
647 IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
648 IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
649 IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
650 IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
651 IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
652 IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
653 IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
654 IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
655 IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
656 IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
657
658 if (mvm->support_umac_log)
659 iwl_mvm_dump_umac_error_log(mvm);
660}
661
662int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
663{
664 int i;
665
666 lockdep_assert_held(&mvm->queue_info_lock);
667
668 for (i = minq; i <= maxq; i++)
669 if (mvm->queue_info[i].hw_queue_refcount == 0 &&
670 !mvm->queue_info[i].setup_reserved)
671 return i;
672
673 return -ENOSPC;
674}
675
676void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
677 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
678 unsigned int wdg_timeout)
679{
680 bool enable_queue = true;
681
682 spin_lock_bh(&mvm->queue_info_lock);
683
684 /* Make sure this TID isn't already enabled */
685 if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
686 spin_unlock_bh(&mvm->queue_info_lock);
687 IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
688 cfg->tid);
689 return;
690 }
691
692 /* Update mappings and refcounts */
693 mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
694 mvm->queue_info[queue].hw_queue_refcount++;
695 if (mvm->queue_info[queue].hw_queue_refcount > 1)
696 enable_queue = false;
697 mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
698
699 IWL_DEBUG_TX_QUEUES(mvm,
700 "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
701 queue, mvm->queue_info[queue].hw_queue_refcount,
702 mvm->queue_info[queue].hw_queue_to_mac80211);
703
704 spin_unlock_bh(&mvm->queue_info_lock);
705
706 /* Send the enabling command if we need to */
707 if (enable_queue) {
708 struct iwl_scd_txq_cfg_cmd cmd = {
709 .scd_queue = queue,
710 .enable = 1,
711 .window = cfg->frame_limit,
712 .sta_id = cfg->sta_id,
713 .ssn = cpu_to_le16(ssn),
714 .tx_fifo = cfg->fifo,
715 .aggregate = cfg->aggregate,
716 .tid = cfg->tid,
717 };
718
719 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
720 wdg_timeout);
721 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
722 &cmd),
723 "Failed to configure queue %d on FIFO %d\n", queue,
724 cfg->fifo);
725 }
726}
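
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * fill an iwl_trans_txq_scd_cfg and enable a best-effort data queue for a
 * station.  IWL_MVM_TX_FIFO_BE and IWL_FRAME_LIMIT are assumed constants,
 * and the queue/fifo numbers and wdg_timeout are examples supplied by the
 * caller; this is a hedged sketch, not an actual call site.
 */
static void iwl_mvm_example_enable_be_queue(struct iwl_mvm *mvm, int queue,
					    int mac80211_queue, u8 sta_id,
					    unsigned int wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_BE,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,	/* non-QoS style TID */
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	/* ssn 0: start the queue at the beginning of the sequence space */
	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, 0, &cfg, wdg_timeout);
}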
727
728void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
729 u8 tid, u8 flags)
730{
731 struct iwl_scd_txq_cfg_cmd cmd = {
732 .scd_queue = queue,
733 .enable = 0,
734 };
735 bool remove_mac_queue = true;
736 int ret;
737
738 spin_lock_bh(&mvm->queue_info_lock);
739
740 if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
741 spin_unlock_bh(&mvm->queue_info_lock);
742 return;
743 }
744
745 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
746
747 /*
748 * If there is another TID with the same AC - don't remove the MAC queue
749 * from the mapping
750 */
751 if (tid < IWL_MAX_TID_COUNT) {
752 unsigned long tid_bitmap =
753 mvm->queue_info[queue].tid_bitmap;
754 int ac = tid_to_mac80211_ac[tid];
755 int i;
756
757 for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
758 if (tid_to_mac80211_ac[i] == ac)
759 remove_mac_queue = false;
760 }
761 }
762
763 if (remove_mac_queue)
764 mvm->queue_info[queue].hw_queue_to_mac80211 &=
765 ~BIT(mac80211_queue);
766 mvm->queue_info[queue].hw_queue_refcount--;
767
768 cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
769
770 IWL_DEBUG_TX_QUEUES(mvm,
771 "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
772 queue,
773 mvm->queue_info[queue].hw_queue_refcount,
774 mvm->queue_info[queue].hw_queue_to_mac80211);
775
776 /* If the queue is still enabled - nothing left to do in this func */
777 if (cmd.enable) {
778 spin_unlock_bh(&mvm->queue_info_lock);
779 return;
780 }
781
782 /* Make sure queue info is correct even though we overwrite it */
783 WARN(mvm->queue_info[queue].hw_queue_refcount ||
784 mvm->queue_info[queue].tid_bitmap ||
785 mvm->queue_info[queue].hw_queue_to_mac80211,
786 "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
787 queue, mvm->queue_info[queue].hw_queue_refcount,
788 mvm->queue_info[queue].hw_queue_to_mac80211,
789 mvm->queue_info[queue].tid_bitmap);
790
791 /* If we are here - the queue is freed and we can zero out these vals */
792 mvm->queue_info[queue].hw_queue_refcount = 0;
793 mvm->queue_info[queue].tid_bitmap = 0;
794 mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
795
796 spin_unlock_bh(&mvm->queue_info_lock);
797
798 iwl_trans_txq_disable(mvm->trans, queue, false);
799 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
800 sizeof(cmd), &cmd);
801 if (ret)
802 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
803 queue, ret);
804}
805
806/**
807 * iwl_mvm_send_lq_cmd() - Send link quality command
808 * @init: This command is sent as part of station initialization right
809 * after station has been added.
810 *
811 * The link quality command is sent as the last step of station creation.
812 * This is the special case in which init is set, and we call a callback
813 * to clear the state indicating that station creation is in
814 * progress.
815 */
816int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
817{
818 struct iwl_host_cmd cmd = {
819 .id = LQ_CMD,
820 .len = { sizeof(struct iwl_lq_cmd), },
821 .flags = init ? 0 : CMD_ASYNC,
822 .data = { lq, },
823 };
824
825 if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
826 return -EINVAL;
827
828 return iwl_mvm_send_cmd(mvm, &cmd);
829}
830
831/**
832 * iwl_mvm_update_smps - Get a request to change the SMPS mode
833 * @req_type: The part of the driver that requested the change.
834 * @smps_request: The request to change the SMPS mode.
835 *
836 * Get a request to change the SMPS mode,
837 * and change it according to all other requests in the driver.
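 * For example, if one requester asks for STATIC and another for DYNAMIC,
 * STATIC wins, since it is the more restrictive mode.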
838 */
839void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
840 enum iwl_mvm_smps_type_request req_type,
841 enum ieee80211_smps_mode smps_request)
842{
843 struct iwl_mvm_vif *mvmvif;
844 enum ieee80211_smps_mode smps_mode;
845 int i;
846
847 lockdep_assert_held(&mvm->mutex);
848
 849	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
850 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
851 return;
852
853 if (vif->type == NL80211_IFTYPE_AP)
854 smps_mode = IEEE80211_SMPS_OFF;
855 else
856 smps_mode = IEEE80211_SMPS_AUTOMATIC;
857
858 mvmvif = iwl_mvm_vif_from_mac80211(vif);
859 mvmvif->smps_requests[req_type] = smps_request;
860 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
861 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
862 smps_mode = IEEE80211_SMPS_STATIC;
863 break;
864 }
865 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
866 smps_mode = IEEE80211_SMPS_DYNAMIC;
867 }
868
869 ieee80211_request_smps(vif, smps_mode);
870}
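
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * requester (example_request_static_smps is an invented name) asking for
 * static SMPS on a vif.  IWL_MVM_SMPS_REQ_TT is assumed to be one of the
 * iwl_mvm_smps_type_request values; the effective mode is resolved above
 * against every stored request, with STATIC taking precedence over DYNAMIC,
 * and AUTOMATIC (or OFF for AP interfaces) used otherwise.
 */
static void example_request_static_smps(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif)
{
	/* mvm->mutex must be held, as asserted in iwl_mvm_update_smps() */
	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
			    IEEE80211_SMPS_STATIC);
}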
871
872int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
873{
874 struct iwl_statistics_cmd scmd = {
875 .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
876 };
877 struct iwl_host_cmd cmd = {
878 .id = STATISTICS_CMD,
879 .len[0] = sizeof(scmd),
880 .data[0] = &scmd,
881 .flags = CMD_WANT_SKB,
882 };
883 int ret;
884
885 ret = iwl_mvm_send_cmd(mvm, &cmd);
886 if (ret)
887 return ret;
888
889 iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
890 iwl_free_resp(&cmd);
891
892 if (clear)
893 iwl_mvm_accu_radio_stats(mvm);
894
895 return 0;
896}
897
898void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
899{
900 mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
901 mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
902 mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
903 mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
904}
905
906static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
907 struct ieee80211_vif *vif)
908{
909 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
910 bool *result = _data;
911 int i;
912
913 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
914 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
915 mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
916 *result = false;
917 }
918}
919
920bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
921{
922 bool result = true;
923
924 lockdep_assert_held(&mvm->mutex);
925
926 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
927 return false;
928
929 if (mvm->cfg->rx_with_siso_diversity)
930 return false;
931
932 ieee80211_iterate_active_interfaces_atomic(
933 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
934 iwl_mvm_diversity_iter, &result);
935
936 return result;
937}
938
939int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
940 bool value)
941{
942 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
943 int res;
944
945 lockdep_assert_held(&mvm->mutex);
946
947 if (mvmvif->low_latency == value)
948 return 0;
949
950 mvmvif->low_latency = value;
951
952 res = iwl_mvm_update_quotas(mvm, false, NULL);
953 if (res)
954 return res;
955
956 iwl_mvm_bt_coex_vif_change(mvm);
957
958 return iwl_mvm_power_update_mac(mvm);
959}
960
961static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
962{
963 bool *result = _data;
964
965 if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
966 *result = true;
967}
968
969bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
970{
971 bool result = false;
972
973 ieee80211_iterate_active_interfaces_atomic(
974 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
975 iwl_mvm_ll_iter, &result);
976
977 return result;
978}
979
980struct iwl_bss_iter_data {
981 struct ieee80211_vif *vif;
982 bool error;
983};
984
985static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
986 struct ieee80211_vif *vif)
987{
988 struct iwl_bss_iter_data *data = _data;
989
990 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
991 return;
992
993 if (data->vif) {
994 data->error = true;
995 return;
996 }
997
998 data->vif = vif;
999}
1000
1001struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
1002{
1003 struct iwl_bss_iter_data bss_iter_data = {};
1004
1005 ieee80211_iterate_active_interfaces_atomic(
1006 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1007 iwl_mvm_bss_iface_iterator, &bss_iter_data);
1008
1009 if (bss_iter_data.error) {
1010 IWL_ERR(mvm, "More than one managed interface active!\n");
1011 return ERR_PTR(-EINVAL);
1012 }
1013
1014 return bss_iter_data.vif;
1015}
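
/*
 * Illustrative sketch only, not part of the original file: callers of
 * iwl_mvm_get_bss_vif() have to handle three outcomes - a valid vif, NULL
 * when no managed interface is active, and ERR_PTR(-EINVAL) when more than
 * one is.  IS_ERR_OR_NULL() from linux/err.h covers the last two; the
 * helper name below is invented for the example.
 */
static bool example_single_bss_vif_active(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);

	return !IS_ERR_OR_NULL(vif);
}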
1016
1017unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1018 struct ieee80211_vif *vif,
1019 bool tdls, bool cmd_q)
1020{
1021 struct iwl_fw_dbg_trigger_tlv *trigger;
1022 struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
1023 unsigned int default_timeout =
1024 cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
1025
1026 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
1027 return iwlmvm_mod_params.tfd_q_hang_detect ?
1028 default_timeout : IWL_WATCHDOG_DISABLED;
1029
1030 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
1031 txq_timer = (void *)trigger->data;
1032
1033 if (tdls)
1034 return le32_to_cpu(txq_timer->tdls);
1035
1036 if (cmd_q)
1037 return le32_to_cpu(txq_timer->command_queue);
1038
1039 if (WARN_ON(!vif))
1040 return default_timeout;
1041
1042 switch (ieee80211_vif_type_p2p(vif)) {
1043 case NL80211_IFTYPE_ADHOC:
1044 return le32_to_cpu(txq_timer->ibss);
1045 case NL80211_IFTYPE_STATION:
1046 return le32_to_cpu(txq_timer->bss);
1047 case NL80211_IFTYPE_AP:
1048 return le32_to_cpu(txq_timer->softap);
1049 case NL80211_IFTYPE_P2P_CLIENT:
1050 return le32_to_cpu(txq_timer->p2p_client);
1051 case NL80211_IFTYPE_P2P_GO:
1052 return le32_to_cpu(txq_timer->p2p_go);
1053 case NL80211_IFTYPE_P2P_DEVICE:
1054 return le32_to_cpu(txq_timer->p2p_device);
1055 default:
1056 WARN_ON(1);
1057 return mvm->cfg->base_params->wd_timeout;
1058 }
1059}
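
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * caller (invented name) picking the TX queue watchdog timeout for a
 * regular data queue - neither TDLS nor the command queue - on a vif.
 * Without the FW_DBG_TRIGGER_TXQ_TIMERS trigger this resolves to the config
 * default or IWL_WATCHDOG_DISABLED, as implemented above.
 */
static unsigned int example_data_queue_wd_timeout(struct iwl_mvm *mvm,
						  struct ieee80211_vif *vif)
{
	return iwl_mvm_get_wd_timeout(mvm, vif, false, false);
}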
1060
1061void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1062 const char *errmsg)
1063{
1064 struct iwl_fw_dbg_trigger_tlv *trig;
1065 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
1066
1067 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
1068 goto out;
1069
1070 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
1071 trig_mlme = (void *)trig->data;
1072 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
1073 goto out;
1074
1075 if (trig_mlme->stop_connection_loss &&
1076 --trig_mlme->stop_connection_loss)
1077 goto out;
1078
1079 iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg);
1080
1081out:
1082 ieee80211_connection_loss(vif);
1083}