path: root/drivers/net/wireless/iwlwifi
author    Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-22 10:38:37 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-22 10:38:37 -0500
commit    fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree      a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/wireless/iwlwifi
parent    8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files. (HEAD, master)
Diffstat (limited to 'drivers/net/wireless/iwlwifi')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 251
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 362
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000-hw.h | 88
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 455
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000-hw.h | 81
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 570
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-calib.c | 1083
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-calib.h | 79
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c | 299
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 2047
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 3367
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.h | 463
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rxon.c | 965
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-sta.c | 693
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tt.c | 699
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tt.h | 129
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 980
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-ucode.c | 580
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 3989
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.h | 343
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-bus.h | 139
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h | 4033
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 1938
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 521
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 2750
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 1662
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 856
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h | 313
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-helpers.h | 184
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.c | 223
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.h | 57
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-pci.c | 562
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.c | 444
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.h | 56
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 1029
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 629
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 832
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.h | 138
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sv-open.c | 754
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h | 82
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c | 979
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c | 1038
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.c | 1170
43 files changed, 37912 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
new file mode 100644
index 00000000000..01b49eb8c8e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -0,0 +1,251 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <linux/wireless.h>
34#include <net/mac80211.h>
35#include <linux/etherdevice.h>
36#include <asm/unaligned.h>
37#include <linux/stringify.h>
38
39#include "iwl-eeprom.h"
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43#include "iwl-sta.h"
44#include "iwl-agn.h"
45#include "iwl-helpers.h"
46#include "iwl-agn-hw.h"
47
48/* Highest firmware API version supported */
49#define IWL1000_UCODE_API_MAX 5
50#define IWL100_UCODE_API_MAX 5
51
52/* Lowest firmware API version supported */
53#define IWL1000_UCODE_API_MIN 1
54#define IWL100_UCODE_API_MIN 5
55
56#define IWL1000_FW_PRE "iwlwifi-1000-"
57#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
58
59#define IWL100_FW_PRE "iwlwifi-100-"
60#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
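/*
 * For reference: __stringify() (<linux/stringify.h>) expands its argument
 * before stringizing, so adjacent string literals concatenate into the
 * firmware file name requested at load time, e.g.
 *   IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)
 *     -> "iwlwifi-1000-" "5" ".ucode"
 *     -> "iwlwifi-1000-5.ucode"
 */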
61
62
63/*
64 * For 1000, use the advanced thermal throttling critical temperature
65 * threshold, but keep the legacy thermal management implementation for now.
66 * This is because the 1000 uCode uses the advanced thermal throttling API
67 * but does not implement ct_kill_exit based on the ct_kill exit temperature,
68 * so thermal throttling is still based on the legacy thermal throttling
69 * management.
70 * The code here needs to be modified once the 1000 uCode has the advanced
71 * thermal throttling algorithm in place.
72 */
73static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
74{
75 /* want Celsius */
76 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
77 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
78}
79
80/* NIC configuration for 1000 series */
81static void iwl1000_nic_config(struct iwl_priv *priv)
82{
83 /* set CSR_HW_IF_CONFIG_REG for uCode use */
84 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
85 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
86 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
87
88 /* Setting digital SVR for 1000 card to 1.32V */
89 /* locking is acquired in iwl_set_bits_mask_prph() function */
90 iwl_set_bits_mask_prph(priv, APMG_DIGITAL_SVR_REG,
91 APMG_SVR_DIGITAL_VOLTAGE_1_32,
92 ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
93}
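/*
 * For reference: iwl_set_bits_mask_prph() (iwl-io.h) writes back
 * (read(reg) & mask) | bits, so passing ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK as
 * the mask clears the digital voltage-select field of APMG_DIGITAL_SVR_REG
 * before OR-ing in the 1.32 V setting requested above.
 */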
94
95static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
96 .min_nrg_cck = 95,
97 .max_nrg_cck = 0, /* not used, set to 0 */
98 .auto_corr_min_ofdm = 90,
99 .auto_corr_min_ofdm_mrc = 170,
100 .auto_corr_min_ofdm_x1 = 120,
101 .auto_corr_min_ofdm_mrc_x1 = 240,
102
103 .auto_corr_max_ofdm = 120,
104 .auto_corr_max_ofdm_mrc = 210,
105 .auto_corr_max_ofdm_x1 = 155,
106 .auto_corr_max_ofdm_mrc_x1 = 290,
107
108 .auto_corr_min_cck = 125,
109 .auto_corr_max_cck = 200,
110 .auto_corr_min_cck_mrc = 170,
111 .auto_corr_max_cck_mrc = 400,
112 .nrg_th_cck = 95,
113 .nrg_th_ofdm = 95,
114
115 .barker_corr_th_min = 190,
116 .barker_corr_th_min_mrc = 390,
117 .nrg_th_cca = 62,
118};
119
120static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
121{
122 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
123 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
124 priv->cfg->base_params->num_of_queues =
125 iwlagn_mod_params.num_of_queues;
126
127 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
128 priv->hw_params.scd_bc_tbls_size =
129 priv->cfg->base_params->num_of_queues *
130 sizeof(struct iwlagn_scd_bc_tbl);
131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
132 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
133 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
134
135 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
136 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
137
138 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
139 BIT(IEEE80211_BAND_5GHZ);
140
141 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
142 if (priv->cfg->rx_with_siso_diversity)
143 priv->hw_params.rx_chains_num = 1;
144 else
145 priv->hw_params.rx_chains_num =
146 num_of_ant(priv->cfg->valid_rx_ant);
147 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
148 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
149
150 iwl1000_set_ct_threshold(priv);
151
152 /* Set initial sensitivity parameters */
153 /* Set initial calibration set */
154 priv->hw_params.sens = &iwl1000_sensitivity;
155 priv->hw_params.calib_init_cfg =
156 BIT(IWL_CALIB_XTAL) |
157 BIT(IWL_CALIB_LO) |
158 BIT(IWL_CALIB_TX_IQ) |
159 BIT(IWL_CALIB_TX_IQ_PERD) |
160 BIT(IWL_CALIB_BASE_BAND);
161 if (priv->cfg->need_dc_calib)
162 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
163
164 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
165
166 return 0;
167}
168
169static struct iwl_lib_ops iwl1000_lib = {
170 .set_hw_params = iwl1000_hw_set_hw_params,
171 .nic_config = iwl1000_nic_config,
172 .eeprom_ops = {
173 .regulatory_bands = {
174 EEPROM_REG_BAND_1_CHANNELS,
175 EEPROM_REG_BAND_2_CHANNELS,
176 EEPROM_REG_BAND_3_CHANNELS,
177 EEPROM_REG_BAND_4_CHANNELS,
178 EEPROM_REG_BAND_5_CHANNELS,
179 EEPROM_REG_BAND_24_HT40_CHANNELS,
180 EEPROM_REGULATORY_BAND_NO_HT40,
181 },
182 },
183 .temperature = iwlagn_temperature,
184};
185
186static struct iwl_base_params iwl1000_base_params = {
187 .num_of_queues = IWLAGN_NUM_QUEUES,
188 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
189 .eeprom_size = OTP_LOW_IMAGE_SIZE,
190 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
191 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
192 .shadow_ram_support = false,
193 .led_compensation = 51,
194 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
195 .support_ct_kill_exit = true,
196 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
197 .chain_noise_scale = 1000,
198 .wd_timeout = IWL_DEF_WD_TIMEOUT,
199 .max_event_log_size = 128,
200};
201static struct iwl_ht_params iwl1000_ht_params = {
202 .ht_greenfield_support = true,
203 .use_rts_for_aggregation = true, /* use rts/cts protection */
204 .smps_mode = IEEE80211_SMPS_STATIC,
205};
206
207#define IWL_DEVICE_1000 \
208 .fw_name_pre = IWL1000_FW_PRE, \
209 .ucode_api_max = IWL1000_UCODE_API_MAX, \
210 .ucode_api_min = IWL1000_UCODE_API_MIN, \
211 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
212 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
213 .lib = &iwl1000_lib, \
214 .base_params = &iwl1000_base_params, \
215 .led_mode = IWL_LED_BLINK
216
217struct iwl_cfg iwl1000_bgn_cfg = {
218 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
219 IWL_DEVICE_1000,
220 .ht_params = &iwl1000_ht_params,
221};
222
223struct iwl_cfg iwl1000_bg_cfg = {
224 .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
225 IWL_DEVICE_1000,
226};
227
228#define IWL_DEVICE_100 \
229 .fw_name_pre = IWL100_FW_PRE, \
230 .ucode_api_max = IWL100_UCODE_API_MAX, \
231 .ucode_api_min = IWL100_UCODE_API_MIN, \
232 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
233 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
234 .lib = &iwl1000_lib, \
235 .base_params = &iwl1000_base_params, \
236 .led_mode = IWL_LED_RF_STATE, \
237 .rx_with_siso_diversity = true
238
239struct iwl_cfg iwl100_bgn_cfg = {
240 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
241 IWL_DEVICE_100,
242 .ht_params = &iwl1000_ht_params,
243};
244
245struct iwl_cfg iwl100_bg_cfg = {
246 .name = "Intel(R) Centrino(R) Wireless-N 100 BG",
247 IWL_DEVICE_100,
248};
249
250MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
251MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
new file mode 100644
index 00000000000..0e13f0bb2e1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -0,0 +1,362 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <linux/wireless.h>
34#include <net/mac80211.h>
35#include <linux/etherdevice.h>
36#include <asm/unaligned.h>
37#include <linux/stringify.h>
38
39#include "iwl-eeprom.h"
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43#include "iwl-sta.h"
44#include "iwl-agn.h"
45#include "iwl-helpers.h"
46#include "iwl-agn-hw.h"
47#include "iwl-6000-hw.h"
48
49/* Highest firmware API version supported */
50#define IWL2030_UCODE_API_MAX 5
51#define IWL2000_UCODE_API_MAX 5
52#define IWL105_UCODE_API_MAX 5
53#define IWL135_UCODE_API_MAX 5
54
55/* Lowest firmware API version supported */
56#define IWL2030_UCODE_API_MIN 5
57#define IWL2000_UCODE_API_MIN 5
58#define IWL105_UCODE_API_MIN 5
59#define IWL135_UCODE_API_MIN 5
60
61#define IWL2030_FW_PRE "iwlwifi-2030-"
62#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
63
64#define IWL2000_FW_PRE "iwlwifi-2000-"
65#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
66
67#define IWL105_FW_PRE "iwlwifi-105-"
68#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
69
70#define IWL135_FW_PRE "iwlwifi-135-"
71#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
72
73static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
74{
75 /* want Celsius */
76 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
77 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
78}
79
80/* NIC configuration for 2000 series */
81static void iwl2000_nic_config(struct iwl_priv *priv)
82{
83 iwl_rf_config(priv);
84
85 if (priv->cfg->iq_invert)
86 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
87 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
88}
89
90static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
91 .min_nrg_cck = 97,
92 .max_nrg_cck = 0, /* not used, set to 0 */
93 .auto_corr_min_ofdm = 80,
94 .auto_corr_min_ofdm_mrc = 128,
95 .auto_corr_min_ofdm_x1 = 105,
96 .auto_corr_min_ofdm_mrc_x1 = 192,
97
98 .auto_corr_max_ofdm = 145,
99 .auto_corr_max_ofdm_mrc = 232,
100 .auto_corr_max_ofdm_x1 = 110,
101 .auto_corr_max_ofdm_mrc_x1 = 232,
102
103 .auto_corr_min_cck = 125,
104 .auto_corr_max_cck = 175,
105 .auto_corr_min_cck_mrc = 160,
106 .auto_corr_max_cck_mrc = 310,
107 .nrg_th_cck = 97,
108 .nrg_th_ofdm = 100,
109
110 .barker_corr_th_min = 190,
111 .barker_corr_th_min_mrc = 390,
112 .nrg_th_cca = 62,
113};
114
115static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
116{
117 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
118 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
119 priv->cfg->base_params->num_of_queues =
120 iwlagn_mod_params.num_of_queues;
121
122 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
123 priv->hw_params.scd_bc_tbls_size =
124 priv->cfg->base_params->num_of_queues *
125 sizeof(struct iwlagn_scd_bc_tbl);
126 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
127 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
128 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
129
130 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
131 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
132
133 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
134 BIT(IEEE80211_BAND_5GHZ);
135
136 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
137 if (priv->cfg->rx_with_siso_diversity)
138 priv->hw_params.rx_chains_num = 1;
139 else
140 priv->hw_params.rx_chains_num =
141 num_of_ant(priv->cfg->valid_rx_ant);
142 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
143 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
144
145 iwl2000_set_ct_threshold(priv);
146
147 /* Set initial sensitivity parameters */
148 /* Set initial calibration set */
149 priv->hw_params.sens = &iwl2000_sensitivity;
150 priv->hw_params.calib_init_cfg =
151 BIT(IWL_CALIB_XTAL) |
152 BIT(IWL_CALIB_LO) |
153 BIT(IWL_CALIB_TX_IQ) |
154 BIT(IWL_CALIB_BASE_BAND);
155 if (priv->cfg->need_dc_calib)
156 priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
157 if (priv->cfg->need_temp_offset_calib)
158 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
159
160 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
161
162 return 0;
163}
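/*
 * Note the contrast with iwl1000_hw_set_hw_params() above: on the 1000
 * series the DC calibration is requested through the init calibration
 * bitmap (BIT(IWL_CALIB_DC)), whereas the 2000 family enables it via the
 * runtime calibration configuration (calib_rt_cfg) instead.
 */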
164
165static struct iwl_lib_ops iwl2000_lib = {
166 .set_hw_params = iwl2000_hw_set_hw_params,
167 .nic_config = iwl2000_nic_config,
168 .eeprom_ops = {
169 .regulatory_bands = {
170 EEPROM_REG_BAND_1_CHANNELS,
171 EEPROM_REG_BAND_2_CHANNELS,
172 EEPROM_REG_BAND_3_CHANNELS,
173 EEPROM_REG_BAND_4_CHANNELS,
174 EEPROM_REG_BAND_5_CHANNELS,
175 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
176 EEPROM_REGULATORY_BAND_NO_HT40,
177 },
178 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
179 },
180 .temperature = iwlagn_temperature,
181};
182
183static struct iwl_lib_ops iwl2030_lib = {
184 .set_hw_params = iwl2000_hw_set_hw_params,
185 .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
186 .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
187 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
188 .nic_config = iwl2000_nic_config,
189 .eeprom_ops = {
190 .regulatory_bands = {
191 EEPROM_REG_BAND_1_CHANNELS,
192 EEPROM_REG_BAND_2_CHANNELS,
193 EEPROM_REG_BAND_3_CHANNELS,
194 EEPROM_REG_BAND_4_CHANNELS,
195 EEPROM_REG_BAND_5_CHANNELS,
196 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
197 EEPROM_REGULATORY_BAND_NO_HT40,
198 },
199 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
200 },
201 .temperature = iwlagn_temperature,
202};
203
204static struct iwl_base_params iwl2000_base_params = {
205 .eeprom_size = OTP_LOW_IMAGE_SIZE,
206 .num_of_queues = IWLAGN_NUM_QUEUES,
207 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
208 .pll_cfg_val = 0,
209 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
210 .shadow_ram_support = true,
211 .led_compensation = 51,
212 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
213 .adv_thermal_throttle = true,
214 .support_ct_kill_exit = true,
215 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
216 .chain_noise_scale = 1000,
217 .wd_timeout = IWL_DEF_WD_TIMEOUT,
218 .max_event_log_size = 512,
219 .shadow_reg_enable = true,
220};
221
222
223static struct iwl_base_params iwl2030_base_params = {
224 .eeprom_size = OTP_LOW_IMAGE_SIZE,
225 .num_of_queues = IWLAGN_NUM_QUEUES,
226 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
227 .pll_cfg_val = 0,
228 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
229 .shadow_ram_support = true,
230 .led_compensation = 57,
231 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
232 .adv_thermal_throttle = true,
233 .support_ct_kill_exit = true,
234 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
235 .chain_noise_scale = 1000,
236 .wd_timeout = IWL_LONG_WD_TIMEOUT,
237 .max_event_log_size = 512,
238 .shadow_reg_enable = true,
239};
240
241static struct iwl_ht_params iwl2000_ht_params = {
242 .ht_greenfield_support = true,
243 .use_rts_for_aggregation = true, /* use rts/cts protection */
244};
245
246static struct iwl_bt_params iwl2030_bt_params = {
247 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
248 .advanced_bt_coexist = true,
249 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
250 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
251 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
252 .bt_sco_disable = true,
253 .bt_session_2 = true,
254};
255
256#define IWL_DEVICE_2000 \
257 .fw_name_pre = IWL2000_FW_PRE, \
258 .ucode_api_max = IWL2000_UCODE_API_MAX, \
259 .ucode_api_min = IWL2000_UCODE_API_MIN, \
260 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
261 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
262 .lib = &iwl2000_lib, \
263 .base_params = &iwl2000_base_params, \
264 .need_dc_calib = true, \
265 .need_temp_offset_calib = true, \
266 .led_mode = IWL_LED_RF_STATE, \
267 .iq_invert = true \
268
269struct iwl_cfg iwl2000_2bgn_cfg = {
270 .name = "2000 Series 2x2 BGN",
271 IWL_DEVICE_2000,
272 .ht_params = &iwl2000_ht_params,
273};
274
275struct iwl_cfg iwl2000_2bg_cfg = {
276 .name = "2000 Series 2x2 BG",
277 IWL_DEVICE_2000,
278};
279
280#define IWL_DEVICE_2030 \
281 .fw_name_pre = IWL2030_FW_PRE, \
282 .ucode_api_max = IWL2030_UCODE_API_MAX, \
283 .ucode_api_min = IWL2030_UCODE_API_MIN, \
284 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
285 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
286 .lib = &iwl2030_lib, \
287 .base_params = &iwl2030_base_params, \
288 .bt_params = &iwl2030_bt_params, \
289 .need_dc_calib = true, \
290 .need_temp_offset_calib = true, \
291 .led_mode = IWL_LED_RF_STATE, \
292 .adv_pm = true, \
293 .iq_invert = true \
294
295struct iwl_cfg iwl2030_2bgn_cfg = {
296 .name = "2000 Series 2x2 BGN/BT",
297 IWL_DEVICE_2030,
298 .ht_params = &iwl2000_ht_params,
299};
300
301struct iwl_cfg iwl2030_2bg_cfg = {
302 .name = "2000 Series 2x2 BG/BT",
303 IWL_DEVICE_2030,
304};
305
306#define IWL_DEVICE_105 \
307 .fw_name_pre = IWL105_FW_PRE, \
308 .ucode_api_max = IWL105_UCODE_API_MAX, \
309 .ucode_api_min = IWL105_UCODE_API_MIN, \
310 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
311 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
312 .lib = &iwl2000_lib, \
313 .base_params = &iwl2000_base_params, \
314 .need_dc_calib = true, \
315 .need_temp_offset_calib = true, \
316 .led_mode = IWL_LED_RF_STATE, \
317 .adv_pm = true, \
318 .rx_with_siso_diversity = true, \
319 .iq_invert = true \
320
321struct iwl_cfg iwl105_bg_cfg = {
322 .name = "105 Series 1x1 BG",
323 IWL_DEVICE_105,
324};
325
326struct iwl_cfg iwl105_bgn_cfg = {
327 .name = "105 Series 1x1 BGN",
328 IWL_DEVICE_105,
329 .ht_params = &iwl2000_ht_params,
330};
331
332#define IWL_DEVICE_135 \
333 .fw_name_pre = IWL135_FW_PRE, \
334 .ucode_api_max = IWL135_UCODE_API_MAX, \
335 .ucode_api_min = IWL135_UCODE_API_MIN, \
336 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
337 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
338 .lib = &iwl2030_lib, \
339 .base_params = &iwl2030_base_params, \
340 .bt_params = &iwl2030_bt_params, \
341 .need_dc_calib = true, \
342 .need_temp_offset_calib = true, \
343 .led_mode = IWL_LED_RF_STATE, \
344 .adv_pm = true, \
345 .rx_with_siso_diversity = true, \
346 .iq_invert = true \
347
348struct iwl_cfg iwl135_bg_cfg = {
349 .name = "135 Series 1x1 BG/BT",
350 IWL_DEVICE_135,
351};
352
353struct iwl_cfg iwl135_bgn_cfg = {
354 .name = "135 Series 1x1 BGN/BT",
355 IWL_DEVICE_135,
356 .ht_params = &iwl2000_ht_params,
357};
358
359MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
360MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
361MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
362MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
new file mode 100644
index 00000000000..f9630a3c79f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -0,0 +1,88 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-5000-hw.h) only for hardware-related definitions.
65 * Use iwl-commands.h for uCode API definitions.
66 */
67
68#ifndef __iwl_5000_hw_h__
69#define __iwl_5000_hw_h__
70
71/* 5150 only */
72#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
73
74static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
75{
76 u16 temperature, voltage;
77 __le16 *temp_calib =
78 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE);
79
80 temperature = le16_to_cpu(temp_calib[0]);
81 voltage = le16_to_cpu(temp_calib[1]);
82
83 /* offset = temp - volt / coeff */
84 return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
85}
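/*
 * Worked example with hypothetical EEPROM values: temperature = 410,
 * voltage = 25.  Integer division gives 25 / (-5) = -5, so the returned
 * offset is 410 - (-5) = 415.  The offset is added to raw readings in
 * iwl5150_temperature() and subtracted from the Kelvin CT-kill threshold
 * in iwl5150_set_ct_threshold() (both in iwl-5000.c).
 */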
86
87#endif /* __iwl_5000_hw_h__ */
88
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
new file mode 100644
index 00000000000..c95cefd529d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -0,0 +1,455 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/wireless.h>
35#include <net/mac80211.h>
36#include <linux/etherdevice.h>
37#include <asm/unaligned.h>
38#include <linux/stringify.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-sta.h"
45#include "iwl-helpers.h"
46#include "iwl-agn.h"
47#include "iwl-agn-hw.h"
48#include "iwl-5000-hw.h"
49#include "iwl-trans.h"
50
51/* Highest firmware API version supported */
52#define IWL5000_UCODE_API_MAX 5
53#define IWL5150_UCODE_API_MAX 2
54
55/* Lowest firmware API version supported */
56#define IWL5000_UCODE_API_MIN 1
57#define IWL5150_UCODE_API_MIN 1
58
59#define IWL5000_FW_PRE "iwlwifi-5000-"
60#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
61
62#define IWL5150_FW_PRE "iwlwifi-5150-"
63#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
64
65/* NIC configuration for 5000 series */
66static void iwl5000_nic_config(struct iwl_priv *priv)
67{
68 unsigned long flags;
69
70 iwl_rf_config(priv);
71
72 spin_lock_irqsave(&priv->lock, flags);
73
74 /* W/A : NIC is stuck in a reset state after Early PCIe power off
75 * (PCIe power is lost before PERST# is asserted),
76 * causing ME FW to lose ownership and not be able to obtain it back.
77 */
78 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
79 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
80 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
81
82
83 spin_unlock_irqrestore(&priv->lock, flags);
84}
85
86static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
87 .min_nrg_cck = 95,
88 .max_nrg_cck = 0, /* not used, set to 0 */
89 .auto_corr_min_ofdm = 90,
90 .auto_corr_min_ofdm_mrc = 170,
91 .auto_corr_min_ofdm_x1 = 120,
92 .auto_corr_min_ofdm_mrc_x1 = 240,
93
94 .auto_corr_max_ofdm = 120,
95 .auto_corr_max_ofdm_mrc = 210,
96 .auto_corr_max_ofdm_x1 = 120,
97 .auto_corr_max_ofdm_mrc_x1 = 240,
98
99 .auto_corr_min_cck = 125,
100 .auto_corr_max_cck = 200,
101 .auto_corr_min_cck_mrc = 170,
102 .auto_corr_max_cck_mrc = 400,
103 .nrg_th_cck = 95,
104 .nrg_th_ofdm = 95,
105
106 .barker_corr_th_min = 190,
107 .barker_corr_th_min_mrc = 390,
108 .nrg_th_cca = 62,
109};
110
111static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
112 .min_nrg_cck = 95,
113 .max_nrg_cck = 0, /* not used, set to 0 */
114 .auto_corr_min_ofdm = 90,
115 .auto_corr_min_ofdm_mrc = 170,
116 .auto_corr_min_ofdm_x1 = 105,
117 .auto_corr_min_ofdm_mrc_x1 = 220,
118
119 .auto_corr_max_ofdm = 120,
120 .auto_corr_max_ofdm_mrc = 210,
121 /* max = min for performance bug in 5150 DSP */
122 .auto_corr_max_ofdm_x1 = 105,
123 .auto_corr_max_ofdm_mrc_x1 = 220,
124
125 .auto_corr_min_cck = 125,
126 .auto_corr_max_cck = 200,
127 .auto_corr_min_cck_mrc = 170,
128 .auto_corr_max_cck_mrc = 400,
129 .nrg_th_cck = 95,
130 .nrg_th_ofdm = 95,
131
132 .barker_corr_th_min = 190,
133 .barker_corr_th_min_mrc = 390,
134 .nrg_th_cca = 62,
135};
136
137static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
138{
139 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
140 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
141 iwl_temp_calib_to_offset(priv);
142
143 priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
144}
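/*
 * Sketch of the unit conversion, reusing the hypothetical offset of 415
 * from the iwl-5000-hw.h note and assuming CT_KILL_THRESHOLD_LEGACY is
 * 110 (Celsius):
 *   threshold         = (110 + 273) - 415 = -32   (Kelvin minus offset)
 *   ct_kill_threshold = -32 * (-5)        = 160   (raw 5150 sensor units)
 * so the 5150 stores this threshold in the same raw units its temperature
 * sensor reports, matching the conversion in iwl5150_temperature() below.
 */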
145
146static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
147{
148 /* want Celsius */
149 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
150}
151
152static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
153{
154 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
155 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
156 priv->cfg->base_params->num_of_queues =
157 iwlagn_mod_params.num_of_queues;
158
159 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
160 priv->hw_params.scd_bc_tbls_size =
161 priv->cfg->base_params->num_of_queues *
162 sizeof(struct iwlagn_scd_bc_tbl);
163 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
164 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
165 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
166
167 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
168 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
169
170 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
171 BIT(IEEE80211_BAND_5GHZ);
172
173 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
174 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
175 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
176 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
177
178 iwl5000_set_ct_threshold(priv);
179
180 /* Set initial sensitivity parameters */
181 /* Set initial calibration set */
182 priv->hw_params.sens = &iwl5000_sensitivity;
183 priv->hw_params.calib_init_cfg =
184 BIT(IWL_CALIB_XTAL) |
185 BIT(IWL_CALIB_LO) |
186 BIT(IWL_CALIB_TX_IQ) |
187 BIT(IWL_CALIB_TX_IQ_PERD) |
188 BIT(IWL_CALIB_BASE_BAND);
189
190 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
191
192 return 0;
193}
194
195static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
196{
197 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
198 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
199 priv->cfg->base_params->num_of_queues =
200 iwlagn_mod_params.num_of_queues;
201
202 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
203 priv->hw_params.scd_bc_tbls_size =
204 priv->cfg->base_params->num_of_queues *
205 sizeof(struct iwlagn_scd_bc_tbl);
206 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
207 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
208 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
209
210 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
211 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
212
213 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
214 BIT(IEEE80211_BAND_5GHZ);
215
216 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
217 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
218 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
219 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
220
221 iwl5150_set_ct_threshold(priv);
222
223 /* Set initial sensitivity parameters */
224 /* Set initial calibration set */
225 priv->hw_params.sens = &iwl5150_sensitivity;
226 priv->hw_params.calib_init_cfg =
227 BIT(IWL_CALIB_LO) |
228 BIT(IWL_CALIB_TX_IQ) |
229 BIT(IWL_CALIB_BASE_BAND);
230 if (priv->cfg->need_dc_calib)
231 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
232
233 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
234
235 return 0;
236}
237
238static void iwl5150_temperature(struct iwl_priv *priv)
239{
240 u32 vt = 0;
241 s32 offset = iwl_temp_calib_to_offset(priv);
242
243 vt = le32_to_cpu(priv->statistics.common.temperature);
244 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
245 /* now vt holds the temperature in Kelvin */
246 priv->temperature = KELVIN_TO_CELSIUS(vt);
247 iwl_tt_handler(priv);
248}
249
250static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
251 struct ieee80211_channel_switch *ch_switch)
252{
253 /*
254 * MULTI-FIXME
255 * See iwl_mac_channel_switch.
256 */
257 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
258 struct iwl5000_channel_switch_cmd cmd;
259 const struct iwl_channel_info *ch_info;
260 u32 switch_time_in_usec, ucode_switch_time;
261 u16 ch;
262 u32 tsf_low;
263 u8 switch_count;
264 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
265 struct ieee80211_vif *vif = ctx->vif;
266 struct iwl_host_cmd hcmd = {
267 .id = REPLY_CHANNEL_SWITCH,
268 .len = { sizeof(cmd), },
269 .flags = CMD_SYNC,
270 .data = { &cmd, },
271 };
272
273 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
274 ch = ch_switch->channel->hw_value;
275 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
276 ctx->active.channel, ch);
277 cmd.channel = cpu_to_le16(ch);
278 cmd.rxon_flags = ctx->staging.flags;
279 cmd.rxon_filter_flags = ctx->staging.filter_flags;
280 switch_count = ch_switch->count;
281 tsf_low = ch_switch->timestamp & 0x0ffffffff;
282 /*
283 * calculate the ucode channel switch time
284 * adding TSF as one of the factors for when to switch
285 */
286 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
287 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
288 beacon_interval)) {
289 switch_count -= (priv->ucode_beacon_time -
290 tsf_low) / beacon_interval;
291 } else
292 switch_count = 0;
293 }
294 if (switch_count <= 1)
295 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
296 else {
297 switch_time_in_usec =
298 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
299 ucode_switch_time = iwl_usecs_to_beacons(priv,
300 switch_time_in_usec,
301 beacon_interval);
302 cmd.switch_time = iwl_add_beacon_time(priv,
303 priv->ucode_beacon_time,
304 ucode_switch_time,
305 beacon_interval);
306 }
307 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
308 cmd.switch_time);
309 ch_info = iwl_get_channel_info(priv, priv->band, ch);
310 if (ch_info)
311 cmd.expect_beacon = is_channel_radar(ch_info);
312 else {
313 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
314 ctx->active.channel, ch);
315 return -EFAULT;
316 }
317
318 return trans_send_cmd(&priv->trans, &hcmd);
319}
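/*
 * Illustration of the switch-time arithmetic above (made-up numbers): with
 * beacon_interval = 100 TU, switch_count = 5, and the uCode beacon time
 * leading the CSA timestamp by 3 beacon intervals, switch_count is trimmed
 * to 5 - 3 = 2.  Since that is > 1, the switch is scheduled
 * 2 * beacon_int * TIME_UNIT usec ahead and converted back into the uCode
 * beacon-time base via iwl_usecs_to_beacons()/iwl_add_beacon_time().
 */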
320
321static struct iwl_lib_ops iwl5000_lib = {
322 .set_hw_params = iwl5000_hw_set_hw_params,
323 .set_channel_switch = iwl5000_hw_channel_switch,
324 .nic_config = iwl5000_nic_config,
325 .eeprom_ops = {
326 .regulatory_bands = {
327 EEPROM_REG_BAND_1_CHANNELS,
328 EEPROM_REG_BAND_2_CHANNELS,
329 EEPROM_REG_BAND_3_CHANNELS,
330 EEPROM_REG_BAND_4_CHANNELS,
331 EEPROM_REG_BAND_5_CHANNELS,
332 EEPROM_REG_BAND_24_HT40_CHANNELS,
333 EEPROM_REG_BAND_52_HT40_CHANNELS
334 },
335 },
336 .temperature = iwlagn_temperature,
337};
338
339static struct iwl_lib_ops iwl5150_lib = {
340 .set_hw_params = iwl5150_hw_set_hw_params,
341 .set_channel_switch = iwl5000_hw_channel_switch,
342 .nic_config = iwl5000_nic_config,
343 .eeprom_ops = {
344 .regulatory_bands = {
345 EEPROM_REG_BAND_1_CHANNELS,
346 EEPROM_REG_BAND_2_CHANNELS,
347 EEPROM_REG_BAND_3_CHANNELS,
348 EEPROM_REG_BAND_4_CHANNELS,
349 EEPROM_REG_BAND_5_CHANNELS,
350 EEPROM_REG_BAND_24_HT40_CHANNELS,
351 EEPROM_REG_BAND_52_HT40_CHANNELS
352 },
353 },
354 .temperature = iwl5150_temperature,
355};
356
357static struct iwl_base_params iwl5000_base_params = {
358 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
359 .num_of_queues = IWLAGN_NUM_QUEUES,
360 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
361 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
362 .led_compensation = 51,
363 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
364 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
365 .chain_noise_scale = 1000,
366 .wd_timeout = IWL_LONG_WD_TIMEOUT,
367 .max_event_log_size = 512,
368 .no_idle_support = true,
369};
370static struct iwl_ht_params iwl5000_ht_params = {
371 .ht_greenfield_support = true,
372};
373
374#define IWL_DEVICE_5000 \
375 .fw_name_pre = IWL5000_FW_PRE, \
376 .ucode_api_max = IWL5000_UCODE_API_MAX, \
377 .ucode_api_min = IWL5000_UCODE_API_MIN, \
378 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
379 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
380 .lib = &iwl5000_lib, \
381 .base_params = &iwl5000_base_params, \
382 .led_mode = IWL_LED_BLINK
383
384struct iwl_cfg iwl5300_agn_cfg = {
385 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
386 IWL_DEVICE_5000,
387 /* at least EEPROM 0x11A has wrong info */
388 .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
389 .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
390 .ht_params = &iwl5000_ht_params,
391};
392
393struct iwl_cfg iwl5100_bgn_cfg = {
394 .name = "Intel(R) WiFi Link 5100 BGN",
395 IWL_DEVICE_5000,
396 .valid_tx_ant = ANT_B, /* .cfg overwrite */
397 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
398 .ht_params = &iwl5000_ht_params,
399};
400
401struct iwl_cfg iwl5100_abg_cfg = {
402 .name = "Intel(R) WiFi Link 5100 ABG",
403 IWL_DEVICE_5000,
404 .valid_tx_ant = ANT_B, /* .cfg overwrite */
405 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
406};
407
408struct iwl_cfg iwl5100_agn_cfg = {
409 .name = "Intel(R) WiFi Link 5100 AGN",
410 IWL_DEVICE_5000,
411 .valid_tx_ant = ANT_B, /* .cfg overwrite */
412 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
413 .ht_params = &iwl5000_ht_params,
414};
415
416struct iwl_cfg iwl5350_agn_cfg = {
417 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
418 .fw_name_pre = IWL5000_FW_PRE,
419 .ucode_api_max = IWL5000_UCODE_API_MAX,
420 .ucode_api_min = IWL5000_UCODE_API_MIN,
421 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
422 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
423 .lib = &iwl5000_lib,
424 .base_params = &iwl5000_base_params,
425 .ht_params = &iwl5000_ht_params,
426 .led_mode = IWL_LED_BLINK,
427 .internal_wimax_coex = true,
428};
429
430#define IWL_DEVICE_5150 \
431 .fw_name_pre = IWL5150_FW_PRE, \
432 .ucode_api_max = IWL5150_UCODE_API_MAX, \
433 .ucode_api_min = IWL5150_UCODE_API_MIN, \
434 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
435 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
436 .lib = &iwl5150_lib, \
437 .base_params = &iwl5000_base_params, \
438 .need_dc_calib = true, \
439 .led_mode = IWL_LED_BLINK, \
440 .internal_wimax_coex = true
441
442struct iwl_cfg iwl5150_agn_cfg = {
443 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
444 IWL_DEVICE_5150,
445 .ht_params = &iwl5000_ht_params,
446
447};
448
449struct iwl_cfg iwl5150_abg_cfg = {
450 .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
451 IWL_DEVICE_5150,
452};
453
454MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
455MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
new file mode 100644
index 00000000000..b27986e57c9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -0,0 +1,81 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-6000-hw.h) only for hardware-related definitions.
65 * Use iwl-commands.h for uCode API definitions.
66 */
67
68#ifndef __iwl_6000_hw_h__
69#define __iwl_6000_hw_h__
70
71#define IWL60_RTC_INST_LOWER_BOUND (0x000000)
72#define IWL60_RTC_INST_UPPER_BOUND (0x040000)
73#define IWL60_RTC_DATA_LOWER_BOUND (0x800000)
74#define IWL60_RTC_DATA_UPPER_BOUND (0x814000)
75#define IWL60_RTC_INST_SIZE \
76 (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND)
77#define IWL60_RTC_DATA_SIZE \
78 (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND)
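/*
 * For reference, these address bounds determine the maximum uCode image
 * sizes accepted for 6000-family (and 2000-family) devices:
 *   IWL60_RTC_INST_SIZE = 0x040000 - 0x000000 = 256 KiB of instruction memory
 *   IWL60_RTC_DATA_SIZE = 0x814000 - 0x800000 =  80 KiB of data memory
 * They feed max_inst_size/max_data_size in iwl2000_hw_set_hw_params() and
 * iwl6000_hw_set_hw_params().
 */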
79
80#endif /* __iwl_6000_hw_h__ */
81
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
new file mode 100644
index 00000000000..973d1972e8c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -0,0 +1,570 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <linux/wireless.h>
34#include <net/mac80211.h>
35#include <linux/etherdevice.h>
36#include <asm/unaligned.h>
37#include <linux/stringify.h>
38
39#include "iwl-eeprom.h"
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43#include "iwl-sta.h"
44#include "iwl-agn.h"
45#include "iwl-helpers.h"
46#include "iwl-agn-hw.h"
47#include "iwl-6000-hw.h"
48#include "iwl-trans.h"
49
50/* Highest firmware API version supported */
51#define IWL6000_UCODE_API_MAX 4
52#define IWL6050_UCODE_API_MAX 5
53#define IWL6000G2_UCODE_API_MAX 5
54
55/* Lowest firmware API version supported */
56#define IWL6000_UCODE_API_MIN 4
57#define IWL6050_UCODE_API_MIN 4
58#define IWL6000G2_UCODE_API_MIN 4
59
60#define IWL6000_FW_PRE "iwlwifi-6000-"
61#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
62
63#define IWL6050_FW_PRE "iwlwifi-6050-"
64#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
65
66#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
67#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
68
69#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
70#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
71
72static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
73{
74 /* want Celsius */
75 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
76 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
77}
78
79static void iwl6050_additional_nic_config(struct iwl_priv *priv)
80{
81 /* Indicate calibration version to uCode. */
82 if (iwlagn_eeprom_calib_version(priv) >= 6)
83 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
84 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
85}
86
87static void iwl6150_additional_nic_config(struct iwl_priv *priv)
88{
89 /* Indicate calibration version to uCode. */
90 if (iwlagn_eeprom_calib_version(priv) >= 6)
91 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
92 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
93 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
94 CSR_GP_DRIVER_REG_BIT_6050_1x2);
95}
96
97/* NIC configuration for 6000 series */
98static void iwl6000_nic_config(struct iwl_priv *priv)
99{
100 iwl_rf_config(priv);
101
102 /* no locking required for register write */
103 if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
104 /* 2x2 IPA phy type */
105 iwl_write32(priv, CSR_GP_DRIVER_REG,
106 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
107 }
108 /* do additional nic configuration if needed */
109 if (priv->cfg->additional_nic_config)
110 priv->cfg->additional_nic_config(priv);
111}
112
113static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
114 .min_nrg_cck = 97,
115 .max_nrg_cck = 0, /* not used, set to 0 */
116 .auto_corr_min_ofdm = 80,
117 .auto_corr_min_ofdm_mrc = 128,
118 .auto_corr_min_ofdm_x1 = 105,
119 .auto_corr_min_ofdm_mrc_x1 = 192,
120
121 .auto_corr_max_ofdm = 145,
122 .auto_corr_max_ofdm_mrc = 232,
123 .auto_corr_max_ofdm_x1 = 110,
124 .auto_corr_max_ofdm_mrc_x1 = 232,
125
126 .auto_corr_min_cck = 125,
127 .auto_corr_max_cck = 175,
128 .auto_corr_min_cck_mrc = 160,
129 .auto_corr_max_cck_mrc = 310,
130 .nrg_th_cck = 97,
131 .nrg_th_ofdm = 100,
132
133 .barker_corr_th_min = 190,
134 .barker_corr_th_min_mrc = 390,
135 .nrg_th_cca = 62,
136};
137
138static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
139{
140 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
141 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
142 priv->cfg->base_params->num_of_queues =
143 iwlagn_mod_params.num_of_queues;
144
145 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
146 priv->hw_params.scd_bc_tbls_size =
147 priv->cfg->base_params->num_of_queues *
148 sizeof(struct iwlagn_scd_bc_tbl);
149 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
150 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
151 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
152
153 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
154 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
155
156 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
157 BIT(IEEE80211_BAND_5GHZ);
158
159 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
160 if (priv->cfg->rx_with_siso_diversity)
161 priv->hw_params.rx_chains_num = 1;
162 else
163 priv->hw_params.rx_chains_num =
164 num_of_ant(priv->cfg->valid_rx_ant);
165 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
166 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
167
168 iwl6000_set_ct_threshold(priv);
169
170 /* Set initial sensitivity parameters */
171 /* Set initial calibration set */
172 priv->hw_params.sens = &iwl6000_sensitivity;
173 priv->hw_params.calib_init_cfg =
174 BIT(IWL_CALIB_XTAL) |
175 BIT(IWL_CALIB_LO) |
176 BIT(IWL_CALIB_TX_IQ) |
177 BIT(IWL_CALIB_BASE_BAND);
178 if (priv->cfg->need_dc_calib)
179 priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
180 if (priv->cfg->need_temp_offset_calib)
181 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
182
183 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
184
185 return 0;
186}
187
188static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
189 struct ieee80211_channel_switch *ch_switch)
190{
191 /*
192 * MULTI-FIXME
193 * See iwl_mac_channel_switch.
194 */
195 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
196 struct iwl6000_channel_switch_cmd cmd;
197 const struct iwl_channel_info *ch_info;
198 u32 switch_time_in_usec, ucode_switch_time;
199 u16 ch;
200 u32 tsf_low;
201 u8 switch_count;
202 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
203 struct ieee80211_vif *vif = ctx->vif;
204 struct iwl_host_cmd hcmd = {
205 .id = REPLY_CHANNEL_SWITCH,
206 .len = { sizeof(cmd), },
207 .flags = CMD_SYNC,
208 .data = { &cmd, },
209 };
210
211 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
212 ch = ch_switch->channel->hw_value;
213 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
214 ctx->active.channel, ch);
215 cmd.channel = cpu_to_le16(ch);
216 cmd.rxon_flags = ctx->staging.flags;
217 cmd.rxon_filter_flags = ctx->staging.filter_flags;
218 switch_count = ch_switch->count;
219 tsf_low = ch_switch->timestamp & 0x0ffffffff;
220 /*
221 * calculate the ucode channel switch time
222 * adding TSF as one of the factors for when to switch
223 */
224 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
225 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
226 beacon_interval)) {
227 switch_count -= (priv->ucode_beacon_time -
228 tsf_low) / beacon_interval;
229 } else
230 switch_count = 0;
231 }
232 if (switch_count <= 1)
233 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
234 else {
235 switch_time_in_usec =
236 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
237 ucode_switch_time = iwl_usecs_to_beacons(priv,
238 switch_time_in_usec,
239 beacon_interval);
240 cmd.switch_time = iwl_add_beacon_time(priv,
241 priv->ucode_beacon_time,
242 ucode_switch_time,
243 beacon_interval);
244 }
245 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
246 cmd.switch_time);
247 ch_info = iwl_get_channel_info(priv, priv->band, ch);
248 if (ch_info)
249 cmd.expect_beacon = is_channel_radar(ch_info);
250 else {
251 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
252 ctx->active.channel, ch);
253 return -EFAULT;
254 }
255
256 return trans_send_cmd(&priv->trans, &hcmd);
257}
258
259static struct iwl_lib_ops iwl6000_lib = {
260 .set_hw_params = iwl6000_hw_set_hw_params,
261 .set_channel_switch = iwl6000_hw_channel_switch,
262 .nic_config = iwl6000_nic_config,
263 .eeprom_ops = {
264 .regulatory_bands = {
265 EEPROM_REG_BAND_1_CHANNELS,
266 EEPROM_REG_BAND_2_CHANNELS,
267 EEPROM_REG_BAND_3_CHANNELS,
268 EEPROM_REG_BAND_4_CHANNELS,
269 EEPROM_REG_BAND_5_CHANNELS,
270 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
271 EEPROM_REG_BAND_52_HT40_CHANNELS
272 },
273 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
274 },
275 .temperature = iwlagn_temperature,
276};
277
278static struct iwl_lib_ops iwl6030_lib = {
279 .set_hw_params = iwl6000_hw_set_hw_params,
280 .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
281 .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
282 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
283 .set_channel_switch = iwl6000_hw_channel_switch,
284 .nic_config = iwl6000_nic_config,
285 .eeprom_ops = {
286 .regulatory_bands = {
287 EEPROM_REG_BAND_1_CHANNELS,
288 EEPROM_REG_BAND_2_CHANNELS,
289 EEPROM_REG_BAND_3_CHANNELS,
290 EEPROM_REG_BAND_4_CHANNELS,
291 EEPROM_REG_BAND_5_CHANNELS,
292 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
293 EEPROM_REG_BAND_52_HT40_CHANNELS
294 },
295 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
296 },
297 .temperature = iwlagn_temperature,
298};
299
300static struct iwl_base_params iwl6000_base_params = {
301 .eeprom_size = OTP_LOW_IMAGE_SIZE,
302 .num_of_queues = IWLAGN_NUM_QUEUES,
303 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
304 .pll_cfg_val = 0,
305 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
306 .shadow_ram_support = true,
307 .led_compensation = 51,
308 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
309 .adv_thermal_throttle = true,
310 .support_ct_kill_exit = true,
311 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
312 .chain_noise_scale = 1000,
313 .wd_timeout = IWL_DEF_WD_TIMEOUT,
314 .max_event_log_size = 512,
315 .shadow_reg_enable = true,
316};
317
318static struct iwl_base_params iwl6050_base_params = {
319 .eeprom_size = OTP_LOW_IMAGE_SIZE,
320 .num_of_queues = IWLAGN_NUM_QUEUES,
321 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
322 .pll_cfg_val = 0,
323 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
324 .shadow_ram_support = true,
325 .led_compensation = 51,
326 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
327 .adv_thermal_throttle = true,
328 .support_ct_kill_exit = true,
329 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
330 .chain_noise_scale = 1500,
331 .wd_timeout = IWL_DEF_WD_TIMEOUT,
332 .max_event_log_size = 1024,
333 .shadow_reg_enable = true,
334};
335static struct iwl_base_params iwl6000_g2_base_params = {
336 .eeprom_size = OTP_LOW_IMAGE_SIZE,
337 .num_of_queues = IWLAGN_NUM_QUEUES,
338 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
339 .pll_cfg_val = 0,
340 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
341 .shadow_ram_support = true,
342 .led_compensation = 57,
343 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
344 .adv_thermal_throttle = true,
345 .support_ct_kill_exit = true,
346 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
347 .chain_noise_scale = 1000,
348 .wd_timeout = IWL_LONG_WD_TIMEOUT,
349 .max_event_log_size = 512,
350 .shadow_reg_enable = true,
351};
352
353static struct iwl_ht_params iwl6000_ht_params = {
354 .ht_greenfield_support = true,
355 .use_rts_for_aggregation = true, /* use rts/cts protection */
356};
357
358static struct iwl_bt_params iwl6000_bt_params = {
359 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
360 .advanced_bt_coexist = true,
361 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
362 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
363 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
364 .bt_sco_disable = true,
365};
366
367#define IWL_DEVICE_6005 \
368 .fw_name_pre = IWL6005_FW_PRE, \
369 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
370 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
371 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
372 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
373 .lib = &iwl6000_lib, \
374 .base_params = &iwl6000_g2_base_params, \
375 .need_dc_calib = true, \
376 .need_temp_offset_calib = true, \
377 .led_mode = IWL_LED_RF_STATE
378
379struct iwl_cfg iwl6005_2agn_cfg = {
380 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
381 IWL_DEVICE_6005,
382 .ht_params = &iwl6000_ht_params,
383};
384
385struct iwl_cfg iwl6005_2abg_cfg = {
386 .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
387 IWL_DEVICE_6005,
388};
389
390struct iwl_cfg iwl6005_2bg_cfg = {
391 .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
392 IWL_DEVICE_6005,
393};
394
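The IWL_DEVICE_6005 and IWL_DEVICE_6030 macros above keep the initializers shared by a whole device family in one place; each named struct iwl_cfg then adds only what differs, such as .ht_params or .rx_with_siso_diversity. A standalone sketch of the same macro-plus-designated-initializer idiom, with made-up structure and field names:

#include <stdio.h>

/* Illustrative stand-ins for the driver's config structures. */
struct demo_ht_params { int greenfield; };
struct demo_cfg {
	const char *name;
	const char *fw_name_pre;
	int need_dc_calib;
	const struct demo_ht_params *ht_params;	/* NULL when 11n is absent */
};

static const struct demo_ht_params demo_ht = { .greenfield = 1 };

/* Initializers shared by every member of the family. */
#define DEMO_DEVICE_BASE		\
	.fw_name_pre = "demo-fw-",	\
	.need_dc_calib = 1

static const struct demo_cfg demo_agn_cfg = {
	.name = "Demo Adapter AGN",
	DEMO_DEVICE_BASE,
	.ht_params = &demo_ht,		/* only the AGN variant enables HT */
};

static const struct demo_cfg demo_abg_cfg = {
	.name = "Demo Adapter ABG",
	DEMO_DEVICE_BASE,		/* ht_params stays NULL */
};

int main(void)
{
	printf("%s: ht=%s\n", demo_agn_cfg.name,
	       demo_agn_cfg.ht_params ? "yes" : "no");
	printf("%s: ht=%s\n", demo_abg_cfg.name,
	       demo_abg_cfg.ht_params ? "yes" : "no");
	return 0;
}

Keeping the shared block in a macro lets every variant stay a flat, compile-time constant initializer.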
395#define IWL_DEVICE_6030 \
396 .fw_name_pre = IWL6030_FW_PRE, \
397 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
398 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
399 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
400 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
401 .lib = &iwl6030_lib, \
402 .base_params = &iwl6000_g2_base_params, \
403 .bt_params = &iwl6000_bt_params, \
404 .need_dc_calib = true, \
405 .need_temp_offset_calib = true, \
406 .led_mode = IWL_LED_RF_STATE, \
407 .adv_pm = true \
408
409struct iwl_cfg iwl6030_2agn_cfg = {
410 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
411 IWL_DEVICE_6030,
412 .ht_params = &iwl6000_ht_params,
413};
414
415struct iwl_cfg iwl6030_2abg_cfg = {
416 .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
417 IWL_DEVICE_6030,
418};
419
420struct iwl_cfg iwl6030_2bgn_cfg = {
421 .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
422 IWL_DEVICE_6030,
423 .ht_params = &iwl6000_ht_params,
424};
425
426struct iwl_cfg iwl6030_2bg_cfg = {
427 .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
428 IWL_DEVICE_6030,
429};
430
431struct iwl_cfg iwl6035_2agn_cfg = {
432 .name = "6035 Series 2x2 AGN/BT",
433 IWL_DEVICE_6030,
434 .ht_params = &iwl6000_ht_params,
435};
436
437struct iwl_cfg iwl6035_2abg_cfg = {
438 .name = "6035 Series 2x2 ABG/BT",
439 IWL_DEVICE_6030,
440};
441
442struct iwl_cfg iwl6035_2bg_cfg = {
443 .name = "6035 Series 2x2 BG/BT",
444 IWL_DEVICE_6030,
445};
446
447struct iwl_cfg iwl1030_bgn_cfg = {
448 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
449 IWL_DEVICE_6030,
450 .ht_params = &iwl6000_ht_params,
451};
452
453struct iwl_cfg iwl1030_bg_cfg = {
454 .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
455 IWL_DEVICE_6030,
456};
457
458struct iwl_cfg iwl130_bgn_cfg = {
459 .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
460 IWL_DEVICE_6030,
461 .ht_params = &iwl6000_ht_params,
462 .rx_with_siso_diversity = true,
463};
464
465struct iwl_cfg iwl130_bg_cfg = {
466 .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
467 IWL_DEVICE_6030,
468 .rx_with_siso_diversity = true,
469};
470
471/*
472 * "i": Internal configuration, use internal Power Amplifier
473 */
474#define IWL_DEVICE_6000i \
475 .fw_name_pre = IWL6000_FW_PRE, \
476 .ucode_api_max = IWL6000_UCODE_API_MAX, \
477 .ucode_api_min = IWL6000_UCODE_API_MIN, \
478 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
479 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
480 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
481 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
482 .lib = &iwl6000_lib, \
483 .base_params = &iwl6000_base_params, \
484 .pa_type = IWL_PA_INTERNAL, \
485 .led_mode = IWL_LED_BLINK
486
487struct iwl_cfg iwl6000i_2agn_cfg = {
488 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
489 IWL_DEVICE_6000i,
490 .ht_params = &iwl6000_ht_params,
491};
492
493struct iwl_cfg iwl6000i_2abg_cfg = {
494 .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
495 IWL_DEVICE_6000i,
496};
497
498struct iwl_cfg iwl6000i_2bg_cfg = {
499 .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
500 IWL_DEVICE_6000i,
501};
502
503#define IWL_DEVICE_6050 \
504 .fw_name_pre = IWL6050_FW_PRE, \
505 .ucode_api_max = IWL6050_UCODE_API_MAX, \
506 .ucode_api_min = IWL6050_UCODE_API_MIN, \
507 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
508 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
509 .lib = &iwl6000_lib, \
510 .additional_nic_config = iwl6050_additional_nic_config, \
511 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
512 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
513 .base_params = &iwl6050_base_params, \
514 .need_dc_calib = true, \
515 .led_mode = IWL_LED_BLINK, \
516 .internal_wimax_coex = true
517
518struct iwl_cfg iwl6050_2agn_cfg = {
519 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
520 IWL_DEVICE_6050,
521 .ht_params = &iwl6000_ht_params,
522};
523
524struct iwl_cfg iwl6050_2abg_cfg = {
525 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
526 IWL_DEVICE_6050,
527};
528
529#define IWL_DEVICE_6150 \
530 .fw_name_pre = IWL6050_FW_PRE, \
531 .ucode_api_max = IWL6050_UCODE_API_MAX, \
532 .ucode_api_min = IWL6050_UCODE_API_MIN, \
533 .lib = &iwl6000_lib, \
534 .additional_nic_config = iwl6150_additional_nic_config, \
535 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
536 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
537 .base_params = &iwl6050_base_params, \
538 .need_dc_calib = true, \
539 .led_mode = IWL_LED_BLINK, \
540 .internal_wimax_coex = true
541
542struct iwl_cfg iwl6150_bgn_cfg = {
543 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
544 IWL_DEVICE_6150,
545 .ht_params = &iwl6000_ht_params,
546};
547
548struct iwl_cfg iwl6150_bg_cfg = {
549 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
550 IWL_DEVICE_6150,
551};
552
553struct iwl_cfg iwl6000_3agn_cfg = {
554 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
555 .fw_name_pre = IWL6000_FW_PRE,
556 .ucode_api_max = IWL6000_UCODE_API_MAX,
557 .ucode_api_min = IWL6000_UCODE_API_MIN,
558 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
559 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
560 .lib = &iwl6000_lib,
561 .base_params = &iwl6000_base_params,
562 .ht_params = &iwl6000_ht_params,
563 .need_dc_calib = true,
564 .led_mode = IWL_LED_BLINK,
565};
566
567MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
568MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
569MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
570MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
new file mode 100644
index 00000000000..72d6297602b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -0,0 +1,1083 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/slab.h>
64#include <net/mac80211.h>
65
66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-agn-calib.h"
69#include "iwl-trans.h"
70#include "iwl-agn.h"
71
72/*****************************************************************************
73 * INIT calibrations framework
74 *****************************************************************************/
75
76struct statistics_general_data {
77 u32 beacon_silence_rssi_a;
78 u32 beacon_silence_rssi_b;
79 u32 beacon_silence_rssi_c;
80 u32 beacon_energy_a;
81 u32 beacon_energy_b;
82 u32 beacon_energy_c;
83};
84
85int iwl_send_calib_results(struct iwl_priv *priv)
86{
87 int ret = 0;
88 int i = 0;
89
90 struct iwl_host_cmd hcmd = {
91 .id = REPLY_PHY_CALIBRATION_CMD,
92 .flags = CMD_SYNC,
93 };
94
95 for (i = 0; i < IWL_CALIB_MAX; i++) {
96 if ((BIT(i) & priv->hw_params.calib_init_cfg) &&
97 priv->calib_results[i].buf) {
98 hcmd.len[0] = priv->calib_results[i].buf_len;
99 hcmd.data[0] = priv->calib_results[i].buf;
100 hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
101 ret = trans_send_cmd(&priv->trans, &hcmd);
102 if (ret) {
103 IWL_ERR(priv, "Error %d iteration %d\n",
104 ret, i);
105 break;
106 }
107 }
108 }
109
110 return ret;
111}
112
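iwl_send_calib_results() above walks the stored calibration buffers and transmits only those whose index bit is set in calib_init_cfg (the mask assembled in the hw_params setup earlier in this patch). A standalone sketch of that bitmask-gated loop, with hypothetical names standing in for the host-command machinery:

#include <stdio.h>
#include <stdint.h>

#define DEMO_CALIB_MAX 4

struct demo_calib_result {
	const void *buf;
	size_t buf_len;
};

/* Send every stored result whose index bit is set in init_cfg. */
static int demo_send_calib_results(uint32_t init_cfg,
				   const struct demo_calib_result *res)
{
	int i;

	for (i = 0; i < DEMO_CALIB_MAX; i++) {
		if ((init_cfg & (1u << i)) && res[i].buf) {
			/* stand-in for the real host command transmission */
			printf("sending calib %d (%zu bytes)\n",
			       i, res[i].buf_len);
		}
	}
	return 0;
}

int main(void)
{
	static const char xtal_data[] = "XTAL", lo_data[] = "LO";
	struct demo_calib_result res[DEMO_CALIB_MAX] = {
		[1] = { .buf = xtal_data, .buf_len = sizeof(xtal_data) },
		[2] = { .buf = lo_data,   .buf_len = sizeof(lo_data) },
	};

	/* index 3 is configured but has no buffer, so it is skipped */
	return demo_send_calib_results((1u << 1) | (1u << 2) | (1u << 3), res);
}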
113int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
114{
115 if (res->buf_len != len) {
116 kfree(res->buf);
117 res->buf = kzalloc(len, GFP_ATOMIC);
118 }
119 if (unlikely(res->buf == NULL))
120 return -ENOMEM;
121
122 res->buf_len = len;
123 memcpy(res->buf, buf, len);
124 return 0;
125}
126
127void iwl_calib_free_results(struct iwl_priv *priv)
128{
129 int i;
130
131 for (i = 0; i < IWL_CALIB_MAX; i++) {
132 kfree(priv->calib_results[i].buf);
133 priv->calib_results[i].buf = NULL;
134 priv->calib_results[i].buf_len = 0;
135 }
136}
137
138/*****************************************************************************
139 * RUNTIME calibrations framework
140 *****************************************************************************/
141
142/* "false alarms" are signals that our DSP tries to lock onto,
143 * but then determines that they are either noise, or transmissions
144 * from a distant wireless network (also "noise", really) that get
145 * "stepped on" by stronger transmissions within our own network.
146 * This algorithm attempts to set a sensitivity level that is high
147 * enough to receive all of our own network traffic, but not so
148 * high that our DSP gets too busy trying to lock onto non-network
149 * activity/noise. */
150static int iwl_sens_energy_cck(struct iwl_priv *priv,
151 u32 norm_fa,
152 u32 rx_enable_time,
153 struct statistics_general_data *rx_info)
154{
155 u32 max_nrg_cck = 0;
156 int i = 0;
157 u8 max_silence_rssi = 0;
158 u32 silence_ref = 0;
159 u8 silence_rssi_a = 0;
160 u8 silence_rssi_b = 0;
161 u8 silence_rssi_c = 0;
162 u32 val;
163
164 /* "false_alarms" values below are cross-multiplications to assess the
165 * numbers of false alarms within the measured period of actual Rx
166 * (Rx is off when we're txing), vs the min/max expected false alarms
167 * (some should be expected if rx is sensitive enough) in a
168 * hypothetical listening period of 200 time units (TU), 204.8 msec:
169 *
170 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
171 *
172 * */
173 u32 false_alarms = norm_fa * 200 * 1024;
174 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
175 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
176 struct iwl_sensitivity_data *data = NULL;
177 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
178
179 data = &(priv->sensitivity_data);
180
181 data->nrg_auto_corr_silence_diff = 0;
182
183 /* Find max silence rssi among all 3 receivers.
184 * This is background noise, which may include transmissions from other
185 * networks, measured during silence before our network's beacon */
186 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
187 ALL_BAND_FILTER) >> 8);
188 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
189 ALL_BAND_FILTER) >> 8);
190 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
191 ALL_BAND_FILTER) >> 8);
192
193 val = max(silence_rssi_b, silence_rssi_c);
194 max_silence_rssi = max(silence_rssi_a, (u8) val);
195
196 /* Store silence rssi in 20-beacon history table */
197 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
198 data->nrg_silence_idx++;
199 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
200 data->nrg_silence_idx = 0;
201
202 /* Find max silence rssi across 20 beacon history */
203 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
204 val = data->nrg_silence_rssi[i];
205 silence_ref = max(silence_ref, val);
206 }
207 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
208 silence_rssi_a, silence_rssi_b, silence_rssi_c,
209 silence_ref);
210
211 /* Find max rx energy (min value!) among all 3 receivers,
212 * measured during beacon frame.
213 * Save it in 10-beacon history table. */
214 i = data->nrg_energy_idx;
215 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
216 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
217
218 data->nrg_energy_idx++;
219 if (data->nrg_energy_idx >= 10)
220 data->nrg_energy_idx = 0;
221
222 /* Find min rx energy (max value) across 10 beacon history.
223 * This is the minimum signal level that we want to receive well.
224 * Add backoff (margin so we don't miss slightly lower energy frames).
225 * This establishes an upper bound (min value) for energy threshold. */
226 max_nrg_cck = data->nrg_value[0];
227 for (i = 1; i < 10; i++)
228 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
229 max_nrg_cck += 6;
230
231 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
232 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
233 rx_info->beacon_energy_c, max_nrg_cck - 6);
234
235 /* Count number of consecutive beacons with fewer-than-desired
236 * false alarms. */
237 if (false_alarms < min_false_alarms)
238 data->num_in_cck_no_fa++;
239 else
240 data->num_in_cck_no_fa = 0;
241 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
242 data->num_in_cck_no_fa);
243
244 /* If we got too many false alarms this time, reduce sensitivity */
245 if ((false_alarms > max_false_alarms) &&
246 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
247 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
248 false_alarms, max_false_alarms);
249 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
250 data->nrg_curr_state = IWL_FA_TOO_MANY;
251 /* Store for "fewer than desired" on later beacon */
252 data->nrg_silence_ref = silence_ref;
253
254 /* increase energy threshold (reduce nrg value)
255 * to decrease sensitivity */
256 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
257 /* Else if we got fewer than desired, increase sensitivity */
258 } else if (false_alarms < min_false_alarms) {
259 data->nrg_curr_state = IWL_FA_TOO_FEW;
260
261 /* Compare silence level with silence level for most recent
262 * healthy number or too many false alarms */
263 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
264 (s32)silence_ref;
265
266 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u, silence diff %d\n",
267 false_alarms, min_false_alarms,
268 data->nrg_auto_corr_silence_diff);
269
270 /* Increase value to increase sensitivity, but only if:
271 * 1a) previous beacon did *not* have *too many* false alarms
272 * 1b) AND there's a significant difference in Rx levels
273 * from a previous beacon with too many, or healthy # FAs
274 * OR 2) We've seen a lot of beacons (100) with too few
275 * false alarms */
276 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
277 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
278 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
279
280 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
281 /* Increase nrg value to increase sensitivity */
282 val = data->nrg_th_cck + NRG_STEP_CCK;
283 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
284 } else {
285 IWL_DEBUG_CALIB(priv, "... but not changing sensitivity\n");
286 }
287
288 /* Else we got a healthy number of false alarms, keep status quo */
289 } else {
290 IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
291 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
292
293 /* Store for use in "fewer than desired" with later beacon */
294 data->nrg_silence_ref = silence_ref;
295
296 /* If previous beacon had too many false alarms,
297 * give it some extra margin by reducing sensitivity again
298 * (but don't go below measured energy of desired Rx) */
299 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
300 IWL_DEBUG_CALIB(priv, "... increasing margin\n");
301 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
302 data->nrg_th_cck -= NRG_MARGIN;
303 else
304 data->nrg_th_cck = max_nrg_cck;
305 }
306 }
307
308 /* Make sure the energy threshold does not go above the measured
309 * energy of the desired Rx signals (reduced by backoff margin),
310 * or else we might start missing Rx frames.
311 * Lower value is higher energy, so we use max()!
312 */
313 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
314 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
315
316 data->nrg_prev_state = data->nrg_curr_state;
317
318 /* Auto-correlation CCK algorithm */
319 if (false_alarms > min_false_alarms) {
320
321 /* increase auto_corr values to decrease sensitivity
322 * so the DSP won't be disturbed by the noise
323 */
324 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
325 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
326 else {
327 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
328 data->auto_corr_cck =
329 min((u32)ranges->auto_corr_max_cck, val);
330 }
331 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
332 data->auto_corr_cck_mrc =
333 min((u32)ranges->auto_corr_max_cck_mrc, val);
334 } else if ((false_alarms < min_false_alarms) &&
335 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
336 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
337
338 /* Decrease auto_corr values to increase sensitivity */
339 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
340 data->auto_corr_cck =
341 max((u32)ranges->auto_corr_min_cck, val);
342 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
343 data->auto_corr_cck_mrc =
344 max((u32)ranges->auto_corr_min_cck_mrc, val);
345 }
346
347 return 0;
348}
349
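The cross-multiplication at the top of iwl_sens_energy_cck() avoids dividing by the receive time: instead of comparing false alarms per microsecond against MAX_FA_CCK per 200 TU window (one TU is 1024 usec, hence the 204.8 msec in the comment), both sides are scaled so that norm_fa * 200 * 1024 can be compared directly with MAX_FA_CCK * rx_enable_time. A small standalone check of that comparison, with illustrative thresholds rather than the driver's real MIN_FA_CCK/MAX_FA_CCK values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative thresholds: allowed false alarms per 200 TU window. */
#define DEMO_MIN_FA_CCK	5
#define DEMO_MAX_FA_CCK	50

/* Returns -1 for "too few", 0 for "in range", 1 for "too many". */
static int classify_false_alarms(uint32_t norm_fa, uint32_t rx_enable_time_usec)
{
	/* Scale the measured count up to a 200 TU (200 * 1024 usec) window. */
	uint64_t false_alarms = (uint64_t)norm_fa * 200 * 1024;
	uint64_t max_fa = (uint64_t)DEMO_MAX_FA_CCK * rx_enable_time_usec;
	uint64_t min_fa = (uint64_t)DEMO_MIN_FA_CCK * rx_enable_time_usec;

	if (false_alarms > max_fa)
		return 1;
	if (false_alarms < min_fa)
		return -1;
	return 0;
}

int main(void)
{
	/* 30 false alarms seen during 100 ms of actual receive time */
	printf("verdict = %d\n", classify_false_alarms(30, 100 * 1000));
	return 0;
}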
350
351static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
352 u32 norm_fa,
353 u32 rx_enable_time)
354{
355 u32 val;
356 u32 false_alarms = norm_fa * 200 * 1024;
357 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
358 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
359 struct iwl_sensitivity_data *data = NULL;
360 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
361
362 data = &(priv->sensitivity_data);
363
364 /* If we got too many false alarms this time, reduce sensitivity */
365 if (false_alarms > max_false_alarms) {
366
367		IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
368				false_alarms, max_false_alarms);
369
370 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
371 data->auto_corr_ofdm =
372 min((u32)ranges->auto_corr_max_ofdm, val);
373
374 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
375 data->auto_corr_ofdm_mrc =
376 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
377
378 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
379 data->auto_corr_ofdm_x1 =
380 min((u32)ranges->auto_corr_max_ofdm_x1, val);
381
382 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
383 data->auto_corr_ofdm_mrc_x1 =
384 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
385 }
386
387 /* Else if we got fewer than desired, increase sensitivity */
388 else if (false_alarms < min_false_alarms) {
389
390 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
391 false_alarms, min_false_alarms);
392
393 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
394 data->auto_corr_ofdm =
395 max((u32)ranges->auto_corr_min_ofdm, val);
396
397 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
398 data->auto_corr_ofdm_mrc =
399 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
400
401 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
402 data->auto_corr_ofdm_x1 =
403 max((u32)ranges->auto_corr_min_ofdm_x1, val);
404
405 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
406 data->auto_corr_ofdm_mrc_x1 =
407 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
408 } else {
409 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
410 min_false_alarms, false_alarms, max_false_alarms);
411 }
412 return 0;
413}
414
415static void iwl_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
416 struct iwl_sensitivity_data *data,
417 __le16 *tbl)
418{
419 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
420 cpu_to_le16((u16)data->auto_corr_ofdm);
421 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
422 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
423 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
424 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
425 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
426 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
427
428 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
429 cpu_to_le16((u16)data->auto_corr_cck);
430 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
431 cpu_to_le16((u16)data->auto_corr_cck_mrc);
432
433 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
434 cpu_to_le16((u16)data->nrg_th_cck);
435 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
436 cpu_to_le16((u16)data->nrg_th_ofdm);
437
438 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
439 cpu_to_le16(data->barker_corr_th_min);
440 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
441 cpu_to_le16(data->barker_corr_th_min_mrc);
442 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
443 cpu_to_le16(data->nrg_th_cca);
444
445 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
446 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
447 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
448 data->nrg_th_ofdm);
449
450 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
451 data->auto_corr_cck, data->auto_corr_cck_mrc,
452 data->nrg_th_cck);
453}
454
455/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
456static int iwl_sensitivity_write(struct iwl_priv *priv)
457{
458 struct iwl_sensitivity_cmd cmd;
459 struct iwl_sensitivity_data *data = NULL;
460 struct iwl_host_cmd cmd_out = {
461 .id = SENSITIVITY_CMD,
462 .len = { sizeof(struct iwl_sensitivity_cmd), },
463 .flags = CMD_ASYNC,
464 .data = { &cmd, },
465 };
466
467 data = &(priv->sensitivity_data);
468
469 memset(&cmd, 0, sizeof(cmd));
470
471 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
472
473 /* Update uCode's "work" table, and copy it to DSP */
474 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
475
476 /* Don't send command to uCode if nothing has changed */
477 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
478 sizeof(u16)*HD_TABLE_SIZE)) {
479 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
480 return 0;
481 }
482
483 /* Copy table for comparison next time */
484 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
485 sizeof(u16)*HD_TABLE_SIZE);
486
487 return trans_send_cmd(&priv->trans, &cmd_out);
488}
489
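Both iwl_sensitivity_write() and iwl_enhance_sensitivity_write() follow the same pattern: build the full table, memcmp() it against the copy cached from the previous transmission, and skip the asynchronous host command entirely when nothing has changed. A minimal sketch of that change-detection idiom, with a hypothetical stand-in for trans_send_cmd():

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define DEMO_TABLE_SIZE 11

static uint16_t cached_tbl[DEMO_TABLE_SIZE];	/* copy of the last table sent */

/* Stand-in for the real command transport; just reports the send. */
static int demo_send_cmd(const uint16_t *tbl)
{
	printf("sending sensitivity table, first entry = %u\n",
	       (unsigned)tbl[0]);
	return 0;
}

static int demo_sensitivity_write(const uint16_t *tbl)
{
	/* Don't send the command if nothing has changed. */
	if (!memcmp(tbl, cached_tbl, sizeof(cached_tbl))) {
		printf("no change, skipping command\n");
		return 0;
	}
	/* Cache the table for comparison next time, then send it. */
	memcpy(cached_tbl, tbl, sizeof(cached_tbl));
	return demo_send_cmd(tbl);
}

int main(void)
{
	uint16_t tbl[DEMO_TABLE_SIZE] = { [0] = 105, [1] = 220 };

	demo_sensitivity_write(tbl);	/* first call: table is sent */
	demo_sensitivity_write(tbl);	/* identical table: skipped */
	return 0;
}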
490/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
491static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
492{
493 struct iwl_enhance_sensitivity_cmd cmd;
494 struct iwl_sensitivity_data *data = NULL;
495 struct iwl_host_cmd cmd_out = {
496 .id = SENSITIVITY_CMD,
497 .len = { sizeof(struct iwl_enhance_sensitivity_cmd), },
498 .flags = CMD_ASYNC,
499 .data = { &cmd, },
500 };
501
502 data = &(priv->sensitivity_data);
503
504 memset(&cmd, 0, sizeof(cmd));
505
506 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
507
508 cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
509 HD_INA_NON_SQUARE_DET_OFDM_DATA;
510 cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
511 HD_INA_NON_SQUARE_DET_CCK_DATA;
512 cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
513 HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA;
514 cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
515 HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA;
516 cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
517 HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA;
518 cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
519 HD_OFDM_NON_SQUARE_DET_SLOPE_DATA;
520 cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
521 HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA;
522 cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
523 HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA;
524 cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
525 HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA;
526 cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
527 HD_CCK_NON_SQUARE_DET_SLOPE_DATA;
528 cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
529 HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA;
530
531 /* Update uCode's "work" table, and copy it to DSP */
532 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
533
534 /* Don't send command to uCode if nothing has changed */
535 if (!memcmp(&cmd.enhance_table[0], &(priv->sensitivity_tbl[0]),
536 sizeof(u16)*HD_TABLE_SIZE) &&
537 !memcmp(&cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX],
538 &(priv->enhance_sensitivity_tbl[0]),
539 sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES)) {
540 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
541 return 0;
542 }
543
544 /* Copy table for comparison next time */
545 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.enhance_table[0]),
546 sizeof(u16)*HD_TABLE_SIZE);
547 memcpy(&(priv->enhance_sensitivity_tbl[0]),
548 &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
549 sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
550
551 return trans_send_cmd(&priv->trans, &cmd_out);
552}
553
554void iwl_init_sensitivity(struct iwl_priv *priv)
555{
556 int ret = 0;
557 int i;
558 struct iwl_sensitivity_data *data = NULL;
559 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
560
561 if (priv->disable_sens_cal)
562 return;
563
564 IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n");
565
566 /* Clear driver's sensitivity algo data */
567 data = &(priv->sensitivity_data);
568
569 if (ranges == NULL)
570 return;
571
572 memset(data, 0, sizeof(struct iwl_sensitivity_data));
573
574 data->num_in_cck_no_fa = 0;
575 data->nrg_curr_state = IWL_FA_TOO_MANY;
576 data->nrg_prev_state = IWL_FA_TOO_MANY;
577 data->nrg_silence_ref = 0;
578 data->nrg_silence_idx = 0;
579 data->nrg_energy_idx = 0;
580
581 for (i = 0; i < 10; i++)
582 data->nrg_value[i] = 0;
583
584 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
585 data->nrg_silence_rssi[i] = 0;
586
587 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
588 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
589 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
590 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
591 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
592 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
593 data->nrg_th_cck = ranges->nrg_th_cck;
594 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
595 data->barker_corr_th_min = ranges->barker_corr_th_min;
596 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
597 data->nrg_th_cca = ranges->nrg_th_cca;
598
599 data->last_bad_plcp_cnt_ofdm = 0;
600 data->last_fa_cnt_ofdm = 0;
601 data->last_bad_plcp_cnt_cck = 0;
602 data->last_fa_cnt_cck = 0;
603
604 if (priv->enhance_sensitivity_table)
605 ret |= iwl_enhance_sensitivity_write(priv);
606 else
607 ret |= iwl_sensitivity_write(priv);
608 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
609}
610
611void iwl_sensitivity_calibration(struct iwl_priv *priv)
612{
613 u32 rx_enable_time;
614 u32 fa_cck;
615 u32 fa_ofdm;
616 u32 bad_plcp_cck;
617 u32 bad_plcp_ofdm;
618 u32 norm_fa_ofdm;
619 u32 norm_fa_cck;
620 struct iwl_sensitivity_data *data = NULL;
621 struct statistics_rx_non_phy *rx_info;
622 struct statistics_rx_phy *ofdm, *cck;
623 unsigned long flags;
624 struct statistics_general_data statis;
625
626 if (priv->disable_sens_cal)
627 return;
628
629 data = &(priv->sensitivity_data);
630
631 if (!iwl_is_any_associated(priv)) {
632 IWL_DEBUG_CALIB(priv, "<< - not associated\n");
633 return;
634 }
635
636 spin_lock_irqsave(&priv->lock, flags);
637 rx_info = &priv->statistics.rx_non_phy;
638 ofdm = &priv->statistics.rx_ofdm;
639 cck = &priv->statistics.rx_cck;
640 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
641 IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
642 spin_unlock_irqrestore(&priv->lock, flags);
643 return;
644 }
645
646 /* Extract Statistics: */
647 rx_enable_time = le32_to_cpu(rx_info->channel_load);
648 fa_cck = le32_to_cpu(cck->false_alarm_cnt);
649 fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
650 bad_plcp_cck = le32_to_cpu(cck->plcp_err);
651 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
652
653 statis.beacon_silence_rssi_a =
654 le32_to_cpu(rx_info->beacon_silence_rssi_a);
655 statis.beacon_silence_rssi_b =
656 le32_to_cpu(rx_info->beacon_silence_rssi_b);
657 statis.beacon_silence_rssi_c =
658 le32_to_cpu(rx_info->beacon_silence_rssi_c);
659 statis.beacon_energy_a =
660 le32_to_cpu(rx_info->beacon_energy_a);
661 statis.beacon_energy_b =
662 le32_to_cpu(rx_info->beacon_energy_b);
663 statis.beacon_energy_c =
664 le32_to_cpu(rx_info->beacon_energy_c);
665
666 spin_unlock_irqrestore(&priv->lock, flags);
667
668 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
669
670 if (!rx_enable_time) {
671 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
672 return;
673 }
674
675 /* These statistics increase monotonically, and do not reset
676 * at each beacon. Calculate difference from last value, or just
677 * use the new statistics value if it has reset or wrapped around. */
678 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
679 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
680 else {
681 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
682 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
683 }
684
685 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
686 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
687 else {
688 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
689 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
690 }
691
692 if (data->last_fa_cnt_ofdm > fa_ofdm)
693 data->last_fa_cnt_ofdm = fa_ofdm;
694 else {
695 fa_ofdm -= data->last_fa_cnt_ofdm;
696 data->last_fa_cnt_ofdm += fa_ofdm;
697 }
698
699 if (data->last_fa_cnt_cck > fa_cck)
700 data->last_fa_cnt_cck = fa_cck;
701 else {
702 fa_cck -= data->last_fa_cnt_cck;
703 data->last_fa_cnt_cck += fa_cck;
704 }
705
706 /* Total aborted signal locks */
707 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
708 norm_fa_cck = fa_cck + bad_plcp_cck;
709
710 IWL_DEBUG_CALIB(priv, "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
711 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
712
713 iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
714 iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
715 if (priv->enhance_sensitivity_table)
716 iwl_enhance_sensitivity_write(priv);
717 else
718 iwl_sensitivity_write(priv);
719}
720
721static inline u8 find_first_chain(u8 mask)
722{
723 if (mask & ANT_A)
724 return CHAIN_A;
725 if (mask & ANT_B)
726 return CHAIN_B;
727 return CHAIN_C;
728}
729
730/**
731 * iwl_find_disconn_antenna - run the disconnected antenna algorithm
732 * Figure out which of the receive antennas are disconnected.
733 */
734static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
735 struct iwl_chain_noise_data *data)
736{
737 u32 active_chains = 0;
738 u32 max_average_sig;
739 u16 max_average_sig_antenna_i;
740 u8 num_tx_chains;
741 u8 first_chain;
742 u16 i = 0;
743
744 average_sig[0] = data->chain_signal_a /
745 priv->cfg->base_params->chain_noise_num_beacons;
746 average_sig[1] = data->chain_signal_b /
747 priv->cfg->base_params->chain_noise_num_beacons;
748 average_sig[2] = data->chain_signal_c /
749 priv->cfg->base_params->chain_noise_num_beacons;
750
751 if (average_sig[0] >= average_sig[1]) {
752 max_average_sig = average_sig[0];
753 max_average_sig_antenna_i = 0;
754 active_chains = (1 << max_average_sig_antenna_i);
755 } else {
756 max_average_sig = average_sig[1];
757 max_average_sig_antenna_i = 1;
758 active_chains = (1 << max_average_sig_antenna_i);
759 }
760
761 if (average_sig[2] >= max_average_sig) {
762 max_average_sig = average_sig[2];
763 max_average_sig_antenna_i = 2;
764 active_chains = (1 << max_average_sig_antenna_i);
765 }
766
767 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
768 average_sig[0], average_sig[1], average_sig[2]);
769 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
770 max_average_sig, max_average_sig_antenna_i);
771
772 /* Compare signal strengths for all 3 receivers. */
773 for (i = 0; i < NUM_RX_CHAINS; i++) {
774 if (i != max_average_sig_antenna_i) {
775 s32 rssi_delta = (max_average_sig - average_sig[i]);
776
777 /* If signal is very weak, compared with
778 * strongest, mark it as disconnected. */
779 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
780 data->disconn_array[i] = 1;
781 else
782 active_chains |= (1 << i);
783 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
784 "disconn_array[i] = %d\n",
785 i, rssi_delta, data->disconn_array[i]);
786 }
787 }
788
789 /*
790 * The above algorithm sometimes fails when the ucode
791 * reports 0 for all chains. It's not clear why that
792 * happens to start with, but it is then causing trouble
793 * because this can make us enable more chains than the
794 * hardware really has.
795 *
796 * To be safe, simply mask out any chains that we know
797 * are not on the device.
798 */
799 active_chains &= priv->hw_params.valid_rx_ant;
800
801 num_tx_chains = 0;
802 for (i = 0; i < NUM_RX_CHAINS; i++) {
803		/* loop over all the bits of
804		 * priv->hw_params.valid_tx_ant */
805 u8 ant_msk = (1 << i);
806 if (!(priv->hw_params.valid_tx_ant & ant_msk))
807 continue;
808
809 num_tx_chains++;
810 if (data->disconn_array[i] == 0)
811 /* there is a Tx antenna connected */
812 break;
813 if (num_tx_chains == priv->hw_params.tx_chains_num &&
814 data->disconn_array[i]) {
815 /*
816 * If all chains are disconnected
817 * connect the first valid tx chain
818 */
819 first_chain =
820 find_first_chain(priv->cfg->valid_tx_ant);
821 data->disconn_array[first_chain] = 0;
822 active_chains |= BIT(first_chain);
823 IWL_DEBUG_CALIB(priv,
824 "All Tx chains are disconnected W/A - declare %d as connected\n",
825 first_chain);
826 break;
827 }
828 }
829
830 if (active_chains != priv->hw_params.valid_rx_ant &&
831 active_chains != priv->chain_noise_data.active_chains)
832 IWL_DEBUG_CALIB(priv,
833 "Detected that not all antennas are connected! "
834 "Connected: %#x, valid: %#x.\n",
835 active_chains, priv->hw_params.valid_rx_ant);
836
837 /* Save for use within RXON, TX, SCAN commands, etc. */
838 data->active_chains = active_chains;
839 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
840 active_chains);
841}
842
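iwl_find_disconn_antenna() above averages each chain's accumulated beacon signal over chain_noise_num_beacons, takes the strongest chain as the reference, and flags any other chain whose average falls more than MAXIMUM_ALLOWED_PATHLOSS below it as disconnected. A standalone sketch of that comparison, using an illustrative path-loss threshold rather than the driver's constant:

#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_CHAINS		3
#define DEMO_MAX_PATHLOSS	15	/* illustrative threshold only */

/* Mark disconn[] for chains far weaker than the strongest one. */
static void find_disconnected(const uint32_t avg_sig[DEMO_NUM_CHAINS],
			      int disconn[DEMO_NUM_CHAINS])
{
	uint32_t max_sig = 0;
	int max_i = 0, i;

	for (i = 0; i < DEMO_NUM_CHAINS; i++) {
		if (avg_sig[i] >= max_sig) {
			max_sig = avg_sig[i];
			max_i = i;
		}
	}

	for (i = 0; i < DEMO_NUM_CHAINS; i++)
		disconn[i] = (i != max_i) &&
			     (max_sig - avg_sig[i] > DEMO_MAX_PATHLOSS);
}

int main(void)
{
	uint32_t avg_sig[DEMO_NUM_CHAINS] = { 60, 58, 10 };	/* chain C weak */
	int disconn[DEMO_NUM_CHAINS];
	int i;

	find_disconnected(avg_sig, disconn);
	for (i = 0; i < DEMO_NUM_CHAINS; i++)
		printf("chain %d: %s\n", i,
		       disconn[i] ? "disconnected" : "connected");
	return 0;
}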
843static void iwlagn_gain_computation(struct iwl_priv *priv,
844 u32 average_noise[NUM_RX_CHAINS],
845 u16 min_average_noise_antenna_i,
846 u32 min_average_noise,
847 u8 default_chain)
848{
849 int i;
850 s32 delta_g;
851 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
852
853 /*
854 * Find Gain Code for the chains based on "default chain"
855 */
856 for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
857 if ((data->disconn_array[i])) {
858 data->delta_gain_code[i] = 0;
859 continue;
860 }
861
862 delta_g = (priv->cfg->base_params->chain_noise_scale *
863 ((s32)average_noise[default_chain] -
864 (s32)average_noise[i])) / 1500;
865
866 /* bound gain by 2 bits value max, 3rd bit is sign */
867 data->delta_gain_code[i] =
868 min(abs(delta_g),
869 (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
870
871 if (delta_g < 0)
872 /*
873 * set negative sign ...
874 * note to Intel developers: This is uCode API format,
875 * not the format of any internal device registers.
876 * Do not change this format for e.g. 6050 or similar
877 * devices. Change format only if more resolution
878 * (i.e. more than 2 bits magnitude) is needed.
879 */
880 data->delta_gain_code[i] |= (1 << 2);
881 }
882
883 IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
884 data->delta_gain_code[1], data->delta_gain_code[2]);
885
886 if (!data->radio_write) {
887 struct iwl_calib_chain_noise_gain_cmd cmd;
888
889 memset(&cmd, 0, sizeof(cmd));
890
891 iwl_set_calib_hdr(&cmd.hdr,
892 priv->phy_calib_chain_noise_gain_cmd);
893 cmd.delta_gain_1 = data->delta_gain_code[1];
894 cmd.delta_gain_2 = data->delta_gain_code[2];
895 trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD,
896 CMD_ASYNC, sizeof(cmd), &cmd);
897
898 data->radio_write = 1;
899 data->state = IWL_CHAIN_NOISE_CALIBRATED;
900 }
901}
902
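The delta gain computed above goes to the uCode in a small sign-magnitude format: the magnitude is clamped to two bits and, for a negative delta, bit 2 is set as the sign. A worked standalone example of that encoding, assuming the chain_noise_scale of 1000 used by iwl6000_base_params and a 2-bit clamp of 3; the helper name is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define DEMO_CHAIN_NOISE_SCALE	1000	/* as in iwl6000_base_params */
#define DEMO_MAX_DELTA_GAIN	3	/* 2-bit magnitude limit */

/*
 * Encode a per-chain delta gain: bits 0-1 carry the clamped magnitude,
 * bit 2 carries the sign (set when the delta is negative).
 */
static uint8_t encode_delta_gain(int32_t ref_noise, int32_t chain_noise)
{
	int32_t delta_g = DEMO_CHAIN_NOISE_SCALE *
			  (ref_noise - chain_noise) / 1500;
	uint8_t code;

	code = abs(delta_g) > DEMO_MAX_DELTA_GAIN ?
	       DEMO_MAX_DELTA_GAIN : abs(delta_g);
	if (delta_g < 0)
		code |= (1 << 2);	/* negative sign bit */
	return code;
}

int main(void)
{
	/* reference chain 3 units noisier than this chain -> +2 steps */
	printf("delta_gain = 0x%x\n", (unsigned)encode_delta_gain(80, 77));
	/* this chain noisier than the reference -> magnitude 2 plus sign */
	printf("delta_gain = 0x%x\n", (unsigned)encode_delta_gain(77, 80));
	return 0;
}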
903/*
904 * Accumulate 16 beacons of signal and noise statistics for each of
905 * 3 receivers/antennas/rx-chains, then figure out:
906 * 1) Which antennas are connected.
907 * 2) Differential rx gain settings to balance the 3 receivers.
908 */
909void iwl_chain_noise_calibration(struct iwl_priv *priv)
910{
911 struct iwl_chain_noise_data *data = NULL;
912
913 u32 chain_noise_a;
914 u32 chain_noise_b;
915 u32 chain_noise_c;
916 u32 chain_sig_a;
917 u32 chain_sig_b;
918 u32 chain_sig_c;
919 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
920 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
921 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
922 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
923 u16 i = 0;
924 u16 rxon_chnum = INITIALIZATION_VALUE;
925 u16 stat_chnum = INITIALIZATION_VALUE;
926 u8 rxon_band24;
927 u8 stat_band24;
928 unsigned long flags;
929 struct statistics_rx_non_phy *rx_info;
930
931 /*
932 * MULTI-FIXME:
933 * When we support multiple interfaces on different channels,
934 * this must be modified/fixed.
935 */
936 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
937
938 if (priv->disable_chain_noise_cal)
939 return;
940
941 data = &(priv->chain_noise_data);
942
943 /*
944 * Accumulate just the first "chain_noise_num_beacons" after
945 * the first association, then we're done forever.
946 */
947 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
948 if (data->state == IWL_CHAIN_NOISE_ALIVE)
949 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
950 return;
951 }
952
953 spin_lock_irqsave(&priv->lock, flags);
954
955 rx_info = &priv->statistics.rx_non_phy;
956
957 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
958 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
959 spin_unlock_irqrestore(&priv->lock, flags);
960 return;
961 }
962
963 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
964 rxon_chnum = le16_to_cpu(ctx->staging.channel);
965 stat_band24 =
966 !!(priv->statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK);
967 stat_chnum = le32_to_cpu(priv->statistics.flag) >> 16;
968
969 /* Make sure we accumulate data for just the associated channel
970 * (even if scanning). */
971 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
972 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
973 rxon_chnum, rxon_band24);
974 spin_unlock_irqrestore(&priv->lock, flags);
975 return;
976 }
977
978 /*
979 * Accumulate beacon statistics values across
980 * "chain_noise_num_beacons"
981 */
982 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
983 IN_BAND_FILTER;
984 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
985 IN_BAND_FILTER;
986 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
987 IN_BAND_FILTER;
988
989 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
990 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
991 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
992
993 spin_unlock_irqrestore(&priv->lock, flags);
994
995 data->beacon_count++;
996
997 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
998 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
999 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1000
1001 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1002 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1003 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1004
1005 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
1006 rxon_chnum, rxon_band24, data->beacon_count);
1007 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
1008 chain_sig_a, chain_sig_b, chain_sig_c);
1009 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
1010 chain_noise_a, chain_noise_b, chain_noise_c);
1011
1012 /* If this is the "chain_noise_num_beacons", determine:
1013 * 1) Disconnected antennas (using signal strengths)
1014 * 2) Differential gain (using silence noise) to balance receivers */
1015 if (data->beacon_count !=
1016 priv->cfg->base_params->chain_noise_num_beacons)
1017 return;
1018
1019 /* Analyze signal for disconnected antenna */
1020 if (priv->cfg->bt_params &&
1021 priv->cfg->bt_params->advanced_bt_coexist) {
1022 /* Disable disconnected antenna algorithm for advanced
1023 bt coex, assuming valid antennas are connected */
1024 data->active_chains = priv->hw_params.valid_rx_ant;
1025 for (i = 0; i < NUM_RX_CHAINS; i++)
1026 if (!(data->active_chains & (1<<i)))
1027 data->disconn_array[i] = 1;
1028 } else
1029 iwl_find_disconn_antenna(priv, average_sig, data);
1030
1031 /* Analyze noise for rx balance */
1032 average_noise[0] = data->chain_noise_a /
1033 priv->cfg->base_params->chain_noise_num_beacons;
1034 average_noise[1] = data->chain_noise_b /
1035 priv->cfg->base_params->chain_noise_num_beacons;
1036 average_noise[2] = data->chain_noise_c /
1037 priv->cfg->base_params->chain_noise_num_beacons;
1038
1039 for (i = 0; i < NUM_RX_CHAINS; i++) {
1040 if (!(data->disconn_array[i]) &&
1041 (average_noise[i] <= min_average_noise)) {
1042 /* This means that chain i is active and has
1043 * lower noise values so far: */
1044 min_average_noise = average_noise[i];
1045 min_average_noise_antenna_i = i;
1046 }
1047 }
1048
1049 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
1050 average_noise[0], average_noise[1],
1051 average_noise[2]);
1052
1053 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
1054 min_average_noise, min_average_noise_antenna_i);
1055
1056 iwlagn_gain_computation(priv, average_noise,
1057 min_average_noise_antenna_i, min_average_noise,
1058 find_first_chain(priv->cfg->valid_rx_ant));
1059
1060 /* Some power changes may have been made during the calibration.
1061 * Update and commit the RXON
1062 */
1063 iwl_update_chain_flags(priv);
1064
1065 data->state = IWL_CHAIN_NOISE_DONE;
1066 iwl_power_update_mode(priv, false);
1067}
1068
1069void iwl_reset_run_time_calib(struct iwl_priv *priv)
1070{
1071 int i;
1072 memset(&(priv->sensitivity_data), 0,
1073 sizeof(struct iwl_sensitivity_data));
1074 memset(&(priv->chain_noise_data), 0,
1075 sizeof(struct iwl_chain_noise_data));
1076 for (i = 0; i < NUM_RX_CHAINS; i++)
1077 priv->chain_noise_data.delta_gain_code[i] =
1078 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1079
1080	/* Ask for statistics now; the uCode will send notifications
1081	 * periodically after association */
1082 iwl_send_statistics_request(priv, CMD_ASYNC, true);
1083}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
new file mode 100644
index 00000000000..a869fc9205d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -0,0 +1,79 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_calib_h__
63#define __iwl_calib_h__
64
65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-commands.h"
68
69void iwl_chain_noise_calibration(struct iwl_priv *priv);
70void iwl_sensitivity_calibration(struct iwl_priv *priv);
71
72void iwl_init_sensitivity(struct iwl_priv *priv);
73void iwl_reset_run_time_calib(struct iwl_priv *priv);
74
75int iwl_send_calib_results(struct iwl_priv *priv);
76int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
77void iwl_calib_free_results(struct iwl_priv *priv);
78
79#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
new file mode 100644
index 00000000000..b8347db850e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -0,0 +1,299 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-agn.h"
76#include "iwl-io.h"
77
78/******************************************************************************
79 *
80 * EEPROM related functions
81 *
82******************************************************************************/
83
84int iwl_eeprom_check_version(struct iwl_priv *priv)
85{
86 u16 eeprom_ver;
87 u16 calib_ver;
88
89 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
90 calib_ver = iwlagn_eeprom_calib_version(priv);
91
92 if (eeprom_ver < priv->cfg->eeprom_ver ||
93 calib_ver < priv->cfg->eeprom_calib_ver)
94 goto err;
95
96 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
97 eeprom_ver, calib_ver);
98
99 return 0;
100err:
101 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
102 "CALIB=0x%x < 0x%x\n",
103 eeprom_ver, priv->cfg->eeprom_ver,
104 calib_ver, priv->cfg->eeprom_calib_ver);
105 return -EINVAL;
106
107}
108
109int iwl_eeprom_check_sku(struct iwl_priv *priv)
110{
111 u16 radio_cfg;
112
113 if (!priv->cfg->sku) {
114 /* not using sku overwrite */
115 priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
116 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
117 !priv->cfg->ht_params) {
118 IWL_ERR(priv, "Invalid 11n configuration\n");
119 return -EINVAL;
120 }
121 }
122 if (!priv->cfg->sku) {
123 IWL_ERR(priv, "Invalid device sku\n");
124 return -EINVAL;
125 }
126
127 IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
128
129 if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
130 /* not using .cfg overwrite */
131 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
132 priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
133 priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
134 if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
135 IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
136 priv->cfg->valid_tx_ant,
137 priv->cfg->valid_rx_ant);
138 return -EINVAL;
139 }
140 IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
141 priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
142 }
143	/*
144	 * For some special cases the EEPROM does not reflect the correct
145	 * antenna setting, so the valid tx/rx antennas are overridden
146	 * from .cfg instead.
147	 */
148 return 0;
149}
150
151void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
152{
153 const u8 *addr = iwl_eeprom_query_addr(priv,
154 EEPROM_MAC_ADDRESS);
155 memcpy(mac, addr, ETH_ALEN);
156}
157
158/**
159 * iwl_get_max_txpower_avg - get the highest tx power from all chains
160 * Find the highest tx power from any valid chain for the channel.
161 */
162static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
163 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
164 int element, s8 *max_txpower_in_half_dbm)
165{
166 s8 max_txpower_avg = 0; /* (dBm) */
167
168 /* Take the highest tx power from any valid chains */
169 if ((priv->cfg->valid_tx_ant & ANT_A) &&
170 (enhanced_txpower[element].chain_a_max > max_txpower_avg))
171 max_txpower_avg = enhanced_txpower[element].chain_a_max;
172 if ((priv->cfg->valid_tx_ant & ANT_B) &&
173 (enhanced_txpower[element].chain_b_max > max_txpower_avg))
174 max_txpower_avg = enhanced_txpower[element].chain_b_max;
175 if ((priv->cfg->valid_tx_ant & ANT_C) &&
176 (enhanced_txpower[element].chain_c_max > max_txpower_avg))
177 max_txpower_avg = enhanced_txpower[element].chain_c_max;
178 if (((priv->cfg->valid_tx_ant == ANT_AB) |
179 (priv->cfg->valid_tx_ant == ANT_BC) |
180 (priv->cfg->valid_tx_ant == ANT_AC)) &&
181 (enhanced_txpower[element].mimo2_max > max_txpower_avg))
182 max_txpower_avg = enhanced_txpower[element].mimo2_max;
183 if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
184 (enhanced_txpower[element].mimo3_max > max_txpower_avg))
185 max_txpower_avg = enhanced_txpower[element].mimo3_max;
186
187	/*
188	 * Max. tx power in the EEPROM is in 1/2 dBm format; convert
189	 * from 1/2 dBm to dBm (rounding up). The original half-dBm
190	 * value is preserved via max_txpower_in_half_dbm so that the
191	 * 1/2 dBm resolution is not lost, which would impact performance.
192	 */
193 *max_txpower_in_half_dbm = max_txpower_avg;
194 return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
195}
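
/*
 * Worked example of the round-up conversion above (values are purely
 * illustrative): a chain maximum of 31 half-dBm (15.5 dBm) becomes
 * (31 & 0x01) + (31 >> 1) = 1 + 15 = 16 dBm, while 30 half-dBm
 * (15.0 dBm) becomes 0 + 15 = 15 dBm; the unrounded half-dBm value is
 * still handed back through *max_txpower_in_half_dbm.
 */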
196
197static void
198iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
199 struct iwl_eeprom_enhanced_txpwr *txp,
200 s8 max_txpower_avg)
201{
202 int ch_idx;
203 bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
204 enum ieee80211_band band;
205
206 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
207 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
208
209 for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
210 struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
211
212 /* update matching channel or from common data only */
213 if (txp->channel != 0 && ch_info->channel != txp->channel)
214 continue;
215
216 /* update matching band only */
217 if (band != ch_info->band)
218 continue;
219
220 if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
221 ch_info->max_power_avg = max_txpower_avg;
222 ch_info->curr_txpow = max_txpower_avg;
223 ch_info->scan_power = max_txpower_avg;
224 }
225
226 if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
227 ch_info->ht40_max_power_avg = max_txpower_avg;
228 }
229}
230
231#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
232#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
233#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
234
235#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
236 ? # x " " : "")
237
238void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
239{
240 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
241 int idx, entries;
242 __le16 *txp_len;
243 s8 max_txp_avg, max_txp_avg_halfdbm;
244
245 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
246
247 /* the length is in 16-bit words, but we want entries */
248 txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
249 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
250
251 txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
252
253 for (idx = 0; idx < entries; idx++) {
254 txp = &txp_array[idx];
255 /* skip invalid entries */
256 if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
257 continue;
258
259 IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
260 (txp->channel && (txp->flags &
261 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
262 "Common " : (txp->channel) ?
263 "Channel" : "Common",
264 (txp->channel),
265 TXP_CHECK_AND_PRINT(VALID),
266 TXP_CHECK_AND_PRINT(BAND_52G),
267 TXP_CHECK_AND_PRINT(OFDM),
268 TXP_CHECK_AND_PRINT(40MHZ),
269 TXP_CHECK_AND_PRINT(HT_AP),
270 TXP_CHECK_AND_PRINT(RES1),
271 TXP_CHECK_AND_PRINT(RES2),
272 TXP_CHECK_AND_PRINT(COMMON_TYPE),
273 txp->flags);
274 IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
275 "chain_B: 0X%02x chain_C: 0X%02x\n",
276 txp->chain_a_max, txp->chain_b_max,
277 txp->chain_c_max);
278 IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
279 "MIMO3: 0x%02x High 20_on_40: 0x%02x "
280 "Low 20_on_40: 0x%02x\n",
281 txp->mimo2_max, txp->mimo3_max,
282 ((txp->delta_20_in_40 & 0xf0) >> 4),
283 (txp->delta_20_in_40 & 0x0f));
284
285 max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
286 &max_txp_avg_halfdbm);
287
288		/*
289		 * Update the user limit values to the highest
290		 * power supported by any channel
291		 */
292 if (max_txp_avg > priv->tx_power_user_lmt)
293 priv->tx_power_user_lmt = max_txp_avg;
294 if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
295 priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
296
297 iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
298 }
299}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
new file mode 100644
index 00000000000..3bee0f119bc
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -0,0 +1,2047 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-agn-hw.h"
40#include "iwl-agn.h"
41#include "iwl-sta.h"
42#include "iwl-trans.h"
43
44static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
45{
46 return le32_to_cpup((__le32 *)&tx_resp->status +
47 tx_resp->frame_count) & MAX_SN;
48}
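/*
 * Note on the helper above: the pointer arithmetic relies on each
 * agg_tx_status entry being exactly one 32-bit word, so the scheduler
 * SSN is read from the word that immediately follows the
 * tx_resp->frame_count per-frame status entries and is masked down to
 * a sequence number with MAX_SN.
 */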
49
50static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
51{
52 status &= TX_STATUS_MSK;
53
54 switch (status) {
55 case TX_STATUS_POSTPONE_DELAY:
56 priv->reply_tx_stats.pp_delay++;
57 break;
58 case TX_STATUS_POSTPONE_FEW_BYTES:
59 priv->reply_tx_stats.pp_few_bytes++;
60 break;
61 case TX_STATUS_POSTPONE_BT_PRIO:
62 priv->reply_tx_stats.pp_bt_prio++;
63 break;
64 case TX_STATUS_POSTPONE_QUIET_PERIOD:
65 priv->reply_tx_stats.pp_quiet_period++;
66 break;
67 case TX_STATUS_POSTPONE_CALC_TTAK:
68 priv->reply_tx_stats.pp_calc_ttak++;
69 break;
70 case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
71 priv->reply_tx_stats.int_crossed_retry++;
72 break;
73 case TX_STATUS_FAIL_SHORT_LIMIT:
74 priv->reply_tx_stats.short_limit++;
75 break;
76 case TX_STATUS_FAIL_LONG_LIMIT:
77 priv->reply_tx_stats.long_limit++;
78 break;
79 case TX_STATUS_FAIL_FIFO_UNDERRUN:
80 priv->reply_tx_stats.fifo_underrun++;
81 break;
82 case TX_STATUS_FAIL_DRAIN_FLOW:
83 priv->reply_tx_stats.drain_flow++;
84 break;
85 case TX_STATUS_FAIL_RFKILL_FLUSH:
86 priv->reply_tx_stats.rfkill_flush++;
87 break;
88 case TX_STATUS_FAIL_LIFE_EXPIRE:
89 priv->reply_tx_stats.life_expire++;
90 break;
91 case TX_STATUS_FAIL_DEST_PS:
92 priv->reply_tx_stats.dest_ps++;
93 break;
94 case TX_STATUS_FAIL_HOST_ABORTED:
95 priv->reply_tx_stats.host_abort++;
96 break;
97 case TX_STATUS_FAIL_BT_RETRY:
98 priv->reply_tx_stats.bt_retry++;
99 break;
100 case TX_STATUS_FAIL_STA_INVALID:
101 priv->reply_tx_stats.sta_invalid++;
102 break;
103 case TX_STATUS_FAIL_FRAG_DROPPED:
104 priv->reply_tx_stats.frag_drop++;
105 break;
106 case TX_STATUS_FAIL_TID_DISABLE:
107 priv->reply_tx_stats.tid_disable++;
108 break;
109 case TX_STATUS_FAIL_FIFO_FLUSHED:
110 priv->reply_tx_stats.fifo_flush++;
111 break;
112 case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
113 priv->reply_tx_stats.insuff_cf_poll++;
114 break;
115 case TX_STATUS_FAIL_PASSIVE_NO_RX:
116 priv->reply_tx_stats.fail_hw_drop++;
117 break;
118 case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
119 priv->reply_tx_stats.sta_color_mismatch++;
120 break;
121 default:
122 priv->reply_tx_stats.unknown++;
123 break;
124 }
125}
126
127static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
128{
129 status &= AGG_TX_STATUS_MSK;
130
131 switch (status) {
132 case AGG_TX_STATE_UNDERRUN_MSK:
133 priv->reply_agg_tx_stats.underrun++;
134 break;
135 case AGG_TX_STATE_BT_PRIO_MSK:
136 priv->reply_agg_tx_stats.bt_prio++;
137 break;
138 case AGG_TX_STATE_FEW_BYTES_MSK:
139 priv->reply_agg_tx_stats.few_bytes++;
140 break;
141 case AGG_TX_STATE_ABORT_MSK:
142 priv->reply_agg_tx_stats.abort++;
143 break;
144 case AGG_TX_STATE_LAST_SENT_TTL_MSK:
145 priv->reply_agg_tx_stats.last_sent_ttl++;
146 break;
147 case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
148 priv->reply_agg_tx_stats.last_sent_try++;
149 break;
150 case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
151 priv->reply_agg_tx_stats.last_sent_bt_kill++;
152 break;
153 case AGG_TX_STATE_SCD_QUERY_MSK:
154 priv->reply_agg_tx_stats.scd_query++;
155 break;
156 case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
157 priv->reply_agg_tx_stats.bad_crc32++;
158 break;
159 case AGG_TX_STATE_RESPONSE_MSK:
160 priv->reply_agg_tx_stats.response++;
161 break;
162 case AGG_TX_STATE_DUMP_TX_MSK:
163 priv->reply_agg_tx_stats.dump_tx++;
164 break;
165 case AGG_TX_STATE_DELAY_TX_MSK:
166 priv->reply_agg_tx_stats.delay_tx++;
167 break;
168 default:
169 priv->reply_agg_tx_stats.unknown++;
170 break;
171 }
172}
173
174static void iwlagn_set_tx_status(struct iwl_priv *priv,
175 struct ieee80211_tx_info *info,
176 struct iwl_rxon_context *ctx,
177 struct iwlagn_tx_resp *tx_resp,
178 int txq_id, bool is_agg)
179{
180 u16 status = le16_to_cpu(tx_resp->status.status);
181
182 info->status.rates[0].count = tx_resp->failure_frame + 1;
183 if (is_agg)
184 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
185 info->flags |= iwl_tx_status_to_mac80211(status);
186 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
187 info);
188 if (!iwl_is_tx_success(status))
189 iwlagn_count_tx_err_status(priv, status);
190
191 if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
192 iwl_is_associated_ctx(ctx) && ctx->vif &&
193 ctx->vif->type == NL80211_IFTYPE_STATION) {
194 ctx->last_tx_rejected = true;
195 iwl_stop_queue(priv, &priv->txq[txq_id]);
196 }
197
198 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
199 "0x%x retries %d\n",
200 txq_id,
201 iwl_get_tx_fail_reason(status), status,
202 le32_to_cpu(tx_resp->rate_n_flags),
203 tx_resp->failure_frame);
204}
205
206#ifdef CONFIG_IWLWIFI_DEBUG
207#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
208
209const char *iwl_get_agg_tx_fail_reason(u16 status)
210{
211 status &= AGG_TX_STATUS_MSK;
212 switch (status) {
213 case AGG_TX_STATE_TRANSMITTED:
214 return "SUCCESS";
215 AGG_TX_STATE_FAIL(UNDERRUN_MSK);
216 AGG_TX_STATE_FAIL(BT_PRIO_MSK);
217 AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
218 AGG_TX_STATE_FAIL(ABORT_MSK);
219 AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
220 AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
221 AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
222 AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
223 AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
224 AGG_TX_STATE_FAIL(RESPONSE_MSK);
225 AGG_TX_STATE_FAIL(DUMP_TX_MSK);
226 AGG_TX_STATE_FAIL(DELAY_TX_MSK);
227 }
228
229 return "UNKNOWN";
230}
231#endif /* CONFIG_IWLWIFI_DEBUG */
232
233static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
234 struct iwl_ht_agg *agg,
235 struct iwlagn_tx_resp *tx_resp,
236 int txq_id, u16 start_idx)
237{
238 u16 status;
239 struct agg_tx_status *frame_status = &tx_resp->status;
240 struct ieee80211_hdr *hdr = NULL;
241 int i, sh, idx;
242 u16 seq;
243
244 if (agg->wait_for_ba)
245 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
246
247 agg->frame_count = tx_resp->frame_count;
248 agg->start_idx = start_idx;
249 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
250 agg->bitmap = 0;
251
252 /* # frames attempted by Tx command */
253 if (agg->frame_count == 1) {
254 struct iwl_tx_info *txb;
255
256 /* Only one frame was attempted; no block-ack will arrive */
257 idx = start_idx;
258
259 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
260 agg->frame_count, agg->start_idx, idx);
261 txb = &priv->txq[txq_id].txb[idx];
262 iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
263 txb->ctx, tx_resp, txq_id, true);
264 agg->wait_for_ba = 0;
265 } else {
266 /* Two or more frames were attempted; expect block-ack */
267 u64 bitmap = 0;
268
269 /*
270 * Start is the lowest frame sent. It may not be the first
271 * frame in the batch; we figure this out dynamically during
272 * the following loop.
273 */
274 int start = agg->start_idx;
275
276 /* Construct bit-map of pending frames within Tx window */
277 for (i = 0; i < agg->frame_count; i++) {
278 u16 sc;
279 status = le16_to_cpu(frame_status[i].status);
280 seq = le16_to_cpu(frame_status[i].sequence);
281 idx = SEQ_TO_INDEX(seq);
282 txq_id = SEQ_TO_QUEUE(seq);
283
284 if (status & AGG_TX_STATUS_MSK)
285 iwlagn_count_agg_tx_err_status(priv, status);
286
287 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
288 AGG_TX_STATE_ABORT_MSK))
289 continue;
290
291 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
292 agg->frame_count, txq_id, idx);
293 IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
294 "try-count (0x%08x)\n",
295 iwl_get_agg_tx_fail_reason(status),
296 status & AGG_TX_STATUS_MSK,
297 status & AGG_TX_TRY_MSK);
298
299 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
300 if (!hdr) {
301 IWL_ERR(priv,
302 "BUG_ON idx doesn't point to valid skb"
303 " idx=%d, txq_id=%d\n", idx, txq_id);
304 return -1;
305 }
306
307 sc = le16_to_cpu(hdr->seq_ctrl);
308 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
309 IWL_ERR(priv,
310 "BUG_ON idx doesn't match seq control"
311 " idx=%d, seq_idx=%d, seq=%d\n",
312 idx, SEQ_TO_SN(sc),
313 hdr->seq_ctrl);
314 return -1;
315 }
316
317 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
318 i, idx, SEQ_TO_SN(sc));
319
320 /*
321 * sh -> how many frames ahead of the starting frame is
322 * the current one?
323 *
324 * Note that all frames sent in the batch must be in a
325 * 64-frame window, so this number should be in [0,63].
326 * If outside of this window, then we've found a new
327 * "first" frame in the batch and need to change start.
328 */
329 sh = idx - start;
330
331 /*
332 * If >= 64, out of window. start must be at the front
333 * of the circular buffer, idx must be near the end of
334 * the buffer, and idx is the new "first" frame. Shift
335 * the indices around.
336 */
337 if (sh >= 64) {
338 /* Shift bitmap by start - idx, wrapped */
339 sh = 0x100 - idx + start;
340 bitmap = bitmap << sh;
341 /* Now idx is the new start so sh = 0 */
342 sh = 0;
343 start = idx;
344 /*
345 * If <= -64 then wraps the 256-pkt circular buffer
346 * (e.g., start = 255 and idx = 0, sh should be 1)
347 */
348 } else if (sh <= -64) {
349 sh = 0x100 - start + idx;
350 /*
351 * If < 0 but > -64, out of window. idx is before start
352 * but not wrapped. Shift the indices around.
353 */
354 } else if (sh < 0) {
355 /* Shift by how far start is ahead of idx */
356 sh = start - idx;
357 bitmap = bitmap << sh;
358 /* Now idx is the new start so sh = 0 */
359 start = idx;
360 sh = 0;
361 }
362 /* Sequence number start + sh was sent in this batch */
363 bitmap |= 1ULL << sh;
364 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
365 start, (unsigned long long)bitmap);
366 }
367
368 /*
369 * Store the bitmap and possibly the new start, if we wrapped
370 * the buffer above
371 */
372 agg->bitmap = bitmap;
373 agg->start_idx = start;
374 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
375 agg->frame_count, agg->start_idx,
376 (unsigned long long)agg->bitmap);
377
378 if (bitmap)
379 agg->wait_for_ba = 1;
380 }
381 return 0;
382}
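/*
 * Worked example for the bitmap construction above (indices are purely
 * illustrative): if a batch of three frames was sent at indices 10, 11
 * and 13 with start_idx = 10, the loop yields sh = 0, 1 and 3, so
 * bitmap = 0b1011 and start stays 10, i.e. frames start+0, start+1 and
 * start+3 are still waiting for the block-ack.
 */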
383
384void iwl_check_abort_status(struct iwl_priv *priv,
385 u8 frame_count, u32 status)
386{
387 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
388 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
389 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
390 queue_work(priv->workqueue, &priv->tx_flush);
391 }
392}
393
394void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
395{
396 struct iwl_rx_packet *pkt = rxb_addr(rxb);
397 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
398 int txq_id = SEQ_TO_QUEUE(sequence);
399 int index = SEQ_TO_INDEX(sequence);
400 struct iwl_tx_queue *txq = &priv->txq[txq_id];
401 struct ieee80211_tx_info *info;
402 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
403 struct ieee80211_hdr *hdr;
404 struct iwl_tx_info *txb;
405 u32 status = le16_to_cpu(tx_resp->status.status);
406 int tid;
407 int sta_id;
408 int freed;
409 unsigned long flags;
410
411 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
412 IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
413 "index %d is out of range [0-%d] %d %d\n", __func__,
414 txq_id, index, txq->q.n_bd, txq->q.write_ptr,
415 txq->q.read_ptr);
416 return;
417 }
418
419 txq->time_stamp = jiffies;
420 txb = &txq->txb[txq->q.read_ptr];
421 info = IEEE80211_SKB_CB(txb->skb);
422 memset(&info->status, 0, sizeof(info->status));
423
424 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
425 IWLAGN_TX_RES_TID_POS;
426 sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
427 IWLAGN_TX_RES_RA_POS;
428
429 spin_lock_irqsave(&priv->sta_lock, flags);
430
431 hdr = (void *)txb->skb->data;
432 if (!ieee80211_is_data_qos(hdr->frame_control))
433 priv->last_seq_ctl = tx_resp->seq_ctl;
434
435 if (txq->sched_retry) {
436 const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
437 struct iwl_ht_agg *agg;
438
439 agg = &priv->stations[sta_id].tid[tid].agg;
440 /*
441 * If the BT kill count is non-zero, we'll get this
442 * notification again.
443 */
444 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
445 priv->cfg->bt_params &&
446 priv->cfg->bt_params->advanced_bt_coexist) {
447 IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
448 }
449 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
450
451 /* check if BAR is needed */
452 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
453 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
454
455 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
456 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
457 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
458 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
459 scd_ssn , index, txq_id, txq->swq_id);
460
461 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
462 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
463
464 if (priv->mac80211_registered &&
465 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
466 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
467 iwl_wake_queue(priv, txq);
468 }
469 } else {
470 iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
471 txq_id, false);
472 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
473 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
474
475 if (priv->mac80211_registered &&
476 iwl_queue_space(&txq->q) > txq->q.low_mark &&
477 status != TX_STATUS_FAIL_PASSIVE_NO_RX)
478 iwl_wake_queue(priv, txq);
479 }
480
481 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
482
483 iwl_check_abort_status(priv, tx_resp->frame_count, status);
484 spin_unlock_irqrestore(&priv->sta_lock, flags);
485}
486
487int iwlagn_hw_valid_rtc_data_addr(u32 addr)
488{
489 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
490 (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
491}
492
493int iwlagn_send_tx_power(struct iwl_priv *priv)
494{
495 struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
496 u8 tx_ant_cfg_cmd;
497
498 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
499 "TX Power requested while scanning!\n"))
500 return -EAGAIN;
501
502	/* the command takes half-dBm units, so multiply the dBm value by 2 */
503 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
504
505 if (priv->tx_power_lmt_in_half_dbm &&
506 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
507		/*
508		 * The newer devices use an enhanced/extended tx power
509		 * table in the EEPROM whose format is half dBm. The driver
510		 * must convert to dBm before reporting to mac80211, which
511		 * can lose 1/2 dBm of resolution. The driver performs a
512		 * "round-up" operation before reporting, but that could
513		 * push the tx power 1/2 dBm over the regulatory limit.
514		 * Check here: if "tx_power_user_lmt" is higher than the
515		 * EEPROM value (in half-dBm format), lower the tx power
516		 * based on the EEPROM.
517		 */
518 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
519 }
520 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
521 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
522
523 if (IWL_UCODE_API(priv->ucode_ver) == 1)
524 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
525 else
526 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
527
528 return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
529 sizeof(tx_power_cmd), &tx_power_cmd);
530}
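/*
 * Worked example of the clamping above (values are purely
 * illustrative): with tx_power_user_lmt = 15 dBm the command would
 * request 30 half-dBm; if the EEPROM-derived tx_power_lmt_in_half_dbm
 * is 29 (14.5 dBm), global_lmt is lowered to 29 so the rounded-up user
 * value cannot end up 1/2 dBm above the regulatory limit.
 */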
531
532void iwlagn_temperature(struct iwl_priv *priv)
533{
534 /* store temperature from correct statistics (in Celsius) */
535 priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
536 iwl_tt_handler(priv);
537}
538
539u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
540{
541 struct iwl_eeprom_calib_hdr {
542 u8 version;
543 u8 pa_type;
544 u16 voltage;
545 } *hdr;
546
547 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
548 EEPROM_CALIB_ALL);
549 return hdr->version;
550
551}
552
553/*
554 * EEPROM
555 */
556static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
557{
558 u16 offset = 0;
559
560 if ((address & INDIRECT_ADDRESS) == 0)
561 return address;
562
563 switch (address & INDIRECT_TYPE_MSK) {
564 case INDIRECT_HOST:
565 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
566 break;
567 case INDIRECT_GENERAL:
568 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
569 break;
570 case INDIRECT_REGULATORY:
571 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
572 break;
573 case INDIRECT_TXP_LIMIT:
574 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
575 break;
576 case INDIRECT_TXP_LIMIT_SIZE:
577 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
578 break;
579 case INDIRECT_CALIBRATION:
580 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
581 break;
582 case INDIRECT_PROCESS_ADJST:
583 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
584 break;
585 case INDIRECT_OTHERS:
586 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
587 break;
588 default:
589 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
590 address & INDIRECT_TYPE_MSK);
591 break;
592 }
593
594 /* translate the offset from words to byte */
595 return (address & ADDRESS_MSK) + (offset << 1);
596}
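/*
 * Worked example of the indirection above (numbers are purely
 * illustrative): an address of (0x10 | INDIRECT_ADDRESS |
 * INDIRECT_REGULATORY), with the EEPROM_LINK_REGULATORY cell holding a
 * word offset of 0x40, resolves to 0x10 + (0x40 << 1) = 0x90 bytes
 * into the EEPROM image.
 */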
597
598const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
599{
600 u32 address = eeprom_indirect_address(priv, offset);
601 BUG_ON(address >= priv->cfg->base_params->eeprom_size);
602 return &priv->eeprom[address];
603}
604
605struct iwl_mod_params iwlagn_mod_params = {
606 .amsdu_size_8K = 1,
607 .restart_fw = 1,
608 .plcp_check = true,
609 .bt_coex_active = true,
610 .no_sleep_autoadjust = true,
611 .power_level = IWL_POWER_INDEX_1,
612 /* the rest are 0 by default */
613};
614
615int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
616{
617 int idx = 0;
618 int band_offset = 0;
619
620 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
621 if (rate_n_flags & RATE_MCS_HT_MSK) {
622 idx = (rate_n_flags & 0xff);
623 return idx;
624 /* Legacy rate format, search for match in table */
625 } else {
626 if (band == IEEE80211_BAND_5GHZ)
627 band_offset = IWL_FIRST_OFDM_RATE;
628 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
629 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
630 return idx - band_offset;
631 }
632
633 return -1;
634}
635
636static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
637 struct ieee80211_vif *vif,
638 enum ieee80211_band band,
639 struct iwl_scan_channel *scan_ch)
640{
641 const struct ieee80211_supported_band *sband;
642 u16 passive_dwell = 0;
643 u16 active_dwell = 0;
644 int added = 0;
645 u16 channel = 0;
646
647 sband = iwl_get_hw_mode(priv, band);
648 if (!sband) {
649 IWL_ERR(priv, "invalid band\n");
650 return added;
651 }
652
653 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
654 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
655
656 if (passive_dwell <= active_dwell)
657 passive_dwell = active_dwell + 1;
658
659 channel = iwl_get_single_channel_number(priv, band);
660 if (channel) {
661 scan_ch->channel = cpu_to_le16(channel);
662 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
663 scan_ch->active_dwell = cpu_to_le16(active_dwell);
664 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
665 /* Set txpower levels to defaults */
666 scan_ch->dsp_atten = 110;
667 if (band == IEEE80211_BAND_5GHZ)
668 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
669 else
670 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
671 added++;
672 } else
673 IWL_ERR(priv, "no valid channel found\n");
674 return added;
675}
676
677static int iwl_get_channels_for_scan(struct iwl_priv *priv,
678 struct ieee80211_vif *vif,
679 enum ieee80211_band band,
680 u8 is_active, u8 n_probes,
681 struct iwl_scan_channel *scan_ch)
682{
683 struct ieee80211_channel *chan;
684 const struct ieee80211_supported_band *sband;
685 const struct iwl_channel_info *ch_info;
686 u16 passive_dwell = 0;
687 u16 active_dwell = 0;
688 int added, i;
689 u16 channel;
690
691 sband = iwl_get_hw_mode(priv, band);
692 if (!sband)
693 return 0;
694
695 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
696 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
697
698 if (passive_dwell <= active_dwell)
699 passive_dwell = active_dwell + 1;
700
701 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
702 chan = priv->scan_request->channels[i];
703
704 if (chan->band != band)
705 continue;
706
707 channel = chan->hw_value;
708 scan_ch->channel = cpu_to_le16(channel);
709
710 ch_info = iwl_get_channel_info(priv, band, channel);
711 if (!is_channel_valid(ch_info)) {
712 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
713 channel);
714 continue;
715 }
716
717 if (!is_active || is_channel_passive(ch_info) ||
718 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
719 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
720 else
721 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
722
723 if (n_probes)
724 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
725
726 scan_ch->active_dwell = cpu_to_le16(active_dwell);
727 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
728
729 /* Set txpower levels to defaults */
730 scan_ch->dsp_atten = 110;
731
732 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
733 * power level:
734 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
735 */
736 if (band == IEEE80211_BAND_5GHZ)
737 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
738 else
739 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
740
741 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
742 channel, le32_to_cpu(scan_ch->type),
743 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
744 "ACTIVE" : "PASSIVE",
745 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
746 active_dwell : passive_dwell);
747
748 scan_ch++;
749 added++;
750 }
751
752 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
753 return added;
754}
755
756static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
757{
758 struct sk_buff *skb = priv->offchan_tx_skb;
759
760 if (skb->len < maxlen)
761 maxlen = skb->len;
762
763 memcpy(data, skb->data, maxlen);
764
765 return maxlen;
766}
767
768int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
769{
770 struct iwl_host_cmd cmd = {
771 .id = REPLY_SCAN_CMD,
772 .len = { sizeof(struct iwl_scan_cmd), },
773 .flags = CMD_SYNC,
774 };
775 struct iwl_scan_cmd *scan;
776 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
777 u32 rate_flags = 0;
778 u16 cmd_len;
779 u16 rx_chain = 0;
780 enum ieee80211_band band;
781 u8 n_probes = 0;
782 u8 rx_ant = priv->hw_params.valid_rx_ant;
783 u8 rate;
784 bool is_active = false;
785 int chan_mod;
786 u8 active_chains;
787 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
788 int ret;
789
790 lockdep_assert_held(&priv->mutex);
791
792 if (vif)
793 ctx = iwl_rxon_ctx_from_vif(vif);
794
795 if (!priv->scan_cmd) {
796 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
797 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
798 if (!priv->scan_cmd) {
799 IWL_DEBUG_SCAN(priv,
800 "fail to allocate memory for scan\n");
801 return -ENOMEM;
802 }
803 }
804 scan = priv->scan_cmd;
805 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
806
807 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
808 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
809
810 if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
811 iwl_is_any_associated(priv)) {
812 u16 interval = 0;
813 u32 extra;
814 u32 suspend_time = 100;
815 u32 scan_suspend_time = 100;
816
817 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
818 switch (priv->scan_type) {
819 case IWL_SCAN_OFFCH_TX:
820 WARN_ON(1);
821 break;
822 case IWL_SCAN_RADIO_RESET:
823 interval = 0;
824 break;
825 case IWL_SCAN_NORMAL:
826 interval = vif->bss_conf.beacon_int;
827 break;
828 }
829
830 scan->suspend_time = 0;
831 scan->max_out_time = cpu_to_le32(200 * 1024);
832 if (!interval)
833 interval = suspend_time;
834
835 extra = (suspend_time / interval) << 22;
836 scan_suspend_time = (extra |
837 ((suspend_time % interval) * 1024));
838 scan->suspend_time = cpu_to_le32(scan_suspend_time);
839 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
840 scan_suspend_time, interval);
841 } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
842 scan->suspend_time = 0;
843 scan->max_out_time =
844 cpu_to_le32(1024 * priv->offchan_tx_timeout);
845 }
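
	/*
	 * Worked example of the suspend_time encoding above (numbers
	 * are purely illustrative): with suspend_time = 100 and a
	 * beacon interval of 40, the quotient goes into the upper bits
	 * and the remainder is scaled by 1024, giving
	 * (100 / 40) << 22 | (100 % 40) * 1024 = 0x800000 | 0x5000
	 * = 0x805000.
	 */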
846
847 switch (priv->scan_type) {
848 case IWL_SCAN_RADIO_RESET:
849 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
850 break;
851 case IWL_SCAN_NORMAL:
852 if (priv->scan_request->n_ssids) {
853 int i, p = 0;
854 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
855 for (i = 0; i < priv->scan_request->n_ssids; i++) {
856 /* always does wildcard anyway */
857 if (!priv->scan_request->ssids[i].ssid_len)
858 continue;
859 scan->direct_scan[p].id = WLAN_EID_SSID;
860 scan->direct_scan[p].len =
861 priv->scan_request->ssids[i].ssid_len;
862 memcpy(scan->direct_scan[p].ssid,
863 priv->scan_request->ssids[i].ssid,
864 priv->scan_request->ssids[i].ssid_len);
865 n_probes++;
866 p++;
867 }
868 is_active = true;
869 } else
870 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
871 break;
872 case IWL_SCAN_OFFCH_TX:
873 IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
874 break;
875 }
876
877 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
878 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
879 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
880
881 switch (priv->scan_band) {
882 case IEEE80211_BAND_2GHZ:
883 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
884 chan_mod = le32_to_cpu(
885 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
886 RXON_FLG_CHANNEL_MODE_MSK)
887 >> RXON_FLG_CHANNEL_MODE_POS;
888 if (chan_mod == CHANNEL_MODE_PURE_40) {
889 rate = IWL_RATE_6M_PLCP;
890 } else {
891 rate = IWL_RATE_1M_PLCP;
892 rate_flags = RATE_MCS_CCK_MSK;
893 }
894 /*
895 * Internal scans are passive, so we can indiscriminately set
896 * the BT ignore flag on 2.4 GHz since it applies to TX only.
897 */
898 if (priv->cfg->bt_params &&
899 priv->cfg->bt_params->advanced_bt_coexist)
900 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
901 break;
902 case IEEE80211_BAND_5GHZ:
903 rate = IWL_RATE_6M_PLCP;
904 break;
905 default:
906 IWL_WARN(priv, "Invalid scan band\n");
907 return -EIO;
908 }
909
910 /*
911 * If active scanning is requested but a certain channel is
912 * marked passive, we can do active scanning if we detect
913 * transmissions.
914 *
915 * There is an issue with some firmware versions that triggers
916 * a sysassert on a "good CRC threshold" of zero (== disabled),
917 * on a radar channel even though this means that we should NOT
918 * send probes.
919 *
920 * The "good CRC threshold" is the number of frames that we
921 * need to receive during our dwell time on a channel before
922 * sending out probes -- setting this to a huge value will
923 * mean we never reach it, but at the same time work around
924 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
925 * here instead of IWL_GOOD_CRC_TH_DISABLED.
926 *
927 * This was fixed in later versions along with some other
928 * scan changes, and the threshold behaves as a flag in those
929 * versions.
930 */
931 if (priv->new_scan_threshold_behaviour)
932 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
933 IWL_GOOD_CRC_TH_DISABLED;
934 else
935 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
936 IWL_GOOD_CRC_TH_NEVER;
937
938 band = priv->scan_band;
939
940 if (priv->cfg->scan_rx_antennas[band])
941 rx_ant = priv->cfg->scan_rx_antennas[band];
942
943 if (band == IEEE80211_BAND_2GHZ &&
944 priv->cfg->bt_params &&
945 priv->cfg->bt_params->advanced_bt_coexist) {
946 /* transmit 2.4 GHz probes only on first antenna */
947 scan_tx_antennas = first_antenna(scan_tx_antennas);
948 }
949
950 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
951 scan_tx_antennas);
952 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
953 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
954
955 /* In power save mode use one chain, otherwise use all chains */
956 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
957 /* rx_ant has been set to all valid chains previously */
958 active_chains = rx_ant &
959 ((u8)(priv->chain_noise_data.active_chains));
960 if (!active_chains)
961 active_chains = rx_ant;
962
963 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
964 priv->chain_noise_data.active_chains);
965
966 rx_ant = first_antenna(active_chains);
967 }
968 if (priv->cfg->bt_params &&
969 priv->cfg->bt_params->advanced_bt_coexist &&
970 priv->bt_full_concurrent) {
971 /* operated as 1x1 in full concurrency mode */
972 rx_ant = first_antenna(rx_ant);
973 }
974
975 /* MIMO is not used here, but value is required */
976 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
977 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
978 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
979 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
980 scan->rx_chain = cpu_to_le16(rx_chain);
981 switch (priv->scan_type) {
982 case IWL_SCAN_NORMAL:
983 cmd_len = iwl_fill_probe_req(priv,
984 (struct ieee80211_mgmt *)scan->data,
985 vif->addr,
986 priv->scan_request->ie,
987 priv->scan_request->ie_len,
988 IWL_MAX_SCAN_SIZE - sizeof(*scan));
989 break;
990 case IWL_SCAN_RADIO_RESET:
991 /* use bcast addr, will not be transmitted but must be valid */
992 cmd_len = iwl_fill_probe_req(priv,
993 (struct ieee80211_mgmt *)scan->data,
994 iwl_bcast_addr, NULL, 0,
995 IWL_MAX_SCAN_SIZE - sizeof(*scan));
996 break;
997 case IWL_SCAN_OFFCH_TX:
998 cmd_len = iwl_fill_offch_tx(priv, scan->data,
999 IWL_MAX_SCAN_SIZE
1000 - sizeof(*scan)
1001 - sizeof(struct iwl_scan_channel));
1002 scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
1003 break;
1004 default:
1005 BUG();
1006 }
1007 scan->tx_cmd.len = cpu_to_le16(cmd_len);
1008
1009 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
1010 RXON_FILTER_BCON_AWARE_MSK);
1011
1012 switch (priv->scan_type) {
1013 case IWL_SCAN_RADIO_RESET:
1014 scan->channel_count =
1015 iwl_get_single_channel_for_scan(priv, vif, band,
1016 (void *)&scan->data[cmd_len]);
1017 break;
1018 case IWL_SCAN_NORMAL:
1019 scan->channel_count =
1020 iwl_get_channels_for_scan(priv, vif, band,
1021 is_active, n_probes,
1022 (void *)&scan->data[cmd_len]);
1023 break;
1024 case IWL_SCAN_OFFCH_TX: {
1025 struct iwl_scan_channel *scan_ch;
1026
1027 scan->channel_count = 1;
1028
1029 scan_ch = (void *)&scan->data[cmd_len];
1030 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
1031 scan_ch->channel =
1032 cpu_to_le16(priv->offchan_tx_chan->hw_value);
1033 scan_ch->active_dwell =
1034 cpu_to_le16(priv->offchan_tx_timeout);
1035 scan_ch->passive_dwell = 0;
1036
1037 /* Set txpower levels to defaults */
1038 scan_ch->dsp_atten = 110;
1039
1040 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1041 * power level:
1042 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
1043 */
1044 if (priv->offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
1045 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1046 else
1047 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1048 }
1049 break;
1050 }
1051
1052 if (scan->channel_count == 0) {
1053 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1054 return -EIO;
1055 }
1056
1057 cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
1058 scan->channel_count * sizeof(struct iwl_scan_channel);
1059 cmd.data[0] = scan;
1060 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1061 scan->len = cpu_to_le16(cmd.len[0]);
1062
1063 /* set scan bit here for PAN params */
1064 set_bit(STATUS_SCAN_HW, &priv->status);
1065
1066 ret = iwlagn_set_pan_params(priv);
1067 if (ret)
1068 return ret;
1069
1070 ret = trans_send_cmd(&priv->trans, &cmd);
1071 if (ret) {
1072 clear_bit(STATUS_SCAN_HW, &priv->status);
1073 iwlagn_set_pan_params(priv);
1074 }
1075
1076 return ret;
1077}
1078
1079int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1080 struct ieee80211_vif *vif, bool add)
1081{
1082 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1083
1084 if (add)
1085 return iwlagn_add_bssid_station(priv, vif_priv->ctx,
1086 vif->bss_conf.bssid,
1087 &vif_priv->ibss_bssid_sta_id);
1088 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1089 vif->bss_conf.bssid);
1090}
1091
1092void iwl_free_tfds_in_queue(struct iwl_priv *priv,
1093 int sta_id, int tid, int freed)
1094{
1095 lockdep_assert_held(&priv->sta_lock);
1096
1097 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1098 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1099 else {
1100 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1101 priv->stations[sta_id].tid[tid].tfds_in_queue,
1102 freed);
1103 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1104 }
1105}
1106
1107#define IWL_FLUSH_WAIT_MS 2000
1108
1109int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
1110{
1111 struct iwl_tx_queue *txq;
1112 struct iwl_queue *q;
1113 int cnt;
1114 unsigned long now = jiffies;
1115 int ret = 0;
1116
1117 /* waiting for all the tx frames complete might take a while */
1118 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1119 if (cnt == priv->cmd_queue)
1120 continue;
1121 txq = &priv->txq[cnt];
1122 q = &txq->q;
1123 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1124 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1125 msleep(1);
1126
1127 if (q->read_ptr != q->write_ptr) {
1128 IWL_ERR(priv, "fail to flush all tx fifo queues\n");
1129 ret = -ETIMEDOUT;
1130 break;
1131 }
1132 }
1133 return ret;
1134}
1135
1136#define IWL_TX_QUEUE_MSK 0xfffff
1137
1138/**
1139 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
1140 *
1141 * Prerequisites:
1142 * 1. acquire the mutex before calling
1143 * 2. make sure the RF is on and not in an exit state
1144 */
1145int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1146{
1147 struct iwl_txfifo_flush_cmd flush_cmd;
1148 struct iwl_host_cmd cmd = {
1149 .id = REPLY_TXFIFO_FLUSH,
1150 .len = { sizeof(struct iwl_txfifo_flush_cmd), },
1151 .flags = CMD_SYNC,
1152 .data = { &flush_cmd, },
1153 };
1154
1155 might_sleep();
1156
1157 memset(&flush_cmd, 0, sizeof(flush_cmd));
1158 if (flush_control & BIT(IWL_RXON_CTX_BSS))
1159 flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
1160 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
1161 IWL_SCD_MGMT_MSK;
1162 if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
1163 (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
1164 flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
1165 IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
1166 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
1167 IWL_PAN_SCD_MULTICAST_MSK;
1168
1169 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
1170 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
1171
1172 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
1173 flush_cmd.fifo_control);
1174 flush_cmd.flush_control = cpu_to_le16(flush_control);
1175
1176 return trans_send_cmd(&priv->trans, &cmd);
1177}
1178
1179void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1180{
1181 mutex_lock(&priv->mutex);
1182 ieee80211_stop_queues(priv->hw);
1183 if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
1184 IWL_ERR(priv, "flush request fail\n");
1185 goto done;
1186 }
1187 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
1188 iwlagn_wait_tx_queue_empty(priv);
1189done:
1190 ieee80211_wake_queues(priv->hw);
1191 mutex_unlock(&priv->mutex);
1192}
1193
1194/*
1195 * BT coex
1196 */
1197/*
1198 * Macros to access the lookup table.
1199 *
1200 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
1201 * wifi_prio, wifi_txrx and wifi_sh_ant_req.
1202 *
1203 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
1204 *
1205 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
1206 * one after another in 32-bit registers, and "registers" 0 through 7 contain
1207 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
1208 *
1209 * These macros encode that format.
1210 */
1211#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
1212 wifi_txrx, wifi_sh_ant_req) \
1213 (bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
1214 (wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
1215
1216#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
1217 lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
1218#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1219 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1220 (!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
1221 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1222 wifi_sh_ant_req))))
1223#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1224 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1225 LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
1226 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1227 wifi_sh_ant_req))
1228#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
1229 wifi_req, wifi_prio, wifi_txrx, \
1230 wifi_sh_ant_req) \
1231 LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
1232 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1233 wifi_sh_ant_req))
1234
1235#define LUT_WLAN_KILL_OP(lut, op, val) \
1236 lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
1237#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1238 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1239 (!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1240 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
1241#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1242 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1243 LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1244 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1245#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1246 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1247 LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1248 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1249
1250#define LUT_ANT_SWITCH_OP(lut, op, val) \
1251 lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
1252#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1253 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1254 (!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1255 wifi_req, wifi_prio, wifi_txrx, \
1256 wifi_sh_ant_req))))
1257#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1258 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1259 LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1260 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1261#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1262 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1263 LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1264 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
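/*
 * Worked example of the LUT addressing above (inputs are purely
 * illustrative): with bt3_prio = 1, bt_rf_act = 1, wifi_txrx = 1 and
 * all other inputs 0, LUT_VALUE() packs them into val = 0x25 (37).
 * The WLAN_ACTIVE output is then bit (37 & 0x1f) = 5 of register
 * 8 + (37 >> 5) = 9, while WLAN_KILL and ANT_SWITCH are bits 10 and 11
 * of register 37 >> 4 = 2.
 */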
1265
1266static const __le32 iwlagn_def_3w_lookup[12] = {
1267 cpu_to_le32(0xaaaaaaaa),
1268 cpu_to_le32(0xaaaaaaaa),
1269 cpu_to_le32(0xaeaaaaaa),
1270 cpu_to_le32(0xaaaaaaaa),
1271 cpu_to_le32(0xcc00ff28),
1272 cpu_to_le32(0x0000aaaa),
1273 cpu_to_le32(0xcc00aaaa),
1274 cpu_to_le32(0x0000aaaa),
1275 cpu_to_le32(0xc0004000),
1276 cpu_to_le32(0x00004000),
1277 cpu_to_le32(0xf0005000),
1278 cpu_to_le32(0xf0005000),
1279};
1280
1281static const __le32 iwlagn_concurrent_lookup[12] = {
1282 cpu_to_le32(0xaaaaaaaa),
1283 cpu_to_le32(0xaaaaaaaa),
1284 cpu_to_le32(0xaaaaaaaa),
1285 cpu_to_le32(0xaaaaaaaa),
1286 cpu_to_le32(0xaaaaaaaa),
1287 cpu_to_le32(0xaaaaaaaa),
1288 cpu_to_le32(0xaaaaaaaa),
1289 cpu_to_le32(0xaaaaaaaa),
1290 cpu_to_le32(0x00000000),
1291 cpu_to_le32(0x00000000),
1292 cpu_to_le32(0x00000000),
1293 cpu_to_le32(0x00000000),
1294};
1295
1296void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1297{
1298 struct iwl_basic_bt_cmd basic = {
1299 .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
1300 .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
1301 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
1302 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
1303 };
1304 struct iwl6000_bt_cmd bt_cmd_6000;
1305 struct iwl2000_bt_cmd bt_cmd_2000;
1306 int ret;
1307
1308 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
1309 sizeof(basic.bt3_lookup_table));
1310
1311 if (priv->cfg->bt_params) {
1312 if (priv->cfg->bt_params->bt_session_2) {
1313 bt_cmd_2000.prio_boost = cpu_to_le32(
1314 priv->cfg->bt_params->bt_prio_boost);
1315 bt_cmd_2000.tx_prio_boost = 0;
1316 bt_cmd_2000.rx_prio_boost = 0;
1317 } else {
1318 bt_cmd_6000.prio_boost =
1319 priv->cfg->bt_params->bt_prio_boost;
1320 bt_cmd_6000.tx_prio_boost = 0;
1321 bt_cmd_6000.rx_prio_boost = 0;
1322 }
1323 } else {
1324 IWL_ERR(priv, "failed to construct BT Coex Config\n");
1325 return;
1326 }
1327
1328 basic.kill_ack_mask = priv->kill_ack_mask;
1329 basic.kill_cts_mask = priv->kill_cts_mask;
1330 basic.valid = priv->bt_valid;
1331
1332 /*
1333 * Configure BT coex mode to "no coexistence" when the
1334 * user disabled BT coexistence, we have no interface
1335 * (might be in monitor mode), or the interface is in
1336 * IBSS mode (no proper uCode support for coex then).
1337 */
1338 if (!iwlagn_mod_params.bt_coex_active ||
1339 priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1340 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
1341 } else {
1342 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1343 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
1344
1345 if (!priv->bt_enable_pspoll)
1346 basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
1347 else
1348 basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
1349
1350 if (priv->bt_ch_announce)
1351 basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
1352 IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
1353 }
1354 priv->bt_enable_flag = basic.flags;
1355 if (priv->bt_full_concurrent)
1356 memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
1357 sizeof(iwlagn_concurrent_lookup));
1358 else
1359 memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
1360 sizeof(iwlagn_def_3w_lookup));
1361
1362 IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
1363 basic.flags ? "active" : "disabled",
1364 priv->bt_full_concurrent ?
1365 "full concurrency" : "3-wire");
1366
1367 if (priv->cfg->bt_params->bt_session_2) {
1368 memcpy(&bt_cmd_2000.basic, &basic,
1369 sizeof(basic));
1370 ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
1371 CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
1372 } else {
1373 memcpy(&bt_cmd_6000.basic, &basic,
1374 sizeof(basic));
1375 ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
1376 CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
1377 }
1378 if (ret)
1379 IWL_ERR(priv, "failed to send BT Coex Config\n");
1380
1381}
1382
1383void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
1384{
1385 struct iwl_rxon_context *ctx, *found_ctx = NULL;
1386 bool found_ap = false;
1387
1388 lockdep_assert_held(&priv->mutex);
1389
1390 /* Check whether AP or GO mode is active. */
1391 if (rssi_ena) {
1392 for_each_context(priv, ctx) {
1393 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP &&
1394 iwl_is_associated_ctx(ctx)) {
1395 found_ap = true;
1396 break;
1397 }
1398 }
1399 }
1400
1401	/*
1402	 * If a disable request was received, or if GO/AP mode is
1403	 * active, disable RSSI measurements.
1404	 */
1405 if (!rssi_ena || found_ap) {
1406 if (priv->cur_rssi_ctx) {
1407 ctx = priv->cur_rssi_ctx;
1408 ieee80211_disable_rssi_reports(ctx->vif);
1409 priv->cur_rssi_ctx = NULL;
1410 }
1411 return;
1412 }
1413
1414 /*
1415 * If rssi measurements need to be enabled, consider all cases now.
1416 * Figure out how many contexts are active.
1417 */
1418 for_each_context(priv, ctx) {
1419 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
1420 iwl_is_associated_ctx(ctx)) {
1421 found_ctx = ctx;
1422 break;
1423 }
1424 }
1425
1426	/*
1427	 * RSSI monitor is already enabled for the correct interface;
1428	 * nothing to do.
1429	 */
1430 if (found_ctx == priv->cur_rssi_ctx)
1431 return;
1432
1433	/*
1434	 * Figure out whether the RSSI monitor is currently enabled and
1435	 * needs to be changed. If it is already enabled, disable it
1436	 * first; otherwise just enable RSSI measurements on the
1437	 * interface found above.
1438	 */
1439 if (priv->cur_rssi_ctx) {
1440 ctx = priv->cur_rssi_ctx;
1441 if (ctx->vif)
1442 ieee80211_disable_rssi_reports(ctx->vif);
1443 }
1444
1445 priv->cur_rssi_ctx = found_ctx;
1446
1447 if (!found_ctx)
1448 return;
1449
1450 ieee80211_enable_rssi_reports(found_ctx->vif,
1451 IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD,
1452 IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD);
1453}
1454
1455static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
1456{
1457 return BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3 >>
1458 BT_UART_MSG_FRAME3SCOESCO_POS;
1459}
1460
1461static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1462{
1463 struct iwl_priv *priv =
1464 container_of(work, struct iwl_priv, bt_traffic_change_work);
1465 struct iwl_rxon_context *ctx;
1466 int smps_request = -1;
1467
1468 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1469 /* bt coex disabled */
1470 return;
1471 }
1472
1473	/*
1474	 * Note: bt_traffic_load can be overridden by scan complete and
1475	 * coex profile notifications. Ignore that; the only bad consequence
1476	 * is that the debug print may not match the actual state.
1477	 */
1478 IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
1479 priv->bt_traffic_load);
1480
1481 switch (priv->bt_traffic_load) {
1482 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1483 if (priv->bt_status)
1484 smps_request = IEEE80211_SMPS_DYNAMIC;
1485 else
1486 smps_request = IEEE80211_SMPS_AUTOMATIC;
1487 break;
1488 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1489 smps_request = IEEE80211_SMPS_DYNAMIC;
1490 break;
1491 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1492 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1493 smps_request = IEEE80211_SMPS_STATIC;
1494 break;
1495 default:
1496 IWL_ERR(priv, "Invalid BT traffic load: %d\n",
1497 priv->bt_traffic_load);
1498 break;
1499 }
1500
1501 mutex_lock(&priv->mutex);
1502
1503	/*
1504	 * We cannot send commands to the firmware while scanning. When the
1505	 * scan completes, this work is scheduled again. The check is done
1506	 * with the mutex held to prevent new scan requests from arriving. We
1507	 * do not check STATUS_SCANNING, to avoid a race when queue_work is
1508	 * called twice from different notifications; instead we simply quit.
1509	 */
1510 if (test_bit(STATUS_SCAN_HW, &priv->status))
1511 goto out;
1512
1513 iwl_update_chain_flags(priv);
1514
1515 if (smps_request != -1) {
1516 priv->current_ht_config.smps = smps_request;
1517 for_each_context(priv, ctx) {
1518 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
1519 ieee80211_request_smps(ctx->vif, smps_request);
1520 }
1521 }
1522
1523 /*
1524 * Dynamic PS poll related functionality. Adjust RSSI measurements if
1525 * necessary.
1526 */
1527 iwlagn_bt_coex_rssi_monitor(priv);
1528out:
1529 mutex_unlock(&priv->mutex);
1530}
1531
1532/*
1533 * If BT sco traffic, and RSSI monitor is enabled, move measurements to the
1534 * correct interface or disable it if this is the last interface to be
1535 * removed.
1536 */
1537void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv)
1538{
1539 if (priv->bt_is_sco &&
1540 priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS)
1541 iwlagn_bt_adjust_rssi_monitor(priv, true);
1542 else
1543 iwlagn_bt_adjust_rssi_monitor(priv, false);
1544}
1545
1546static void iwlagn_print_uartmsg(struct iwl_priv *priv,
1547 struct iwl_bt_uart_msg *uart_msg)
1548{
1549 IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
1550 "Update Req = 0x%X",
1551 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
1552 BT_UART_MSG_FRAME1MSGTYPE_POS,
1553 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
1554 BT_UART_MSG_FRAME1SSN_POS,
1555 (BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
1556 BT_UART_MSG_FRAME1UPDATEREQ_POS);
1557
1558 IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
1559 "Chl_SeqN = 0x%X, In band = 0x%X",
1560 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
1561 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
1562 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
1563 BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
1564 (BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
1565 BT_UART_MSG_FRAME2CHLSEQN_POS,
1566 (BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
1567 BT_UART_MSG_FRAME2INBAND_POS);
1568
1569 IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
1570 "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
1571 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
1572 BT_UART_MSG_FRAME3SCOESCO_POS,
1573 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
1574 BT_UART_MSG_FRAME3SNIFF_POS,
1575 (BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
1576 BT_UART_MSG_FRAME3A2DP_POS,
1577 (BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
1578 BT_UART_MSG_FRAME3ACL_POS,
1579 (BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
1580 BT_UART_MSG_FRAME3MASTER_POS,
1581 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
1582 BT_UART_MSG_FRAME3OBEX_POS);
1583
1584 IWL_DEBUG_COEX(priv, "Idle duration = 0x%X",
1585 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
1586 BT_UART_MSG_FRAME4IDLEDURATION_POS);
1587
1588 IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
1589 "eSCO Retransmissions = 0x%X",
1590 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
1591 BT_UART_MSG_FRAME5TXACTIVITY_POS,
1592 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
1593 BT_UART_MSG_FRAME5RXACTIVITY_POS,
1594 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
1595 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
1596
1597 IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
1598 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
1599 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
1600 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
1601 BT_UART_MSG_FRAME6DISCOVERABLE_POS);
1602
1603 IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
1604 "0x%X, Inquiry = 0x%X, Connectable = 0x%X",
1605 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
1606 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
1607 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
1608 BT_UART_MSG_FRAME7PAGE_POS,
1609 (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
1610 BT_UART_MSG_FRAME7INQUIRY_POS,
1611 (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
1612 BT_UART_MSG_FRAME7CONNECTABLE_POS);
1613}
1614
1615static void iwlagn_set_kill_msk(struct iwl_priv *priv,
1616 struct iwl_bt_uart_msg *uart_msg)
1617{
1618 u8 kill_msk;
1619 static const __le32 bt_kill_ack_msg[2] = {
1620 IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
1621 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
1622 static const __le32 bt_kill_cts_msg[2] = {
1623 IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
1624 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
1625
1626 kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
1627 ? 1 : 0;
1628 if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
1629 priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
1630 priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
1631 priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
1632 priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
1633 priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
1634
1635 /* schedule to send runtime bt_config */
1636 queue_work(priv->workqueue, &priv->bt_runtime_config);
1637 }
1638}
1639
1640void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
1641 struct iwl_rx_mem_buffer *rxb)
1642{
1643 unsigned long flags;
1644 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1645 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
1646 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
1647
1648 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1649 /* bt coex disabled */
1650 return;
1651 }
1652
1653 IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
1654 IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status);
1655 IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load);
1656 IWL_DEBUG_COEX(priv, " CI compliance: %d\n",
1657 coex->bt_ci_compliance);
1658 iwlagn_print_uartmsg(priv, uart_msg);
1659
1660 priv->last_bt_traffic_load = priv->bt_traffic_load;
1661 priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg);
1662
1663 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
1664 if (priv->bt_status != coex->bt_status ||
1665 priv->last_bt_traffic_load != coex->bt_traffic_load) {
1666 if (coex->bt_status) {
1667 /* BT on */
1668 if (!priv->bt_ch_announce)
1669 priv->bt_traffic_load =
1670 IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1671 else
1672 priv->bt_traffic_load =
1673 coex->bt_traffic_load;
1674 } else {
1675 /* BT off */
1676 priv->bt_traffic_load =
1677 IWL_BT_COEX_TRAFFIC_LOAD_NONE;
1678 }
1679 priv->bt_status = coex->bt_status;
1680 queue_work(priv->workqueue,
1681 &priv->bt_traffic_change_work);
1682 }
1683 }
1684
1685 iwlagn_set_kill_msk(priv, uart_msg);
1686
1687 /* FIXME: based on notification, adjust the prio_boost */
1688
1689 spin_lock_irqsave(&priv->lock, flags);
1690 priv->bt_ci_compliance = coex->bt_ci_compliance;
1691 spin_unlock_irqrestore(&priv->lock, flags);
1692}
1693
1694void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
1695{
1696 priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
1697 iwlagn_bt_coex_profile_notif;
1698}
1699
1700void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
1701{
1702 INIT_WORK(&priv->bt_traffic_change_work,
1703 iwlagn_bt_traffic_change_work);
1704}
1705
1706void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
1707{
1708 cancel_work_sync(&priv->bt_traffic_change_work);
1709}
1710
1711static bool is_single_rx_stream(struct iwl_priv *priv)
1712{
1713 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1714 priv->current_ht_config.single_chain_sufficient;
1715}
1716
1717#define IWL_NUM_RX_CHAINS_MULTIPLE 3
1718#define IWL_NUM_RX_CHAINS_SINGLE 2
1719#define IWL_NUM_IDLE_CHAINS_DUAL 2
1720#define IWL_NUM_IDLE_CHAINS_SINGLE 1
1721
1722/*
1723 * Determine how many receiver/antenna chains to use.
1724 *
1725 * More provides better reception via diversity. Fewer saves power
1726 * at the expense of throughput, but only when not in powersave to
1727 * start with.
1728 *
1729 * MIMO (dual stream) requires at least 2, but works better with 3.
1730 * This does not determine *which* chains to use, just how many.
1731 */
1732static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
1733{
1734 if (priv->cfg->bt_params &&
1735 priv->cfg->bt_params->advanced_bt_coexist &&
1736 (priv->bt_full_concurrent ||
1737 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
1738 /*
1739 * only use chain 'A' in bt high traffic load or
1740 * full concurrency mode
1741 */
1742 return IWL_NUM_RX_CHAINS_SINGLE;
1743 }
1744 /* # of Rx chains to use when expecting MIMO. */
1745 if (is_single_rx_stream(priv))
1746 return IWL_NUM_RX_CHAINS_SINGLE;
1747 else
1748 return IWL_NUM_RX_CHAINS_MULTIPLE;
1749}
1750
1751/*
1752 * When we are in power saving mode, unless the device supports spatial
1753 * multiplexing power save, use the active count for the rx chain count.
1754 */
1755static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1756{
1757 /* # Rx chains when idling, depending on SMPS mode */
1758 switch (priv->current_ht_config.smps) {
1759 case IEEE80211_SMPS_STATIC:
1760 case IEEE80211_SMPS_DYNAMIC:
1761 return IWL_NUM_IDLE_CHAINS_SINGLE;
1762 case IEEE80211_SMPS_OFF:
1763 return active_cnt;
1764 default:
1765 WARN(1, "invalid SMPS mode %d",
1766 priv->current_ht_config.smps);
1767 return active_cnt;
1768 }
1769}
1770
1771/* up to 4 chains */
1772static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
1773{
1774 u8 res;
1775 res = (chain_bitmap & BIT(0)) >> 0;
1776 res += (chain_bitmap & BIT(1)) >> 1;
1777 res += (chain_bitmap & BIT(2)) >> 2;
1778 res += (chain_bitmap & BIT(3)) >> 3;
1779 return res;
1780}
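
/*
 * Illustrative sketch (editorial, not part of this driver): the helper
 * above is a population count over the low four bits, so a bitmap of 0x5
 * (chains A and C) yields 2 and 0x7 (A, B and C) yields 3. The
 * hypothetical loop below computes the same result.
 */
static inline u8 example_count_chain_bitmap(u32 chain_bitmap)
{
	u8 i, count = 0;

	/* up to 4 chains, one bit each in positions 0..3 */
	for (i = 0; i < 4; i++)
		if (chain_bitmap & BIT(i))
			count++;
	return count;
}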
1781
1782/**
1783 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1784 *
1785 * Selects how many and which Rx receivers/antennas/chains to use.
1786 * This should not be used for the scan command; it puts data in the wrong place.
1787 */
1788void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1789{
1790 bool is_single = is_single_rx_stream(priv);
1791 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
1792 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1793 u32 active_chains;
1794 u16 rx_chain;
1795
1796 /* Tell uCode which antennas are actually connected.
1797 * Before first association, we assume all antennas are connected.
1798 * Just after first association, iwl_chain_noise_calibration()
1799 * checks which antennas actually *are* connected. */
1800 if (priv->chain_noise_data.active_chains)
1801 active_chains = priv->chain_noise_data.active_chains;
1802 else
1803 active_chains = priv->hw_params.valid_rx_ant;
1804
1805 if (priv->cfg->bt_params &&
1806 priv->cfg->bt_params->advanced_bt_coexist &&
1807 (priv->bt_full_concurrent ||
1808 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
1809 /*
1810 * only use chain 'A' in bt high traffic load or
1811 * full concurrency mode
1812 */
1813 active_chains = first_antenna(active_chains);
1814 }
1815
1816 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1817
1818 /* How many receivers should we use? */
1819 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
1820 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
1821
1822
1823	/* correct the rx chain count according to hw settings
1824 * and chain noise calibration
1825 */
1826 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
1827 if (valid_rx_cnt < active_rx_cnt)
1828 active_rx_cnt = valid_rx_cnt;
1829
1830 if (valid_rx_cnt < idle_rx_cnt)
1831 idle_rx_cnt = valid_rx_cnt;
1832
1833 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1834 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1835
1836 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
1837
1838 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
1839 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1840 else
1841 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1842
1843 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
1844 ctx->staging.rx_chain,
1845 active_rx_cnt, idle_rx_cnt);
1846
1847 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1848 active_rx_cnt < idle_rx_cnt);
1849}
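
/*
 * Worked example (editorial, illustrative values only): with chains A and
 * B connected (active_chains = 0x3), two chains active and one kept while
 * idle, the staging field above ends up as
 *
 *	rx_chain = (0x3 << RXON_RX_CHAIN_VALID_POS) |
 *		   (2 << RXON_RX_CHAIN_MIMO_CNT_POS) |
 *		   (1 << RXON_RX_CHAIN_CNT_POS);
 *
 * "which chains exist", "how many to use when receiving" and "how many to
 * keep powered while idle" are independent bit fields of the same
 * little-endian u16 written to ctx->staging.rx_chain.
 */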
1850
1851u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1852{
1853 int i;
1854 u8 ind = ant;
1855
1856 if (priv->band == IEEE80211_BAND_2GHZ &&
1857 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
1858 return 0;
1859
1860 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1861 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1862 if (valid & BIT(ind))
1863 return ind;
1864 }
1865 return ant;
1866}
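
/*
 * Usage sketch (editorial, assumed values): with valid = BIT(0) | BIT(2),
 * i.e. only antennas A and C usable, iwl_toggle_tx_ant(priv, 0, valid)
 * walks forward from index 0 and returns 2, the next valid antenna;
 * called again with ant = 2 it wraps around and returns 0. On 2.4 GHz
 * under high BT traffic load it always returns 0, pinning Tx to the
 * first antenna as a BT-coexistence restriction.
 */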
1867
1868static const char *get_csr_string(int cmd)
1869{
1870 switch (cmd) {
1871 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1872 IWL_CMD(CSR_INT_COALESCING);
1873 IWL_CMD(CSR_INT);
1874 IWL_CMD(CSR_INT_MASK);
1875 IWL_CMD(CSR_FH_INT_STATUS);
1876 IWL_CMD(CSR_GPIO_IN);
1877 IWL_CMD(CSR_RESET);
1878 IWL_CMD(CSR_GP_CNTRL);
1879 IWL_CMD(CSR_HW_REV);
1880 IWL_CMD(CSR_EEPROM_REG);
1881 IWL_CMD(CSR_EEPROM_GP);
1882 IWL_CMD(CSR_OTP_GP_REG);
1883 IWL_CMD(CSR_GIO_REG);
1884 IWL_CMD(CSR_GP_UCODE_REG);
1885 IWL_CMD(CSR_GP_DRIVER_REG);
1886 IWL_CMD(CSR_UCODE_DRV_GP1);
1887 IWL_CMD(CSR_UCODE_DRV_GP2);
1888 IWL_CMD(CSR_LED_REG);
1889 IWL_CMD(CSR_DRAM_INT_TBL_REG);
1890 IWL_CMD(CSR_GIO_CHICKEN_BITS);
1891 IWL_CMD(CSR_ANA_PLL_CFG);
1892 IWL_CMD(CSR_HW_REV_WA_REG);
1893 IWL_CMD(CSR_DBG_HPET_MEM_REG);
1894 default:
1895 return "UNKNOWN";
1896 }
1897}
1898
1899void iwl_dump_csr(struct iwl_priv *priv)
1900{
1901 int i;
1902 static const u32 csr_tbl[] = {
1903 CSR_HW_IF_CONFIG_REG,
1904 CSR_INT_COALESCING,
1905 CSR_INT,
1906 CSR_INT_MASK,
1907 CSR_FH_INT_STATUS,
1908 CSR_GPIO_IN,
1909 CSR_RESET,
1910 CSR_GP_CNTRL,
1911 CSR_HW_REV,
1912 CSR_EEPROM_REG,
1913 CSR_EEPROM_GP,
1914 CSR_OTP_GP_REG,
1915 CSR_GIO_REG,
1916 CSR_GP_UCODE_REG,
1917 CSR_GP_DRIVER_REG,
1918 CSR_UCODE_DRV_GP1,
1919 CSR_UCODE_DRV_GP2,
1920 CSR_LED_REG,
1921 CSR_DRAM_INT_TBL_REG,
1922 CSR_GIO_CHICKEN_BITS,
1923 CSR_ANA_PLL_CFG,
1924 CSR_HW_REV_WA_REG,
1925 CSR_DBG_HPET_MEM_REG
1926 };
1927 IWL_ERR(priv, "CSR values:\n");
1928 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
1929 "CSR_INT_PERIODIC_REG)\n");
1930 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1931 IWL_ERR(priv, " %25s: 0X%08x\n",
1932 get_csr_string(csr_tbl[i]),
1933 iwl_read32(priv, csr_tbl[i]));
1934 }
1935}
1936
1937static const char *get_fh_string(int cmd)
1938{
1939 switch (cmd) {
1940 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1941 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1942 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
1943 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1944 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1945 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1946 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1947 IWL_CMD(FH_TSSR_TX_STATUS_REG);
1948 IWL_CMD(FH_TSSR_TX_ERROR_REG);
1949 default:
1950 return "UNKNOWN";
1951 }
1952}
1953
1954int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
1955{
1956 int i;
1957#ifdef CONFIG_IWLWIFI_DEBUG
1958 int pos = 0;
1959 size_t bufsz = 0;
1960#endif
1961 static const u32 fh_tbl[] = {
1962 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1963 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1964 FH_RSCSR_CHNL0_WPTR,
1965 FH_MEM_RCSR_CHNL0_CONFIG_REG,
1966 FH_MEM_RSSR_SHARED_CTRL_REG,
1967 FH_MEM_RSSR_RX_STATUS_REG,
1968 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1969 FH_TSSR_TX_STATUS_REG,
1970 FH_TSSR_TX_ERROR_REG
1971 };
1972#ifdef CONFIG_IWLWIFI_DEBUG
1973 if (display) {
1974 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1975 *buf = kmalloc(bufsz, GFP_KERNEL);
1976 if (!*buf)
1977 return -ENOMEM;
1978 pos += scnprintf(*buf + pos, bufsz - pos,
1979 "FH register values:\n");
1980 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1981 pos += scnprintf(*buf + pos, bufsz - pos,
1982 " %34s: 0X%08x\n",
1983 get_fh_string(fh_tbl[i]),
1984 iwl_read_direct32(priv, fh_tbl[i]));
1985 }
1986 return pos;
1987 }
1988#endif
1989 IWL_ERR(priv, "FH register values:\n");
1990 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1991 IWL_ERR(priv, " %34s: 0X%08x\n",
1992 get_fh_string(fh_tbl[i]),
1993 iwl_read_direct32(priv, fh_tbl[i]));
1994 }
1995 return 0;
1996}
1997
1998/* notification wait support */
1999void iwlagn_init_notification_wait(struct iwl_priv *priv,
2000 struct iwl_notification_wait *wait_entry,
2001 u8 cmd,
2002 void (*fn)(struct iwl_priv *priv,
2003 struct iwl_rx_packet *pkt,
2004 void *data),
2005 void *fn_data)
2006{
2007 wait_entry->fn = fn;
2008 wait_entry->fn_data = fn_data;
2009 wait_entry->cmd = cmd;
2010 wait_entry->triggered = false;
2011 wait_entry->aborted = false;
2012
2013 spin_lock_bh(&priv->notif_wait_lock);
2014 list_add(&wait_entry->list, &priv->notif_waits);
2015 spin_unlock_bh(&priv->notif_wait_lock);
2016}
2017
2018int iwlagn_wait_notification(struct iwl_priv *priv,
2019 struct iwl_notification_wait *wait_entry,
2020 unsigned long timeout)
2021{
2022 int ret;
2023
2024 ret = wait_event_timeout(priv->notif_waitq,
2025 wait_entry->triggered || wait_entry->aborted,
2026 timeout);
2027
2028 spin_lock_bh(&priv->notif_wait_lock);
2029 list_del(&wait_entry->list);
2030 spin_unlock_bh(&priv->notif_wait_lock);
2031
2032 if (wait_entry->aborted)
2033 return -EIO;
2034
2035 /* return value is always >= 0 */
2036 if (ret <= 0)
2037 return -ETIMEDOUT;
2038 return 0;
2039}
2040
2041void iwlagn_remove_notification(struct iwl_priv *priv,
2042 struct iwl_notification_wait *wait_entry)
2043{
2044 spin_lock_bh(&priv->notif_wait_lock);
2045 list_del(&wait_entry->list);
2046 spin_unlock_bh(&priv->notif_wait_lock);
2047}
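
/*
 * Usage sketch of the notification-wait helpers above (editorial; the
 * notification ID and the command-sending helper are hypothetical, shown
 * only to illustrate the init/send/wait/remove pattern):
 *
 *	struct iwl_notification_wait wait_entry;
 *	int ret;
 *
 *	iwlagn_init_notification_wait(priv, &wait_entry,
 *				      CALIBRATION_COMPLETE_NOTIFICATION,
 *				      NULL, NULL);
 *	ret = example_send_calib_cmd(priv);     (hypothetical helper)
 *	if (ret)
 *		iwlagn_remove_notification(priv, &wait_entry);
 *	else
 *		ret = iwlagn_wait_notification(priv, &wait_entry, 2 * HZ);
 *
 * The entry must always come off priv->notif_waits again: either the wait
 * itself removes it, or iwlagn_remove_notification() must be called when
 * the command could not be sent.
 */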
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
new file mode 100644
index 00000000000..3789ff4bf53
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -0,0 +1,3367 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <linux/wireless.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "iwl-dev.h"
40#include "iwl-sta.h"
41#include "iwl-core.h"
42#include "iwl-agn.h"
43
44#define RS_NAME "iwl-agn-rs"
45
46#define NUM_TRY_BEFORE_ANT_TOGGLE 1
47#define IWL_NUMBER_TRY 1
48#define IWL_HT_NUMBER_TRY 3
49
50#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
51#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
52#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
53
54/* max allowed rate miss before sync LQ cmd */
55#define IWL_MISSED_RATE_MAX 15
56/* max time to accumulate history, 3 seconds */
57#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
58
59static u8 rs_ht_to_legacy[] = {
60 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
61 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
62 IWL_RATE_6M_INDEX,
63 IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
64 IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
65 IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
66 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
67};
68
69static const u8 ant_toggle_lookup[] = {
70 /*ANT_NONE -> */ ANT_NONE,
71 /*ANT_A -> */ ANT_B,
72 /*ANT_B -> */ ANT_C,
73 /*ANT_AB -> */ ANT_BC,
74 /*ANT_C -> */ ANT_A,
75 /*ANT_AC -> */ ANT_AB,
76 /*ANT_BC -> */ ANT_AC,
77 /*ANT_ABC -> */ ANT_ABC,
78};
79
80#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
81 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
82 IWL_RATE_SISO_##s##M_PLCP, \
83 IWL_RATE_MIMO2_##s##M_PLCP,\
84 IWL_RATE_MIMO3_##s##M_PLCP,\
85 IWL_RATE_##r##M_IEEE, \
86 IWL_RATE_##ip##M_INDEX, \
87 IWL_RATE_##in##M_INDEX, \
88 IWL_RATE_##rp##M_INDEX, \
89 IWL_RATE_##rn##M_INDEX, \
90 IWL_RATE_##pp##M_INDEX, \
91 IWL_RATE_##np##M_INDEX }
92
93/*
94 * Parameter order:
95 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
96 *
97 * If there isn't a valid next or previous rate then INV is used which
98 * maps to IWL_RATE_INVALID
99 *
100 */
101const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
102 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
103 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
104 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
105 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
106 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
107 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
108 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
109 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
110 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
111 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
112 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
113 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
114 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
115 /* FIXME:RS: ^^ should be INV (legacy) */
116};
117
118static inline u8 rs_extract_rate(u32 rate_n_flags)
119{
120 return (u8)(rate_n_flags & RATE_MCS_RATE_MSK);
121}
122
123static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
124{
125 int idx = 0;
126
127 /* HT rate format */
128 if (rate_n_flags & RATE_MCS_HT_MSK) {
129 idx = rs_extract_rate(rate_n_flags);
130
131 if (idx >= IWL_RATE_MIMO3_6M_PLCP)
132 idx = idx - IWL_RATE_MIMO3_6M_PLCP;
133 else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
134 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
135
136 idx += IWL_FIRST_OFDM_RATE;
137 /* skip 9M not supported in ht*/
138 if (idx >= IWL_RATE_9M_INDEX)
139 idx += 1;
140 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
141 return idx;
142
143 /* legacy rate format, search for match in table */
144 } else {
145 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
146 if (iwl_rates[idx].plcp ==
147 rs_extract_rate(rate_n_flags))
148 return idx;
149 }
150
151 return -1;
152}
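
/*
 * Worked note (editorial): HT has no 9 Mbps rate, so once the computed
 * index reaches IWL_RATE_9M_INDEX it is bumped by one. Successive SISO
 * MCS values therefore land on the 6, 12, 18, ... Mbps entries of
 * iwl_rates[] rather than 6, 9, 12, ..., and MIMO2/MIMO3 PLCP values are
 * first shifted down by their block base so they map onto those same
 * entries.
 */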
153
154static void rs_rate_scale_perform(struct iwl_priv *priv,
155 struct sk_buff *skb,
156 struct ieee80211_sta *sta,
157 struct iwl_lq_sta *lq_sta);
158static void rs_fill_link_cmd(struct iwl_priv *priv,
159 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
160static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
161
162
163#ifdef CONFIG_MAC80211_DEBUGFS
164static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
165 u32 *rate_n_flags, int index);
166#else
167static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
168 u32 *rate_n_flags, int index)
169{}
170#endif
171
172/**
173 * The following tables contain the expected throughput metrics for all rates
174 *
175 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
176 *
177 * where invalid entries are zeros.
178 *
179 * CCK rates are only valid in legacy table and will only be used in G
180 * (2.4 GHz) band.
181 */
182
183static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
184 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
185};
186
187static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
188 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
189 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
190 {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
191 {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
192};
193
194static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
195 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
196 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
197 {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
198 {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
199};
200
201static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
202 {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
203 {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
204 {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
205 {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
206};
207
208static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
209 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
210 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
211 {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
212 {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
213};
214
215static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
216 {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
217 {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
218 {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
219 {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
220};
221
222static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
223 {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
224 {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
225 {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
226 {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
227};
228
229/* mbps, mcs */
230static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
231 { "1", "BPSK DSSS"},
232 { "2", "QPSK DSSS"},
233 {"5.5", "BPSK CCK"},
234 { "11", "QPSK CCK"},
235 { "6", "BPSK 1/2"},
236 { "9", "BPSK 1/2"},
237 { "12", "QPSK 1/2"},
238 { "18", "QPSK 3/4"},
239 { "24", "16QAM 1/2"},
240 { "36", "16QAM 3/4"},
241 { "48", "64QAM 2/3"},
242 { "54", "64QAM 3/4"},
243 { "60", "64QAM 5/6"},
244};
245
246#define MCS_INDEX_PER_STREAM (8)
247
248static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
249{
250 window->data = 0;
251 window->success_counter = 0;
252 window->success_ratio = IWL_INVALID_VALUE;
253 window->counter = 0;
254 window->average_tpt = IWL_INVALID_VALUE;
255 window->stamp = 0;
256}
257
258static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
259{
260 return (ant_type & valid_antenna) == ant_type;
261}
262
263/*
264 * Removes the old data from the statistics. All data that is older than
265 * TID_MAX_TIME_DIFF will be deleted.
266 */
267static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
268{
269 /* The oldest age we want to keep */
270 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
271
272 while (tl->queue_count &&
273 (tl->time_stamp < oldest_time)) {
274 tl->total -= tl->packet_count[tl->head];
275 tl->packet_count[tl->head] = 0;
276 tl->time_stamp += TID_QUEUE_CELL_SPACING;
277 tl->queue_count--;
278 tl->head++;
279 if (tl->head >= TID_QUEUE_MAX_SIZE)
280 tl->head = 0;
281 }
282}
283
284/*
285 * Increment the traffic load value for the tid and also remove
286 * any old values that have aged past the time window.
287 */
288static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
289 struct ieee80211_hdr *hdr)
290{
291 u32 curr_time = jiffies_to_msecs(jiffies);
292 u32 time_diff;
293 s32 index;
294 struct iwl_traffic_load *tl = NULL;
295 u8 tid;
296
297 if (ieee80211_is_data_qos(hdr->frame_control)) {
298 u8 *qc = ieee80211_get_qos_ctl(hdr);
299 tid = qc[0] & 0xf;
300 } else
301 return MAX_TID_COUNT;
302
303 if (unlikely(tid >= TID_MAX_LOAD_COUNT))
304 return MAX_TID_COUNT;
305
306 tl = &lq_data->load[tid];
307
308 curr_time -= curr_time % TID_ROUND_VALUE;
309
310 /* Happens only for the first packet. Initialize the data */
311 if (!(tl->queue_count)) {
312 tl->total = 1;
313 tl->time_stamp = curr_time;
314 tl->queue_count = 1;
315 tl->head = 0;
316 tl->packet_count[0] = 1;
317 return MAX_TID_COUNT;
318 }
319
320 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
321 index = time_diff / TID_QUEUE_CELL_SPACING;
322
323 /* The history is too long: remove data that is older than */
324 /* TID_MAX_TIME_DIFF */
325 if (index >= TID_QUEUE_MAX_SIZE)
326 rs_tl_rm_old_stats(tl, curr_time);
327
328 index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
329 tl->packet_count[index] = tl->packet_count[index] + 1;
330 tl->total = tl->total + 1;
331
332 if ((index + 1) > tl->queue_count)
333 tl->queue_count = index + 1;
334
335 return tid;
336}
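
/*
 * Illustrative trace (editorial; the constants' real values live in the
 * rate-scaling header, the numbers below are only an example): if
 * TID_QUEUE_CELL_SPACING were 1000 ms, three QoS-data frames on tid 0 at
 * t = 0 s followed by two more at t = 2 s would leave
 * packet_count = {3, 0, 2}, queue_count = 3 and tl->total = 5.
 * rs_tl_get_load() below returns that running total, which
 * rs_tl_turn_on_agg_for_tid() compares against IWL_AGG_LOAD_THRESHOLD
 * before opening a Tx aggregation session.
 */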
337
338#ifdef CONFIG_MAC80211_DEBUGFS
339/**
340 * Program the device to use a fixed rate for frame transmission.
341 * This is for debugging/testing only:
342 * once the device starts using a fixed rate, we need to reload the module
343 * to bring back normal operation.
344 */
345static void rs_program_fix_rate(struct iwl_priv *priv,
346 struct iwl_lq_sta *lq_sta)
347{
348 struct iwl_station_priv *sta_priv =
349 container_of(lq_sta, struct iwl_station_priv, lq_sta);
350 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
351
352 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
353 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
354 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
355 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
356
357#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
358	/* testmode has higher priority to overwrite the fixed rate */
359 if (priv->tm_fixed_rate)
360 lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
361#endif
362
363 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
364 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
365
366 if (lq_sta->dbg_fixed_rate) {
367 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
368 iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
369 false);
370 }
371}
372#endif
373
374/*
375 * Get the traffic load value for the given tid.
376 */
377static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
378{
379 u32 curr_time = jiffies_to_msecs(jiffies);
380 u32 time_diff;
381 s32 index;
382 struct iwl_traffic_load *tl = NULL;
383
384 if (tid >= TID_MAX_LOAD_COUNT)
385 return 0;
386
387 tl = &(lq_data->load[tid]);
388
389 curr_time -= curr_time % TID_ROUND_VALUE;
390
391 if (!(tl->queue_count))
392 return 0;
393
394 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
395 index = time_diff / TID_QUEUE_CELL_SPACING;
396
397 /* The history is too long: remove data that is older than */
398 /* TID_MAX_TIME_DIFF */
399 if (index >= TID_QUEUE_MAX_SIZE)
400 rs_tl_rm_old_stats(tl, curr_time);
401
402 return tl->total;
403}
404
405static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
406 struct iwl_lq_sta *lq_data, u8 tid,
407 struct ieee80211_sta *sta)
408{
409 int ret = -EAGAIN;
410 u32 load;
411
412 /*
413 * Don't create TX aggregation sessions when in high
414 * BT traffic, as they would just be disrupted by BT.
415 */
416 if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
417 IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
418 priv->bt_traffic_load);
419 return ret;
420 }
421
422 load = rs_tl_get_load(lq_data, tid);
423
424 if (load > IWL_AGG_LOAD_THRESHOLD) {
425 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
426 sta->addr, tid);
427 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
428 if (ret == -EAGAIN) {
429 /*
430			 * The driver and mac80211 are out of sync;
431			 * this might be caused by reloading the firmware.
432			 * Stop the Tx BA session here.
433 */
434 IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
435 tid);
436 ieee80211_stop_tx_ba_session(sta, tid);
437 }
438 } else {
439 IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
440 "because load = %u\n", tid, load);
441 }
442 return ret;
443}
444
445static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
446 struct iwl_lq_sta *lq_data,
447 struct ieee80211_sta *sta)
448{
449 if (tid < TID_MAX_LOAD_COUNT)
450 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
451 else
452 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
453 tid, TID_MAX_LOAD_COUNT);
454}
455
456static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
457{
458 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
459 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
460 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
461}
462
463/*
464 * Static function to get the expected throughput from an iwl_scale_tbl_info,
465 * wrapping a NULL pointer check.
466 */
467static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
468{
469 if (tbl->expected_tpt)
470 return tbl->expected_tpt[rs_index];
471 return 0;
472}
473
474/**
475 * rs_collect_tx_data - Update the success/failure sliding window
476 *
477 * We keep a sliding window of the last 62 packets transmitted
478 * at this rate. window->data contains the bitmask of successful
479 * packets.
480 */
481static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
482 int scale_index, int attempts, int successes)
483{
484 struct iwl_rate_scale_data *window = NULL;
485 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
486 s32 fail_count, tpt;
487
488 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
489 return -EINVAL;
490
491 /* Select window for current tx bit rate */
492 window = &(tbl->win[scale_index]);
493
494 /* Get expected throughput */
495 tpt = get_expected_tpt(tbl, scale_index);
496
497 /*
498 * Keep track of only the latest 62 tx frame attempts in this rate's
499 * history window; anything older isn't really relevant any more.
500 * If we have filled up the sliding window, drop the oldest attempt;
501 * if the oldest attempt (highest bit in bitmap) shows "success",
502 * subtract "1" from the success counter (this is the main reason
503 * we keep these bitmaps!).
504 */
505 while (attempts > 0) {
506 if (window->counter >= IWL_RATE_MAX_WINDOW) {
507
508 /* remove earliest */
509 window->counter = IWL_RATE_MAX_WINDOW - 1;
510
511 if (window->data & mask) {
512 window->data &= ~mask;
513 window->success_counter--;
514 }
515 }
516
517 /* Increment frames-attempted counter */
518 window->counter++;
519
520 /* Shift bitmap by one frame to throw away oldest history */
521 window->data <<= 1;
522
523 /* Mark the most recent #successes attempts as successful */
524 if (successes > 0) {
525 window->success_counter++;
526 window->data |= 0x1;
527 successes--;
528 }
529
530 attempts--;
531 }
532
533 /* Calculate current success ratio, avoid divide-by-0! */
534 if (window->counter > 0)
535 window->success_ratio = 128 * (100 * window->success_counter)
536 / window->counter;
537 else
538 window->success_ratio = IWL_INVALID_VALUE;
539
540 fail_count = window->counter - window->success_counter;
541
542 /* Calculate average throughput, if we have enough history. */
543 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
544 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
545 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
546 else
547 window->average_tpt = IWL_INVALID_VALUE;
548
549 /* Tag this window as having been updated */
550 window->stamp = jiffies;
551
552 return 0;
553}
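
/*
 * Worked example of the fixed-point math above (editorial, illustrative
 * numbers): with 40 successes out of 50 attempts,
 * success_ratio = 128 * (100 * 40) / 50 = 10240, i.e. 80% kept in units
 * of 1/128 of a percent so no floating point is needed. With an expected
 * throughput of tpt = 100 for this rate,
 * average_tpt = (10240 * 100 + 64) / 128 = 8000, which is consistent with
 * later comparisons such as (100 * tpt_tbl[rate]) > lq_sta->last_tpt in
 * rs_get_best_rate(), where table throughputs are scaled by 100 before
 * being compared against measured values.
 */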
554
555/*
556 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
557 */
558/* FIXME:RS:remove this function and put the flags statically in the table */
559static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
560 struct iwl_scale_tbl_info *tbl,
561 int index, u8 use_green)
562{
563 u32 rate_n_flags = 0;
564
565 if (is_legacy(tbl->lq_type)) {
566 rate_n_flags = iwl_rates[index].plcp;
567 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
568 rate_n_flags |= RATE_MCS_CCK_MSK;
569
570 } else if (is_Ht(tbl->lq_type)) {
571 if (index > IWL_LAST_OFDM_RATE) {
572 IWL_ERR(priv, "Invalid HT rate index %d\n", index);
573 index = IWL_LAST_OFDM_RATE;
574 }
575 rate_n_flags = RATE_MCS_HT_MSK;
576
577 if (is_siso(tbl->lq_type))
578 rate_n_flags |= iwl_rates[index].plcp_siso;
579 else if (is_mimo2(tbl->lq_type))
580 rate_n_flags |= iwl_rates[index].plcp_mimo2;
581 else
582 rate_n_flags |= iwl_rates[index].plcp_mimo3;
583 } else {
584 IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
585 }
586
587 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
588 RATE_MCS_ANT_ABC_MSK);
589
590 if (is_Ht(tbl->lq_type)) {
591 if (tbl->is_ht40) {
592 if (tbl->is_dup)
593 rate_n_flags |= RATE_MCS_DUP_MSK;
594 else
595 rate_n_flags |= RATE_MCS_HT40_MSK;
596 }
597 if (tbl->is_SGI)
598 rate_n_flags |= RATE_MCS_SGI_MSK;
599
600 if (use_green) {
601 rate_n_flags |= RATE_MCS_GF_MSK;
602 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
603 rate_n_flags &= ~RATE_MCS_SGI_MSK;
604 IWL_ERR(priv, "GF was set with SGI:SISO\n");
605 }
606 }
607 }
608 return rate_n_flags;
609}
610
611/*
612 * Interpret uCode API's rate_n_flags format,
613 * fill "search" or "active" tx mode table.
614 */
615static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
616 enum ieee80211_band band,
617 struct iwl_scale_tbl_info *tbl,
618 int *rate_idx)
619{
620 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
621 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
622 u8 mcs;
623
624 memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
625 *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
626
627 if (*rate_idx == IWL_RATE_INVALID) {
628 *rate_idx = -1;
629 return -EINVAL;
630 }
631 tbl->is_SGI = 0; /* default legacy setup */
632 tbl->is_ht40 = 0;
633 tbl->is_dup = 0;
634 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
635 tbl->lq_type = LQ_NONE;
636 tbl->max_search = IWL_MAX_SEARCH;
637
638 /* legacy rate format */
639 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
640 if (num_of_ant == 1) {
641 if (band == IEEE80211_BAND_5GHZ)
642 tbl->lq_type = LQ_A;
643 else
644 tbl->lq_type = LQ_G;
645 }
646 /* HT rate format */
647 } else {
648 if (rate_n_flags & RATE_MCS_SGI_MSK)
649 tbl->is_SGI = 1;
650
651 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
652 (rate_n_flags & RATE_MCS_DUP_MSK))
653 tbl->is_ht40 = 1;
654
655 if (rate_n_flags & RATE_MCS_DUP_MSK)
656 tbl->is_dup = 1;
657
658 mcs = rs_extract_rate(rate_n_flags);
659
660 /* SISO */
661 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
662 if (num_of_ant == 1)
663 tbl->lq_type = LQ_SISO; /*else NONE*/
664 /* MIMO2 */
665 } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
666 if (num_of_ant == 2)
667 tbl->lq_type = LQ_MIMO2;
668 /* MIMO3 */
669 } else {
670 if (num_of_ant == 3) {
671 tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
672 tbl->lq_type = LQ_MIMO3;
673 }
674 }
675 }
676 return 0;
677}
678
679/* switch to another antenna/antennas and return 1 */
680/* if no other valid antenna found, return 0 */
681static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
682 struct iwl_scale_tbl_info *tbl)
683{
684 u8 new_ant_type;
685
686 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
687 return 0;
688
689 if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
690 return 0;
691
692 new_ant_type = ant_toggle_lookup[tbl->ant_type];
693
694 while ((new_ant_type != tbl->ant_type) &&
695 !rs_is_valid_ant(valid_ant, new_ant_type))
696 new_ant_type = ant_toggle_lookup[new_ant_type];
697
698 if (new_ant_type == tbl->ant_type)
699 return 0;
700
701 tbl->ant_type = new_ant_type;
702 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
703 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
704 return 1;
705}
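
/*
 * Illustrative walk (editorial): starting from ANT_A with only antennas A
 * and C valid, the lookup table above gives ANT_A -> ANT_B (not valid) ->
 * ANT_C (valid), so tbl->ant_type becomes ANT_C and the antenna bits of
 * *rate_n_flags are rewritten to match; if the walk comes back to the
 * starting antenna without finding another valid one, the function
 * returns 0 and leaves everything unchanged.
 */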
706
707/**
708 * Green-field mode is valid if the station supports it and
709 * there are no non-GF stations present in the BSS.
710 */
711static bool rs_use_green(struct ieee80211_sta *sta)
712{
713 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
714 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
715
716 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
717 !(ctx->ht.non_gf_sta_present);
718}
719
720/**
721 * rs_get_supported_rates - get the available rates
722 *
723 * If this is a management or broadcast frame, only return the
724 * basic available rates.
725 *
726 */
727static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
728 struct ieee80211_hdr *hdr,
729 enum iwl_table_type rate_type)
730{
731 if (is_legacy(rate_type)) {
732 return lq_sta->active_legacy_rate;
733 } else {
734 if (is_siso(rate_type))
735 return lq_sta->active_siso_rate;
736 else if (is_mimo2(rate_type))
737 return lq_sta->active_mimo2_rate;
738 else
739 return lq_sta->active_mimo3_rate;
740 }
741}
742
743static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
744 int rate_type)
745{
746 u8 high = IWL_RATE_INVALID;
747 u8 low = IWL_RATE_INVALID;
748
749 /* 802.11A or ht walks to the next literal adjacent rate in
750 * the rate table */
751 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
752 int i;
753 u32 mask;
754
755 /* Find the previous rate that is in the rate mask */
756 i = index - 1;
757 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
758 if (rate_mask & mask) {
759 low = i;
760 break;
761 }
762 }
763
764 /* Find the next rate that is in the rate mask */
765 i = index + 1;
766 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
767 if (rate_mask & mask) {
768 high = i;
769 break;
770 }
771 }
772
773 return (high << 8) | low;
774 }
775
776 low = index;
777 while (low != IWL_RATE_INVALID) {
778 low = iwl_rates[low].prev_rs;
779 if (low == IWL_RATE_INVALID)
780 break;
781 if (rate_mask & (1 << low))
782 break;
783 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
784 }
785
786 high = index;
787 while (high != IWL_RATE_INVALID) {
788 high = iwl_rates[high].next_rs;
789 if (high == IWL_RATE_INVALID)
790 break;
791 if (rate_mask & (1 << high))
792 break;
793 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
794 }
795
796 return (high << 8) | low;
797}
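
/*
 * Unpacking sketch (editorial): the function above packs both neighbours
 * into a single u16, which callers such as rs_get_lower_rate() and
 * rs_get_best_rate() split apart again:
 *
 *	u16 high_low = rs_get_adjacent_rate(priv, index, rate_mask, type);
 *	u8 low  = high_low & 0xff;
 *	u8 high = (high_low >> 8) & 0xff;
 *
 * A value of IWL_RATE_INVALID in either byte means there is no usable
 * neighbour in that direction for the given rate mask.
 */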
798
799static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
800 struct iwl_scale_tbl_info *tbl,
801 u8 scale_index, u8 ht_possible)
802{
803 s32 low;
804 u16 rate_mask;
805 u16 high_low;
806 u8 switch_to_legacy = 0;
807 u8 is_green = lq_sta->is_green;
808 struct iwl_priv *priv = lq_sta->drv;
809
810 /* check if we need to switch from HT to legacy rates.
811 * assumption is that mandatory rates (1Mbps or 6Mbps)
812 * are always supported (spec demand) */
813 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
814 switch_to_legacy = 1;
815 scale_index = rs_ht_to_legacy[scale_index];
816 if (lq_sta->band == IEEE80211_BAND_5GHZ)
817 tbl->lq_type = LQ_A;
818 else
819 tbl->lq_type = LQ_G;
820
821 if (num_of_ant(tbl->ant_type) > 1)
822 tbl->ant_type =
823 first_antenna(priv->hw_params.valid_tx_ant);
824
825 tbl->is_ht40 = 0;
826 tbl->is_SGI = 0;
827 tbl->max_search = IWL_MAX_SEARCH;
828 }
829
830 rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
831
832 /* Mask with station rate restriction */
833 if (is_legacy(tbl->lq_type)) {
834 /* supp_rates has no CCK bits in A mode */
835 if (lq_sta->band == IEEE80211_BAND_5GHZ)
836 rate_mask = (u16)(rate_mask &
837 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
838 else
839 rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
840 }
841
842 /* If we switched from HT to legacy, check current rate */
843 if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
844 low = scale_index;
845 goto out;
846 }
847
848 high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
849 tbl->lq_type);
850 low = high_low & 0xff;
851
852 if (low == IWL_RATE_INVALID)
853 low = scale_index;
854
855out:
856 return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
857}
858
859/*
860 * Simple function to compare two rate scale table types
861 */
862static bool table_type_matches(struct iwl_scale_tbl_info *a,
863 struct iwl_scale_tbl_info *b)
864{
865 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
866 (a->is_SGI == b->is_SGI);
867}
868
869static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
870 struct iwl_lq_sta *lq_sta)
871{
872 struct iwl_scale_tbl_info *tbl;
873 bool full_concurrent = priv->bt_full_concurrent;
874 unsigned long flags;
875
876 if (priv->bt_ant_couple_ok) {
877 /*
878 * Is there a need to switch between
879 * full concurrency and 3-wire?
880 */
881 spin_lock_irqsave(&priv->lock, flags);
882 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
883 full_concurrent = true;
884 else
885 full_concurrent = false;
886 spin_unlock_irqrestore(&priv->lock, flags);
887 }
888 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
889 (priv->bt_full_concurrent != full_concurrent)) {
890 priv->bt_full_concurrent = full_concurrent;
891
892 /* Update uCode's rate table. */
893 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
894 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
895 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
896
897 queue_work(priv->workqueue, &priv->bt_full_concurrency);
898 }
899}
900
901/*
902 * mac80211 sends us Tx status
903 */
904static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
905 struct ieee80211_sta *sta, void *priv_sta,
906 struct sk_buff *skb)
907{
908 int legacy_success;
909 int retries;
910 int rs_index, mac_index, i;
911 struct iwl_lq_sta *lq_sta = priv_sta;
912 struct iwl_link_quality_cmd *table;
913 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
914 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
915 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
916 enum mac80211_rate_control_flags mac_flags;
917 u32 tx_rate;
918 struct iwl_scale_tbl_info tbl_type;
919 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
920 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
921 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
922
923 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
924
925	/* Treat uninitialized rate scaling data the same as non-existent. */
926 if (!lq_sta) {
927 IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
928 return;
929 } else if (!lq_sta->drv) {
930 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
931 return;
932 }
933
934 if (!ieee80211_is_data(hdr->frame_control) ||
935 info->flags & IEEE80211_TX_CTL_NO_ACK)
936 return;
937
938 /* This packet was aggregated but doesn't carry status info */
939 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
940 !(info->flags & IEEE80211_TX_STAT_AMPDU))
941 return;
942
943 /*
944 * Ignore this Tx frame response if its initial rate doesn't match
945 * that of latest Link Quality command. There may be stragglers
946 * from a previous Link Quality command, but we're no longer interested
947 * in those; they're either from the "active" mode while we're trying
948 * to check "search" mode, or a prior "search" mode after we've moved
949 * to a new "search" mode (which might become the new "active" mode).
950 */
951 table = &lq_sta->lq;
952 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
953 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
954 if (priv->band == IEEE80211_BAND_5GHZ)
955 rs_index -= IWL_FIRST_OFDM_RATE;
956 mac_flags = info->status.rates[0].flags;
957 mac_index = info->status.rates[0].idx;
958 /* For HT packets, map MCS to PLCP */
959 if (mac_flags & IEEE80211_TX_RC_MCS) {
960 mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
961 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
962 mac_index++;
963 /*
964 * mac80211 HT index is always zero-indexed; we need to move
965 * HT OFDM rates after CCK rates in 2.4 GHz band
966 */
967 if (priv->band == IEEE80211_BAND_2GHZ)
968 mac_index += IWL_FIRST_OFDM_RATE;
969 }
970 /* Here we actually compare this rate to the latest LQ command */
971 if ((mac_index < 0) ||
972 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
973 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
974 (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
975 (tbl_type.ant_type != info->antenna_sel_tx) ||
976 (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
977 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
978 (rs_index != mac_index)) {
979 IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate);
980 /*
981 * Since rates mis-match, the last LQ command may have failed.
982 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
983		 * the driver.
984 */
985 lq_sta->missed_rate_counter++;
986 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
987 lq_sta->missed_rate_counter = 0;
988 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
989 }
990 /* Regardless, ignore this status info for outdated rate */
991 return;
992 } else
993 /* Rate did match, so reset the missed_rate_counter */
994 lq_sta->missed_rate_counter = 0;
995
996 /* Figure out if rate scale algorithm is in active or search table */
997 if (table_type_matches(&tbl_type,
998 &(lq_sta->lq_info[lq_sta->active_tbl]))) {
999 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1000 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1001 } else if (table_type_matches(&tbl_type,
1002 &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
1003 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1004 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1005 } else {
1006 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
1007 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1008 IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
1009 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
1010 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1011 IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
1012 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
1013 IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
1014 tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
1015 /*
1016 * no matching table found, let's by-pass the data collection
1017 * and continue to perform rate scale to find the rate table
1018 */
1019 rs_stay_in_table(lq_sta, true);
1020 goto done;
1021 }
1022
1023 /*
1024 * Updating the frame history depends on whether packets were
1025 * aggregated.
1026 *
1027 * For aggregation, all packets were transmitted at the same rate, the
1028 * first index into rate scale table.
1029 */
1030 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1031 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
1032 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
1033 &rs_index);
1034 rs_collect_tx_data(curr_tbl, rs_index,
1035 info->status.ampdu_len,
1036 info->status.ampdu_ack_len);
1037
1038 /* Update success/fail counts if not searching for new mode */
1039 if (lq_sta->stay_in_tbl) {
1040 lq_sta->total_success += info->status.ampdu_ack_len;
1041 lq_sta->total_failed += (info->status.ampdu_len -
1042 info->status.ampdu_ack_len);
1043 }
1044 } else {
1045 /*
1046		 * For legacy, update the frame history for each Tx retry.
1047 */
1048 retries = info->status.rates[0].count - 1;
1049 /* HW doesn't send more than 15 retries */
1050 retries = min(retries, 15);
1051
1052 /* The last transmission may have been successful */
1053 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
1054 /* Collect data for each rate used during failed TX attempts */
1055 for (i = 0; i <= retries; ++i) {
1056 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
1057 rs_get_tbl_info_from_mcs(tx_rate, priv->band,
1058 &tbl_type, &rs_index);
1059 /*
1060 * Only collect stats if retried rate is in the same RS
1061 * table as active/search.
1062 */
1063 if (table_type_matches(&tbl_type, curr_tbl))
1064 tmp_tbl = curr_tbl;
1065 else if (table_type_matches(&tbl_type, other_tbl))
1066 tmp_tbl = other_tbl;
1067 else
1068 continue;
1069 rs_collect_tx_data(tmp_tbl, rs_index, 1,
1070 i < retries ? 0 : legacy_success);
1071 }
1072
1073 /* Update success/fail counts if not searching for new mode */
1074 if (lq_sta->stay_in_tbl) {
1075 lq_sta->total_success += legacy_success;
1076 lq_sta->total_failed += retries + (1 - legacy_success);
1077 }
1078 }
1079 /* The last TX rate is cached in lq_sta; it's set in if/else above */
1080 lq_sta->last_rate_n_flags = tx_rate;
1081done:
1082 /* See if there's a better rate or modulation mode to try. */
1083 if (sta && sta->supp_rates[sband->band])
1084 rs_rate_scale_perform(priv, skb, sta, lq_sta);
1085
1086#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
1087 if ((priv->tm_fixed_rate) &&
1088 (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
1089 rs_program_fix_rate(priv, lq_sta);
1090#endif
1091 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
1092 rs_bt_update_lq(priv, ctx, lq_sta);
1093}
1094
1095/*
1096 * Begin a period of staying with a selected modulation mode.
1097 * Set "stay_in_tbl" flag to prevent any mode switches.
1098 * Set frame tx success limits according to legacy vs. high-throughput,
1099 * and reset overall (spanning all rates) tx success history statistics.
1100 * These control how long we stay using same modulation mode before
1101 * searching for a new mode.
1102 */
1103static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1104 struct iwl_lq_sta *lq_sta)
1105{
1106 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1107 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1108 if (is_legacy) {
1109 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1110 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1111 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1112 } else {
1113 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1114 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1115 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1116 }
1117 lq_sta->table_count = 0;
1118 lq_sta->total_failed = 0;
1119 lq_sta->total_success = 0;
1120 lq_sta->flush_timer = jiffies;
1121 lq_sta->action_counter = 0;
1122}
1123
1124/*
1125 * Find correct throughput table for given mode of modulation
1126 */
1127static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1128 struct iwl_scale_tbl_info *tbl)
1129{
1130 /* Used to choose among HT tables */
1131 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1132
1133 /* Check for invalid LQ type */
1134 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1135 tbl->expected_tpt = expected_tpt_legacy;
1136 return;
1137 }
1138
1139 /* Legacy rates have only one table */
1140 if (is_legacy(tbl->lq_type)) {
1141 tbl->expected_tpt = expected_tpt_legacy;
1142 return;
1143 }
1144
1145 /* Choose among many HT tables depending on number of streams
1146 * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
1147 * status */
1148 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1149 ht_tbl_pointer = expected_tpt_siso20MHz;
1150 else if (is_siso(tbl->lq_type))
1151 ht_tbl_pointer = expected_tpt_siso40MHz;
1152 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1153 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1154 else if (is_mimo2(tbl->lq_type))
1155 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1156 else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1157 ht_tbl_pointer = expected_tpt_mimo3_20MHz;
1158 else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
1159 ht_tbl_pointer = expected_tpt_mimo3_40MHz;
1160
1161 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1162 tbl->expected_tpt = ht_tbl_pointer[0];
1163 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1164 tbl->expected_tpt = ht_tbl_pointer[1];
1165 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1166 tbl->expected_tpt = ht_tbl_pointer[2];
1167 else /* AGG+SGI */
1168 tbl->expected_tpt = ht_tbl_pointer[3];
1169}
1170
1171/*
1172 * Find starting rate for new "search" high-throughput mode of modulation.
1173 * Goal is to find lowest expected rate (under perfect conditions) that is
1174 * above the current measured throughput of "active" mode, to give new mode
1175 * a fair chance to prove itself without too many challenges.
1176 *
1177 * This gets called when transitioning to more aggressive modulation
1178 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1179 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1180 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1181 * bit rate will typically need to increase, but not if performance was bad.
1182 */
1183static s32 rs_get_best_rate(struct iwl_priv *priv,
1184 struct iwl_lq_sta *lq_sta,
1185 struct iwl_scale_tbl_info *tbl, /* "search" */
1186 u16 rate_mask, s8 index)
1187{
1188 /* "active" values */
1189 struct iwl_scale_tbl_info *active_tbl =
1190 &(lq_sta->lq_info[lq_sta->active_tbl]);
1191 s32 active_sr = active_tbl->win[index].success_ratio;
1192 s32 active_tpt = active_tbl->expected_tpt[index];
1193
1194 /* expected "search" throughput */
1195 s32 *tpt_tbl = tbl->expected_tpt;
1196
1197 s32 new_rate, high, low, start_hi;
1198 u16 high_low;
1199 s8 rate = index;
1200
1201 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1202
1203 for (; ;) {
1204 high_low = rs_get_adjacent_rate(priv, rate, rate_mask,
1205 tbl->lq_type);
1206
1207 low = high_low & 0xff;
1208 high = (high_low >> 8) & 0xff;
1209
1210 /*
1211 * Lower the "search" bit rate, to give new "search" mode
1212 * approximately the same throughput as "active" if:
1213 *
1214 * 1) "Active" mode has been working modestly well (but not
1215 * great), and expected "search" throughput (under perfect
1216 * conditions) at candidate rate is above the actual
1217 * measured "active" throughput (but less than expected
1218 * "active" throughput under perfect conditions).
1219 * OR
1220 * 2) "Active" mode has been working perfectly or very well
1221 * and expected "search" throughput (under perfect
1222 * conditions) at candidate rate is above expected
1223 * "active" throughput (under perfect conditions).
1224 */
1225 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1226 ((active_sr > IWL_RATE_DECREASE_TH) &&
1227 (active_sr <= IWL_RATE_HIGH_TH) &&
1228 (tpt_tbl[rate] <= active_tpt))) ||
1229 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1230 (tpt_tbl[rate] > active_tpt))) {
1231
1232 /* (2nd or later pass)
1233 * If we've already tried to raise the rate, and are
1234 * now trying to lower it, use the higher rate. */
1235 if (start_hi != IWL_RATE_INVALID) {
1236 new_rate = start_hi;
1237 break;
1238 }
1239
1240 new_rate = rate;
1241
1242 /* Loop again with lower rate */
1243 if (low != IWL_RATE_INVALID)
1244 rate = low;
1245
1246 /* Lower rate not available, use the original */
1247 else
1248 break;
1249
1250 /* Else try to raise the "search" rate to match "active" */
1251 } else {
1252 /* (2nd or later pass)
1253 * If we've already tried to lower the rate, and are
1254 * now trying to raise it, use the lower rate. */
1255 if (new_rate != IWL_RATE_INVALID)
1256 break;
1257
1258 /* Loop again with higher rate */
1259 else if (high != IWL_RATE_INVALID) {
1260 start_hi = high;
1261 rate = high;
1262
1263 /* Higher rate not available, use the original */
1264 } else {
1265 new_rate = rate;
1266 break;
1267 }
1268 }
1269 }
1270
1271 return new_rate;
1272}
1273
1274/*
1275 * Set up search table for MIMO2
1276 */
1277static int rs_switch_to_mimo2(struct iwl_priv *priv,
1278 struct iwl_lq_sta *lq_sta,
1279 struct ieee80211_conf *conf,
1280 struct ieee80211_sta *sta,
1281 struct iwl_scale_tbl_info *tbl, int index)
1282{
1283 u16 rate_mask;
1284 s32 rate;
1285 s8 is_green = lq_sta->is_green;
1286 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1287 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1288
1289 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1290 return -1;
1291
1292 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1293 == WLAN_HT_CAP_SM_PS_STATIC)
1294 return -1;
1295
1296 /* Need both Tx chains/antennas to support MIMO */
1297 if (priv->hw_params.tx_chains_num < 2)
1298 return -1;
1299
1300 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1301
1302 tbl->lq_type = LQ_MIMO2;
1303 tbl->is_dup = lq_sta->is_dup;
1304 tbl->action = 0;
1305 tbl->max_search = IWL_MAX_SEARCH;
1306 rate_mask = lq_sta->active_mimo2_rate;
1307
1308 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1309 tbl->is_ht40 = 1;
1310 else
1311 tbl->is_ht40 = 0;
1312
1313 rs_set_expected_tpt_table(lq_sta, tbl);
1314
1315 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1316
1317 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1318 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1319 IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
1320 rate, rate_mask);
1321 return -1;
1322 }
1323 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1324
1325 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1326 tbl->current_rate, is_green);
1327 return 0;
1328}
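/*
 * Note on the SM power save test above: the spatial-multiplexing power
 * save field occupies bits 2-3 of ht_cap.cap, hence the ">> 2" before
 * comparing against WLAN_HT_CAP_SM_PS_STATIC.  A peer in static SM power
 * save keeps only one receive chain active, so sending it MIMO rates
 * would be pointless; the same test is repeated for MIMO3 below, while
 * the SISO setup can skip it since a single stream is unaffected.
 */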
1329
1330/*
1331 * Set up search table for MIMO3
1332 */
1333static int rs_switch_to_mimo3(struct iwl_priv *priv,
1334 struct iwl_lq_sta *lq_sta,
1335 struct ieee80211_conf *conf,
1336 struct ieee80211_sta *sta,
1337 struct iwl_scale_tbl_info *tbl, int index)
1338{
1339 u16 rate_mask;
1340 s32 rate;
1341 s8 is_green = lq_sta->is_green;
1342 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1343 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1344
1345 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1346 return -1;
1347
1348 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1349 == WLAN_HT_CAP_SM_PS_STATIC)
1350 return -1;
1351
1352 /* Need both Tx chains/antennas to support MIMO */
1353 if (priv->hw_params.tx_chains_num < 3)
1354 return -1;
1355
1356 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
1357
1358 tbl->lq_type = LQ_MIMO3;
1359 tbl->is_dup = lq_sta->is_dup;
1360 tbl->action = 0;
1361 tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
1362 rate_mask = lq_sta->active_mimo3_rate;
1363
1364 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1365 tbl->is_ht40 = 1;
1366 else
1367 tbl->is_ht40 = 0;
1368
1369 rs_set_expected_tpt_table(lq_sta, tbl);
1370
1371 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1372
1373 IWL_DEBUG_RATE(priv, "LQ: MIMO3 best rate %d mask %X\n",
1374 rate, rate_mask);
1375 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1376 IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
1377 rate, rate_mask);
1378 return -1;
1379 }
1380 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1381
1382 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1383 tbl->current_rate, is_green);
1384 return 0;
1385}
1386
1387/*
1388 * Set up search table for SISO
1389 */
1390static int rs_switch_to_siso(struct iwl_priv *priv,
1391 struct iwl_lq_sta *lq_sta,
1392 struct ieee80211_conf *conf,
1393 struct ieee80211_sta *sta,
1394 struct iwl_scale_tbl_info *tbl, int index)
1395{
1396 u16 rate_mask;
1397 u8 is_green = lq_sta->is_green;
1398 s32 rate;
1399 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1400 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1401
1402 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1403 return -1;
1404
1405 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1406
1407 tbl->is_dup = lq_sta->is_dup;
1408 tbl->lq_type = LQ_SISO;
1409 tbl->action = 0;
1410 tbl->max_search = IWL_MAX_SEARCH;
1411 rate_mask = lq_sta->active_siso_rate;
1412
1413 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1414 tbl->is_ht40 = 1;
1415 else
1416 tbl->is_ht40 = 0;
1417
1418 if (is_green)
1419 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1420
1421 rs_set_expected_tpt_table(lq_sta, tbl);
1422 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1423
1424 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1425 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1426 IWL_DEBUG_RATE(priv, "can not switch with index %d rate mask %x\n",
1427 rate, rate_mask);
1428 return -1;
1429 }
1430 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1431 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1432 tbl->current_rate, is_green);
1433 return 0;
1434}
1435
1436/*
1437 * Try to switch to new modulation mode from legacy
1438 */
1439static int rs_move_legacy_other(struct iwl_priv *priv,
1440 struct iwl_lq_sta *lq_sta,
1441 struct ieee80211_conf *conf,
1442 struct ieee80211_sta *sta,
1443 int index)
1444{
1445 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1446 struct iwl_scale_tbl_info *search_tbl =
1447 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1448 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1449 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1450 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1451 u8 start_action;
1452 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1453 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1454 int ret = 0;
1455 u8 update_search_tbl_counter = 0;
1456
1457 switch (priv->bt_traffic_load) {
1458 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1459 /* nothing */
1460 break;
1461 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1462 /* avoid antenna B unless MIMO */
1463 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1464 if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
1465 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1466 break;
1467 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1468 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1469 /* avoid antenna B and MIMO */
1470 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1471 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1472 tbl->action != IWL_LEGACY_SWITCH_SISO)
1473 tbl->action = IWL_LEGACY_SWITCH_SISO;
1474 break;
1475 default:
1476 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1477 break;
1478 }
1479
1480 if (!iwl_ht_enabled(priv))
1481 /* stay in Legacy */
1482 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1483 else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1484 tbl->action > IWL_LEGACY_SWITCH_SISO)
1485 tbl->action = IWL_LEGACY_SWITCH_SISO;
1486
1487 /* configure as 1x1 if bt full concurrency */
1488 if (priv->bt_full_concurrent) {
1489 if (!iwl_ht_enabled(priv))
1490 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1491 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1492 tbl->action = IWL_LEGACY_SWITCH_SISO;
1493 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1494 }
1495
1496 start_action = tbl->action;
1497 for (; ;) {
1498 lq_sta->action_counter++;
1499 switch (tbl->action) {
1500 case IWL_LEGACY_SWITCH_ANTENNA1:
1501 case IWL_LEGACY_SWITCH_ANTENNA2:
1502 IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
1503
1504 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1505 tx_chains_num <= 1) ||
1506 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1507 tx_chains_num <= 2))
1508 break;
1509
1510 /* Don't change antenna if success has been great */
1511 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1512 !priv->bt_full_concurrent &&
1513 priv->bt_traffic_load ==
1514 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1515 break;
1516
1517 /* Set up search table to try other antenna */
1518 memcpy(search_tbl, tbl, sz);
1519
1520 if (rs_toggle_antenna(valid_tx_ant,
1521 &search_tbl->current_rate, search_tbl)) {
1522 update_search_tbl_counter = 1;
1523 rs_set_expected_tpt_table(lq_sta, search_tbl);
1524 goto out;
1525 }
1526 break;
1527 case IWL_LEGACY_SWITCH_SISO:
1528 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
1529
1530 /* Set up search table to try SISO */
1531 memcpy(search_tbl, tbl, sz);
1532 search_tbl->is_SGI = 0;
1533 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1534 search_tbl, index);
1535 if (!ret) {
1536 lq_sta->action_counter = 0;
1537 goto out;
1538 }
1539
1540 break;
1541 case IWL_LEGACY_SWITCH_MIMO2_AB:
1542 case IWL_LEGACY_SWITCH_MIMO2_AC:
1543 case IWL_LEGACY_SWITCH_MIMO2_BC:
1544 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
1545
1546 /* Set up search table to try MIMO */
1547 memcpy(search_tbl, tbl, sz);
1548 search_tbl->is_SGI = 0;
1549
1550 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1551 search_tbl->ant_type = ANT_AB;
1552 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1553 search_tbl->ant_type = ANT_AC;
1554 else
1555 search_tbl->ant_type = ANT_BC;
1556
1557 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1558 break;
1559
1560 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1561 search_tbl, index);
1562 if (!ret) {
1563 lq_sta->action_counter = 0;
1564 goto out;
1565 }
1566 break;
1567
1568 case IWL_LEGACY_SWITCH_MIMO3_ABC:
1569 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO3\n");
1570
1571 /* Set up search table to try MIMO3 */
1572 memcpy(search_tbl, tbl, sz);
1573 search_tbl->is_SGI = 0;
1574
1575 search_tbl->ant_type = ANT_ABC;
1576
1577 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1578 break;
1579
1580 ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
1581 search_tbl, index);
1582 if (!ret) {
1583 lq_sta->action_counter = 0;
1584 goto out;
1585 }
1586 break;
1587 }
1588 tbl->action++;
1589 if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
1590 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1591
1592 if (tbl->action == start_action)
1593 break;
1594
1595 }
1596 search_tbl->lq_type = LQ_NONE;
1597 return 0;
1598
1599out:
1600 lq_sta->search_better_tbl = 1;
1601 tbl->action++;
1602 if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
1603 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1604 if (update_search_tbl_counter)
1605 search_tbl->action = tbl->action;
1606 return 0;
1607
1608}
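/*
 * All of the rs_move_*_to_other() helpers follow the same round-robin
 * pattern seen above: start from the current tbl->action, try each
 * candidate in turn (antenna toggle, then the SISO/MIMO variants), wrap
 * around after the last action, and give up after one full cycle back to
 * start_action.  On success the prepared table is left in the "search"
 * slot and lq_sta->search_better_tbl is set, so the next call to
 * rs_rate_scale_perform() will try it and compare its measured
 * throughput against last_tpt before deciding whether to keep it.
 */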
1609
1610/*
1611 * Try to switch to new modulation mode from SISO
1612 */
1613static int rs_move_siso_to_other(struct iwl_priv *priv,
1614 struct iwl_lq_sta *lq_sta,
1615 struct ieee80211_conf *conf,
1616 struct ieee80211_sta *sta, int index)
1617{
1618 u8 is_green = lq_sta->is_green;
1619 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1620 struct iwl_scale_tbl_info *search_tbl =
1621 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1622 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1623 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1624 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1625 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1626 u8 start_action;
1627 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1628 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1629 u8 update_search_tbl_counter = 0;
1630 int ret;
1631
1632 switch (priv->bt_traffic_load) {
1633 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1634 /* nothing */
1635 break;
1636 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1637 /* avoid antenna B unless MIMO */
1638 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1639 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
1640 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1641 break;
1642 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1643 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1644 /* avoid antenna B and MIMO */
1645 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1646 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1647 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1648 break;
1649 default:
1650 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1651 break;
1652 }
1653
1654 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1655 tbl->action > IWL_SISO_SWITCH_ANTENNA2) {
1656 /* stay in SISO */
1657 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1658 }
1659
1660 /* configure as 1x1 if bt full concurrency */
1661 if (priv->bt_full_concurrent) {
1662 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1663 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1664 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1665 }
1666
1667 start_action = tbl->action;
1668 for (;;) {
1669 lq_sta->action_counter++;
1670 switch (tbl->action) {
1671 case IWL_SISO_SWITCH_ANTENNA1:
1672 case IWL_SISO_SWITCH_ANTENNA2:
1673 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1674 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1675 tx_chains_num <= 1) ||
1676 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1677 tx_chains_num <= 2))
1678 break;
1679
1680 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1681 !priv->bt_full_concurrent &&
1682 priv->bt_traffic_load ==
1683 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1684 break;
1685
1686 memcpy(search_tbl, tbl, sz);
1687 if (rs_toggle_antenna(valid_tx_ant,
1688 &search_tbl->current_rate, search_tbl)) {
1689 update_search_tbl_counter = 1;
1690 goto out;
1691 }
1692 break;
1693 case IWL_SISO_SWITCH_MIMO2_AB:
1694 case IWL_SISO_SWITCH_MIMO2_AC:
1695 case IWL_SISO_SWITCH_MIMO2_BC:
1696 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
1697 memcpy(search_tbl, tbl, sz);
1698 search_tbl->is_SGI = 0;
1699
1700 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1701 search_tbl->ant_type = ANT_AB;
1702 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1703 search_tbl->ant_type = ANT_AC;
1704 else
1705 search_tbl->ant_type = ANT_BC;
1706
1707 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1708 break;
1709
1710 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1711 search_tbl, index);
1712 if (!ret)
1713 goto out;
1714 break;
1715 case IWL_SISO_SWITCH_GI:
1716 if (!tbl->is_ht40 && !(ht_cap->cap &
1717 IEEE80211_HT_CAP_SGI_20))
1718 break;
1719 if (tbl->is_ht40 && !(ht_cap->cap &
1720 IEEE80211_HT_CAP_SGI_40))
1721 break;
1722
1723 IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
1724
1725 memcpy(search_tbl, tbl, sz);
1726 if (is_green) {
1727 if (!tbl->is_SGI)
1728 break;
1729 else
1730 IWL_ERR(priv,
1731 "SGI was set in GF+SISO\n");
1732 }
1733 search_tbl->is_SGI = !tbl->is_SGI;
1734 rs_set_expected_tpt_table(lq_sta, search_tbl);
1735 if (tbl->is_SGI) {
1736 s32 tpt = lq_sta->last_tpt / 100;
1737 if (tpt >= search_tbl->expected_tpt[index])
1738 break;
1739 }
1740 search_tbl->current_rate =
1741 rate_n_flags_from_tbl(priv, search_tbl,
1742 index, is_green);
1743 update_search_tbl_counter = 1;
1744 goto out;
1745 case IWL_SISO_SWITCH_MIMO3_ABC:
1746 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO3\n");
1747 memcpy(search_tbl, tbl, sz);
1748 search_tbl->is_SGI = 0;
1749 search_tbl->ant_type = ANT_ABC;
1750
1751 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1752 break;
1753
1754 ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
1755 search_tbl, index);
1756 if (!ret)
1757 goto out;
1758 break;
1759 }
1760 tbl->action++;
1761 if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
1762 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1763
1764 if (tbl->action == start_action)
1765 break;
1766 }
1767 search_tbl->lq_type = LQ_NONE;
1768 return 0;
1769
1770 out:
1771 lq_sta->search_better_tbl = 1;
1772 tbl->action++;
1773 if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
1774 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1775 if (update_search_tbl_counter)
1776 search_tbl->action = tbl->action;
1777
1778 return 0;
1779}
1780
1781/*
1782 * Try to switch to new modulation mode from MIMO2
1783 */
1784static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1785 struct iwl_lq_sta *lq_sta,
1786 struct ieee80211_conf *conf,
1787 struct ieee80211_sta *sta, int index)
1788{
1789 s8 is_green = lq_sta->is_green;
1790 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1791 struct iwl_scale_tbl_info *search_tbl =
1792 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1793 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1794 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1795 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1796 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1797 u8 start_action;
1798 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1799 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1800 u8 update_search_tbl_counter = 0;
1801 int ret;
1802
1803 switch (priv->bt_traffic_load) {
1804 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1805 /* nothing */
1806 break;
1807 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1808 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1809 /* avoid antenna B and MIMO */
1810 if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
1811 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1812 break;
1813 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1814 /* avoid antenna B unless MIMO */
1815 if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
1816 tbl->action == IWL_MIMO2_SWITCH_SISO_C)
1817 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1818 break;
1819 default:
1820 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1821 break;
1822 }
1823
1824 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1825 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1826 tbl->action > IWL_MIMO2_SWITCH_SISO_C)) {
1827		/* switch to SISO */
1828 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1829 }
1830
1831 /* configure as 1x1 if bt full concurrency */
1832 if (priv->bt_full_concurrent &&
1833 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1834 tbl->action > IWL_MIMO2_SWITCH_SISO_C))
1835 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1836
1837 start_action = tbl->action;
1838 for (;;) {
1839 lq_sta->action_counter++;
1840 switch (tbl->action) {
1841 case IWL_MIMO2_SWITCH_ANTENNA1:
1842 case IWL_MIMO2_SWITCH_ANTENNA2:
1843 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
1844
1845 if (tx_chains_num <= 2)
1846 break;
1847
1848 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1849 break;
1850
1851 memcpy(search_tbl, tbl, sz);
1852 if (rs_toggle_antenna(valid_tx_ant,
1853 &search_tbl->current_rate, search_tbl)) {
1854 update_search_tbl_counter = 1;
1855 goto out;
1856 }
1857 break;
1858 case IWL_MIMO2_SWITCH_SISO_A:
1859 case IWL_MIMO2_SWITCH_SISO_B:
1860 case IWL_MIMO2_SWITCH_SISO_C:
1861 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
1862
1863 /* Set up new search table for SISO */
1864 memcpy(search_tbl, tbl, sz);
1865
1866 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1867 search_tbl->ant_type = ANT_A;
1868 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1869 search_tbl->ant_type = ANT_B;
1870 else
1871 search_tbl->ant_type = ANT_C;
1872
1873 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1874 break;
1875
1876 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1877 search_tbl, index);
1878 if (!ret)
1879 goto out;
1880
1881 break;
1882
1883 case IWL_MIMO2_SWITCH_GI:
1884 if (!tbl->is_ht40 && !(ht_cap->cap &
1885 IEEE80211_HT_CAP_SGI_20))
1886 break;
1887 if (tbl->is_ht40 && !(ht_cap->cap &
1888 IEEE80211_HT_CAP_SGI_40))
1889 break;
1890
1891 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
1892
1893 /* Set up new search table for MIMO2 */
1894 memcpy(search_tbl, tbl, sz);
1895 search_tbl->is_SGI = !tbl->is_SGI;
1896 rs_set_expected_tpt_table(lq_sta, search_tbl);
1897 /*
1898 * If active table already uses the fastest possible
1899 * modulation (dual stream with short guard interval),
1900 * and it's working well, there's no need to look
1901 * for a better type of modulation!
1902 */
1903 if (tbl->is_SGI) {
1904 s32 tpt = lq_sta->last_tpt / 100;
1905 if (tpt >= search_tbl->expected_tpt[index])
1906 break;
1907 }
1908 search_tbl->current_rate =
1909 rate_n_flags_from_tbl(priv, search_tbl,
1910 index, is_green);
1911 update_search_tbl_counter = 1;
1912 goto out;
1913
1914 case IWL_MIMO2_SWITCH_MIMO3_ABC:
1915 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to MIMO3\n");
1916 memcpy(search_tbl, tbl, sz);
1917 search_tbl->is_SGI = 0;
1918 search_tbl->ant_type = ANT_ABC;
1919
1920 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1921 break;
1922
1923 ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
1924 search_tbl, index);
1925 if (!ret)
1926 goto out;
1927
1928 break;
1929 }
1930 tbl->action++;
1931 if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
1932 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1933
1934 if (tbl->action == start_action)
1935 break;
1936 }
1937 search_tbl->lq_type = LQ_NONE;
1938 return 0;
1939 out:
1940 lq_sta->search_better_tbl = 1;
1941 tbl->action++;
1942 if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
1943 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1944 if (update_search_tbl_counter)
1945 search_tbl->action = tbl->action;
1946
1947 return 0;
1948
1949}
1950
1951/*
1952 * Try to switch to new modulation mode from MIMO3
1953 */
1954static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1955 struct iwl_lq_sta *lq_sta,
1956 struct ieee80211_conf *conf,
1957 struct ieee80211_sta *sta, int index)
1958{
1959 s8 is_green = lq_sta->is_green;
1960 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1961 struct iwl_scale_tbl_info *search_tbl =
1962 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1963 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1964 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1965 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1966 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1967 u8 start_action;
1968 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1969 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1970 int ret;
1971 u8 update_search_tbl_counter = 0;
1972
1973 switch (priv->bt_traffic_load) {
1974 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1975 /* nothing */
1976 break;
1977 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1978 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1979 /* avoid antenna B and MIMO */
1980 if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
1981 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1982 break;
1983 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1984 /* avoid antenna B unless MIMO */
1985 if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
1986 tbl->action == IWL_MIMO3_SWITCH_SISO_C)
1987 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1988 break;
1989 default:
1990 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1991 break;
1992 }
1993
1994 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1995 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
1996 tbl->action > IWL_MIMO3_SWITCH_SISO_C)) {
1997		/* switch to SISO */
1998 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1999 }
2000
2001 /* configure as 1x1 if bt full concurrency */
2002 if (priv->bt_full_concurrent &&
2003 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
2004 tbl->action > IWL_MIMO3_SWITCH_SISO_C))
2005 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
2006
2007 start_action = tbl->action;
2008 for (;;) {
2009 lq_sta->action_counter++;
2010 switch (tbl->action) {
2011 case IWL_MIMO3_SWITCH_ANTENNA1:
2012 case IWL_MIMO3_SWITCH_ANTENNA2:
2013 IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle Antennas\n");
2014
2015 if (tx_chains_num <= 3)
2016 break;
2017
2018 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
2019 break;
2020
2021 memcpy(search_tbl, tbl, sz);
2022 if (rs_toggle_antenna(valid_tx_ant,
2023 &search_tbl->current_rate, search_tbl))
2024 goto out;
2025 break;
2026 case IWL_MIMO3_SWITCH_SISO_A:
2027 case IWL_MIMO3_SWITCH_SISO_B:
2028 case IWL_MIMO3_SWITCH_SISO_C:
2029 IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to SISO\n");
2030
2031 /* Set up new search table for SISO */
2032 memcpy(search_tbl, tbl, sz);
2033
2034 if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
2035 search_tbl->ant_type = ANT_A;
2036 else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
2037 search_tbl->ant_type = ANT_B;
2038 else
2039 search_tbl->ant_type = ANT_C;
2040
2041 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
2042 break;
2043
2044 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
2045 search_tbl, index);
2046 if (!ret)
2047 goto out;
2048
2049 break;
2050
2051 case IWL_MIMO3_SWITCH_MIMO2_AB:
2052 case IWL_MIMO3_SWITCH_MIMO2_AC:
2053 case IWL_MIMO3_SWITCH_MIMO2_BC:
2054 IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to MIMO2\n");
2055
2056 memcpy(search_tbl, tbl, sz);
2057 search_tbl->is_SGI = 0;
2058 if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
2059 search_tbl->ant_type = ANT_AB;
2060 else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
2061 search_tbl->ant_type = ANT_AC;
2062 else
2063 search_tbl->ant_type = ANT_BC;
2064
2065 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
2066 break;
2067
2068 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
2069 search_tbl, index);
2070 if (!ret)
2071 goto out;
2072
2073 break;
2074
2075 case IWL_MIMO3_SWITCH_GI:
2076 if (!tbl->is_ht40 && !(ht_cap->cap &
2077 IEEE80211_HT_CAP_SGI_20))
2078 break;
2079 if (tbl->is_ht40 && !(ht_cap->cap &
2080 IEEE80211_HT_CAP_SGI_40))
2081 break;
2082
2083 IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle SGI/NGI\n");
2084
2085 /* Set up new search table for MIMO */
2086 memcpy(search_tbl, tbl, sz);
2087 search_tbl->is_SGI = !tbl->is_SGI;
2088 rs_set_expected_tpt_table(lq_sta, search_tbl);
2089 /*
2090 * If active table already uses the fastest possible
2091 * modulation (dual stream with short guard interval),
2092 * and it's working well, there's no need to look
2093 * for a better type of modulation!
2094 */
2095 if (tbl->is_SGI) {
2096 s32 tpt = lq_sta->last_tpt / 100;
2097 if (tpt >= search_tbl->expected_tpt[index])
2098 break;
2099 }
2100 search_tbl->current_rate =
2101 rate_n_flags_from_tbl(priv, search_tbl,
2102 index, is_green);
2103 update_search_tbl_counter = 1;
2104 goto out;
2105 }
2106 tbl->action++;
2107 if (tbl->action > IWL_MIMO3_SWITCH_GI)
2108 tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
2109
2110 if (tbl->action == start_action)
2111 break;
2112 }
2113 search_tbl->lq_type = LQ_NONE;
2114 return 0;
2115 out:
2116 lq_sta->search_better_tbl = 1;
2117 tbl->action++;
2118 if (tbl->action > IWL_MIMO3_SWITCH_GI)
2119 tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
2120 if (update_search_tbl_counter)
2121 search_tbl->action = tbl->action;
2122
2123 return 0;
2124
2125}
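/*
 * Bluetooth coexistence shapes all four move functions the same way:
 * with low BT traffic antenna B is avoided unless running MIMO, with
 * high or continuous BT traffic both antenna B and MIMO are avoided
 * (forcing the search toward SISO on the first antenna), and
 * bt_full_concurrent additionally restricts the search to 1x1
 * configurations regardless of what the action walk would otherwise try.
 */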
2126
2127/*
2128 * Check whether we should continue using same modulation mode, or
2129 * begin search for a new mode, based on:
2130 * 1) # tx successes or failures while using this mode
2131 * 2) # times calling this function
2132 * 3) elapsed time in this mode (not used, for now)
2133 */
2134static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
2135{
2136 struct iwl_scale_tbl_info *tbl;
2137 int i;
2138 int active_tbl;
2139 int flush_interval_passed = 0;
2140 struct iwl_priv *priv;
2141
2142 priv = lq_sta->drv;
2143 active_tbl = lq_sta->active_tbl;
2144
2145 tbl = &(lq_sta->lq_info[active_tbl]);
2146
2147 /* If we've been disallowing search, see if we should now allow it */
2148 if (lq_sta->stay_in_tbl) {
2149
2150 /* Elapsed time using current modulation mode */
2151 if (lq_sta->flush_timer)
2152 flush_interval_passed =
2153 time_after(jiffies,
2154 (unsigned long)(lq_sta->flush_timer +
2155 IWL_RATE_SCALE_FLUSH_INTVL));
2156
2157 /*
2158 * Check if we should allow search for new modulation mode.
2159 * If many frames have failed or succeeded, or we've used
2160 * this same modulation for a long time, allow search, and
2161 * reset history stats that keep track of whether we should
2162 * allow a new search. Also (below) reset all bitmaps and
2163 * stats in active history.
2164 */
2165 if (force_search ||
2166 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
2167 (lq_sta->total_success > lq_sta->max_success_limit) ||
2168 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
2169 && (flush_interval_passed))) {
2170 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
2171 lq_sta->total_failed,
2172 lq_sta->total_success,
2173 flush_interval_passed);
2174
2175 /* Allow search for new mode */
2176 lq_sta->stay_in_tbl = 0; /* only place reset */
2177 lq_sta->total_failed = 0;
2178 lq_sta->total_success = 0;
2179 lq_sta->flush_timer = 0;
2180
2181 /*
2182 * Else if we've used this modulation mode enough repetitions
2183 * (regardless of elapsed time or success/failure), reset
2184 * history bitmaps and rate-specific stats for all rates in
2185 * active table.
2186 */
2187 } else {
2188 lq_sta->table_count++;
2189 if (lq_sta->table_count >=
2190 lq_sta->table_count_limit) {
2191 lq_sta->table_count = 0;
2192
2193 IWL_DEBUG_RATE(priv, "LQ: stay in table clear win\n");
2194 for (i = 0; i < IWL_RATE_COUNT; i++)
2195 rs_rate_scale_clear_window(
2196 &(tbl->win[i]));
2197 }
2198 }
2199
2200 /* If transitioning to allow "search", reset all history
2201 * bitmaps and stats in active table (this will become the new
2202 * "search" table). */
2203 if (!lq_sta->stay_in_tbl) {
2204 for (i = 0; i < IWL_RATE_COUNT; i++)
2205 rs_rate_scale_clear_window(&(tbl->win[i]));
2206 }
2207 }
2208}
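/*
 * In practice a new modulation search is opened up by any of three
 * triggers: too many cumulative failures (total_failed above
 * max_failure_limit), enough cumulative successes (total_success above
 * max_success_limit), or simply having sat in the same mode past the
 * flush interval while not already searching.  Clearing every rate
 * window at that point lets the upcoming search start from a clean
 * throughput history instead of stale measurements.
 */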
2209
2210/*
2211 * setup rate table in uCode
2212 * return rate_n_flags as used in the table
2213 */
2214static void rs_update_rate_tbl(struct iwl_priv *priv,
2215 struct iwl_rxon_context *ctx,
2216 struct iwl_lq_sta *lq_sta,
2217 struct iwl_scale_tbl_info *tbl,
2218 int index, u8 is_green)
2219{
2220 u32 rate;
2221
2222 /* Update uCode's rate table. */
2223 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
2224 rs_fill_link_cmd(priv, lq_sta, rate);
2225 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2226}
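/*
 * Note that the LQ command is sent with CMD_ASYNC here, in the hot
 * rate-scaling path, whereas rs_initialize_lq() below uses CMD_SYNC for
 * a station's very first table; both ultimately carry the full
 * retry/fallback table built by rs_fill_link_cmd().
 */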
2227
2228/*
2229 * Do rate scaling and search for new modulation mode.
2230 */
2231static void rs_rate_scale_perform(struct iwl_priv *priv,
2232 struct sk_buff *skb,
2233 struct ieee80211_sta *sta,
2234 struct iwl_lq_sta *lq_sta)
2235{
2236 struct ieee80211_hw *hw = priv->hw;
2237 struct ieee80211_conf *conf = &hw->conf;
2238 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2239 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2240 int low = IWL_RATE_INVALID;
2241 int high = IWL_RATE_INVALID;
2242 int index;
2243 int i;
2244 struct iwl_rate_scale_data *window = NULL;
2245 int current_tpt = IWL_INVALID_VALUE;
2246 int low_tpt = IWL_INVALID_VALUE;
2247 int high_tpt = IWL_INVALID_VALUE;
2248 u32 fail_count;
2249 s8 scale_action = 0;
2250 u16 rate_mask;
2251 u8 update_lq = 0;
2252 struct iwl_scale_tbl_info *tbl, *tbl1;
2253 u16 rate_scale_index_msk = 0;
2254 u8 is_green = 0;
2255 u8 active_tbl = 0;
2256 u8 done_search = 0;
2257 u16 high_low;
2258 s32 sr;
2259 u8 tid = MAX_TID_COUNT;
2260 struct iwl_tid_data *tid_data;
2261 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2262 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
2263
2264 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
2265
2266 /* Send management frames and NO_ACK data using lowest rate. */
2267 /* TODO: this could probably be improved.. */
2268 if (!ieee80211_is_data(hdr->frame_control) ||
2269 info->flags & IEEE80211_TX_CTL_NO_ACK)
2270 return;
2271
2272 if (!sta || !lq_sta)
2273 return;
2274
2275 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
2276
2277 tid = rs_tl_add_packet(lq_sta, hdr);
2278 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
2279 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
2280 if (tid_data->agg.state == IWL_AGG_OFF)
2281 lq_sta->is_agg = 0;
2282 else
2283 lq_sta->is_agg = 1;
2284 } else
2285 lq_sta->is_agg = 0;
2286
2287 /*
2288 * Select rate-scale / modulation-mode table to work with in
2289 * the rest of this function: "search" if searching for better
2290 * modulation mode, or "active" if doing rate scaling within a mode.
2291 */
2292 if (!lq_sta->search_better_tbl)
2293 active_tbl = lq_sta->active_tbl;
2294 else
2295 active_tbl = 1 - lq_sta->active_tbl;
2296
2297 tbl = &(lq_sta->lq_info[active_tbl]);
2298 if (is_legacy(tbl->lq_type))
2299 lq_sta->is_green = 0;
2300 else
2301 lq_sta->is_green = rs_use_green(sta);
2302 is_green = lq_sta->is_green;
2303
2304 /* current tx rate */
2305 index = lq_sta->last_txrate_idx;
2306
2307 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
2308 tbl->lq_type);
2309
2310 /* rates available for this association, and for modulation mode */
2311 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
2312
2313 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
2314
2315 /* mask with station rate restriction */
2316 if (is_legacy(tbl->lq_type)) {
2317 if (lq_sta->band == IEEE80211_BAND_5GHZ)
2318 /* supp_rates has no CCK bits in A mode */
2319 rate_scale_index_msk = (u16) (rate_mask &
2320 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
2321 else
2322 rate_scale_index_msk = (u16) (rate_mask &
2323 lq_sta->supp_rates);
2324
2325 } else
2326 rate_scale_index_msk = rate_mask;
2327
2328 if (!rate_scale_index_msk)
2329 rate_scale_index_msk = rate_mask;
2330
2331 if (!((1 << index) & rate_scale_index_msk)) {
2332 IWL_ERR(priv, "Current Rate is not valid\n");
2333 if (lq_sta->search_better_tbl) {
2334 /* revert to active table if search table is not valid*/
2335 tbl->lq_type = LQ_NONE;
2336 lq_sta->search_better_tbl = 0;
2337 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2338 /* get "active" rate info */
2339 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2340 rs_update_rate_tbl(priv, ctx, lq_sta, tbl,
2341 index, is_green);
2342 }
2343 return;
2344 }
2345
2346 /* Get expected throughput table and history window for current rate */
2347 if (!tbl->expected_tpt) {
2348 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
2349 return;
2350 }
2351
2352 /* force user max rate if set by user */
2353 if ((lq_sta->max_rate_idx != -1) &&
2354 (lq_sta->max_rate_idx < index)) {
2355 index = lq_sta->max_rate_idx;
2356 update_lq = 1;
2357 window = &(tbl->win[index]);
2358 goto lq_update;
2359 }
2360
2361 window = &(tbl->win[index]);
2362
2363 /*
2364 * If there is not enough history to calculate actual average
2365 * throughput, keep analyzing results of more tx frames, without
2366 * changing rate or mode (bypass most of the rest of this function).
2367 * Set up new rate table in uCode only if old rate is not supported
2368 * in current association (use new rate found above).
2369 */
2370 fail_count = window->counter - window->success_counter;
2371 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
2372 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
2373 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
2374 "for index %d\n",
2375 window->success_counter, window->counter, index);
2376
2377 /* Can't calculate this yet; not enough history */
2378 window->average_tpt = IWL_INVALID_VALUE;
2379
2380 /* Should we stay with this modulation mode,
2381 * or search for a new one? */
2382 rs_stay_in_table(lq_sta, false);
2383
2384 goto out;
2385 }
2386 /* Else we have enough samples; calculate estimate of
2387 * actual average throughput */
2388 if (window->average_tpt != ((window->success_ratio *
2389 tbl->expected_tpt[index] + 64) / 128)) {
2390 IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
2391 window->average_tpt = ((window->success_ratio *
2392 tbl->expected_tpt[index] + 64) / 128);
2393 }
2394
2395 /* If we are searching for better modulation mode, check success. */
2396 if (lq_sta->search_better_tbl &&
2397 (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI)) {
2398 /* If good success, continue using the "search" mode;
2399 * no need to send new link quality command, since we're
2400 * continuing to use the setup that we've been trying. */
2401 if (window->average_tpt > lq_sta->last_tpt) {
2402
2403 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
2404 "suc=%d cur-tpt=%d old-tpt=%d\n",
2405 window->success_ratio,
2406 window->average_tpt,
2407 lq_sta->last_tpt);
2408
2409 if (!is_legacy(tbl->lq_type))
2410 lq_sta->enable_counter = 1;
2411
2412 /* Swap tables; "search" becomes "active" */
2413 lq_sta->active_tbl = active_tbl;
2414 current_tpt = window->average_tpt;
2415
2416 /* Else poor success; go back to mode in "active" table */
2417 } else {
2418
2419 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
2420 "suc=%d cur-tpt=%d old-tpt=%d\n",
2421 window->success_ratio,
2422 window->average_tpt,
2423 lq_sta->last_tpt);
2424
2425 /* Nullify "search" table */
2426 tbl->lq_type = LQ_NONE;
2427
2428 /* Revert to "active" table */
2429 active_tbl = lq_sta->active_tbl;
2430 tbl = &(lq_sta->lq_info[active_tbl]);
2431
2432 /* Revert to "active" rate and throughput info */
2433 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2434 current_tpt = lq_sta->last_tpt;
2435
2436 /* Need to set up a new rate table in uCode */
2437 update_lq = 1;
2438 }
2439
2440 /* Either way, we've made a decision; modulation mode
2441 * search is done, allow rate adjustment next time. */
2442 lq_sta->search_better_tbl = 0;
2443 done_search = 1; /* Don't switch modes below! */
2444 goto lq_update;
2445 }
2446
2447 /* (Else) not in search of better modulation mode, try for better
2448 * starting rate, while staying in this mode. */
2449 high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk,
2450 tbl->lq_type);
2451 low = high_low & 0xff;
2452 high = (high_low >> 8) & 0xff;
2453
2454	/* If user set max rate, don't allow higher than user constraint */
2455 if ((lq_sta->max_rate_idx != -1) &&
2456 (lq_sta->max_rate_idx < high))
2457 high = IWL_RATE_INVALID;
2458
2459 sr = window->success_ratio;
2460
2461 /* Collect measured throughputs for current and adjacent rates */
2462 current_tpt = window->average_tpt;
2463 if (low != IWL_RATE_INVALID)
2464 low_tpt = tbl->win[low].average_tpt;
2465 if (high != IWL_RATE_INVALID)
2466 high_tpt = tbl->win[high].average_tpt;
2467
2468 scale_action = 0;
2469
2470 /* Too many failures, decrease rate */
2471 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
2472 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
2473 scale_action = -1;
2474
2475 /* No throughput measured yet for adjacent rates; try increase. */
2476 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2477 (high_tpt == IWL_INVALID_VALUE)) {
2478
2479 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2480 scale_action = 1;
2481 else if (low != IWL_RATE_INVALID)
2482 scale_action = 0;
2483 }
2484
2485 /* Both adjacent throughputs are measured, but neither one has better
2486 * throughput; we're using the best rate, don't change it! */
2487 else if ((low_tpt != IWL_INVALID_VALUE) &&
2488 (high_tpt != IWL_INVALID_VALUE) &&
2489 (low_tpt < current_tpt) &&
2490 (high_tpt < current_tpt))
2491 scale_action = 0;
2492
2493 /* At least one adjacent rate's throughput is measured,
2494 * and may have better performance. */
2495 else {
2496 /* Higher adjacent rate's throughput is measured */
2497 if (high_tpt != IWL_INVALID_VALUE) {
2498 /* Higher rate has better throughput */
2499 if (high_tpt > current_tpt &&
2500 sr >= IWL_RATE_INCREASE_TH) {
2501 scale_action = 1;
2502 } else {
2503 scale_action = 0;
2504 }
2505
2506 /* Lower adjacent rate's throughput is measured */
2507 } else if (low_tpt != IWL_INVALID_VALUE) {
2508 /* Lower rate has better throughput */
2509 if (low_tpt > current_tpt) {
2510 IWL_DEBUG_RATE(priv,
2511 "decrease rate because of low tpt\n");
2512 scale_action = -1;
2513 } else if (sr >= IWL_RATE_INCREASE_TH) {
2514 scale_action = 1;
2515 }
2516 }
2517 }
2518
2519 /* Sanity check; asked for decrease, but success rate or throughput
2520 * has been good at old rate. Don't change it. */
2521 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2522 ((sr > IWL_RATE_HIGH_TH) ||
2523 (current_tpt > (100 * tbl->expected_tpt[low]))))
2524 scale_action = 0;
2525 if (!iwl_ht_enabled(priv) && !is_legacy(tbl->lq_type))
2526 scale_action = -1;
2527 if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI &&
2528 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type)))
2529 scale_action = -1;
2530
2531 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2532 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2533 if (lq_sta->last_bt_traffic > priv->bt_traffic_load) {
2534 /*
2535 * don't set scale_action, don't want to scale up if
2536 * the rate scale doesn't otherwise think that is a
2537 * good idea.
2538 */
2539 } else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) {
2540 scale_action = -1;
2541 }
2542 }
2543 lq_sta->last_bt_traffic = priv->bt_traffic_load;
2544
2545 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2546 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2547 /* search for a new modulation */
2548 rs_stay_in_table(lq_sta, true);
2549 goto lq_update;
2550 }
2551
2552 switch (scale_action) {
2553 case -1:
2554 /* Decrease starting rate, update uCode's rate table */
2555 if (low != IWL_RATE_INVALID) {
2556 update_lq = 1;
2557 index = low;
2558 }
2559
2560 break;
2561 case 1:
2562 /* Increase starting rate, update uCode's rate table */
2563 if (high != IWL_RATE_INVALID) {
2564 update_lq = 1;
2565 index = high;
2566 }
2567
2568 break;
2569 case 0:
2570 /* No change */
2571 default:
2572 break;
2573 }
2574
2575 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2576 "high %d type %d\n",
2577 index, scale_action, low, high, tbl->lq_type);
2578
2579lq_update:
2580 /* Replace uCode's rate table for the destination station. */
2581 if (update_lq)
2582 rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green);
2583
2584 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
2585 /* Should we stay with this modulation mode,
2586 * or search for a new one? */
2587 rs_stay_in_table(lq_sta, false);
2588 }
2589 /*
2590 * Search for new modulation mode if we're:
2591 * 1) Not changing rates right now
2592 * 2) Not just finishing up a search
2593 * 3) Allowing a new search
2594 */
2595 if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) {
2596 /* Save current throughput to compare with "search" throughput*/
2597 lq_sta->last_tpt = current_tpt;
2598
2599 /* Select a new "search" modulation mode to try.
2600 * If one is found, set up the new "search" table. */
2601 if (is_legacy(tbl->lq_type))
2602 rs_move_legacy_other(priv, lq_sta, conf, sta, index);
2603 else if (is_siso(tbl->lq_type))
2604 rs_move_siso_to_other(priv, lq_sta, conf, sta, index);
2605 else if (is_mimo2(tbl->lq_type))
2606 rs_move_mimo2_to_other(priv, lq_sta, conf, sta, index);
2607 else
2608 rs_move_mimo3_to_other(priv, lq_sta, conf, sta, index);
2609
2610 /* If new "search" mode was selected, set up in uCode table */
2611 if (lq_sta->search_better_tbl) {
2612 /* Access the "search" table, clear its history. */
2613 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2614 for (i = 0; i < IWL_RATE_COUNT; i++)
2615 rs_rate_scale_clear_window(&(tbl->win[i]));
2616
2617 /* Use new "search" start rate */
2618 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2619
2620 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
2621 tbl->current_rate, index);
2622 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2623 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2624 } else
2625 done_search = 1;
2626 }
2627
2628 if (done_search && !lq_sta->stay_in_tbl) {
2629 /* If the "active" (non-search) mode was legacy,
2630 * and we've tried switching antennas,
2631 * but we haven't been able to try HT modes (not available),
2632 * stay with best antenna legacy modulation for a while
2633 * before next round of mode comparisons. */
2634 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2635 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2636 lq_sta->action_counter > tbl1->max_search) {
2637 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2638 rs_set_stay_in_table(priv, 1, lq_sta);
2639 }
2640
2641 /* If we're in an HT mode, and all 3 mode switch actions
2642 * have been tried and compared, stay in this best modulation
2643 * mode for a while before next round of mode comparisons. */
2644 if (lq_sta->enable_counter &&
2645 (lq_sta->action_counter >= tbl1->max_search) &&
2646 iwl_ht_enabled(priv)) {
2647 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2648 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2649 (tid != MAX_TID_COUNT)) {
2650 tid_data =
2651 &priv->stations[lq_sta->lq.sta_id].tid[tid];
2652 if (tid_data->agg.state == IWL_AGG_OFF) {
2653 IWL_DEBUG_RATE(priv,
2654 "try to aggregate tid %d\n",
2655 tid);
2656 rs_tl_turn_on_agg(priv, tid,
2657 lq_sta, sta);
2658 }
2659 }
2660 rs_set_stay_in_table(priv, 0, lq_sta);
2661 }
2662 }
2663
2664out:
2665 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
2666 i = index;
2667 lq_sta->last_txrate_idx = i;
2668}
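/*
 * Example of the scale_action decision with hypothetical window values:
 * suppose the current rate's average_tpt is 4200, the next lower rate
 * has measured 3100 and the next higher rate 5000.  The higher
 * neighbour beats the current throughput, so if the success ratio is
 * also at or above IWL_RATE_INCREASE_TH the action becomes +1 and the
 * index moves up.  If instead both neighbours had measured below 4200,
 * the action stays 0 and the current rate is kept, since neither
 * direction promises an improvement.
 */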
2669
2670/**
2671 * rs_initialize_lq - Initialize a station's hardware rate table
2672 *
2673 * The uCode's station table contains a table of fallback rates
2674 * for automatic fallback during transmission.
2675 *
2676 * NOTE: This sets up a default set of values. These will be replaced later
2677 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
2678 * rc80211_simple.
2679 *
2680 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2681 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2682 * which requires station table entry to exist).
2683 */
2684static void rs_initialize_lq(struct iwl_priv *priv,
2685 struct ieee80211_conf *conf,
2686 struct ieee80211_sta *sta,
2687 struct iwl_lq_sta *lq_sta)
2688{
2689 struct iwl_scale_tbl_info *tbl;
2690 int rate_idx;
2691 int i;
2692 u32 rate;
2693 u8 use_green = rs_use_green(sta);
2694 u8 active_tbl = 0;
2695 u8 valid_tx_ant;
2696 struct iwl_station_priv *sta_priv;
2697 struct iwl_rxon_context *ctx;
2698
2699 if (!sta || !lq_sta)
2700 return;
2701
2702 sta_priv = (void *)sta->drv_priv;
2703 ctx = sta_priv->common.ctx;
2704
2705 i = lq_sta->last_txrate_idx;
2706
2707 valid_tx_ant = priv->hw_params.valid_tx_ant;
2708
2709 if (!lq_sta->search_better_tbl)
2710 active_tbl = lq_sta->active_tbl;
2711 else
2712 active_tbl = 1 - lq_sta->active_tbl;
2713
2714 tbl = &(lq_sta->lq_info[active_tbl]);
2715
2716 if ((i < 0) || (i >= IWL_RATE_COUNT))
2717 i = 0;
2718
2719 rate = iwl_rates[i].plcp;
2720 tbl->ant_type = first_antenna(valid_tx_ant);
2721 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2722
2723 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2724 rate |= RATE_MCS_CCK_MSK;
2725
2726 rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2727 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2728 rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2729
2730 rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2731 tbl->current_rate = rate;
2732 rs_set_expected_tpt_table(lq_sta, tbl);
2733 rs_fill_link_cmd(NULL, lq_sta, rate);
2734 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2735 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2736}
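/*
 * rs_initialize_lq() passes a NULL priv to rs_fill_link_cmd(), which is
 * why that function guards its priv dereferences; with a NULL priv the
 * Bluetooth-coexistence and antenna-toggle adjustments are simply
 * skipped while this initial, default rate table is built.
 */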
2737
2738static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2739 struct ieee80211_tx_rate_control *txrc)
2740{
2741
2742 struct sk_buff *skb = txrc->skb;
2743 struct ieee80211_supported_band *sband = txrc->sband;
2744 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
2745 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2746 struct iwl_lq_sta *lq_sta = priv_sta;
2747 int rate_idx;
2748
2749 IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
2750
2751 /* Get max rate if user set max rate */
2752 if (lq_sta) {
2753 lq_sta->max_rate_idx = txrc->max_rate_idx;
2754 if ((sband->band == IEEE80211_BAND_5GHZ) &&
2755 (lq_sta->max_rate_idx != -1))
2756 lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
2757 if ((lq_sta->max_rate_idx < 0) ||
2758 (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
2759 lq_sta->max_rate_idx = -1;
2760 }
2761
2762 /* Treat uninitialized rate scaling data same as non-existing. */
2763 if (lq_sta && !lq_sta->drv) {
2764 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
2765 priv_sta = NULL;
2766 }
2767
2768 /* Send management frames and NO_ACK data using lowest rate. */
2769 if (rate_control_send_low(sta, priv_sta, txrc))
2770 return;
2771
2772 rate_idx = lq_sta->last_txrate_idx;
2773
2774 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2775 rate_idx -= IWL_FIRST_OFDM_RATE;
2776 /* 6M and 9M shared same MCS index */
2777 rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
2778 if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
2779 IWL_RATE_MIMO3_6M_PLCP)
2780 rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
2781 else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
2782 IWL_RATE_MIMO2_6M_PLCP)
2783 rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
2784 info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
2785 if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
2786 info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
2787 if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
2788 info->control.rates[0].flags |= IEEE80211_TX_RC_DUP_DATA;
2789 if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
2790 info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2791 if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
2792 info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
2793 } else {
2794 /* Check for invalid rates */
2795 if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
2796 ((sband->band == IEEE80211_BAND_5GHZ) &&
2797 (rate_idx < IWL_FIRST_OFDM_RATE)))
2798 rate_idx = rate_lowest_index(sband, sta);
2799 /* On valid 5 GHz rate, adjust index */
2800 else if (sband->band == IEEE80211_BAND_5GHZ)
2801 rate_idx -= IWL_FIRST_OFDM_RATE;
2802 info->control.rates[0].flags = 0;
2803 }
2804 info->control.rates[0].idx = rate_idx;
2805
2806}
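/*
 * The HT branch above converts the driver's internal rate index back to
 * a mac80211 MCS index: subtract IWL_FIRST_OFDM_RATE, collapse the 9M
 * slot (6M and 9M map to the same MCS), then add MCS_INDEX_PER_STREAM
 * once for a MIMO2 rate and twice for a MIMO3 rate, matching the
 * 802.11n numbering in which each additional spatial stream occupies
 * the next block of eight MCS values.
 */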
2807
2808static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2809 gfp_t gfp)
2810{
2811 struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2812 struct iwl_priv *priv;
2813
2814 priv = (struct iwl_priv *)priv_rate;
2815 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2816
2817 return &sta_priv->lq_sta;
2818}
2819
2820/*
2821 * Called after adding a new station to initialize rate scaling
2822 */
2823void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
2824{
2825 int i, j;
2826 struct ieee80211_hw *hw = priv->hw;
2827 struct ieee80211_conf *conf = &priv->hw->conf;
2828 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2829 struct iwl_station_priv *sta_priv;
2830 struct iwl_lq_sta *lq_sta;
2831 struct ieee80211_supported_band *sband;
2832
2833 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2834 lq_sta = &sta_priv->lq_sta;
2835 sband = hw->wiphy->bands[conf->channel->band];
2836
2837
2838 lq_sta->lq.sta_id = sta_id;
2839
2840 for (j = 0; j < LQ_SIZE; j++)
2841 for (i = 0; i < IWL_RATE_COUNT; i++)
2842 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2843
2844 lq_sta->flush_timer = 0;
2845 lq_sta->supp_rates = sta->supp_rates[sband->band];
2846 for (j = 0; j < LQ_SIZE; j++)
2847 for (i = 0; i < IWL_RATE_COUNT; i++)
2848 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2849
2850 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
2851 sta_id);
2852 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2853 * the lowest or the highest rate.. Could consider using RSSI from
2854 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2855 * after assoc.. */
2856
2857 lq_sta->is_dup = 0;
2858 lq_sta->max_rate_idx = -1;
2859 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2860 lq_sta->is_green = rs_use_green(sta);
2861 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2862 lq_sta->band = priv->band;
2863 /*
2864 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2865 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2866 */
2867 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2868 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2869 lq_sta->active_siso_rate &= ~((u16)0x2);
2870 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2871
2872 /* Same here */
2873 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2874 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2875 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2876 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2877
2878 lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
2879 lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
2880 lq_sta->active_mimo3_rate &= ~((u16)0x2);
2881 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
2882
2883 IWL_DEBUG_RATE(priv, "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
2884 lq_sta->active_siso_rate,
2885 lq_sta->active_mimo2_rate,
2886 lq_sta->active_mimo3_rate);
2887
2888 /* These values will be overridden later */
2889 lq_sta->lq.general_params.single_stream_ant_msk =
2890 first_antenna(priv->hw_params.valid_tx_ant);
2891 lq_sta->lq.general_params.dual_stream_ant_msk =
2892 priv->hw_params.valid_tx_ant &
2893 ~first_antenna(priv->hw_params.valid_tx_ant);
2894 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2895 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2896 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2897 lq_sta->lq.general_params.dual_stream_ant_msk =
2898 priv->hw_params.valid_tx_ant;
2899 }
2900
2901 /* as default allow aggregation for all tids */
2902 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2903 lq_sta->drv = priv;
2904
2905 /* Set last_txrate_idx to lowest rate */
2906 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2907 if (sband->band == IEEE80211_BAND_5GHZ)
2908 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2909 lq_sta->is_agg = 0;
2910#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
2911 priv->tm_fixed_rate = 0;
2912#endif
2913#ifdef CONFIG_MAC80211_DEBUGFS
2914 lq_sta->dbg_fixed_rate = 0;
2915#endif
2916
2917 rs_initialize_lq(priv, conf, sta, lq_sta);
2918}
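/*
 * The rx_mask manipulation above works as follows: shifting the MCS
 * bitmap left by one opens a hole for the 9 Mbps legacy slot, OR-ing
 * bit 0 back in keeps MCS 0 aligned with the 6 Mbps slot, clearing bit 1
 * forces the 9 Mbps position off (it has no HT equivalent), and the
 * final shift by IWL_FIRST_OFDM_RATE moves everything past the CCK
 * rates.  For example, a peer advertising MCS 0-7 (rx_mask[0] == 0xff)
 * ends up with every OFDM-position bit set except the 9 Mbps one.
 */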
2919
2920static void rs_fill_link_cmd(struct iwl_priv *priv,
2921 struct iwl_lq_sta *lq_sta, u32 new_rate)
2922{
2923 struct iwl_scale_tbl_info tbl_type;
2924 int index = 0;
2925 int rate_idx;
2926 int repeat_rate = 0;
2927 u8 ant_toggle_cnt = 0;
2928 u8 use_ht_possible = 1;
2929 u8 valid_tx_ant = 0;
2930 struct iwl_station_priv *sta_priv =
2931 container_of(lq_sta, struct iwl_station_priv, lq_sta);
2932 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
2933
2934 /* Override starting rate (index 0) if needed for debug purposes */
2935 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2936
2937 /* Interpret new_rate (rate_n_flags) */
2938 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2939 &tbl_type, &rate_idx);
2940
2941 if (priv && priv->bt_full_concurrent) {
2942 /* 1x1 only */
2943 tbl_type.ant_type =
2944 first_antenna(priv->hw_params.valid_tx_ant);
2945 }
2946
2947 /* How many times should we repeat the initial rate? */
2948 if (is_legacy(tbl_type.lq_type)) {
2949 ant_toggle_cnt = 1;
2950 repeat_rate = IWL_NUMBER_TRY;
2951 } else {
2952 repeat_rate = min(IWL_HT_NUMBER_TRY,
2953 LINK_QUAL_AGG_DISABLE_START_DEF - 1);
2954 }
2955
2956 lq_cmd->general_params.mimo_delimiter =
2957 is_mimo(tbl_type.lq_type) ? 1 : 0;
2958
2959 /* Fill 1st table entry (index 0) */
2960 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2961
2962 if (num_of_ant(tbl_type.ant_type) == 1) {
2963 lq_cmd->general_params.single_stream_ant_msk =
2964 tbl_type.ant_type;
2965 } else if (num_of_ant(tbl_type.ant_type) == 2) {
2966 lq_cmd->general_params.dual_stream_ant_msk =
2967 tbl_type.ant_type;
2968 } /* otherwise we don't modify the existing value */
2969
2970 index++;
2971 repeat_rate--;
2972 if (priv) {
2973 if (priv->bt_full_concurrent)
2974 valid_tx_ant = ANT_A;
2975 else
2976 valid_tx_ant = priv->hw_params.valid_tx_ant;
2977 }
2978
2979 /* Fill rest of rate table */
2980 while (index < LINK_QUAL_MAX_RETRY_NUM) {
2981 /* Repeat initial/next rate.
2982 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
2983 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
2984 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
2985 if (is_legacy(tbl_type.lq_type)) {
2986 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2987 ant_toggle_cnt++;
2988 else if (priv &&
2989 rs_toggle_antenna(valid_tx_ant,
2990 &new_rate, &tbl_type))
2991 ant_toggle_cnt = 1;
2992 }
2993
2994 /* Override next rate if needed for debug purposes */
2995 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2996
2997 /* Fill next table entry */
2998 lq_cmd->rs_table[index].rate_n_flags =
2999 cpu_to_le32(new_rate);
3000 repeat_rate--;
3001 index++;
3002 }
3003
3004 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
3005 &rate_idx);
3006
3007 if (priv && priv->bt_full_concurrent) {
3008 /* 1x1 only */
3009 tbl_type.ant_type =
3010 first_antenna(priv->hw_params.valid_tx_ant);
3011 }
3012
3013 /* Indicate to uCode which entries might be MIMO.
3014 * If initial rate was MIMO, this will finally end up
3015 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
3016 if (is_mimo(tbl_type.lq_type))
3017 lq_cmd->general_params.mimo_delimiter = index;
3018
3019 /* Get next rate */
3020 new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
3021 use_ht_possible);
3022
3023 /* How many times should we repeat the next rate? */
3024 if (is_legacy(tbl_type.lq_type)) {
3025 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
3026 ant_toggle_cnt++;
3027 else if (priv &&
3028 rs_toggle_antenna(valid_tx_ant,
3029 &new_rate, &tbl_type))
3030 ant_toggle_cnt = 1;
3031
3032 repeat_rate = IWL_NUMBER_TRY;
3033 } else {
3034 repeat_rate = IWL_HT_NUMBER_TRY;
3035 }
3036
3037 /* Don't allow HT rates after next pass.
3038 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
3039 use_ht_possible = 0;
3040
3041 /* Override next rate if needed for debug purposes */
3042 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
3043
3044 /* Fill next table entry */
3045 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
3046
3047 index++;
3048 repeat_rate--;
3049 }
3050
3051 lq_cmd->agg_params.agg_frame_cnt_limit =
3052 sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3053 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
3054
3055 lq_cmd->agg_params.agg_time_limit =
3056 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
3057 /*
3058 * overwrite if needed, pass aggregation time limit
3059 * to uCode in uSec
3060 */
3061 if (priv && priv->cfg->bt_params &&
3062 priv->cfg->bt_params->agg_time_limit &&
3063 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
3064 lq_cmd->agg_params.agg_time_limit =
3065 cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
3066}
3067
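The loop above is easier to follow with a toy model: the initial rate occupies the first few retry slots, then each successively lower rate fills the same number of slots until all entries are used. The following standalone sketch mimics that layout; the constants and the lower_rate() helper are stand-ins chosen for illustration, not the driver's real values.

#include <stdio.h>

#define MAX_RETRY_NUM 16	/* stand-in for LINK_QUAL_MAX_RETRY_NUM */

/* hypothetical "next lower rate" helper, just for the sketch */
static int lower_rate(int rate)
{
	return rate > 0 ? rate - 1 : 0;
}

int main(void)
{
	int table[MAX_RETRY_NUM];
	int rate = 7;		/* arbitrary starting rate index */
	int repeat = 3;		/* stand-in for IWL_HT_NUMBER_TRY */
	int index = 0;

	table[index++] = rate;	/* first entry, like rs_table[0] above */
	repeat--;

	while (index < MAX_RETRY_NUM) {
		/* repeat the current rate for the remaining tries */
		while (repeat > 0 && index < MAX_RETRY_NUM) {
			table[index++] = rate;
			repeat--;
		}
		/* then step down to the next lower rate */
		rate = lower_rate(rate);
		repeat = 3;
	}

	for (index = 0; index < MAX_RETRY_NUM; index++)
		printf("retry[%2d] -> rate %d\n", index, table[index]);
	return 0;
}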
3068static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
3069{
3070 return hw->priv;
3071}
3072/* the rate scaling API requires a free function to be implemented */
3073static void rs_free(void *priv_rate)
3074{
3075 return;
3076}
3077
3078static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
3079 void *priv_sta)
3080{
3081 struct iwl_priv *priv __maybe_unused = priv_r;
3082
3083 IWL_DEBUG_RATE(priv, "enter\n");
3084 IWL_DEBUG_RATE(priv, "leave\n");
3085}
3086
3087#ifdef CONFIG_MAC80211_DEBUGFS
3088static int open_file_generic(struct inode *inode, struct file *file)
3089{
3090 file->private_data = inode->i_private;
3091 return 0;
3092}
3093static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3094 u32 *rate_n_flags, int index)
3095{
3096 struct iwl_priv *priv;
3097 u8 valid_tx_ant;
3098 u8 ant_sel_tx;
3099
3100 priv = lq_sta->drv;
3101 valid_tx_ant = priv->hw_params.valid_tx_ant;
3102 if (lq_sta->dbg_fixed_rate) {
3103 ant_sel_tx =
3104 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
3105 >> RATE_MCS_ANT_POS);
3106 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
3107 *rate_n_flags = lq_sta->dbg_fixed_rate;
3108 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
3109 } else {
3110 lq_sta->dbg_fixed_rate = 0;
3111 IWL_ERR(priv,
3112 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
3113 ant_sel_tx, valid_tx_ant);
3114 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
3115 }
3116 } else {
3117 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
3118 }
3119}
3120
3121static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
3122 const char __user *user_buf, size_t count, loff_t *ppos)
3123{
3124 struct iwl_lq_sta *lq_sta = file->private_data;
3125 struct iwl_priv *priv;
3126 char buf[64];
3127 size_t buf_size;
3128 u32 parsed_rate;
3129
3130
3131 priv = lq_sta->drv;
3132 memset(buf, 0, sizeof(buf));
3133 buf_size = min(count, sizeof(buf) - 1);
3134 if (copy_from_user(buf, user_buf, buf_size))
3135 return -EFAULT;
3136
3137 if (sscanf(buf, "%x", &parsed_rate) == 1)
3138 lq_sta->dbg_fixed_rate = parsed_rate;
3139 else
3140 lq_sta->dbg_fixed_rate = 0;
3141
3142 rs_program_fix_rate(priv, lq_sta);
3143
3144 return count;
3145}
3146
3147static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3148 char __user *user_buf, size_t count, loff_t *ppos)
3149{
3150 char *buff;
3151 int desc = 0;
3152 int i = 0;
3153 int index = 0;
3154 ssize_t ret;
3155
3156 struct iwl_lq_sta *lq_sta = file->private_data;
3157 struct iwl_priv *priv;
3158 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
3159
3160 priv = lq_sta->drv;
3161 buff = kmalloc(1024, GFP_KERNEL);
3162 if (!buff)
3163 return -ENOMEM;
3164
3165 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
3166 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
3167 lq_sta->total_failed, lq_sta->total_success,
3168 lq_sta->active_legacy_rate);
3169 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3170 lq_sta->dbg_fixed_rate);
3171 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3172 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
3173 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
3174 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
3175 desc += sprintf(buff+desc, "lq type %s\n",
3176 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
3177 if (is_Ht(tbl->lq_type)) {
3178 desc += sprintf(buff+desc, " %s",
3179 (is_siso(tbl->lq_type)) ? "SISO" :
3180 ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
3181 desc += sprintf(buff+desc, " %s",
3182 (tbl->is_ht40) ? "40MHz" : "20MHz");
3183 desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
3184 (lq_sta->is_green) ? "GF enabled" : "",
3185 (lq_sta->is_agg) ? "AGG on" : "");
3186 }
3187 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
3188 lq_sta->last_rate_n_flags);
3189 desc += sprintf(buff+desc, "general:"
3190 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
3191 lq_sta->lq.general_params.flags,
3192 lq_sta->lq.general_params.mimo_delimiter,
3193 lq_sta->lq.general_params.single_stream_ant_msk,
3194 lq_sta->lq.general_params.dual_stream_ant_msk);
3195
3196 desc += sprintf(buff+desc, "agg:"
3197 "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
3198 le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
3199 lq_sta->lq.agg_params.agg_dis_start_th,
3200 lq_sta->lq.agg_params.agg_frame_cnt_limit);
3201
3202 desc += sprintf(buff+desc,
3203 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
3204 lq_sta->lq.general_params.start_rate_index[0],
3205 lq_sta->lq.general_params.start_rate_index[1],
3206 lq_sta->lq.general_params.start_rate_index[2],
3207 lq_sta->lq.general_params.start_rate_index[3]);
3208
3209 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3210 index = iwl_hwrate_to_plcp_idx(
3211 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
3212 if (is_legacy(tbl->lq_type)) {
3213 desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
3214 i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
3215 iwl_rate_mcs[index].mbps);
3216 } else {
3217 desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps (%s)\n",
3218 i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
3219 iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
3220 }
3221 }
3222
3223 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3224 kfree(buff);
3225 return ret;
3226}
3227
3228static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
3229 .write = rs_sta_dbgfs_scale_table_write,
3230 .read = rs_sta_dbgfs_scale_table_read,
3231 .open = open_file_generic,
3232 .llseek = default_llseek,
3233};
3234static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
3235 char __user *user_buf, size_t count, loff_t *ppos)
3236{
3237 char *buff;
3238 int desc = 0;
3239 int i, j;
3240 ssize_t ret;
3241
3242 struct iwl_lq_sta *lq_sta = file->private_data;
3243
3244 buff = kmalloc(1024, GFP_KERNEL);
3245 if (!buff)
3246 return -ENOMEM;
3247
3248 for (i = 0; i < LQ_SIZE; i++) {
3249 desc += sprintf(buff+desc,
3250 "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
3251 "rate=0x%X\n",
3252 lq_sta->active_tbl == i ? "*" : "x",
3253 lq_sta->lq_info[i].lq_type,
3254 lq_sta->lq_info[i].is_SGI,
3255 lq_sta->lq_info[i].is_ht40,
3256 lq_sta->lq_info[i].is_dup,
3257 lq_sta->is_green,
3258 lq_sta->lq_info[i].current_rate);
3259 for (j = 0; j < IWL_RATE_COUNT; j++) {
3260 desc += sprintf(buff+desc,
3261 "counter=%d success=%d %%=%d\n",
3262 lq_sta->lq_info[i].win[j].counter,
3263 lq_sta->lq_info[i].win[j].success_counter,
3264 lq_sta->lq_info[i].win[j].success_ratio);
3265 }
3266 }
3267 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3268 kfree(buff);
3269 return ret;
3270}
3271
3272static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
3273 .read = rs_sta_dbgfs_stats_table_read,
3274 .open = open_file_generic,
3275 .llseek = default_llseek,
3276};
3277
3278static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
3279 char __user *user_buf, size_t count, loff_t *ppos)
3280{
3281 struct iwl_lq_sta *lq_sta = file->private_data;
3282 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
3283 char buff[120];
3284 int desc = 0;
3285
3286 if (is_Ht(tbl->lq_type))
3287 desc += sprintf(buff+desc,
3288 "Bit Rate= %d Mb/s\n",
3289 tbl->expected_tpt[lq_sta->last_txrate_idx]);
3290 else
3291 desc += sprintf(buff+desc,
3292 "Bit Rate= %d Mb/s\n",
3293 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
3294
3295 return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3296}
3297
3298static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
3299 .read = rs_sta_dbgfs_rate_scale_data_read,
3300 .open = open_file_generic,
3301 .llseek = default_llseek,
3302};
3303
3304static void rs_add_debugfs(void *priv, void *priv_sta,
3305 struct dentry *dir)
3306{
3307 struct iwl_lq_sta *lq_sta = priv_sta;
3308 lq_sta->rs_sta_dbgfs_scale_table_file =
3309 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
3310 lq_sta, &rs_sta_dbgfs_scale_table_ops);
3311 lq_sta->rs_sta_dbgfs_stats_table_file =
3312 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
3313 lq_sta, &rs_sta_dbgfs_stats_table_ops);
3314 lq_sta->rs_sta_dbgfs_rate_scale_data_file =
3315 debugfs_create_file("rate_scale_data", S_IRUSR, dir,
3316 lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
3317 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
3318 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
3319 &lq_sta->tx_agg_tid_en);
3320
3321}
3322
3323static void rs_remove_debugfs(void *priv, void *priv_sta)
3324{
3325 struct iwl_lq_sta *lq_sta = priv_sta;
3326 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
3327 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
3328 debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
3329 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
3330}
3331#endif
3332
3333/*
3334 * Initialization of rate scaling information is done by the driver
3335 * after the station is added. Since mac80211 calls this function
3336 * before a station is added, we ignore it.
3337 */
3338static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
3339 struct ieee80211_sta *sta, void *priv_sta)
3340{
3341}
3342static struct rate_control_ops rs_ops = {
3343 .module = NULL,
3344 .name = RS_NAME,
3345 .tx_status = rs_tx_status,
3346 .get_rate = rs_get_rate,
3347 .rate_init = rs_rate_init_stub,
3348 .alloc = rs_alloc,
3349 .free = rs_free,
3350 .alloc_sta = rs_alloc_sta,
3351 .free_sta = rs_free_sta,
3352#ifdef CONFIG_MAC80211_DEBUGFS
3353 .add_sta_debugfs = rs_add_debugfs,
3354 .remove_sta_debugfs = rs_remove_debugfs,
3355#endif
3356};
3357
3358int iwlagn_rate_control_register(void)
3359{
3360 return ieee80211_rate_control_register(&rs_ops);
3361}
3362
3363void iwlagn_rate_control_unregister(void)
3364{
3365 ieee80211_rate_control_unregister(&rs_ops);
3366}
3367
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
new file mode 100644
index 00000000000..bdae82e7fa9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -0,0 +1,463 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_agn_rs_h__
28#define __iwl_agn_rs_h__
29
30struct iwl_rate_info {
31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
32 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
33 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
34 u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
35 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
36 u8 prev_ieee; /* previous rate in IEEE speeds */
37 u8 next_ieee; /* next rate in IEEE speeds */
38 u8 prev_rs; /* previous rate used in rs algo */
39 u8 next_rs; /* next rate used in rs algo */
40 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
41 u8 next_rs_tgg; /* next rate used in TGG rs algo */
42};
43
44/*
45 * These serve as indexes into
46 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
47 */
48enum {
49 IWL_RATE_1M_INDEX = 0,
50 IWL_RATE_2M_INDEX,
51 IWL_RATE_5M_INDEX,
52 IWL_RATE_11M_INDEX,
53 IWL_RATE_6M_INDEX,
54 IWL_RATE_9M_INDEX,
55 IWL_RATE_12M_INDEX,
56 IWL_RATE_18M_INDEX,
57 IWL_RATE_24M_INDEX,
58 IWL_RATE_36M_INDEX,
59 IWL_RATE_48M_INDEX,
60 IWL_RATE_54M_INDEX,
61 IWL_RATE_60M_INDEX,
62 IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/
63 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
64 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
65 IWL_RATE_INVALID = IWL_RATE_COUNT,
66};
67
68enum {
69 IWL_RATE_6M_INDEX_TABLE = 0,
70 IWL_RATE_9M_INDEX_TABLE,
71 IWL_RATE_12M_INDEX_TABLE,
72 IWL_RATE_18M_INDEX_TABLE,
73 IWL_RATE_24M_INDEX_TABLE,
74 IWL_RATE_36M_INDEX_TABLE,
75 IWL_RATE_48M_INDEX_TABLE,
76 IWL_RATE_54M_INDEX_TABLE,
77 IWL_RATE_1M_INDEX_TABLE,
78 IWL_RATE_2M_INDEX_TABLE,
79 IWL_RATE_5M_INDEX_TABLE,
80 IWL_RATE_11M_INDEX_TABLE,
81 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
82};
83
84enum {
85 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
86 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
87 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
88 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
89};
90
91/* #define vs. enum to keep from defaulting to 'large integer' */
92#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
93#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
94#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
95#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
96#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
97#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
98#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
99#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
100#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
101#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
102#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
103#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
104#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
105
106/* uCode API values for legacy bit rates, both OFDM and CCK */
107enum {
108 IWL_RATE_6M_PLCP = 13,
109 IWL_RATE_9M_PLCP = 15,
110 IWL_RATE_12M_PLCP = 5,
111 IWL_RATE_18M_PLCP = 7,
112 IWL_RATE_24M_PLCP = 9,
113 IWL_RATE_36M_PLCP = 11,
114 IWL_RATE_48M_PLCP = 1,
115 IWL_RATE_54M_PLCP = 3,
116 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
117 IWL_RATE_1M_PLCP = 10,
118 IWL_RATE_2M_PLCP = 20,
119 IWL_RATE_5M_PLCP = 55,
120 IWL_RATE_11M_PLCP = 110,
121 /*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */
122 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
123};
124
125/* uCode API values for OFDM high-throughput (HT) bit rates */
126enum {
127 IWL_RATE_SISO_6M_PLCP = 0,
128 IWL_RATE_SISO_12M_PLCP = 1,
129 IWL_RATE_SISO_18M_PLCP = 2,
130 IWL_RATE_SISO_24M_PLCP = 3,
131 IWL_RATE_SISO_36M_PLCP = 4,
132 IWL_RATE_SISO_48M_PLCP = 5,
133 IWL_RATE_SISO_54M_PLCP = 6,
134 IWL_RATE_SISO_60M_PLCP = 7,
135 IWL_RATE_MIMO2_6M_PLCP = 0x8,
136 IWL_RATE_MIMO2_12M_PLCP = 0x9,
137 IWL_RATE_MIMO2_18M_PLCP = 0xa,
138 IWL_RATE_MIMO2_24M_PLCP = 0xb,
139 IWL_RATE_MIMO2_36M_PLCP = 0xc,
140 IWL_RATE_MIMO2_48M_PLCP = 0xd,
141 IWL_RATE_MIMO2_54M_PLCP = 0xe,
142 IWL_RATE_MIMO2_60M_PLCP = 0xf,
143 IWL_RATE_MIMO3_6M_PLCP = 0x10,
144 IWL_RATE_MIMO3_12M_PLCP = 0x11,
145 IWL_RATE_MIMO3_18M_PLCP = 0x12,
146 IWL_RATE_MIMO3_24M_PLCP = 0x13,
147 IWL_RATE_MIMO3_36M_PLCP = 0x14,
148 IWL_RATE_MIMO3_48M_PLCP = 0x15,
149 IWL_RATE_MIMO3_54M_PLCP = 0x16,
150 IWL_RATE_MIMO3_60M_PLCP = 0x17,
151 IWL_RATE_SISO_INVM_PLCP,
152 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
153 IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
154};
155
156/* MAC header values for bit rates */
157enum {
158 IWL_RATE_6M_IEEE = 12,
159 IWL_RATE_9M_IEEE = 18,
160 IWL_RATE_12M_IEEE = 24,
161 IWL_RATE_18M_IEEE = 36,
162 IWL_RATE_24M_IEEE = 48,
163 IWL_RATE_36M_IEEE = 72,
164 IWL_RATE_48M_IEEE = 96,
165 IWL_RATE_54M_IEEE = 108,
166 IWL_RATE_60M_IEEE = 120,
167 IWL_RATE_1M_IEEE = 2,
168 IWL_RATE_2M_IEEE = 4,
169 IWL_RATE_5M_IEEE = 11,
170 IWL_RATE_11M_IEEE = 22,
171};
172
173#define IWL_CCK_BASIC_RATES_MASK \
174 (IWL_RATE_1M_MASK | \
175 IWL_RATE_2M_MASK)
176
177#define IWL_CCK_RATES_MASK \
178 (IWL_CCK_BASIC_RATES_MASK | \
179 IWL_RATE_5M_MASK | \
180 IWL_RATE_11M_MASK)
181
182#define IWL_OFDM_BASIC_RATES_MASK \
183 (IWL_RATE_6M_MASK | \
184 IWL_RATE_12M_MASK | \
185 IWL_RATE_24M_MASK)
186
187#define IWL_OFDM_RATES_MASK \
188 (IWL_OFDM_BASIC_RATES_MASK | \
189 IWL_RATE_9M_MASK | \
190 IWL_RATE_18M_MASK | \
191 IWL_RATE_36M_MASK | \
192 IWL_RATE_48M_MASK | \
193 IWL_RATE_54M_MASK)
194
195#define IWL_BASIC_RATES_MASK \
196 (IWL_OFDM_BASIC_RATES_MASK | \
197 IWL_CCK_BASIC_RATES_MASK)
198
199#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
200
201#define IWL_INVALID_VALUE -1
202
203#define IWL_MIN_RSSI_VAL -100
204#define IWL_MAX_RSSI_VAL 0
205
206/* These values specify how many Tx frame attempts to make before
207 * searching for a new modulation mode */
208#define IWL_LEGACY_FAILURE_LIMIT 160
209#define IWL_LEGACY_SUCCESS_LIMIT 480
210#define IWL_LEGACY_TABLE_COUNT 160
211
212#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
213#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
214#define IWL_NONE_LEGACY_TABLE_COUNT 1500
215
216/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
217#define IWL_RS_GOOD_RATIO 12800 /* 100% */
218#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
219#define IWL_RATE_HIGH_TH 10880 /* 85% */
220#define IWL_RATE_INCREASE_TH 6400 /* 50% */
221#define IWL_RATE_DECREASE_TH 1920 /* 15% */
222
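These thresholds use the same fixed-point scale as the rate-scale window's success_ratio (percent * 128, so 100% == 12800). Below is a standalone sketch of how a measured window compares against them; the sample counters and the simple three-way decision are illustrative only, not the driver's full algorithm.

#include <stdio.h>

#define IWL_RS_GOOD_RATIO	12800	/* 100% */
#define IWL_RATE_SCALE_SWITCH	10880	/*  85% */
#define IWL_RATE_INCREASE_TH	 6400	/*  50% */
#define IWL_RATE_DECREASE_TH	 1920	/*  15% */

int main(void)
{
	int success_counter = 40;	/* ACKed frames in the window (example) */
	int counter = 45;		/* attempted frames in the window */

	/* same fixed-point convention: percent * 128 */
	int success_ratio = (128 * 100 * success_counter) / counter;

	printf("success_ratio = %d (%d%%)\n",
	       success_ratio, success_ratio / 128);

	if (success_ratio <= IWL_RATE_DECREASE_TH)
		printf("-> below 15%%, rate would be decreased\n");
	else if (success_ratio >= IWL_RATE_SCALE_SWITCH)
		printf("-> above 85%%, good enough to consider switching up\n");
	else
		printf("-> in between, keep probing\n");
	return 0;
}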
223/* possible actions when in legacy mode */
224#define IWL_LEGACY_SWITCH_ANTENNA1 0
225#define IWL_LEGACY_SWITCH_ANTENNA2 1
226#define IWL_LEGACY_SWITCH_SISO 2
227#define IWL_LEGACY_SWITCH_MIMO2_AB 3
228#define IWL_LEGACY_SWITCH_MIMO2_AC 4
229#define IWL_LEGACY_SWITCH_MIMO2_BC 5
230#define IWL_LEGACY_SWITCH_MIMO3_ABC 6
231
232/* possible actions when in siso mode */
233#define IWL_SISO_SWITCH_ANTENNA1 0
234#define IWL_SISO_SWITCH_ANTENNA2 1
235#define IWL_SISO_SWITCH_MIMO2_AB 2
236#define IWL_SISO_SWITCH_MIMO2_AC 3
237#define IWL_SISO_SWITCH_MIMO2_BC 4
238#define IWL_SISO_SWITCH_GI 5
239#define IWL_SISO_SWITCH_MIMO3_ABC 6
240
241
242/* possible actions when in mimo mode */
243#define IWL_MIMO2_SWITCH_ANTENNA1 0
244#define IWL_MIMO2_SWITCH_ANTENNA2 1
245#define IWL_MIMO2_SWITCH_SISO_A 2
246#define IWL_MIMO2_SWITCH_SISO_B 3
247#define IWL_MIMO2_SWITCH_SISO_C 4
248#define IWL_MIMO2_SWITCH_GI 5
249#define IWL_MIMO2_SWITCH_MIMO3_ABC 6
250
251
252/* possible actions when in mimo3 mode */
253#define IWL_MIMO3_SWITCH_ANTENNA1 0
254#define IWL_MIMO3_SWITCH_ANTENNA2 1
255#define IWL_MIMO3_SWITCH_SISO_A 2
256#define IWL_MIMO3_SWITCH_SISO_B 3
257#define IWL_MIMO3_SWITCH_SISO_C 4
258#define IWL_MIMO3_SWITCH_MIMO2_AB 5
259#define IWL_MIMO3_SWITCH_MIMO2_AC 6
260#define IWL_MIMO3_SWITCH_MIMO2_BC 7
261#define IWL_MIMO3_SWITCH_GI 8
262
263
264#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
265#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
266
267/*FIXME:RS:add possible actions for MIMO3*/
268
269#define IWL_ACTION_LIMIT 3 /* # possible actions */
270
271#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
272
273/* load per tid defines for A-MPDU activation */
274#define IWL_AGG_TPT_THREHOLD 0
275#define IWL_AGG_LOAD_THRESHOLD 10
276#define IWL_AGG_ALL_TID 0xff
277#define TID_QUEUE_CELL_SPACING 50 /*mS */
278#define TID_QUEUE_MAX_SIZE 20
279#define TID_ROUND_VALUE 5 /* mS */
280#define TID_MAX_LOAD_COUNT 8
281
282#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
283#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
284
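TIME_WRAP_AROUND() returns the elapsed time from x to y even if the underlying counter wrapped past zero in between. A quick standalone check of the macro with made-up timestamp values:

#include <stdio.h>

#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))

int main(void)
{
	unsigned long x, y;

	/* normal case: y is later than x */
	x = 1000; y = 1050;
	printf("no wrap: %lu\n", TIME_WRAP_AROUND(x, y));	/* 50 */

	/* wrapped case: the counter rolled over between x and y */
	x = (unsigned long)-20; y = 30;
	printf("wrapped: %lu\n", TIME_WRAP_AROUND(x, y));	/* 50 */
	return 0;
}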
285extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
286
287enum iwl_table_type {
288 LQ_NONE,
289 LQ_G, /* legacy types */
290 LQ_A,
291 LQ_SISO, /* high-throughput types */
292 LQ_MIMO2,
293 LQ_MIMO3,
294 LQ_MAX,
295};
296
297#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
298#define is_siso(tbl) ((tbl) == LQ_SISO)
299#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
300#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
301#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
302#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
303#define is_a_band(tbl) ((tbl) == LQ_A)
304#define is_g_and(tbl) ((tbl) == LQ_G)
305
306#define ANT_NONE 0x0
307#define ANT_A BIT(0)
308#define ANT_B BIT(1)
309#define ANT_AB (ANT_A | ANT_B)
310#define ANT_C BIT(2)
311#define ANT_AC (ANT_A | ANT_C)
312#define ANT_BC (ANT_B | ANT_C)
313#define ANT_ABC (ANT_AB | ANT_C)
314
315#define IWL_MAX_MCS_DISPLAY_SIZE 12
316
317struct iwl_rate_mcs_info {
318 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
319 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
320};
321
322/**
323 * struct iwl_rate_scale_data -- tx success history for one rate
324 */
325struct iwl_rate_scale_data {
326 u64 data; /* bitmap of successful frames */
327 s32 success_counter; /* number of frames successful */
328 s32 success_ratio; /* per-cent * 128 */
329 s32 counter; /* number of frames attempted */
330 s32 average_tpt; /* success ratio * expected throughput */
331 unsigned long stamp;
332};
333
334/**
335 * struct iwl_scale_tbl_info -- tx params and success history for all rates
336 *
337 * There are two of these in struct iwl_lq_sta,
338 * one for "active", and one for "search".
339 */
340struct iwl_scale_tbl_info {
341 enum iwl_table_type lq_type;
342 u8 ant_type;
343 u8 is_SGI; /* 1 = short guard interval */
344 u8 is_ht40; /* 1 = 40 MHz channel width */
345 u8 is_dup; /* 1 = duplicated data streams */
346 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
347 u8 max_search; /* maximum number of tables we can search */
348 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
349 u32 current_rate; /* rate_n_flags, uCode API format */
350 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
351};
352
353struct iwl_traffic_load {
354 unsigned long time_stamp; /* age of the oldest statistics */
355 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
356 * slice */
357 u32 total; /* total num of packets during the
358 * last TID_MAX_TIME_DIFF */
359 u8 queue_count; /* number of queues that have
360 * been used since the last cleanup */
361 u8 head; /* start of the circular buffer */
362};
363
364/**
365 * struct iwl_lq_sta -- driver's rate scaling private structure
366 *
367 * Pointer to this gets passed back and forth between driver and mac80211.
368 */
369struct iwl_lq_sta {
370 u8 active_tbl; /* index of active table, range 0-1 */
371 u8 enable_counter; /* indicates HT mode */
372 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
373 u8 search_better_tbl; /* 1: currently trying alternate mode */
374 s32 last_tpt;
375
376 /* The following determine when to search for a new mode */
377 u32 table_count_limit;
378 u32 max_failure_limit; /* # failed frames before new search */
379 u32 max_success_limit; /* # successful frames before new search */
380 u32 table_count;
381 u32 total_failed; /* total failed frames, any/all rates */
382 u32 total_success; /* total successful frames, any/all rates */
383 u64 flush_timer; /* time staying in mode before new search */
384
385 u8 action_counter; /* # mode-switch actions tried */
386 u8 is_green;
387 u8 is_dup;
388 enum ieee80211_band band;
389
390 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
391 u32 supp_rates;
392 u16 active_legacy_rate;
393 u16 active_siso_rate;
394 u16 active_mimo2_rate;
395 u16 active_mimo3_rate;
396 s8 max_rate_idx; /* Max rate set by user */
397 u8 missed_rate_counter;
398
399 struct iwl_link_quality_cmd lq;
400 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
401 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
402 u8 tx_agg_tid_en;
403#ifdef CONFIG_MAC80211_DEBUGFS
404 struct dentry *rs_sta_dbgfs_scale_table_file;
405 struct dentry *rs_sta_dbgfs_stats_table_file;
406 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
407 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
408 u32 dbg_fixed_rate;
409#endif
410 struct iwl_priv *drv;
411
412 /* used to be in sta_info */
413 int last_txrate_idx;
414 /* last tx rate_n_flags */
415 u32 last_rate_n_flags;
416 /* packets destined for this STA are aggregated */
417 u8 is_agg;
418 /* BT traffic this sta was last updated in */
419 u8 last_bt_traffic;
420};
421
422static inline u8 num_of_ant(u8 mask)
423{
424 return !!((mask) & ANT_A) +
425 !!((mask) & ANT_B) +
426 !!((mask) & ANT_C);
427}
428
429static inline u8 first_antenna(u8 mask)
430{
431 if (mask & ANT_A)
432 return ANT_A;
433 if (mask & ANT_B)
434 return ANT_B;
435 return ANT_C;
436}
437
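The helpers above reduce an antenna bitmask either to a count of active antennas or to the lowest antenna that is set. A standalone sanity check (the mask defines and helpers are duplicated here so the snippet compiles on its own):

#include <stdio.h>

#define BIT(n)	(1U << (n))
#define ANT_A	BIT(0)
#define ANT_B	BIT(1)
#define ANT_C	BIT(2)
#define ANT_AB	(ANT_A | ANT_B)
#define ANT_BC	(ANT_B | ANT_C)
#define ANT_ABC	(ANT_A | ANT_B | ANT_C)

static unsigned char num_of_ant(unsigned char mask)
{
	return !!(mask & ANT_A) + !!(mask & ANT_B) + !!(mask & ANT_C);
}

static unsigned char first_antenna(unsigned char mask)
{
	if (mask & ANT_A)
		return ANT_A;
	if (mask & ANT_B)
		return ANT_B;
	return ANT_C;
}

int main(void)
{
	printf("num_of_ant(ANT_AB)    = %u\n", num_of_ant(ANT_AB));	/* 2 */
	printf("num_of_ant(ANT_ABC)   = %u\n", num_of_ant(ANT_ABC));	/* 3 */
	printf("first_antenna(ANT_BC) = 0x%x\n", first_antenna(ANT_BC));	/* ANT_B */
	return 0;
}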
438
439/* Initialize station's rate scaling information after adding station */
440extern void iwl_rs_rate_init(struct iwl_priv *priv,
441 struct ieee80211_sta *sta, u8 sta_id);
442
443/**
444 * iwl_rate_control_register - Register the rate control algorithm callbacks
445 *
446 * Since the rate control algorithm is hardware specific, there is no need
447 * or reason to place it as a stand alone module. The driver can call
448 * iwl_rate_control_register in order to register the rate control callbacks
449 * with the mac80211 subsystem. This should be performed prior to calling
450 * ieee80211_register_hw
451 *
452 */
453extern int iwlagn_rate_control_register(void);
454
455/**
456 * iwl_rate_control_unregister - Unregister the rate control callbacks
457 *
458 * This should be called after calling ieee80211_unregister_hw, but before
459 * the driver is unloaded.
460 */
461extern void iwlagn_rate_control_unregister(void);
462
463#endif /* __iwl_agn_rs_h__ */
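Per the doc comments above, registration must happen before ieee80211_register_hw() and unregistration after ieee80211_unregister_hw(). The following is a rough, non-buildable ordering sketch of how a driver's init/exit paths would honour that; the 'hw' pointer and the error handling are hypothetical, and only the call ordering is taken from the comments.

/* ordering sketch only -- 'hw' is assumed to be set up elsewhere */
static int __init example_driver_init(void)
{
	int ret;

	ret = iwlagn_rate_control_register();	/* before ieee80211_register_hw() */
	if (ret)
		return ret;

	ret = ieee80211_register_hw(hw);
	if (ret)
		iwlagn_rate_control_unregister();
	return ret;
}

static void __exit example_driver_exit(void)
{
	ieee80211_unregister_hw(hw);		/* unregister from mac80211 first */
	iwlagn_rate_control_unregister();	/* then drop the rate-control ops */
}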
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
new file mode 100644
index 00000000000..5493f94d23c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -0,0 +1,965 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include "iwl-dev.h"
28#include "iwl-agn.h"
29#include "iwl-sta.h"
30#include "iwl-core.h"
31#include "iwl-agn-calib.h"
32#include "iwl-helpers.h"
33#include "iwl-trans.h"
34
35static int iwlagn_disable_bss(struct iwl_priv *priv,
36 struct iwl_rxon_context *ctx,
37 struct iwl_rxon_cmd *send)
38{
39 __le32 old_filter = send->filter_flags;
40 int ret;
41
42 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
43 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
44 CMD_SYNC, sizeof(*send), send);
45
46 send->filter_flags = old_filter;
47
48 if (ret)
49 IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
50
51 return ret;
52}
53
54static int iwlagn_disable_pan(struct iwl_priv *priv,
55 struct iwl_rxon_context *ctx,
56 struct iwl_rxon_cmd *send)
57{
58 struct iwl_notification_wait disable_wait;
59 __le32 old_filter = send->filter_flags;
60 u8 old_dev_type = send->dev_type;
61 int ret;
62
63 iwlagn_init_notification_wait(priv, &disable_wait,
64 REPLY_WIPAN_DEACTIVATION_COMPLETE,
65 NULL, NULL);
66
67 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
68 send->dev_type = RXON_DEV_TYPE_P2P;
69 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
70 CMD_SYNC, sizeof(*send), send);
71
72 send->filter_flags = old_filter;
73 send->dev_type = old_dev_type;
74
75 if (ret) {
76 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
77 iwlagn_remove_notification(priv, &disable_wait);
78 } else {
79 ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
80 if (ret)
81 IWL_ERR(priv, "Timed out waiting for PAN disable\n");
82 }
83
84 return ret;
85}
86
87static int iwlagn_disconn_pan(struct iwl_priv *priv,
88 struct iwl_rxon_context *ctx,
89 struct iwl_rxon_cmd *send)
90{
91 __le32 old_filter = send->filter_flags;
92 int ret;
93
94 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
95 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
96 sizeof(*send), send);
97
98 send->filter_flags = old_filter;
99
100 return ret;
101}
102
103static void iwlagn_update_qos(struct iwl_priv *priv,
104 struct iwl_rxon_context *ctx)
105{
106 int ret;
107
108 if (!ctx->is_active)
109 return;
110
111 ctx->qos_data.def_qos_parm.qos_flags = 0;
112
113 if (ctx->qos_data.qos_active)
114 ctx->qos_data.def_qos_parm.qos_flags |=
115 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
116
117 if (ctx->ht.enabled)
118 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
119
120 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
121 ctx->qos_data.qos_active,
122 ctx->qos_data.def_qos_parm.qos_flags);
123
124 ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC,
125 sizeof(struct iwl_qosparam_cmd),
126 &ctx->qos_data.def_qos_parm);
127 if (ret)
128 IWL_ERR(priv, "Failed to update QoS\n");
129}
130
131static int iwlagn_update_beacon(struct iwl_priv *priv,
132 struct ieee80211_vif *vif)
133{
134 lockdep_assert_held(&priv->mutex);
135
136 dev_kfree_skb(priv->beacon_skb);
137 priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
138 if (!priv->beacon_skb)
139 return -ENOMEM;
140 return iwlagn_send_beacon_cmd(priv);
141}
142
143static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
144 struct iwl_rxon_context *ctx)
145{
146 int ret = 0;
147 struct iwl_rxon_assoc_cmd rxon_assoc;
148 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
149 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
150
151 if ((rxon1->flags == rxon2->flags) &&
152 (rxon1->filter_flags == rxon2->filter_flags) &&
153 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
154 (rxon1->ofdm_ht_single_stream_basic_rates ==
155 rxon2->ofdm_ht_single_stream_basic_rates) &&
156 (rxon1->ofdm_ht_dual_stream_basic_rates ==
157 rxon2->ofdm_ht_dual_stream_basic_rates) &&
158 (rxon1->ofdm_ht_triple_stream_basic_rates ==
159 rxon2->ofdm_ht_triple_stream_basic_rates) &&
160 (rxon1->acquisition_data == rxon2->acquisition_data) &&
161 (rxon1->rx_chain == rxon2->rx_chain) &&
162 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
163 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
164 return 0;
165 }
166
167 rxon_assoc.flags = ctx->staging.flags;
168 rxon_assoc.filter_flags = ctx->staging.filter_flags;
169 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
170 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
171 rxon_assoc.reserved1 = 0;
172 rxon_assoc.reserved2 = 0;
173 rxon_assoc.reserved3 = 0;
174 rxon_assoc.ofdm_ht_single_stream_basic_rates =
175 ctx->staging.ofdm_ht_single_stream_basic_rates;
176 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
177 ctx->staging.ofdm_ht_dual_stream_basic_rates;
178 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
179 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
180 ctx->staging.ofdm_ht_triple_stream_basic_rates;
181 rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
182
183 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd,
184 CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
185 return ret;
186}
187
188static int iwlagn_rxon_disconn(struct iwl_priv *priv,
189 struct iwl_rxon_context *ctx)
190{
191 int ret;
192 struct iwl_rxon_cmd *active = (void *)&ctx->active;
193
194 if (ctx->ctxid == IWL_RXON_CTX_BSS) {
195 ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
196 } else {
197 ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
198 if (ret)
199 return ret;
200 if (ctx->vif) {
201 ret = iwl_send_rxon_timing(priv, ctx);
202 if (ret) {
203 IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
204 return ret;
205 }
206 ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
207 }
208 }
209 if (ret)
210 return ret;
211
212 /*
213 * Un-assoc RXON clears the station table and WEP
214 * keys, so we have to restore those afterwards.
215 */
216 iwl_clear_ucode_stations(priv, ctx);
217 /* update -- might need P2P now */
218 iwl_update_bcast_station(priv, ctx);
219 iwl_restore_stations(priv, ctx);
220 ret = iwl_restore_default_wep_keys(priv, ctx);
221 if (ret) {
222 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
223 return ret;
224 }
225
226 memcpy(active, &ctx->staging, sizeof(*active));
227 return 0;
228}
229
230static int iwlagn_rxon_connect(struct iwl_priv *priv,
231 struct iwl_rxon_context *ctx)
232{
233 int ret;
234 struct iwl_rxon_cmd *active = (void *)&ctx->active;
235
236 /* RXON timing must be before associated RXON */
237 if (ctx->ctxid == IWL_RXON_CTX_BSS) {
238 ret = iwl_send_rxon_timing(priv, ctx);
239 if (ret) {
240 IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
241 return ret;
242 }
243 }
244 /* QoS info may be cleared by previous un-assoc RXON */
245 iwlagn_update_qos(priv, ctx);
246
247 /*
248 * We'll run into this code path when beaconing is
249 * enabled, but then we also need to send the beacon
250 * to the device.
251 */
252 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
253 ret = iwlagn_update_beacon(priv, ctx->vif);
254 if (ret) {
255 IWL_ERR(priv,
256 "Error sending required beacon (%d)!\n",
257 ret);
258 return ret;
259 }
260 }
261
262 priv->start_calib = 0;
263 /*
264 * Apply the new configuration.
265 *
266 * Associated RXON doesn't clear the station table in uCode,
267 * so we don't need to restore stations etc. after this.
268 */
269 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
270 sizeof(struct iwl_rxon_cmd), &ctx->staging);
271 if (ret) {
272 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
273 return ret;
274 }
275 memcpy(active, &ctx->staging, sizeof(*active));
276
277 iwl_reprogram_ap_sta(priv, ctx);
278
279 /* IBSS beacon needs to be sent after setting assoc */
280 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
281 if (iwlagn_update_beacon(priv, ctx->vif))
282 IWL_ERR(priv, "Error sending IBSS beacon\n");
283 iwl_init_sensitivity(priv);
284
285 /*
286 * If we issue a new RXON command which required a tune then
287 * we must send a new TXPOWER command or we won't be able to
288 * Tx any frames.
289 *
290 * It's expected we set power here if channel is changing.
291 */
292 ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
293 if (ret) {
294 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
295 return ret;
296 }
297
298 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
299 priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
300 ieee80211_request_smps(ctx->vif,
301 priv->cfg->ht_params->smps_mode);
302
303 return 0;
304}
305
306int iwlagn_set_pan_params(struct iwl_priv *priv)
307{
308 struct iwl_wipan_params_cmd cmd;
309 struct iwl_rxon_context *ctx_bss, *ctx_pan;
310 int slot0 = 300, slot1 = 0;
311 int ret;
312
313 if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
314 return 0;
315
316 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
317
318 lockdep_assert_held(&priv->mutex);
319
320 ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
321 ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
322
323 /*
324 * If the PAN context is inactive, then we don't need to
325 * update the PAN parameters; the last thing we'll have
326 * done before it went inactive was to make the PAN
327 * parameters WLAN-only.
328 */
329 if (!ctx_pan->is_active)
330 return 0;
331
332 memset(&cmd, 0, sizeof(cmd));
333
334 /* only 2 slots are currently allowed */
335 cmd.num_slots = 2;
336
337 cmd.slots[0].type = 0; /* BSS */
338 cmd.slots[1].type = 1; /* PAN */
339
340 if (priv->hw_roc_channel) {
341 /* both contexts must be used for this to happen */
342 slot1 = priv->hw_roc_duration;
343 slot0 = IWL_MIN_SLOT_TIME;
344 } else if (ctx_bss->vif && ctx_pan->vif) {
345 int bcnint = ctx_pan->beacon_int;
346 int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
347
348 /* should be set, but seems unused?? */
349 cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
350
351 if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
352 bcnint &&
353 bcnint != ctx_bss->beacon_int) {
354 IWL_ERR(priv,
355 "beacon intervals don't match (%d, %d)\n",
356 ctx_bss->beacon_int, ctx_pan->beacon_int);
357 } else
358 bcnint = max_t(int, bcnint,
359 ctx_bss->beacon_int);
360 if (!bcnint)
361 bcnint = DEFAULT_BEACON_INTERVAL;
362 slot0 = bcnint / 2;
363 slot1 = bcnint - slot0;
364
365 if (test_bit(STATUS_SCAN_HW, &priv->status) ||
366 (!ctx_bss->vif->bss_conf.idle &&
367 !ctx_bss->vif->bss_conf.assoc)) {
368 slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
369 slot1 = IWL_MIN_SLOT_TIME;
370 } else if (!ctx_pan->vif->bss_conf.idle &&
371 !ctx_pan->vif->bss_conf.assoc) {
372 slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
373 slot0 = IWL_MIN_SLOT_TIME;
374 }
375 } else if (ctx_pan->vif) {
376 slot0 = 0;
377 slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
378 ctx_pan->beacon_int;
379 slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
380
381 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
382 slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
383 slot1 = IWL_MIN_SLOT_TIME;
384 }
385 }
386
387 cmd.slots[0].width = cpu_to_le16(slot0);
388 cmd.slots[1].width = cpu_to_le16(slot1);
389
390 ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC,
391 sizeof(cmd), &cmd);
392 if (ret)
393 IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
394
395 return ret;
396}
397
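The slot arithmetic above splits one beacon interval between the BSS and PAN contexts. Here is a standalone numeric illustration of the default even split and the scan-time skew; the MIN_SLOT_TIME value below is a stand-in, the driver's real IWL_MIN_SLOT_TIME lives in its headers.

#include <stdio.h>

#define MIN_SLOT_TIME 20	/* stand-in for IWL_MIN_SLOT_TIME, value assumed */

int main(void)
{
	int bcnint = 100;	/* typical beacon interval in TU (example) */
	int dtim = 1;
	int slot0, slot1;

	/* default: split the beacon interval evenly between BSS and PAN */
	slot0 = bcnint / 2;
	slot1 = bcnint - slot0;
	printf("even split: slot0=%d slot1=%d\n", slot0, slot1);

	/* while scanning (or BSS not yet associated): favour the BSS slot */
	slot0 = dtim * bcnint * 3 - MIN_SLOT_TIME;
	slot1 = MIN_SLOT_TIME;
	printf("scan skew:  slot0=%d slot1=%d\n", slot0, slot1);
	return 0;
}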
398/**
399 * iwlagn_commit_rxon - commit staging_rxon to hardware
400 *
401 * The RXON command in staging_rxon is committed to the hardware and
402 * the active_rxon structure is updated with the new data. This
403 * function correctly transitions out of the RXON_ASSOC_MSK state if
404 * a HW tune is required based on the RXON structure changes.
405 *
406 * The connect/disconnect flow should be as the following:
407 *
408 * 1. make sure send RXON command with association bit unset if not connect
409 * this should include the channel and the band for the candidate
410 * to be connected to
411 * 2. Add Station before RXON association with the AP
412 * 3. RXON_timing has to send before RXON for connection
413 * 4. full RXON command - associated bit set
414 * 5. use RXON_ASSOC command to update any flags changes
415 */
416int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
417{
418 /* cast away the const for active_rxon in this function */
419 struct iwl_rxon_cmd *active = (void *)&ctx->active;
420 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
421 int ret;
422
423 lockdep_assert_held(&priv->mutex);
424
425 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
426 return -EINVAL;
427
428 if (!iwl_is_alive(priv))
429 return -EBUSY;
430
431 /* This function hardcodes a bunch of dual-mode assumptions */
432 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
433
434 if (!ctx->is_active)
435 return 0;
436
437 /* always get timestamp with Rx frame */
438 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
439
440 if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->hw_roc_channel) {
441 struct ieee80211_channel *chan = priv->hw_roc_channel;
442
443 iwl_set_rxon_channel(priv, chan, ctx);
444 iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
445 ctx->staging.filter_flags |=
446 RXON_FILTER_ASSOC_MSK |
447 RXON_FILTER_PROMISC_MSK |
448 RXON_FILTER_CTL2HOST_MSK;
449 ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
450 new_assoc = true;
451
452 if (memcmp(&ctx->staging, &ctx->active,
453 sizeof(ctx->staging)) == 0)
454 return 0;
455 }
456
457 /*
458 * force CTS-to-self frame protection if RTS-CTS is not the
459 * preferred aggregation protection method
460 */
461 if (!(priv->cfg->ht_params &&
462 priv->cfg->ht_params->use_rts_for_aggregation))
463 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
464
465 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
466 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
467 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
468 else
469 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
470
471 iwl_print_rx_config_cmd(priv, ctx);
472 ret = iwl_check_rxon_cmd(priv, ctx);
473 if (ret) {
474 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
475 return -EINVAL;
476 }
477
478 /*
479 * We received a commit_rxon request:
480 * abort any previous channel switch if it is still in progress.
481 */
482 if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
483 (priv->switch_channel != ctx->staging.channel)) {
484 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
485 le16_to_cpu(priv->switch_channel));
486 iwl_chswitch_done(priv, false);
487 }
488
489 /*
490 * If we don't need to send a full RXON, we can use
491 * iwl_rxon_assoc_cmd which is used to reconfigure filter
492 * and other flags for the current radio configuration.
493 */
494 if (!iwl_full_rxon_required(priv, ctx)) {
495 ret = iwlagn_send_rxon_assoc(priv, ctx);
496 if (ret) {
497 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
498 return ret;
499 }
500
501 memcpy(active, &ctx->staging, sizeof(*active));
502 /*
503 * We do not commit tx power settings while the channel is changing;
504 * do it now if the settings changed.
505 */
506 iwl_set_tx_power(priv, priv->tx_power_next, false);
507
508 /* make sure we are in the right PS state */
509 iwl_power_update_mode(priv, true);
510
511 return 0;
512 }
513
514 iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto);
515
516 IWL_DEBUG_INFO(priv,
517 "Going to commit RXON\n"
518 " * with%s RXON_FILTER_ASSOC_MSK\n"
519 " * channel = %d\n"
520 " * bssid = %pM\n",
521 (new_assoc ? "" : "out"),
522 le16_to_cpu(ctx->staging.channel),
523 ctx->staging.bssid_addr);
524
525 /*
526 * Always clear associated first, but with the correct config.
527 * This is required as for example station addition for the
528 * AP station must be done after the BSSID is set to correctly
529 * set up filters in the device.
530 */
531 ret = iwlagn_rxon_disconn(priv, ctx);
532 if (ret)
533 return ret;
534
535 ret = iwlagn_set_pan_params(priv);
536 if (ret)
537 return ret;
538
539 if (new_assoc)
540 return iwlagn_rxon_connect(priv, ctx);
541
542 return 0;
543}
544
545void iwlagn_config_ht40(struct ieee80211_conf *conf,
546 struct iwl_rxon_context *ctx)
547{
548 if (conf_is_ht40_minus(conf)) {
549 ctx->ht.extension_chan_offset =
550 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
551 ctx->ht.is_40mhz = true;
552 } else if (conf_is_ht40_plus(conf)) {
553 ctx->ht.extension_chan_offset =
554 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
555 ctx->ht.is_40mhz = true;
556 } else {
557 ctx->ht.extension_chan_offset =
558 IEEE80211_HT_PARAM_CHA_SEC_NONE;
559 ctx->ht.is_40mhz = false;
560 }
561}
562
563int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
564{
565 struct iwl_priv *priv = hw->priv;
566 struct iwl_rxon_context *ctx;
567 struct ieee80211_conf *conf = &hw->conf;
568 struct ieee80211_channel *channel = conf->channel;
569 const struct iwl_channel_info *ch_info;
570 int ret = 0;
571
572 IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
573
574 mutex_lock(&priv->mutex);
575
576 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
577 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
578 goto out;
579 }
580
581 if (!iwl_is_ready(priv)) {
582 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
583 goto out;
584 }
585
586 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
587 IEEE80211_CONF_CHANGE_CHANNEL)) {
588 /* mac80211 uses static SM PS for non-HT, which is what we want */
589 priv->current_ht_config.smps = conf->smps_mode;
590
591 /*
592 * Recalculate chain counts.
593 *
594 * If monitor mode is enabled then mac80211 will
595 * set up the SM PS mode to OFF if an HT channel is
596 * configured.
597 */
598 for_each_context(priv, ctx)
599 iwlagn_set_rxon_chain(priv, ctx);
600 }
601
602 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
603 unsigned long flags;
604
605 ch_info = iwl_get_channel_info(priv, channel->band,
606 channel->hw_value);
607 if (!is_channel_valid(ch_info)) {
608 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
609 ret = -EINVAL;
610 goto out;
611 }
612
613 spin_lock_irqsave(&priv->lock, flags);
614
615 for_each_context(priv, ctx) {
616 /* Configure HT40 channels */
617 if (ctx->ht.enabled != conf_is_ht(conf))
618 ctx->ht.enabled = conf_is_ht(conf);
619
620 if (ctx->ht.enabled) {
621 /* if HT40 is used, it should not change
622 * after association, except on a channel switch */
623 if (!ctx->ht.is_40mhz ||
624 !iwl_is_associated_ctx(ctx))
625 iwlagn_config_ht40(conf, ctx);
626 } else
627 ctx->ht.is_40mhz = false;
628
629 /*
630 * Default to no protection. Protection mode will
631 * later be set from BSS config in iwl_ht_conf
632 */
633 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
634
635 /* if we are switching from ht to 2.4 clear flags
636 * from any ht related info since 2.4 does not
637 * support ht */
638 if (le16_to_cpu(ctx->staging.channel) !=
639 channel->hw_value)
640 ctx->staging.flags = 0;
641
642 iwl_set_rxon_channel(priv, channel, ctx);
643 iwl_set_rxon_ht(priv, &priv->current_ht_config);
644
645 iwl_set_flags_for_band(priv, ctx, channel->band,
646 ctx->vif);
647 }
648
649 spin_unlock_irqrestore(&priv->lock, flags);
650
651 iwl_update_bcast_stations(priv);
652
653 /*
654 * The list of supported rates and rate mask can be different
655 * for each band; since the band may have changed, reset
656 * the rate mask to what mac80211 lists.
657 */
658 iwl_set_rate(priv);
659 }
660
661 if (changed & (IEEE80211_CONF_CHANGE_PS |
662 IEEE80211_CONF_CHANGE_IDLE)) {
663 ret = iwl_power_update_mode(priv, false);
664 if (ret)
665 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
666 }
667
668 if (changed & IEEE80211_CONF_CHANGE_POWER) {
669 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
670 priv->tx_power_user_lmt, conf->power_level);
671
672 iwl_set_tx_power(priv, conf->power_level, false);
673 }
674
675 for_each_context(priv, ctx) {
676 if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
677 continue;
678 iwlagn_commit_rxon(priv, ctx);
679 }
680 out:
681 mutex_unlock(&priv->mutex);
682 return ret;
683}
684
685static void iwlagn_check_needed_chains(struct iwl_priv *priv,
686 struct iwl_rxon_context *ctx,
687 struct ieee80211_bss_conf *bss_conf)
688{
689 struct ieee80211_vif *vif = ctx->vif;
690 struct iwl_rxon_context *tmp;
691 struct ieee80211_sta *sta;
692 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
693 struct ieee80211_sta_ht_cap *ht_cap;
694 bool need_multiple;
695
696 lockdep_assert_held(&priv->mutex);
697
698 switch (vif->type) {
699 case NL80211_IFTYPE_STATION:
700 rcu_read_lock();
701 sta = ieee80211_find_sta(vif, bss_conf->bssid);
702 if (!sta) {
703 /*
704 * If at all, this can only happen through a race
705 * when the AP disconnects us while we're still
706 * setting up the connection, in that case mac80211
707 * will soon tell us about that.
708 */
709 need_multiple = false;
710 rcu_read_unlock();
711 break;
712 }
713
714 ht_cap = &sta->ht_cap;
715
716 need_multiple = true;
717
718 /*
719 * If the peer advertises no support for receiving 2 and 3
720 * stream MCS rates, it can't be transmitting them either.
721 */
722 if (ht_cap->mcs.rx_mask[1] == 0 &&
723 ht_cap->mcs.rx_mask[2] == 0) {
724 need_multiple = false;
725 } else if (!(ht_cap->mcs.tx_params &
726 IEEE80211_HT_MCS_TX_DEFINED)) {
727 /* If it can't TX MCS at all ... */
728 need_multiple = false;
729 } else if (ht_cap->mcs.tx_params &
730 IEEE80211_HT_MCS_TX_RX_DIFF) {
731 int maxstreams;
732
733 /*
734 * But if it can receive them, it might still not
735 * be able to transmit them, which is what we need
736 * to check here -- so check the number of streams
737 * it advertises for TX (if different from RX).
738 */
739
740 maxstreams = (ht_cap->mcs.tx_params &
741 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
742 maxstreams >>=
743 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
744 maxstreams += 1;
745
746 if (maxstreams <= 1)
747 need_multiple = false;
748 }
749
750 rcu_read_unlock();
751 break;
752 case NL80211_IFTYPE_ADHOC:
753 /* currently */
754 need_multiple = false;
755 break;
756 default:
757 /* only AP really */
758 need_multiple = true;
759 break;
760 }
761
762 ctx->ht_need_multiple_chains = need_multiple;
763
764 if (!need_multiple) {
765 /* check all contexts */
766 for_each_context(priv, tmp) {
767 if (!tmp->vif)
768 continue;
769 if (tmp->ht_need_multiple_chains) {
770 need_multiple = true;
771 break;
772 }
773 }
774 }
775
776 ht_conf->single_chain_sufficient = !need_multiple;
777}
778
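Here is a standalone sketch of the first HT-capability check this function applies to decide whether the peer can actually use more than one spatial stream. The rx_mask layout is re-stated locally (rx_mask[0] covers MCS 0-7, [1] MCS 8-15, [2] MCS 16-23) and the sample capability values are made up for illustration.

#include <stdio.h>

int main(void)
{
	/* sample HT MCS RX masks, one single-stream peer and one dual-stream */
	unsigned char rx_mask_single[3] = { 0xff, 0x00, 0x00 };
	unsigned char rx_mask_dual[3]   = { 0xff, 0xff, 0x00 };

	/* the driver's first check: a peer that advertises no 2- or
	 * 3-stream RX rates cannot be transmitting them either */
	printf("single-stream peer needs multiple chains: %s\n",
	       (rx_mask_single[1] || rx_mask_single[2]) ? "yes" : "no");
	printf("dual-stream peer needs multiple chains:   %s\n",
	       (rx_mask_dual[1] || rx_mask_dual[2]) ? "yes" : "no");
	return 0;
}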
779static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
780{
781 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
782 int ret;
783
784 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
785 iwl_is_any_associated(priv)) {
786 struct iwl_calib_chain_noise_reset_cmd cmd;
787
788 /* clear data for chain noise calibration algorithm */
789 data->chain_noise_a = 0;
790 data->chain_noise_b = 0;
791 data->chain_noise_c = 0;
792 data->chain_signal_a = 0;
793 data->chain_signal_b = 0;
794 data->chain_signal_c = 0;
795 data->beacon_count = 0;
796
797 memset(&cmd, 0, sizeof(cmd));
798 iwl_set_calib_hdr(&cmd.hdr,
799 priv->phy_calib_chain_noise_reset_cmd);
800 ret = trans_send_cmd_pdu(&priv->trans,
801 REPLY_PHY_CALIBRATION_CMD,
802 CMD_SYNC, sizeof(cmd), &cmd);
803 if (ret)
804 IWL_ERR(priv,
805 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
806 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
807 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
808 }
809}
810
811void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
812 struct ieee80211_vif *vif,
813 struct ieee80211_bss_conf *bss_conf,
814 u32 changes)
815{
816 struct iwl_priv *priv = hw->priv;
817 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
818 int ret;
819 bool force = false;
820
821 mutex_lock(&priv->mutex);
822
823 if (unlikely(!iwl_is_ready(priv))) {
824 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
825 mutex_unlock(&priv->mutex);
826 return;
827 }
828
829 if (unlikely(!ctx->vif)) {
830 IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
831 mutex_unlock(&priv->mutex);
832 return;
833 }
834
835 if (changes & BSS_CHANGED_BEACON_INT)
836 force = true;
837
838 if (changes & BSS_CHANGED_QOS) {
839 ctx->qos_data.qos_active = bss_conf->qos;
840 iwlagn_update_qos(priv, ctx);
841 }
842
843 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
844 if (vif->bss_conf.use_short_preamble)
845 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
846 else
847 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
848
849 if (changes & BSS_CHANGED_ASSOC) {
850 if (bss_conf->assoc) {
851 priv->timestamp = bss_conf->timestamp;
852 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
853 } else {
854 /*
855 * If we disassociate while there are pending
856 * frames, just wake up the queues and let the
857 * frames "escape" ... This shouldn't really
858 * be happening to start with, but we should
859 * not get stuck in this case either since it
860 * can happen if userspace gets confused.
861 */
862 if (ctx->last_tx_rejected) {
863 ctx->last_tx_rejected = false;
864 iwl_wake_any_queue(priv, ctx);
865 }
866 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
867
868 if (ctx->ctxid == IWL_RXON_CTX_BSS)
869 priv->have_rekey_data = false;
870 }
871
872 iwlagn_bt_coex_rssi_monitor(priv);
873 }
874
875 if (ctx->ht.enabled) {
876 ctx->ht.protection = bss_conf->ht_operation_mode &
877 IEEE80211_HT_OP_MODE_PROTECTION;
878 ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
879 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
880 iwlagn_check_needed_chains(priv, ctx, bss_conf);
881 iwl_set_rxon_ht(priv, &priv->current_ht_config);
882 }
883
884 iwlagn_set_rxon_chain(priv, ctx);
885
886 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
887 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
888 else
889 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
890
891 if (bss_conf->use_cts_prot)
892 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
893 else
894 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
895
896 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
897
898 if (vif->type == NL80211_IFTYPE_AP ||
899 vif->type == NL80211_IFTYPE_ADHOC) {
900 if (vif->bss_conf.enable_beacon) {
901 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
902 priv->beacon_ctx = ctx;
903 } else {
904 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
905 priv->beacon_ctx = NULL;
906 }
907 }
908
909 if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
910 iwlagn_commit_rxon(priv, ctx);
911
912 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
913 /*
914 * The chain noise calibration will enable PM upon
915 * completion. If calibration has already been run
916 * then we need to enable power management here.
917 */
918 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
919 iwl_power_update_mode(priv, false);
920
921 /* Enable RX differential gain and sensitivity calibrations */
922 if (!priv->disable_chain_noise_cal)
923 iwlagn_chain_noise_reset(priv);
924 priv->start_calib = 1;
925 }
926
927 if (changes & BSS_CHANGED_IBSS) {
928 ret = iwlagn_manage_ibss_station(priv, vif,
929 bss_conf->ibss_joined);
930 if (ret)
931 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
932 bss_conf->ibss_joined ? "add" : "remove",
933 bss_conf->bssid);
934 }
935
936 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
937 priv->beacon_ctx) {
938 if (iwlagn_update_beacon(priv, vif))
939 IWL_ERR(priv, "Error sending IBSS beacon\n");
940 }
941
942 mutex_unlock(&priv->mutex);
943}
944
945void iwlagn_post_scan(struct iwl_priv *priv)
946{
947 struct iwl_rxon_context *ctx;
948
949 /*
950 * We do not commit power settings while a scan is pending;
951 * do it now if the settings changed.
952 */
953 iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
954 iwl_set_tx_power(priv, priv->tx_power_next, false);
955
956 /*
957 * Since setting the RXON may have been deferred while
958 * performing the scan, fire one off if needed
959 */
960 for_each_context(priv, ctx)
961 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
962 iwlagn_commit_rxon(priv, ctx);
963
964 iwlagn_set_pan_params(priv);
965}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
new file mode 100644
index 00000000000..211a5ad6a4f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -0,0 +1,693 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-agn.h"
36#include "iwl-trans.h"
37
38static struct iwl_link_quality_cmd *
39iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
40{
41 int i, r;
42 struct iwl_link_quality_cmd *link_cmd;
43 u32 rate_flags = 0;
44 __le32 rate_n_flags;
45
46 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
47 if (!link_cmd) {
48 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
49 return NULL;
50 }
51
52 lockdep_assert_held(&priv->mutex);
53
54 /* Set up the rate scaling to start at selected rate, fall back
55 * all the way down to 1M in IEEE order, and then spin on 1M */
56 if (priv->band == IEEE80211_BAND_5GHZ)
57 r = IWL_RATE_6M_INDEX;
58 else if (ctx && ctx->vif && ctx->vif->p2p)
59 r = IWL_RATE_6M_INDEX;
60 else
61 r = IWL_RATE_1M_INDEX;
62
63 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
64 rate_flags |= RATE_MCS_CCK_MSK;
65
66 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
67 RATE_MCS_ANT_POS;
68 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
69 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
70 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
71
72 link_cmd->general_params.single_stream_ant_msk =
73 first_antenna(priv->hw_params.valid_tx_ant);
74
75 link_cmd->general_params.dual_stream_ant_msk =
76 priv->hw_params.valid_tx_ant &
77 ~first_antenna(priv->hw_params.valid_tx_ant);
78 if (!link_cmd->general_params.dual_stream_ant_msk) {
79 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
80 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
81 link_cmd->general_params.dual_stream_ant_msk =
82 priv->hw_params.valid_tx_ant;
83 }
84
85 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
86 link_cmd->agg_params.agg_time_limit =
87 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
88
89 link_cmd->sta_id = sta_id;
90
91 return link_cmd;
92}
93
94/*
95 * iwlagn_add_bssid_station - Add the special IBSS BSSID station
96 *
97 * Function sleeps.
98 */
99int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
100 const u8 *addr, u8 *sta_id_r)
101{
102 int ret;
103 u8 sta_id;
104 struct iwl_link_quality_cmd *link_cmd;
105 unsigned long flags;
106
107 if (sta_id_r)
108 *sta_id_r = IWL_INVALID_STATION;
109
110 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
111 if (ret) {
112 IWL_ERR(priv, "Unable to add station %pM\n", addr);
113 return ret;
114 }
115
116 if (sta_id_r)
117 *sta_id_r = sta_id;
118
119 spin_lock_irqsave(&priv->sta_lock, flags);
120 priv->stations[sta_id].used |= IWL_STA_LOCAL;
121 spin_unlock_irqrestore(&priv->sta_lock, flags);
122
123 /* Set up default rate scaling table in device's station table */
124 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
125 if (!link_cmd) {
126 IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
127 addr);
128 return -ENOMEM;
129 }
130
131 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
132 if (ret)
133 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
134
135 spin_lock_irqsave(&priv->sta_lock, flags);
136 priv->stations[sta_id].lq = link_cmd;
137 spin_unlock_irqrestore(&priv->sta_lock, flags);
138
139 return 0;
140}
141
142/*
143 * static WEP keys
144 *
145 * For each context, the device has a table of 4 static WEP keys
146 * (one for each key index) that is updated with the following
147 * commands.
148 */
149
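
As a quick illustration of the model described above -- a per-context shadow table of four static WEP keys that is re-uploaded wholesale by the function below -- here is a minimal standalone C sketch; all demo_* names are hypothetical and simplified, not the driver's structures.

#include <stdbool.h>
#include <stdint.h>

#define DEMO_WEP_KEYS_MAX	4
#define DEMO_WEP_INVALID_OFFSET	0xff

struct demo_wep_key {
	uint8_t index;		/* key index 0..3 */
	uint8_t offset;		/* DEMO_WEP_INVALID_OFFSET when the slot is empty */
	uint8_t size;		/* 0 means "no key installed" */
	uint8_t key[13];	/* up to 104-bit WEP */
};

struct demo_context {
	struct demo_wep_key keys[DEMO_WEP_KEYS_MAX];	/* driver-side shadow */
};

/*
 * Rebuild the full 4-entry command payload from the context's shadow
 * table; returns true if at least one slot is populated, i.e. whether
 * the command is worth sending at all.
 */
static bool demo_build_wep_cmd(const struct demo_context *ctx,
			       struct demo_wep_key cmd[DEMO_WEP_KEYS_MAX])
{
	bool not_empty = false;
	int i;

	for (i = 0; i < DEMO_WEP_KEYS_MAX; i++) {
		cmd[i] = ctx->keys[i];
		cmd[i].index = i;
		if (ctx->keys[i].size) {
			cmd[i].offset = i;
			not_empty = true;
		} else {
			cmd[i].offset = DEMO_WEP_INVALID_OFFSET;
		}
	}
	return not_empty;
}
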
150static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
151 struct iwl_rxon_context *ctx,
152 bool send_if_empty)
153{
154 int i, not_empty = 0;
155 u8 buff[sizeof(struct iwl_wep_cmd) +
156 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
157 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
158 size_t cmd_size = sizeof(struct iwl_wep_cmd);
159 struct iwl_host_cmd cmd = {
160 .id = ctx->wep_key_cmd,
161 .data = { wep_cmd, },
162 .flags = CMD_SYNC,
163 };
164
165 might_sleep();
166
167 memset(wep_cmd, 0, cmd_size +
168 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
169
170 for (i = 0; i < WEP_KEYS_MAX ; i++) {
171 wep_cmd->key[i].key_index = i;
172 if (ctx->wep_keys[i].key_size) {
173 wep_cmd->key[i].key_offset = i;
174 not_empty = 1;
175 } else {
176 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
177 }
178
179 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
180 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
181 ctx->wep_keys[i].key_size);
182 }
183
184 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
185 wep_cmd->num_keys = WEP_KEYS_MAX;
186
187 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
188
189 cmd.len[0] = cmd_size;
190
191 if (not_empty || send_if_empty)
192 return trans_send_cmd(&priv->trans, &cmd);
193 else
194 return 0;
195}
196
197int iwl_restore_default_wep_keys(struct iwl_priv *priv,
198 struct iwl_rxon_context *ctx)
199{
200 lockdep_assert_held(&priv->mutex);
201
202 return iwl_send_static_wepkey_cmd(priv, ctx, false);
203}
204
205int iwl_remove_default_wep_key(struct iwl_priv *priv,
206 struct iwl_rxon_context *ctx,
207 struct ieee80211_key_conf *keyconf)
208{
209 int ret;
210
211 lockdep_assert_held(&priv->mutex);
212
213 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
214 keyconf->keyidx);
215
216 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
217 if (iwl_is_rfkill(priv)) {
218 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
219 /* but keys in device are clear anyway so return success */
220 return 0;
221 }
222 ret = iwl_send_static_wepkey_cmd(priv, ctx, true);
223 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
224 keyconf->keyidx, ret);
225
226 return ret;
227}
228
229int iwl_set_default_wep_key(struct iwl_priv *priv,
230 struct iwl_rxon_context *ctx,
231 struct ieee80211_key_conf *keyconf)
232{
233 int ret;
234
235 lockdep_assert_held(&priv->mutex);
236
237 if (keyconf->keylen != WEP_KEY_LEN_128 &&
238 keyconf->keylen != WEP_KEY_LEN_64) {
239 IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
240 return -EINVAL;
241 }
242
243 keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT;
244
245 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
246 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
247 keyconf->keylen);
248
249 ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
250 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
251 keyconf->keylen, keyconf->keyidx, ret);
252
253 return ret;
254}
255
256/*
257 * dynamic (per-station) keys
258 *
259 * The dynamic keys are a little more complicated. The device has
260 * a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys.
261 * These are linked to stations by a table that contains an index
262 * into the key table for each station/key index/{mcast,unicast},
263 * i.e. it's basically an array of pointers like this:
264 * key_offset_t key_mapping[NUM_STATIONS][4][2];
265 * (it really works differently, but you can think of it as such)
266 *
267 * The key uploading and linking happens in the same command, the
268 * add station command with STA_MODIFY_KEY_MASK.
269 */
270
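
The "array of pointers" picture in the comment above can be made concrete with a small standalone sketch (hypothetical demo_* names and sizes, not the device's real layout). In the real device, as the comment notes, uploading the key material and linking it to a station happen in one add-station command with STA_MODIFY_KEY_MASK.

#include <stdint.h>

#define DEMO_NUM_STATIONS	16
#define DEMO_KEY_IDX_MAX	4	/* key index 0..3 */
#define DEMO_NO_KEY		0xff

enum demo_key_kind { DEMO_MCAST = 0, DEMO_UCAST = 1 };

/* station id / key index / {mcast,ucast} -> offset into the key cache */
static uint8_t demo_key_mapping[DEMO_NUM_STATIONS][DEMO_KEY_IDX_MAX][2];

static void demo_link_key(int sta_id, int keyidx, enum demo_key_kind kind,
			  uint8_t cache_offset)
{
	demo_key_mapping[sta_id][keyidx][kind] = cache_offset;
}

static void demo_unlink_key(int sta_id, int keyidx, enum demo_key_kind kind)
{
	demo_key_mapping[sta_id][keyidx][kind] = DEMO_NO_KEY;
}
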
271static u8 iwlagn_key_sta_id(struct iwl_priv *priv,
272 struct ieee80211_vif *vif,
273 struct ieee80211_sta *sta)
274{
275 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
276 u8 sta_id = IWL_INVALID_STATION;
277
278 if (sta)
279 sta_id = iwl_sta_id(sta);
280
281 /*
282 * The device expects GTKs for station interfaces to be
283 * installed as GTKs for the AP station. If we have no
284 * station ID, then use the ap_sta_id in that case.
285 */
286 if (!sta && vif && vif_priv->ctx) {
287 switch (vif->type) {
288 case NL80211_IFTYPE_STATION:
289 sta_id = vif_priv->ctx->ap_sta_id;
290 break;
291 default:
292 /*
293 * In all other cases, the key will be
294 * used either for TX only or is bound
295 * to a station already.
296 */
297 break;
298 }
299 }
300
301 return sta_id;
302}
303
304static int iwlagn_send_sta_key(struct iwl_priv *priv,
305 struct ieee80211_key_conf *keyconf,
306 u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
307 u32 cmd_flags)
308{
309 unsigned long flags;
310 __le16 key_flags;
311 struct iwl_addsta_cmd sta_cmd;
312 int i;
313
314 spin_lock_irqsave(&priv->sta_lock, flags);
315 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
316 spin_unlock_irqrestore(&priv->sta_lock, flags);
317
318 key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
319 key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
320
321 switch (keyconf->cipher) {
322 case WLAN_CIPHER_SUITE_CCMP:
323 key_flags |= STA_KEY_FLG_CCMP;
324 memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
325 break;
326 case WLAN_CIPHER_SUITE_TKIP:
327 key_flags |= STA_KEY_FLG_TKIP;
328 sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
329 for (i = 0; i < 5; i++)
330 sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
331 memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
332 break;
333 case WLAN_CIPHER_SUITE_WEP104:
334 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
335 /* fall through */
336 case WLAN_CIPHER_SUITE_WEP40:
337 key_flags |= STA_KEY_FLG_WEP;
338 memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen);
339 break;
340 default:
341 WARN_ON(1);
342 return -EINVAL;
343 }
344
345 if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
346 key_flags |= STA_KEY_MULTICAST_MSK;
347
348 /* key pointer (offset) */
349 sta_cmd.key.key_offset = keyconf->hw_key_idx;
350
351 sta_cmd.key.key_flags = key_flags;
352 sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
353 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
354
355 return iwl_send_add_sta(priv, &sta_cmd, cmd_flags);
356}
357
358void iwl_update_tkip_key(struct iwl_priv *priv,
359 struct ieee80211_vif *vif,
360 struct ieee80211_key_conf *keyconf,
361 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
362{
363 u8 sta_id = iwlagn_key_sta_id(priv, vif, sta);
364
365 if (sta_id == IWL_INVALID_STATION)
366 return;
367
368 if (iwl_scan_cancel(priv)) {
369 /* cancel scan failed; just live with the bad key and rely
370 * briefly on SW decryption */
371 return;
372 }
373
374 iwlagn_send_sta_key(priv, keyconf, sta_id,
375 iv32, phase1key, CMD_ASYNC);
376}
377
378int iwl_remove_dynamic_key(struct iwl_priv *priv,
379 struct iwl_rxon_context *ctx,
380 struct ieee80211_key_conf *keyconf,
381 struct ieee80211_sta *sta)
382{
383 unsigned long flags;
384 struct iwl_addsta_cmd sta_cmd;
385 u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
386
387 /* if station isn't there, neither is the key */
388 if (sta_id == IWL_INVALID_STATION)
389 return -ENOENT;
390
391 spin_lock_irqsave(&priv->sta_lock, flags);
392 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
393 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
394 sta_id = IWL_INVALID_STATION;
395 spin_unlock_irqrestore(&priv->sta_lock, flags);
396
397 if (sta_id == IWL_INVALID_STATION)
398 return 0;
399
400 lockdep_assert_held(&priv->mutex);
401
402 ctx->key_mapping_keys--;
403
404 IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
405 keyconf->keyidx, sta_id);
406
407 if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table))
408 IWL_ERR(priv, "offset %d not used in uCode key table.\n",
409 keyconf->hw_key_idx);
410
411 sta_cmd.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
412 sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
413 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
414 sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
415
416 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
417}
418
419int iwl_set_dynamic_key(struct iwl_priv *priv,
420 struct iwl_rxon_context *ctx,
421 struct ieee80211_key_conf *keyconf,
422 struct ieee80211_sta *sta)
423{
424 struct ieee80211_key_seq seq;
425 u16 p1k[5];
426 int ret;
427 u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
428 const u8 *addr;
429
430 if (sta_id == IWL_INVALID_STATION)
431 return -EINVAL;
432
433 lockdep_assert_held(&priv->mutex);
434
435 keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
436 if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
437 return -ENOSPC;
438
439 ctx->key_mapping_keys++;
440
441 switch (keyconf->cipher) {
442 case WLAN_CIPHER_SUITE_TKIP:
443 if (sta)
444 addr = sta->addr;
445 else /* station mode case only */
446 addr = ctx->active.bssid_addr;
447
448 /* pre-fill phase 1 key into device cache */
449 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
450 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
451 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
452 seq.tkip.iv32, p1k, CMD_SYNC);
453 break;
454 case WLAN_CIPHER_SUITE_CCMP:
455 case WLAN_CIPHER_SUITE_WEP40:
456 case WLAN_CIPHER_SUITE_WEP104:
457 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
458 0, NULL, CMD_SYNC);
459 break;
460 default:
461 IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
462 ret = -EINVAL;
463 }
464
465 if (ret) {
466 ctx->key_mapping_keys--;
467 clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table);
468 }
469
470 IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
471 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
472 sta ? sta->addr : NULL, ret);
473
474 return ret;
475}
476
477/**
478 * iwlagn_alloc_bcast_station - add broadcast station into driver's station table.
479 *
480 * This adds the broadcast station into the driver's station table
481 * and marks it driver active, so that it will be restored to the
482 * device at the next best time.
483 */
484int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
485 struct iwl_rxon_context *ctx)
486{
487 struct iwl_link_quality_cmd *link_cmd;
488 unsigned long flags;
489 u8 sta_id;
490
491 spin_lock_irqsave(&priv->sta_lock, flags);
492 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
493 if (sta_id == IWL_INVALID_STATION) {
494 IWL_ERR(priv, "Unable to prepare broadcast station\n");
495 spin_unlock_irqrestore(&priv->sta_lock, flags);
496
497 return -EINVAL;
498 }
499
500 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
501 priv->stations[sta_id].used |= IWL_STA_BCAST;
502 spin_unlock_irqrestore(&priv->sta_lock, flags);
503
504 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
505 if (!link_cmd) {
506 IWL_ERR(priv,
507 "Unable to initialize rate scaling for bcast station.\n");
508 return -ENOMEM;
509 }
510
511 spin_lock_irqsave(&priv->sta_lock, flags);
512 priv->stations[sta_id].lq = link_cmd;
513 spin_unlock_irqrestore(&priv->sta_lock, flags);
514
515 return 0;
516}
517
518/**
519 * iwl_update_bcast_station - update broadcast station's LQ command
520 *
521 * Only used by iwlagn. Placed here to have all bcast station management
522 * code together.
523 */
524int iwl_update_bcast_station(struct iwl_priv *priv,
525 struct iwl_rxon_context *ctx)
526{
527 unsigned long flags;
528 struct iwl_link_quality_cmd *link_cmd;
529 u8 sta_id = ctx->bcast_sta_id;
530
531 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
532 if (!link_cmd) {
533 IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
534 return -ENOMEM;
535 }
536
537 spin_lock_irqsave(&priv->sta_lock, flags);
538 if (priv->stations[sta_id].lq)
539 kfree(priv->stations[sta_id].lq);
540 else
541 IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
542 priv->stations[sta_id].lq = link_cmd;
543 spin_unlock_irqrestore(&priv->sta_lock, flags);
544
545 return 0;
546}
547
548int iwl_update_bcast_stations(struct iwl_priv *priv)
549{
550 struct iwl_rxon_context *ctx;
551 int ret = 0;
552
553 for_each_context(priv, ctx) {
554 ret = iwl_update_bcast_station(priv, ctx);
555 if (ret)
556 break;
557 }
558
559 return ret;
560}
561
562/**
563 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
564 */
565int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
566{
567 unsigned long flags;
568 struct iwl_addsta_cmd sta_cmd;
569
570 lockdep_assert_held(&priv->mutex);
571
572 /* Remove "disable" flag, to enable Tx for this TID */
573 spin_lock_irqsave(&priv->sta_lock, flags);
574 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
575 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
576 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
577 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
578 spin_unlock_irqrestore(&priv->sta_lock, flags);
579
580 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
581}
582
583int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
584 int tid, u16 ssn)
585{
586 unsigned long flags;
587 int sta_id;
588 struct iwl_addsta_cmd sta_cmd;
589
590 lockdep_assert_held(&priv->mutex);
591
592 sta_id = iwl_sta_id(sta);
593 if (sta_id == IWL_INVALID_STATION)
594 return -ENXIO;
595
596 spin_lock_irqsave(&priv->sta_lock, flags);
597 priv->stations[sta_id].sta.station_flags_msk = 0;
598 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
599 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
600 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
601 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
602 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
603 spin_unlock_irqrestore(&priv->sta_lock, flags);
604
605 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
606}
607
608int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
609 int tid)
610{
611 unsigned long flags;
612 int sta_id;
613 struct iwl_addsta_cmd sta_cmd;
614
615 lockdep_assert_held(&priv->mutex);
616
617 sta_id = iwl_sta_id(sta);
618 if (sta_id == IWL_INVALID_STATION) {
619 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
620 return -ENXIO;
621 }
622
623 spin_lock_irqsave(&priv->sta_lock, flags);
624 priv->stations[sta_id].sta.station_flags_msk = 0;
625 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
626 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
627 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
628 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
629 spin_unlock_irqrestore(&priv->sta_lock, flags);
630
631 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
632}
633
634static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
635{
636 unsigned long flags;
637
638 spin_lock_irqsave(&priv->sta_lock, flags);
639 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
640 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
641 priv->stations[sta_id].sta.sta.modify_mask = 0;
642 priv->stations[sta_id].sta.sleep_tx_count = 0;
643 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
644 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
645 spin_unlock_irqrestore(&priv->sta_lock, flags);
646
647}
648
649void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
650{
651 unsigned long flags;
652
653 spin_lock_irqsave(&priv->sta_lock, flags);
654 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
655 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
656 priv->stations[sta_id].sta.sta.modify_mask =
657 STA_MODIFY_SLEEP_TX_COUNT_MSK;
658 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
659 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
660 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
661 spin_unlock_irqrestore(&priv->sta_lock, flags);
662
663}
664
665void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
666 struct ieee80211_vif *vif,
667 enum sta_notify_cmd cmd,
668 struct ieee80211_sta *sta)
669{
670 struct iwl_priv *priv = hw->priv;
671 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
672 int sta_id;
673
674 switch (cmd) {
675 case STA_NOTIFY_SLEEP:
676 WARN_ON(!sta_priv->client);
677 sta_priv->asleep = true;
678 if (atomic_read(&sta_priv->pending_frames) > 0)
679 ieee80211_sta_block_awake(hw, sta, true);
680 break;
681 case STA_NOTIFY_AWAKE:
682 WARN_ON(!sta_priv->client);
683 if (!sta_priv->asleep)
684 break;
685 sta_priv->asleep = false;
686 sta_id = iwl_sta_id(sta);
687 if (sta_id != IWL_INVALID_STATION)
688 iwl_sta_modify_ps_wake(priv, sta_id);
689 break;
690 default:
691 break;
692 }
693}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
new file mode 100644
index 00000000000..f501d742984
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -0,0 +1,699 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-agn-tt.h"
44
45/* default Thermal Throttling transaction table
46 * Current state | Throttling Down | Throttling Up
47 *=============================================================================
48 * Condition Nxt State Condition Nxt State Condition Nxt State
49 *-----------------------------------------------------------------------------
50 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
51 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
52 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
53 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
54 *=============================================================================
55 */
56static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
57 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
58 {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
59 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
60};
61static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
62 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
63 {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
64 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
65};
66static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
67 {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
68 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
69 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
70};
71static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
72 {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
73 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
74 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
75};
76
77/* Advanced Thermal Throttling default restriction table */
78static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
79 {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
80 {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
81 {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
82 {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
83};
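
The transaction tables above are later flattened into one array indexed by (state * (IWL_TI_STATE_MAX - 1) + i); a standalone sketch of that lookup, with hypothetical demo_* names, might look like this.

#define DEMO_NUM_STATES 4	/* mirrors IWL_TI_STATE_MAX */

struct demo_trans {
	int next_state;
	int low;	/* lowest temperature (inclusive) for this row */
	int high;	/* highest temperature (inclusive) for this row */
};

/*
 * Each state owns (DEMO_NUM_STATES - 1) consecutive rows of the flattened
 * table; the next state is taken from the first row whose [low, high]
 * range contains the current temperature.
 */
static int demo_next_state(const struct demo_trans *table,
			   int cur_state, int temp)
{
	const struct demo_trans *row =
		table + cur_state * (DEMO_NUM_STATES - 1);
	int i;

	for (i = 0; i < DEMO_NUM_STATES - 1; i++)
		if (temp >= row[i].low && temp <= row[i].high)
			return row[i].next_state;

	return cur_state;	/* no matching range: stay in the current state */
}
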
84
85bool iwl_tt_is_low_power_state(struct iwl_priv *priv)
86{
87 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
88
89 if (tt->state >= IWL_TI_1)
90 return true;
91 return false;
92}
93
94u8 iwl_tt_current_power_mode(struct iwl_priv *priv)
95{
96 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
97
98 return tt->tt_power_mode;
99}
100
101bool iwl_ht_enabled(struct iwl_priv *priv)
102{
103 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
104 struct iwl_tt_restriction *restriction;
105
106 if (!priv->thermal_throttle.advanced_tt)
107 return true;
108 restriction = tt->restriction + tt->state;
109 return restriction->is_ht;
110}
111
112static bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
113{
114 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
115 bool within_margin = false;
116
117 if (priv->cfg->base_params->temperature_kelvin)
118 temp = KELVIN_TO_CELSIUS(priv->temperature);
119
120 if (!priv->thermal_throttle.advanced_tt)
121 within_margin = (temp + IWL_TT_CT_KILL_MARGIN) >=
122 CT_KILL_THRESHOLD_LEGACY;
123 else
124 within_margin = (temp + IWL_TT_CT_KILL_MARGIN) >=
125 CT_KILL_THRESHOLD;
126 return within_margin;
127}
128
129bool iwl_check_for_ct_kill(struct iwl_priv *priv)
130{
131 bool is_ct_kill = false;
132
133 if (iwl_within_ct_kill_margin(priv)) {
134 iwl_tt_enter_ct_kill(priv);
135 is_ct_kill = true;
136 }
137 return is_ct_kill;
138}
139
140enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
141{
142 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
143 struct iwl_tt_restriction *restriction;
144
145 if (!priv->thermal_throttle.advanced_tt)
146 return IWL_ANT_OK_MULTI;
147 restriction = tt->restriction + tt->state;
148 return restriction->tx_stream;
149}
150
151enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
152{
153 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
154 struct iwl_tt_restriction *restriction;
155
156 if (!priv->thermal_throttle.advanced_tt)
157 return IWL_ANT_OK_MULTI;
158 restriction = tt->restriction + tt->state;
159 return restriction->rx_stream;
160}
161
162#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
163#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
164
165/*
166 * Toggle the bit to wake up uCode and check the temperature.
167 * If the temperature is below CT, uCode will stay awake and send a card
168 * state notification with the CT_KILL bit cleared to inform Thermal Throttling
169 * Management to change state. Otherwise, uCode will go back to sleep
170 * without doing anything; the driver should keep the 5 second timer running
171 * to wake up uCode for temperature checks until the temperature drops below CT.
172 */
173static void iwl_tt_check_exit_ct_kill(unsigned long data)
174{
175 struct iwl_priv *priv = (struct iwl_priv *)data;
176 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
177 unsigned long flags;
178
179 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
180 return;
181
182 if (tt->state == IWL_TI_CT_KILL) {
183 if (priv->thermal_throttle.ct_kill_toggle) {
184 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
185 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
186 priv->thermal_throttle.ct_kill_toggle = false;
187 } else {
188 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
189 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
190 priv->thermal_throttle.ct_kill_toggle = true;
191 }
192 iwl_read32(priv, CSR_UCODE_DRV_GP1);
193 spin_lock_irqsave(&priv->reg_lock, flags);
194 if (!iwl_grab_nic_access(priv))
195 iwl_release_nic_access(priv);
196 spin_unlock_irqrestore(&priv->reg_lock, flags);
197
198 /* Reschedule the ct_kill timer to occur in
199 * CT_KILL_EXIT_DURATION seconds to ensure we get a
200 * thermal update */
201 IWL_DEBUG_TEMP(priv, "schedule ct_kill exit timer\n");
202 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
203 jiffies + CT_KILL_EXIT_DURATION * HZ);
204 }
205}
206
207static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
208 bool stop)
209{
210 if (stop) {
211 IWL_DEBUG_TEMP(priv, "Stop all queues\n");
212 if (priv->mac80211_registered)
213 ieee80211_stop_queues(priv->hw);
214 IWL_DEBUG_TEMP(priv,
215 "Schedule 5 seconds CT_KILL Timer\n");
216 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
217 jiffies + CT_KILL_EXIT_DURATION * HZ);
218 } else {
219 IWL_DEBUG_TEMP(priv, "Wake all queues\n");
220 if (priv->mac80211_registered)
221 ieee80211_wake_queues(priv->hw);
222 }
223}
224
225static void iwl_tt_ready_for_ct_kill(unsigned long data)
226{
227 struct iwl_priv *priv = (struct iwl_priv *)data;
228 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
229
230 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
231 return;
232
233 /* temperature timer expired, ready to go into CT_KILL state */
234 if (tt->state != IWL_TI_CT_KILL) {
235 IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
236 "temperature timer expired\n");
237 tt->state = IWL_TI_CT_KILL;
238 set_bit(STATUS_CT_KILL, &priv->status);
239 iwl_perform_ct_kill_task(priv, true);
240 }
241}
242
243static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
244{
245 IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
246 /* make request to retrieve statistics information */
247 iwl_send_statistics_request(priv, CMD_SYNC, false);
248 /* Reschedule the ct_kill wait timer */
249 mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
250 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
251}
252
253#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY)
254#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100)
255#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90)
256
257/*
258 * Legacy thermal throttling
259 * 1) Avoid NIC destruction due to high temperatures
260 * Chip will identify dangerously high temperatures that can
261 * harm the device and will power down
262 * 2) Avoid the NIC power down due to high temperature
263 * Throttle early enough to lower the power consumption before
264 * drastic steps are needed
265 */
266static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
267{
268 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
269 enum iwl_tt_state old_state;
270
271#ifdef CONFIG_IWLWIFI_DEBUG
272 if ((tt->tt_previous_temp) &&
273 (temp > tt->tt_previous_temp) &&
274 ((temp - tt->tt_previous_temp) >
275 IWL_TT_INCREASE_MARGIN)) {
276 IWL_DEBUG_TEMP(priv,
277 "Temperature increase %d degree Celsius\n",
278 (temp - tt->tt_previous_temp));
279 }
280#endif
281 old_state = tt->state;
282 /* in Celsius */
283 if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
284 tt->state = IWL_TI_CT_KILL;
285 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
286 tt->state = IWL_TI_2;
287 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
288 tt->state = IWL_TI_1;
289 else
290 tt->state = IWL_TI_0;
291
292#ifdef CONFIG_IWLWIFI_DEBUG
293 tt->tt_previous_temp = temp;
294#endif
295 /* stop ct_kill_waiting_tm timer */
296 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
297 if (tt->state != old_state) {
298 switch (tt->state) {
299 case IWL_TI_0:
300 /*
301 * When the system is ready to go back to IWL_TI_0
302 * we only have to call iwl_power_update_mode() to
303 * do so.
304 */
305 break;
306 case IWL_TI_1:
307 tt->tt_power_mode = IWL_POWER_INDEX_3;
308 break;
309 case IWL_TI_2:
310 tt->tt_power_mode = IWL_POWER_INDEX_4;
311 break;
312 default:
313 tt->tt_power_mode = IWL_POWER_INDEX_5;
314 break;
315 }
316 mutex_lock(&priv->mutex);
317 if (old_state == IWL_TI_CT_KILL)
318 clear_bit(STATUS_CT_KILL, &priv->status);
319 if (tt->state != IWL_TI_CT_KILL &&
320 iwl_power_update_mode(priv, true)) {
321 /* TT state not updated
322 * try again during next temperature read
323 */
324 if (old_state == IWL_TI_CT_KILL)
325 set_bit(STATUS_CT_KILL, &priv->status);
326 tt->state = old_state;
327 IWL_ERR(priv, "Cannot update power mode, "
328 "TT state not updated\n");
329 } else {
330 if (tt->state == IWL_TI_CT_KILL) {
331 if (force) {
332 set_bit(STATUS_CT_KILL, &priv->status);
333 iwl_perform_ct_kill_task(priv, true);
334 } else {
335 iwl_prepare_ct_kill_task(priv);
336 tt->state = old_state;
337 }
338 } else if (old_state == IWL_TI_CT_KILL &&
339 tt->state != IWL_TI_CT_KILL)
340 iwl_perform_ct_kill_task(priv, false);
341 IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n",
342 tt->state);
343 IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
344 tt->tt_power_mode);
345 }
346 mutex_unlock(&priv->mutex);
347 }
348}
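
For reference, the legacy handler above boils down to a simple threshold-to-power-index mapping. A hedged standalone sketch follows; the return values are stand-ins for the IWL_POWER_INDEX_* constants, and the CT-kill threshold value itself is defined elsewhere in the driver, so it is passed in here.

static int demo_legacy_power_index(int temp, int ct_kill_threshold)
{
	if (temp >= ct_kill_threshold)
		return 5;	/* stand-in for IWL_POWER_INDEX_5 (CT kill) */
	if (temp >= 100)	/* IWL_REDUCED_PERFORMANCE_THRESHOLD_2 */
		return 4;	/* stand-in for IWL_POWER_INDEX_4 (IWL_TI_2) */
	if (temp >= 90)		/* IWL_REDUCED_PERFORMANCE_THRESHOLD_1 */
		return 3;	/* stand-in for IWL_POWER_INDEX_3 (IWL_TI_1) */
	return 0;		/* IWL_TI_0: normal power policy applies */
}
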
349
350/*
351 * Advanced thermal throttling
352 * 1) Avoid NIC destruction due to high temperatures
353 * Chip will identify dangerously high temperatures that can
354 * harm the device and will power down
355 * 2) Avoid the NIC power down due to high temperature
356 * Throttle early enough to lower the power consumption before
357 * drastic steps are needed
358 * Actions include relaxing the power down sleep thresholds and
359 * decreasing the number of TX streams
360 * 3) Avoid throughput performance impact as much as possible
361 *
362 *=============================================================================
363 * Condition Nxt State Condition Nxt State Condition Nxt State
364 *-----------------------------------------------------------------------------
365 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
366 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
367 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
368 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
369 *=============================================================================
370 */
371static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
372{
373 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
374 int i;
375 bool changed = false;
376 enum iwl_tt_state old_state;
377 struct iwl_tt_trans *transaction;
378
379 old_state = tt->state;
380 for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
381 /* Based on the current TT state,
382 * find the corresponding transaction table;
383 * each table has (IWL_TI_STATE_MAX - 1) entries.
384 * tt->transaction + (old_state * (IWL_TI_STATE_MAX - 1))
385 * advances to the correct table.
386 * Then, based on the current temperature, find the next
387 * state to transition to by going through all the possible
388 * (IWL_TI_STATE_MAX - 1) entries in the current table
389 * to see if a transition is needed.
390 */
391 transaction = tt->transaction +
392 ((old_state * (IWL_TI_STATE_MAX - 1)) + i);
393 if (temp >= transaction->tt_low &&
394 temp <= transaction->tt_high) {
395#ifdef CONFIG_IWLWIFI_DEBUG
396 if ((tt->tt_previous_temp) &&
397 (temp > tt->tt_previous_temp) &&
398 ((temp - tt->tt_previous_temp) >
399 IWL_TT_INCREASE_MARGIN)) {
400 IWL_DEBUG_TEMP(priv,
401 "Temperature increase %d "
402 "degree Celsius\n",
403 (temp - tt->tt_previous_temp));
404 }
405 tt->tt_previous_temp = temp;
406#endif
407 if (old_state !=
408 transaction->next_state) {
409 changed = true;
410 tt->state =
411 transaction->next_state;
412 }
413 break;
414 }
415 }
416 /* stop ct_kill_waiting_tm timer */
417 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
418 if (changed) {
419 if (tt->state >= IWL_TI_1) {
420 /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
421 tt->tt_power_mode = IWL_POWER_INDEX_5;
422
423 if (!iwl_ht_enabled(priv)) {
424 struct iwl_rxon_context *ctx;
425
426 for_each_context(priv, ctx) {
427 struct iwl_rxon_cmd *rxon;
428
429 rxon = &ctx->staging;
430
431 /* disable HT */
432 rxon->flags &= ~(
433 RXON_FLG_CHANNEL_MODE_MSK |
434 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
435 RXON_FLG_HT40_PROT_MSK |
436 RXON_FLG_HT_PROT_MSK);
437 }
438 } else {
439 /* check HT capability and set it
440 * according to the system HT capability,
441 * in case it was disabled before */
442 iwl_set_rxon_ht(priv, &priv->current_ht_config);
443 }
444
445 } else {
446 /*
447 * restore system power setting -- it will be
448 * recalculated automatically.
449 */
450
451 /* check HT capability and set it
452 * according to the system HT capability,
453 * in case it was disabled before */
454 iwl_set_rxon_ht(priv, &priv->current_ht_config);
455 }
456 mutex_lock(&priv->mutex);
457 if (old_state == IWL_TI_CT_KILL)
458 clear_bit(STATUS_CT_KILL, &priv->status);
459 if (tt->state != IWL_TI_CT_KILL &&
460 iwl_power_update_mode(priv, true)) {
461 /* TT state not updated
462 * try again during next temperature read
463 */
464 IWL_ERR(priv, "Cannot update power mode, "
465 "TT state not updated\n");
466 if (old_state == IWL_TI_CT_KILL)
467 set_bit(STATUS_CT_KILL, &priv->status);
468 tt->state = old_state;
469 } else {
470 IWL_DEBUG_TEMP(priv,
471 "Thermal Throttling to new state: %u\n",
472 tt->state);
473 if (old_state != IWL_TI_CT_KILL &&
474 tt->state == IWL_TI_CT_KILL) {
475 if (force) {
476 IWL_DEBUG_TEMP(priv,
477 "Enter IWL_TI_CT_KILL\n");
478 set_bit(STATUS_CT_KILL, &priv->status);
479 iwl_perform_ct_kill_task(priv, true);
480 } else {
481 iwl_prepare_ct_kill_task(priv);
482 tt->state = old_state;
483 }
484 } else if (old_state == IWL_TI_CT_KILL &&
485 tt->state != IWL_TI_CT_KILL) {
486 IWL_DEBUG_TEMP(priv, "Exit IWL_TI_CT_KILL\n");
487 iwl_perform_ct_kill_task(priv, false);
488 }
489 }
490 mutex_unlock(&priv->mutex);
491 }
492}
493
494/* The Card State Notification indicated that a critical temperature
495 * was reached. If PSP is not enabled, no Thermal Throttling is
496 * performed; just set the GP1 bit to acknowledge the event.
497 * Otherwise, go into the IWL_TI_CT_KILL state.
498 * Since the Card State Notification does not provide a temperature
499 * reading, for legacy mode
500 * just pass the CT_KILL temperature to iwl_legacy_tt_handler();
501 * for advanced mode,
502 * pass CT_KILL_THRESHOLD+1 to make sure we move into the IWL_TI_CT_KILL state.
503 */
504static void iwl_bg_ct_enter(struct work_struct *work)
505{
506 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
507 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
508
509 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
510 return;
511
512 if (!iwl_is_ready(priv))
513 return;
514
515 if (tt->state != IWL_TI_CT_KILL) {
516 IWL_ERR(priv, "Device reached critical temperature "
517 "- ucode going to sleep!\n");
518 if (!priv->thermal_throttle.advanced_tt)
519 iwl_legacy_tt_handler(priv,
520 IWL_MINIMAL_POWER_THRESHOLD,
521 true);
522 else
523 iwl_advance_tt_handler(priv,
524 CT_KILL_THRESHOLD + 1, true);
525 }
526}
527
528/* The Card State Notification indicated that the device is back below the
529 * critical temperature. Since the notification does not provide a temperature
530 * reading, pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2 temperature
531 * to iwl_legacy_tt_handler() to get out of the IWL_TI_CT_KILL state.
532 */
533static void iwl_bg_ct_exit(struct work_struct *work)
534{
535 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
536 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
537
538 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
539 return;
540
541 if (!iwl_is_ready(priv))
542 return;
543
544 /* stop ct_kill_exit_tm timer */
545 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
546
547 if (tt->state == IWL_TI_CT_KILL) {
548 IWL_ERR(priv,
549 "Device temperature below critical"
550 "- ucode awake!\n");
551 /*
552 * exit from CT_KILL state
553 * reset the current temperature reading
554 */
555 priv->temperature = 0;
556 if (!priv->thermal_throttle.advanced_tt)
557 iwl_legacy_tt_handler(priv,
558 IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
559 true);
560 else
561 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
562 true);
563 }
564}
565
566void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
567{
568 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
569 return;
570
571 IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
572 queue_work(priv->workqueue, &priv->ct_enter);
573}
574
575void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
576{
577 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
578 return;
579
580 IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
581 queue_work(priv->workqueue, &priv->ct_exit);
582}
583
584static void iwl_bg_tt_work(struct work_struct *work)
585{
586 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
587 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
588
589 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
590 return;
591
592 if (priv->cfg->base_params->temperature_kelvin)
593 temp = KELVIN_TO_CELSIUS(priv->temperature);
594
595 if (!priv->thermal_throttle.advanced_tt)
596 iwl_legacy_tt_handler(priv, temp, false);
597 else
598 iwl_advance_tt_handler(priv, temp, false);
599}
600
601void iwl_tt_handler(struct iwl_priv *priv)
602{
603 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
604 return;
605
606 IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
607 queue_work(priv->workqueue, &priv->tt_work);
608}
609
610/* Thermal throttling initialization
611 * For advanced thermal throttling:
612 * Initialize Thermal Index and temperature threshold table
613 * Initialize thermal throttling restriction table
614 */
615void iwl_tt_initialize(struct iwl_priv *priv)
616{
617 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
618 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
619 struct iwl_tt_trans *transaction;
620
621 IWL_DEBUG_TEMP(priv, "Initialize Thermal Throttling\n");
622
623 memset(tt, 0, sizeof(struct iwl_tt_mgmt));
624
625 tt->state = IWL_TI_0;
626 init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
627 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
628 priv->thermal_throttle.ct_kill_exit_tm.function =
629 iwl_tt_check_exit_ct_kill;
630 init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
631 priv->thermal_throttle.ct_kill_waiting_tm.data =
632 (unsigned long)priv;
633 priv->thermal_throttle.ct_kill_waiting_tm.function =
634 iwl_tt_ready_for_ct_kill;
635 /* setup deferred ct kill work */
636 INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
637 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
638 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
639
640 if (priv->cfg->base_params->adv_thermal_throttle) {
641 IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
642 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
643 IWL_TI_STATE_MAX, GFP_KERNEL);
644 tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
645 IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1),
646 GFP_KERNEL);
647 if (!tt->restriction || !tt->transaction) {
648 IWL_ERR(priv, "Fallback to Legacy Throttling\n");
649 priv->thermal_throttle.advanced_tt = false;
650 kfree(tt->restriction);
651 tt->restriction = NULL;
652 kfree(tt->transaction);
653 tt->transaction = NULL;
654 } else {
655 transaction = tt->transaction +
656 (IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
657 memcpy(transaction, &tt_range_0[0], size);
658 transaction = tt->transaction +
659 (IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
660 memcpy(transaction, &tt_range_1[0], size);
661 transaction = tt->transaction +
662 (IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
663 memcpy(transaction, &tt_range_2[0], size);
664 transaction = tt->transaction +
665 (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
666 memcpy(transaction, &tt_range_3[0], size);
667 size = sizeof(struct iwl_tt_restriction) *
668 IWL_TI_STATE_MAX;
669 memcpy(tt->restriction,
670 &restriction_range[0], size);
671 priv->thermal_throttle.advanced_tt = true;
672 }
673 } else {
674 IWL_DEBUG_TEMP(priv, "Legacy Thermal Throttling\n");
675 priv->thermal_throttle.advanced_tt = false;
676 }
677}
678
679/* cleanup thermal throttling management related memory and timer */
680void iwl_tt_exit(struct iwl_priv *priv)
681{
682 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
683
684 /* stop ct_kill_exit_tm timer if activated */
685 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
686 /* stop ct_kill_waiting_tm timer if activated */
687 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
688 cancel_work_sync(&priv->tt_work);
689 cancel_work_sync(&priv->ct_enter);
690 cancel_work_sync(&priv->ct_exit);
691
692 if (priv->thermal_throttle.advanced_tt) {
693 /* free advanced thermal throttling memory */
694 kfree(tt->restriction);
695 tt->restriction = NULL;
696 kfree(tt->transaction);
697 tt->transaction = NULL;
698 }
699}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
new file mode 100644
index 00000000000..d118ed29bf3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
@@ -0,0 +1,129 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_tt_setting_h__
29#define __iwl_tt_setting_h__
30
31#include "iwl-commands.h"
32
33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
35#define IWL_TT_INCREASE_MARGIN 5
36#define IWL_TT_CT_KILL_MARGIN 3
37
38enum iwl_antenna_ok {
39 IWL_ANT_OK_NONE,
40 IWL_ANT_OK_SINGLE,
41 IWL_ANT_OK_MULTI,
42};
43
44/* Thermal Throttling State Machine states */
45enum iwl_tt_state {
46 IWL_TI_0, /* normal temperature, system power state */
47 IWL_TI_1, /* high temperature detect, low power state */
48 IWL_TI_2, /* higher temperature detected, lower power state */
49 IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */
50 IWL_TI_STATE_MAX
51};
52
53/**
54 * struct iwl_tt_restriction - Thermal Throttling restriction table
55 * @tx_stream: number of tx streams allowed
56 * @is_ht: ht enable/disable
57 * @rx_stream: number of rx streams allowed
58 *
59 * This table is used by advanced thermal throttling management
60 * based on the current thermal throttling state, and determines
61 * the number of tx/rx streams and the status of HT operation.
62 */
63struct iwl_tt_restriction {
64 enum iwl_antenna_ok tx_stream;
65 enum iwl_antenna_ok rx_stream;
66 bool is_ht;
67};
68
69/**
70 * struct iwl_tt_trans - Thermal Throttling transaction table
71 * @next_state: next thermal throttling mode
72 * @tt_low: low temperature threshold to change state
73 * @tt_high: high temperature threshold to change state
74 *
75 * This is used by the advanced thermal throttling algorithm
76 * to determine the next thermal state to go to, based on the
77 * current temperature.
78 */
79struct iwl_tt_trans {
80 enum iwl_tt_state next_state;
81 u32 tt_low;
82 u32 tt_high;
83};
84
85/**
86 * struct iwl_tt_mgmt - Thermal Throttling Management structure
87 * @advanced_tt: advanced thermal throttling required
88 * @state: current Thermal Throttling state
89 * @tt_power_mode: Thermal Throttling power mode index
90 * used to set the power level when the thermal
91 * throttling state is not IWL_TI_0; tt_power_mode
92 * should be set to a different power mode based
93 * on the current tt state
94 * @tt_previous_temp: last measured temperature
95 * @restriction: ptr to restriction tbl, used by advanced
96 * thermal throttling to determine how many tx/rx streams
97 * should be used in tt state and whether HT can be enabled
98 * @transaction: ptr to advanced transaction table, used by advanced thermal
99 * throttling for state transitions
100 * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature
101 * @ct_kill_exit_tm: timer to exit thermal kill
102 */
103struct iwl_tt_mgmt {
104 enum iwl_tt_state state;
105 bool advanced_tt;
106 u8 tt_power_mode;
107 bool ct_kill_toggle;
108#ifdef CONFIG_IWLWIFI_DEBUG
109 s32 tt_previous_temp;
110#endif
111 struct iwl_tt_restriction *restriction;
112 struct iwl_tt_trans *transaction;
113 struct timer_list ct_kill_exit_tm;
114 struct timer_list ct_kill_waiting_tm;
115};
116
117u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
118bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
119bool iwl_ht_enabled(struct iwl_priv *priv);
120bool iwl_check_for_ct_kill(struct iwl_priv *priv);
121enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
122enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
123void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
124void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
125void iwl_tt_handler(struct iwl_priv *priv);
126void iwl_tt_initialize(struct iwl_priv *priv);
127void iwl_tt_exit(struct iwl_priv *priv);
128
129#endif /* __iwl_tt_setting_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
new file mode 100644
index 00000000000..475f9d4f56e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -0,0 +1,980 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-agn-hw.h"
41#include "iwl-agn.h"
42#include "iwl-trans.h"
43
44/*
45 * mac80211 queues, ACs, hardware queues, FIFOs.
46 *
47 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
48 *
49 * Mac80211 uses the following numbers, which we get from it
50 * by way of skb_get_queue_mapping(skb):
51 *
52 * VO 0
53 * VI 1
54 * BE 2
55 * BK 3
56 *
57 *
58 * Regular (not A-MPDU) frames are put into hardware queues corresponding
59 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
60 * own queue per aggregation session (RA/TID combination), such queues are
61 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
62 * order to map frames to the right queue, we also need an AC->hw queue
63 * mapping. This is implemented here.
64 *
65 * Due to the way hw queues are set up (by the hw specific modules like
66 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
67 * mapping.
68 */
69
70static const u8 tid_to_ac[] = {
71 IEEE80211_AC_BE,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BK,
74 IEEE80211_AC_BE,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VI,
77 IEEE80211_AC_VO,
78 IEEE80211_AC_VO
79};
80
81static inline int get_ac_from_tid(u16 tid)
82{
83 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
84 return tid_to_ac[tid];
85
86 /* no support for TIDs 8-15 yet */
87 return -EINVAL;
88}
89
90static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
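
To make the TID -> AC -> FIFO chain above concrete, here is a tiny standalone example; the AC -> FIFO numbers below are invented for illustration (the real mapping lives in ctx->ac_to_fifo), while the TID -> AC table mirrors the one above.

enum demo_ac { DEMO_AC_VO, DEMO_AC_VI, DEMO_AC_BE, DEMO_AC_BK, DEMO_AC_NUM };

static const enum demo_ac demo_tid_to_ac[8] = {
	DEMO_AC_BE, DEMO_AC_BK, DEMO_AC_BK, DEMO_AC_BE,
	DEMO_AC_VI, DEMO_AC_VI, DEMO_AC_VO, DEMO_AC_VO,
};

static const int demo_ac_to_fifo[DEMO_AC_NUM] = { 3, 2, 1, 0 }; /* illustrative only */

/* e.g. TID 5 (video) -> DEMO_AC_VI -> FIFO 2 in this made-up mapping */
static int demo_fifo_for_tid(int tid)
{
	if (tid < 0 || tid >= 8)
		return -1;	/* TIDs 8-15 are not supported, as in the driver */
	return demo_ac_to_fifo[demo_tid_to_ac[tid]];
}
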
98
99static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
100 int tid)
101{
102 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
103 (IWLAGN_FIRST_AMPDU_QUEUE +
104 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
105 IWL_WARN(priv,
106 "queue number out of range: %d, must be %d to %d\n",
107 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
108 IWLAGN_FIRST_AMPDU_QUEUE +
109 priv->cfg->base_params->num_of_ampdu_queues - 1);
110 return -EINVAL;
111 }
112
113 /* Modify device's station table to Tx this TID */
114 return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
115}
116
117static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
118 struct ieee80211_tx_info *info,
119 __le16 fc, __le32 *tx_flags)
120{
121 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
122 info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
123 info->flags & IEEE80211_TX_CTL_AMPDU)
124 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
125}
126
127/*
128 * handle build REPLY_TX command notification.
129 */
130static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
131 struct sk_buff *skb,
132 struct iwl_tx_cmd *tx_cmd,
133 struct ieee80211_tx_info *info,
134 struct ieee80211_hdr *hdr,
135 u8 std_id)
136{
137 __le16 fc = hdr->frame_control;
138 __le32 tx_flags = tx_cmd->tx_flags;
139
140 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
141
142 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
143 tx_flags |= TX_CMD_FLG_ACK_MSK;
144 else
145 tx_flags &= ~TX_CMD_FLG_ACK_MSK;
146
147 if (ieee80211_is_probe_resp(fc))
148 tx_flags |= TX_CMD_FLG_TSF_MSK;
149 else if (ieee80211_is_back_req(fc))
150 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
151 else if (info->band == IEEE80211_BAND_2GHZ &&
152 priv->cfg->bt_params &&
153 priv->cfg->bt_params->advanced_bt_coexist &&
154 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
155 ieee80211_is_reassoc_req(fc) ||
156 skb->protocol == cpu_to_be16(ETH_P_PAE)))
157 tx_flags |= TX_CMD_FLG_IGNORE_BT;
158
159
160 tx_cmd->sta_id = std_id;
161 if (ieee80211_has_morefrags(fc))
162 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
163
164 if (ieee80211_is_data_qos(fc)) {
165 u8 *qc = ieee80211_get_qos_ctl(hdr);
166 tx_cmd->tid_tspec = qc[0] & 0xf;
167 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
168 } else {
169 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
170 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
171 else
172 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
173 }
174
175 iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
176
177 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
178 if (ieee80211_is_mgmt(fc)) {
179 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
180 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
181 else
182 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
183 } else {
184 tx_cmd->timeout.pm_frame_timeout = 0;
185 }
186
187 tx_cmd->driver_txop = 0;
188 tx_cmd->tx_flags = tx_flags;
189 tx_cmd->next_frame_len = 0;
190}
191
192#define RTS_DFAULT_RETRY_LIMIT 60
193
194static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
195 struct iwl_tx_cmd *tx_cmd,
196 struct ieee80211_tx_info *info,
197 __le16 fc)
198{
199 u32 rate_flags;
200 int rate_idx;
201 u8 rts_retry_limit;
202 u8 data_retry_limit;
203 u8 rate_plcp;
204
205 /* Set retry limit on DATA packets and Probe Responses*/
206 if (ieee80211_is_probe_resp(fc))
207 data_retry_limit = 3;
208 else
209 data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
210 tx_cmd->data_retry_limit = data_retry_limit;
211
212 /* Set retry limit on RTS packets */
213 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
214 if (data_retry_limit < rts_retry_limit)
215 rts_retry_limit = data_retry_limit;
216 tx_cmd->rts_retry_limit = rts_retry_limit;
217
218 /* DATA packets will use the uCode station table for rate/antenna
219 * selection */
220 if (ieee80211_is_data(fc)) {
221 tx_cmd->initial_rate_index = 0;
222 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
223#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
224 if (priv->tm_fixed_rate) {
225 /*
226 * rate overwrite by testmode
227 * we not only send lq command to change rate
228 * we also re-enforce per data pkt base.
229 */
230 tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
231 memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
232 sizeof(tx_cmd->rate_n_flags));
233 }
234#endif
235 return;
236 }
237
238 /*
239 * If the current TX rate stored in mac80211 has the MCS bit set, it's
240 * not really a TX rate. Thus, we use the lowest supported rate for
241 * this band. Also use the lowest supported rate if the stored rate
242 * index is invalid.
243 */
244 rate_idx = info->control.rates[0].idx;
245 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
246 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
247 rate_idx = rate_lowest_index(&priv->bands[info->band],
248 info->control.sta);
249 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
250 if (info->band == IEEE80211_BAND_5GHZ)
251 rate_idx += IWL_FIRST_OFDM_RATE;
252 /* Get PLCP rate for tx_cmd->rate_n_flags */
253 rate_plcp = iwl_rates[rate_idx].plcp;
254 /* Zero out flags for this packet */
255 rate_flags = 0;
256
257 /* Set CCK flag as needed */
258 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
259 rate_flags |= RATE_MCS_CCK_MSK;
260
261 /* Set up antennas */
262 if (priv->cfg->bt_params &&
263 priv->cfg->bt_params->advanced_bt_coexist &&
264 priv->bt_full_concurrent) {
265 /* operated as 1x1 in full concurrency mode */
266 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
267 first_antenna(priv->hw_params.valid_tx_ant));
268 } else
269 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
270 priv->hw_params.valid_tx_ant);
271 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
272
273 /* Set the rate in the TX cmd */
274 tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
275}
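
/*
 * Illustrative sketch (standalone, not driver code) of the legacy-rate
 * selection above: an out-of-range or MCS-flagged mac80211 index falls back
 * to the lowest rate (simplified here to index 0 instead of
 * rate_lowest_index()), 5 GHz indices are shifted past the CCK rates, and
 * CCK indices get a flag. The constants are hypothetical stand-ins for the
 * driver's rate table.
 */
#include <stdio.h>
#include <stdbool.h>

#define FIRST_CCK_RATE    0
#define LAST_CCK_RATE     3
#define FIRST_OFDM_RATE   4
#define RATE_COUNT_LEGACY 12
#define BAND_5GHZ         1

/* Pick a legacy rate index along the lines of the code above. */
static int pick_legacy_rate(int idx, bool is_mcs, int band, bool *use_cck)
{
	if (is_mcs || idx < 0 || idx > RATE_COUNT_LEGACY)
		idx = 0;			/* fall back to the lowest rate */
	if (band == BAND_5GHZ)
		idx += FIRST_OFDM_RATE;		/* 5 GHz has no CCK rates */
	*use_cck = (idx >= FIRST_CCK_RATE && idx <= LAST_CCK_RATE);
	return idx;
}

int main(void)
{
	bool cck;
	int idx = pick_legacy_rate(-1, false, BAND_5GHZ, &cck);
	printf("idx=%d cck=%d\n", idx, cck);	/* prints "idx=4 cck=0" */
	return 0;
}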
276
277static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
278 struct ieee80211_tx_info *info,
279 struct iwl_tx_cmd *tx_cmd,
280 struct sk_buff *skb_frag,
281 int sta_id)
282{
283 struct ieee80211_key_conf *keyconf = info->control.hw_key;
284
285 switch (keyconf->cipher) {
286 case WLAN_CIPHER_SUITE_CCMP:
287 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
288 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
289 if (info->flags & IEEE80211_TX_CTL_AMPDU)
290 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
291 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
292 break;
293
294 case WLAN_CIPHER_SUITE_TKIP:
295 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
296 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
297 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
298 break;
299
300 case WLAN_CIPHER_SUITE_WEP104:
301 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
302 /* fall through */
303 case WLAN_CIPHER_SUITE_WEP40:
304 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
305 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
306
307 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
308
309 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
310 "with key %d\n", keyconf->keyidx);
311 break;
312
313 default:
314 IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
315 break;
316 }
317}
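
/*
 * Standalone sketch (not driver code) of the WEP branch above: the key
 * index is masked and shifted into the security-control byte alongside the
 * cipher and key-size bits. The bit positions below are hypothetical,
 * chosen only to show the packing pattern.
 */
#include <stdio.h>
#include <stdint.h>

#define SEC_WEP      0x01	/* cipher select: WEP */
#define SEC_KEY128   0x08	/* 104-bit (so-called 128-bit) key */
#define SEC_KEY_MSK  0x03	/* two bits of key index */
#define SEC_KEY_POS  6		/* where the key index lives */

static uint8_t build_wep_sec_ctl(int keyidx, int is_104_bit)
{
	uint8_t sec = SEC_WEP;

	if (is_104_bit)
		sec |= SEC_KEY128;
	/* pack the 2-bit key index into the upper bits */
	sec |= (keyidx & SEC_KEY_MSK) << SEC_KEY_POS;
	return sec;
}

int main(void)
{
	printf("0x%02x\n", (unsigned)build_wep_sec_ctl(2, 1));	/* 0x89 */
	return 0;
}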
318
319/*
320 * start REPLY_TX command process
321 */
322int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
323{
324 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
325 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
326 struct iwl_station_priv *sta_priv = NULL;
327 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
328 struct iwl_tx_cmd *tx_cmd;
329 int txq_id;
330
331 u16 seq_number = 0;
332 __le16 fc;
333 u8 hdr_len;
334 u16 len;
335 u8 sta_id;
336 u8 tid = 0;
337 unsigned long flags;
338 bool is_agg = false;
339
340 /*
341 * If the frame needs to go out off-channel, then
342 * we'll have put the PAN context to that channel,
343 * so make the frame go out there.
344 */
345 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
346 ctx = &priv->contexts[IWL_RXON_CTX_PAN];
347 else if (info->control.vif)
348 ctx = iwl_rxon_ctx_from_vif(info->control.vif);
349
350 spin_lock_irqsave(&priv->lock, flags);
351 if (iwl_is_rfkill(priv)) {
352 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
353 goto drop_unlock_priv;
354 }
355
356 fc = hdr->frame_control;
357
358#ifdef CONFIG_IWLWIFI_DEBUG
359 if (ieee80211_is_auth(fc))
360 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
361 else if (ieee80211_is_assoc_req(fc))
362 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
363 else if (ieee80211_is_reassoc_req(fc))
364 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
365#endif
366
367 hdr_len = ieee80211_hdrlen(fc);
368
369 /* For management frames use the broadcast id so as not to break aggregation */
370 if (!ieee80211_is_data(fc))
371 sta_id = ctx->bcast_sta_id;
372 else {
373 /* Find index into station table for destination station */
374 sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
375 if (sta_id == IWL_INVALID_STATION) {
376 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
377 hdr->addr1);
378 goto drop_unlock_priv;
379 }
380 }
381
382 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
383
384 if (info->control.sta)
385 sta_priv = (void *)info->control.sta->drv_priv;
386
387 if (sta_priv && sta_priv->asleep &&
388 (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
389 /*
390 * This sends an asynchronous command to the device,
391 * but we can rely on it being processed before the
392 * next frame is processed -- and the next frame to
393 * this station is the one that will consume this
394 * counter.
395 * For now set the counter to just 1 since we do not
396 * support uAPSD yet.
397 */
398 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
399 }
400
401 /*
402 * Send this frame after DTIM -- there's a special queue
403 * reserved for this for contexts that support AP mode.
404 */
405 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
406 txq_id = ctx->mcast_queue;
407 /*
408 * The microcode will clear the more data
409 * bit in the last frame it transmits.
410 */
411 hdr->frame_control |=
412 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
413 } else
414 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
415
416 /* irqs already disabled/saved above when locking priv->lock */
417 spin_lock(&priv->sta_lock);
418
419 if (ieee80211_is_data_qos(fc)) {
420 u8 *qc = NULL;
421 qc = ieee80211_get_qos_ctl(hdr);
422 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
423
424 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
425 goto drop_unlock_sta;
426
427 seq_number = priv->stations[sta_id].tid[tid].seq_number;
428 seq_number &= IEEE80211_SCTL_SEQ;
429 hdr->seq_ctrl = hdr->seq_ctrl &
430 cpu_to_le16(IEEE80211_SCTL_FRAG);
431 hdr->seq_ctrl |= cpu_to_le16(seq_number);
432 seq_number += 0x10;
433 /* aggregation is on for this <sta,tid> */
434 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
435 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
436 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
437 is_agg = true;
438 }
439 }
440
441 tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id);
442 if (unlikely(!tx_cmd))
443 goto drop_unlock_sta;
444
445 /* Copy MAC header from skb into command buffer */
446 memcpy(tx_cmd->hdr, hdr, hdr_len);
447
448 /* Total # bytes to be transmitted */
449 len = (u16)skb->len;
450 tx_cmd->len = cpu_to_le16(len);
451
452 if (info->control.hw_key)
453 iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
454
455 /* TODO need this for burst mode later on */
456 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
457 iwl_dbg_log_tx_data_frame(priv, len, hdr);
458
459 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
460
461 iwl_update_stats(priv, true, fc, len);
462
463 if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx))
464 goto drop_unlock_sta;
465
466 if (ieee80211_is_data_qos(fc)) {
467 priv->stations[sta_id].tid[tid].tfds_in_queue++;
468 if (!ieee80211_has_morefrags(fc))
469 priv->stations[sta_id].tid[tid].seq_number = seq_number;
470 }
471
472 spin_unlock(&priv->sta_lock);
473 spin_unlock_irqrestore(&priv->lock, flags);
474
475 /*
476 * Avoid atomic ops if it isn't an associated client.
477 * Also, if this is a packet for aggregation, don't
478 * increase the counter because the ucode will stop
479 * aggregation queues when their respective station
480 * goes to sleep.
481 */
482 if (sta_priv && sta_priv->client && !is_agg)
483 atomic_inc(&sta_priv->pending_frames);
484
485 return 0;
486
487drop_unlock_sta:
488 spin_unlock(&priv->sta_lock);
489drop_unlock_priv:
490 spin_unlock_irqrestore(&priv->lock, flags);
491 return -1;
492}
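
/*
 * Standalone sketch (not driver code) of the QoS sequence-number
 * bookkeeping in iwlagn_tx_skb(): the 802.11 sequence-control field keeps
 * the fragment number in its low 4 bits and the sequence number in the
 * upper 12, so the per-TID counter advances in steps of 0x10 and is masked
 * before being merged back in. The little-endian conversions used by the
 * real code are omitted here.
 */
#include <stdio.h>
#include <stdint.h>

#define SCTL_FRAG 0x000f	/* fragment number: low 4 bits */
#define SCTL_SEQ  0xfff0	/* sequence number: upper 12 bits */

int main(void)
{
	uint16_t tid_counter = 0x0ab0;	/* per-TID driver counter */
	uint16_t seq_ctrl = 0x1234;	/* whatever was in the header */

	/* keep the fragment bits, overwrite the sequence bits */
	seq_ctrl = (seq_ctrl & SCTL_FRAG) | (tid_counter & SCTL_SEQ);
	tid_counter += 0x10;		/* next frame gets the next number */

	printf("seq_ctrl=0x%04x next_counter=0x%04x\n",
	       (unsigned)seq_ctrl, (unsigned)tid_counter); /* 0x0ab4, 0x0ac0 */
	return 0;
}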
493
494/*
495 * Find first available (lowest unused) Tx Queue, mark it "active".
496 * Called only when finding queue for aggregation.
497 * Should never return anything < 7, because queues 0-6 should
498 * already be in use as EDCA ACs (0-3), command (4), reserved (5, 6).
499 */
500static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
501{
502 int txq_id;
503
504 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
505 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
506 return txq_id;
507 return -1;
508}
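
/*
 * Standalone sketch (not driver code) of the queue-allocation loop above:
 * scan a bitmask of active queues and claim the first clear bit. The
 * kernel's test_and_set_bit() does this atomically; this userspace version
 * is not atomic and only illustrates the search.
 */
#include <stdio.h>

#define MAX_TXQ 20

/* Return the first unused queue id and mark it active, or -1 if full. */
static int activate_free_queue(unsigned long *active_msk)
{
	int txq_id;

	for (txq_id = 0; txq_id < MAX_TXQ; txq_id++) {
		if (!(*active_msk & (1UL << txq_id))) {
			*active_msk |= 1UL << txq_id;	/* claim it */
			return txq_id;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long msk = 0x7f;	/* queues 0-6 already in use */
	printf("%d\n", activate_free_queue(&msk));	/* prints 7 */
	return 0;
}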
509
510int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
511 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
512{
513 int sta_id;
514 int tx_fifo;
515 int txq_id;
516 int ret;
517 unsigned long flags;
518 struct iwl_tid_data *tid_data;
519
520 tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
521 if (unlikely(tx_fifo < 0))
522 return tx_fifo;
523
524 IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
525 sta->addr, tid);
526
527 sta_id = iwl_sta_id(sta);
528 if (sta_id == IWL_INVALID_STATION) {
529 IWL_ERR(priv, "Start AGG on invalid station\n");
530 return -ENXIO;
531 }
532 if (unlikely(tid >= MAX_TID_COUNT))
533 return -EINVAL;
534
535 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
536 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
537 return -ENXIO;
538 }
539
540 txq_id = iwlagn_txq_ctx_activate_free(priv);
541 if (txq_id == -1) {
542 IWL_ERR(priv, "No free aggregation queue available\n");
543 return -ENXIO;
544 }
545
546 spin_lock_irqsave(&priv->sta_lock, flags);
547 tid_data = &priv->stations[sta_id].tid[tid];
548 *ssn = SEQ_TO_SN(tid_data->seq_number);
549 tid_data->agg.txq_id = txq_id;
550 tid_data->agg.tx_fifo = tx_fifo;
551 iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
552 spin_unlock_irqrestore(&priv->sta_lock, flags);
553
554 ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
555 if (ret)
556 return ret;
557
558 spin_lock_irqsave(&priv->sta_lock, flags);
559 tid_data = &priv->stations[sta_id].tid[tid];
560 if (tid_data->tfds_in_queue == 0) {
561 IWL_DEBUG_HT(priv, "HW queue is empty\n");
562 tid_data->agg.state = IWL_AGG_ON;
563 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
564 } else {
565 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
566 tid_data->tfds_in_queue);
567 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
568 }
569 spin_unlock_irqrestore(&priv->sta_lock, flags);
570 return ret;
571}
572
573int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
574 struct ieee80211_sta *sta, u16 tid)
575{
576 int tx_fifo_id, txq_id, sta_id, ssn;
577 struct iwl_tid_data *tid_data;
578 int write_ptr, read_ptr;
579 unsigned long flags;
580
581 tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
582 if (unlikely(tx_fifo_id < 0))
583 return tx_fifo_id;
584
585 sta_id = iwl_sta_id(sta);
586
587 if (sta_id == IWL_INVALID_STATION) {
588 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
589 return -ENXIO;
590 }
591
592 spin_lock_irqsave(&priv->sta_lock, flags);
593
594 tid_data = &priv->stations[sta_id].tid[tid];
595 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
596 txq_id = tid_data->agg.txq_id;
597
598 switch (priv->stations[sta_id].tid[tid].agg.state) {
599 case IWL_EMPTYING_HW_QUEUE_ADDBA:
600 /*
601 * This can happen if the peer stops aggregation
602 * again before we've had a chance to drain the
603 * queue we selected previously, i.e. before the
604 * session was really started completely.
605 */
606 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
607 goto turn_off;
608 case IWL_AGG_ON:
609 break;
610 default:
611 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
612 }
613
614 write_ptr = priv->txq[txq_id].q.write_ptr;
615 read_ptr = priv->txq[txq_id].q.read_ptr;
616
617 /* The queue is not empty */
618 if (write_ptr != read_ptr) {
619 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
620 priv->stations[sta_id].tid[tid].agg.state =
621 IWL_EMPTYING_HW_QUEUE_DELBA;
622 spin_unlock_irqrestore(&priv->sta_lock, flags);
623 return 0;
624 }
625
626 IWL_DEBUG_HT(priv, "HW queue is empty\n");
627 turn_off:
628 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
629
630 /* do not restore/save irqs */
631 spin_unlock(&priv->sta_lock);
632 spin_lock(&priv->lock);
633
634 /*
635 * The only reason this call can fail is a queue number out of range,
636 * which can happen if the uCode is reloaded and all the station
637 * information is lost. If it is out of range, there is no need
638 * to deactivate the uCode queue; just return "success" to let
639 * mac80211 clean up its own data.
640 */
641 trans_txq_agg_disable(&priv->trans, txq_id, ssn, tx_fifo_id);
642 spin_unlock_irqrestore(&priv->lock, flags);
643
644 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
645
646 return 0;
647}
648
649int iwlagn_txq_check_empty(struct iwl_priv *priv,
650 int sta_id, u8 tid, int txq_id)
651{
652 struct iwl_queue *q = &priv->txq[txq_id].q;
653 u8 *addr = priv->stations[sta_id].sta.sta.addr;
654 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
655 struct iwl_rxon_context *ctx;
656
657 ctx = &priv->contexts[priv->stations[sta_id].ctxid];
658
659 lockdep_assert_held(&priv->sta_lock);
660
661 switch (priv->stations[sta_id].tid[tid].agg.state) {
662 case IWL_EMPTYING_HW_QUEUE_DELBA:
663 /* We are reclaiming the last packet of the */
664 /* aggregated HW queue */
665 if ((txq_id == tid_data->agg.txq_id) &&
666 (q->read_ptr == q->write_ptr)) {
667 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
668 int tx_fifo = get_fifo_from_tid(ctx, tid);
669 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
670 trans_txq_agg_disable(&priv->trans, txq_id,
671 ssn, tx_fifo);
672 tid_data->agg.state = IWL_AGG_OFF;
673 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
674 }
675 break;
676 case IWL_EMPTYING_HW_QUEUE_ADDBA:
677 /* We are reclaiming the last packet of the queue */
678 if (tid_data->tfds_in_queue == 0) {
679 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
680 tid_data->agg.state = IWL_AGG_ON;
681 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
682 }
683 break;
684 }
685
686 return 0;
687}
688
689static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
690 struct iwl_rxon_context *ctx,
691 const u8 *addr1)
692{
693 struct ieee80211_sta *sta;
694 struct iwl_station_priv *sta_priv;
695
696 rcu_read_lock();
697 sta = ieee80211_find_sta(ctx->vif, addr1);
698 if (sta) {
699 sta_priv = (void *)sta->drv_priv;
700 /* avoid atomic ops if this isn't a client */
701 if (sta_priv->client &&
702 atomic_dec_return(&sta_priv->pending_frames) == 0)
703 ieee80211_sta_block_awake(priv->hw, sta, false);
704 }
705 rcu_read_unlock();
706}
707
708static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
709 bool is_agg)
710{
711 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
712
713 if (!is_agg)
714 iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
715
716 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
717}
718
719int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
720{
721 struct iwl_tx_queue *txq = &priv->txq[txq_id];
722 struct iwl_queue *q = &txq->q;
723 struct iwl_tx_info *tx_info;
724 int nfreed = 0;
725 struct ieee80211_hdr *hdr;
726
727 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
728 IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
729 "index %d is out of range [0-%d] %d %d.\n", __func__,
730 txq_id, index, q->n_bd, q->write_ptr, q->read_ptr);
731 return 0;
732 }
733
734 for (index = iwl_queue_inc_wrap(index, q->n_bd);
735 q->read_ptr != index;
736 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
737
738 tx_info = &txq->txb[txq->q.read_ptr];
739
740 if (WARN_ON_ONCE(tx_info->skb == NULL))
741 continue;
742
743 hdr = (struct ieee80211_hdr *)tx_info->skb->data;
744 if (ieee80211_is_data_qos(hdr->frame_control))
745 nfreed++;
746
747 iwlagn_tx_status(priv, tx_info,
748 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
749 tx_info->skb = NULL;
750
751 iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
752
753 iwlagn_txq_free_tfd(priv, txq, txq->q.read_ptr);
754 }
755 return nfreed;
756}
757
758/**
759 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
760 *
761 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
762 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
763 */
764static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
765 struct iwl_ht_agg *agg,
766 struct iwl_compressed_ba_resp *ba_resp)
767
768{
769 int sh;
770 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
771 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
772 struct ieee80211_tx_info *info;
773 u64 bitmap, sent_bitmap;
774
775 if (unlikely(!agg->wait_for_ba)) {
776 if (unlikely(ba_resp->bitmap))
777 IWL_ERR(priv, "Received BA when not expected\n");
778 return -EINVAL;
779 }
780
781 /* Mark that the expected block-ack response arrived */
782 agg->wait_for_ba = 0;
783 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
784
785 /* Calculate shift to align block-ack bits with our Tx window bits */
786 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
787 if (sh < 0)
788 sh += 0x100;
789
790 /*
791 * Check for success or failure according to the
792 * transmitted bitmap and block-ack bitmap
793 */
794 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
795 sent_bitmap = bitmap & agg->bitmap;
796
797 /* Sanity check values reported by uCode */
798 if (ba_resp->txed_2_done > ba_resp->txed) {
799 IWL_DEBUG_TX_REPLY(priv,
800 "bogus sent(%d) and ack(%d) count\n",
801 ba_resp->txed, ba_resp->txed_2_done);
802 /*
803 * clamp txed down to txed_2_done
804 * so it won't impact rate scaling
805 */
806 ba_resp->txed = ba_resp->txed_2_done;
807 }
808 IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
809 ba_resp->txed, ba_resp->txed_2_done);
810
811 /* Find the first ACKed frame to store the TX status */
812 while (sent_bitmap && !(sent_bitmap & 1)) {
813 agg->start_idx = (agg->start_idx + 1) & 0xff;
814 sent_bitmap >>= 1;
815 }
816
817 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
818 memset(&info->status, 0, sizeof(info->status));
819 info->flags |= IEEE80211_TX_STAT_ACK;
820 info->flags |= IEEE80211_TX_STAT_AMPDU;
821 info->status.ampdu_ack_len = ba_resp->txed_2_done;
822 info->status.ampdu_len = ba_resp->txed;
823 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
824
825 return 0;
826}
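
/*
 * Standalone sketch (not driver code) of the bitmap handling above: the
 * block-ack bitmap reported by the firmware is relative to the BA window's
 * start sequence number, so it is shifted to line up with the driver's own
 * transmit window and then ANDed with the record of what was sent; the
 * surviving bits are the ACKed frames. The indices and bitmaps below are
 * arbitrary, and the modulo-256 wrap handling is reduced to its essence.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int start_idx = 5;		/* driver's first frame in flight */
	int ba_start = 3;		/* index the BA bitmap starts at */
	uint64_t ba_bitmap = 0x3cULL;	/* ACK bits relative to ba_start */
	uint64_t sent_bitmap = 0x0fULL;	/* sent bits relative to start_idx */

	/* shift the BA bitmap so its bit 0 lines up with start_idx */
	int sh = start_idx - ba_start;
	if (sh < 0)
		sh += 0x100;		/* sequence space wrapped around */
	uint64_t acked = (ba_bitmap >> sh) & sent_bitmap;

	int n_acked = 0;
	for (uint64_t b = acked; b; b >>= 1)
		n_acked += (int)(b & 1);

	printf("acked mask=0x%llx count=%d\n",
	       (unsigned long long)acked, n_acked);	/* 0xf, 4 */
	return 0;
}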
827
828/**
829 * translate ucode response to mac80211 tx status control values
830 */
831void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
832 struct ieee80211_tx_info *info)
833{
834 struct ieee80211_tx_rate *r = &info->control.rates[0];
835
836 info->antenna_sel_tx =
837 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
838 if (rate_n_flags & RATE_MCS_HT_MSK)
839 r->flags |= IEEE80211_TX_RC_MCS;
840 if (rate_n_flags & RATE_MCS_GF_MSK)
841 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
842 if (rate_n_flags & RATE_MCS_HT40_MSK)
843 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
844 if (rate_n_flags & RATE_MCS_DUP_MSK)
845 r->flags |= IEEE80211_TX_RC_DUP_DATA;
846 if (rate_n_flags & RATE_MCS_SGI_MSK)
847 r->flags |= IEEE80211_TX_RC_SHORT_GI;
848 r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
849}
850
851/**
852 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
853 *
854 * Handles block-acknowledge notification from device, which reports success
855 * of frames sent via aggregation.
856 */
857void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
858 struct iwl_rx_mem_buffer *rxb)
859{
860 struct iwl_rx_packet *pkt = rxb_addr(rxb);
861 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
862 struct iwl_tx_queue *txq = NULL;
863 struct iwl_ht_agg *agg;
864 int index;
865 int sta_id;
866 int tid;
867 unsigned long flags;
868
869 /* "flow" corresponds to Tx queue */
870 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
871
872 /* "ssn" is start of block-ack Tx window, corresponds to index
873 * (in Tx queue's circular buffer) of first TFD/frame in window */
874 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
875
876 if (scd_flow >= priv->hw_params.max_txq_num) {
877 IWL_ERR(priv,
878 "BUG_ON scd_flow is bigger than number of queues\n");
879 return;
880 }
881
882 txq = &priv->txq[scd_flow];
883 sta_id = ba_resp->sta_id;
884 tid = ba_resp->tid;
885 agg = &priv->stations[sta_id].tid[tid].agg;
886 if (unlikely(agg->txq_id != scd_flow)) {
887 /*
888 * FIXME: this is a uCode bug which needs to be addressed;
889 * log the information and return for now.
890 * Since it can happen very often, and in order not to
891 * flood the syslog, the logging is not enabled by default.
892 */
893 IWL_DEBUG_TX_REPLY(priv,
894 "BA scd_flow %d does not match txq_id %d\n",
895 scd_flow, agg->txq_id);
896 return;
897 }
898
899 /* Find index just before block-ack window */
900 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
901
902 spin_lock_irqsave(&priv->sta_lock, flags);
903
904 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
905 "sta_id = %d\n",
906 agg->wait_for_ba,
907 (u8 *) &ba_resp->sta_addr_lo32,
908 ba_resp->sta_id);
909 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
910 "%d, scd_ssn = %d\n",
911 ba_resp->tid,
912 ba_resp->seq_ctl,
913 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
914 ba_resp->scd_flow,
915 ba_resp->scd_ssn);
916 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
917 agg->start_idx,
918 (unsigned long long)agg->bitmap);
919
920 /* Update driver's record of ACK vs. not for each frame in window */
921 iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
922
923 /* Release all TFDs before the SSN, i.e. all TFDs in front of
924 * block-ack window (we assume that they've been successfully
925 * transmitted ... if not, it's too late anyway). */
926 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
927 /* calculate mac80211 ampdu sw queue to wake */
928 int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
929 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
930
931 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
932 priv->mac80211_registered &&
933 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
934 iwl_wake_queue(priv, txq);
935
936 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
937 }
938
939 spin_unlock_irqrestore(&priv->sta_lock, flags);
940}
941
942#ifdef CONFIG_IWLWIFI_DEBUG
943const char *iwl_get_tx_fail_reason(u32 status)
944{
945#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
946#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
947
948 switch (status & TX_STATUS_MSK) {
949 case TX_STATUS_SUCCESS:
950 return "SUCCESS";
951 TX_STATUS_POSTPONE(DELAY);
952 TX_STATUS_POSTPONE(FEW_BYTES);
953 TX_STATUS_POSTPONE(BT_PRIO);
954 TX_STATUS_POSTPONE(QUIET_PERIOD);
955 TX_STATUS_POSTPONE(CALC_TTAK);
956 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
957 TX_STATUS_FAIL(SHORT_LIMIT);
958 TX_STATUS_FAIL(LONG_LIMIT);
959 TX_STATUS_FAIL(FIFO_UNDERRUN);
960 TX_STATUS_FAIL(DRAIN_FLOW);
961 TX_STATUS_FAIL(RFKILL_FLUSH);
962 TX_STATUS_FAIL(LIFE_EXPIRE);
963 TX_STATUS_FAIL(DEST_PS);
964 TX_STATUS_FAIL(HOST_ABORTED);
965 TX_STATUS_FAIL(BT_RETRY);
966 TX_STATUS_FAIL(STA_INVALID);
967 TX_STATUS_FAIL(FRAG_DROPPED);
968 TX_STATUS_FAIL(TID_DISABLE);
969 TX_STATUS_FAIL(FIFO_FLUSHED);
970 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
971 TX_STATUS_FAIL(PASSIVE_NO_RX);
972 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
973 }
974
975 return "UNKNOWN";
976
977#undef TX_STATUS_FAIL
978#undef TX_STATUS_POSTPONE
979}
980#endif /* CONFIG_IWLWIFI_DEBUG */
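
/*
 * Standalone sketch (not driver code) of the macro trick used in
 * iwl_get_tx_fail_reason(): the preprocessor's # operator turns each case
 * label into its own string, so the switch and the returned text cannot
 * drift apart. The enum and names below are made up for illustration.
 */
#include <stdio.h>

enum status { ST_SUCCESS, ST_FAIL_SHORT_LIMIT, ST_FAIL_LONG_LIMIT };

static const char *status_name(enum status s)
{
#define NAME(x) case ST_##x: return #x
	switch (s) {
	NAME(SUCCESS);
	NAME(FAIL_SHORT_LIMIT);
	NAME(FAIL_LONG_LIMIT);
	}
#undef NAME
	return "UNKNOWN";
}

int main(void)
{
	/* prints "FAIL_LONG_LIMIT" */
	printf("%s\n", status_name(ST_FAIL_LONG_LIMIT));
	return 0;
}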
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
new file mode 100644
index 00000000000..a5c5a0accd5
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -0,0 +1,580 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-agn-hw.h"
40#include "iwl-agn.h"
41#include "iwl-agn-calib.h"
42#include "iwl-trans.h"
43
44static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
45 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
46 0, COEX_UNASSOC_IDLE_FLAGS},
47 {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
48 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
49 {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
50 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
51 {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
52 0, COEX_CALIBRATION_FLAGS},
53 {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
54 0, COEX_PERIODIC_CALIBRATION_FLAGS},
55 {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
56 0, COEX_CONNECTION_ESTAB_FLAGS},
57 {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
58 0, COEX_ASSOCIATED_IDLE_FLAGS},
59 {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
60 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
61 {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
62 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
63 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
64 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
65 {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
66 {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
67 {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
68 0, COEX_STAND_ALONE_DEBUG_FLAGS},
69 {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
70 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
71 {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
72 {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
73};
74
75/*
76 * ucode
77 */
78static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
79 struct fw_desc *image, u32 dst_addr)
80{
81 dma_addr_t phy_addr = image->p_addr;
82 u32 byte_cnt = image->len;
83 int ret;
84
85 priv->ucode_write_complete = 0;
86
87 iwl_write_direct32(priv,
88 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
89 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
90
91 iwl_write_direct32(priv,
92 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
93
94 iwl_write_direct32(priv,
95 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
96 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
97
98 iwl_write_direct32(priv,
99 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
100 (iwl_get_dma_hi_addr(phy_addr)
101 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
102
103 iwl_write_direct32(priv,
104 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
105 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
106 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
107 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
108
109 iwl_write_direct32(priv,
110 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
111 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
112 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
113 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
114
115 IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
116 ret = wait_event_timeout(priv->wait_command_queue,
117 priv->ucode_write_complete, 5 * HZ);
118 if (!ret) {
119 IWL_ERR(priv, "Could not load the %s uCode section\n",
120 name);
121 return -ETIMEDOUT;
122 }
123
124 return 0;
125}
126
127static int iwlagn_load_given_ucode(struct iwl_priv *priv,
128 struct fw_img *image)
129{
130 int ret = 0;
131
132 ret = iwlagn_load_section(priv, "INST", &image->code,
133 IWLAGN_RTC_INST_LOWER_BOUND);
134 if (ret)
135 return ret;
136
137 return iwlagn_load_section(priv, "DATA", &image->data,
138 IWLAGN_RTC_DATA_LOWER_BOUND);
139}
140
141/*
142 * Calibration
143 */
144static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
145{
146 struct iwl_calib_xtal_freq_cmd cmd;
147 __le16 *xtal_calib =
148 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
149
150 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
151 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
152 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
153 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
154 (u8 *)&cmd, sizeof(cmd));
155}
156
157static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
158{
159 struct iwl_calib_temperature_offset_cmd cmd;
160 __le16 *offset_calib =
161 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE);
162
163 memset(&cmd, 0, sizeof(cmd));
164 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
165 memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
166 if (!(cmd.radio_sensor_offset))
167 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
168
169 IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
170 le16_to_cpu(cmd.radio_sensor_offset));
171 return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
172 (u8 *)&cmd, sizeof(cmd));
173}
174
175static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
176{
177 struct iwl_calib_cfg_cmd calib_cfg_cmd;
178 struct iwl_host_cmd cmd = {
179 .id = CALIBRATION_CFG_CMD,
180 .len = { sizeof(struct iwl_calib_cfg_cmd), },
181 .data = { &calib_cfg_cmd, },
182 };
183
184 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
185 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
186 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
187 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
188 calib_cfg_cmd.ucd_calib_cfg.flags =
189 IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
190
191 return trans_send_cmd(&priv->trans, &cmd);
192}
193
194void iwlagn_rx_calib_result(struct iwl_priv *priv,
195 struct iwl_rx_mem_buffer *rxb)
196{
197 struct iwl_rx_packet *pkt = rxb_addr(rxb);
198 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
199 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
200 int index;
201
202 /* the reported length includes the 4-byte length field itself; subtract it */
203 len -= 4;
204
205 /* Define the order in which the results will be sent to the runtime
206 * uCode. iwl_send_calib_results sends them in a row according to
207 * their index. We sort them here
208 */
209 switch (hdr->op_code) {
210 case IWL_PHY_CALIBRATE_DC_CMD:
211 index = IWL_CALIB_DC;
212 break;
213 case IWL_PHY_CALIBRATE_LO_CMD:
214 index = IWL_CALIB_LO;
215 break;
216 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
217 index = IWL_CALIB_TX_IQ;
218 break;
219 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
220 index = IWL_CALIB_TX_IQ_PERD;
221 break;
222 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
223 index = IWL_CALIB_BASE_BAND;
224 break;
225 default:
226 IWL_ERR(priv, "Unknown calibration notification %d\n",
227 hdr->op_code);
228 return;
229 }
230 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
231}
232
233int iwlagn_init_alive_start(struct iwl_priv *priv)
234{
235 int ret;
236
237 if (priv->cfg->bt_params &&
238 priv->cfg->bt_params->advanced_bt_coexist) {
239 /*
240 * Tell the uCode we are ready to perform calibration;
241 * this needs to happen before any calibration.
242 * No need to close the envelope since we are going
243 * to load the runtime uCode later.
244 */
245 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
246 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
247 if (ret)
248 return ret;
249
250 }
251
252 ret = iwlagn_send_calib_cfg(priv);
253 if (ret)
254 return ret;
255
256 /**
257 * temperature offset calibration is only needed for runtime ucode,
258 * so prepare the value now.
259 */
260 if (priv->cfg->need_temp_offset_calib)
261 return iwlagn_set_temperature_offset_calib(priv);
262
263 return 0;
264}
265
266static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
267{
268 struct iwl_wimax_coex_cmd coex_cmd;
269
270 if (priv->cfg->base_params->support_wimax_coexist) {
271 /* UnMask wake up src at associated sleep */
272 coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
273
274 /* UnMask wake up src at unassociated sleep */
275 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
276 memcpy(coex_cmd.sta_prio, cu_priorities,
277 sizeof(struct iwl_wimax_coex_event_entry) *
278 COEX_NUM_OF_EVENTS);
279
280 /* enabling the coexistence feature */
281 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
282
283 /* enabling the priorities tables */
284 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
285 } else {
286 /* coexistence is disabled */
287 memset(&coex_cmd, 0, sizeof(coex_cmd));
288 }
289 return trans_send_cmd_pdu(&priv->trans,
290 COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
291 sizeof(coex_cmd), &coex_cmd);
292}
293
294static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
295 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
296 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
297 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
298 (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
299 ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
300 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
301 ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
302 (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
303 ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
304 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
305 ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
306 (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
307 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
308 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
309 ((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
310 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
311 ((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
312 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
313 0, 0, 0, 0, 0, 0, 0
314};
315
316void iwlagn_send_prio_tbl(struct iwl_priv *priv)
317{
318 struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
319
320 memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
321 sizeof(iwlagn_bt_prio_tbl));
322 if (trans_send_cmd_pdu(&priv->trans,
323 REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
324 sizeof(prio_tbl_cmd), &prio_tbl_cmd))
325 IWL_ERR(priv, "failed to send BT prio tbl command\n");
326}
327
328int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
329{
330 struct iwl_bt_coex_prot_env_cmd env_cmd;
331 int ret;
332
333 env_cmd.action = action;
334 env_cmd.type = type;
335 ret = trans_send_cmd_pdu(&priv->trans,
336 REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
337 sizeof(env_cmd), &env_cmd);
338 if (ret)
339 IWL_ERR(priv, "failed to send BT env command\n");
340 return ret;
341}
342
343
344static int iwlagn_alive_notify(struct iwl_priv *priv)
345{
346 int ret;
347
348 trans_tx_start(&priv->trans);
349
350 ret = iwlagn_send_wimax_coex(priv);
351 if (ret)
352 return ret;
353
354 ret = iwlagn_set_Xtal_calib(priv);
355 if (ret)
356 return ret;
357
358 return iwl_send_calib_results(priv);
359}
360
361
362/**
363 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
364 * using sample data 100 bytes apart. If these sample points are good,
365 * it's a pretty good bet that everything between them is good, too.
366 */
367static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
368 struct fw_desc *fw_desc)
369{
370 __le32 *image = (__le32 *)fw_desc->v_addr;
371 u32 len = fw_desc->len;
372 u32 val;
373 u32 i;
374
375 IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
376
377 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
378 /* read data comes through single port, auto-incr addr */
379 /* NOTE: Use the debugless read so we don't flood kernel log
380 * if IWL_DL_IO is set */
381 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
382 i + IWLAGN_RTC_INST_LOWER_BOUND);
383 val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
384 if (val != le32_to_cpu(*image))
385 return -EIO;
386 }
387
388 return 0;
389}
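
/*
 * Standalone sketch (not driver code) of the sparse-verification idea
 * above: instead of comparing every word of the loaded image, compare one
 * 32-bit word every 100 bytes; if all the samples match, the rest is very
 * likely intact too. Here both "images" are plain memory buffers, so the
 * example also shows what sampling can miss.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Compare one 32-bit word every 100 bytes; return 0 if all samples match. */
static int verify_sparse(const uint8_t *loaded, const uint8_t *ref, size_t len)
{
	size_t i;

	for (i = 0; i + sizeof(uint32_t) <= len; i += 100) {
		uint32_t a, b;

		memcpy(&a, loaded + i, sizeof(a));
		memcpy(&b, ref + i, sizeof(b));
		if (a != b)
			return -1;
	}
	return 0;
}

int main(void)
{
	uint8_t ref[1000], img[1000];

	memset(ref, 0xaa, sizeof(ref));
	memcpy(img, ref, sizeof(img));

	img[450] ^= 0xff;	/* corrupt a byte that is never sampled */
	printf("%d\n", verify_sparse(img, ref, sizeof(img)));	/* 0: missed */

	img[400] ^= 0xff;	/* corrupt a sampled word */
	printf("%d\n", verify_sparse(img, ref, sizeof(img)));	/* -1: caught */
	return 0;
}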
390
391static void iwl_print_mismatch_inst(struct iwl_priv *priv,
392 struct fw_desc *fw_desc)
393{
394 __le32 *image = (__le32 *)fw_desc->v_addr;
395 u32 len = fw_desc->len;
396 u32 val;
397 u32 offs;
398 int errors = 0;
399
400 IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
401
402 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
403 IWLAGN_RTC_INST_LOWER_BOUND);
404
405 for (offs = 0;
406 offs < len && errors < 20;
407 offs += sizeof(u32), image++) {
408 /* read data comes through single port, auto-incr addr */
409 val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
410 if (val != le32_to_cpu(*image)) {
411 IWL_ERR(priv, "uCode INST section at "
412 "offset 0x%x, is 0x%x, s/b 0x%x\n",
413 offs, val, le32_to_cpu(*image));
414 errors++;
415 }
416 }
417}
418
419/**
420 * iwl_verify_ucode - determine which instruction image is in SRAM,
421 * and verify its contents
422 */
423static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
424{
425 if (!iwlcore_verify_inst_sparse(priv, &img->code)) {
426 IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
427 return 0;
428 }
429
430 IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
431
432 iwl_print_mismatch_inst(priv, &img->code);
433 return -EIO;
434}
435
436struct iwlagn_alive_data {
437 bool valid;
438 u8 subtype;
439};
440
441static void iwlagn_alive_fn(struct iwl_priv *priv,
442 struct iwl_rx_packet *pkt,
443 void *data)
444{
445 struct iwlagn_alive_data *alive_data = data;
446 struct iwl_alive_resp *palive;
447
448 palive = &pkt->u.alive_frame;
449
450 IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
451 "0x%01X 0x%01X\n",
452 palive->is_valid, palive->ver_type,
453 palive->ver_subtype);
454
455 priv->device_pointers.error_event_table =
456 le32_to_cpu(palive->error_event_table_ptr);
457 priv->device_pointers.log_event_table =
458 le32_to_cpu(palive->log_event_table_ptr);
459
460 alive_data->subtype = palive->ver_subtype;
461 alive_data->valid = palive->is_valid == UCODE_VALID_OK;
462}
463
464#define UCODE_ALIVE_TIMEOUT HZ
465#define UCODE_CALIB_TIMEOUT (2*HZ)
466
467int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
468 struct fw_img *image,
469 enum iwlagn_ucode_type ucode_type)
470{
471 struct iwl_notification_wait alive_wait;
472 struct iwlagn_alive_data alive_data;
473 int ret;
474 enum iwlagn_ucode_type old_type;
475
476 ret = trans_start_device(&priv->trans);
477 if (ret)
478 return ret;
479
480 iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
481 iwlagn_alive_fn, &alive_data);
482
483 old_type = priv->ucode_type;
484 priv->ucode_type = ucode_type;
485
486 ret = iwlagn_load_given_ucode(priv, image);
487 if (ret) {
488 priv->ucode_type = old_type;
489 iwlagn_remove_notification(priv, &alive_wait);
490 return ret;
491 }
492
493 trans_kick_nic(&priv->trans);
494
495 /*
496 * Some things may run in the background now, but we
497 * just wait for the ALIVE notification here.
498 */
499 ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
500 if (ret) {
501 priv->ucode_type = old_type;
502 return ret;
503 }
504
505 if (!alive_data.valid) {
506 IWL_ERR(priv, "Loaded ucode is not valid!\n");
507 priv->ucode_type = old_type;
508 return -EIO;
509 }
510
511 /*
512 * This step takes a long time (60-80ms!!) and
513 * WoWLAN image should be loaded quickly, so
514 * skip it for WoWLAN.
515 */
516 if (ucode_type != IWL_UCODE_WOWLAN) {
517 ret = iwl_verify_ucode(priv, image);
518 if (ret) {
519 priv->ucode_type = old_type;
520 return ret;
521 }
522
523 /* delay a bit to give rfkill time to run */
524 msleep(5);
525 }
526
527 ret = iwlagn_alive_notify(priv);
528 if (ret) {
529 IWL_WARN(priv,
530 "Could not complete ALIVE transition: %d\n", ret);
531 priv->ucode_type = old_type;
532 return ret;
533 }
534
535 return 0;
536}
537
538int iwlagn_run_init_ucode(struct iwl_priv *priv)
539{
540 struct iwl_notification_wait calib_wait;
541 int ret;
542
543 lockdep_assert_held(&priv->mutex);
544
545 /* No init ucode required? Curious, but maybe ok */
546 if (!priv->ucode_init.code.len)
547 return 0;
548
549 if (priv->ucode_type != IWL_UCODE_NONE)
550 return 0;
551
552 iwlagn_init_notification_wait(priv, &calib_wait,
553 CALIBRATION_COMPLETE_NOTIFICATION,
554 NULL, NULL);
555
556 /* Will also start the device */
557 ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
558 IWL_UCODE_INIT);
559 if (ret)
560 goto error;
561
562 ret = iwlagn_init_alive_start(priv);
563 if (ret)
564 goto error;
565
566 /*
567 * Some things may run in the background now, but we
568 * just wait for the calibration complete notification.
569 */
570 ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);
571
572 goto out;
573
574 error:
575 iwlagn_remove_notification(priv, &calib_wait);
576 out:
577 /* Whatever happened, stop the device */
578 trans_stop_device(&priv->trans);
579 return ret;
580}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
new file mode 100644
index 00000000000..f473c019c64
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -0,0 +1,3989 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/slab.h>
33#include <linux/dma-mapping.h>
34#include <linux/delay.h>
35#include <linux/sched.h>
36#include <linux/skbuff.h>
37#include <linux/netdevice.h>
38#include <linux/wireless.h>
39#include <linux/firmware.h>
40#include <linux/etherdevice.h>
41#include <linux/if_arp.h>
42
43#include <net/mac80211.h>
44
45#include <asm/div64.h>
46
47#include "iwl-eeprom.h"
48#include "iwl-dev.h"
49#include "iwl-core.h"
50#include "iwl-io.h"
51#include "iwl-helpers.h"
52#include "iwl-sta.h"
53#include "iwl-agn-calib.h"
54#include "iwl-agn.h"
55#include "iwl-bus.h"
56#include "iwl-trans.h"
57
58/******************************************************************************
59 *
60 * module boiler plate
61 *
62 ******************************************************************************/
63
64/*
65 * module name, copyright, version, etc.
66 */
67#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
68
69#ifdef CONFIG_IWLWIFI_DEBUG
70#define VD "d"
71#else
72#define VD
73#endif
74
75#define DRV_VERSION IWLWIFI_VERSION VD
76
77
78MODULE_DESCRIPTION(DRV_DESCRIPTION);
79MODULE_VERSION(DRV_VERSION);
80MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
81MODULE_LICENSE("GPL");
82
83static int iwlagn_ant_coupling;
84static bool iwlagn_bt_ch_announce = 1;
85
86void iwl_update_chain_flags(struct iwl_priv *priv)
87{
88 struct iwl_rxon_context *ctx;
89
90 for_each_context(priv, ctx) {
91 iwlagn_set_rxon_chain(priv, ctx);
92 if (ctx->active.rx_chain != ctx->staging.rx_chain)
93 iwlagn_commit_rxon(priv, ctx);
94 }
95}
96
97/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
98static void iwl_set_beacon_tim(struct iwl_priv *priv,
99 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
100 u8 *beacon, u32 frame_size)
101{
102 u16 tim_idx;
103 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
104
105 /*
106 * The index is relative to frame start but we start looking at the
107 * variable-length part of the beacon.
108 */
109 tim_idx = mgmt->u.beacon.variable - beacon;
110
111 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
112 while ((tim_idx < (frame_size - 2)) &&
113 (beacon[tim_idx] != WLAN_EID_TIM))
114 tim_idx += beacon[tim_idx+1] + 2;
115
116 /* If TIM field was found, set variables */
117 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
118 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
119 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
120 } else
121 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
122}
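
/*
 * Standalone sketch (not driver code) of the element walk above: 802.11
 * management frames carry information elements as (id, length, payload)
 * triples, so finding the TIM element (id 5) means hopping length + 2
 * bytes at a time through the variable part of the beacon. find_tim() is a
 * hypothetical helper written only for this example.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define EID_TIM 5

/* Return the offset of the TIM element, or -1 if it is not present. */
static int find_tim(const uint8_t *ies, size_t len)
{
	size_t i = 0;

	while (i + 2 <= len) {
		uint8_t id = ies[i];
		uint8_t elen = ies[i + 1];

		if (id == EID_TIM)
			return (int)i;
		i += 2 + elen;		/* hop to the next element */
	}
	return -1;
}

int main(void)
{
	/* SSID "ab", supported rates, then a 4-byte TIM element */
	const uint8_t ies[] = { 0, 2, 'a', 'b',
				1, 3, 0x82, 0x84, 0x8b,
				5, 4, 0, 2, 0, 0 };

	printf("%d\n", find_tim(ies, sizeof(ies)));	/* prints 9 */
	return 0;
}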
123
124int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
125{
126 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
127 struct iwl_host_cmd cmd = {
128 .id = REPLY_TX_BEACON,
129 .flags = CMD_SYNC,
130 };
131 struct ieee80211_tx_info *info;
132 u32 frame_size;
133 u32 rate_flags;
134 u32 rate;
135
136 /*
137 * We have to set up the TX command, the TX Beacon command, and the
138 * beacon contents.
139 */
140
141 lockdep_assert_held(&priv->mutex);
142
143 if (!priv->beacon_ctx) {
144 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
145 return 0;
146 }
147
148 if (WARN_ON(!priv->beacon_skb))
149 return -EINVAL;
150
151 /* Allocate beacon command */
152 if (!priv->beacon_cmd)
153 priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
154 tx_beacon_cmd = priv->beacon_cmd;
155 if (!tx_beacon_cmd)
156 return -ENOMEM;
157
158 frame_size = priv->beacon_skb->len;
159
160 /* Set up TX command fields */
161 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
162 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
163 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
164 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
165 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
166
167 /* Set up TX beacon command fields */
168 iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
169 frame_size);
170
171 /* Set up packet rate and flags */
172 info = IEEE80211_SKB_CB(priv->beacon_skb);
173
174 /*
175 * Let's set up the rate at least somewhat correctly;
176 * it will currently not actually be used by the uCode,
177 * it uses the broadcast station's rate instead.
178 */
179 if (info->control.rates[0].idx < 0 ||
180 info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
181 rate = 0;
182 else
183 rate = info->control.rates[0].idx;
184
185 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
186 priv->hw_params.valid_tx_ant);
187 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
188
189 /* In mac80211, rates for 5 GHz start at 0 */
190 if (info->band == IEEE80211_BAND_5GHZ)
191 rate += IWL_FIRST_OFDM_RATE;
192 else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
193 rate_flags |= RATE_MCS_CCK_MSK;
194
195 tx_beacon_cmd->tx.rate_n_flags =
196 iwl_hw_set_rate_n_flags(rate, rate_flags);
197
198 /* Submit command */
199 cmd.len[0] = sizeof(*tx_beacon_cmd);
200 cmd.data[0] = tx_beacon_cmd;
201 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
202 cmd.len[1] = frame_size;
203 cmd.data[1] = priv->beacon_skb->data;
204 cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
205
206 return trans_send_cmd(&priv->trans, &cmd);
207}
208
209static void iwl_bg_beacon_update(struct work_struct *work)
210{
211 struct iwl_priv *priv =
212 container_of(work, struct iwl_priv, beacon_update);
213 struct sk_buff *beacon;
214
215 mutex_lock(&priv->mutex);
216 if (!priv->beacon_ctx) {
217 IWL_ERR(priv, "updating beacon w/o beacon context!\n");
218 goto out;
219 }
220
221 if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
222 /*
223 * The ucode will send beacon notifications even in
224 * IBSS mode, but we don't want to process them. But
225 * we need to defer the type check to here due to
226 * requiring locking around the beacon_ctx access.
227 */
228 goto out;
229 }
230
231 /* Pull the updated AP beacon from mac80211; this will fail if not in AP mode */
232 beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
233 if (!beacon) {
234 IWL_ERR(priv, "update beacon failed -- keeping old\n");
235 goto out;
236 }
237
238 /* A new beacon skb is allocated every time; dispose of the previous one. */
239 dev_kfree_skb(priv->beacon_skb);
240
241 priv->beacon_skb = beacon;
242
243 iwlagn_send_beacon_cmd(priv);
244 out:
245 mutex_unlock(&priv->mutex);
246}
247
248static void iwl_bg_bt_runtime_config(struct work_struct *work)
249{
250 struct iwl_priv *priv =
251 container_of(work, struct iwl_priv, bt_runtime_config);
252
253 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
254 return;
255
256 /* don't send a host command if rf-kill is on */
257 if (!iwl_is_ready_rf(priv))
258 return;
259 iwlagn_send_advance_bt_config(priv);
260}
261
262static void iwl_bg_bt_full_concurrency(struct work_struct *work)
263{
264 struct iwl_priv *priv =
265 container_of(work, struct iwl_priv, bt_full_concurrency);
266 struct iwl_rxon_context *ctx;
267
268 mutex_lock(&priv->mutex);
269
270 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
271 goto out;
272
273 /* don't send a host command if rf-kill is on */
274 if (!iwl_is_ready_rf(priv))
275 goto out;
276
277 IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
278 priv->bt_full_concurrent ?
279 "full concurrency" : "3-wire");
280
281 /*
282 * LQ & RXON updated cmds must be sent before BT Config cmd
283 * to avoid 3-wire collisions
284 */
285 for_each_context(priv, ctx) {
286 iwlagn_set_rxon_chain(priv, ctx);
287 iwlagn_commit_rxon(priv, ctx);
288 }
289
290 iwlagn_send_advance_bt_config(priv);
291out:
292 mutex_unlock(&priv->mutex);
293}
294
295/**
296 * iwl_bg_statistics_periodic - Timer callback to queue statistics
297 *
298 * This callback is provided in order to send a statistics request.
299 *
300 * This timer function is continually reset to execute within
301 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
302 * was received. We need to ensure we receive the statistics in order
303 * to update the temperature used for calibrating the TXPOWER.
304 */
305static void iwl_bg_statistics_periodic(unsigned long data)
306{
307 struct iwl_priv *priv = (struct iwl_priv *)data;
308
309 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
310 return;
311
312 /* don't send a host command if rf-kill is on */
313 if (!iwl_is_ready_rf(priv))
314 return;
315
316 iwl_send_statistics_request(priv, CMD_ASYNC, false);
317}
318
319
320static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
321 u32 start_idx, u32 num_events,
322 u32 mode)
323{
324 u32 i;
325 u32 ptr; /* SRAM byte address of log data */
326 u32 ev, time, data; /* event log data */
327 unsigned long reg_flags;
328
329 if (mode == 0)
330 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
331 else
332 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
333
334 /* Make sure device is powered up for SRAM reads */
335 spin_lock_irqsave(&priv->reg_lock, reg_flags);
336 if (iwl_grab_nic_access(priv)) {
337 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
338 return;
339 }
340
341 /* Set starting address; reads will auto-increment */
342 iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
343 rmb();
344
345 /*
346 * "time" is actually "data" for mode 0 (no timestamp).
347 * place event id # at far right for easier visual parsing.
348 */
349 for (i = 0; i < num_events; i++) {
350 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
351 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
352 if (mode == 0) {
353 trace_iwlwifi_dev_ucode_cont_event(priv,
354 0, time, ev);
355 } else {
356 data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
357 trace_iwlwifi_dev_ucode_cont_event(priv,
358 time, data, ev);
359 }
360 }
361 /* Allow device to power down */
362 iwl_release_nic_access(priv);
363 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
364}
365
366static void iwl_continuous_event_trace(struct iwl_priv *priv)
367{
368 u32 capacity; /* event log capacity in # entries */
369 u32 base; /* SRAM byte address of event log header */
370 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
371 u32 num_wraps; /* # times uCode wrapped to top of log */
372 u32 next_entry; /* index of next entry to be written by uCode */
373
374 base = priv->device_pointers.error_event_table;
375 if (iwlagn_hw_valid_rtc_data_addr(base)) {
376 capacity = iwl_read_targ_mem(priv, base);
377 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
378 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
379 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
380 } else
381 return;
382
383 if (num_wraps == priv->event_log.num_wraps) {
384 iwl_print_cont_event_trace(priv,
385 base, priv->event_log.next_entry,
386 next_entry - priv->event_log.next_entry,
387 mode);
388 priv->event_log.non_wraps_count++;
389 } else {
390 if ((num_wraps - priv->event_log.num_wraps) > 1)
391 priv->event_log.wraps_more_count++;
392 else
393 priv->event_log.wraps_once_count++;
394 trace_iwlwifi_dev_ucode_wrap_event(priv,
395 num_wraps - priv->event_log.num_wraps,
396 next_entry, priv->event_log.next_entry);
397 if (next_entry < priv->event_log.next_entry) {
398 iwl_print_cont_event_trace(priv, base,
399 priv->event_log.next_entry,
400 capacity - priv->event_log.next_entry,
401 mode);
402
403 iwl_print_cont_event_trace(priv, base, 0,
404 next_entry, mode);
405 } else {
406 iwl_print_cont_event_trace(priv, base,
407 next_entry, capacity - next_entry,
408 mode);
409
410 iwl_print_cont_event_trace(priv, base, 0,
411 next_entry, mode);
412 }
413 }
414 priv->event_log.num_wraps = num_wraps;
415 priv->event_log.next_entry = next_entry;
416}
417
418/**
419 * iwl_bg_ucode_trace - Timer callback to log ucode event
420 *
421 * The timer is continually reset to execute every
422 * UCODE_TRACE_PERIOD milliseconds after the last timer expired.
423 * This function performs the continuous uCode event logging
424 * operation, if enabled.
425 */
426static void iwl_bg_ucode_trace(unsigned long data)
427{
428 struct iwl_priv *priv = (struct iwl_priv *)data;
429
430 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
431 return;
432
433 if (priv->event_log.ucode_trace) {
434 iwl_continuous_event_trace(priv);
435 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
436 mod_timer(&priv->ucode_trace,
437 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
438 }
439}
440
441static void iwl_bg_tx_flush(struct work_struct *work)
442{
443 struct iwl_priv *priv =
444 container_of(work, struct iwl_priv, tx_flush);
445
446 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
447 return;
448
449 /* do nothing if rf-kill is on */
450 if (!iwl_is_ready_rf(priv))
451 return;
452
453 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
454 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
455}
456
457/*****************************************************************************
458 *
459 * sysfs attributes
460 *
461 *****************************************************************************/
462
463#ifdef CONFIG_IWLWIFI_DEBUG
464
465/*
466 * The following adds a new attribute to the sysfs representation
467 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
468 * used for controlling the debug level.
469 *
470 * See the debug level definitions in iwl-debug.h for details.
471 *
472 * The debug_level managed through sysfs below is a per-device debug
473 * level that is used instead of the global debug level whenever the
474 * per-device level is set.
475 */
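/*
 * Example use from user space (path and value are illustrative only;
 * the exact device directory depends on the system):
 *
 *   echo 0x43fff > /sys/class/net/wlan0/device/debug_level
 *   cat /sys/class/net/wlan0/device/debug_level
 *
 * The store handler accepts hex or decimal (strict_strtoul with base 0);
 * the show handler prints the value as 0x%08X.
 */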
476static ssize_t show_debug_level(struct device *d,
477 struct device_attribute *attr, char *buf)
478{
479 struct iwl_priv *priv = dev_get_drvdata(d);
480 return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
481}
482static ssize_t store_debug_level(struct device *d,
483 struct device_attribute *attr,
484 const char *buf, size_t count)
485{
486 struct iwl_priv *priv = dev_get_drvdata(d);
487 unsigned long val;
488 int ret;
489
490 ret = strict_strtoul(buf, 0, &val);
491 if (ret)
492 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
493 else {
494 priv->debug_level = val;
495 if (iwl_alloc_traffic_mem(priv))
496 IWL_ERR(priv,
497 "Not enough memory to generate traffic log\n");
498 }
499 return strnlen(buf, count);
500}
501
502static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
503 show_debug_level, store_debug_level);
504
505
506#endif /* CONFIG_IWLWIFI_DEBUG */
507
508
509static ssize_t show_temperature(struct device *d,
510 struct device_attribute *attr, char *buf)
511{
512 struct iwl_priv *priv = dev_get_drvdata(d);
513
514 if (!iwl_is_alive(priv))
515 return -EAGAIN;
516
517 return sprintf(buf, "%d\n", priv->temperature);
518}
519
520static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
521
522static ssize_t show_tx_power(struct device *d,
523 struct device_attribute *attr, char *buf)
524{
525 struct iwl_priv *priv = dev_get_drvdata(d);
526
527 if (!iwl_is_ready_rf(priv))
528 return sprintf(buf, "off\n");
529 else
530 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
531}
532
533static ssize_t store_tx_power(struct device *d,
534 struct device_attribute *attr,
535 const char *buf, size_t count)
536{
537 struct iwl_priv *priv = dev_get_drvdata(d);
538 unsigned long val;
539 int ret;
540
541 ret = strict_strtoul(buf, 10, &val);
542 if (ret)
543 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
544 else {
545 ret = iwl_set_tx_power(priv, val, false);
546 if (ret)
547 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
548 ret);
549 else
550 ret = count;
551 }
552 return ret;
553}
554
555static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
556
557static struct attribute *iwl_sysfs_entries[] = {
558 &dev_attr_temperature.attr,
559 &dev_attr_tx_power.attr,
560#ifdef CONFIG_IWLWIFI_DEBUG
561 &dev_attr_debug_level.attr,
562#endif
563 NULL
564};
565
566static struct attribute_group iwl_attribute_group = {
567 .name = NULL, /* put in device directory */
568 .attrs = iwl_sysfs_entries,
569};
570
571/******************************************************************************
572 *
573 * uCode download functions
574 *
575 ******************************************************************************/
576
577static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
578{
579 if (desc->v_addr)
580 dma_free_coherent(priv->bus->dev, desc->len,
581 desc->v_addr, desc->p_addr);
582 desc->v_addr = NULL;
583 desc->len = 0;
584}
585
586static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img)
587{
588 iwl_free_fw_desc(priv, &img->code);
589 iwl_free_fw_desc(priv, &img->data);
590}
591
592static void iwl_dealloc_ucode(struct iwl_priv *priv)
593{
594 iwl_free_fw_img(priv, &priv->ucode_rt);
595 iwl_free_fw_img(priv, &priv->ucode_init);
596 iwl_free_fw_img(priv, &priv->ucode_wowlan);
597}
598
599static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
600 const void *data, size_t len)
601{
602 if (!len) {
603 desc->v_addr = NULL;
604 return -EINVAL;
605 }
606
607 desc->v_addr = dma_alloc_coherent(priv->bus->dev, len,
608 &desc->p_addr, GFP_KERNEL);
609 if (!desc->v_addr)
610 return -ENOMEM;
611
612 desc->len = len;
613 memcpy(desc->v_addr, data, len);
614 return 0;
615}
616
617struct iwlagn_ucode_capabilities {
618 u32 max_probe_length;
619 u32 standard_phy_calibration_size;
620 u32 flags;
621};
622
623static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
624static int iwl_mac_setup_register(struct iwl_priv *priv,
625 struct iwlagn_ucode_capabilities *capa);
626
627#define UCODE_EXPERIMENTAL_INDEX 100
628#define UCODE_EXPERIMENTAL_TAG "exp"
629
630static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
631{
632 const char *name_pre = priv->cfg->fw_name_pre;
633 char tag[8];
634
635 if (first) {
636#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
637 priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
638 strcpy(tag, UCODE_EXPERIMENTAL_TAG);
639 } else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
640#endif
641 priv->fw_index = priv->cfg->ucode_api_max;
642 sprintf(tag, "%d", priv->fw_index);
643 } else {
644 priv->fw_index--;
645 sprintf(tag, "%d", priv->fw_index);
646 }
647
648 if (priv->fw_index < priv->cfg->ucode_api_min) {
649 IWL_ERR(priv, "no suitable firmware found!\n");
650 return -ENOENT;
651 }
652
653 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
654
655 IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
656 (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
657 ? "EXPERIMENTAL " : "",
658 priv->firmware_name);
659
660 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
661 priv->bus->dev,
662 GFP_KERNEL, priv, iwl_ucode_callback);
663}
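/*
 * For example (the prefix is illustrative only): with a fw_name_pre of
 * "iwlwifi-6000-" and ucode_api_max of 5, successive calls request
 * "iwlwifi-6000-5.ucode", then "iwlwifi-6000-4.ucode", and so on, until
 * ucode_api_min is passed and -ENOENT is returned.
 */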
664
665struct iwlagn_firmware_pieces {
666 const void *inst, *data, *init, *init_data, *wowlan_inst, *wowlan_data;
667 size_t inst_size, data_size, init_size, init_data_size,
668 wowlan_inst_size, wowlan_data_size;
669
670 u32 build;
671
672 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
673 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
674};
675
676static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
677 const struct firmware *ucode_raw,
678 struct iwlagn_firmware_pieces *pieces)
679{
680 struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
681 u32 api_ver, hdr_size;
682 const u8 *src;
683
684 priv->ucode_ver = le32_to_cpu(ucode->ver);
685 api_ver = IWL_UCODE_API(priv->ucode_ver);
686
687 switch (api_ver) {
688 default:
689 hdr_size = 28;
690 if (ucode_raw->size < hdr_size) {
691 IWL_ERR(priv, "File size too small!\n");
692 return -EINVAL;
693 }
694 pieces->build = le32_to_cpu(ucode->u.v2.build);
695 pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
696 pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
697 pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
698 pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
699 src = ucode->u.v2.data;
700 break;
701 case 0:
702 case 1:
703 case 2:
704 hdr_size = 24;
705 if (ucode_raw->size < hdr_size) {
706 IWL_ERR(priv, "File size too small!\n");
707 return -EINVAL;
708 }
709 pieces->build = 0;
710 pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
711 pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
712 pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
713 pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
714 src = ucode->u.v1.data;
715 break;
716 }
717
718 /* Verify size of file vs. image size info in file's header */
719 if (ucode_raw->size != hdr_size + pieces->inst_size +
720 pieces->data_size + pieces->init_size +
721 pieces->init_data_size) {
722
723 IWL_ERR(priv,
724 "uCode file size %d does not match expected size\n",
725 (int)ucode_raw->size);
726 return -EINVAL;
727 }
728
729 pieces->inst = src;
730 src += pieces->inst_size;
731 pieces->data = src;
732 src += pieces->data_size;
733 pieces->init = src;
734 src += pieces->init_size;
735 pieces->init_data = src;
736 src += pieces->init_data_size;
737
738 return 0;
739}
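/*
 * Summary of the legacy (non-TLV) image layout parsed above: a 24-byte
 * (API v0-v2) or 28-byte (later API) header is followed back to back by
 * the runtime instruction, runtime data, init instruction and init data
 * images, whose sizes are taken from the header.
 */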
740
741static int iwlagn_wanted_ucode_alternative = 1;
742
743static int iwlagn_load_firmware(struct iwl_priv *priv,
744 const struct firmware *ucode_raw,
745 struct iwlagn_firmware_pieces *pieces,
746 struct iwlagn_ucode_capabilities *capa)
747{
748 struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
749 struct iwl_ucode_tlv *tlv;
750 size_t len = ucode_raw->size;
751 const u8 *data;
752 int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
753 u64 alternatives;
754 u32 tlv_len;
755 enum iwl_ucode_tlv_type tlv_type;
756 const u8 *tlv_data;
757
758 if (len < sizeof(*ucode)) {
759 IWL_ERR(priv, "uCode has invalid length: %zd\n", len);
760 return -EINVAL;
761 }
762
763 if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
764 IWL_ERR(priv, "invalid uCode magic: 0X%x\n",
765 le32_to_cpu(ucode->magic));
766 return -EINVAL;
767 }
768
769 /*
770 * Check which alternatives are present, and "downgrade"
771 * when the chosen alternative is not present, warning
772 * the user when that happens. Some files may not have
773 * any alternatives, so don't warn in that case.
774 */
775 alternatives = le64_to_cpu(ucode->alternatives);
776 tmp = wanted_alternative;
777 if (wanted_alternative > 63)
778 wanted_alternative = 63;
779 while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
780 wanted_alternative--;
781 if (wanted_alternative && wanted_alternative != tmp)
782 IWL_WARN(priv,
783 "uCode alternative %d not available, choosing %d\n",
784 tmp, wanted_alternative);
785
786 priv->ucode_ver = le32_to_cpu(ucode->ver);
787 pieces->build = le32_to_cpu(ucode->build);
788 data = ucode->data;
789
790 len -= sizeof(*ucode);
791
792 while (len >= sizeof(*tlv)) {
793 u16 tlv_alt;
794
795 len -= sizeof(*tlv);
796 tlv = (void *)data;
797
798 tlv_len = le32_to_cpu(tlv->length);
799 tlv_type = le16_to_cpu(tlv->type);
800 tlv_alt = le16_to_cpu(tlv->alternative);
801 tlv_data = tlv->data;
802
803 if (len < tlv_len) {
804 IWL_ERR(priv, "invalid TLV len: %zd/%u\n",
805 len, tlv_len);
806 return -EINVAL;
807 }
808 len -= ALIGN(tlv_len, 4);
809 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
810
811 /*
812 * Alternative 0 is always valid.
813 *
814 * Skip alternative TLVs that are not selected.
815 */
816 if (tlv_alt != 0 && tlv_alt != wanted_alternative)
817 continue;
818
819 switch (tlv_type) {
820 case IWL_UCODE_TLV_INST:
821 pieces->inst = tlv_data;
822 pieces->inst_size = tlv_len;
823 break;
824 case IWL_UCODE_TLV_DATA:
825 pieces->data = tlv_data;
826 pieces->data_size = tlv_len;
827 break;
828 case IWL_UCODE_TLV_INIT:
829 pieces->init = tlv_data;
830 pieces->init_size = tlv_len;
831 break;
832 case IWL_UCODE_TLV_INIT_DATA:
833 pieces->init_data = tlv_data;
834 pieces->init_data_size = tlv_len;
835 break;
836 case IWL_UCODE_TLV_BOOT:
837 IWL_ERR(priv, "Found unexpected BOOT ucode\n");
838 break;
839 case IWL_UCODE_TLV_PROBE_MAX_LEN:
840 if (tlv_len != sizeof(u32))
841 goto invalid_tlv_len;
842 capa->max_probe_length =
843 le32_to_cpup((__le32 *)tlv_data);
844 break;
845 case IWL_UCODE_TLV_PAN:
846 if (tlv_len)
847 goto invalid_tlv_len;
848 capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
849 break;
850 case IWL_UCODE_TLV_FLAGS:
851 /* must be at least one u32 */
852 if (tlv_len < sizeof(u32))
853 goto invalid_tlv_len;
854 /* and a proper number of u32s */
855 if (tlv_len % sizeof(u32))
856 goto invalid_tlv_len;
857 /*
858 * This driver only reads the first u32, since no
859 * further feature flags are defined right now.
860 * If that changes, the driver will either not work
861 * with the new firmware, or it will simply not take
862 * advantage of the new features.
863 */
864 capa->flags = le32_to_cpup((__le32 *)tlv_data);
865 break;
866 case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
867 if (tlv_len != sizeof(u32))
868 goto invalid_tlv_len;
869 pieces->init_evtlog_ptr =
870 le32_to_cpup((__le32 *)tlv_data);
871 break;
872 case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
873 if (tlv_len != sizeof(u32))
874 goto invalid_tlv_len;
875 pieces->init_evtlog_size =
876 le32_to_cpup((__le32 *)tlv_data);
877 break;
878 case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
879 if (tlv_len != sizeof(u32))
880 goto invalid_tlv_len;
881 pieces->init_errlog_ptr =
882 le32_to_cpup((__le32 *)tlv_data);
883 break;
884 case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
885 if (tlv_len != sizeof(u32))
886 goto invalid_tlv_len;
887 pieces->inst_evtlog_ptr =
888 le32_to_cpup((__le32 *)tlv_data);
889 break;
890 case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
891 if (tlv_len != sizeof(u32))
892 goto invalid_tlv_len;
893 pieces->inst_evtlog_size =
894 le32_to_cpup((__le32 *)tlv_data);
895 break;
896 case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
897 if (tlv_len != sizeof(u32))
898 goto invalid_tlv_len;
899 pieces->inst_errlog_ptr =
900 le32_to_cpup((__le32 *)tlv_data);
901 break;
902 case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
903 if (tlv_len)
904 goto invalid_tlv_len;
905 priv->enhance_sensitivity_table = true;
906 break;
907 case IWL_UCODE_TLV_WOWLAN_INST:
908 pieces->wowlan_inst = tlv_data;
909 pieces->wowlan_inst_size = tlv_len;
910 break;
911 case IWL_UCODE_TLV_WOWLAN_DATA:
912 pieces->wowlan_data = tlv_data;
913 pieces->wowlan_data_size = tlv_len;
914 break;
915 case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
916 if (tlv_len != sizeof(u32))
917 goto invalid_tlv_len;
918 capa->standard_phy_calibration_size =
919 le32_to_cpup((__le32 *)tlv_data);
920 break;
921 default:
922 IWL_DEBUG_INFO(priv, "unknown TLV: %d\n", tlv_type);
923 break;
924 }
925 }
926
927 if (len) {
928 IWL_ERR(priv, "invalid TLV after parsing: %zd\n", len);
929 iwl_print_hex_dump(priv, IWL_DL_FW, (u8 *)data, len);
930 return -EINVAL;
931 }
932
933 return 0;
934
935 invalid_tlv_len:
936 IWL_ERR(priv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
937 iwl_print_hex_dump(priv, IWL_DL_FW, tlv_data, tlv_len);
938
939 return -EINVAL;
940}
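/*
 * Summary of the TLV image layout parsed above: after the
 * iwl_tlv_ucode_header (magic, version, build, alternatives bitmap), the
 * file is a sequence of records, each carrying a 16-bit type, a 16-bit
 * alternative id and a 32-bit length, followed by the payload padded to
 * a 4-byte boundary. Records whose alternative id is neither 0 nor the
 * selected alternative are skipped.
 */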
941
942/**
943 * iwl_ucode_callback - callback when firmware was loaded
944 *
945 * If loaded successfully, copies the firmware into buffers
946 * for the card to fetch (via DMA).
947 */
948static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
949{
950 struct iwl_priv *priv = context;
951 struct iwl_ucode_header *ucode;
952 int err;
953 struct iwlagn_firmware_pieces pieces;
954 const unsigned int api_max = priv->cfg->ucode_api_max;
955 const unsigned int api_min = priv->cfg->ucode_api_min;
956 u32 api_ver;
957 char buildstr[25];
958 u32 build;
959 struct iwlagn_ucode_capabilities ucode_capa = {
960 .max_probe_length = 200,
961 .standard_phy_calibration_size =
962 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
963 };
964
965 memset(&pieces, 0, sizeof(pieces));
966
967 if (!ucode_raw) {
968 if (priv->fw_index <= priv->cfg->ucode_api_max)
969 IWL_ERR(priv,
970 "request for firmware file '%s' failed.\n",
971 priv->firmware_name);
972 goto try_again;
973 }
974
975 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
976 priv->firmware_name, ucode_raw->size);
977
978 /* Make sure that we got at least the API version number */
979 if (ucode_raw->size < 4) {
980 IWL_ERR(priv, "File size way too small!\n");
981 goto try_again;
982 }
983
984 /* Data from ucode file: header followed by uCode images */
985 ucode = (struct iwl_ucode_header *)ucode_raw->data;
986
987 if (ucode->ver)
988 err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
989 else
990 err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
991 &ucode_capa);
992
993 if (err)
994 goto try_again;
995
996 api_ver = IWL_UCODE_API(priv->ucode_ver);
997 build = pieces.build;
998
999 /*
1000 * api_ver should match the api version forming part of the
1001 * firmware filename ... but we don't check for that and only rely
1002 * on the API version read from firmware header from here on forward
1003 */
1004 /* no api version check required for experimental uCode */
1005 if (priv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
1006 if (api_ver < api_min || api_ver > api_max) {
1007 IWL_ERR(priv,
1008 "Driver unable to support your firmware API. "
1009 "Driver supports v%u, firmware is v%u.\n",
1010 api_max, api_ver);
1011 goto try_again;
1012 }
1013
1014 if (api_ver != api_max)
1015 IWL_ERR(priv,
1016 "Firmware has old API version. Expected v%u, "
1017 "got v%u. New firmware can be obtained "
1018 "from http://www.intellinuxwireless.org.\n",
1019 api_max, api_ver);
1020 }
1021
1022 if (build)
1023 sprintf(buildstr, " build %u%s", build,
1024 (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
1025 ? " (EXP)" : "");
1026 else
1027 buildstr[0] = '\0';
1028
1029 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
1030 IWL_UCODE_MAJOR(priv->ucode_ver),
1031 IWL_UCODE_MINOR(priv->ucode_ver),
1032 IWL_UCODE_API(priv->ucode_ver),
1033 IWL_UCODE_SERIAL(priv->ucode_ver),
1034 buildstr);
1035
1036 snprintf(priv->hw->wiphy->fw_version,
1037 sizeof(priv->hw->wiphy->fw_version),
1038 "%u.%u.%u.%u%s",
1039 IWL_UCODE_MAJOR(priv->ucode_ver),
1040 IWL_UCODE_MINOR(priv->ucode_ver),
1041 IWL_UCODE_API(priv->ucode_ver),
1042 IWL_UCODE_SERIAL(priv->ucode_ver),
1043 buildstr);
1044
1045 /*
1046 * For any of the failures below (before allocating pci memory)
1047 * we will try to load a version with a smaller API -- maybe the
1048 * user just got a corrupted version of the latest API.
1049 */
1050
1051 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1052 priv->ucode_ver);
1053 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1054 pieces.inst_size);
1055 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1056 pieces.data_size);
1057 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1058 pieces.init_size);
1059 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1060 pieces.init_data_size);
1061
1062 /* Verify that uCode images will fit in card's SRAM */
1063 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1064 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1065 pieces.inst_size);
1066 goto try_again;
1067 }
1068
1069 if (pieces.data_size > priv->hw_params.max_data_size) {
1070 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1071 pieces.data_size);
1072 goto try_again;
1073 }
1074
1075 if (pieces.init_size > priv->hw_params.max_inst_size) {
1076 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1077 pieces.init_size);
1078 goto try_again;
1079 }
1080
1081 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1082 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1083 pieces.init_data_size);
1084 goto try_again;
1085 }
1086
1087 /* Allocate ucode buffers for card's bus-master loading ... */
1088
1089 /* Runtime instructions and 2 copies of data:
1090 * 1) unmodified from disk
1091 * 2) backup cache for save/restore during power-downs */
1092 if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code,
1093 pieces.inst, pieces.inst_size))
1094 goto err_pci_alloc;
1095 if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data,
1096 pieces.data, pieces.data_size))
1097 goto err_pci_alloc;
1098
1099 /* Initialization instructions and data */
1100 if (pieces.init_size && pieces.init_data_size) {
1101 if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code,
1102 pieces.init, pieces.init_size))
1103 goto err_pci_alloc;
1104 if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data,
1105 pieces.init_data, pieces.init_data_size))
1106 goto err_pci_alloc;
1107 }
1108
1109 /* WoWLAN instructions and data */
1110 if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
1111 if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code,
1112 pieces.wowlan_inst,
1113 pieces.wowlan_inst_size))
1114 goto err_pci_alloc;
1115 if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data,
1116 pieces.wowlan_data,
1117 pieces.wowlan_data_size))
1118 goto err_pci_alloc;
1119 }
1120
1121 /* Now that we can no longer fail, copy information */
1122
1123 /*
1124 * The (size - 16) / 12 formula is based on the information recorded
1125 * for each event, which is of mode 1 (including timestamp) for all
1126 * new microcodes that include this information.
1127 */
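	/*
	 * Worked example (numbers are illustrative only): an event log
	 * area of 2416 bytes has a 16-byte header followed by 12-byte
	 * mode-1 entries (event id, timestamp, data), so it holds
	 * (2416 - 16) / 12 = 200 entries.
	 */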
1128 priv->init_evtlog_ptr = pieces.init_evtlog_ptr;
1129 if (pieces.init_evtlog_size)
1130 priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
1131 else
1132 priv->init_evtlog_size =
1133 priv->cfg->base_params->max_event_log_size;
1134 priv->init_errlog_ptr = pieces.init_errlog_ptr;
1135 priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
1136 if (pieces.inst_evtlog_size)
1137 priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
1138 else
1139 priv->inst_evtlog_size =
1140 priv->cfg->base_params->max_event_log_size;
1141 priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
1142
1143 priv->new_scan_threshold_behaviour =
1144 !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
1145
1146 if ((priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE) &&
1147 (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN)) {
1148 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
1149 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1150 } else
1151 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1152
1153 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
1154 priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
1155 else
1156 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1157
1158 /*
1159 * figure out the command indices of the chain noise reset and gain
1160 * commands based on the size of the standard phy calibration table
1161 */
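	/*
	 * The reset and gain commands are taken to occupy the two slots
	 * immediately after the standard calibration table, i.e. command
	 * indices N and N + 1, where N is the (possibly capped) table
	 * size reported by the firmware.
	 */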
1162 if (ucode_capa.standard_phy_calibration_size >
1163 IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
1164 ucode_capa.standard_phy_calibration_size =
1165 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1166
1167 priv->phy_calib_chain_noise_reset_cmd =
1168 ucode_capa.standard_phy_calibration_size;
1169 priv->phy_calib_chain_noise_gain_cmd =
1170 ucode_capa.standard_phy_calibration_size + 1;
1171
1172 /**************************************************
1173 * This is still part of probe() in a sense...
1174 *
1175 * 9. Setup and register with mac80211 and debugfs
1176 **************************************************/
1177 err = iwl_mac_setup_register(priv, &ucode_capa);
1178 if (err)
1179 goto out_unbind;
1180
1181 err = iwl_dbgfs_register(priv, DRV_NAME);
1182 if (err)
1183 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
1184
1185 err = sysfs_create_group(&(priv->bus->dev->kobj),
1186 &iwl_attribute_group);
1187 if (err) {
1188 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1189 goto out_unbind;
1190 }
1191
1192 /* We have our copies now, allow OS release its copies */
1193 release_firmware(ucode_raw);
1194 complete(&priv->firmware_loading_complete);
1195 return;
1196
1197 try_again:
1198 /* try next, if any */
1199 if (iwl_request_firmware(priv, false))
1200 goto out_unbind;
1201 release_firmware(ucode_raw);
1202 return;
1203
1204 err_pci_alloc:
1205 IWL_ERR(priv, "failed to allocate pci memory\n");
1206 iwl_dealloc_ucode(priv);
1207 out_unbind:
1208 complete(&priv->firmware_loading_complete);
1209 device_release_driver(priv->bus->dev);
1210 release_firmware(ucode_raw);
1211}
1212
1213static const char * const desc_lookup_text[] = {
1214 "OK",
1215 "FAIL",
1216 "BAD_PARAM",
1217 "BAD_CHECKSUM",
1218 "NMI_INTERRUPT_WDG",
1219 "SYSASSERT",
1220 "FATAL_ERROR",
1221 "BAD_COMMAND",
1222 "HW_ERROR_TUNE_LOCK",
1223 "HW_ERROR_TEMPERATURE",
1224 "ILLEGAL_CHAN_FREQ",
1225 "VCC_NOT_STABLE",
1226 "FH_ERROR",
1227 "NMI_INTERRUPT_HOST",
1228 "NMI_INTERRUPT_ACTION_PT",
1229 "NMI_INTERRUPT_UNKNOWN",
1230 "UCODE_VERSION_MISMATCH",
1231 "HW_ERROR_ABS_LOCK",
1232 "HW_ERROR_CAL_LOCK_FAIL",
1233 "NMI_INTERRUPT_INST_ACTION_PT",
1234 "NMI_INTERRUPT_DATA_ACTION_PT",
1235 "NMI_TRM_HW_ER",
1236 "NMI_INTERRUPT_TRM",
1237 "NMI_INTERRUPT_BREAK_POINT",
1238 "DEBUG_0",
1239 "DEBUG_1",
1240 "DEBUG_2",
1241 "DEBUG_3",
1242};
1243
1244static struct { char *name; u8 num; } advanced_lookup[] = {
1245 { "NMI_INTERRUPT_WDG", 0x34 },
1246 { "SYSASSERT", 0x35 },
1247 { "UCODE_VERSION_MISMATCH", 0x37 },
1248 { "BAD_COMMAND", 0x38 },
1249 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1250 { "FATAL_ERROR", 0x3D },
1251 { "NMI_TRM_HW_ERR", 0x46 },
1252 { "NMI_INTERRUPT_TRM", 0x4C },
1253 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1254 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1255 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1256 { "NMI_INTERRUPT_HOST", 0x66 },
1257 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1258 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1259 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1260 { "ADVANCED_SYSASSERT", 0 },
1261};
1262
1263static const char *desc_lookup(u32 num)
1264{
1265 int i;
1266 int max = ARRAY_SIZE(desc_lookup_text);
1267
1268 if (num < max)
1269 return desc_lookup_text[num];
1270
1271 max = ARRAY_SIZE(advanced_lookup) - 1;
1272 for (i = 0; i < max; i++) {
1273 if (advanced_lookup[i].num == num)
1274 break;
1275 }
1276 return advanced_lookup[i].name;
1277}
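/*
 * Note: if num matches neither a desc_lookup_text index nor an entry in
 * advanced_lookup, the loop above runs off the end and the final
 * "ADVANCED_SYSASSERT" entry is returned as the catch-all description.
 */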
1278
1279#define ERROR_START_OFFSET (1 * sizeof(u32))
1280#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1281
1282void iwl_dump_nic_error_log(struct iwl_priv *priv)
1283{
1284 u32 base;
1285 struct iwl_error_event_table table;
1286
1287 base = priv->device_pointers.error_event_table;
1288 if (priv->ucode_type == IWL_UCODE_INIT) {
1289 if (!base)
1290 base = priv->init_errlog_ptr;
1291 } else {
1292 if (!base)
1293 base = priv->inst_errlog_ptr;
1294 }
1295
1296 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1297 IWL_ERR(priv,
1298 "Not valid error log pointer 0x%08X for %s uCode\n",
1299 base,
1300 (priv->ucode_type == IWL_UCODE_INIT)
1301 ? "Init" : "RT");
1302 return;
1303 }
1304
1305 iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
1306
1307 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1308 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1309 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1310 priv->status, table.valid);
1311 }
1312
1313 priv->isr_stats.err_code = table.error_id;
1314
1315 trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
1316 table.data1, table.data2, table.line,
1317 table.blink1, table.blink2, table.ilink1,
1318 table.ilink2, table.bcon_time, table.gp1,
1319 table.gp2, table.gp3, table.ucode_ver,
1320 table.hw_ver, table.brd_ver);
1321 IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
1322 desc_lookup(table.error_id));
1323 IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
1324 IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
1325 IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
1326 IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
1327 IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
1328 IWL_ERR(priv, "0x%08X | data1\n", table.data1);
1329 IWL_ERR(priv, "0x%08X | data2\n", table.data2);
1330 IWL_ERR(priv, "0x%08X | line\n", table.line);
1331 IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
1332 IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
1333 IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
1334 IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
1335 IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
1336 IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
1337 IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
1338 IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
1339 IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
1340 IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
1341}
1342
1343#define EVENT_START_OFFSET (4 * sizeof(u32))
1344
1345/**
1346 * iwl_print_event_log - Dump uCode event log entries to syslog
1347 *
1348 */
1349static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1350 u32 num_events, u32 mode,
1351 int pos, char **buf, size_t bufsz)
1352{
1353 u32 i;
1354 u32 base; /* SRAM byte address of event log header */
1355 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1356 u32 ptr; /* SRAM byte address of log data */
1357 u32 ev, time, data; /* event log data */
1358 unsigned long reg_flags;
1359
1360 if (num_events == 0)
1361 return pos;
1362
1363 base = priv->device_pointers.log_event_table;
1364 if (priv->ucode_type == IWL_UCODE_INIT) {
1365 if (!base)
1366 base = priv->init_evtlog_ptr;
1367 } else {
1368 if (!base)
1369 base = priv->inst_evtlog_ptr;
1370 }
1371
1372 if (mode == 0)
1373 event_size = 2 * sizeof(u32);
1374 else
1375 event_size = 3 * sizeof(u32);
1376
1377 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1378
1379 /* Make sure device is powered up for SRAM reads */
1380 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1381 iwl_grab_nic_access(priv);
1382
1383 /* Set starting address; reads will auto-increment */
1384 iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
1385 rmb();
1386
1387 /* "time" is actually "data" for mode 0 (no timestamp).
1388 * place event id # at far right for easier visual parsing. */
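	/*
	 * Each record read below is therefore { event id, data } for
	 * mode 0 or { event id, timestamp, data } for mode 1; the
	 * HBUS_TARG_MEM_RDAT reads auto-increment through the words in
	 * that order.
	 */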
1389 for (i = 0; i < num_events; i++) {
1390 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1391 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1392 if (mode == 0) {
1393 /* data, ev */
1394 if (bufsz) {
1395 pos += scnprintf(*buf + pos, bufsz - pos,
1396 "EVT_LOG:0x%08x:%04u\n",
1397 time, ev);
1398 } else {
1399 trace_iwlwifi_dev_ucode_event(priv, 0,
1400 time, ev);
1401 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1402 time, ev);
1403 }
1404 } else {
1405 data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1406 if (bufsz) {
1407 pos += scnprintf(*buf + pos, bufsz - pos,
1408 "EVT_LOGT:%010u:0x%08x:%04u\n",
1409 time, data, ev);
1410 } else {
1411 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1412 time, data, ev);
1413 trace_iwlwifi_dev_ucode_event(priv, time,
1414 data, ev);
1415 }
1416 }
1417 }
1418
1419 /* Allow device to power down */
1420 iwl_release_nic_access(priv);
1421 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1422 return pos;
1423}
1424
1425/**
1426 * iwl_print_last_event_logs - Dump the newest event log entries to syslog
1427 */
1428static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1429 u32 num_wraps, u32 next_entry,
1430 u32 size, u32 mode,
1431 int pos, char **buf, size_t bufsz)
1432{
1433 /*
1434 * display the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries,
1435 * i.e. the entries just before the next one that uCode would fill.
1436 */
1437 if (num_wraps) {
1438 if (next_entry < size) {
1439 pos = iwl_print_event_log(priv,
1440 capacity - (size - next_entry),
1441 size - next_entry, mode,
1442 pos, buf, bufsz);
1443 pos = iwl_print_event_log(priv, 0,
1444 next_entry, mode,
1445 pos, buf, bufsz);
1446 } else
1447 pos = iwl_print_event_log(priv, next_entry - size,
1448 size, mode, pos, buf, bufsz);
1449 } else {
1450 if (next_entry < size) {
1451 pos = iwl_print_event_log(priv, 0, next_entry,
1452 mode, pos, buf, bufsz);
1453 } else {
1454 pos = iwl_print_event_log(priv, next_entry - size,
1455 size, mode, pos, buf, bufsz);
1456 }
1457 }
1458 return pos;
1459}
1460
1461#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1462
1463int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1464 char **buf, bool display)
1465{
1466 u32 base; /* SRAM byte address of event log header */
1467 u32 capacity; /* event log capacity in # entries */
1468 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1469 u32 num_wraps; /* # times uCode wrapped to top of log */
1470 u32 next_entry; /* index of next entry to be written by uCode */
1471 u32 size; /* # entries that we'll print */
1472 u32 logsize;
1473 int pos = 0;
1474 size_t bufsz = 0;
1475
1476 base = priv->device_pointers.log_event_table;
1477 if (priv->ucode_type == IWL_UCODE_INIT) {
1478 logsize = priv->init_evtlog_size;
1479 if (!base)
1480 base = priv->init_evtlog_ptr;
1481 } else {
1482 logsize = priv->inst_evtlog_size;
1483 if (!base)
1484 base = priv->inst_evtlog_ptr;
1485 }
1486
1487 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1488 IWL_ERR(priv,
1489 "Invalid event log pointer 0x%08X for %s uCode\n",
1490 base,
1491 (priv->ucode_type == IWL_UCODE_INIT)
1492 ? "Init" : "RT");
1493 return -EINVAL;
1494 }
1495
1496 /* event log header */
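	/* The header is four consecutive u32 words in SRAM: capacity,
	 * mode, num_wraps and next_entry, at offsets 0, 4, 8 and 12. */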
1497 capacity = iwl_read_targ_mem(priv, base);
1498 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1499 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1500 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1501
1502 if (capacity > logsize) {
1503 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1504 capacity, logsize);
1505 capacity = logsize;
1506 }
1507
1508 if (next_entry > logsize) {
1509 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1510 next_entry, logsize);
1511 next_entry = logsize;
1512 }
1513
1514 size = num_wraps ? capacity : next_entry;
1515
1516 /* bail out if nothing in log */
1517 if (size == 0) {
1518 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1519 return pos;
1520 }
1521
1522 /* enable/disable bt channel inhibition */
1523 priv->bt_ch_announce = iwlagn_bt_ch_announce;
1524
1525#ifdef CONFIG_IWLWIFI_DEBUG
1526 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1527 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1528 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1529#else
1530 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1531 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1532#endif
1533 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1534 size);
1535
1536#ifdef CONFIG_IWLWIFI_DEBUG
1537 if (display) {
1538 if (full_log)
1539 bufsz = capacity * 48;
1540 else
1541 bufsz = size * 48;
1542 *buf = kmalloc(bufsz, GFP_KERNEL);
1543 if (!*buf)
1544 return -ENOMEM;
1545 }
1546 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1547 /*
1548 * if uCode has wrapped back to top of log,
1549 * start at the oldest entry,
1550 * i.e. the next one that uCode would fill.
1551 */
1552 if (num_wraps)
1553 pos = iwl_print_event_log(priv, next_entry,
1554 capacity - next_entry, mode,
1555 pos, buf, bufsz);
1556 /* (then/else) start at top of log */
1557 pos = iwl_print_event_log(priv, 0,
1558 next_entry, mode, pos, buf, bufsz);
1559 } else
1560 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1561 next_entry, size, mode,
1562 pos, buf, bufsz);
1563#else
1564 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1565 next_entry, size, mode,
1566 pos, buf, bufsz);
1567#endif
1568 return pos;
1569}
1570
1571static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1572{
1573 struct iwl_ct_kill_config cmd;
1574 struct iwl_ct_kill_throttling_config adv_cmd;
1575 unsigned long flags;
1576 int ret = 0;
1577
1578 spin_lock_irqsave(&priv->lock, flags);
1579 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1580 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1581 spin_unlock_irqrestore(&priv->lock, flags);
1582 priv->thermal_throttle.ct_kill_toggle = false;
1583
1584 if (priv->cfg->base_params->support_ct_kill_exit) {
1585 adv_cmd.critical_temperature_enter =
1586 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1587 adv_cmd.critical_temperature_exit =
1588 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
1589
1590 ret = trans_send_cmd_pdu(&priv->trans,
1591 REPLY_CT_KILL_CONFIG_CMD,
1592 CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
1593 if (ret)
1594 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1595 else
1596 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1597 "succeeded, "
1598 "critical temperature enter is %d,"
1599 "exit is %d\n",
1600 priv->hw_params.ct_kill_threshold,
1601 priv->hw_params.ct_kill_exit_threshold);
1602 } else {
1603 cmd.critical_temperature_R =
1604 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1605
1606 ret = trans_send_cmd_pdu(&priv->trans,
1607 REPLY_CT_KILL_CONFIG_CMD,
1608 CMD_SYNC, sizeof(cmd), &cmd);
1609 if (ret)
1610 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1611 else
1612 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1613 "succeeded, "
1614 "critical temperature is %d\n",
1615 priv->hw_params.ct_kill_threshold);
1616 }
1617}
1618
1619static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
1620{
1621 struct iwl_calib_cfg_cmd calib_cfg_cmd;
1622 struct iwl_host_cmd cmd = {
1623 .id = CALIBRATION_CFG_CMD,
1624 .len = { sizeof(struct iwl_calib_cfg_cmd), },
1625 .data = { &calib_cfg_cmd, },
1626 };
1627
1628 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
1629 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
1630 calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
1631
1632 return trans_send_cmd(&priv->trans, &cmd);
1633}
1634
1635
1636static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
1637{
1638 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
1639 .valid = cpu_to_le32(valid_tx_ant),
1640 };
1641
1642 if (IWL_UCODE_API(priv->ucode_ver) > 1) {
1643 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
1644 return trans_send_cmd_pdu(&priv->trans,
1645 TX_ANT_CONFIGURATION_CMD,
1646 CMD_SYNC,
1647 sizeof(struct iwl_tx_ant_config_cmd),
1648 &tx_ant_cmd);
1649 } else {
1650 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
1651 return -EOPNOTSUPP;
1652 }
1653}
1654
1655/**
1656 * iwl_alive_start - called after REPLY_ALIVE notification received
1657 * from protocol/runtime uCode (initialization uCode's
1658 * Alive gets handled by iwl_init_alive_start()).
1659 */
1660int iwl_alive_start(struct iwl_priv *priv)
1661{
1662 int ret = 0;
1663 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1664
1665 /*TODO: this should go to the transport layer */
1666 iwl_reset_ict(priv);
1667
1668 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
1669
1670 /* After the ALIVE response, we can send host commands to the uCode */
1671 set_bit(STATUS_ALIVE, &priv->status);
1672
1673 /* Enable watchdog to monitor the driver tx queues */
1674 iwl_setup_watchdog(priv);
1675
1676 if (iwl_is_rfkill(priv))
1677 return -ERFKILL;
1678
1679 /* download priority table before any calibration request */
1680 if (priv->cfg->bt_params &&
1681 priv->cfg->bt_params->advanced_bt_coexist) {
1682 /* Configure Bluetooth device coexistence support */
1683 if (priv->cfg->bt_params->bt_sco_disable)
1684 priv->bt_enable_pspoll = false;
1685 else
1686 priv->bt_enable_pspoll = true;
1687
1688 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
1689 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
1690 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
1691 iwlagn_send_advance_bt_config(priv);
1692 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
1693 priv->cur_rssi_ctx = NULL;
1694
1695 iwlagn_send_prio_tbl(priv);
1696
1697 /* FIXME: w/a to force change uCode BT state machine */
1698 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
1699 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
1700 if (ret)
1701 return ret;
1702 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
1703 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
1704 if (ret)
1705 return ret;
1706 } else {
1707 /*
1708 * default is 2-wire BT coexistence support
1709 */
1710 iwl_send_bt_config(priv);
1711 }
1712
1713 if (priv->hw_params.calib_rt_cfg)
1714 iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg);
1715
1716 ieee80211_wake_queues(priv->hw);
1717
1718 priv->active_rate = IWL_RATES_MASK;
1719
1720 /* Configure Tx antenna selection based on H/W config */
1721 iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
1722
1723 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
1724 struct iwl_rxon_cmd *active_rxon =
1725 (struct iwl_rxon_cmd *)&ctx->active;
1726 /* apply any changes in staging */
1727 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
1728 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1729 } else {
1730 struct iwl_rxon_context *tmp;
1731 /* Initialize our rx_config data */
1732 for_each_context(priv, tmp)
1733 iwl_connection_init_rx_config(priv, tmp);
1734
1735 iwlagn_set_rxon_chain(priv, ctx);
1736 }
1737
1738 if (!priv->wowlan) {
1739 /* WoWLAN ucode will not reply in the same way, skip it */
1740 iwl_reset_run_time_calib(priv);
1741 }
1742
1743 set_bit(STATUS_READY, &priv->status);
1744
1745 /* Configure the adapter for unassociated operation */
1746 ret = iwlagn_commit_rxon(priv, ctx);
1747 if (ret)
1748 return ret;
1749
1750 /* At this point, the NIC is initialized and operational */
1751 iwl_rf_kill_ct_config(priv);
1752
1753 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
1754
1755 return iwl_power_update_mode(priv, true);
1756}
1757
1758static void iwl_cancel_deferred_work(struct iwl_priv *priv);
1759
1760static void __iwl_down(struct iwl_priv *priv)
1761{
1762 int exit_pending;
1763
1764 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
1765
1766 iwl_scan_cancel_timeout(priv, 200);
1767
1768 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
1769
1770 /* Stop the TX queue watchdog. STATUS_EXIT_PENDING must be set
1771 * to prevent the timer from being rearmed. */
1772 del_timer_sync(&priv->watchdog);
1773
1774 iwl_clear_ucode_stations(priv, NULL);
1775 iwl_dealloc_bcast_stations(priv);
1776 iwl_clear_driver_stations(priv);
1777
1778 /* reset BT coex data */
1779 priv->bt_status = 0;
1780 priv->cur_rssi_ctx = NULL;
1781 priv->bt_is_sco = 0;
1782 if (priv->cfg->bt_params)
1783 priv->bt_traffic_load =
1784 priv->cfg->bt_params->bt_init_traffic_load;
1785 else
1786 priv->bt_traffic_load = 0;
1787 priv->bt_full_concurrent = false;
1788 priv->bt_ci_compliance = 0;
1789
1790 /* Wipe out the EXIT_PENDING status bit if we are not actually
1791 * exiting the module */
1792 if (!exit_pending)
1793 clear_bit(STATUS_EXIT_PENDING, &priv->status);
1794
1795 if (priv->mac80211_registered)
1796 ieee80211_stop_queues(priv->hw);
1797
1798 /* Clear out all status bits but a few that are stable across reset */
1799 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
1800 STATUS_RF_KILL_HW |
1801 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
1802 STATUS_GEO_CONFIGURED |
1803 test_bit(STATUS_FW_ERROR, &priv->status) <<
1804 STATUS_FW_ERROR |
1805 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
1806 STATUS_EXIT_PENDING;
1807
1808 trans_stop_device(&priv->trans);
1809
1810 dev_kfree_skb(priv->beacon_skb);
1811 priv->beacon_skb = NULL;
1812}
1813
1814static void iwl_down(struct iwl_priv *priv)
1815{
1816 mutex_lock(&priv->mutex);
1817 __iwl_down(priv);
1818 mutex_unlock(&priv->mutex);
1819
1820 iwl_cancel_deferred_work(priv);
1821}
1822
1823#define MAX_HW_RESTARTS 5
1824
1825static int __iwl_up(struct iwl_priv *priv)
1826{
1827 struct iwl_rxon_context *ctx;
1828 int ret;
1829
1830 lockdep_assert_held(&priv->mutex);
1831
1832 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1833 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
1834 return -EIO;
1835 }
1836
1837 for_each_context(priv, ctx) {
1838 ret = iwlagn_alloc_bcast_station(priv, ctx);
1839 if (ret) {
1840 iwl_dealloc_bcast_stations(priv);
1841 return ret;
1842 }
1843 }
1844
1845 ret = iwlagn_run_init_ucode(priv);
1846 if (ret) {
1847 IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
1848 goto error;
1849 }
1850
1851 ret = iwlagn_load_ucode_wait_alive(priv,
1852 &priv->ucode_rt,
1853 IWL_UCODE_REGULAR);
1854 if (ret) {
1855 IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
1856 goto error;
1857 }
1858
1859 ret = iwl_alive_start(priv);
1860 if (ret)
1861 goto error;
1862 return 0;
1863
1864 error:
1865 set_bit(STATUS_EXIT_PENDING, &priv->status);
1866 __iwl_down(priv);
1867 clear_bit(STATUS_EXIT_PENDING, &priv->status);
1868
1869 IWL_ERR(priv, "Unable to initialize device.\n");
1870 return ret;
1871}
1872
1873
1874/*****************************************************************************
1875 *
1876 * Workqueue callbacks
1877 *
1878 *****************************************************************************/
1879
1880static void iwl_bg_run_time_calib_work(struct work_struct *work)
1881{
1882 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1883 run_time_calib_work);
1884
1885 mutex_lock(&priv->mutex);
1886
1887 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1888 test_bit(STATUS_SCANNING, &priv->status)) {
1889 mutex_unlock(&priv->mutex);
1890 return;
1891 }
1892
1893 if (priv->start_calib) {
1894 iwl_chain_noise_calibration(priv);
1895 iwl_sensitivity_calibration(priv);
1896 }
1897
1898 mutex_unlock(&priv->mutex);
1899}
1900
1901static void iwlagn_prepare_restart(struct iwl_priv *priv)
1902{
1903 struct iwl_rxon_context *ctx;
1904 bool bt_full_concurrent;
1905 u8 bt_ci_compliance;
1906 u8 bt_load;
1907 u8 bt_status;
1908 bool bt_is_sco;
1909
1910 lockdep_assert_held(&priv->mutex);
1911
1912 for_each_context(priv, ctx)
1913 ctx->vif = NULL;
1914 priv->is_open = 0;
1915
1916 /*
1917 * __iwl_down() will clear the BT status variables,
1918 * which is correct, but when we restart we really
1919 * want to keep them so restore them afterwards.
1920 *
1921 * The restart process will later pick them up and
1922 * re-configure the hw when we reconfigure the BT
1923 * command.
1924 */
1925 bt_full_concurrent = priv->bt_full_concurrent;
1926 bt_ci_compliance = priv->bt_ci_compliance;
1927 bt_load = priv->bt_traffic_load;
1928 bt_status = priv->bt_status;
1929 bt_is_sco = priv->bt_is_sco;
1930
1931 __iwl_down(priv);
1932
1933 priv->bt_full_concurrent = bt_full_concurrent;
1934 priv->bt_ci_compliance = bt_ci_compliance;
1935 priv->bt_traffic_load = bt_load;
1936 priv->bt_status = bt_status;
1937 priv->bt_is_sco = bt_is_sco;
1938}
1939
1940static void iwl_bg_restart(struct work_struct *data)
1941{
1942 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
1943
1944 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1945 return;
1946
1947 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
1948 mutex_lock(&priv->mutex);
1949 iwlagn_prepare_restart(priv);
1950 mutex_unlock(&priv->mutex);
1951 iwl_cancel_deferred_work(priv);
1952 ieee80211_restart_hw(priv->hw);
1953 } else {
1954 WARN_ON(1);
1955 }
1956}
1957
1958static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
1959 struct ieee80211_channel *chan,
1960 enum nl80211_channel_type channel_type,
1961 unsigned int wait)
1962{
1963 struct iwl_priv *priv = hw->priv;
1964 int ret;
1965
1966 /* Not supported if we don't have PAN */
1967 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) {
1968 ret = -EOPNOTSUPP;
1969 goto free;
1970 }
1971
1972 /* Not supported on pre-P2P firmware */
1973 if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
1974 BIT(NL80211_IFTYPE_P2P_CLIENT))) {
1975 ret = -EOPNOTSUPP;
1976 goto free;
1977 }
1978
1979 mutex_lock(&priv->mutex);
1980
1981 if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) {
1982 /*
1983 * If the PAN context is free, use the normal
1984 * way of doing remain-on-channel offload + TX.
1985 */
1986 ret = 1;
1987 goto out;
1988 }
1989
1990 /* TODO: queue up if scanning? */
1991 if (test_bit(STATUS_SCANNING, &priv->status) ||
1992 priv->offchan_tx_skb) {
1993 ret = -EBUSY;
1994 goto out;
1995 }
1996
1997 /*
1998 * max_scan_ie_len doesn't include the blank SSID element or the
1999 * 802.11 header, so they need to be added back here.
2000 */
2001 if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) {
2002 ret = -ENOBUFS;
2003 goto out;
2004 }
2005
2006 priv->offchan_tx_skb = skb;
2007 priv->offchan_tx_timeout = wait;
2008 priv->offchan_tx_chan = chan;
2009
2010 ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
2011 IWL_SCAN_OFFCH_TX, chan->band);
2012 if (ret)
2013 priv->offchan_tx_skb = NULL;
2014 out:
2015 mutex_unlock(&priv->mutex);
2016 free:
2017 if (ret < 0)
2018 kfree_skb(skb);
2019
2020 return ret;
2021}
2022
2023static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
2024{
2025 struct iwl_priv *priv = hw->priv;
2026 int ret;
2027
2028 mutex_lock(&priv->mutex);
2029
2030 if (!priv->offchan_tx_skb) {
2031 ret = -EINVAL;
2032 goto unlock;
2033 }
2034
2035 priv->offchan_tx_skb = NULL;
2036
2037 ret = iwl_scan_cancel_timeout(priv, 200);
2038 if (ret)
2039 ret = -EIO;
2040unlock:
2041 mutex_unlock(&priv->mutex);
2042
2043 return ret;
2044}
2045
2046/*****************************************************************************
2047 *
2048 * mac80211 entry point functions
2049 *
2050 *****************************************************************************/
2051
2052static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
2053 {
2054 .max = 1,
2055 .types = BIT(NL80211_IFTYPE_STATION),
2056 },
2057 {
2058 .max = 1,
2059 .types = BIT(NL80211_IFTYPE_AP),
2060 },
2061};
2062
2063static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
2064 {
2065 .max = 2,
2066 .types = BIT(NL80211_IFTYPE_STATION),
2067 },
2068};
2069
2070static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
2071 {
2072 .max = 1,
2073 .types = BIT(NL80211_IFTYPE_STATION),
2074 },
2075 {
2076 .max = 1,
2077 .types = BIT(NL80211_IFTYPE_P2P_GO) |
2078 BIT(NL80211_IFTYPE_AP),
2079 },
2080};
2081
2082static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
2083 {
2084 .max = 2,
2085 .types = BIT(NL80211_IFTYPE_STATION),
2086 },
2087 {
2088 .max = 1,
2089 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
2090 },
2091};
2092
2093static const struct ieee80211_iface_combination
2094iwlagn_iface_combinations_dualmode[] = {
2095 { .num_different_channels = 1,
2096 .max_interfaces = 2,
2097 .beacon_int_infra_match = true,
2098 .limits = iwlagn_sta_ap_limits,
2099 .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
2100 },
2101 { .num_different_channels = 1,
2102 .max_interfaces = 2,
2103 .limits = iwlagn_2sta_limits,
2104 .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
2105 },
2106};
2107
2108static const struct ieee80211_iface_combination
2109iwlagn_iface_combinations_p2p[] = {
2110 { .num_different_channels = 1,
2111 .max_interfaces = 2,
2112 .beacon_int_infra_match = true,
2113 .limits = iwlagn_p2p_sta_go_limits,
2114 .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
2115 },
2116 { .num_different_channels = 1,
2117 .max_interfaces = 2,
2118 .limits = iwlagn_p2p_2sta_limits,
2119 .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
2120 },
2121};
2122
2123/*
2124 * Not a mac80211 entry point function, but it fits in with all the
2125 * other mac80211 functions grouped here.
2126 */
2127static int iwl_mac_setup_register(struct iwl_priv *priv,
2128 struct iwlagn_ucode_capabilities *capa)
2129{
2130 int ret;
2131 struct ieee80211_hw *hw = priv->hw;
2132 struct iwl_rxon_context *ctx;
2133
2134 hw->rate_control_algorithm = "iwl-agn-rs";
2135
2136 /* Tell mac80211 our characteristics */
2137 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2138 IEEE80211_HW_AMPDU_AGGREGATION |
2139 IEEE80211_HW_NEED_DTIM_PERIOD |
2140 IEEE80211_HW_SPECTRUM_MGMT |
2141 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2142
2143 /*
2144 * Including the following line will crash some APs. This
2145 * workaround removes the stimulus which causes the crash until
2146 * the AP software can be fixed.
2147 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2148 */
2149
2150 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2151 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2152
2153 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
2154 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2155 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2156
2157 if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
2158 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
2159
2160 hw->sta_data_size = sizeof(struct iwl_station_priv);
2161 hw->vif_data_size = sizeof(struct iwl_vif_priv);
2162
2163 for_each_context(priv, ctx) {
2164 hw->wiphy->interface_modes |= ctx->interface_modes;
2165 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
2166 }
2167
2168 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
2169
2170 if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
2171 hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
2172 hw->wiphy->n_iface_combinations =
2173 ARRAY_SIZE(iwlagn_iface_combinations_p2p);
2174 } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
2175 hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
2176 hw->wiphy->n_iface_combinations =
2177 ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
2178 }
2179
2180 hw->wiphy->max_remain_on_channel_duration = 1000;
2181
2182 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
2183 WIPHY_FLAG_DISABLE_BEACON_HINTS |
2184 WIPHY_FLAG_IBSS_RSN;
2185
2186 if (priv->ucode_wowlan.code.len && device_can_wakeup(priv->bus->dev)) {
2187 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
2188 WIPHY_WOWLAN_DISCONNECT |
2189 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
2190 WIPHY_WOWLAN_RFKILL_RELEASE;
2191 if (!iwlagn_mod_params.sw_crypto)
2192 hw->wiphy->wowlan.flags |=
2193 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
2194 WIPHY_WOWLAN_GTK_REKEY_FAILURE;
2195
2196 hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
2197 hw->wiphy->wowlan.pattern_min_len =
2198 IWLAGN_WOWLAN_MIN_PATTERN_LEN;
2199 hw->wiphy->wowlan.pattern_max_len =
2200 IWLAGN_WOWLAN_MAX_PATTERN_LEN;
2201 }
2202
2203 if (iwlagn_mod_params.power_save)
2204 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
2205 else
2206 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2207
2208 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2209 /* we create the 802.11 header and a zero-length SSID element */
2210 hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
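	/*
	 * For example, with the default max_probe_length of 200 used
	 * when the firmware does not report one, this leaves
	 * 200 - 24 - 2 = 174 bytes of IE space (24-byte 802.11 header
	 * plus the 2-byte, zero-length SSID element).
	 */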
2211
2212 /* Default value; 4 EDCA QOS priorities */
2213 hw->queues = 4;
2214
2215 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2216
2217 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2218 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2219 &priv->bands[IEEE80211_BAND_2GHZ];
2220 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2221 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2222 &priv->bands[IEEE80211_BAND_5GHZ];
2223
2224 iwl_leds_init(priv);
2225
2226 ret = ieee80211_register_hw(priv->hw);
2227 if (ret) {
2228 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2229 return ret;
2230 }
2231 priv->mac80211_registered = 1;
2232
2233 return 0;
2234}
2235
2236
2237static int iwlagn_mac_start(struct ieee80211_hw *hw)
2238{
2239 struct iwl_priv *priv = hw->priv;
2240 int ret;
2241
2242 IWL_DEBUG_MAC80211(priv, "enter\n");
2243
2244 /* we should be verifying the device is ready to be opened */
2245 mutex_lock(&priv->mutex);
2246 ret = __iwl_up(priv);
2247 mutex_unlock(&priv->mutex);
2248 if (ret)
2249 return ret;
2250
2251 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2252
2253 /* Now we should be done, and the READY bit should be set. */
2254 if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
2255 ret = -EIO;
2256
2257 iwlagn_led_enable(priv);
2258
2259 priv->is_open = 1;
2260 IWL_DEBUG_MAC80211(priv, "leave\n");
2261 return 0;
2262}
2263
2264static void iwlagn_mac_stop(struct ieee80211_hw *hw)
2265{
2266 struct iwl_priv *priv = hw->priv;
2267
2268 IWL_DEBUG_MAC80211(priv, "enter\n");
2269
2270 if (!priv->is_open)
2271 return;
2272
2273 priv->is_open = 0;
2274
2275 iwl_down(priv);
2276
2277 flush_workqueue(priv->workqueue);
2278
2279 /* User space software may expect to receive rfkill change events
2280 * even while the interface is down */
2281 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2282 iwl_enable_rfkill_int(priv);
2283
2284 IWL_DEBUG_MAC80211(priv, "leave\n");
2285}
2286
2287#ifdef CONFIG_PM
2288static int iwlagn_send_patterns(struct iwl_priv *priv,
2289 struct cfg80211_wowlan *wowlan)
2290{
2291 struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
2292 struct iwl_host_cmd cmd = {
2293 .id = REPLY_WOWLAN_PATTERNS,
2294 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
2295 .flags = CMD_SYNC,
2296 };
2297 int i, err;
2298
2299 if (!wowlan->n_patterns)
2300 return 0;
2301
2302 cmd.len[0] = sizeof(*pattern_cmd) +
2303 wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
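	/*
	 * The command is variable length: the n_patterns header is
	 * followed by one iwlagn_wowlan_pattern per pattern, each holding
	 * the raw pattern bytes plus a mask with one bit per pattern byte
	 * (hence the DIV_ROUND_UP(pattern_len, 8) below).
	 */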
2304
2305 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
2306 if (!pattern_cmd)
2307 return -ENOMEM;
2308
2309 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
2310
2311 for (i = 0; i < wowlan->n_patterns; i++) {
2312 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
2313
2314 memcpy(&pattern_cmd->patterns[i].mask,
2315 wowlan->patterns[i].mask, mask_len);
2316 memcpy(&pattern_cmd->patterns[i].pattern,
2317 wowlan->patterns[i].pattern,
2318 wowlan->patterns[i].pattern_len);
2319 pattern_cmd->patterns[i].mask_size = mask_len;
2320 pattern_cmd->patterns[i].pattern_size =
2321 wowlan->patterns[i].pattern_len;
2322 }
2323
2324 cmd.data[0] = pattern_cmd;
2325 err = trans_send_cmd(&priv->trans, &cmd);
2326 kfree(pattern_cmd);
2327 return err;
2328}
2329#endif
2330
2331static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
2332 struct ieee80211_vif *vif,
2333 struct cfg80211_gtk_rekey_data *data)
2334{
2335 struct iwl_priv *priv = hw->priv;
2336
2337 if (iwlagn_mod_params.sw_crypto)
2338 return;
2339
2340 mutex_lock(&priv->mutex);
2341
2342 if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
2343 goto out;
2344
2345 memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
2346 memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
2347 priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
2348 priv->have_rekey_data = true;
2349
2350 out:
2351 mutex_unlock(&priv->mutex);
2352}
2353
2354struct wowlan_key_data {
2355 struct iwl_rxon_context *ctx;
2356 struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
2357 struct iwlagn_wowlan_tkip_params_cmd *tkip;
2358 const u8 *bssid;
2359 bool error, use_rsc_tsc, use_tkip;
2360};
2361
2362#ifdef CONFIG_PM
2363static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
2364{
2365 int i;
2366
2367 for (i = 0; i < IWLAGN_P1K_SIZE; i++)
2368 out[i] = cpu_to_le16(p1k[i]);
2369}
2370
2371static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
2372 struct ieee80211_vif *vif,
2373 struct ieee80211_sta *sta,
2374 struct ieee80211_key_conf *key,
2375 void *_data)
2376{
2377 struct iwl_priv *priv = hw->priv;
2378 struct wowlan_key_data *data = _data;
2379 struct iwl_rxon_context *ctx = data->ctx;
2380 struct aes_sc *aes_sc, *aes_tx_sc = NULL;
2381 struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
2382 struct iwlagn_p1k_cache *rx_p1ks;
2383 u8 *rx_mic_key;
2384 struct ieee80211_key_seq seq;
2385 u32 cur_rx_iv32 = 0;
2386 u16 p1k[IWLAGN_P1K_SIZE];
2387 int ret, i;
2388
2389 mutex_lock(&priv->mutex);
2390
2391 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2392 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2393 !sta && !ctx->key_mapping_keys)
2394 ret = iwl_set_default_wep_key(priv, ctx, key);
2395 else
2396 ret = iwl_set_dynamic_key(priv, ctx, key, sta);
2397
2398 if (ret) {
2399 IWL_ERR(priv, "Error setting key during suspend!\n");
2400 data->error = true;
2401 }
2402
2403 switch (key->cipher) {
2404 case WLAN_CIPHER_SUITE_TKIP:
2405 if (sta) {
2406 tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
2407 tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
2408
2409 rx_p1ks = data->tkip->rx_uni;
2410
2411 ieee80211_get_key_tx_seq(key, &seq);
2412 tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
2413 tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
2414
2415 ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
2416 iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
2417
2418 memcpy(data->tkip->mic_keys.tx,
2419 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
2420 IWLAGN_MIC_KEY_SIZE);
2421
2422 rx_mic_key = data->tkip->mic_keys.rx_unicast;
2423 } else {
2424 tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
2425 rx_p1ks = data->tkip->rx_multi;
2426 rx_mic_key = data->tkip->mic_keys.rx_mcast;
2427 }
2428
2429 /*
2430 * For non-QoS this relies on the fact that both the uCode and
2431		 * mac80211 use TID 0 (as they need to in order to avoid replay attacks)
2432 * for checking the IV in the frames.
2433 */
2434 for (i = 0; i < IWLAGN_NUM_RSC; i++) {
2435 ieee80211_get_key_rx_seq(key, i, &seq);
2436 tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
2437 tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
2438 /* wrapping isn't allowed, AP must rekey */
2439 if (seq.tkip.iv32 > cur_rx_iv32)
2440 cur_rx_iv32 = seq.tkip.iv32;
2441 }
2442
2443 ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
2444 iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
2445 ieee80211_get_tkip_rx_p1k(key, data->bssid,
2446 cur_rx_iv32 + 1, p1k);
2447 iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
2448
2449 memcpy(rx_mic_key,
2450 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
2451 IWLAGN_MIC_KEY_SIZE);
2452
2453 data->use_tkip = true;
2454 data->use_rsc_tsc = true;
2455 break;
2456 case WLAN_CIPHER_SUITE_CCMP:
2457 if (sta) {
2458 u8 *pn = seq.ccmp.pn;
2459
2460 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
2461 aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
2462
2463 ieee80211_get_key_tx_seq(key, &seq);
2464 aes_tx_sc->pn = cpu_to_le64(
2465 (u64)pn[5] |
2466 ((u64)pn[4] << 8) |
2467 ((u64)pn[3] << 16) |
2468 ((u64)pn[2] << 24) |
2469 ((u64)pn[1] << 32) |
2470 ((u64)pn[0] << 40));
2471 } else
2472 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
2473
2474 /*
2475 * For non-QoS this relies on the fact that both the uCode and
2476 * mac80211 use TID 0 for checking the IV in the frames.
2477 */
2478 for (i = 0; i < IWLAGN_NUM_RSC; i++) {
2479 u8 *pn = seq.ccmp.pn;
2480
2481 ieee80211_get_key_rx_seq(key, i, &seq);
2482			aes_sc[i].pn = cpu_to_le64(
2483 (u64)pn[5] |
2484 ((u64)pn[4] << 8) |
2485 ((u64)pn[3] << 16) |
2486 ((u64)pn[2] << 24) |
2487 ((u64)pn[1] << 32) |
2488 ((u64)pn[0] << 40));
2489 }
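		/*
		 * Note (annotation): in both the TX and RX cases above,
		 * mac80211 stores the CCMP PN most-significant byte first
		 * (pn[0] is the MSB), so the six bytes are reassembled into
		 * a 48-bit counter and converted to the little-endian
		 * layout the uCode expects; e.g. pn[] = {00 00 01 02 03 04}
		 * is programmed as the counter 0x000001020304.
		 */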
2490 data->use_rsc_tsc = true;
2491 break;
2492 }
2493
2494 mutex_unlock(&priv->mutex);
2495}
2496
2497static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2498 struct cfg80211_wowlan *wowlan)
2499{
2500 struct iwl_priv *priv = hw->priv;
2501 struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
2502 struct iwl_rxon_cmd rxon;
2503 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2504 struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
2505 struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
2506 struct wowlan_key_data key_data = {
2507 .ctx = ctx,
2508 .bssid = ctx->active.bssid_addr,
2509 .use_rsc_tsc = false,
2510 .tkip = &tkip_cmd,
2511 .use_tkip = false,
2512 };
2513 int ret, i;
2514 u16 seq;
2515
2516 if (WARN_ON(!wowlan))
2517 return -EINVAL;
2518
2519 mutex_lock(&priv->mutex);
2520
2521 /* Don't attempt WoWLAN when not associated, tear down instead. */
2522 if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
2523 !iwl_is_associated_ctx(ctx)) {
2524 ret = 1;
2525 goto out;
2526 }
2527
2528 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
2529 if (!key_data.rsc_tsc) {
2530 ret = -ENOMEM;
2531 goto out;
2532 }
2533
2534 memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
2535
2536 /*
2537	 * Tell the uCode the last sequence number we used; it expects to
2538	 * know that one and will increment it itself before TX.
2539 */
2540 seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
2541 wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
2542
2543 /*
2544 * For QoS counters, we store the one to use next, so subtract 0x10
2545 * since the uCode will add 0x10 before using the value.
2546 */
2547 for (i = 0; i < 8; i++) {
2548 seq = priv->stations[IWL_AP_ID].tid[i].seq_number;
2549 seq -= 0x10;
2550 wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
2551 }
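	/*
	 * Note (annotation): on-air sequence numbers occupy the top 12 bits
	 * of the sequence control field, so one step is 0x10; seq_number
	 * holds the value to use next, and subtracting 0x10 converts it
	 * back into the last value actually used, which is what the uCode
	 * wants here.
	 */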
2552
2553 if (wowlan->disconnect)
2554 wakeup_filter_cmd.enabled |=
2555 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
2556 IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
2557 if (wowlan->magic_pkt)
2558 wakeup_filter_cmd.enabled |=
2559 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
2560 if (wowlan->gtk_rekey_failure)
2561 wakeup_filter_cmd.enabled |=
2562 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
2563 if (wowlan->eap_identity_req)
2564 wakeup_filter_cmd.enabled |=
2565 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
2566 if (wowlan->four_way_handshake)
2567 wakeup_filter_cmd.enabled |=
2568 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
2569 if (wowlan->rfkill_release)
2570 wakeup_filter_cmd.enabled |=
2571 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL);
2572 if (wowlan->n_patterns)
2573 wakeup_filter_cmd.enabled |=
2574 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
2575
2576 iwl_scan_cancel_timeout(priv, 200);
2577
2578 memcpy(&rxon, &ctx->active, sizeof(rxon));
2579
2580 trans_stop_device(&priv->trans);
2581
2582 priv->wowlan = true;
2583
2584 ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
2585 IWL_UCODE_WOWLAN);
2586 if (ret)
2587 goto error;
2588
2589 /* now configure WoWLAN ucode */
2590 ret = iwl_alive_start(priv);
2591 if (ret)
2592 goto error;
2593
2594 memcpy(&ctx->staging, &rxon, sizeof(rxon));
2595 ret = iwlagn_commit_rxon(priv, ctx);
2596 if (ret)
2597 goto error;
2598
2599 ret = iwl_power_update_mode(priv, true);
2600 if (ret)
2601 goto error;
2602
2603 if (!iwlagn_mod_params.sw_crypto) {
2604 /* mark all keys clear */
2605 priv->ucode_key_table = 0;
2606 ctx->key_mapping_keys = 0;
2607
2608 /*
2609 * This needs to be unlocked due to lock ordering
2610 * constraints. Since we're in the suspend path
2611 * that isn't really a problem though.
2612 */
2613 mutex_unlock(&priv->mutex);
2614 ieee80211_iter_keys(priv->hw, ctx->vif,
2615 iwlagn_wowlan_program_keys,
2616 &key_data);
2617 mutex_lock(&priv->mutex);
2618 if (key_data.error) {
2619 ret = -EIO;
2620 goto error;
2621 }
2622
2623 if (key_data.use_rsc_tsc) {
2624 struct iwl_host_cmd rsc_tsc_cmd = {
2625 .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
2626 .flags = CMD_SYNC,
2627 .data[0] = key_data.rsc_tsc,
2628 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
2629 .len[0] = sizeof(*key_data.rsc_tsc),
2630 };
2631
2632 ret = trans_send_cmd(&priv->trans, &rsc_tsc_cmd);
2633 if (ret)
2634 goto error;
2635 }
2636
2637 if (key_data.use_tkip) {
2638 ret = trans_send_cmd_pdu(&priv->trans,
2639 REPLY_WOWLAN_TKIP_PARAMS,
2640 CMD_SYNC, sizeof(tkip_cmd),
2641 &tkip_cmd);
2642 if (ret)
2643 goto error;
2644 }
2645
2646 if (priv->have_rekey_data) {
2647 memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
2648 memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
2649 kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
2650 memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
2651 kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
2652 kek_kck_cmd.replay_ctr = priv->replay_ctr;
2653
2654 ret = trans_send_cmd_pdu(&priv->trans,
2655 REPLY_WOWLAN_KEK_KCK_MATERIAL,
2656 CMD_SYNC, sizeof(kek_kck_cmd),
2657 &kek_kck_cmd);
2658 if (ret)
2659 goto error;
2660 }
2661 }
2662
2663 ret = trans_send_cmd_pdu(&priv->trans, REPLY_WOWLAN_WAKEUP_FILTER,
2664 CMD_SYNC, sizeof(wakeup_filter_cmd),
2665 &wakeup_filter_cmd);
2666 if (ret)
2667 goto error;
2668
2669 ret = iwlagn_send_patterns(priv, wowlan);
2670 if (ret)
2671 goto error;
2672
2673 device_set_wakeup_enable(priv->bus->dev, true);
2674
2675 /* Now let the ucode operate on its own */
2676 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2677 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
2678
2679 goto out;
2680
2681 error:
2682 priv->wowlan = false;
2683 iwlagn_prepare_restart(priv);
2684 ieee80211_restart_hw(priv->hw);
2685 out:
2686 mutex_unlock(&priv->mutex);
2687 kfree(key_data.rsc_tsc);
2688 return ret;
2689}
2690
2691static int iwlagn_mac_resume(struct ieee80211_hw *hw)
2692{
2693 struct iwl_priv *priv = hw->priv;
2694 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2695 struct ieee80211_vif *vif;
2696 unsigned long flags;
2697 u32 base, status = 0xffffffff;
2698 int ret = -EIO;
2699
2700 mutex_lock(&priv->mutex);
2701
2702 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2703 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
2704
2705 base = priv->device_pointers.error_event_table;
2706 if (iwlagn_hw_valid_rtc_data_addr(base)) {
2707 spin_lock_irqsave(&priv->reg_lock, flags);
2708 ret = iwl_grab_nic_access_silent(priv);
2709 if (ret == 0) {
2710 iwl_write32(priv, HBUS_TARG_MEM_RADDR, base);
2711 status = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
2712 iwl_release_nic_access(priv);
2713 }
2714 spin_unlock_irqrestore(&priv->reg_lock, flags);
2715
2716#ifdef CONFIG_IWLWIFI_DEBUGFS
2717 if (ret == 0) {
2718 if (!priv->wowlan_sram)
2719 priv->wowlan_sram =
2720 kzalloc(priv->ucode_wowlan.data.len,
2721 GFP_KERNEL);
2722
2723 if (priv->wowlan_sram)
2724 _iwl_read_targ_mem_words(
2725 priv, 0x800000, priv->wowlan_sram,
2726 priv->ucode_wowlan.data.len / 4);
2727 }
2728#endif
2729 }
2730
2731 /* we'll clear ctx->vif during iwlagn_prepare_restart() */
2732 vif = ctx->vif;
2733
2734 priv->wowlan = false;
2735
2736 device_set_wakeup_enable(priv->bus->dev, false);
2737
2738 iwlagn_prepare_restart(priv);
2739
2740 memset((void *)&ctx->active, 0, sizeof(ctx->active));
2741 iwl_connection_init_rx_config(priv, ctx);
2742 iwlagn_set_rxon_chain(priv, ctx);
2743
2744 mutex_unlock(&priv->mutex);
2745
2746 ieee80211_resume_disconnect(vif);
2747
2748 return 1;
2749}
2750#endif
2751
2752static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2753{
2754 struct iwl_priv *priv = hw->priv;
2755
2756 IWL_DEBUG_MACDUMP(priv, "enter\n");
2757
2758 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2759 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2760
2761 if (iwlagn_tx_skb(priv, skb))
2762 dev_kfree_skb_any(skb);
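	/*
	 * Note (annotation): a non-zero return from iwlagn_tx_skb() means
	 * the frame was not handed to the device, so the skb is freed here
	 * rather than leaked.
	 */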
2763
2764 IWL_DEBUG_MACDUMP(priv, "leave\n");
2765}
2766
2767static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
2768 struct ieee80211_vif *vif,
2769 struct ieee80211_key_conf *keyconf,
2770 struct ieee80211_sta *sta,
2771 u32 iv32, u16 *phase1key)
2772{
2773 struct iwl_priv *priv = hw->priv;
2774
2775 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
2776}
2777
2778static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2779 struct ieee80211_vif *vif,
2780 struct ieee80211_sta *sta,
2781 struct ieee80211_key_conf *key)
2782{
2783 struct iwl_priv *priv = hw->priv;
2784 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2785 struct iwl_rxon_context *ctx = vif_priv->ctx;
2786 int ret;
2787 bool is_default_wep_key = false;
2788
2789 IWL_DEBUG_MAC80211(priv, "enter\n");
2790
2791 if (iwlagn_mod_params.sw_crypto) {
2792 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2793 return -EOPNOTSUPP;
2794 }
2795
2796 switch (key->cipher) {
2797 case WLAN_CIPHER_SUITE_TKIP:
2798 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2799 /* fall through */
2800 case WLAN_CIPHER_SUITE_CCMP:
2801 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2802 break;
2803 default:
2804 break;
2805 }
2806
2807 /*
2808 * We could program these keys into the hardware as well, but we
2809 * don't expect much multicast traffic in IBSS and having keys
2810 * for more stations is probably more useful.
2811 *
2812 * Mark key TX-only and return 0.
2813 */
2814 if (vif->type == NL80211_IFTYPE_ADHOC &&
2815 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
2816 key->hw_key_idx = WEP_INVALID_OFFSET;
2817 return 0;
2818 }
2819
2820	/* If the key was TX-only, accept deletion */
2821 if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
2822 return 0;
2823
2824 mutex_lock(&priv->mutex);
2825 iwl_scan_cancel_timeout(priv, 100);
2826
2827 BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
2828
2829 /*
2830	 * If we are getting a WEP group key and we didn't receive any
2831	 * key-mapping keys so far, we are in legacy WEP mode (group key
2832	 * only); otherwise we are in 802.1X mode.
2833	 * In legacy WEP mode, a different host command is sent to the uCode.
2834 */
2835 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2836 key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
2837 if (cmd == SET_KEY)
2838 is_default_wep_key = !ctx->key_mapping_keys;
2839 else
2840 is_default_wep_key =
2841 key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
2842 }
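	/*
	 * Note (annotation): for SET_KEY the decision depends on whether
	 * any key-mapping (pairwise) keys have been installed yet; for
	 * DISABLE_KEY it depends on where this key was originally
	 * programmed, as recorded in hw_key_idx.
	 */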
2843
2844
2845 switch (cmd) {
2846 case SET_KEY:
2847 if (is_default_wep_key) {
2848 ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
2849 break;
2850 }
2851 ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
2852 if (ret) {
2853 /*
2854 * can't add key for RX, but we don't need it
2855 * in the device for TX so still return 0
2856 */
2857 ret = 0;
2858 key->hw_key_idx = WEP_INVALID_OFFSET;
2859 }
2860
2861 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2862 break;
2863 case DISABLE_KEY:
2864 if (is_default_wep_key)
2865 ret = iwl_remove_default_wep_key(priv, ctx, key);
2866 else
2867 ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
2868
2869 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2870 break;
2871 default:
2872 ret = -EINVAL;
2873 }
2874
2875 mutex_unlock(&priv->mutex);
2876 IWL_DEBUG_MAC80211(priv, "leave\n");
2877
2878 return ret;
2879}
2880
2881static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2882 struct ieee80211_vif *vif,
2883 enum ieee80211_ampdu_mlme_action action,
2884 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2885 u8 buf_size)
2886{
2887 struct iwl_priv *priv = hw->priv;
2888 int ret = -EINVAL;
2889 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
2890
2891 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2892 sta->addr, tid);
2893
2894 if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
2895 return -EACCES;
2896
2897 mutex_lock(&priv->mutex);
2898
2899 switch (action) {
2900 case IEEE80211_AMPDU_RX_START:
2901 IWL_DEBUG_HT(priv, "start Rx\n");
2902 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
2903 break;
2904 case IEEE80211_AMPDU_RX_STOP:
2905 IWL_DEBUG_HT(priv, "stop Rx\n");
2906 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
2907 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2908 ret = 0;
2909 break;
2910 case IEEE80211_AMPDU_TX_START:
2911 IWL_DEBUG_HT(priv, "start Tx\n");
2912 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
2913 if (ret == 0) {
2914 priv->agg_tids_count++;
2915 IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
2916 priv->agg_tids_count);
2917 }
2918 break;
2919 case IEEE80211_AMPDU_TX_STOP:
2920 IWL_DEBUG_HT(priv, "stop Tx\n");
2921 ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
2922 if ((ret == 0) && (priv->agg_tids_count > 0)) {
2923 priv->agg_tids_count--;
2924 IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
2925 priv->agg_tids_count);
2926 }
2927 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2928 ret = 0;
2929 if (priv->cfg->ht_params &&
2930 priv->cfg->ht_params->use_rts_for_aggregation) {
2931 /*
2932 * switch off RTS/CTS if it was previously enabled
2933 */
2934 sta_priv->lq_sta.lq.general_params.flags &=
2935 ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
2936 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
2937 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
2938 }
2939 break;
2940 case IEEE80211_AMPDU_TX_OPERATIONAL:
2941 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2942
2943 trans_txq_agg_setup(&priv->trans, iwl_sta_id(sta), tid,
2944 buf_size);
2945
2946 /*
2947 * If the limit is 0, then it wasn't initialised yet,
2948 * use the default. We can do that since we take the
2949 * minimum below, and we don't want to go above our
2950 * default due to hardware restrictions.
2951 */
2952 if (sta_priv->max_agg_bufsize == 0)
2953 sta_priv->max_agg_bufsize =
2954 LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2955
2956 /*
2957 * Even though in theory the peer could have different
2958 * aggregation reorder buffer sizes for different sessions,
2959 * our ucode doesn't allow for that and has a global limit
2960 * for each station. Therefore, use the minimum of all the
2961 * aggregation sessions and our default value.
2962 */
2963 sta_priv->max_agg_bufsize =
2964 min(sta_priv->max_agg_bufsize, buf_size);
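		/*
		 * Example (annotation): if an earlier session already capped
		 * max_agg_bufsize at 16 frames and the new peer now
		 * advertises 64, the stricter value (16) is kept and
		 * programmed into the link quality command below.
		 */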
2965
2966 if (priv->cfg->ht_params &&
2967 priv->cfg->ht_params->use_rts_for_aggregation) {
2968 /*
2969			 * switch to RTS/CTS if it is the preferred protection
2970 * method for HT traffic
2971 */
2972
2973 sta_priv->lq_sta.lq.general_params.flags |=
2974 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
2975 }
2976
2977 sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
2978 sta_priv->max_agg_bufsize;
2979
2980 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
2981 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
2982
2983 IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
2984 sta->addr, tid);
2985 ret = 0;
2986 break;
2987 }
2988 mutex_unlock(&priv->mutex);
2989
2990 return ret;
2991}
2992
2993static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
2994 struct ieee80211_vif *vif,
2995 struct ieee80211_sta *sta)
2996{
2997 struct iwl_priv *priv = hw->priv;
2998 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2999 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3000 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3001 int ret;
3002 u8 sta_id;
3003
3004 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3005 sta->addr);
3006 mutex_lock(&priv->mutex);
3007 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
3008 sta->addr);
3009 sta_priv->common.sta_id = IWL_INVALID_STATION;
3010
3011 atomic_set(&sta_priv->pending_frames, 0);
3012 if (vif->type == NL80211_IFTYPE_AP)
3013 sta_priv->client = true;
3014
3015 ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
3016 is_ap, sta, &sta_id);
3017 if (ret) {
3018 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3019 sta->addr, ret);
3020 /* Should we return success if return code is EEXIST ? */
3021 mutex_unlock(&priv->mutex);
3022 return ret;
3023 }
3024
3025 sta_priv->common.sta_id = sta_id;
3026
3027 /* Initialize rate scaling */
3028 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3029 sta->addr);
3030 iwl_rs_rate_init(priv, sta, sta_id);
3031 mutex_unlock(&priv->mutex);
3032
3033 return 0;
3034}
3035
3036static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3037 struct ieee80211_channel_switch *ch_switch)
3038{
3039 struct iwl_priv *priv = hw->priv;
3040 const struct iwl_channel_info *ch_info;
3041 struct ieee80211_conf *conf = &hw->conf;
3042 struct ieee80211_channel *channel = ch_switch->channel;
3043 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
3044 /*
3045 * MULTI-FIXME
3046 * When we add support for multiple interfaces, we need to
3047 * revisit this. The channel switch command in the device
3048 * only affects the BSS context, but what does that really
3049 * mean? And what if we get a CSA on the second interface?
3050 * This needs a lot of work.
3051 */
3052 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3053 u16 ch;
3054
3055 IWL_DEBUG_MAC80211(priv, "enter\n");
3056
3057 mutex_lock(&priv->mutex);
3058
3059 if (iwl_is_rfkill(priv))
3060 goto out;
3061
3062 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
3063 test_bit(STATUS_SCANNING, &priv->status) ||
3064 test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
3065 goto out;
3066
3067 if (!iwl_is_associated_ctx(ctx))
3068 goto out;
3069
3070 if (!priv->cfg->lib->set_channel_switch)
3071 goto out;
3072
3073 ch = channel->hw_value;
3074 if (le16_to_cpu(ctx->active.channel) == ch)
3075 goto out;
3076
3077 ch_info = iwl_get_channel_info(priv, channel->band, ch);
3078 if (!is_channel_valid(ch_info)) {
3079 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
3080 goto out;
3081 }
3082
3083 spin_lock_irq(&priv->lock);
3084
3085 priv->current_ht_config.smps = conf->smps_mode;
3086
3087 /* Configure HT40 channels */
3088 ctx->ht.enabled = conf_is_ht(conf);
3089 if (ctx->ht.enabled)
3090 iwlagn_config_ht40(conf, ctx);
3091 else
3092 ctx->ht.is_40mhz = false;
3093
3094 if ((le16_to_cpu(ctx->staging.channel) != ch))
3095 ctx->staging.flags = 0;
3096
3097 iwl_set_rxon_channel(priv, channel, ctx);
3098 iwl_set_rxon_ht(priv, ht_conf);
3099 iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
3100
3101 spin_unlock_irq(&priv->lock);
3102
3103 iwl_set_rate(priv);
3104 /*
3105 * at this point, staging_rxon has the
3106 * configuration for channel switch
3107 */
3108 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
3109 priv->switch_channel = cpu_to_le16(ch);
3110 if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
3111 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
3112 priv->switch_channel = 0;
3113 ieee80211_chswitch_done(ctx->vif, false);
3114 }
3115
3116out:
3117 mutex_unlock(&priv->mutex);
3118 IWL_DEBUG_MAC80211(priv, "leave\n");
3119}
3120
3121static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3122 unsigned int changed_flags,
3123 unsigned int *total_flags,
3124 u64 multicast)
3125{
3126 struct iwl_priv *priv = hw->priv;
3127 __le32 filter_or = 0, filter_nand = 0;
3128 struct iwl_rxon_context *ctx;
3129
3130#define CHK(test, flag) do { \
3131 if (*total_flags & (test)) \
3132 filter_or |= (flag); \
3133 else \
3134 filter_nand |= (flag); \
3135 } while (0)
3136
3137 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
3138 changed_flags, *total_flags);
3139
3140 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3141 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
3142 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3143 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3144
3145#undef CHK
3146
3147 mutex_lock(&priv->mutex);
3148
3149 for_each_context(priv, ctx) {
3150 ctx->staging.filter_flags &= ~filter_nand;
3151 ctx->staging.filter_flags |= filter_or;
3152
3153 /*
3154 * Not committing directly because hardware can perform a scan,
3155 * but we'll eventually commit the filter flags change anyway.
3156 */
3157 }
3158
3159 mutex_unlock(&priv->mutex);
3160
3161 /*
3162 * Receiving all multicast frames is always enabled by the
3163 * default flags setup in iwl_connection_init_rx_config()
3164 * since we currently do not support programming multicast
3165 * filters into the device.
3166 */
3167 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3168 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3169}
3170
3171static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
3172{
3173 struct iwl_priv *priv = hw->priv;
3174
3175 mutex_lock(&priv->mutex);
3176 IWL_DEBUG_MAC80211(priv, "enter\n");
3177
3178 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
3179 IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
3180 goto done;
3181 }
3182 if (iwl_is_rfkill(priv)) {
3183 IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
3184 goto done;
3185 }
3186
3187 /*
3188 * mac80211 will not push any more frames for transmit
3189 * until the flush is completed
3190 */
3191 if (drop) {
3192 IWL_DEBUG_MAC80211(priv, "send flush command\n");
3193 if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
3194			IWL_ERR(priv, "flush request failed\n");
3195 goto done;
3196 }
3197 }
3198 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
3199 iwlagn_wait_tx_queue_empty(priv);
3200done:
3201 mutex_unlock(&priv->mutex);
3202 IWL_DEBUG_MAC80211(priv, "leave\n");
3203}
3204
3205static void iwlagn_disable_roc(struct iwl_priv *priv)
3206{
3207 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
3208 struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
3209
3210 lockdep_assert_held(&priv->mutex);
3211
3212 if (!ctx->is_active)
3213 return;
3214
3215 ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
3216 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3217 iwl_set_rxon_channel(priv, chan, ctx);
3218 iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
3219
3220 priv->hw_roc_channel = NULL;
3221
3222 iwlagn_commit_rxon(priv, ctx);
3223
3224 ctx->is_active = false;
3225}
3226
3227static void iwlagn_bg_roc_done(struct work_struct *work)
3228{
3229 struct iwl_priv *priv = container_of(work, struct iwl_priv,
3230 hw_roc_work.work);
3231
3232 mutex_lock(&priv->mutex);
3233 ieee80211_remain_on_channel_expired(priv->hw);
3234 iwlagn_disable_roc(priv);
3235 mutex_unlock(&priv->mutex);
3236}
3237
3238static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3239 struct ieee80211_channel *channel,
3240 enum nl80211_channel_type channel_type,
3241 int duration)
3242{
3243 struct iwl_priv *priv = hw->priv;
3244 int err = 0;
3245
3246 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3247 return -EOPNOTSUPP;
3248
3249 if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
3250 BIT(NL80211_IFTYPE_P2P_CLIENT)))
3251 return -EOPNOTSUPP;
3252
3253 mutex_lock(&priv->mutex);
3254
3255 if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
3256 test_bit(STATUS_SCAN_HW, &priv->status)) {
3257 err = -EBUSY;
3258 goto out;
3259 }
3260
3261 priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
3262 priv->hw_roc_channel = channel;
3263 priv->hw_roc_chantype = channel_type;
3264 priv->hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
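	/*
	 * Note (annotation): mac80211 passes the requested duration in ms
	 * while the device programs the slot in TUs (1 TU = 1024 usec),
	 * hence the rounded-up conversion above;
	 * e.g. 500 ms -> DIV_ROUND_UP(500 * 1000, 1024) = 489 TU.
	 */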
3265 iwlagn_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
3266 queue_delayed_work(priv->workqueue, &priv->hw_roc_work,
3267 msecs_to_jiffies(duration + 20));
3268
3269 msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
3270 ieee80211_ready_on_channel(priv->hw);
3271
3272 out:
3273 mutex_unlock(&priv->mutex);
3274
3275 return err;
3276}
3277
3278static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
3279{
3280 struct iwl_priv *priv = hw->priv;
3281
3282 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3283 return -EOPNOTSUPP;
3284
3285 cancel_delayed_work_sync(&priv->hw_roc_work);
3286
3287 mutex_lock(&priv->mutex);
3288 iwlagn_disable_roc(priv);
3289 mutex_unlock(&priv->mutex);
3290
3291 return 0;
3292}
3293
3294/*****************************************************************************
3295 *
3296 * driver setup and teardown
3297 *
3298 *****************************************************************************/
3299
3300static void iwl_setup_deferred_work(struct iwl_priv *priv)
3301{
3302 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3303
3304 init_waitqueue_head(&priv->wait_command_queue);
3305
3306 INIT_WORK(&priv->restart, iwl_bg_restart);
3307 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
3308 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
3309 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
3310 INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
3311 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
3312 INIT_DELAYED_WORK(&priv->hw_roc_work, iwlagn_bg_roc_done);
3313
3314 iwl_setup_scan_deferred_work(priv);
3315
3316 if (priv->cfg->lib->bt_setup_deferred_work)
3317 priv->cfg->lib->bt_setup_deferred_work(priv);
3318
3319 init_timer(&priv->statistics_periodic);
3320 priv->statistics_periodic.data = (unsigned long)priv;
3321 priv->statistics_periodic.function = iwl_bg_statistics_periodic;
3322
3323 init_timer(&priv->ucode_trace);
3324 priv->ucode_trace.data = (unsigned long)priv;
3325 priv->ucode_trace.function = iwl_bg_ucode_trace;
3326
3327 init_timer(&priv->watchdog);
3328 priv->watchdog.data = (unsigned long)priv;
3329 priv->watchdog.function = iwl_bg_watchdog;
3330}
3331
3332static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3333{
3334 if (priv->cfg->lib->cancel_deferred_work)
3335 priv->cfg->lib->cancel_deferred_work(priv);
3336
3337 cancel_work_sync(&priv->run_time_calib_work);
3338 cancel_work_sync(&priv->beacon_update);
3339
3340 iwl_cancel_scan_deferred_work(priv);
3341
3342 cancel_work_sync(&priv->bt_full_concurrency);
3343 cancel_work_sync(&priv->bt_runtime_config);
3344
3345 del_timer_sync(&priv->statistics_periodic);
3346 del_timer_sync(&priv->ucode_trace);
3347}
3348
3349static void iwl_init_hw_rates(struct iwl_priv *priv,
3350 struct ieee80211_rate *rates)
3351{
3352 int i;
3353
3354 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
3355 rates[i].bitrate = iwl_rates[i].ieee * 5;
3356 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3357 rates[i].hw_value_short = i;
3358 rates[i].flags = 0;
3359 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
3360 /*
3361 * If CCK != 1M then set short preamble rate flag.
3362 */
3363 rates[i].flags |=
3364 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
3365 0 : IEEE80211_RATE_SHORT_PREAMBLE;
3366 }
3367 }
3368}
3369
3370static int iwl_init_drv(struct iwl_priv *priv)
3371{
3372 int ret;
3373
3374 spin_lock_init(&priv->sta_lock);
3375 spin_lock_init(&priv->hcmd_lock);
3376
3377 mutex_init(&priv->mutex);
3378
3379 priv->ieee_channels = NULL;
3380 priv->ieee_rates = NULL;
3381 priv->band = IEEE80211_BAND_2GHZ;
3382
3383 priv->iw_mode = NL80211_IFTYPE_STATION;
3384 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3385 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3386 priv->agg_tids_count = 0;
3387
3388 /* initialize force reset */
3389 priv->force_reset[IWL_RF_RESET].reset_duration =
3390 IWL_DELAY_NEXT_FORCE_RF_RESET;
3391 priv->force_reset[IWL_FW_RESET].reset_duration =
3392 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3393
3394 priv->rx_statistics_jiffies = jiffies;
3395
3396 /* Choose which receivers/antennas to use */
3397 iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
3398
3399 iwl_init_scan_params(priv);
3400
3401 /* init bt coex */
3402 if (priv->cfg->bt_params &&
3403 priv->cfg->bt_params->advanced_bt_coexist) {
3404 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
3405 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
3406 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
3407 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
3408 priv->bt_duration = BT_DURATION_LIMIT_DEF;
3409 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
3410 }
3411
3412 ret = iwl_init_channel_map(priv);
3413 if (ret) {
3414 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3415 goto err;
3416 }
3417
3418 ret = iwlcore_init_geos(priv);
3419 if (ret) {
3420 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3421 goto err_free_channel_map;
3422 }
3423 iwl_init_hw_rates(priv, priv->ieee_rates);
3424
3425 return 0;
3426
3427err_free_channel_map:
3428 iwl_free_channel_map(priv);
3429err:
3430 return ret;
3431}
3432
3433static void iwl_uninit_drv(struct iwl_priv *priv)
3434{
3435 iwl_calib_free_results(priv);
3436 iwlcore_free_geos(priv);
3437 iwl_free_channel_map(priv);
3438 kfree(priv->scan_cmd);
3439 kfree(priv->beacon_cmd);
3440#ifdef CONFIG_IWLWIFI_DEBUGFS
3441 kfree(priv->wowlan_sram);
3442#endif
3443}
3444
3445static void iwl_mac_rssi_callback(struct ieee80211_hw *hw,
3446 enum ieee80211_rssi_event rssi_event)
3447{
3448 struct iwl_priv *priv = hw->priv;
3449
3450 mutex_lock(&priv->mutex);
3451
3452 if (priv->cfg->bt_params &&
3453 priv->cfg->bt_params->advanced_bt_coexist) {
3454 if (rssi_event == RSSI_EVENT_LOW)
3455 priv->bt_enable_pspoll = true;
3456 else if (rssi_event == RSSI_EVENT_HIGH)
3457 priv->bt_enable_pspoll = false;
3458
3459 iwlagn_send_advance_bt_config(priv);
3460 } else {
3461		IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled, "
3462 "ignoring RSSI callback\n");
3463 }
3464
3465 mutex_unlock(&priv->mutex);
3466}
3467
3468struct ieee80211_ops iwlagn_hw_ops = {
3469 .tx = iwlagn_mac_tx,
3470 .start = iwlagn_mac_start,
3471 .stop = iwlagn_mac_stop,
3472#ifdef CONFIG_PM
3473 .suspend = iwlagn_mac_suspend,
3474 .resume = iwlagn_mac_resume,
3475#endif
3476 .add_interface = iwl_mac_add_interface,
3477 .remove_interface = iwl_mac_remove_interface,
3478 .change_interface = iwl_mac_change_interface,
3479 .config = iwlagn_mac_config,
3480 .configure_filter = iwlagn_configure_filter,
3481 .set_key = iwlagn_mac_set_key,
3482 .update_tkip_key = iwlagn_mac_update_tkip_key,
3483 .set_rekey_data = iwlagn_mac_set_rekey_data,
3484 .conf_tx = iwl_mac_conf_tx,
3485 .bss_info_changed = iwlagn_bss_info_changed,
3486 .ampdu_action = iwlagn_mac_ampdu_action,
3487 .hw_scan = iwl_mac_hw_scan,
3488 .sta_notify = iwlagn_mac_sta_notify,
3489 .sta_add = iwlagn_mac_sta_add,
3490 .sta_remove = iwl_mac_sta_remove,
3491 .channel_switch = iwlagn_mac_channel_switch,
3492 .flush = iwlagn_mac_flush,
3493 .tx_last_beacon = iwl_mac_tx_last_beacon,
3494 .remain_on_channel = iwl_mac_remain_on_channel,
3495 .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
3496 .offchannel_tx = iwl_mac_offchannel_tx,
3497 .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
3498 .rssi_callback = iwl_mac_rssi_callback,
3499 CFG80211_TESTMODE_CMD(iwl_testmode_cmd)
3500 CFG80211_TESTMODE_DUMP(iwl_testmode_dump)
3501};
3502
3503static u32 iwl_hw_detect(struct iwl_priv *priv)
3504{
3505 return iwl_read32(priv, CSR_HW_REV);
3506}
3507
3508static int iwl_set_hw_params(struct iwl_priv *priv)
3509{
3510 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
3511 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
3512 if (iwlagn_mod_params.amsdu_size_8K)
3513 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
3514 else
3515 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
3516
3517 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
3518
3519 if (iwlagn_mod_params.disable_11n)
3520 priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
3521
3522 /* Device-specific setup */
3523 return priv->cfg->lib->set_hw_params(priv);
3524}
3525
3526static const u8 iwlagn_bss_ac_to_fifo[] = {
3527 IWL_TX_FIFO_VO,
3528 IWL_TX_FIFO_VI,
3529 IWL_TX_FIFO_BE,
3530 IWL_TX_FIFO_BK,
3531};
3532
3533static const u8 iwlagn_bss_ac_to_queue[] = {
3534 0, 1, 2, 3,
3535};
3536
3537static const u8 iwlagn_pan_ac_to_fifo[] = {
3538 IWL_TX_FIFO_VO_IPAN,
3539 IWL_TX_FIFO_VI_IPAN,
3540 IWL_TX_FIFO_BE_IPAN,
3541 IWL_TX_FIFO_BK_IPAN,
3542};
3543
3544static const u8 iwlagn_pan_ac_to_queue[] = {
3545 7, 6, 5, 4,
3546};
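/*
 * Note (annotation): the PAN context maps the four ACs onto TX queues
 * 7..4 while the BSS context above uses 0..3, so the two RXON contexts
 * never share a hardware queue.
 */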
3547
3548/* This function both allocates and initializes hw and priv. */
3549static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
3550{
3551 struct iwl_priv *priv;
3552 /* mac80211 allocates memory for this device instance, including
3553 * space for this driver's private structure */
3554 struct ieee80211_hw *hw;
3555
3556 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
3557 if (hw == NULL) {
3558 pr_err("%s: Can not allocate network device\n",
3559 cfg->name);
3560 goto out;
3561 }
3562
3563 priv = hw->priv;
3564 priv->hw = hw;
3565
3566out:
3567 return hw;
3568}
3569
3570static void iwl_init_context(struct iwl_priv *priv)
3571{
3572 int i;
3573
3574 /*
3575 * The default context is always valid,
3576 * more may be discovered when firmware
3577 * is loaded.
3578 */
3579 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3580
3581 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3582 priv->contexts[i].ctxid = i;
3583
3584 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
3585 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
3586 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3587 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3588 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3589 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3590 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3591 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3592 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwlagn_bss_ac_to_fifo;
3593 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwlagn_bss_ac_to_queue;
3594 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
3595 BIT(NL80211_IFTYPE_ADHOC);
3596 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3597 BIT(NL80211_IFTYPE_STATION);
3598 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
3599 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3600 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3601 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3602
3603 priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
3604 priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
3605 REPLY_WIPAN_RXON_TIMING;
3606 priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
3607 REPLY_WIPAN_RXON_ASSOC;
3608 priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
3609 priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
3610 priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
3611 priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
3612 priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
3613 priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo = iwlagn_pan_ac_to_fifo;
3614 priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue = iwlagn_pan_ac_to_queue;
3615 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
3616 priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
3617 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
3618#ifdef CONFIG_IWL_P2P
3619 priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
3620 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
3621#endif
3622 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
3623 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
3624 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
3625
3626 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
3627}
3628
3629int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
3630{
3631 int err = 0;
3632 struct iwl_priv *priv;
3633 struct ieee80211_hw *hw;
3634 u16 num_mac;
3635 u32 hw_rev;
3636
3637 /************************
3638 * 1. Allocating HW data
3639 ************************/
3640 hw = iwl_alloc_all(cfg);
3641 if (!hw) {
3642 err = -ENOMEM;
3643 goto out;
3644 }
3645
3646 priv = hw->priv;
3647 priv->bus = bus;
3648 bus_set_drv_data(priv->bus, priv);
3649
3650 /* At this point both hw and priv are allocated. */
3651
3652 SET_IEEE80211_DEV(hw, priv->bus->dev);
3653
3654 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3655 priv->cfg = cfg;
3656 priv->inta_mask = CSR_INI_SET_MASK;
3657
3658 /* is antenna coupling more than 35dB ? */
3659 priv->bt_ant_couple_ok =
3660 (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
3661 true : false;
3662
3663 /* enable/disable bt channel inhibition */
3664 priv->bt_ch_announce = iwlagn_bt_ch_announce;
3665 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
3666 (priv->bt_ch_announce) ? "On" : "Off");
3667
3668 if (iwl_alloc_traffic_mem(priv))
3669 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3670
3671	/* these spin locks will be used in apm_ops.init and EEPROM access,
3672	 * so we should init them now
3673 */
3674 spin_lock_init(&priv->reg_lock);
3675 spin_lock_init(&priv->lock);
3676
3677 /*
3678 * stop and reset the on-board processor just in case it is in a
3679 * strange state ... like being left stranded by a primary kernel
3680 * and this is now the kdump kernel trying to start up
3681 */
3682 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3683
3684 /***********************
3685 * 3. Read REV register
3686 ***********************/
3687 hw_rev = iwl_hw_detect(priv);
3688 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3689 priv->cfg->name, hw_rev);
3690
3691 err = iwl_trans_register(&priv->trans, priv);
3692 if (err)
3693 goto out_free_traffic_mem;
3694
3695 if (trans_prepare_card_hw(&priv->trans)) {
3696 err = -EIO;
3697 IWL_WARN(priv, "Failed, HW not ready\n");
3698 goto out_free_trans;
3699 }
3700
3701 /*****************
3702 * 4. Read EEPROM
3703 *****************/
3704 /* Read the EEPROM */
3705 err = iwl_eeprom_init(priv, hw_rev);
3706 if (err) {
3707 IWL_ERR(priv, "Unable to init EEPROM\n");
3708 goto out_free_trans;
3709 }
3710 err = iwl_eeprom_check_version(priv);
3711 if (err)
3712 goto out_free_eeprom;
3713
3714 err = iwl_eeprom_check_sku(priv);
3715 if (err)
3716 goto out_free_eeprom;
3717
3718 /* extract MAC Address */
3719 iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
3720 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3721 priv->hw->wiphy->addresses = priv->addresses;
3722 priv->hw->wiphy->n_addresses = 1;
3723 num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
3724 if (num_mac > 1) {
3725 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
3726 ETH_ALEN);
3727 priv->addresses[1].addr[5]++;
3728 priv->hw->wiphy->n_addresses++;
3729 }
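	/*
	 * Note (annotation): the optional second (virtual) MAC address is
	 * derived by bumping the last octet of the first one, e.g. a
	 * made-up 00:1e:65:12:34:56 becomes 00:1e:65:12:34:57;
	 * addr[5]++ simply wraps at 0xff.
	 */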
3730
3731 /* initialize all valid contexts */
3732 iwl_init_context(priv);
3733
3734 /************************
3735 * 5. Setup HW constants
3736 ************************/
3737 if (iwl_set_hw_params(priv)) {
3738 err = -ENOENT;
3739 IWL_ERR(priv, "failed to set hw parameters\n");
3740 goto out_free_eeprom;
3741 }
3742
3743 /*******************
3744 * 6. Setup priv
3745 *******************/
3746
3747 err = iwl_init_drv(priv);
3748 if (err)
3749 goto out_free_eeprom;
3750 /* At this point both hw and priv are initialized. */
3751
3752 /********************
3753 * 7. Setup services
3754 ********************/
3755 iwl_setup_deferred_work(priv);
3756 iwl_setup_rx_handlers(priv);
3757 iwl_testmode_init(priv);
3758
3759 /*********************************************
3760 * 8. Enable interrupts
3761 *********************************************/
3762
3763 iwl_enable_rfkill_int(priv);
3764
3765 /* If platform's RF_KILL switch is NOT set to KILL */
3766 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3767 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3768 else
3769 set_bit(STATUS_RF_KILL_HW, &priv->status);
3770
3771 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3772 test_bit(STATUS_RF_KILL_HW, &priv->status));
3773
3774 iwl_power_initialize(priv);
3775 iwl_tt_initialize(priv);
3776
3777 init_completion(&priv->firmware_loading_complete);
3778
3779 err = iwl_request_firmware(priv, true);
3780 if (err)
3781 goto out_destroy_workqueue;
3782
3783 return 0;
3784
3785out_destroy_workqueue:
3786 destroy_workqueue(priv->workqueue);
3787 priv->workqueue = NULL;
3788 iwl_uninit_drv(priv);
3789out_free_eeprom:
3790 iwl_eeprom_free(priv);
3791out_free_trans:
3792 trans_free(&priv->trans);
3793out_free_traffic_mem:
3794 iwl_free_traffic_mem(priv);
3795 ieee80211_free_hw(priv->hw);
3796out:
3797 return err;
3798}
3799
3800void __devexit iwl_remove(struct iwl_priv * priv)
3801{
3802 unsigned long flags;
3803
3804 wait_for_completion(&priv->firmware_loading_complete);
3805
3806 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3807
3808 iwl_dbgfs_unregister(priv);
3809 sysfs_remove_group(&priv->bus->dev->kobj,
3810 &iwl_attribute_group);
3811
3812	/* The ieee80211_unregister_hw() call will cause iwl_mac_stop() and
3813	 * hence iwl_down() to be called; since we are removing the device,
3814	 * we need to set the STATUS_EXIT_PENDING bit first.
3815	 */
3816 set_bit(STATUS_EXIT_PENDING, &priv->status);
3817
3818 iwl_testmode_cleanup(priv);
3819 iwl_leds_exit(priv);
3820
3821 if (priv->mac80211_registered) {
3822 ieee80211_unregister_hw(priv->hw);
3823 priv->mac80211_registered = 0;
3824 }
3825
3826 /* Reset to low power before unloading driver. */
3827 iwl_apm_stop(priv);
3828
3829 iwl_tt_exit(priv);
3830
3831 /* make sure we flush any pending irq or
3832 * tasklet for the driver
3833 */
3834 spin_lock_irqsave(&priv->lock, flags);
3835 iwl_disable_interrupts(priv);
3836 spin_unlock_irqrestore(&priv->lock, flags);
3837
3838 trans_sync_irq(&priv->trans);
3839
3840 iwl_dealloc_ucode(priv);
3841
3842 trans_rx_free(&priv->trans);
3843 trans_tx_free(&priv->trans);
3844
3845 iwl_eeprom_free(priv);
3846
3847 /*netif_stop_queue(dev); */
3848 flush_workqueue(priv->workqueue);
3849
3850 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
3851 * priv->workqueue... so we can't take down the workqueue
3852 * until now... */
3853 destroy_workqueue(priv->workqueue);
3854 priv->workqueue = NULL;
3855 iwl_free_traffic_mem(priv);
3856
3857 trans_free(&priv->trans);
3858
3859 bus_set_drv_data(priv->bus, NULL);
3860
3861 iwl_uninit_drv(priv);
3862
3863 dev_kfree_skb(priv->beacon_skb);
3864
3865 ieee80211_free_hw(priv->hw);
3866}
3867
3868
3869/*****************************************************************************
3870 *
3871 * driver and module entry point
3872 *
3873 *****************************************************************************/
3874static int __init iwl_init(void)
3875{
3876
3877 int ret;
3878 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3879 pr_info(DRV_COPYRIGHT "\n");
3880
3881 ret = iwlagn_rate_control_register();
3882 if (ret) {
3883 pr_err("Unable to register rate control algorithm: %d\n", ret);
3884 return ret;
3885 }
3886
3887 ret = iwl_pci_register_driver();
3888
3889 if (ret)
3890 goto error_register;
3891 return ret;
3892
3893error_register:
3894 iwlagn_rate_control_unregister();
3895 return ret;
3896}
3897
3898static void __exit iwl_exit(void)
3899{
3900 iwl_pci_unregister_driver();
3901 iwlagn_rate_control_unregister();
3902}
3903
3904module_exit(iwl_exit);
3905module_init(iwl_init);
3906
3907#ifdef CONFIG_IWLWIFI_DEBUG
3908module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
3909MODULE_PARM_DESC(debug, "debug output mask");
3910#endif
3911
3912module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
3913MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
3914module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
3915MODULE_PARM_DESC(queues_num, "number of hw queues.");
3916module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
3917MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
3918module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
3919 int, S_IRUGO);
3920MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3921module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
3922MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
3923
3924module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
3925 S_IRUGO);
3926MODULE_PARM_DESC(ucode_alternative,
3927 "specify ucode alternative to use from ucode file");
3928
3929module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO);
3930MODULE_PARM_DESC(antenna_coupling,
3931		 "specify antenna coupling in dB (default: 0 dB)");
3932
3933module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
3934MODULE_PARM_DESC(bt_ch_inhibition,
3935 "Disable BT channel inhibition (default: enable)");
3936
3937module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
3938MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
3939
3940module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
3941MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
3942
3943module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
3944MODULE_PARM_DESC(wd_disable,
3945 "Disable stuck queue watchdog timer (default: 0 [enabled])");
3946
3947/*
3948 * If bt_coex_active is true, the uCode will kill/defer WiFi traffic
3949 * every time the priority line is asserted (BT is sending signals on
3950 * the priority line in the PCIx).
3951 * If bt_coex_active is false, the uCode ignores BT activity and
3952 * performs normal operation.
3953 *
3954 * Users might experience transmit problems on some platforms due to
3955 * WiFi/BT coexistence issues.  The typical symptoms are being able to
3956 * scan and find all available APs, but not being able to associate
3957 * with any of them.  On those platforms, WiFi communication can be
3958 * restored by setting the "bt_coex_active" module parameter to
3959 * "false".
3960 *
3961 * default: bt_coex_active = true (BT_COEX_ENABLE)
3962 */
3963module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active,
3964 bool, S_IRUGO);
3965MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
3966
3967module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO);
3968MODULE_PARM_DESC(led_mode, "0=system default, "
3969 "1=On(RF On)/Off(RF Off), 2=blinking (default: 0)");
3970
3971module_param_named(power_save, iwlagn_mod_params.power_save,
3972 bool, S_IRUGO);
3973MODULE_PARM_DESC(power_save,
3974 "enable WiFi power management (default: disable)");
3975
3976module_param_named(power_level, iwlagn_mod_params.power_level,
3977 int, S_IRUGO);
3978MODULE_PARM_DESC(power_level,
3979 "default power save level (range from 1 - 5, default: 1)");
3980
3981/*
3982 * For now, keep using power level 1 instead of automatically
3983 * adjusting ...
3984 */
3985module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust,
3986 bool, S_IRUGO);
3987MODULE_PARM_DESC(no_sleep_autoadjust,
3988 "don't automatically adjust sleep level "
3989 "according to maximum network latency (default: true)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
new file mode 100644
index 00000000000..e172f6baad3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -0,0 +1,343 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_agn_h__
64#define __iwl_agn_h__
65
66#include "iwl-dev.h"
67
68/* configuration for the _agn devices */
69extern struct iwl_cfg iwl5300_agn_cfg;
70extern struct iwl_cfg iwl5100_agn_cfg;
71extern struct iwl_cfg iwl5350_agn_cfg;
72extern struct iwl_cfg iwl5100_bgn_cfg;
73extern struct iwl_cfg iwl5100_abg_cfg;
74extern struct iwl_cfg iwl5150_agn_cfg;
75extern struct iwl_cfg iwl5150_abg_cfg;
76extern struct iwl_cfg iwl6005_2agn_cfg;
77extern struct iwl_cfg iwl6005_2abg_cfg;
78extern struct iwl_cfg iwl6005_2bg_cfg;
79extern struct iwl_cfg iwl1030_bgn_cfg;
80extern struct iwl_cfg iwl1030_bg_cfg;
81extern struct iwl_cfg iwl6030_2agn_cfg;
82extern struct iwl_cfg iwl6030_2abg_cfg;
83extern struct iwl_cfg iwl6030_2bgn_cfg;
84extern struct iwl_cfg iwl6030_2bg_cfg;
85extern struct iwl_cfg iwl6000i_2agn_cfg;
86extern struct iwl_cfg iwl6000i_2abg_cfg;
87extern struct iwl_cfg iwl6000i_2bg_cfg;
88extern struct iwl_cfg iwl6000_3agn_cfg;
89extern struct iwl_cfg iwl6050_2agn_cfg;
90extern struct iwl_cfg iwl6050_2abg_cfg;
91extern struct iwl_cfg iwl6150_bgn_cfg;
92extern struct iwl_cfg iwl6150_bg_cfg;
93extern struct iwl_cfg iwl1000_bgn_cfg;
94extern struct iwl_cfg iwl1000_bg_cfg;
95extern struct iwl_cfg iwl100_bgn_cfg;
96extern struct iwl_cfg iwl100_bg_cfg;
97extern struct iwl_cfg iwl130_bgn_cfg;
98extern struct iwl_cfg iwl130_bg_cfg;
99extern struct iwl_cfg iwl2000_2bgn_cfg;
100extern struct iwl_cfg iwl2000_2bg_cfg;
101extern struct iwl_cfg iwl2030_2bgn_cfg;
102extern struct iwl_cfg iwl2030_2bg_cfg;
103extern struct iwl_cfg iwl6035_2agn_cfg;
104extern struct iwl_cfg iwl6035_2abg_cfg;
105extern struct iwl_cfg iwl6035_2bg_cfg;
106extern struct iwl_cfg iwl105_bg_cfg;
107extern struct iwl_cfg iwl105_bgn_cfg;
108extern struct iwl_cfg iwl135_bg_cfg;
109extern struct iwl_cfg iwl135_bgn_cfg;
110
111extern struct iwl_mod_params iwlagn_mod_params;
112
113extern struct ieee80211_ops iwlagn_hw_ops;
114
115int iwl_reset_ict(struct iwl_priv *priv);
116
117static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
118{
119 hdr->op_code = cmd;
120 hdr->first_group = 0;
121 hdr->groups_num = 1;
122 hdr->data_valid = 1;
123}
124
125/* tx queue */
126void iwl_free_tfds_in_queue(struct iwl_priv *priv,
127 int sta_id, int tid, int freed);
128
129/* RXON */
130int iwlagn_set_pan_params(struct iwl_priv *priv);
131int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
132void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
133int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
134void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
135 struct ieee80211_vif *vif,
136 struct ieee80211_bss_conf *bss_conf,
137 u32 changes);
138void iwlagn_config_ht40(struct ieee80211_conf *conf,
139 struct iwl_rxon_context *ctx);
140
141/* uCode */
142void iwlagn_rx_calib_result(struct iwl_priv *priv,
143 struct iwl_rx_mem_buffer *rxb);
144int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
145void iwlagn_send_prio_tbl(struct iwl_priv *priv);
146int iwlagn_run_init_ucode(struct iwl_priv *priv);
147int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
148 struct fw_img *image,
149 enum iwlagn_ucode_type ucode_type);
150
151/* lib */
152void iwl_check_abort_status(struct iwl_priv *priv,
153 u8 frame_count, u32 status);
154int iwlagn_hw_valid_rtc_data_addr(u32 addr);
155int iwlagn_send_tx_power(struct iwl_priv *priv);
156void iwlagn_temperature(struct iwl_priv *priv);
157u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
158int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
159int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
160void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
161int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
162
163/* rx */
164int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
165void iwl_setup_rx_handlers(struct iwl_priv *priv);
166void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
167
168
169/* tx */
170void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
171 int index);
172void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
173 struct ieee80211_tx_info *info);
174int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
175int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
176 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
177int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
178 struct ieee80211_sta *sta, u16 tid);
179int iwlagn_txq_check_empty(struct iwl_priv *priv,
180 int sta_id, u8 tid, int txq_id);
181void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
182 struct iwl_rx_mem_buffer *rxb);
183void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
184int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
185
186static inline u32 iwl_tx_status_to_mac80211(u32 status)
187{
188 status &= TX_STATUS_MSK;
189
190 switch (status) {
191 case TX_STATUS_SUCCESS:
192 case TX_STATUS_DIRECT_DONE:
193 return IEEE80211_TX_STAT_ACK;
194 case TX_STATUS_FAIL_DEST_PS:
195 case TX_STATUS_FAIL_PASSIVE_NO_RX:
196 return IEEE80211_TX_STAT_TX_FILTERED;
197 default:
198 return 0;
199 }
200}
201
202static inline bool iwl_is_tx_success(u32 status)
203{
204 status &= TX_STATUS_MSK;
205 return (status == TX_STATUS_SUCCESS) ||
206 (status == TX_STATUS_DIRECT_DONE);
207}
208
209u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
210
211/* scan */
212int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
213void iwlagn_post_scan(struct iwl_priv *priv);
214
215/* station mgmt */
216int iwlagn_manage_ibss_station(struct iwl_priv *priv,
217 struct ieee80211_vif *vif, bool add);
218
219/* bt coex */
220void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
221void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
222 struct iwl_rx_mem_buffer *rxb);
223void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
224void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
225void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
226void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv);
227void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
228
229#ifdef CONFIG_IWLWIFI_DEBUG
230const char *iwl_get_tx_fail_reason(u32 status);
231const char *iwl_get_agg_tx_fail_reason(u16 status);
232#else
233static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
234static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; }
235#endif
236
237/* station management */
238int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
239 struct iwl_rxon_context *ctx);
240int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
241 const u8 *addr, u8 *sta_id_r);
242int iwl_remove_default_wep_key(struct iwl_priv *priv,
243 struct iwl_rxon_context *ctx,
244 struct ieee80211_key_conf *key);
245int iwl_set_default_wep_key(struct iwl_priv *priv,
246 struct iwl_rxon_context *ctx,
247 struct ieee80211_key_conf *key);
248int iwl_restore_default_wep_keys(struct iwl_priv *priv,
249 struct iwl_rxon_context *ctx);
250int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
251 struct ieee80211_key_conf *key,
252 struct ieee80211_sta *sta);
253int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
254 struct ieee80211_key_conf *key,
255 struct ieee80211_sta *sta);
256void iwl_update_tkip_key(struct iwl_priv *priv,
257 struct ieee80211_vif *vif,
258 struct ieee80211_key_conf *keyconf,
259 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
260int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
261int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
262 int tid, u16 ssn);
263int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
264 int tid);
265void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
266int iwl_update_bcast_station(struct iwl_priv *priv,
267 struct iwl_rxon_context *ctx);
268int iwl_update_bcast_stations(struct iwl_priv *priv);
269void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
270 struct ieee80211_vif *vif,
271 enum sta_notify_cmd cmd,
272 struct ieee80211_sta *sta);
273
274/* rate */
275static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
276{
277 return BIT(ant_idx) << RATE_MCS_ANT_POS;
278}
279
280static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
281{
282 return le32_to_cpu(rate_n_flags) & RATE_MCS_RATE_MSK;
283}
284
285static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
286{
287 return cpu_to_le32(flags|(u32)rate);
288}
289
290/* eeprom */
291void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv);
292void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
293
294/* notification wait support */
295void __acquires(wait_entry)
296iwlagn_init_notification_wait(struct iwl_priv *priv,
297 struct iwl_notification_wait *wait_entry,
298 u8 cmd,
299 void (*fn)(struct iwl_priv *priv,
300 struct iwl_rx_packet *pkt,
301 void *data),
302 void *fn_data);
303int __must_check __releases(wait_entry)
304iwlagn_wait_notification(struct iwl_priv *priv,
305 struct iwl_notification_wait *wait_entry,
306 unsigned long timeout);
307void __releases(wait_entry)
308iwlagn_remove_notification(struct iwl_priv *priv,
309 struct iwl_notification_wait *wait_entry);
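/*
 * Illustrative sketch (not part of the original header) of the typical
 * calling pattern for the notification wait helpers declared above:
 * register the wait entry, send the command that triggers the awaited
 * notification, then block until it arrives or times out. The command ID
 * and the 2*HZ timeout are arbitrary choices for this example.
 */
static inline int example_wait_for_calib_complete(struct iwl_priv *priv)
{
	struct iwl_notification_wait wait;

	iwlagn_init_notification_wait(priv, &wait,
				      CALIBRATION_COMPLETE_NOTIFICATION,
				      NULL, NULL);

	/*
	 * ... send the host command that triggers the notification here;
	 * if that send fails, call iwlagn_remove_notification(priv, &wait)
	 * instead of waiting ...
	 */

	/* Blocks until the notification arrives or the timeout expires. */
	return iwlagn_wait_notification(priv, &wait, 2 * HZ);
}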
310extern int iwlagn_init_alive_start(struct iwl_priv *priv);
311extern int iwl_alive_start(struct iwl_priv *priv);
312/* svtool */
313#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
314extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len);
315extern int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
316 struct netlink_callback *cb,
317 void *data, int len);
318extern void iwl_testmode_init(struct iwl_priv *priv);
319extern void iwl_testmode_cleanup(struct iwl_priv *priv);
320#else
321static inline
322int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
323{
324 return -ENOSYS;
325}
326static inline
327int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
328 struct netlink_callback *cb,
329 void *data, int len)
330{
331 return -ENOSYS;
332}
333static inline
334void iwl_testmode_init(struct iwl_priv *priv)
335{
336}
337static inline
338void iwl_testmode_cleanup(struct iwl_priv *priv)
339{
340}
341#endif
342
343#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
new file mode 100644
index 00000000000..f3ee1c0c004
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-bus.h
@@ -0,0 +1,139 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_pci_h__
64#define __iwl_pci_h__
65
66struct iwl_bus;
67
68/**
69 * struct iwl_bus_ops - bus specific operations
70 * @get_pm_support: must return true if the bus can go to sleep
71 * @apm_config: will be called during APM configuration
72 * @set_drv_data: set the drv_data pointer to the bus layer
73 * @get_hw_id: prints the hw_id in the provided buffer
74 * @write8: write a byte to the register at offset ofs
75 * @write32: write a dword to the register at offset ofs
76 * @read32: read a dword from the register at offset ofs
77 */
78struct iwl_bus_ops {
79 bool (*get_pm_support)(struct iwl_bus *bus);
80 void (*apm_config)(struct iwl_bus *bus);
81 void (*set_drv_data)(struct iwl_bus *bus, void *drv_data);
82 void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
83 void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
84 void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
85 u32 (*read32)(struct iwl_bus *bus, u32 ofs);
86};
87
88struct iwl_bus {
89 /* Common data to all buses */
90 void *drv_data; /* driver's context */
91 struct device *dev;
92 struct iwl_bus_ops *ops;
93
94 unsigned int irq;
95
96 /* pointer to bus specific struct */
97	/* Ensure that this pointer is always aligned to sizeof(void *) */
98 char bus_specific[0] __attribute__((__aligned__(sizeof(void *))));
99};
100
101static inline bool bus_get_pm_support(struct iwl_bus *bus)
102{
103 return bus->ops->get_pm_support(bus);
104}
105
106static inline void bus_apm_config(struct iwl_bus *bus)
107{
108 bus->ops->apm_config(bus);
109}
110
111static inline void bus_set_drv_data(struct iwl_bus *bus, void *drv_data)
112{
113 bus->ops->set_drv_data(bus, drv_data);
114}
115
116static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
117{
118 bus->ops->get_hw_id(bus, buf, buf_len);
119}
120
121static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
122{
123 bus->ops->write8(bus, ofs, val);
124}
125
126static inline void bus_write32(struct iwl_bus *bus, u32 ofs, u32 val)
127{
128 bus->ops->write32(bus, ofs, val);
129}
130
131static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
132{
133 return bus->ops->read32(bus, ofs);
134}
135
136int __must_check iwl_pci_register_driver(void);
137void iwl_pci_unregister_driver(void);
138
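/*
 * Illustrative sketch only (not part of the original header): a
 * hypothetical bus back-end wiring the register-access callbacks of
 * struct iwl_bus_ops to plain MMIO accessors from <linux/io.h>. The
 * real PCI implementation lives in iwl-pci.c; the "example_bus" type,
 * its hw_base field and the example_* names are assumptions made for
 * this sketch, and the remaining callbacks are left out.
 */
struct example_bus {
	void __iomem *hw_base;		/* mapped device registers */
};

#define EXAMPLE_BUS(_iwl_bus) \
	((struct example_bus *)((_iwl_bus)->bus_specific))

static void example_bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
	iowrite8(val, EXAMPLE_BUS(bus)->hw_base + ofs);
}

static void example_bus_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
	iowrite32(val, EXAMPLE_BUS(bus)->hw_base + ofs);
}

static u32 example_bus_read32(struct iwl_bus *bus, u32 ofs)
{
	return ioread32(EXAMPLE_BUS(bus)->hw_base + ofs);
}

static struct iwl_bus_ops example_bus_ops = {
	.write8	 = example_bus_write8,
	.write32 = example_bus_write32,
	.read32	 = example_bus_read32,
	/* get_pm_support, apm_config, set_drv_data, get_hw_id omitted */
};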
139#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
new file mode 100644
index 00000000000..e9e9d1d1778
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -0,0 +1,4033 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_commands_h__
70#define __iwl_commands_h__
71
72struct iwl_priv;
73
74/* uCode version contains 4 values: Major/Minor/API/Serial */
75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
79
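/*
 * Illustrative use of the version macros above (not part of the uCode
 * API definitions): unpacking the four fields of a packed version word.
 * The sample value in the comment is arbitrary.
 */
static inline void example_print_ucode_version(u32 ver)
{
	/* e.g. ver == 0x11080901 decodes as 17.8.9.1 (major.minor.API.serial) */
	printk(KERN_INFO "uCode version %u.%u.%u.%u\n",
	       IWL_UCODE_MAJOR(ver), IWL_UCODE_MINOR(ver),
	       IWL_UCODE_API(ver), IWL_UCODE_SERIAL(ver));
}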
80
81/* Tx rates */
82#define IWL_CCK_RATES 4
83#define IWL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
85
86enum {
87 REPLY_ALIVE = 0x1,
88 REPLY_ERROR = 0x2,
89
90 /* RXON and QOS commands */
91 REPLY_RXON = 0x10,
92 REPLY_RXON_ASSOC = 0x11,
93 REPLY_QOS_PARAM = 0x13,
94 REPLY_RXON_TIMING = 0x14,
95
96 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19,
99 REPLY_REMOVE_ALL_STA = 0x1a, /* not used */
100 REPLY_TXFIFO_FLUSH = 0x1e,
101
102 /* Security */
103 REPLY_WEPKEY = 0x20,
104
105 /* RX, TX, LEDs */
106 REPLY_TX = 0x1c,
107 REPLY_LEDS_CMD = 0x48,
108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
109
110 /* WiMAX coexistence */
111 COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
112 COEX_MEDIUM_NOTIFICATION = 0x5b,
113 COEX_EVENT_CMD = 0x5c,
114
115 /* Calibration */
116 TEMPERATURE_NOTIFICATION = 0x62,
117 CALIBRATION_CFG_CMD = 0x65,
118 CALIBRATION_RES_NOTIFICATION = 0x66,
119 CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
120
121 /* 802.11h related */
122 REPLY_QUIET_CMD = 0x71, /* not used */
123 REPLY_CHANNEL_SWITCH = 0x72,
124 CHANNEL_SWITCH_NOTIFICATION = 0x73,
125 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
126 SPECTRUM_MEASURE_NOTIFICATION = 0x75,
127
128 /* Power Management */
129 POWER_TABLE_CMD = 0x77,
130 PM_SLEEP_NOTIFICATION = 0x7A,
131 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
132
133 /* Scan commands and notifications */
134 REPLY_SCAN_CMD = 0x80,
135 REPLY_SCAN_ABORT_CMD = 0x81,
136 SCAN_START_NOTIFICATION = 0x82,
137 SCAN_RESULTS_NOTIFICATION = 0x83,
138 SCAN_COMPLETE_NOTIFICATION = 0x84,
139
140 /* IBSS/AP commands */
141 BEACON_NOTIFICATION = 0x90,
142 REPLY_TX_BEACON = 0x91,
143 WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */
144
145 /* Miscellaneous commands */
146 REPLY_TX_POWER_DBM_CMD = 0x95,
147 QUIET_NOTIFICATION = 0x96, /* not used */
148 REPLY_TX_PWR_TABLE_CMD = 0x97,
149 REPLY_TX_POWER_DBM_CMD_V1 = 0x98, /* old version of API */
150 TX_ANT_CONFIGURATION_CMD = 0x98,
151 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
152
153 /* Bluetooth device coexistence config command */
154 REPLY_BT_CONFIG = 0x9b,
155
156 /* Statistics */
157 REPLY_STATISTICS_CMD = 0x9c,
158 STATISTICS_NOTIFICATION = 0x9d,
159
160 /* RF-KILL commands and notifications */
161 REPLY_CARD_STATE_CMD = 0xa0,
162 CARD_STATE_NOTIFICATION = 0xa1,
163
164 /* Missed beacons notification */
165 MISSED_BEACONS_NOTIFICATION = 0xa2,
166
167 REPLY_CT_KILL_CONFIG_CMD = 0xa4,
168 SENSITIVITY_CMD = 0xa8,
169 REPLY_PHY_CALIBRATION_CMD = 0xb0,
170 REPLY_RX_PHY_CMD = 0xc0,
171 REPLY_RX_MPDU_CMD = 0xc1,
172 REPLY_RX = 0xc3,
173 REPLY_COMPRESSED_BA = 0xc5,
174
175 /* BT Coex */
176 REPLY_BT_COEX_PRIO_TABLE = 0xcc,
177 REPLY_BT_COEX_PROT_ENV = 0xcd,
178 REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
179
180 /* PAN commands */
181 REPLY_WIPAN_PARAMS = 0xb2,
182 REPLY_WIPAN_RXON = 0xb3, /* use REPLY_RXON structure */
183 REPLY_WIPAN_RXON_TIMING = 0xb4, /* use REPLY_RXON_TIMING structure */
184 REPLY_WIPAN_RXON_ASSOC = 0xb6, /* use REPLY_RXON_ASSOC structure */
185 REPLY_WIPAN_QOS_PARAM = 0xb7, /* use REPLY_QOS_PARAM structure */
186 REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
187 REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
188 REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
189 REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
190
191 REPLY_WOWLAN_PATTERNS = 0xe0,
192 REPLY_WOWLAN_WAKEUP_FILTER = 0xe1,
193 REPLY_WOWLAN_TSC_RSC_PARAMS = 0xe2,
194 REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
195 REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
196 REPLY_WOWLAN_GET_STATUS = 0xe5,
197
198 REPLY_MAX = 0xff
199};
200
201/******************************************************************************
202 * (0)
203 * Commonly used structures and definitions:
204 * Command header, rate_n_flags, txpower
205 *
206 *****************************************************************************/
207
208/* iwl_cmd_header flags value */
209#define IWL_CMD_FAILED_MSK 0x40
210
211#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
212#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
213#define SEQ_TO_INDEX(s) ((s) & 0xff)
214#define INDEX_TO_SEQ(i) ((i) & 0xff)
215#define SEQ_RX_FRAME cpu_to_le16(0x8000)
216
217/**
218 * struct iwl_cmd_header
219 *
220 * This header format appears in the beginning of each command sent from the
221 * driver, and each response/notification received from uCode.
222 */
223struct iwl_cmd_header {
224 u8 cmd; /* Command ID: REPLY_RXON, etc. */
225 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
226 /*
227 * The driver sets up the sequence number to values of its choosing.
228 * uCode does not use this value, but passes it back to the driver
229 * when sending the response to each driver-originated command, so
230 * the driver can match the response to the command. Since the values
231 * don't get used by uCode, the driver may set up an arbitrary format.
232 *
233 * There is one exception: uCode sets bit 15 when it originates
234 * the response/notification, i.e. when the response/notification
235 * is not a direct response to a command sent by the driver. For
236 * example, uCode issues REPLY_RX when it sends a received frame
237 * to the driver; it is not a direct response to any driver command.
238 *
239 * The Linux driver uses the following format:
240 *
241 * 0:7 tfd index - position within TX queue
242 * 8:12 TX queue id
243 * 13:14 reserved
244 * 15 unsolicited RX or uCode-originated notification
245 */
246 __le16 sequence;
247
248 /* command or response/notification data follows immediately */
249 u8 data[0];
250} __packed;
251
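/*
 * Illustrative sketch (not part of the uCode API definitions) of packing
 * and unpacking the driver-chosen sequence format described above with
 * the QUEUE_TO_SEQ()/INDEX_TO_SEQ() and SEQ_TO_QUEUE()/SEQ_TO_INDEX()
 * macros; the example_* names are hypothetical.
 */
static inline __le16 example_build_sequence(int txq_id, int idx)
{
	return cpu_to_le16(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(idx));
}

static inline void example_parse_sequence(__le16 sequence,
					  int *txq_id, int *idx)
{
	u16 seq = le16_to_cpu(sequence);

	*txq_id = SEQ_TO_QUEUE(seq);
	*idx = SEQ_TO_INDEX(seq);
}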
252
253/**
254 * iwlagn rate_n_flags bit fields
255 *
256 * rate_n_flags format is used in following iwlagn commands:
257 * REPLY_RX (response only)
258 * REPLY_RX_MPDU (response only)
259 * REPLY_TX (both command and response)
260 * REPLY_TX_LINK_QUALITY_CMD
261 *
262 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
263 * 2-0: 0) 6 Mbps
264 * 1) 12 Mbps
265 * 2) 18 Mbps
266 * 3) 24 Mbps
267 * 4) 36 Mbps
268 * 5) 48 Mbps
269 * 6) 54 Mbps
270 * 7) 60 Mbps
271 *
272 * 4-3: 0) Single stream (SISO)
273 * 1) Dual stream (MIMO)
274 * 2) Triple stream (MIMO)
275 *
276 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
277 *
278 * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
279 * 3-0: 0xD) 6 Mbps
280 * 0xF) 9 Mbps
281 * 0x5) 12 Mbps
282 * 0x7) 18 Mbps
283 * 0x9) 24 Mbps
284 * 0xB) 36 Mbps
285 * 0x1) 48 Mbps
286 * 0x3) 54 Mbps
287 *
288 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
289 * 6-0: 10) 1 Mbps
290 * 20) 2 Mbps
291 * 55) 5.5 Mbps
292 * 110) 11 Mbps
293 */
294#define RATE_MCS_CODE_MSK 0x7
295#define RATE_MCS_SPATIAL_POS 3
296#define RATE_MCS_SPATIAL_MSK 0x18
297#define RATE_MCS_HT_DUP_POS 5
298#define RATE_MCS_HT_DUP_MSK 0x20
299/* Both legacy and HT use bits 7:0 as the CCK/OFDM rate or HT MCS */
300#define RATE_MCS_RATE_MSK 0xff
301
302/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
303#define RATE_MCS_FLAGS_POS 8
304#define RATE_MCS_HT_POS 8
305#define RATE_MCS_HT_MSK 0x100
306
307/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
308#define RATE_MCS_CCK_POS 9
309#define RATE_MCS_CCK_MSK 0x200
310
311/* Bit 10: (1) Use Green Field preamble */
312#define RATE_MCS_GF_POS 10
313#define RATE_MCS_GF_MSK 0x400
314
315/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
316#define RATE_MCS_HT40_POS 11
317#define RATE_MCS_HT40_MSK 0x800
318
319/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
320#define RATE_MCS_DUP_POS 12
321#define RATE_MCS_DUP_MSK 0x1000
322
323/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
324#define RATE_MCS_SGI_POS 13
325#define RATE_MCS_SGI_MSK 0x2000
326
327/**
328 * rate_n_flags Tx antenna masks
329 * 4965 has 2 transmitters
330 * 5100 has 1 transmitter B
331 * 5150 has 1 transmitter A
332 * 5300 has 3 transmitters
333 * 5350 has 3 transmitters
334 * bit14:16
335 */
336#define RATE_MCS_ANT_POS 14
337#define RATE_MCS_ANT_A_MSK 0x04000
338#define RATE_MCS_ANT_B_MSK 0x08000
339#define RATE_MCS_ANT_C_MSK 0x10000
340#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
341#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
342#define RATE_ANT_NUM 3
343
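/*
 * Illustrative composition (not part of the uCode API definitions) of a
 * rate_n_flags value from the fields documented above: HT MCS 7 in bits
 * 7:0, HT format (bit 8), antenna B, 40 MHz width and short guard
 * interval. The helper name is hypothetical.
 */
static inline __le32 example_ht_rate_n_flags(void)
{
	u32 r = 7;			/* HT MCS index, bits 7:0 */

	r |= RATE_MCS_HT_MSK;		/* HT format */
	r |= RATE_MCS_ANT_B_MSK;	/* transmit on antenna B */
	r |= RATE_MCS_HT40_MSK;		/* 40 MHz channel width */
	r |= RATE_MCS_SGI_MSK;		/* short guard interval */

	return cpu_to_le32(r);
}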
344#define POWER_TABLE_NUM_ENTRIES 33
345#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
346#define POWER_TABLE_CCK_ENTRY 32
347
348#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
349#define IWL_PWR_CCK_ENTRIES 2
350
351/**
352 * struct tx_power_dual_stream
353 *
354 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
355 *
356 * Same format as iwl_tx_power_dual_stream, but __le32
357 */
358struct tx_power_dual_stream {
359 __le32 dw;
360} __packed;
361
362/**
363 * Command REPLY_TX_POWER_DBM_CMD = 0x95
364 * struct iwlagn_tx_power_dbm_cmd
365 */
366#define IWLAGN_TX_POWER_AUTO 0x7f
367#define IWLAGN_TX_POWER_NO_CLOSED (0x1 << 6)
368
369struct iwlagn_tx_power_dbm_cmd {
370 s8 global_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
371 u8 flags;
372 s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
373 u8 reserved;
374} __packed;
375
376/**
377 * Command TX_ANT_CONFIGURATION_CMD = 0x98
378 * This command is used to configure the valid Tx antennas.
379 * By default, uCode determines the valid antennas from the radio flavor.
380 * This command enables the driver to override or modify that choice.
381 */
382struct iwl_tx_ant_config_cmd {
383 __le32 valid;
384} __packed;
385
386/******************************************************************************
387 * (0a)
388 * Alive and Error Commands & Responses:
389 *
390 *****************************************************************************/
391
392#define UCODE_VALID_OK cpu_to_le32(0x1)
393
394/**
395 * REPLY_ALIVE = 0x1 (response only, not a command)
396 *
397 * uCode issues this "alive" notification once the runtime image is ready
398 * to receive commands from the driver. This is the *second* "alive"
399 * notification that the driver will receive after rebooting uCode;
400 * this "alive" is indicated by subtype field != 9.
401 *
402 * See comments documenting "BSM" (bootstrap state machine).
403 *
404 * This response includes two pointers to structures within the device's
405 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
406 *
407 * 1) log_event_table_ptr indicates base of the event log. This traces
408 * a 256-entry history of uCode execution within a circular buffer.
409 * Its header format is:
410 *
411 * __le32 log_size; log capacity (in number of entries)
412 * __le32 type; (1) timestamp with each entry, (0) no timestamp
413 * __le32 wraps; # times uCode has wrapped to top of circular buffer
414 * __le32 write_index; next circular buffer entry that uCode would fill
415 *
416 * The header is followed by the circular buffer of log entries. Entries
417 * with timestamps have the following format:
418 *
419 * __le32 event_id; range 0 - 1500
420 * __le32 timestamp; low 32 bits of TSF (of network, if associated)
421 * __le32 data; event_id-specific data value
422 *
423 * Entries without timestamps contain only event_id and data.
424 *
425 *
426 * 2) error_event_table_ptr indicates base of the error log. This contains
427 * information about any uCode error that occurs. For agn, the format
428 * of the error log is defined by struct iwl_error_event_table.
429 *
430 * The Linux driver can print both logs to the system log when a uCode error
431 * occurs.
432 */
433
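/*
 * The event log header described above is read by the driver word by
 * word rather than through a dedicated structure; the struct below is
 * only an illustrative restatement of that layout and is not part of
 * the uCode API definitions.
 */
struct example_event_log_header {
	__le32 log_size;	/* log capacity, in number of entries */
	__le32 type;		/* (1) timestamp with each entry, (0) none */
	__le32 wraps;		/* # times uCode wrapped the circular buffer */
	__le32 write_index;	/* next circular buffer entry uCode will fill */
} __packed;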
434/*
435 * Note: This structure is read from the device with IO accesses,
436 * and the reading already does the endian conversion. As it is
437 * read with u32-sized accesses, any members with a different size
438 * need to be ordered correctly though!
439 */
440struct iwl_error_event_table {
441 u32 valid; /* (nonzero) valid, (0) log is empty */
442 u32 error_id; /* type of error */
443 u32 pc; /* program counter */
444 u32 blink1; /* branch link */
445 u32 blink2; /* branch link */
446 u32 ilink1; /* interrupt link */
447 u32 ilink2; /* interrupt link */
448 u32 data1; /* error-specific data */
449 u32 data2; /* error-specific data */
450 u32 line; /* source code line of error */
451 u32 bcon_time; /* beacon timer */
452 u32 tsf_low; /* network timestamp function timer */
453 u32 tsf_hi; /* network timestamp function timer */
454 u32 gp1; /* GP1 timer register */
455 u32 gp2; /* GP2 timer register */
456 u32 gp3; /* GP3 timer register */
457 u32 ucode_ver; /* uCode version */
458 u32 hw_ver; /* HW Silicon version */
459 u32 brd_ver; /* HW board version */
460 u32 log_pc; /* log program counter */
461 u32 frame_ptr; /* frame pointer */
462 u32 stack_ptr; /* stack pointer */
463 u32 hcmd; /* last host command header */
464#if 0
465 /* no need to read the remainder, we don't use the values */
466 u32 isr0; /* isr status register LMPM_NIC_ISR0: rxtx_flag */
467 u32 isr1; /* isr status register LMPM_NIC_ISR1: host_flag */
468 u32 isr2; /* isr status register LMPM_NIC_ISR2: enc_flag */
469 u32 isr3; /* isr status register LMPM_NIC_ISR3: time_flag */
470 u32 isr4; /* isr status register LMPM_NIC_ISR4: wico interrupt */
471 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
472 u32 wait_event; /* wait event() caller address */
473 u32 l2p_control; /* L2pControlField */
474 u32 l2p_duration; /* L2pDurationField */
475 u32 l2p_mhvalid; /* L2pMhValidBits */
476 u32 l2p_addr_match; /* L2pAddrMatchStat */
477 u32 lmpm_pmg_sel; /* indicate which clocks are turned on (LMPM_PMG_SEL) */
478	u32 u_timestamp;	/* indicates the date and time of the compilation */
479 u32 flow_handler; /* FH read/write pointers, RX credit */
480#endif
481} __packed;
482
483struct iwl_alive_resp {
484 u8 ucode_minor;
485 u8 ucode_major;
486 __le16 reserved1;
487 u8 sw_rev[8];
488 u8 ver_type;
489 u8 ver_subtype; /* not "9" for runtime alive */
490 __le16 reserved2;
491 __le32 log_event_table_ptr; /* SRAM address for event log */
492 __le32 error_event_table_ptr; /* SRAM address for error log */
493 __le32 timestamp;
494 __le32 is_valid;
495} __packed;
496
497/*
498 * REPLY_ERROR = 0x2 (response only, not a command)
499 */
500struct iwl_error_resp {
501 __le32 error_type;
502 u8 cmd_id;
503 u8 reserved1;
504 __le16 bad_cmd_seq_num;
505 __le32 error_info;
506 __le64 timestamp;
507} __packed;
508
509/******************************************************************************
510 * (1)
511 * RXON Commands & Responses:
512 *
513 *****************************************************************************/
514
515/*
516 * Rx config defines & structure
517 */
518/* rx_config device types */
519enum {
520 RXON_DEV_TYPE_AP = 1,
521 RXON_DEV_TYPE_ESS = 3,
522 RXON_DEV_TYPE_IBSS = 4,
523 RXON_DEV_TYPE_SNIFFER = 6,
524 RXON_DEV_TYPE_CP = 7,
525 RXON_DEV_TYPE_2STA = 8,
526 RXON_DEV_TYPE_P2P = 9,
527};
528
529
530#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
531#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
532#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
533#define RXON_RX_CHAIN_VALID_POS (1)
534#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
535#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
536#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
537#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
538#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
539#define RXON_RX_CHAIN_CNT_POS (10)
540#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
541#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
542#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
543#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
544
545/* rx_config flags */
546/* band & modulation selection */
547#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
548#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
549/* auto detection enable */
550#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
551/* TGg protection when tx */
552#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
553/* cck short slot & preamble */
554#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
555#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
556/* antenna selection */
557#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
558#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
559#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
560#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
561/* radar detection enable */
562#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
563#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
564/* rx response to host with 8-byte TSF
565* (according to ON_AIR deassertion) */
566#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
567
568
569/* HT flags */
570#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
571#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
572
573#define RXON_FLG_HT_OPERATING_MODE_POS (23)
574
575#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23)
576#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23)
577
578#define RXON_FLG_CHANNEL_MODE_POS (25)
579#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
580
581/* channel mode */
582enum {
583 CHANNEL_MODE_LEGACY = 0,
584 CHANNEL_MODE_PURE_40 = 1,
585 CHANNEL_MODE_MIXED = 2,
586 CHANNEL_MODE_RESERVED = 3,
587};
588#define RXON_FLG_CHANNEL_MODE_LEGACY cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
589#define RXON_FLG_CHANNEL_MODE_PURE_40 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
590#define RXON_FLG_CHANNEL_MODE_MIXED cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
591
592/* CTS to self (if spec allows) flag */
593#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
594
595/* rx_config filter flags */
596/* accept all data frames */
597#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
598/* pass control & management to host */
599#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
600/* accept multi-cast */
601#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
602/* don't decrypt uni-cast frames */
603#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
604/* don't decrypt multi-cast frames */
605#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
606/* STA is associated */
607#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
608/* transfer to host non bssid beacons in associated state */
609#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
610
611/**
612 * REPLY_RXON = 0x10 (command, has simple generic response)
613 *
614 * RXON tunes the radio tuner to a service channel, and sets up a number
615 * of parameters that are used primarily for Rx, but also for Tx operations.
616 *
617 * NOTE: When tuning to a new channel, driver must set the
618 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
619 * info within the device, including the station tables, tx retry
620 * rate tables, and txpower tables. Driver must build a new station
621 * table and txpower table before transmitting anything on the RXON
622 * channel.
623 *
624 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
625 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
626 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
627 */
628
629struct iwl_rxon_cmd {
630 u8 node_addr[6];
631 __le16 reserved1;
632 u8 bssid_addr[6];
633 __le16 reserved2;
634 u8 wlap_bssid_addr[6];
635 __le16 reserved3;
636 u8 dev_type;
637 u8 air_propagation;
638 __le16 rx_chain;
639 u8 ofdm_basic_rates;
640 u8 cck_basic_rates;
641 __le16 assoc_id;
642 __le32 flags;
643 __le32 filter_flags;
644 __le16 channel;
645 u8 ofdm_ht_single_stream_basic_rates;
646 u8 ofdm_ht_dual_stream_basic_rates;
647 u8 ofdm_ht_triple_stream_basic_rates;
648 u8 reserved5;
649 __le16 acquisition_data;
650 __le16 reserved6;
651} __packed;
652
653/*
654 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
655 */
656struct iwl_rxon_assoc_cmd {
657 __le32 flags;
658 __le32 filter_flags;
659 u8 ofdm_basic_rates;
660 u8 cck_basic_rates;
661 __le16 reserved1;
662 u8 ofdm_ht_single_stream_basic_rates;
663 u8 ofdm_ht_dual_stream_basic_rates;
664 u8 ofdm_ht_triple_stream_basic_rates;
665 u8 reserved2;
666 __le16 rx_chain_select_flags;
667 __le16 acquisition_data;
668 __le32 reserved3;
669} __packed;
670
671#define IWL_CONN_MAX_LISTEN_INTERVAL 10
672#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
673#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
674
675/*
676 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
677 */
678struct iwl_rxon_time_cmd {
679 __le64 timestamp;
680 __le16 beacon_interval;
681 __le16 atim_window;
682 __le32 beacon_init_val;
683 __le16 listen_interval;
684 u8 dtim_period;
685 u8 delta_cp_bss_tbtts;
686} __packed;
687
688/*
689 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
690 */
691/**
692 * struct iwl5000_channel_switch_cmd
693 * @band: 0- 5.2GHz, 1- 2.4GHz
694 * @expect_beacon: 0- resume transmits after channel switch
695 * 1- wait for beacon to resume transmits
696 * @channel: new channel number
697 * @rxon_flags: Rx on flags
698 * @rxon_filter_flags: filtering parameters
699 * @switch_time: switch time in extended beacon format
700 * @reserved: reserved bytes
701 */
702struct iwl5000_channel_switch_cmd {
703 u8 band;
704 u8 expect_beacon;
705 __le16 channel;
706 __le32 rxon_flags;
707 __le32 rxon_filter_flags;
708 __le32 switch_time;
709 __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
710} __packed;
711
712/**
713 * struct iwl6000_channel_switch_cmd
714 * @band: 0- 5.2GHz, 1- 2.4GHz
715 * @expect_beacon: 0- resume transmits after channel switch
716 * 1- wait for beacon to resume transmits
717 * @channel: new channel number
718 * @rxon_flags: Rx on flags
719 * @rxon_filter_flags: filtering parameters
720 * @switch_time: switch time in extended beacon format
721 * @reserved: reserved bytes
722 */
723struct iwl6000_channel_switch_cmd {
724 u8 band;
725 u8 expect_beacon;
726 __le16 channel;
727 __le32 rxon_flags;
728 __le32 rxon_filter_flags;
729 __le32 switch_time;
730 __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
731} __packed;
732
733/*
734 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
735 */
736struct iwl_csa_notification {
737 __le16 band;
738 __le16 channel;
739 __le32 status; /* 0 - OK, 1 - fail */
740} __packed;
741
742/******************************************************************************
743 * (2)
744 * Quality-of-Service (QOS) Commands & Responses:
745 *
746 *****************************************************************************/
747
748/**
749 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
750 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
751 *
752 * @cw_min: Contention window, start value in numbers of slots.
753 * Should be a power-of-2, minus 1. Device's default is 0x0f.
754 * @cw_max: Contention window, max value in numbers of slots.
755 * Should be a power-of-2, minus 1. Device's default is 0x3f.
756 * @aifsn: Number of slots in Arbitration Interframe Space (before
757 * performing random backoff timing prior to Tx). Device default 1.
758 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
759 *
760 * Device will automatically increase the contention window to (2*CW) + 1 for each
761 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
762 * value, to cap the CW value.
763 */
764struct iwl_ac_qos {
765 __le16 cw_min;
766 __le16 cw_max;
767 u8 aifsn;
768 u8 reserved1;
769 __le16 edca_txop;
770} __packed;
771
772/* QoS flags defines */
773#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
774#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
775#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
776
777/* Number of Access Categories (AC) (EDCA), queues 0..3 */
778#define AC_NUM 4
779
780/*
781 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
782 *
783 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
784 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
785 */
786struct iwl_qosparam_cmd {
787 __le32 qos_flags;
788 struct iwl_ac_qos ac[AC_NUM];
789} __packed;
790
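/*
 * Illustrative sketch (not part of the uCode API definitions) that fills
 * REPLY_QOS_PARAM with the device defaults documented above (cw_min 0x0f,
 * cw_max 0x3f, aifsn 1, txop 0); in practice the values come from
 * mac80211's per-AC Tx parameters. The helper name is hypothetical.
 */
static inline void example_fill_default_qos(struct iwl_qosparam_cmd *cmd)
{
	int i;

	cmd->qos_flags = QOS_PARAM_FLG_UPDATE_EDCA_MSK;
	for (i = 0; i < AC_NUM; i++) {
		cmd->ac[i].cw_min = cpu_to_le16(0x0f);
		cmd->ac[i].cw_max = cpu_to_le16(0x3f);
		cmd->ac[i].aifsn = 1;
		cmd->ac[i].reserved1 = 0;
		cmd->ac[i].edca_txop = cpu_to_le16(0);
	}
}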
791/******************************************************************************
792 * (3)
793 * Add/Modify Stations Commands & Responses:
794 *
795 *****************************************************************************/
796/*
797 * Multi station support
798 */
799
800/* Special, dedicated locations within device's station table */
801#define IWL_AP_ID 0
802#define IWL_AP_ID_PAN 1
803#define IWL_STA_ID 2
804#define IWLAGN_PAN_BCAST_ID 14
805#define IWLAGN_BROADCAST_ID 15
806#define IWLAGN_STATION_COUNT 16
807
808#define IWL_INVALID_STATION 255
809
810#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
811#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
812#define STA_FLG_PAN_STATION cpu_to_le32(1 << 13)
813#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
814#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
815#define STA_FLG_MAX_AGG_SIZE_POS (19)
816#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19)
817#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21)
818#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22)
819#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
820#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23)
821
822/* Use in mode field. 1: modify existing entry, 0: add new station entry */
823#define STA_CONTROL_MODIFY_MSK 0x01
824
825/* key flags __le16*/
826#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
827#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
828#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
829#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
830#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
831
832#define STA_KEY_FLG_KEYID_POS 8
833#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
834/* wep key is either from global key (0) or from station info array (1) */
835#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008)
836
837/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
838#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
839#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
840#define STA_KEY_MAX_NUM 8
841#define STA_KEY_MAX_NUM_PAN 16
842/* must not match WEP_INVALID_OFFSET */
843#define IWLAGN_HW_KEY_DEFAULT 0xfe
844
845/* Flags indicate whether to modify vs. don't change various station params */
846#define STA_MODIFY_KEY_MASK 0x01
847#define STA_MODIFY_TID_DISABLE_TX 0x02
848#define STA_MODIFY_TX_RATE_MSK 0x04
849#define STA_MODIFY_ADDBA_TID_MSK 0x08
850#define STA_MODIFY_DELBA_TID_MSK 0x10
851#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
852
853/* Receiver address (actually, Rx station's index into station table),
854 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
855#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
856
857/* agn */
858struct iwl_keyinfo {
859 __le16 key_flags;
860 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
861 u8 reserved1;
862 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
863 u8 key_offset;
864 u8 reserved2;
865 u8 key[16]; /* 16-byte unicast decryption key */
866 __le64 tx_secur_seq_cnt;
867 __le64 hw_tkip_mic_rx_key;
868 __le64 hw_tkip_mic_tx_key;
869} __packed;
870
871/**
872 * struct sta_id_modify
873 * @addr[ETH_ALEN]: station's MAC address
874 * @sta_id: index of station in uCode's station table
875 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
876 *
877 * Driver selects unused table index when adding new station,
878 * or the index to a pre-existing station entry when modifying that station.
879 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
880 *
881 * modify_mask flags select which parameters to modify vs. leave alone.
882 */
883struct sta_id_modify {
884 u8 addr[ETH_ALEN];
885 __le16 reserved1;
886 u8 sta_id;
887 u8 modify_mask;
888 __le16 reserved2;
889} __packed;
890
891/*
892 * REPLY_ADD_STA = 0x18 (command)
893 *
894 * The device contains an internal table of per-station information,
895 * with info on security keys, aggregation parameters, and Tx rates for
896 * initial Tx attempt and any retries (agn devices use
897 * REPLY_TX_LINK_QUALITY_CMD).
898 *
899 * REPLY_ADD_STA sets up the table entry for one station, either creating
900 * a new entry, or modifying a pre-existing one.
901 *
902 * NOTE: RXON command (without "associated" bit set) wipes the station table
903 * clean. Moving into RF_KILL state does this also. Driver must set up
904 * new station table before transmitting anything on the RXON channel
905 * (except active scans or active measurements; those commands carry
906 * their own txpower/rate setup data).
907 *
908 * When getting started on a new channel, driver must set up the
909 * IWL_BROADCAST_ID entry (last entry in the table). For a client
910 * station in a BSS, once an AP is selected, driver sets up the AP STA
911 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
912 * are all that are needed for a BSS client station. If the device is
913 * used as AP, or in an IBSS network, driver must set up station table
914 * entries for all STAs in network, starting with index IWL_STA_ID.
915 */
916
917struct iwl_addsta_cmd {
918 u8 mode; /* 1: modify existing, 0: add new station */
919 u8 reserved[3];
920 struct sta_id_modify sta;
921 struct iwl_keyinfo key;
922 __le32 station_flags; /* STA_FLG_* */
923 __le32 station_flags_msk; /* STA_FLG_* */
924
925 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
926 * corresponding to bit (e.g. bit 5 controls TID 5).
927 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
928 __le16 tid_disable_tx;
929
930 __le16 rate_n_flags; /* 3945 only */
931
932 /* TID for which to add block-ack support.
933 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
934 u8 add_immediate_ba_tid;
935
936 /* TID for which to remove block-ack support.
937 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
938 u8 remove_immediate_ba_tid;
939
940 /* Starting Sequence Number for added block-ack support.
941 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
942 __le16 add_immediate_ba_ssn;
943
944 /*
945 * Number of packets OK to transmit to station even though
946 * it is asleep -- used to synchronise PS-poll and u-APSD
947 * responses while ucode keeps track of STA sleep state.
948 */
949 __le16 sleep_tx_count;
950
951 __le16 reserved2;
952} __packed;
953
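/*
 * Illustrative sketch (not part of the uCode API definitions) of
 * preparing a minimal REPLY_ADD_STA command for a brand new station
 * entry, following the notes above; the real driver builds and sends
 * this command with additional locking and bookkeeping. The helper
 * name is hypothetical.
 */
static inline void example_prepare_addsta(struct iwl_addsta_cmd *cmd,
					  const u8 *addr, u8 sta_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->mode = 0;				/* 0: add a new station entry */
	memcpy(cmd->sta.addr, addr, ETH_ALEN);
	cmd->sta.sta_id = sta_id;
	cmd->sta.modify_mask = 0;		/* nothing to selectively modify */
	cmd->station_flags = 0;			/* STA_FLG_* bits as needed */
	cmd->tid_disable_tx = cpu_to_le16(0);	/* Tx enabled on all TIDs */
}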
954
955#define ADD_STA_SUCCESS_MSK 0x1
956#define ADD_STA_NO_ROOM_IN_TABLE 0x2
957#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
958#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
959/*
960 * REPLY_ADD_STA = 0x18 (response)
961 */
962struct iwl_add_sta_resp {
963 u8 status; /* ADD_STA_* */
964} __packed;
965
966#define REM_STA_SUCCESS_MSK 0x1
967/*
968 * REPLY_REMOVE_STA = 0x19 (response)
969 */
970struct iwl_rem_sta_resp {
971 u8 status;
972} __packed;
973
974/*
975 * REPLY_REMOVE_STA = 0x19 (command)
976 */
977struct iwl_rem_sta_cmd {
978 u8 num_sta; /* number of removed stations */
979 u8 reserved[3];
980 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
981 u8 reserved2[2];
982} __packed;
983
984
985/* WiFi queues mask */
986#define IWL_SCD_BK_MSK cpu_to_le32(BIT(0))
987#define IWL_SCD_BE_MSK cpu_to_le32(BIT(1))
988#define IWL_SCD_VI_MSK cpu_to_le32(BIT(2))
989#define IWL_SCD_VO_MSK cpu_to_le32(BIT(3))
990#define IWL_SCD_MGMT_MSK cpu_to_le32(BIT(3))
991
992/* PAN queues mask */
993#define IWL_PAN_SCD_BK_MSK cpu_to_le32(BIT(4))
994#define IWL_PAN_SCD_BE_MSK cpu_to_le32(BIT(5))
995#define IWL_PAN_SCD_VI_MSK cpu_to_le32(BIT(6))
996#define IWL_PAN_SCD_VO_MSK cpu_to_le32(BIT(7))
997#define IWL_PAN_SCD_MGMT_MSK cpu_to_le32(BIT(7))
998#define IWL_PAN_SCD_MULTICAST_MSK cpu_to_le32(BIT(8))
999
1000#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1001
1002#define IWL_DROP_SINGLE 0
1003#define IWL_DROP_ALL (BIT(IWL_RXON_CTX_BSS) | BIT(IWL_RXON_CTX_PAN))
1004
1005/*
1006 * REPLY_TXFIFO_FLUSH = 0x1e (command and response)
1007 *
1008 * When using a full FIFO flush, this command checks the scheduler HW block
1009 * WR/RD pointers to verify that all frames were transferred by DMA into the
1010 * relevant TX FIFO queue. The command can only complete once the DMA is
1011 * finished and the queue is empty.
1012 * This command is used to flush transmit commands from the TXFIFO. It may
1013 * operate on a single queue or on multiple queues; the command queue cannot
1014 * be flushed by this command. The command response is returned when all the
1015 * queue flush operations are done. Each flushed TX command returns a response
1016 * with the FLUSH status set in the TX response status. When the FIFO flush
1017 * operation is used, the flush operation ends when both the scheduler DMA
1018 * done and TXFIFO empty conditions are set.
1019 *
1020 * @fifo_control: bit mask for which queues to flush
1021 * @flush_control: flush controls
1022 * 0: Dump single MSDU
1023 * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
1024 * 2: Dump all FIFO
1025 */
1026struct iwl_txfifo_flush_cmd {
1027 __le32 fifo_control;
1028 __le16 flush_control;
1029 __le16 reserved;
1030} __packed;
1031
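/*
 * Illustrative sketch (not part of the uCode API definitions) of building
 * a REPLY_TXFIFO_FLUSH command that flushes the four BSS access-category
 * queues with the "dump all FIFO" flush control documented above; the
 * helper name is hypothetical.
 */
static inline void example_prepare_txfifo_flush(struct iwl_txfifo_flush_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
			    IWL_SCD_BE_MSK | IWL_SCD_BK_MSK;
	cmd->flush_control = cpu_to_le16(2);	/* 2: dump all FIFO */
}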
1032/*
1033 * REPLY_WEPKEY = 0x20
1034 */
1035struct iwl_wep_key {
1036 u8 key_index;
1037 u8 key_offset;
1038 u8 reserved1[2];
1039 u8 key_size;
1040 u8 reserved2[3];
1041 u8 key[16];
1042} __packed;
1043
1044struct iwl_wep_cmd {
1045 u8 num_keys;
1046 u8 global_key_type;
1047 u8 flags;
1048 u8 reserved;
1049 struct iwl_wep_key key[0];
1050} __packed;
1051
1052#define WEP_KEY_WEP_TYPE 1
1053#define WEP_KEYS_MAX 4
1054#define WEP_INVALID_OFFSET 0xff
1055#define WEP_KEY_LEN_64 5
1056#define WEP_KEY_LEN_128 13
1057
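/*
 * Illustrative sketch (not part of the uCode API definitions) of filling
 * a REPLY_WEPKEY command for a single 40-bit default key through the
 * flexible key[] array; the caller is assumed to have allocated
 * sizeof(struct iwl_wep_cmd) + sizeof(struct iwl_wep_key) bytes. The
 * helper name and the key_offset choice are assumptions for this sketch.
 */
static inline void example_fill_wep_cmd(struct iwl_wep_cmd *cmd,
					u8 idx, const u8 *key_data)
{
	cmd->num_keys = 1;
	cmd->global_key_type = WEP_KEY_WEP_TYPE;
	cmd->flags = 0;

	cmd->key[0].key_index = idx;
	cmd->key[0].key_offset = idx;	/* slot in uCode's key table */
	cmd->key[0].key_size = WEP_KEY_LEN_64;
	memcpy(cmd->key[0].key, key_data, WEP_KEY_LEN_64);
}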
1058/******************************************************************************
1059 * (4)
1060 * Rx Responses:
1061 *
1062 *****************************************************************************/
1063
1064#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
1065#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
1066
1067#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
1068#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
1069#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
1070#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
1071#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
1072#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
1073
1074#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
1075#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
1076#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
1077#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
1078#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
1079#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8)
1080
1081#define RX_RES_STATUS_STATION_FOUND (1<<6)
1082#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
1083
1084#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
1085#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
1086#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
1087#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
1088#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
1089
1090#define RX_MPDU_RES_STATUS_ICV_OK (0x20)
1091#define RX_MPDU_RES_STATUS_MIC_OK (0x40)
1092#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1093#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
1094
1095
1096#define IWLAGN_RX_RES_PHY_CNT 8
1097#define IWLAGN_RX_RES_AGC_IDX 1
1098#define IWLAGN_RX_RES_RSSI_AB_IDX 2
1099#define IWLAGN_RX_RES_RSSI_C_IDX 3
1100#define IWLAGN_OFDM_AGC_MSK 0xfe00
1101#define IWLAGN_OFDM_AGC_BIT_POS 9
1102#define IWLAGN_OFDM_RSSI_INBAND_A_BITMSK 0x00ff
1103#define IWLAGN_OFDM_RSSI_ALLBAND_A_BITMSK 0xff00
1104#define IWLAGN_OFDM_RSSI_A_BIT_POS 0
1105#define IWLAGN_OFDM_RSSI_INBAND_B_BITMSK 0xff0000
1106#define IWLAGN_OFDM_RSSI_ALLBAND_B_BITMSK 0xff000000
1107#define IWLAGN_OFDM_RSSI_B_BIT_POS 16
1108#define IWLAGN_OFDM_RSSI_INBAND_C_BITMSK 0x00ff
1109#define IWLAGN_OFDM_RSSI_ALLBAND_C_BITMSK 0xff00
1110#define IWLAGN_OFDM_RSSI_C_BIT_POS 0
1111
1112struct iwlagn_non_cfg_phy {
1113 __le32 non_cfg_phy[IWLAGN_RX_RES_PHY_CNT]; /* up to 8 phy entries */
1114} __packed;
1115
1116
1117/*
1118 * REPLY_RX = 0xc3 (response only, not a command)
1119 * Used only for legacy (non 11n) frames.
1120 */
1121struct iwl_rx_phy_res {
1122 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1123 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1124 u8 stat_id; /* configurable DSP phy data set ID */
1125 u8 reserved1;
1126 __le64 timestamp; /* TSF at on air rise */
1127 __le32 beacon_time_stamp; /* beacon at on-air rise */
1128 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1129 __le16 channel; /* channel number */
1130 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1131 __le32 rate_n_flags; /* RATE_MCS_* */
1132 __le16 byte_count; /* frame's byte-count */
1133 __le16 frame_time; /* frame's time on the air */
1134} __packed;
1135
1136struct iwl_rx_mpdu_res_start {
1137 __le16 byte_count;
1138 __le16 reserved;
1139} __packed;
1140
1141
1142/******************************************************************************
1143 * (5)
1144 * Tx Commands & Responses:
1145 *
1146 * Driver must place each REPLY_TX command into one of the prioritized Tx
1147 * queues in host DRAM, shared between driver and device (see comments for
1148 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1149 * are preparing to transmit, the device pulls the Tx command over the PCI
1150 * bus via one of the device's Tx DMA channels, to fill an internal FIFO
1151 * from which data will be transmitted.
1152 *
1153 * uCode handles all timing and protocol related to control frames
1154 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1155 * handle reception of block-acks; uCode updates the host driver via
1156 * REPLY_COMPRESSED_BA.
1157 *
1158 * uCode handles retrying Tx when an ACK is expected but not received.
1159 * This includes trying lower data rates than the one requested in the Tx
1160 * command, as set up by the REPLY_RATE_SCALE (for 3945) or
1161 * REPLY_TX_LINK_QUALITY_CMD (agn).
1162 *
1163 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1164 * This command must be executed after every RXON command, before Tx can occur.
1165 *****************************************************************************/
1166
1167/* REPLY_TX Tx flags field */
1168
1169/*
1170 * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
1171 * before this frame. If CTS-to-self is required, check the
1172 * RXON_FLG_SELF_CTS_EN status.
1173 * unused in 3945/4965, used in 5000 series and after
1174 */
1175#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
1176
1177/*
1178 * 1: Use Request-To-Send protocol before this frame.
1179 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
1180 * used in 3945/4965, unused in 5000 series and after
1181 */
1182#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
1183
1184/*
1185 * 1: Transmit Clear-To-Send to self before this frame.
1186 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC management frames.
1187 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
1188 * used in 3945/4965, unused in 5000 series and after
1189 */
1190#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
1191
1192/* 1: Expect ACK from receiving station
1193 * 0: Don't expect ACK (MAC header's duration field s/b 0)
1194 * Set this for unicast frames, but not broadcast/multicast. */
1195#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1196
1197/* For agn devices:
1198 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
1199 * Tx command's initial_rate_index indicates first rate to try;
1200 * uCode walks through table for additional Tx attempts.
1201 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
1202 * This rate will be used for all Tx attempts; it will not be scaled. */
1203#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
1204
1205/* 1: Expect immediate block-ack.
1206 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
1207#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
1208
1209/*
1210 * 1: Frame requires full Tx-Op protection.
1211 * Set this if either RTS or CTS Tx Flag gets set.
1212 * used in 3945/4965, unused in 5000 series and after
1213 */
1214#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
1215
1216/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
1217 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
1218#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
1219#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
1220#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
1221
1222/* 1: Ignore Bluetooth priority for this frame.
1223 * 0: Delay Tx until Bluetooth device is done (normal usage). */
1224#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12)
1225
1226/* 1: uCode overrides sequence control field in MAC header.
1227 * 0: Driver provides sequence control field in MAC header.
1228 * Set this for management frames, non-QOS data frames, non-unicast frames,
1229 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
1230#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
1231
1232/* 1: This frame is non-last MPDU; more fragments are coming.
1233 * 0: Last fragment, or not using fragmentation. */
1234#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
1235
1236/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
1237 * 0: No TSF required in outgoing frame.
1238 * Set this for transmitting beacons and probe responses. */
1239#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
1240
1241/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
1242 * alignment of frame's payload data field.
1243 * 0: No pad
1244 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
1245 * field (but not both). Driver must align frame data (i.e. data following
1246 * MAC header) to DWORD boundary. */
1247#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
1248
1249/* accelerate aggregation support
1250 * 0 - no CCMP encryption; 1 - CCMP encryption */
1251#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
1252
1253/* HCCA-AP - disable duration overwriting. */
1254#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
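/*
 * Illustrative sketch (not part of the firmware API): how a driver might
 * compose the tx_flags word for a typical unicast QoS data frame on an agn
 * device, using the flag definitions above.  The helper name and the use of
 * the generic mac80211 header helpers are assumptions for the example.
 */
static inline __le32 iwl_example_tx_flags(const struct ieee80211_hdr *hdr)
{
	__le32 tx_flags = 0;

	/* Unicast data: expect an ACK, and let uCode rate-scale via the
	 * station's REPLY_TX_LINK_QUALITY_CMD table. */
	if (!is_multicast_ether_addr(hdr->addr1))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

	/* Management and non-QoS data frames let uCode own the sequence
	 * number (TX_CMD_FLG_SEQ_CTL_MSK set); QoS data keeps the driver's. */
	if (!ieee80211_is_data_qos(hdr->frame_control))
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;

	return tx_flags;
}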
1255
1256
1257/*
1258 * TX command security control
1259 */
1260#define TX_CMD_SEC_WEP 0x01
1261#define TX_CMD_SEC_CCM 0x02
1262#define TX_CMD_SEC_TKIP 0x03
1263#define TX_CMD_SEC_MSK 0x03
1264#define TX_CMD_SEC_SHIFT 6
1265#define TX_CMD_SEC_KEY128 0x08
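/*
 * Illustrative sketch only: encoding the security control byte.  For WEP the
 * 2-bit key index is shifted into place with TX_CMD_SEC_SHIFT and
 * TX_CMD_SEC_KEY128 marks a 104-bit key; CCMP/TKIP just select the cipher
 * (the key itself goes into iwl_tx_cmd.key[]).  The helper name and its
 * parameters are hypothetical.
 */
static inline u8 iwl_example_wep_sec_ctl(u8 key_idx, bool is_wep104)
{
	u8 sec_ctl = TX_CMD_SEC_WEP |
		     ((key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

	if (is_wep104)
		sec_ctl |= TX_CMD_SEC_KEY128;
	return sec_ctl;
}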
1266
1267/*
1268 * security overhead sizes
1269 */
1270#define WEP_IV_LEN 4
1271#define WEP_ICV_LEN 4
1272#define CCMP_MIC_LEN 8
1273#define TKIP_ICV_LEN 4
1274
1275/*
1276 * REPLY_TX = 0x1c (command)
1277 */
1278
1279/*
1280 * 4965 uCode updates these Tx attempt count values in host DRAM.
1281 * Used for managing Tx retries when expecting block-acks.
1282 * Driver should set these fields to 0.
1283 */
1284struct iwl_dram_scratch {
1285 u8 try_cnt; /* Tx attempts */
1286 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1287 __le16 reserved;
1288} __packed;
1289
1290struct iwl_tx_cmd {
1291 /*
1292 * MPDU byte count:
1293 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1294 * + 8 byte IV for CCM or TKIP (not used for WEP)
1295 * + Data payload
1296 * + 8-byte MIC (not used for CCM/WEP)
1297 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1298	 *	MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1299 * Range: 14-2342 bytes.
1300 */
1301 __le16 len;
1302
1303 /*
1304 * MPDU or MSDU byte count for next frame.
1305 * Used for fragmentation and bursting, but not 11n aggregation.
1306 * Same as "len", but for next frame. Set to 0 if not applicable.
1307 */
1308 __le16 next_frame_len;
1309
1310 __le32 tx_flags; /* TX_CMD_FLG_* */
1311
1312 /* uCode may modify this field of the Tx command (in host DRAM!).
1313 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
1314 struct iwl_dram_scratch scratch;
1315
1316 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
1317 __le32 rate_n_flags; /* RATE_MCS_* */
1318
1319 /* Index of destination station in uCode's station table */
1320 u8 sta_id;
1321
1322 /* Type of security encryption: CCM or TKIP */
1323 u8 sec_ctl; /* TX_CMD_SEC_* */
1324
1325 /*
1326 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
1327 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
1328 * data frames, this field may be used to selectively reduce initial
1329 * rate (via non-0 value) for special frames (e.g. management), while
1330 * still supporting rate scaling for all frames.
1331 */
1332 u8 initial_rate_index;
1333 u8 reserved;
1334 u8 key[16];
1335 __le16 next_frame_flags;
1336 __le16 reserved2;
1337 union {
1338 __le32 life_time;
1339 __le32 attempt;
1340 } stop_time;
1341
1342 /* Host DRAM physical address pointer to "scratch" in this command.
1343 * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
1344 __le32 dram_lsb_ptr;
1345 u8 dram_msb_ptr;
1346
1347 u8 rts_retry_limit; /*byte 50 */
1348 u8 data_retry_limit; /*byte 51 */
1349 u8 tid_tspec;
1350 union {
1351 __le16 pm_frame_timeout;
1352 __le16 attempt_duration;
1353 } timeout;
1354
1355 /*
1356 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1357 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1358 */
1359 __le16 driver_txop;
1360
1361 /*
1362 * MAC header goes here, followed by 2 bytes padding if MAC header
1363 * length is 26 or 30 bytes, followed by payload data
1364 */
1365 u8 payload[0];
1366 struct ieee80211_hdr hdr[0];
1367} __packed;
1368
1369/*
1370 * TX command response is sent after *agn* transmission attempts.
1371 *
1372 * Both postpone and abort statuses are expected behavior from uCode; no
1373 * special handling is required from the driver, except for RFKILL_FLUSH,
1374 * which requires a tx flush host command to flush all tx frames in the queues.
1375 */
1376enum {
1377 TX_STATUS_SUCCESS = 0x01,
1378 TX_STATUS_DIRECT_DONE = 0x02,
1379 /* postpone TX */
1380 TX_STATUS_POSTPONE_DELAY = 0x40,
1381 TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
1382 TX_STATUS_POSTPONE_BT_PRIO = 0x42,
1383 TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
1384 TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
1385 /* abort TX */
1386 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
1387 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
1388 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
1389 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1390 TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
1391 TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
1392 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1393 TX_STATUS_FAIL_DEST_PS = 0x88,
1394 TX_STATUS_FAIL_HOST_ABORTED = 0x89,
1395 TX_STATUS_FAIL_BT_RETRY = 0x8a,
1396 TX_STATUS_FAIL_STA_INVALID = 0x8b,
1397 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1398 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1399 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1400 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1401 TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
1402 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1403};
1404
1405#define TX_PACKET_MODE_REGULAR 0x0000
1406#define TX_PACKET_MODE_BURST_SEQ 0x0100
1407#define TX_PACKET_MODE_BURST_FIRST 0x0200
1408
1409enum {
1410 TX_POWER_PA_NOT_ACTIVE = 0x0,
1411};
1412
1413enum {
1414 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
1415 TX_STATUS_DELAY_MSK = 0x00000040,
1416 TX_STATUS_ABORT_MSK = 0x00000080,
1417 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
1418 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
1419 TX_RESERVED = 0x00780000, /* bits 19:22 */
1420 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
1421 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1422};
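/*
 * Illustrative sketch: classifying the 16-bit frame status reported in
 * struct iwlagn_tx_resp (defined below).  The low byte carries the
 * TX_STATUS_* code; TX_STATUS_DELAY_MSK/TX_STATUS_ABORT_MSK distinguish the
 * postpone and abort classes.  The helper name is hypothetical.
 */
static inline bool iwl_example_tx_frame_succeeded(u16 status)
{
	status &= TX_STATUS_MSK;

	return status == TX_STATUS_SUCCESS ||
	       status == TX_STATUS_DIRECT_DONE;
}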
1423
1424/* *******************************
1425 * TX aggregation status
1426 ******************************* */
1427
1428enum {
1429 AGG_TX_STATE_TRANSMITTED = 0x00,
1430 AGG_TX_STATE_UNDERRUN_MSK = 0x01,
1431 AGG_TX_STATE_BT_PRIO_MSK = 0x02,
1432 AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
1433 AGG_TX_STATE_ABORT_MSK = 0x08,
1434 AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
1435 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
1436 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40,
1437 AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
1438 AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
1439 AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
1440 AGG_TX_STATE_DUMP_TX_MSK = 0x200,
1441 AGG_TX_STATE_DELAY_TX_MSK = 0x400
1442};
1443
1444#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
1445#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
1446
1447#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
1448 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
1449 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK)
1450
1451/* # tx attempts for first frame in aggregation */
1452#define AGG_TX_STATE_TRY_CNT_POS 12
1453#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
1454
1455/* Command ID and sequence number of Tx command for this frame */
1456#define AGG_TX_STATE_SEQ_NUM_POS 16
1457#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
1458
1459/*
1460 * REPLY_TX = 0x1c (response)
1461 *
1462 * This response may be in one of two slightly different formats, indicated
1463 * by the frame_count field:
1464 *
1465 * 1) No aggregation (frame_count == 1). This reports Tx results for
1466 * a single frame. Multiple attempts, at various bit rates, may have
1467 * been made for this frame.
1468 *
1469 * 2) Aggregation (frame_count > 1). This reports Tx results for
1470 * 2 or more frames that used block-acknowledge. All frames were
1471 * transmitted at same rate. Rate scaling may have been used if first
1472 * frame in this new agg block failed in previous agg block(s).
1473 *
1474 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
1475 * block-ack has not been received by the time the agn device records
1476 * this status.
1477 * This status relates to reasons the tx might have been blocked or aborted
1478 * within the sending station (this agn device), rather than whether it was
1479 * received successfully by the destination station.
1480 */
1481struct agg_tx_status {
1482 __le16 status;
1483 __le16 sequence;
1484} __packed;
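/*
 * Illustrative sketch: walking the per-frame agg_tx_status entries that
 * follow the first status word of an aggregation response and counting how
 * many member frames actually made it onto the air.  The helper name is
 * hypothetical; the layout follows the iwlagn_tx_resp description below.
 */
static inline int iwl_example_agg_frames_sent(const struct agg_tx_status *st,
					      int frame_count)
{
	int i, sent = 0;

	for (i = 0; i < frame_count; i++) {
		u16 status = le16_to_cpu(st[i].status);

		/* Bits 0-11 carry the AGG_TX_STATE_* code; bits 12-15 hold
		 * the retry count (AGG_TX_STATE_TRY_CNT_MSK). */
		if ((status & AGG_TX_STATUS_MSK) == AGG_TX_STATE_TRANSMITTED)
			sent++;
	}
	return sent;
}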
1485
1486/*
1487 * definitions for initial rate index field
1488 * bits [3:0] initial rate index
1489 * bits [6:4] rate table color, used for the initial rate
1490 * bit-7 invalid rate indication
1491 * i.e. rate was not chosen from rate table
1492 * or rate table color was changed during frame retries
1493 *	(refer to the TLC rate info)
1494 */
1495
1496#define IWL50_TX_RES_INIT_RATE_INDEX_POS 0
1497#define IWL50_TX_RES_INIT_RATE_INDEX_MSK 0x0f
1498#define IWL50_TX_RES_RATE_TABLE_COLOR_POS 4
1499#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK 0x70
1500#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80
1501
1502/* refer to ra_tid */
1503#define IWLAGN_TX_RES_TID_POS 0
1504#define IWLAGN_TX_RES_TID_MSK 0x0f
1505#define IWLAGN_TX_RES_RA_POS 4
1506#define IWLAGN_TX_RES_RA_MSK 0xf0
1507
1508struct iwlagn_tx_resp {
1509 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1510 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1511 u8 failure_rts; /* # failures due to unsuccessful RTS */
1512 u8 failure_frame; /* # failures due to no ACK (unused for agg) */
1513
1514 /* For non-agg: Rate at which frame was successful.
1515 * For agg: Rate at which all frames were transmitted. */
1516 __le32 rate_n_flags; /* RATE_MCS_* */
1517
1518 /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
1519 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1520 __le16 wireless_media_time; /* uSecs */
1521
1522 u8 pa_status; /* RF power amplifier measurement (not used) */
1523 u8 pa_integ_res_a[3];
1524 u8 pa_integ_res_b[3];
1525 u8 pa_integ_res_C[3];
1526
1527 __le32 tfd_info;
1528 __le16 seq_ctl;
1529 __le16 byte_cnt;
1530 u8 tlc_info;
1531 u8 ra_tid; /* tid (0:3), sta_id (4:7) */
1532 __le16 frame_ctrl;
1533 /*
1534 * For non-agg: frame status TX_STATUS_*
1535 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
1536 * fields follow this one, up to frame_count.
1537 * Bit fields:
1538 * 11- 0: AGG_TX_STATE_* status code
1539 * 15-12: Retry count for 1st frame in aggregation (retries
1540 * occur if tx failed for this frame when it was a
1541 * member of a previous aggregation block). If rate
1542 * scaling is used, retry count indicates the rate
1543 * table entry used for all frames in the new agg.
1544 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1545 */
1546 struct agg_tx_status status; /* TX status (in aggregation -
1547 * status of 1st frame) */
1548} __packed;
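/*
 * Illustrative sketch: unpacking the ra_tid byte and the initial-rate index
 * from a REPLY_TX response using the IWLAGN_TX_RES_* and IWL50_TX_RES_*
 * masks above.  The helper name and out-parameters are hypothetical.
 */
static inline void iwl_example_unpack_tx_resp(const struct iwlagn_tx_resp *r,
					      u8 *sta_id, u8 *tid,
					      u8 *init_rate_index)
{
	*tid = r->ra_tid & IWLAGN_TX_RES_TID_MSK;
	*sta_id = (r->ra_tid & IWLAGN_TX_RES_RA_MSK) >> IWLAGN_TX_RES_RA_POS;
	*init_rate_index = (r->tlc_info & IWL50_TX_RES_INIT_RATE_INDEX_MSK) >>
			   IWL50_TX_RES_INIT_RATE_INDEX_POS;
}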
1549/*
1550 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1551 *
1552 * Reports Block-Acknowledge from recipient station
1553 */
1554struct iwl_compressed_ba_resp {
1555 __le32 sta_addr_lo32;
1556 __le16 sta_addr_hi16;
1557 __le16 reserved;
1558
1559 /* Index of recipient (BA-sending) station in uCode's station table */
1560 u8 sta_id;
1561 u8 tid;
1562 __le16 seq_ctl;
1563 __le64 bitmap;
1564 __le16 scd_flow;
1565 __le16 scd_ssn;
1566 /* following only for 5000 series and up */
1567 u8 txed; /* number of frames sent */
1568 u8 txed_2_done; /* number of frames acked */
1569} __packed;
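/*
 * Illustrative sketch: counting acknowledged frames from a compressed
 * block-ack.  On 5000-series and later devices txed/txed_2_done report the
 * counts directly; otherwise the set bits of the 64-bit bitmap can be
 * counted (hweight64() is the standard kernel helper).  The helper name and
 * the "has_tx_counts" flag are hypothetical.
 */
static inline int iwl_example_ba_acked(const struct iwl_compressed_ba_resp *ba,
				       bool has_tx_counts)
{
	if (has_tx_counts)
		return ba->txed_2_done;

	return hweight64(le64_to_cpu(ba->bitmap));
}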
1570
1571/*
1572 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
1573 *
1574 */
1575
1576/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
1577#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1578
1579/* # of EDCA prioritized tx fifos */
1580#define LINK_QUAL_AC_NUM AC_NUM
1581
1582/* # entries in rate scale table to support Tx retries */
1583#define LINK_QUAL_MAX_RETRY_NUM 16
1584
1585/* Tx antenna selection values */
1586#define LINK_QUAL_ANT_A_MSK (1 << 0)
1587#define LINK_QUAL_ANT_B_MSK (1 << 1)
1588#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1589
1590
1591/**
1592 * struct iwl_link_qual_general_params
1593 *
1594 * Used in REPLY_TX_LINK_QUALITY_CMD
1595 */
1596struct iwl_link_qual_general_params {
1597 u8 flags;
1598
1599 /* No entries at or above this (driver chosen) index contain MIMO */
1600 u8 mimo_delimiter;
1601
1602 /* Best single antenna to use for single stream (legacy, SISO). */
1603 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
1604
1605 /* Best antennas to use for MIMO (unused for 4965, assumes both). */
1606 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
1607
1608 /*
1609 * If driver needs to use different initial rates for different
1610 * EDCA QOS access categories (as implemented by tx fifos 0-3),
1611 * this table will set that up, by indicating the indexes in the
1612 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
1613 * Otherwise, driver should set all entries to 0.
1614 *
1615 * Entry usage:
1616 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
1617 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
1618 */
1619 u8 start_rate_index[LINK_QUAL_AC_NUM];
1620} __packed;
1621
1622#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
1623#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
1624#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
1625
1626#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
1627#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
1628#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
1629
1630#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
1631#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
1632#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
1633
1634/**
1635 * struct iwl_link_qual_agg_params
1636 *
1637 * Used in REPLY_TX_LINK_QUALITY_CMD
1638 */
1639struct iwl_link_qual_agg_params {
1640
1641 /*
1642	 * Maximum number of uSec in aggregation.
1643	 * Default is 4000 (4 milliseconds) if not configured in .cfg.
1644 */
1645 __le16 agg_time_limit;
1646
1647 /*
1648 * Number of Tx retries allowed for a frame, before that frame will
1649 * no longer be considered for the start of an aggregation sequence
1650 * (scheduler will then try to tx it as single frame).
1651 * Driver should set this to 3.
1652 */
1653 u8 agg_dis_start_th;
1654
1655 /*
1656 * Maximum number of frames in aggregation.
1657 * 0 = no limit (default). 1 = no aggregation.
1658 * Other values = max # frames in aggregation.
1659 */
1660 u8 agg_frame_cnt_limit;
1661
1662 __le32 reserved;
1663} __packed;
1664
1665/*
1666 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1667 *
1668 * For agn devices only; 3945 uses REPLY_RATE_SCALE.
1669 *
1670 * Each station in the agn device's internal station table has its own
1671 * table of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO)
1672 * for retrying Tx when an ACK is not received.
1673 * This command replaces the entire table for one station.
1674 *
1675 *
1676 * NOTE: Station must already be in agn device's station table.
1677 * Use REPLY_ADD_STA.
1678 *
1679 * The rate scaling procedures described below work well. Of course, other
1680 * procedures are possible, and may work better for particular environments.
1681 *
1682 *
1683 * FILLING THE RATE TABLE
1684 *
1685 * Given a particular initial rate and mode, as determined by the rate
1686 * scaling algorithm described below, the Linux driver uses the following
1687 * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
1688 * Link Quality command:
1689 *
1690 *
1691 * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
1692 * a) Use this same initial rate for first 3 entries.
1693 * b) Find next lower available rate using same mode (SISO or MIMO),
1694 * use for next 3 entries. If no lower rate available, switch to
1695 * legacy mode (no HT40 channel, no MIMO, no short guard interval).
1696 * c) If using MIMO, set command's mimo_delimiter to number of entries
1697 * using MIMO (3 or 6).
1698 * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
1699 * no MIMO, no short guard interval), at the next lower bit rate
1700 * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
1701 * legacy procedure for remaining table entries.
1702 *
1703 * 2) If using legacy initial rate:
1704 * a) Use the initial rate for only one entry.
1705 * b) For each following entry, reduce the rate to next lower available
1706 * rate, until reaching the lowest available rate.
1707 * c) When reducing rate, also switch antenna selection.
1708 * d) Once lowest available rate is reached, repeat this rate until
1709 * rate table is filled (16 entries), switching antenna each entry.
1710 *
1711 *
1712 * ACCUMULATING HISTORY
1713 *
1714 * The rate scaling algorithm for agn devices, as implemented in Linux driver,
1715 * uses two sets of frame Tx success history: One for the current/active
1716 * modulation mode, and one for a speculative/search mode that is being
1717 * attempted. If the speculative mode turns out to be more effective (i.e.
1718 * actual transfer rate is better), then the driver continues to use the
1719 * speculative mode as the new current active mode.
1720 *
1721 * Each history set contains, separately for each possible rate, data for a
1722 * sliding window of the 62 most recent tx attempts at that rate. The data
1723 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1724 * and attempted frames, from which the driver can additionally calculate a
1725 * success ratio (success / attempted) and number of failures
1726 * (attempted - success), and control the size of the window (attempted).
1727 * The driver uses the bit map to remove successes from the success sum, as
1728 * the oldest tx attempts fall out of the window.
1729 *
1730 * When the agn device makes multiple tx attempts for a given frame, each
1731 * attempt might be at a different rate, and have different modulation
1732 * characteristics (e.g. antenna, fat channel, short guard interval), as set
1733 * up in the rate scaling table in the Link Quality command. The driver must
1734 * determine which rate table entry was used for each tx attempt, to determine
1735 * which rate-specific history to update, and record only those attempts that
1736 * match the modulation characteristics of the history set.
1737 *
1738 * When using block-ack (aggregation), all frames are transmitted at the same
1739 * rate, since there is no per-attempt acknowledgment from the destination
1740 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
1741 * rate_n_flags field. After receiving a block-ack, the driver can update
1742 * history for the entire block all at once.
1743 *
1744 *
1745 * FINDING BEST STARTING RATE:
1746 *
1747 * When working with a selected initial modulation mode (see below), the
1748 * driver attempts to find a best initial rate. The initial rate is the
1749 * first entry in the Link Quality command's rate table.
1750 *
1751 * 1) Calculate actual throughput (success ratio * expected throughput, see
1752 * table below) for current initial rate. Do this only if enough frames
1753 * have been attempted to make the value meaningful: at least 6 failed
1754 * tx attempts, or at least 8 successes. If not enough, don't try rate
1755 * scaling yet.
1756 *
1757 * 2) Find available rates adjacent to current initial rate. Available means:
1758 * a) supported by hardware &&
1759 * b) supported by association &&
1760 * c) within any constraints selected by user
1761 *
1762 * 3) Gather measured throughputs for adjacent rates. These might not have
1763 * enough history to calculate a throughput. That's okay, we might try
1764 * using one of them anyway!
1765 *
1766 * 4) Try decreasing rate if, for current rate:
1767 * a) success ratio is < 15% ||
1768 * b) lower adjacent rate has better measured throughput ||
1769 * c) higher adjacent rate has worse throughput, and lower is unmeasured
1770 *
1771 * As a sanity check, if decrease was determined above, leave rate
1772 * unchanged if:
1773 * a) lower rate unavailable
1774 * b) success ratio at current rate > 85% (very good)
1775 * c) current measured throughput is better than expected throughput
1776 * of lower rate (under perfect 100% tx conditions, see table below)
1777 *
1778 * 5) Try increasing rate if, for current rate:
1779 *    a) success ratio is < 15% ||
1780 *    b) both adjacent rates' throughputs are unmeasured (try it!) ||
1781 *    c) higher adjacent rate has better measured throughput ||
1782 *    d) lower adjacent rate has worse throughput, and higher is unmeasured
1783 *
1784 * As a sanity check, if increase was determined above, leave rate
1785 * unchanged if:
1786 * a) success ratio at current rate < 70%. This is not particularly
1787 * good performance; higher rate is sure to have poorer success.
1788 *
1789 * 6) Re-evaluate the rate after each tx frame. If working with block-
1790 * acknowledge, history and statistics may be calculated for the entire
1791 * block (including prior history that fits within the history windows),
1792 * before re-evaluation.
1793 *
1794 * FINDING BEST STARTING MODULATION MODE:
1795 *
1796 * After working with a modulation mode for a "while" (and doing rate scaling),
1797 * the driver searches for a new initial mode in an attempt to improve
1798 * throughput. The "while" is measured by numbers of attempted frames:
1799 *
1800 * For legacy mode, search for new mode after:
1801 * 480 successful frames, or 160 failed frames
1802 * For high-throughput modes (SISO or MIMO), search for new mode after:
1803 * 4500 successful frames, or 400 failed frames
1804 *
1805 * Mode switch possibilities are (3 for each mode):
1806 *
1807 * For legacy:
1808 * Change antenna, try SISO (if HT association), try MIMO (if HT association)
1809 * For SISO:
1810 * Change antenna, try MIMO, try shortened guard interval (SGI)
1811 * For MIMO:
1812 * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
1813 *
1814 * When trying a new mode, use the same bit rate as the old/current mode when
1815 * trying antenna switches and shortened guard interval. When switching to
1816 * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
1817 * for which the expected throughput (under perfect conditions) is about the
1818 * same or slightly better than the actual measured throughput delivered by
1819 * the old/current mode.
1820 *
1821 * Actual throughput can be estimated by multiplying the expected throughput
1822 * by the success ratio (successful / attempted tx frames). Frame size is
1823 * not considered in this calculation; it assumes that frame size will average
1824 * out to be fairly consistent over several samples. The following are
1825 * metric values for expected throughput assuming 100% success ratio.
1826 * Only G band has support for CCK rates:
1827 *
1828 * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
1829 *
1830 * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
1831 * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
1832 * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
1833 * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
1834 * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
1835 * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
1836 * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
1837 * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
1838 * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
1839 * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
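 *
 * Worked example (the 75% success ratio is an assumed figure): if the current
 * mode is SISO 20MHz at the top rate in the table above (metric 202) and the
 * measured success ratio is 75%, the estimated actual throughput metric is
 * about 202 * 0.75 ~= 151; a candidate mode is adopted if either its success
 * ratio or its own estimate beats this.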
1840 *
1841 * After the new mode has been tried for a short while (minimum of 6 failed
1842 * frames or 8 successful frames), compare success ratio and actual throughput
1843 * estimate of the new mode with the old. If either is better with the new
1844 * mode, continue to use the new mode.
1845 *
1846 * Continue comparing modes until all 3 possibilities have been tried.
1847 * If moving from legacy to HT, try all 3 possibilities from the new HT
1848 * mode. After trying all 3, a best mode is found. Continue to use this mode
1849 * for the longer "while" described above (e.g. 480 successful frames for
1850 * legacy), and then repeat the search process.
1851 *
1852 */
1853struct iwl_link_quality_cmd {
1854
1855 /* Index of destination/recipient station in uCode's station table */
1856 u8 sta_id;
1857 u8 reserved1;
1858 __le16 control; /* not used */
1859 struct iwl_link_qual_general_params general_params;
1860 struct iwl_link_qual_agg_params agg_params;
1861
1862 /*
1863 * Rate info; when using rate-scaling, Tx command's initial_rate_index
1864 * specifies 1st Tx rate attempted, via index into this table.
1865	 * agn devices work their way through this table when retrying Tx.
1866 */
1867 struct {
1868 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
1869 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
1870 __le32 reserved2;
1871} __packed;
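/*
 * Illustrative sketch of the legacy "FILLING THE RATE TABLE" procedure
 * described above: entry 0 holds the chosen initial rate, each following
 * entry steps down to the next lower available rate while toggling the
 * antenna, and the lowest rate repeats until all 16 entries are filled.
 * The next_lower_rate()/toggle_antenna() helpers are hypothetical; real
 * rate_n_flags handling lives in the rate-scaling code (iwl-agn-rs.c).
 */
static inline void
iwl_example_fill_legacy_rs_table(struct iwl_link_quality_cmd *lq,
				 __le32 initial_rate_n_flags,
				 __le32 (*next_lower_rate)(__le32),
				 __le32 (*toggle_antenna)(__le32))
{
	__le32 rate = initial_rate_n_flags;
	int i;

	lq->rs_table[0].rate_n_flags = rate;
	for (i = 1; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		/* next_lower_rate() is assumed to return its argument once
		 * the lowest available rate is reached, so the bottom rate
		 * simply repeats (with alternating antennas) until the table
		 * is full. */
		rate = toggle_antenna(next_lower_rate(rate));
		lq->rs_table[i].rate_n_flags = rate;
	}

	/* A pure-legacy table contains no MIMO entries. */
	lq->general_params.mimo_delimiter = 0;
}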
1872
1873/*
1874 * BT configuration enable flags:
1875 * bit 0 - 1: BT channel announcement enabled
1876 * 0: disable
1877 * bit 1 - 1: priority of BT device enabled
1878 * 0: disable
1879 * bit 2 - 1: BT 2 wire support enabled
1880 * 0: disable
1881 */
1882#define BT_COEX_DISABLE (0x0)
1883#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
1884#define BT_ENABLE_PRIORITY BIT(1)
1885#define BT_ENABLE_2_WIRE BIT(2)
1886
1888#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
1889
1890#define BT_LEAD_TIME_MIN (0x0)
1891#define BT_LEAD_TIME_DEF (0x1E)
1892#define BT_LEAD_TIME_MAX (0xFF)
1893
1894#define BT_MAX_KILL_MIN (0x1)
1895#define BT_MAX_KILL_DEF (0x5)
1896#define BT_MAX_KILL_MAX (0xFF)
1897
1898#define BT_DURATION_LIMIT_DEF 625
1899#define BT_DURATION_LIMIT_MAX 1250
1900#define BT_DURATION_LIMIT_MIN 625
1901
1902#define BT_ON_THRESHOLD_DEF 4
1903#define BT_ON_THRESHOLD_MAX 1000
1904#define BT_ON_THRESHOLD_MIN 1
1905
1906#define BT_FRAG_THRESHOLD_DEF 0
1907#define BT_FRAG_THRESHOLD_MAX 0
1908#define BT_FRAG_THRESHOLD_MIN 0
1909
1910#define BT_AGG_THRESHOLD_DEF 1200
1911#define BT_AGG_THRESHOLD_MAX 8000
1912#define BT_AGG_THRESHOLD_MIN 400
1913
1914/*
1915 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
1916 *
1917 * 3945 and agn devices support hardware handshake with Bluetooth device on
1918 * same platform. Bluetooth device alerts wireless device when it will Tx;
1919 * wireless device can delay or kill its own Tx to accommodate.
1920 */
1921struct iwl_bt_cmd {
1922 u8 flags;
1923 u8 lead_time;
1924 u8 max_kill;
1925 u8 reserved;
1926 __le32 kill_ack_mask;
1927 __le32 kill_cts_mask;
1928} __packed;
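/*
 * Illustrative sketch: a default coexistence configuration built from the
 * REPLY_BT_CONFIG defaults above (channel announcement and priority enabled,
 * no ACK/CTS kill masks).  The helper name is hypothetical.
 */
static inline void iwl_example_default_bt_cmd(struct iwl_bt_cmd *bt)
{
	memset(bt, 0, sizeof(*bt));
	bt->flags = BT_COEX_ENABLE;
	bt->lead_time = BT_LEAD_TIME_DEF;
	bt->max_kill = BT_MAX_KILL_DEF;
	/* kill_ack_mask and kill_cts_mask stay zero: never kill ACK/CTS. */
}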
1929
1930#define IWLAGN_BT_FLAG_CHANNEL_INHIBITION BIT(0)
1931
1932#define IWLAGN_BT_FLAG_COEX_MODE_MASK (BIT(3)|BIT(4)|BIT(5))
1933#define IWLAGN_BT_FLAG_COEX_MODE_SHIFT 3
1934#define IWLAGN_BT_FLAG_COEX_MODE_DISABLED 0
1935#define IWLAGN_BT_FLAG_COEX_MODE_LEGACY_2W 1
1936#define IWLAGN_BT_FLAG_COEX_MODE_3W 2
1937#define IWLAGN_BT_FLAG_COEX_MODE_4W 3
1938
1939#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6)
1940/* Disable Sync PSPoll on SCO/eSCO */
1941#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE BIT(7)
1942
1943#define IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD -75 /* dBm */
1944#define IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD -65 /* dBm */
1945
1946#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
1947#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
1948#define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0
1949
1950#define IWLAGN_BT_MAX_KILL_DEFAULT 5
1951
1952#define IWLAGN_BT3_T7_DEFAULT 1
1953
1954#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000)
1955#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000)
1956#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO cpu_to_le32(0xffffffff)
1957
1958#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2
1959
1960#define IWLAGN_BT3_T2_DEFAULT 0xc
1961
1962#define IWLAGN_BT_VALID_ENABLE_FLAGS cpu_to_le16(BIT(0))
1963#define IWLAGN_BT_VALID_BOOST cpu_to_le16(BIT(1))
1964#define IWLAGN_BT_VALID_MAX_KILL cpu_to_le16(BIT(2))
1965#define IWLAGN_BT_VALID_3W_TIMERS cpu_to_le16(BIT(3))
1966#define IWLAGN_BT_VALID_KILL_ACK_MASK cpu_to_le16(BIT(4))
1967#define IWLAGN_BT_VALID_KILL_CTS_MASK cpu_to_le16(BIT(5))
1968#define IWLAGN_BT_VALID_BT4_TIMES cpu_to_le16(BIT(6))
1969#define IWLAGN_BT_VALID_3W_LUT cpu_to_le16(BIT(7))
1970
1971#define IWLAGN_BT_ALL_VALID_MSK (IWLAGN_BT_VALID_ENABLE_FLAGS | \
1972 IWLAGN_BT_VALID_BOOST | \
1973 IWLAGN_BT_VALID_MAX_KILL | \
1974 IWLAGN_BT_VALID_3W_TIMERS | \
1975 IWLAGN_BT_VALID_KILL_ACK_MASK | \
1976 IWLAGN_BT_VALID_KILL_CTS_MASK | \
1977 IWLAGN_BT_VALID_BT4_TIMES | \
1978 IWLAGN_BT_VALID_3W_LUT)
1979
1980struct iwl_basic_bt_cmd {
1981 u8 flags;
1982 u8 ledtime; /* unused */
1983 u8 max_kill;
1984 u8 bt3_timer_t7_value;
1985 __le32 kill_ack_mask;
1986 __le32 kill_cts_mask;
1987 u8 bt3_prio_sample_time;
1988 u8 bt3_timer_t2_value;
1989 __le16 bt4_reaction_time; /* unused */
1990 __le32 bt3_lookup_table[12];
1991 __le16 bt4_decision_time; /* unused */
1992 __le16 valid;
1993};
1994
1995struct iwl6000_bt_cmd {
1996 struct iwl_basic_bt_cmd basic;
1997 u8 prio_boost;
1998 /*
1999 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
2000	 * Set IWLAGN_BT_VALID_BOOST in the "valid" bitmask
2001	 * when configuring the following boost fields.
2002 u8 tx_prio_boost; /* SW boost of WiFi tx priority */
2003 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
2004};
2005
2006struct iwl2000_bt_cmd {
2007 struct iwl_basic_bt_cmd basic;
2008 __le32 prio_boost;
2009 /*
2010	 * Set IWLAGN_BT_VALID_BOOST in the "valid" bitmask
2011	 * when configuring the following boost fields.
2012 */
2013 u8 reserved;
2014 u8 tx_prio_boost; /* SW boost of WiFi tx priority */
2015 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
2016};
2017
2018#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0))
2019
2020struct iwlagn_bt_sco_cmd {
2021 __le32 flags;
2022};
2023
2024/******************************************************************************
2025 * (6)
2026 * Spectrum Management (802.11h) Commands, Responses, Notifications:
2027 *
2028 *****************************************************************************/
2029
2030/*
2031 * Spectrum Management
2032 */
2033#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
2034 RXON_FILTER_CTL2HOST_MSK | \
2035 RXON_FILTER_ACCEPT_GRP_MSK | \
2036 RXON_FILTER_DIS_DECRYPT_MSK | \
2037 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
2038 RXON_FILTER_ASSOC_MSK | \
2039 RXON_FILTER_BCON_AWARE_MSK)
2040
2041struct iwl_measure_channel {
2042 __le32 duration; /* measurement duration in extended beacon
2043 * format */
2044 u8 channel; /* channel to measure */
2045 u8 type; /* see enum iwl_measure_type */
2046 __le16 reserved;
2047} __packed;
2048
2049/*
2050 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
2051 */
2052struct iwl_spectrum_cmd {
2053 __le16 len; /* number of bytes starting from token */
2054 u8 token; /* token id */
2055 u8 id; /* measurement id -- 0 or 1 */
2056 u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */
2057 u8 periodic; /* 1 = periodic */
2058 __le16 path_loss_timeout;
2059 __le32 start_time; /* start time in extended beacon format */
2060 __le32 reserved2;
2061 __le32 flags; /* rxon flags */
2062 __le32 filter_flags; /* rxon filter flags */
2063 __le16 channel_count; /* minimum 1, maximum 10 */
2064 __le16 reserved3;
2065 struct iwl_measure_channel channels[10];
2066} __packed;
2067
2068/*
2069 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
2070 */
2071struct iwl_spectrum_resp {
2072 u8 token;
2073 u8 id; /* id of the prior command replaced, or 0xff */
2074 __le16 status; /* 0 - command will be handled
2075 * 1 - cannot handle (conflicts with another
2076 * measurement) */
2077} __packed;
2078
2079enum iwl_measurement_state {
2080 IWL_MEASUREMENT_START = 0,
2081 IWL_MEASUREMENT_STOP = 1,
2082};
2083
2084enum iwl_measurement_status {
2085 IWL_MEASUREMENT_OK = 0,
2086 IWL_MEASUREMENT_CONCURRENT = 1,
2087 IWL_MEASUREMENT_CSA_CONFLICT = 2,
2088 IWL_MEASUREMENT_TGH_CONFLICT = 3,
2089 /* 4-5 reserved */
2090 IWL_MEASUREMENT_STOPPED = 6,
2091 IWL_MEASUREMENT_TIMEOUT = 7,
2092 IWL_MEASUREMENT_PERIODIC_FAILED = 8,
2093};
2094
2095#define NUM_ELEMENTS_IN_HISTOGRAM 8
2096
2097struct iwl_measurement_histogram {
2098 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2099 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2100} __packed;
2101
2102/* clear channel availability counters */
2103struct iwl_measurement_cca_counters {
2104 __le32 ofdm;
2105 __le32 cck;
2106} __packed;
2107
2108enum iwl_measure_type {
2109 IWL_MEASURE_BASIC = (1 << 0),
2110 IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
2111 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
2112 IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
2113 IWL_MEASURE_FRAME = (1 << 4),
2114 /* bits 5:6 are reserved */
2115 IWL_MEASURE_IDLE = (1 << 7),
2116};
2117
2118/*
2119 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
2120 */
2121struct iwl_spectrum_notification {
2122 u8 id; /* measurement id -- 0 or 1 */
2123 u8 token;
2124 u8 channel_index; /* index in measurement channel list */
2125 u8 state; /* 0 - start, 1 - stop */
2126 __le32 start_time; /* lower 32-bits of TSF */
2127 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
2128 u8 channel;
2129 u8 type; /* see enum iwl_measurement_type */
2130 u8 reserved1;
2131	/* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
2132	 *	valid if applicable for the measurement type requested. */
2133 __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */
2134 __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */
2135 __le32 cca_time; /* channel load time in usecs */
2136 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
2137 * unidentified */
2138 u8 reserved2[3];
2139 struct iwl_measurement_histogram histogram;
2140 __le32 stop_time; /* lower 32-bits of TSF */
2141 __le32 status; /* see iwl_measurement_status */
2142} __packed;
2143
2144/******************************************************************************
2145 * (7)
2146 * Power Management Commands, Responses, Notifications:
2147 *
2148 *****************************************************************************/
2149
2150/**
2151 * struct iwl_powertable_cmd - Power Table Command
2152 * @flags: See below:
2153 *
2154 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
2155 *
2156 * PM allow:
2157 *	bit 0 - '0' Driver does not allow power management
2158 *		'1' Driver allows PM (use rest of parameters)
2159 *
2160 * uCode sends sleep notifications:
2161 *	bit 1 - '0' Don't send sleep notification
2162 *		'1' Send sleep notification (SEND_PM_NOTIFICATION)
2163 *
2164 * Sleep over DTIM
2165 *	bit 2 - '0' PM has to wake up every DTIM
2166 *		'1' PM may sleep over DTIM until the listen interval.
2167 *
2168 * PCI power managed
2169 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2170 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2171 *
2172 * Fast PD
2173 * bit 4 - '1' Put radio to sleep when receiving frame for others
2174 *
2175 * Force sleep Modes
2176 * bit 31/30- '00' use both mac/xtal sleeps
2177 * '01' force Mac sleep
2178 * '10' force xtal sleep
2179 * '11' Illegal set
2180 *
2181 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
2182 * uCode assumes sleep over DTIM is allowed and we don't need to wake up
2183 * for every DTIM.
2184 */
2185#define IWL_POWER_VEC_SIZE 5
2186
2187#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2188#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
2189#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
2190#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
2191#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2192#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
2193#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
2194#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
2195#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
2196#define IWL_POWER_BT_SCO_ENA cpu_to_le16(BIT(8))
2197#define IWL_POWER_ADVANCE_PM_ENA_MSK cpu_to_le16(BIT(9))
2198
2199struct iwl_powertable_cmd {
2200 __le16 flags;
2201 u8 keep_alive_seconds; /* 3945 reserved */
2202 u8 debug_flags; /* 3945 reserved */
2203 __le32 rx_data_timeout;
2204 __le32 tx_data_timeout;
2205 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2206 __le32 keep_alive_beacons;
2207} __packed;
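/*
 * Illustrative sketch: a power table that allows the driver to sleep and,
 * per the flag description above, to sleep over DTIMs up to the listen
 * interval.  The timeout and sleep-interval values are arbitrary examples,
 * not documented defaults.
 */
static inline void iwl_example_powertable(struct iwl_powertable_cmd *cmd)
{
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
		     IWL_POWER_SLEEP_OVER_DTIM_MSK;
	cmd->rx_data_timeout = cpu_to_le32(1000);	/* example, usec */
	cmd->tx_data_timeout = cpu_to_le32(1000);	/* example, usec */
	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
		cmd->sleep_interval[i] = cpu_to_le32(i + 1);	/* example */
}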
2208
2209/*
2210 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
2211 * all devices identical.
2212 */
2213struct iwl_sleep_notification {
2214 u8 pm_sleep_mode;
2215 u8 pm_wakeup_src;
2216 __le16 reserved;
2217 __le32 sleep_time;
2218 __le32 tsf_low;
2219 __le32 bcon_timer;
2220} __packed;
2221
2222/* Sleep states. all devices identical. */
2223enum {
2224 IWL_PM_NO_SLEEP = 0,
2225 IWL_PM_SLP_MAC = 1,
2226 IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
2227 IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
2228 IWL_PM_SLP_PHY = 4,
2229 IWL_PM_SLP_REPENT = 5,
2230 IWL_PM_WAKEUP_BY_TIMER = 6,
2231 IWL_PM_WAKEUP_BY_DRIVER = 7,
2232 IWL_PM_WAKEUP_BY_RFKILL = 8,
2233 /* 3 reserved */
2234 IWL_PM_NUM_OF_MODES = 12,
2235};
2236
2237/*
2238 * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response)
2239 */
2240#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */
2241#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */
2242#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */
2243struct iwl_card_state_cmd {
2244 __le32 status; /* CARD_STATE_CMD_* request new power state */
2245} __packed;
2246
2247/*
2248 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
2249 */
2250struct iwl_card_state_notif {
2251 __le32 flags;
2252} __packed;
2253
2254#define HW_CARD_DISABLED 0x01
2255#define SW_CARD_DISABLED 0x02
2256#define CT_CARD_DISABLED 0x04
2257#define RXON_CARD_DISABLED 0x10
2258
2259struct iwl_ct_kill_config {
2260 __le32 reserved;
2261 __le32 critical_temperature_M;
2262 __le32 critical_temperature_R;
2263} __packed;
2264
2265/* 1000, and 6x00 */
2266struct iwl_ct_kill_throttling_config {
2267 __le32 critical_temperature_exit;
2268 __le32 reserved;
2269 __le32 critical_temperature_enter;
2270} __packed;
2271
2272/******************************************************************************
2273 * (8)
2274 * Scan Commands, Responses, Notifications:
2275 *
2276 *****************************************************************************/
2277
2278#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
2279#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2280
2281/**
2282 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
2283 *
2284 * One for each channel in the scan list.
2285 * Each channel can independently select:
2286 * 1) SSID for directed active scans
2287 * 2) Txpower setting (for rate specified within Tx command)
2288 * 3) How long to stay on-channel (behavior may be modified by quiet_time,
2289 * quiet_plcp_th, good_CRC_th)
2290 *
2291 * To avoid uCode errors, make sure the following are true (see comments
2292 * under struct iwl_scan_cmd about max_out_time and quiet_time):
2293 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2294 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2295 * 2) quiet_time <= active_dwell
2296 * 3) If restricting off-channel time (i.e. max_out_time !=0):
2297 * passive_dwell < max_out_time
2298 * active_dwell < max_out_time
2299 */
2300
2301struct iwl_scan_channel {
2302 /*
2303 * type is defined as:
2304 * 0:0 1 = active, 0 = passive
2305 * 1:20 SSID direct bit map; if a bit is set, then corresponding
2306 * SSID IE is transmitted in probe request.
2307 * 21:31 reserved
2308 */
2309 __le32 type;
2310 __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
2311 u8 tx_gain; /* gain for analog radio */
2312 u8 dsp_atten; /* gain for DSP */
2313 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2314 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2315} __packed;
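/*
 * Illustrative sketch: validating one channel entry against the timing
 * rules listed above before it is added to a scan command.  quiet_time and
 * max_out_time come from the enclosing struct iwl_scan_cmd; the helper name
 * is hypothetical.
 */
static inline bool iwl_example_dwell_ok(const struct iwl_scan_channel *ch,
					u16 quiet_time, u32 max_out_time)
{
	u16 active = le16_to_cpu(ch->active_dwell);
	u16 passive = le16_to_cpu(ch->passive_dwell);

	if (passive && active > passive)
		return false;		/* rule 1: active <= passive */
	if (quiet_time > active)
		return false;		/* rule 2: quiet_time <= active */
	if (max_out_time && (active >= max_out_time || passive >= max_out_time))
		return false;		/* rule 3: both < max_out_time */

	return true;
}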
2316
2317/* set number of direct probes __le32 type */
2318#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
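/*
 * For example (pure arithmetic): IWL_SCAN_PROBE_MASK(2) expands to
 * cpu_to_le32(BIT(2) | (BIT(2) - BIT(1))) == cpu_to_le32(0x6), i.e. SSID
 * direct bits 1 and 2 set, directing probe requests at the first two
 * entries of the direct_scan[] table in struct iwl_scan_cmd below.
 */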
2319
2320/**
2321 * struct iwl_ssid_ie - directed scan network information element
2322 *
2323 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
2324 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
2325 * each channel may select different ssids from among the 20 (4) entries.
2326 * SSID IEs get transmitted in reverse order of entry.
2327 */
2328struct iwl_ssid_ie {
2329 u8 id;
2330 u8 len;
2331 u8 ssid[32];
2332} __packed;
2333
2334#define PROBE_OPTION_MAX_3945 4
2335#define PROBE_OPTION_MAX 20
2336#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2337#define IWL_GOOD_CRC_TH_DISABLED 0
2338#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2339#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2340#define IWL_MAX_SCAN_SIZE 1024
2341#define IWL_MAX_CMD_SIZE 4096
2342
2343/*
2344 * REPLY_SCAN_CMD = 0x80 (command)
2345 *
2346 * The hardware scan command is very powerful; the driver can set it up to
2347 * maintain (relatively) normal network traffic while doing a scan in the
2348 * background. The max_out_time and suspend_time control the ratio of how
2349 * long the device stays on an associated network channel ("service channel")
2350 * vs. how long it's away from the service channel, i.e. tuned to other channels
2351 * for scanning.
2352 *
2353 * max_out_time is the max time off-channel (in usec), and suspend_time
2354 * is how long (in "extended beacon" format) that the scan is "suspended"
2355 * after returning to the service channel. That is, suspend_time is the
2356 * time that we stay on the service channel, doing normal work, between
2357 * scan segments. The driver may set these parameters differently to support
2358 * scanning when associated vs. not associated, and light vs. heavy traffic
2359 * loads when associated.
2360 *
2361 * After receiving this command, the device's scan engine does the following:
2362 *
2363 * 1) Sends SCAN_START notification to driver
2364 * 2) Checks to see if it has time to do scan for one channel
2365 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
2366 * to tell AP that we're going off-channel
2367 * 4) Tunes to first channel in scan list, does active or passive scan
2368 * 5) Sends SCAN_RESULT notification to driver
2369 * 6) Checks to see if it has time to do scan on *next* channel in list
2370 * 7) Repeats 4-6 until it no longer has time to scan the next channel
2371 * before max_out_time expires
2372 * 8) Returns to service channel
2373 * 9) Sends NULL packet with PS=0 to tell AP that we're back
2374 * 10) Stays on service channel until suspend_time expires
2375 * 11) Repeats entire process 2-10 until list is complete
2376 * 12) Sends SCAN_COMPLETE notification
2377 *
2378 * For fast, efficient scans, the scan command also has support for staying on
2379 * a channel for just a short time, if doing active scanning and getting no
2380 * responses to the transmitted probe request. This time is controlled by
2381 * quiet_time, and the number of received packets below which a channel is
2382 * considered "quiet" is controlled by quiet_plcp_threshold.
2383 *
2384 * For active scanning on channels that have regulatory restrictions against
2385 * blindly transmitting, the scan can listen before transmitting, to make sure
2386 * that there is already legitimate activity on the channel. If enough
2387 * packets are cleanly received on the channel (controlled by good_CRC_th,
2388 * typical value 1), the scan engine starts transmitting probe requests.
2389 *
2390 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2391 *
2392 * To avoid uCode errors, see timing restrictions described under
2393 * struct iwl_scan_channel.
2394 */
2395
2396enum iwl_scan_flags {
2397 /* BIT(0) currently unused */
2398 IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1),
2399 /* bits 2-7 reserved */
2400};
2401
2402struct iwl_scan_cmd {
2403 __le16 len;
2404 u8 scan_flags; /* scan flags: see enum iwl_scan_flags */
2405 u8 channel_count; /* # channels in channel list */
2406 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2407 * (only for active scan) */
2408 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2409 __le16 good_CRC_th; /* passive -> active promotion threshold */
2410 __le16 rx_chain; /* RXON_RX_CHAIN_* */
2411 __le32 max_out_time; /* max usec to be away from associated (service)
2412 * channel */
2413 __le32 suspend_time; /* pause scan this long (in "extended beacon
2414 * format") when returning to service chnl:
2415 * 3945; 31:24 # beacons, 19:0 additional usec,
2416 * 4965; 31:22 # beacons, 21:0 additional usec.
2417 */
2418 __le32 flags; /* RXON_FLG_* */
2419 __le32 filter_flags; /* RXON_FILTER_* */
2420
2421 /* For active scans (set to all-0s for passive scans).
2422 * Does not include payload. Must specify Tx rate; no rate scaling. */
2423 struct iwl_tx_cmd tx_cmd;
2424
2425 /* For directed active scans (set to all-0s otherwise) */
2426 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
2427
2428 /*
2429 * Probe request frame, followed by channel list.
2430 *
2431 * Size of probe request frame is specified by byte count in tx_cmd.
2432 * Channel list follows immediately after probe request frame.
2433 * Number of channels in list is specified by channel_count.
2434 * Each channel in list is of type:
2435 *
2436 * struct iwl_scan_channel channels[0];
2437 *
2438 * NOTE: Only one band of channels can be scanned per pass. You
2439 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2440 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2441 * before requesting another scan.
2442 */
2443 u8 data[0];
2444} __packed;
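/*
 * Illustrative sketch of assembling a REPLY_SCAN_CMD buffer: the probe
 * request frame is written into data[] right after the fixed part of the
 * command, and the channel list follows the probe request.  SSID setup, the
 * embedded tx_cmd fields other than len, and the IWL_MAX_SCAN_SIZE check are
 * left to the caller; the helper name is hypothetical.
 */
static inline u16 iwl_example_build_scan(struct iwl_scan_cmd *scan,
					 const u8 *probe, u16 probe_len,
					 const struct iwl_scan_channel *chan,
					 u8 n_channels)
{
	u8 *pos = scan->data;

	/* Probe request frame; its size is reported via tx_cmd.len. */
	memcpy(pos, probe, probe_len);
	scan->tx_cmd.len = cpu_to_le16(probe_len);
	pos += probe_len;

	/* Channel list immediately follows the probe request frame. */
	memcpy(pos, chan, n_channels * sizeof(*chan));
	scan->channel_count = n_channels;
	pos += n_channels * sizeof(*chan);

	/* Total command length (fixed part + probe + channels). */
	scan->len = cpu_to_le16((u16)(pos - (u8 *)scan));
	return (u16)(pos - (u8 *)scan);
}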
2445
2446/* Can abort will notify by complete notification with abort status. */
2447#define CAN_ABORT_STATUS cpu_to_le32(0x1)
2448/* complete notification statuses */
2449#define ABORT_STATUS 0x2
2450
2451/*
2452 * REPLY_SCAN_CMD = 0x80 (response)
2453 */
2454struct iwl_scanreq_notification {
2455 __le32 status; /* 1: okay, 2: cannot fulfill request */
2456} __packed;
2457
2458/*
2459 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
2460 */
2461struct iwl_scanstart_notification {
2462 __le32 tsf_low;
2463 __le32 tsf_high;
2464 __le32 beacon_timer;
2465 u8 channel;
2466 u8 band;
2467 u8 reserved[2];
2468 __le32 status;
2469} __packed;
2470
2471#define SCAN_OWNER_STATUS 0x1
2472#define MEASURE_OWNER_STATUS 0x2
2473
2474#define IWL_PROBE_STATUS_OK 0
2475#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
2476/* error statuses combined with TX_FAILED */
2477#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
2478#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
2479
2480#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
2481/*
2482 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
2483 */
2484struct iwl_scanresults_notification {
2485 u8 channel;
2486 u8 band;
2487 u8 probe_status;
2488 u8 num_probe_not_sent; /* not enough time to send */
2489 __le32 tsf_low;
2490 __le32 tsf_high;
2491 __le32 statistics[NUMBER_OF_STATISTICS];
2492} __packed;
2493
2494/*
2495 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
2496 */
2497struct iwl_scancomplete_notification {
2498 u8 scanned_channels;
2499 u8 status;
2500 u8 bt_status; /* BT On/Off status */
2501 u8 last_channel;
2502 __le32 tsf_low;
2503 __le32 tsf_high;
2504} __packed;
2505
2506
2507/******************************************************************************
2508 * (9)
2509 * IBSS/AP Commands and Notifications:
2510 *
2511 *****************************************************************************/
2512
2513enum iwl_ibss_manager {
2514 IWL_NOT_IBSS_MANAGER = 0,
2515 IWL_IBSS_MANAGER = 1,
2516};
2517
2518/*
2519 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
2520 */
2521
2522struct iwlagn_beacon_notif {
2523 struct iwlagn_tx_resp beacon_notify_hdr;
2524 __le32 low_tsf;
2525 __le32 high_tsf;
2526 __le32 ibss_mgr_status;
2527} __packed;
2528
2529/*
2530 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2531 */
2532
2533struct iwl_tx_beacon_cmd {
2534 struct iwl_tx_cmd tx;
2535 __le16 tim_idx;
2536 u8 tim_size;
2537 u8 reserved1;
2538 struct ieee80211_hdr frame[0]; /* beacon frame */
2539} __packed;
2540
2541/******************************************************************************
2542 * (10)
2543 * Statistics Commands and Notifications:
2544 *
2545 *****************************************************************************/
2546
2547#define IWL_TEMP_CONVERT 260
2548
2549#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
2550#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
2551#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
2552
2553/* Used for passing to driver number of successes and failures per rate */
2554struct rate_histogram {
2555 union {
2556 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2557 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2558 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2559 } success;
2560 union {
2561 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2562 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2563 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2564 } failed;
2565} __packed;
2566
2567/* statistics command response */
2568
2569struct statistics_dbg {
2570 __le32 burst_check;
2571 __le32 burst_count;
2572 __le32 wait_for_silence_timeout_cnt;
2573 __le32 reserved[3];
2574} __packed;
2575
2576struct statistics_rx_phy {
2577 __le32 ina_cnt;
2578 __le32 fina_cnt;
2579 __le32 plcp_err;
2580 __le32 crc32_err;
2581 __le32 overrun_err;
2582 __le32 early_overrun_err;
2583 __le32 crc32_good;
2584 __le32 false_alarm_cnt;
2585 __le32 fina_sync_err_cnt;
2586 __le32 sfd_timeout;
2587 __le32 fina_timeout;
2588 __le32 unresponded_rts;
2589 __le32 rxe_frame_limit_overrun;
2590 __le32 sent_ack_cnt;
2591 __le32 sent_cts_cnt;
2592 __le32 sent_ba_rsp_cnt;
2593 __le32 dsp_self_kill;
2594 __le32 mh_format_err;
2595 __le32 re_acq_main_rssi_sum;
2596 __le32 reserved3;
2597} __packed;
2598
2599struct statistics_rx_ht_phy {
2600 __le32 plcp_err;
2601 __le32 overrun_err;
2602 __le32 early_overrun_err;
2603 __le32 crc32_good;
2604 __le32 crc32_err;
2605 __le32 mh_format_err;
2606 __le32 agg_crc32_good;
2607 __le32 agg_mpdu_cnt;
2608 __le32 agg_cnt;
2609 __le32 unsupport_mcs;
2610} __packed;
2611
2612#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
2613
2614struct statistics_rx_non_phy {
2615 __le32 bogus_cts; /* CTS received when not expecting CTS */
2616 __le32 bogus_ack; /* ACK received when not expecting ACK */
2617 __le32 non_bssid_frames; /* number of frames with BSSID that
2618 * doesn't belong to the STA BSSID */
2619 __le32 filtered_frames; /* count frames that were dumped in the
2620 * filtering process */
2621 __le32 non_channel_beacons; /* beacons with our bss id but not on
2622 * our serving channel */
2623 __le32 channel_beacons; /* beacons with our bss id and in our
2624 * serving channel */
2625 __le32 num_missed_bcon; /* number of missed beacons */
2626 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
2627 * ADC was in saturation */
2628 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
2629 * for INA */
2630 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
2631 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
2632 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
2633 __le32 interference_data_flag; /* flag for interference data
2634 * availability. 1 when data is
2635 * available. */
2636 __le32 channel_load; /* counts RX Enable time in uSec */
2637 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
2638 * and CCK) counter */
2639 __le32 beacon_rssi_a;
2640 __le32 beacon_rssi_b;
2641 __le32 beacon_rssi_c;
2642 __le32 beacon_energy_a;
2643 __le32 beacon_energy_b;
2644 __le32 beacon_energy_c;
2645} __packed;
2646
2647struct statistics_rx_non_phy_bt {
2648 struct statistics_rx_non_phy common;
2649 /* additional stats for bt */
2650 __le32 num_bt_kills;
2651 __le32 reserved[2];
2652} __packed;
2653
2654struct statistics_rx {
2655 struct statistics_rx_phy ofdm;
2656 struct statistics_rx_phy cck;
2657 struct statistics_rx_non_phy general;
2658 struct statistics_rx_ht_phy ofdm_ht;
2659} __packed;
2660
2661struct statistics_rx_bt {
2662 struct statistics_rx_phy ofdm;
2663 struct statistics_rx_phy cck;
2664 struct statistics_rx_non_phy_bt general;
2665 struct statistics_rx_ht_phy ofdm_ht;
2666} __packed;
2667
2668/**
2669 * struct statistics_tx_power - current tx power
2670 *
2671 * @ant_a: current tx power on chain a in 1/2 dB step
2672 * @ant_b: current tx power on chain b in 1/2 dB step
2673 * @ant_c: current tx power on chain c in 1/2 dB step
2674 */
2675struct statistics_tx_power {
2676 u8 ant_a;
2677 u8 ant_b;
2678 u8 ant_c;
2679 u8 reserved;
2680} __packed;
2681
2682struct statistics_tx_non_phy_agg {
2683 __le32 ba_timeout;
2684 __le32 ba_reschedule_frames;
2685 __le32 scd_query_agg_frame_cnt;
2686 __le32 scd_query_no_agg;
2687 __le32 scd_query_agg;
2688 __le32 scd_query_mismatch;
2689 __le32 frame_not_ready;
2690 __le32 underrun;
2691 __le32 bt_prio_kill;
2692 __le32 rx_ba_rsp_cnt;
2693} __packed;
2694
2695struct statistics_tx {
2696 __le32 preamble_cnt;
2697 __le32 rx_detected_cnt;
2698 __le32 bt_prio_defer_cnt;
2699 __le32 bt_prio_kill_cnt;
2700 __le32 few_bytes_cnt;
2701 __le32 cts_timeout;
2702 __le32 ack_timeout;
2703 __le32 expected_ack_cnt;
2704 __le32 actual_ack_cnt;
2705 __le32 dump_msdu_cnt;
2706 __le32 burst_abort_next_frame_mismatch_cnt;
2707 __le32 burst_abort_missing_next_frame_cnt;
2708 __le32 cts_timeout_collision;
2709 __le32 ack_or_ba_timeout_collision;
2710 struct statistics_tx_non_phy_agg agg;
2711 /*
2712	 * "tx_power" is an optional parameter provided by uCode.
2713	 * The 6000 series is the only device family that provides it;
2714	 * these are reserved fields for all other devices.
2715 */
2716 struct statistics_tx_power tx_power;
2717 __le32 reserved1;
2718} __packed;
2719
2720
2721struct statistics_div {
2722 __le32 tx_on_a;
2723 __le32 tx_on_b;
2724 __le32 exec_time;
2725 __le32 probe_time;
2726 __le32 reserved1;
2727 __le32 reserved2;
2728} __packed;
2729
2730struct statistics_general_common {
2731 __le32 temperature; /* radio temperature */
2732 __le32 temperature_m; /* for 5000 and up, this is radio voltage */
2733 struct statistics_dbg dbg;
2734 __le32 sleep_time;
2735 __le32 slots_out;
2736 __le32 slots_idle;
2737 __le32 ttl_timestamp;
2738 struct statistics_div div;
2739 __le32 rx_enable_counter;
2740 /*
2741 * num_of_sos_states:
2742 * count the number of times we have to re-tune
2743 * in order to get out of bad PHY status
2744 */
2745 __le32 num_of_sos_states;
2746} __packed;
2747
2748struct statistics_bt_activity {
2749 /* Tx statistics */
2750 __le32 hi_priority_tx_req_cnt;
2751 __le32 hi_priority_tx_denied_cnt;
2752 __le32 lo_priority_tx_req_cnt;
2753 __le32 lo_priority_tx_denied_cnt;
2754 /* Rx statistics */
2755 __le32 hi_priority_rx_req_cnt;
2756 __le32 hi_priority_rx_denied_cnt;
2757 __le32 lo_priority_rx_req_cnt;
2758 __le32 lo_priority_rx_denied_cnt;
2759} __packed;
2760
2761struct statistics_general {
2762 struct statistics_general_common common;
2763 __le32 reserved2;
2764 __le32 reserved3;
2765} __packed;
2766
2767struct statistics_general_bt {
2768 struct statistics_general_common common;
2769 struct statistics_bt_activity activity;
2770 __le32 reserved2;
2771 __le32 reserved3;
2772} __packed;
2773
2774#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
2775#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
2776#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
2777
2778/*
2779 * REPLY_STATISTICS_CMD = 0x9c,
2780 * all devices identical.
2781 *
2782 * This command triggers an immediate response containing uCode statistics.
2783 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
2784 *
2785 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2786 * internal copy of the statistics (counters) after issuing the response.
2787 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
2788 *
2789 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2790 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
2791 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
2792 */
2793#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2794#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
2795struct iwl_statistics_cmd {
2796 __le32 configuration_flags; /* IWL_STATS_CONF_* */
2797} __packed;
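As an illustration of the configuration flags above, here is a minimal sketch (not part of the original patch) that requests statistics and asks uCode to clear its counters; it assumes the REPLY_STATISTICS_CMD id named in the comment above and the trans_send_cmd_pdu()/CMD_SYNC helpers used elsewhere in this series:

static int example_request_and_clear_statistics(struct iwl_priv *priv)
{
	struct iwl_statistics_cmd cmd = {
		/* ask uCode to clear its internal counters after responding */
		.configuration_flags = IWL_STATS_CONF_CLEAR_STATS,
	};

	return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
				  CMD_SYNC, sizeof(cmd), &cmd);
}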
2798
2799/*
2800 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
2801 *
2802 * By default, uCode issues this notification after receiving a beacon
2803 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2804 * REPLY_STATISTICS_CMD 0x9c, above.
2805 *
2806 * Statistics counters continue to increment beacon after beacon, but are
2807 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
2808 * 0x9c with CLEAR_STATS bit set (see above).
2809 *
2810 * uCode also issues this notification during scans. uCode clears statistics
2811 * appropriately so that each notification contains statistics for only the
2812 * one channel that has just been scanned.
2813 */
2814#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
2815#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
2816
2817struct iwl_notif_statistics {
2818 __le32 flag;
2819 struct statistics_rx rx;
2820 struct statistics_tx tx;
2821 struct statistics_general general;
2822} __packed;
2823
2824struct iwl_bt_notif_statistics {
2825 __le32 flag;
2826 struct statistics_rx_bt rx;
2827 struct statistics_tx tx;
2828 struct statistics_general_bt general;
2829} __packed;
2830
2831/*
2832 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
2833 *
2834 * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
2835 * missed beacons, regardless of how many were missed. Inside the
2836 * notification the driver can find all of the beacon information: the total
2837 * number of missed beacons, the number of consecutive missed beacons, the
2838 * number of beacons received, and the number of beacons expected.
2839 *
2840 * If uCode detects consecutive_missed_beacons > 5, it resets the radio to
2841 * bring the radio/PHY back to a working state; this has no relation to when
2842 * the driver performs sensitivity calibration.
2843 *
2844 * The driver should set its own missed_beacon_threshold to decide when to
2845 * perform sensitivity calibration, based on the number of consecutive missed
2846 * beacons, in order to improve overall performance, especially in noisy
2847 * environments.
2848 *
2849 */
2850
2851#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
2852#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
2853#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
2854
2855struct iwl_missed_beacon_notif {
2856 __le32 consecutive_missed_beacons;
2857 __le32 total_missed_becons;
2858 __le32 num_expected_beacons;
2859 __le32 num_recvd_beacons;
2860} __packed;
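A hedged sketch of how a driver might apply the threshold bounds above to this notification; the helper name is illustrative only, and the real calibration trigger lives in the driver's sensitivity code:

static bool example_should_recalibrate(const struct iwl_missed_beacon_notif *notif,
				       u32 threshold)
{
	/* clamp the caller-chosen threshold to the documented range */
	if (threshold < IWL_MISSED_BEACON_THRESHOLD_MIN)
		threshold = IWL_MISSED_BEACON_THRESHOLD_MIN;
	if (threshold > IWL_MISSED_BEACON_THRESHOLD_MAX)
		threshold = IWL_MISSED_BEACON_THRESHOLD_MAX;

	/* run sensitivity calibration once the consecutive count crosses it */
	return le32_to_cpu(notif->consecutive_missed_beacons) >= threshold;
}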
2861
2862
2863/******************************************************************************
2864 * (11)
2865 * Rx Calibration Commands:
2866 *
2867 * With the uCode used for open source drivers, most Tx calibration (except
2868 * for Tx Power) and most Rx calibration is done by uCode during the
2869 * "initialize" phase of uCode boot. Driver must calibrate only:
2870 *
2871 * 1) Tx power (depends on temperature), described elsewhere
2872 * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
2873 * 3) Receiver sensitivity (to optimize signal detection)
2874 *
2875 *****************************************************************************/
2876
2877/**
2878 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
2879 *
2880 * This command sets up the Rx signal detector for a sensitivity level that
2881 * is high enough to lock onto all signals within the associated network,
2882 * but low enough to ignore signals that are below a certain threshold, so as
2883 * not to have too many "false alarms". False alarms are signals that the
2884 * Rx DSP tries to lock onto, but then discards after determining that they
2885 * are noise.
2886 *
2887 * The optimum number of false alarms is between 5 and 50 per 200 TUs
2888 * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
2889 * time listening, not transmitting). Driver must adjust sensitivity so that
2890 * the ratio of actual false alarms to actual Rx time falls within this range.
2891 *
2892 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
2893 * received beacon. These provide information to the driver to analyze the
2894 * sensitivity. Don't analyze statistics that come in from scanning, or any
2895 * other non-associated-network source. Pertinent statistics include:
2896 *
2897 * From "general" statistics (struct statistics_rx_non_phy):
2898 *
2899 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
2900 * Measure of energy of desired signal. Used for establishing a level
2901 * below which the device does not detect signals.
2902 *
2903 * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
2904 * Measure of background noise in silent period after beacon.
2905 *
2906 * channel_load
2907 * uSecs of actual Rx time during beacon period (varies according to
2908 * how much time was spent transmitting).
2909 *
2910 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
2911 *
2912 * false_alarm_cnt
2913 * Signal locks abandoned early (before phy-level header).
2914 *
2915 * plcp_err
2916 * Signal locks abandoned late (during phy-level header).
2917 *
2918 * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
2919 * beacon to beacon, i.e. each value is an accumulation of all errors
2920 * before and including the latest beacon. Values will wrap around to 0
2921 * after counting up to 2^32 - 1. Driver must differentiate vs.
2922 * previous beacon's values to determine # false alarms in the current
2923 * beacon period.
2924 *
2925 * Total number of false alarms = false_alarms + plcp_errs
2926 *
2927 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
2928 * (notice that the start points for OFDM are at or close to settings for
2929 * maximum sensitivity):
2930 *
2931 * START / MIN / MAX
2932 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
2933 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
2934 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
2935 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
2936 *
2937 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
2938 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
2939 * by *adding* 1 to all 4 of the table entries above, up to the max for
2940 * each entry. Conversely, if false alarm rate is too low (less than 5
2941 * for each 204.8 msecs listening), *subtract* 1 from each entry to
2942 * increase sensitivity.
2943 *
2944 * For CCK sensitivity, keep track of the following:
2945 *
2946 * 1). 20-beacon history of maximum background noise, indicated by
2947 * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
2948 * 3 receivers. For any given beacon, the "silence reference" is
2949 * the maximum of last 60 samples (20 beacons * 3 receivers).
2950 *
2951 * 2). 10-beacon history of strongest signal level, as indicated
2952 * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
2953 * i.e. the strength of the signal through the best receiver at the
2954 * moment. These measurements are "upside down", with lower values
2955 * for stronger signals, so max energy will be *minimum* value.
2956 *
2957 * Then for any given beacon, the driver must determine the *weakest*
2958 * of the strongest signals; this is the minimum level that needs to be
2959 * successfully detected, when using the best receiver at the moment.
2960 * "Max cck energy" is the maximum (higher value means lower energy!)
2961 * of the last 10 minima. Once this is determined, driver must add
2962 * a little margin by adding "6" to it.
2963 *
2964 * 3). Number of consecutive beacon periods with too few false alarms.
2965 * Reset this to 0 at the first beacon period that falls within the
2966 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
2967 *
2968 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
2969 * (notice that the start points for CCK are at maximum sensitivity):
2970 *
2971 * START / MIN / MAX
2972 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
2973 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
2974 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
2975 *
2976 * If actual rate of CCK false alarms (+ plcp_errors) is too high
2977 * (greater than 50 for each 204.8 msecs listening), method for reducing
2978 * sensitivity is:
2979 *
2980 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
2981 * up to max 400.
2982 *
2983 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
2984 * sensitivity has been reduced a significant amount; bring it up to
2985 * a moderate 161. Otherwise, *add* 3, up to max 200.
2986 *
2987 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
2988 * sensitivity has been reduced only a moderate or small amount;
2989 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
2990 * down to min 0. Otherwise (if gain has been significantly reduced),
2991 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
2992 *
2993 * b) Save a snapshot of the "silence reference".
2994 *
2995 * If actual rate of CCK false alarms (+ plcp_errors) is too low
2996 * (less than 5 for each 204.8 msecs listening), method for increasing
2997 * sensitivity is used only if:
2998 *
2999 * 1a) Previous beacon did not have too many false alarms
3000 * 1b) AND difference between previous "silence reference" and current
3001 * "silence reference" (prev - current) is 2 or more,
3002 * OR 2) 100 or more consecutive beacon periods have had rate of
3003 * less than 5 false alarms per 204.8 milliseconds rx time.
3004 *
3005 * Method for increasing sensitivity:
3006 *
3007 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
3008 * down to min 125.
3009 *
3010 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3011 * down to min 200.
3012 *
3013 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
3014 *
3015 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3016 * (between 5 and 50 for each 204.8 msecs listening):
3017 *
3018 * 1) Save a snapshot of the silence reference.
3019 *
3020 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3021 * give some extra margin to energy threshold by *subtracting* 8
3022 * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
3023 *
3024 * For all cases (too few, too many, good range), make sure that the CCK
3025 * detection threshold (energy) is below the energy level for robust
3026 * detection over the past 10 beacon periods, the "Max cck energy".
3027 * Lower values mean higher energy; this means making sure that the value
3028 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3029 *
3030 */
3031
3032/*
3033 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
3034 */
3035#define HD_TABLE_SIZE (11) /* number of entries */
3036#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
3037#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
3038#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
3039#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
3040#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
3041#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
3042#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
3043#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
3044#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
3045#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
3046#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
3047
3048/*
3049 * Additional table entries in the enhanced SENSITIVITY_CMD
3050 */
3051#define HD_INA_NON_SQUARE_DET_OFDM_INDEX (11)
3052#define HD_INA_NON_SQUARE_DET_CCK_INDEX (12)
3053#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX (13)
3054#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX (14)
3055#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (15)
3056#define HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX (16)
3057#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX (17)
3058#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX (18)
3059#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (19)
3060#define HD_CCK_NON_SQUARE_DET_SLOPE_INDEX (20)
3061#define HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX (21)
3062#define HD_RESERVED (22)
3063
3064/* number of entries for enhanced tbl */
3065#define ENHANCE_HD_TABLE_SIZE (23)
3066
3067/* number of additional entries for enhanced tbl */
3068#define ENHANCE_HD_TABLE_ENTRIES (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE)
3069
3070#define HD_INA_NON_SQUARE_DET_OFDM_DATA cpu_to_le16(0)
3071#define HD_INA_NON_SQUARE_DET_CCK_DATA cpu_to_le16(0)
3072#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA cpu_to_le16(0)
3073#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(668)
3074#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4)
3075#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(486)
3076#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(37)
3077#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(853)
3078#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4)
3079#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(476)
3080#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(99)
3081
3082
3083/* Control field in struct iwl_sensitivity_cmd */
3084#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
3085#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
3086
3087/**
3088 * struct iwl_sensitivity_cmd
3089 * @control: (1) updates working table, (0) updates default table
3090 * @table: energy threshold values, use HD_* as index into table
3091 *
3092 * Always use "1" in "control" to update uCode's working table and DSP.
3093 */
3094struct iwl_sensitivity_cmd {
3095 __le16 control; /* always use "1" */
3096 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
3097} __packed;
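To make the OFDM adjustment rule above concrete, a simplified sketch follows. The host-order working table, the helper name, and the per-200-TU normalization (which ignores 32-bit overflow for very large counts) are assumptions for illustration; the real driver keeps this state in its sensitivity data and re-sends SENSITIVITY_CMD afterwards.

static void example_adjust_ofdm_sensitivity(u16 *table, u32 false_alarms,
					    u32 rx_time_usecs)
{
	static const u8 idx[] = {
		HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX,
		HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX,
		HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX,
		HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX,
	};
	static const u16 lo[] = {  85, 170, 105, 220 };	/* MIN column above */
	static const u16 hi[] = { 120, 210, 140, 270 };	/* MAX column above */
	/* normalize to false alarms per 200 TU (204.8 ms) of listen time */
	u32 per_200tu = rx_time_usecs ? (false_alarms * 204800u) / rx_time_usecs : 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (per_200tu > 50 && table[idx[i]] < hi[i])
			table[idx[i]]++;	/* too many false alarms: less sensitive */
		else if (per_200tu < 5 && table[idx[i]] > lo[i])
			table[idx[i]]--;	/* too few false alarms: more sensitive */
	}
}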
3098
3099/*
3100 * Enhanced SENSITIVITY_CMD payload, using the additional table entries above.
3101 */
3102struct iwl_enhance_sensitivity_cmd {
3103 __le16 control; /* always use "1" */
3104 __le16 enhance_table[ENHANCE_HD_TABLE_SIZE]; /* use HD_* as index */
3105} __packed;
3106
3107
3108/**
3109 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
3110 *
3111 * This command sets the relative gains of agn device's 3 radio receiver chains.
3112 *
3113 * After the first association, driver should accumulate signal and noise
3114 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
3115 * beacons from the associated network (don't collect statistics that come
3116 * in from scanning, or any other non-network source).
3117 *
3118 * DISCONNECTED ANTENNA:
3119 *
3120 * Driver should determine which antennas are actually connected, by comparing
3121 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3122 * following values over 20 beacons, one accumulator for each of the chains
3123 * a/b/c, from struct statistics_rx_non_phy:
3124 *
3125 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3126 *
3127 * Find the strongest signal from among a/b/c. Compare the other two to the
3128 * strongest. If any signal is more than 15 dB (times 20, unless you
3129 * divide the accumulated values by 20) below the strongest, the driver
3130 * considers that antenna to be disconnected, and should not try to use that
3131 * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
3132 * driver should declare the stronger one as connected, and attempt to use it
3133 * (A and B are the only 2 Tx chains!).
3134 *
3135 *
3136 * RX BALANCE:
3137 *
3138 * Driver should balance the 3 receivers (but just the ones that are connected
3139 * to antennas, see above) for gain, by comparing the average signal levels
3140 * detected during the silence after each beacon (background noise).
3141 * Accumulate (add) the following values over 20 beacons, one accumulator for
3142 * each of the chains a/b/c, from struct statistics_rx_non_phy:
3143 *
3144 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3145 *
3146 * Find the weakest background noise level from among a/b/c. This Rx chain
3147 * will be the reference, with 0 gain adjustment. Attenuate other channels by
3148 * finding noise difference:
3149 *
3150 * (accum_noise[i] - accum_noise[reference]) / 30
3151 *
3152 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3153 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
3154 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3155 * and set bit 2 to indicate "reduce gain". The value for the reference
3156 * (weakest) chain should be "0".
3157 *
3158 * diff_gain_[abc] bit fields:
3159 * 2: (1) reduce gain, (0) increase gain
3160 * 1-0: amount of gain, units of 1.5 dB
3161 */
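A minimal sketch of the RX-balance arithmetic described above, assuming three accumulated 20-beacon silence totals are already available; the function name and the array-based interface are hypothetical:

static void example_compute_rx_balance(const u32 accum_noise[3], u8 diff_gain[3])
{
	int ref = 0, i;

	/* the quietest (weakest noise) chain is the reference: no adjustment */
	for (i = 1; i < 3; i++)
		if (accum_noise[i] < accum_noise[ref])
			ref = i;

	for (i = 0; i < 3; i++) {
		/* 20 samples, units of 1.5 dB -> divide the difference by 30 */
		u32 delta = (accum_noise[i] - accum_noise[ref]) / 30;

		if (delta > 3)
			delta = 3;	/* limit to 0..3 (0..4.5 dB) */
		/* bit 2 set = reduce gain; reference chain stays at 0 */
		diff_gain[i] = (i == ref) ? 0 : (BIT(2) | delta);
	}
}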
3162
3163/* PHY calibration command op codes */
3164/* Default calibration table size if not specified by firmware */
3165#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3166enum {
3167 IWL_PHY_CALIBRATE_DC_CMD = 8,
3168 IWL_PHY_CALIBRATE_LO_CMD = 9,
3169 IWL_PHY_CALIBRATE_TX_IQ_CMD = 11,
3170 IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
3171 IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
3172 IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
3173 IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD = 18,
3174 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3175};
3176
3177#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3178
3179/* This enum defines the bitmap of various calibrations to enable in both
3180 * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
3181 */
3182enum iwl_ucode_calib_cfg {
3183 IWL_CALIB_CFG_RX_BB_IDX = BIT(0),
3184 IWL_CALIB_CFG_DC_IDX = BIT(1),
3185 IWL_CALIB_CFG_LO_IDX = BIT(2),
3186 IWL_CALIB_CFG_TX_IQ_IDX = BIT(3),
3187 IWL_CALIB_CFG_RX_IQ_IDX = BIT(4),
3188 IWL_CALIB_CFG_NOISE_IDX = BIT(5),
3189 IWL_CALIB_CFG_CRYSTAL_IDX = BIT(6),
3190 IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(7),
3191 IWL_CALIB_CFG_PAPD_IDX = BIT(8),
3192 IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(9),
3193 IWL_CALIB_CFG_TX_PWR_IDX = BIT(10),
3194};
3195
3196#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \
3197 IWL_CALIB_CFG_DC_IDX | \
3198 IWL_CALIB_CFG_LO_IDX | \
3199 IWL_CALIB_CFG_TX_IQ_IDX | \
3200 IWL_CALIB_CFG_RX_IQ_IDX | \
3201 IWL_CALIB_CFG_NOISE_IDX | \
3202 IWL_CALIB_CFG_CRYSTAL_IDX | \
3203 IWL_CALIB_CFG_TEMPERATURE_IDX | \
3204 IWL_CALIB_CFG_PAPD_IDX | \
3205 IWL_CALIB_CFG_SENSITIVITY_IDX | \
3206 IWL_CALIB_CFG_TX_PWR_IDX)
3207
3208#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0))
3209
3210struct iwl_calib_cfg_elmnt_s {
3211 __le32 is_enable;
3212 __le32 start;
3213 __le32 send_res;
3214 __le32 apply_res;
3215 __le32 reserved;
3216} __packed;
3217
3218struct iwl_calib_cfg_status_s {
3219 struct iwl_calib_cfg_elmnt_s once;
3220 struct iwl_calib_cfg_elmnt_s perd;
3221 __le32 flags;
3222} __packed;
3223
3224struct iwl_calib_cfg_cmd {
3225 struct iwl_calib_cfg_status_s ucd_calib_cfg;
3226 struct iwl_calib_cfg_status_s drv_calib_cfg;
3227 __le32 reserved1;
3228} __packed;
3229
3230struct iwl_calib_hdr {
3231 u8 op_code;
3232 u8 first_group;
3233 u8 groups_num;
3234 u8 data_valid;
3235} __packed;
3236
3237struct iwl_calib_cmd {
3238 struct iwl_calib_hdr hdr;
3239 u8 data[0];
3240} __packed;
3241
3242struct iwl_calib_xtal_freq_cmd {
3243 struct iwl_calib_hdr hdr;
3244 u8 cap_pin1;
3245 u8 cap_pin2;
3246 u8 pad[2];
3247} __packed;
3248
3249#define DEFAULT_RADIO_SENSOR_OFFSET cpu_to_le16(2700)
3250struct iwl_calib_temperature_offset_cmd {
3251 struct iwl_calib_hdr hdr;
3252 __le16 radio_sensor_offset;
3253 __le16 reserved;
3254} __packed;
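A hedged sketch of filling this command with the default sensor offset; the op_code comes from the PHY calibration enum above, and the helper name is illustrative:

static void example_fill_temp_offset(struct iwl_calib_temperature_offset_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.op_code = IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD;
	cmd->radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
}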
3255
3256/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
3257struct iwl_calib_chain_noise_reset_cmd {
3258 struct iwl_calib_hdr hdr;
3259 u8 data[0];
3260};
3261
3262/* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */
3263struct iwl_calib_chain_noise_gain_cmd {
3264 struct iwl_calib_hdr hdr;
3265 u8 delta_gain_1;
3266 u8 delta_gain_2;
3267 u8 pad[2];
3268} __packed;
3269
3270/******************************************************************************
3271 * (12)
3272 * Miscellaneous Commands:
3273 *
3274 *****************************************************************************/
3275
3276/*
3277 * LEDs Command & Response
3278 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
3279 *
3280 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3281 * this command turns it on or off, or sets up a periodic blinking cycle.
3282 */
3283struct iwl_led_cmd {
3284 __le32 interval; /* "interval" in uSec */
3285 u8 id; /* 1: Activity, 2: Link, 3: Tech */
3286 u8 off; /* # intervals off while blinking;
3287 * "0", with >0 "on" value, turns LED on */
3288 u8 on; /* # intervals on while blinking;
3289 * "0", regardless of "off", turns LED off */
3290 u8 reserved;
3291} __packed;
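A short sketch of the interval/on/off encoding documented above, setting up a simple 50% duty-cycle blink on the "Link" LED; the interval value and helper name are arbitrary illustrations, not driver defaults:

static void example_fill_led_blink(struct iwl_led_cmd *led)
{
	memset(led, 0, sizeof(*led));
	led->interval = cpu_to_le32(1000);	/* "interval" units, per the comment above */
	led->id = 2;				/* 2: Link LED */
	led->on = 5;				/* 5 intervals on ... */
	led->off = 5;				/* ... then 5 intervals off, repeating */
}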
3292
3293/*
3294 * station priority table entries
3295 * also used as potential "events" value for both
3296 * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD
3297 */
3298
3299/*
3300 * COEX events entry flag masks
3301 * RP - Requested Priority
3302 * WP - Win Medium Priority: priority assigned when the contention has been won
3303 */
3304#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG (0x1)
3305#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG (0x2)
3306#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG (0x4)
3307
3308#define COEX_CU_UNASSOC_IDLE_RP 4
3309#define COEX_CU_UNASSOC_MANUAL_SCAN_RP 4
3310#define COEX_CU_UNASSOC_AUTO_SCAN_RP 4
3311#define COEX_CU_CALIBRATION_RP 4
3312#define COEX_CU_PERIODIC_CALIBRATION_RP 4
3313#define COEX_CU_CONNECTION_ESTAB_RP 4
3314#define COEX_CU_ASSOCIATED_IDLE_RP 4
3315#define COEX_CU_ASSOC_MANUAL_SCAN_RP 4
3316#define COEX_CU_ASSOC_AUTO_SCAN_RP 4
3317#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP 4
3318#define COEX_CU_RF_ON_RP 6
3319#define COEX_CU_RF_OFF_RP 4
3320#define COEX_CU_STAND_ALONE_DEBUG_RP 6
3321#define COEX_CU_IPAN_ASSOC_LEVEL_RP 4
3322#define COEX_CU_RSRVD1_RP 4
3323#define COEX_CU_RSRVD2_RP 4
3324
3325#define COEX_CU_UNASSOC_IDLE_WP 3
3326#define COEX_CU_UNASSOC_MANUAL_SCAN_WP 3
3327#define COEX_CU_UNASSOC_AUTO_SCAN_WP 3
3328#define COEX_CU_CALIBRATION_WP 3
3329#define COEX_CU_PERIODIC_CALIBRATION_WP 3
3330#define COEX_CU_CONNECTION_ESTAB_WP 3
3331#define COEX_CU_ASSOCIATED_IDLE_WP 3
3332#define COEX_CU_ASSOC_MANUAL_SCAN_WP 3
3333#define COEX_CU_ASSOC_AUTO_SCAN_WP 3
3334#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP 3
3335#define COEX_CU_RF_ON_WP 3
3336#define COEX_CU_RF_OFF_WP 3
3337#define COEX_CU_STAND_ALONE_DEBUG_WP 6
3338#define COEX_CU_IPAN_ASSOC_LEVEL_WP 3
3339#define COEX_CU_RSRVD1_WP 3
3340#define COEX_CU_RSRVD2_WP 3
3341
3342#define COEX_UNASSOC_IDLE_FLAGS 0
3343#define COEX_UNASSOC_MANUAL_SCAN_FLAGS \
3344 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3345 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3346#define COEX_UNASSOC_AUTO_SCAN_FLAGS \
3347 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3348 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3349#define COEX_CALIBRATION_FLAGS \
3350 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3351 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3352#define COEX_PERIODIC_CALIBRATION_FLAGS 0
3353/*
3354 * COEX_CONNECTION_ESTAB:
3355 * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
3356 */
3357#define COEX_CONNECTION_ESTAB_FLAGS \
3358 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3359 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3360 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3361#define COEX_ASSOCIATED_IDLE_FLAGS 0
3362#define COEX_ASSOC_MANUAL_SCAN_FLAGS \
3363 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3364 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3365#define COEX_ASSOC_AUTO_SCAN_FLAGS \
3366 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3367 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3368#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0
3369#define COEX_RF_ON_FLAGS 0
3370#define COEX_RF_OFF_FLAGS 0
3371#define COEX_STAND_ALONE_DEBUG_FLAGS \
3372 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3373 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3374#define COEX_IPAN_ASSOC_LEVEL_FLAGS \
3375 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3376 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3377 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3378#define COEX_RSRVD1_FLAGS 0
3379#define COEX_RSRVD2_FLAGS 0
3380/*
3381 * COEX_CU_RF_ON is the event wrapping all radio ownership.
3382 * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
3383 */
3384#define COEX_CU_RF_ON_FLAGS \
3385 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3386 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3387 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3388
3389
3390enum {
3391 /* un-association part */
3392 COEX_UNASSOC_IDLE = 0,
3393 COEX_UNASSOC_MANUAL_SCAN = 1,
3394 COEX_UNASSOC_AUTO_SCAN = 2,
3395 /* calibration */
3396 COEX_CALIBRATION = 3,
3397 COEX_PERIODIC_CALIBRATION = 4,
3398 /* connection */
3399 COEX_CONNECTION_ESTAB = 5,
3400 /* association part */
3401 COEX_ASSOCIATED_IDLE = 6,
3402 COEX_ASSOC_MANUAL_SCAN = 7,
3403 COEX_ASSOC_AUTO_SCAN = 8,
3404 COEX_ASSOC_ACTIVE_LEVEL = 9,
3405 /* RF ON/OFF */
3406 COEX_RF_ON = 10,
3407 COEX_RF_OFF = 11,
3408 COEX_STAND_ALONE_DEBUG = 12,
3409 /* IPAN */
3410 COEX_IPAN_ASSOC_LEVEL = 13,
3411 /* reserved */
3412 COEX_RSRVD1 = 14,
3413 COEX_RSRVD2 = 15,
3414 COEX_NUM_OF_EVENTS = 16
3415};
3416
3417/*
3418 * Coexistence WIFI/WIMAX Command
3419 * COEX_PRIORITY_TABLE_CMD = 0x5a
3420 *
3421 */
3422struct iwl_wimax_coex_event_entry {
3423 u8 request_prio;
3424 u8 win_medium_prio;
3425 u8 reserved;
3426 u8 flags;
3427} __packed;
3428
3429/* COEX flag masks */
3430
3431/* Station table is valid */
3432#define COEX_FLAGS_STA_TABLE_VALID_MSK (0x1)
3433/* UnMask wake up src at unassociated sleep */
3434#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK (0x4)
3435/* UnMask wake up src at associated sleep */
3436#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK (0x8)
3437/* Enable CoEx feature. */
3438#define COEX_FLAGS_COEX_ENABLE_MSK (0x80)
3439
3440struct iwl_wimax_coex_cmd {
3441 u8 flags;
3442 u8 reserved[3];
3443 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
3444} __packed;
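A hedged sketch of how the per-event RP/WP/FLAGS values above map onto struct iwl_wimax_coex_event_entry when building the 0x5a command; only two representative events are filled in, and the helper name is illustrative:

static void example_fill_coex_entries(struct iwl_wimax_coex_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = COEX_FLAGS_COEX_ENABLE_MSK | COEX_FLAGS_STA_TABLE_VALID_MSK;

	cmd->sta_prio[COEX_UNASSOC_IDLE].request_prio = COEX_CU_UNASSOC_IDLE_RP;
	cmd->sta_prio[COEX_UNASSOC_IDLE].win_medium_prio = COEX_CU_UNASSOC_IDLE_WP;
	cmd->sta_prio[COEX_UNASSOC_IDLE].flags = COEX_UNASSOC_IDLE_FLAGS;

	cmd->sta_prio[COEX_CALIBRATION].request_prio = COEX_CU_CALIBRATION_RP;
	cmd->sta_prio[COEX_CALIBRATION].win_medium_prio = COEX_CU_CALIBRATION_WP;
	cmd->sta_prio[COEX_CALIBRATION].flags = COEX_CALIBRATION_FLAGS;
}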
3445
3446/*
3447 * Coexistence MEDIUM NOTIFICATION
3448 * COEX_MEDIUM_NOTIFICATION = 0x5b
3449 *
3450 * notification from uCode to host to indicate medium changes
3451 *
3452 */
3453/*
3454 * status field
3455 * bit 0 - 2: medium status
3456 * bit 3: medium change indication
3457 * bit 4 - 31: reserved
3458 */
3459/* status option values, (0 - 2 bits) */
3460#define COEX_MEDIUM_BUSY (0x0) /* radio belongs to WiMAX */
3461#define COEX_MEDIUM_ACTIVE (0x1) /* radio belongs to WiFi */
3462#define COEX_MEDIUM_PRE_RELEASE (0x2) /* received radio release */
3463#define COEX_MEDIUM_MSK (0x7)
3464
3465/* send notification status (1 bit) */
3466#define COEX_MEDIUM_CHANGED (0x8)
3467#define COEX_MEDIUM_CHANGED_MSK (0x8)
3468#define COEX_MEDIUM_SHIFT (3)
3469
3470struct iwl_coex_medium_notification {
3471 __le32 status;
3472 __le32 events;
3473} __packed;
3474
3475/*
3476 * Coexistence EVENT Command
3477 * COEX_EVENT_CMD = 0x5c
3478 *
3479 * Sent from host to uCode to request a coex event.
3480 */
3481/* flags options */
3482#define COEX_EVENT_REQUEST_MSK (0x1)
3483
3484struct iwl_coex_event_cmd {
3485 u8 flags;
3486 u8 event;
3487 __le16 reserved;
3488} __packed;
3489
3490struct iwl_coex_event_resp {
3491 __le32 status;
3492} __packed;
3493
3494
3495/******************************************************************************
3496 * Bluetooth Coexistence commands
3497 *
3498 *****************************************************************************/
3499
3500/*
3501 * BT Status notification
3502 * REPLY_BT_COEX_PROFILE_NOTIF = 0xce
3503 */
3504enum iwl_bt_coex_profile_traffic_load {
3505 IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0,
3506 IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1,
3507 IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2,
3508 IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3,
3509/*
3510 * There are no further values: although the field below is a u8, the
3511 * indication from the BT device only has two bits.
3512 */
3513};
3514
3515#define BT_SESSION_ACTIVITY_1_UART_MSG 0x1
3516#define BT_SESSION_ACTIVITY_2_UART_MSG 0x2
3517
3518/* BT UART message - Share Part (BT -> WiFi) */
3519#define BT_UART_MSG_FRAME1MSGTYPE_POS (0)
3520#define BT_UART_MSG_FRAME1MSGTYPE_MSK \
3521 (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
3522#define BT_UART_MSG_FRAME1SSN_POS (3)
3523#define BT_UART_MSG_FRAME1SSN_MSK \
3524 (0x3 << BT_UART_MSG_FRAME1SSN_POS)
3525#define BT_UART_MSG_FRAME1UPDATEREQ_POS (5)
3526#define BT_UART_MSG_FRAME1UPDATEREQ_MSK \
3527 (0x1 << BT_UART_MSG_FRAME1UPDATEREQ_POS)
3528#define BT_UART_MSG_FRAME1RESERVED_POS (6)
3529#define BT_UART_MSG_FRAME1RESERVED_MSK \
3530 (0x3 << BT_UART_MSG_FRAME1RESERVED_POS)
3531
3532#define BT_UART_MSG_FRAME2OPENCONNECTIONS_POS (0)
3533#define BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK \
3534 (0x3 << BT_UART_MSG_FRAME2OPENCONNECTIONS_POS)
3535#define BT_UART_MSG_FRAME2TRAFFICLOAD_POS (2)
3536#define BT_UART_MSG_FRAME2TRAFFICLOAD_MSK \
3537 (0x3 << BT_UART_MSG_FRAME2TRAFFICLOAD_POS)
3538#define BT_UART_MSG_FRAME2CHLSEQN_POS (4)
3539#define BT_UART_MSG_FRAME2CHLSEQN_MSK \
3540 (0x1 << BT_UART_MSG_FRAME2CHLSEQN_POS)
3541#define BT_UART_MSG_FRAME2INBAND_POS (5)
3542#define BT_UART_MSG_FRAME2INBAND_MSK \
3543 (0x1 << BT_UART_MSG_FRAME2INBAND_POS)
3544#define BT_UART_MSG_FRAME2RESERVED_POS (6)
3545#define BT_UART_MSG_FRAME2RESERVED_MSK \
3546 (0x3 << BT_UART_MSG_FRAME2RESERVED_POS)
3547
3548#define BT_UART_MSG_FRAME3SCOESCO_POS (0)
3549#define BT_UART_MSG_FRAME3SCOESCO_MSK \
3550 (0x1 << BT_UART_MSG_FRAME3SCOESCO_POS)
3551#define BT_UART_MSG_FRAME3SNIFF_POS (1)
3552#define BT_UART_MSG_FRAME3SNIFF_MSK \
3553 (0x1 << BT_UART_MSG_FRAME3SNIFF_POS)
3554#define BT_UART_MSG_FRAME3A2DP_POS (2)
3555#define BT_UART_MSG_FRAME3A2DP_MSK \
3556 (0x1 << BT_UART_MSG_FRAME3A2DP_POS)
3557#define BT_UART_MSG_FRAME3ACL_POS (3)
3558#define BT_UART_MSG_FRAME3ACL_MSK \
3559 (0x1 << BT_UART_MSG_FRAME3ACL_POS)
3560#define BT_UART_MSG_FRAME3MASTER_POS (4)
3561#define BT_UART_MSG_FRAME3MASTER_MSK \
3562 (0x1 << BT_UART_MSG_FRAME3MASTER_POS)
3563#define BT_UART_MSG_FRAME3OBEX_POS (5)
3564#define BT_UART_MSG_FRAME3OBEX_MSK \
3565 (0x1 << BT_UART_MSG_FRAME3OBEX_POS)
3566#define BT_UART_MSG_FRAME3RESERVED_POS (6)
3567#define BT_UART_MSG_FRAME3RESERVED_MSK \
3568 (0x3 << BT_UART_MSG_FRAME3RESERVED_POS)
3569
3570#define BT_UART_MSG_FRAME4IDLEDURATION_POS (0)
3571#define BT_UART_MSG_FRAME4IDLEDURATION_MSK \
3572 (0x3F << BT_UART_MSG_FRAME4IDLEDURATION_POS)
3573#define BT_UART_MSG_FRAME4RESERVED_POS (6)
3574#define BT_UART_MSG_FRAME4RESERVED_MSK \
3575 (0x3 << BT_UART_MSG_FRAME4RESERVED_POS)
3576
3577#define BT_UART_MSG_FRAME5TXACTIVITY_POS (0)
3578#define BT_UART_MSG_FRAME5TXACTIVITY_MSK \
3579 (0x3 << BT_UART_MSG_FRAME5TXACTIVITY_POS)
3580#define BT_UART_MSG_FRAME5RXACTIVITY_POS (2)
3581#define BT_UART_MSG_FRAME5RXACTIVITY_MSK \
3582 (0x3 << BT_UART_MSG_FRAME5RXACTIVITY_POS)
3583#define BT_UART_MSG_FRAME5ESCORETRANSMIT_POS (4)
3584#define BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK \
3585 (0x3 << BT_UART_MSG_FRAME5ESCORETRANSMIT_POS)
3586#define BT_UART_MSG_FRAME5RESERVED_POS (6)
3587#define BT_UART_MSG_FRAME5RESERVED_MSK \
3588 (0x3 << BT_UART_MSG_FRAME5RESERVED_POS)
3589
3590#define BT_UART_MSG_FRAME6SNIFFINTERVAL_POS (0)
3591#define BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK \
3592 (0x1F << BT_UART_MSG_FRAME6SNIFFINTERVAL_POS)
3593#define BT_UART_MSG_FRAME6DISCOVERABLE_POS (5)
3594#define BT_UART_MSG_FRAME6DISCOVERABLE_MSK \
3595 (0x1 << BT_UART_MSG_FRAME6DISCOVERABLE_POS)
3596#define BT_UART_MSG_FRAME6RESERVED_POS (6)
3597#define BT_UART_MSG_FRAME6RESERVED_MSK \
3598 (0x3 << BT_UART_MSG_FRAME6RESERVED_POS)
3599
3600#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0)
3601#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \
3602 (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
3603#define BT_UART_MSG_FRAME7PAGE_POS (3)
3604#define BT_UART_MSG_FRAME7PAGE_MSK \
3605 (0x1 << BT_UART_MSG_FRAME7PAGE_POS)
3606#define BT_UART_MSG_FRAME7INQUIRY_POS (4)
3607#define BT_UART_MSG_FRAME7INQUIRY_MSK \
3608 (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
3609#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5)
3610#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \
3611 (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
3612#define BT_UART_MSG_FRAME7RESERVED_POS (6)
3613#define BT_UART_MSG_FRAME7RESERVED_MSK \
3614 (0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
3615
3616/* BT Session Activity 2 UART message (BT -> WiFi) */
3617#define BT_UART_MSG_2_FRAME1RESERVED1_POS (5)
3618#define BT_UART_MSG_2_FRAME1RESERVED1_MSK \
3619 (0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
3620#define BT_UART_MSG_2_FRAME1RESERVED2_POS (6)
3621#define BT_UART_MSG_2_FRAME1RESERVED2_MSK \
3622 (0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)
3623
3624#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS (0)
3625#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK \
3626 (0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
3627#define BT_UART_MSG_2_FRAME2RESERVED_POS (6)
3628#define BT_UART_MSG_2_FRAME2RESERVED_MSK \
3629 (0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)
3630
3631#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS (0)
3632#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK \
3633 (0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
3634#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS (4)
3635#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK \
3636 (0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
3637#define BT_UART_MSG_2_FRAME3LEMASTER_POS (5)
3638#define BT_UART_MSG_2_FRAME3LEMASTER_MSK \
3639 (0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
3640#define BT_UART_MSG_2_FRAME3RESERVED_POS (6)
3641#define BT_UART_MSG_2_FRAME3RESERVED_MSK \
3642 (0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)
3643
3644#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS (0)
3645#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK \
3646 (0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
3647#define BT_UART_MSG_2_FRAME4NUMLECONN_POS (4)
3648#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK \
3649 (0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
3650#define BT_UART_MSG_2_FRAME4RESERVED_POS (6)
3651#define BT_UART_MSG_2_FRAME4RESERVED_MSK \
3652 (0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)
3653
3654#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS (0)
3655#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK \
3656 (0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
3657#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS (4)
3658#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK \
3659 (0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
3660#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS (5)
3661#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK \
3662 (0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
3663#define BT_UART_MSG_2_FRAME5RESERVED_POS (6)
3664#define BT_UART_MSG_2_FRAME5RESERVED_MSK \
3665 (0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)
3666
3667#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS (0)
3668#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK \
3669 (0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
3670#define BT_UART_MSG_2_FRAME6RFU_POS (5)
3671#define BT_UART_MSG_2_FRAME6RFU_MSK \
3672 (0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
3673#define BT_UART_MSG_2_FRAME6RESERVED_POS (6)
3674#define BT_UART_MSG_2_FRAME6RESERVED_MSK \
3675 (0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)
3676
3677#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS (0)
3678#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK \
3679 (0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
3680#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS (3)
3681#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK \
3682 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
3683#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS (4)
3684#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK \
3685 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
3686#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS (5)
3687#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK \
3688 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
3689#define BT_UART_MSG_2_FRAME7RESERVED_POS (6)
3690#define BT_UART_MSG_2_FRAME7RESERVED_MSK \
3691 (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
3692
3693
3694struct iwl_bt_uart_msg {
3695 u8 header;
3696 u8 frame1;
3697 u8 frame2;
3698 u8 frame3;
3699 u8 frame4;
3700 u8 frame5;
3701 u8 frame6;
3702 u8 frame7;
3703} __packed;
3704
3705struct iwl_bt_coex_profile_notif {
3706 struct iwl_bt_uart_msg last_bt_uart_msg;
3707 u8 bt_status; /* 0 - off, 1 - on */
3708 u8 bt_traffic_load; /* 0 .. 3? */
3709 u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */
3710 u8 reserved;
3711} __packed;
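A short sketch of decoding one field of the shared UART message carried in this notification, using the FRAME2 position/mask pair defined above; the helper name is illustrative:

static u8 example_bt_traffic_load(const struct iwl_bt_coex_profile_notif *notif)
{
	/* 2-bit aggregate traffic-load indication from frame 2 */
	return (notif->last_bt_uart_msg.frame2 & BT_UART_MSG_FRAME2TRAFFICLOAD_MSK)
		>> BT_UART_MSG_FRAME2TRAFFICLOAD_POS;
}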
3712
3713#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS 0
3714#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK 0x1
3715#define IWL_BT_COEX_PRIO_TBL_PRIO_POS 1
3716#define IWL_BT_COEX_PRIO_TBL_PRIO_MASK 0x0e
3717#define IWL_BT_COEX_PRIO_TBL_RESERVED_POS 4
3718#define IWL_BT_COEX_PRIO_TBL_RESERVED_MASK 0xf0
3719#define IWL_BT_COEX_PRIO_TBL_PRIO_SHIFT 1
3720
3721/*
3722 * BT Coexistence Priority table
3723 * REPLY_BT_COEX_PRIO_TABLE = 0xcc
3724 */
3725enum bt_coex_prio_table_events {
3726 BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
3727 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
3728 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
3729 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, /* DC calib */
3730 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
3731 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
3732 BT_COEX_PRIO_TBL_EVT_DTIM = 6,
3733 BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
3734 BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
3735 BT_COEX_PRIO_TBL_EVT_RESERVED0 = 9,
3736 BT_COEX_PRIO_TBL_EVT_RESERVED1 = 10,
3737 BT_COEX_PRIO_TBL_EVT_RESERVED2 = 11,
3738 BT_COEX_PRIO_TBL_EVT_RESERVED3 = 12,
3739 BT_COEX_PRIO_TBL_EVT_RESERVED4 = 13,
3740 BT_COEX_PRIO_TBL_EVT_RESERVED5 = 14,
3741 BT_COEX_PRIO_TBL_EVT_RESERVED6 = 15,
3742 /* BT_COEX_PRIO_TBL_EVT_MAX should always be last */
3743 BT_COEX_PRIO_TBL_EVT_MAX,
3744};
3745
3746enum bt_coex_prio_table_priorities {
3747 BT_COEX_PRIO_TBL_DISABLED = 0,
3748 BT_COEX_PRIO_TBL_PRIO_LOW = 1,
3749 BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
3750 BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
3751 BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
3752 BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
3753 BT_COEX_PRIO_TBL_PRIO_RSRVD1 = 6,
3754 BT_COEX_PRIO_TBL_PRIO_RSRVD2 = 7,
3755 BT_COEX_PRIO_TBL_MAX,
3756};
3757
3758struct iwl_bt_coex_prio_table_cmd {
3759 u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
3760} __packed;
3761
3762#define IWL_BT_COEX_ENV_CLOSE 0
3763#define IWL_BT_COEX_ENV_OPEN 1
3764/*
3765 * BT Protection Envelope
3766 * REPLY_BT_COEX_PROT_ENV = 0xcd
3767 */
3768struct iwl_bt_coex_prot_env_cmd {
3769 u8 action; /* 0 = closed, 1 = open */
3770 u8 type; /* 0 .. 15 */
3771 u8 reserved[2];
3772} __packed;
3773
3774/*
3775 * REPLY_WOWLAN_PATTERNS
3776 */
3777#define IWLAGN_WOWLAN_MIN_PATTERN_LEN 16
3778#define IWLAGN_WOWLAN_MAX_PATTERN_LEN 128
3779
3780struct iwlagn_wowlan_pattern {
3781 u8 mask[IWLAGN_WOWLAN_MAX_PATTERN_LEN / 8];
3782 u8 pattern[IWLAGN_WOWLAN_MAX_PATTERN_LEN];
3783 u8 mask_size;
3784 u8 pattern_size;
3785 __le16 reserved;
3786} __packed;
3787
3788#define IWLAGN_WOWLAN_MAX_PATTERNS 20
3789
3790struct iwlagn_wowlan_patterns_cmd {
3791 __le32 n_patterns;
3792 struct iwlagn_wowlan_pattern patterns[];
3793} __packed;
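A hedged sketch of packing one wakeup pattern, marking every byte as significant in the per-byte bitmask; real patterns arrive from userspace via cfg80211, and both the helper name and the all-bytes-must-match policy are assumptions for illustration:

static int example_fill_wowlan_pattern(struct iwlagn_wowlan_pattern *p,
				       const u8 *data, u8 len)
{
	int i;

	if (len < IWLAGN_WOWLAN_MIN_PATTERN_LEN ||
	    len > IWLAGN_WOWLAN_MAX_PATTERN_LEN)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	memcpy(p->pattern, data, len);
	p->pattern_size = len;
	p->mask_size = DIV_ROUND_UP(len, 8);
	for (i = 0; i < len; i++)
		p->mask[i / 8] |= BIT(i % 8);	/* bit i set: byte i must match */

	return 0;
}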
3794
3795/*
3796 * REPLY_WOWLAN_WAKEUP_FILTER
3797 */
3798enum iwlagn_wowlan_wakeup_filters {
3799 IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
3800 IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
3801 IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
3802 IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
3803 IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
3804 IWLAGN_WOWLAN_WAKEUP_RFKILL = BIT(5),
3805 IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR = BIT(6),
3806 IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(7),
3807 IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(8),
3808 IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(9),
3809 IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(10),
3810};
3811
3812struct iwlagn_wowlan_wakeup_filter_cmd {
3813 __le32 enabled;
3814 __le16 non_qos_seq;
3815 u8 min_sleep_seconds;
3816 u8 reserved;
3817 __le16 qos_seq[8];
3818};
3819
3820/*
3821 * REPLY_WOWLAN_TSC_RSC_PARAMS
3822 */
3823#define IWLAGN_NUM_RSC 16
3824
3825struct tkip_sc {
3826 __le16 iv16;
3827 __le16 pad;
3828 __le32 iv32;
3829} __packed;
3830
3831struct iwlagn_tkip_rsc_tsc {
3832 struct tkip_sc unicast_rsc[IWLAGN_NUM_RSC];
3833 struct tkip_sc multicast_rsc[IWLAGN_NUM_RSC];
3834 struct tkip_sc tsc;
3835} __packed;
3836
3837struct aes_sc {
3838 __le64 pn;
3839} __packed;
3840
3841struct iwlagn_aes_rsc_tsc {
3842 struct aes_sc unicast_rsc[IWLAGN_NUM_RSC];
3843 struct aes_sc multicast_rsc[IWLAGN_NUM_RSC];
3844 struct aes_sc tsc;
3845} __packed;
3846
3847union iwlagn_all_tsc_rsc {
3848 struct iwlagn_tkip_rsc_tsc tkip;
3849 struct iwlagn_aes_rsc_tsc aes;
3850};
3851
3852struct iwlagn_wowlan_rsc_tsc_params_cmd {
3853 union iwlagn_all_tsc_rsc all_tsc_rsc;
3854} __packed;
3855
3856/*
3857 * REPLY_WOWLAN_TKIP_PARAMS
3858 */
3859#define IWLAGN_MIC_KEY_SIZE 8
3860#define IWLAGN_P1K_SIZE 5
3861struct iwlagn_mic_keys {
3862 u8 tx[IWLAGN_MIC_KEY_SIZE];
3863 u8 rx_unicast[IWLAGN_MIC_KEY_SIZE];
3864 u8 rx_mcast[IWLAGN_MIC_KEY_SIZE];
3865} __packed;
3866
3867struct iwlagn_p1k_cache {
3868 __le16 p1k[IWLAGN_P1K_SIZE];
3869} __packed;
3870
3871#define IWLAGN_NUM_RX_P1K_CACHE 2
3872
3873struct iwlagn_wowlan_tkip_params_cmd {
3874 struct iwlagn_mic_keys mic_keys;
3875 struct iwlagn_p1k_cache tx;
3876 struct iwlagn_p1k_cache rx_uni[IWLAGN_NUM_RX_P1K_CACHE];
3877 struct iwlagn_p1k_cache rx_multi[IWLAGN_NUM_RX_P1K_CACHE];
3878} __packed;
3879
3880/*
3881 * REPLY_WOWLAN_KEK_KCK_MATERIAL
3882 */
3883
3884#define IWLAGN_KCK_MAX_SIZE 32
3885#define IWLAGN_KEK_MAX_SIZE 32
3886
3887struct iwlagn_wowlan_kek_kck_material_cmd {
3888 u8 kck[IWLAGN_KCK_MAX_SIZE];
3889 u8 kek[IWLAGN_KEK_MAX_SIZE];
3890 __le16 kck_len;
3891 __le16 kek_len;
3892 __le64 replay_ctr;
3893} __packed;
3894
3895/******************************************************************************
3896 * (13)
3897 * Union of all expected notifications/responses:
3898 *
3899 *****************************************************************************/
3900
3901struct iwl_rx_packet {
3902 /*
3903 * The first 4 bytes of the RX frame header contain both the RX frame
3904 * size and some flags.
3905 * Bit fields:
3906 * 31: flag flush RB request
3907 * 30: flag ignore TC (terminal counter) request
3908 * 29: flag fast IRQ request
3909 * 28-14: Reserved
3910 * 13-00: RX frame size
3911 */
3912 __le32 len_n_flags;
3913 struct iwl_cmd_header hdr;
3914 union {
3915 struct iwl_alive_resp alive_frame;
3916 struct iwl_spectrum_notification spectrum_notif;
3917 struct iwl_csa_notification csa_notif;
3918 struct iwl_error_resp err_resp;
3919 struct iwl_card_state_notif card_state_notif;
3920 struct iwl_add_sta_resp add_sta;
3921 struct iwl_rem_sta_resp rem_sta;
3922 struct iwl_sleep_notification sleep_notif;
3923 struct iwl_spectrum_resp spectrum;
3924 struct iwl_notif_statistics stats;
3925 struct iwl_bt_notif_statistics stats_bt;
3926 struct iwl_compressed_ba_resp compressed_ba;
3927 struct iwl_missed_beacon_notif missed_beacon;
3928 struct iwl_coex_medium_notification coex_medium_notif;
3929 struct iwl_coex_event_resp coex_event;
3930 struct iwl_bt_coex_profile_notif bt_coex_profile_notif;
3931 __le32 status;
3932 u8 raw[0];
3933 } u;
3934} __packed;
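A minimal sketch of extracting the 14-bit frame size from len_n_flags as laid out in the comment above; the driver has its own mask constant for this, so the literal 0x3FFF here is purely illustrative:

static u32 example_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & 0x3FFF;	/* bits 13-0: RX frame size */
}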
3935
3936int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
3937
3938/*
3939 * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
3940 */
3941
3942/*
3943 * Minimum slot time in TU
3944 */
3945#define IWL_MIN_SLOT_TIME 20
3946
3947/**
3948 * struct iwl_wipan_slot
3949 * @width: Time in TU
3950 * @type:
3951 * 0 - BSS
3952 * 1 - PAN
3953 */
3954struct iwl_wipan_slot {
3955 __le16 width;
3956 u8 type;
3957 u8 reserved;
3958} __packed;
3959
3960#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_CTS BIT(1) /* reserved */
3961#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_QUIET BIT(2) /* reserved */
3962#define IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE BIT(3) /* reserved */
3963#define IWL_WIPAN_PARAMS_FLG_FILTER_BEACON_NOTIF BIT(4)
3964#define IWL_WIPAN_PARAMS_FLG_FULL_SLOTTED_MODE BIT(5)
3965
3966/**
3967 * struct iwl_wipan_params_cmd
3968 * @flags:
3969 * bit0: reserved
3970 * bit1: CP leave channel with CTS
3971 * bit2: CP leave channel with Quiet
3972 * bit3: slotted mode
3973 * 1 - work in slotted mode
3974 * 0 - work in non slotted mode
3975 * bit4: filter beacon notification
3976 * bit5: full tx slotted mode. If this flag is set,
3977 * uCode will also perform its leave-channel methods on a context
3978 * switch when both contexts are operating on the same channel.
3979 * @num_slots: 1 - 10
3980 */
3981struct iwl_wipan_params_cmd {
3982 __le16 flags;
3983 u8 reserved;
3984 u8 num_slots;
3985 struct iwl_wipan_slot slots[10];
3986} __packed;
3987
3988/*
3989 * REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9
3990 *
3991 * TODO: Figure out what this is used for,
3992 * it can only switch between 2.4 GHz
3993 * channels!!
3994 */
3995
3996struct iwl_wipan_p2p_channel_switch_cmd {
3997 __le16 channel;
3998 __le16 reserved;
3999};
4000
4001/*
4002 * REPLY_WIPAN_NOA_NOTIFICATION = 0xbc
4003 *
4004 * This is used by the device to notify us of the
4005 * NoA schedule it determined so we can forward it
4006 * to userspace for inclusion in probe responses.
4007 *
4008 * In beacons, the NoA schedule is simply appended
4009 * to the frame we give the device.
4010 */
4011
4012struct iwl_wipan_noa_descriptor {
4013 u8 count;
4014 __le32 duration;
4015 __le32 interval;
4016 __le32 starttime;
4017} __packed;
4018
4019struct iwl_wipan_noa_attribute {
4020 u8 id;
4021 __le16 length;
4022 u8 index;
4023 u8 ct_window;
4024 struct iwl_wipan_noa_descriptor descr0, descr1;
4025 u8 reserved;
4026} __packed;
4027
4028struct iwl_wipan_noa_notification {
4029 u32 noa_active;
4030 struct iwl_wipan_noa_attribute noa_attribute;
4031} __packed;
4032
4033#endif /* __iwl_commands_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
new file mode 100644
index 00000000000..d652778253a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -0,0 +1,1938 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-dev.h" /* FIXME: remove */
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-power.h"
42#include "iwl-sta.h"
43#include "iwl-helpers.h"
44#include "iwl-agn.h"
45#include "iwl-trans.h"
46
47u32 iwl_debug_level;
48
49const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
50
51#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
52#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
53static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
54 struct ieee80211_sta_ht_cap *ht_info,
55 enum ieee80211_band band)
56{
57 u16 max_bit_rate = 0;
58 u8 rx_chains_num = priv->hw_params.rx_chains_num;
59 u8 tx_chains_num = priv->hw_params.tx_chains_num;
60
61 ht_info->cap = 0;
62 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
63
64 ht_info->ht_supported = true;
65
66 if (priv->cfg->ht_params &&
67 priv->cfg->ht_params->ht_greenfield_support)
68 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
69 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
70 max_bit_rate = MAX_BIT_RATE_20_MHZ;
71 if (priv->hw_params.ht40_channel & BIT(band)) {
72 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
73 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
74 ht_info->mcs.rx_mask[4] = 0x01;
75 max_bit_rate = MAX_BIT_RATE_40_MHZ;
76 }
77
78 if (iwlagn_mod_params.amsdu_size_8K)
79 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
80
81 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
82 if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
83 ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
84 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
85 if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
86 ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;
87
88 ht_info->mcs.rx_mask[0] = 0xFF;
89 if (rx_chains_num >= 2)
90 ht_info->mcs.rx_mask[1] = 0xFF;
91 if (rx_chains_num >= 3)
92 ht_info->mcs.rx_mask[2] = 0xFF;
93
94 /* Highest supported Rx data rate */
95 max_bit_rate *= rx_chains_num;
96 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
97 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
98
99 /* Tx MCS capabilities */
100 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
101 if (tx_chains_num != rx_chains_num) {
102 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
103 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
104 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
105 }
106}
107
108/**
109 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
110 */
111int iwlcore_init_geos(struct iwl_priv *priv)
112{
113 struct iwl_channel_info *ch;
114 struct ieee80211_supported_band *sband;
115 struct ieee80211_channel *channels;
116 struct ieee80211_channel *geo_ch;
117 struct ieee80211_rate *rates;
118 int i = 0;
119 s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
120
121 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
122 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
123 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
124 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
125 return 0;
126 }
127
128 channels = kzalloc(sizeof(struct ieee80211_channel) *
129 priv->channel_count, GFP_KERNEL);
130 if (!channels)
131 return -ENOMEM;
132
133 rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
134 GFP_KERNEL);
135 if (!rates) {
136 kfree(channels);
137 return -ENOMEM;
138 }
139
140 /* 5.2GHz channels start after the 2.4GHz channels */
141 sband = &priv->bands[IEEE80211_BAND_5GHZ];
142 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
143 /* just OFDM */
144 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
145 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
146
147 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
148 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
149 IEEE80211_BAND_5GHZ);
150
151 sband = &priv->bands[IEEE80211_BAND_2GHZ];
152 sband->channels = channels;
153 /* OFDM & CCK */
154 sband->bitrates = rates;
155 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
156
157 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
158 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
159 IEEE80211_BAND_2GHZ);
160
161 priv->ieee_channels = channels;
162 priv->ieee_rates = rates;
163
164 for (i = 0; i < priv->channel_count; i++) {
165 ch = &priv->channel_info[i];
166
167 /* FIXME: might be removed if scan is OK */
168 if (!is_channel_valid(ch))
169 continue;
170
171 sband = &priv->bands[ch->band];
172
173 geo_ch = &sband->channels[sband->n_channels++];
174
175 geo_ch->center_freq =
176 ieee80211_channel_to_frequency(ch->channel, ch->band);
177 geo_ch->max_power = ch->max_power_avg;
178 geo_ch->max_antenna_gain = 0xff;
179 geo_ch->hw_value = ch->channel;
180
181 if (is_channel_valid(ch)) {
182 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
183 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
184
185 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
186 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
187
188 if (ch->flags & EEPROM_CHANNEL_RADAR)
189 geo_ch->flags |= IEEE80211_CHAN_RADAR;
190
191 geo_ch->flags |= ch->ht40_extension_channel;
192
193 if (ch->max_power_avg > max_tx_power)
194 max_tx_power = ch->max_power_avg;
195 } else {
196 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
197 }
198
199 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
200 ch->channel, geo_ch->center_freq,
201 is_channel_a_band(ch) ? "5.2" : "2.4",
202 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
203 "restricted" : "valid",
204 geo_ch->flags);
205 }
206
207 priv->tx_power_device_lmt = max_tx_power;
208 priv->tx_power_user_lmt = max_tx_power;
209 priv->tx_power_next = max_tx_power;
210
211 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
212 priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
213 char buf[32];
214 bus_get_hw_id(priv->bus, buf, sizeof(buf));
215 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
216 "Please send your %s to maintainer.\n", buf);
217 priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
218 }
219
220 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
221 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
222 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
223
224 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
225
226 return 0;
227}
228
229/*
230 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
231 */
232void iwlcore_free_geos(struct iwl_priv *priv)
233{
234 kfree(priv->ieee_channels);
235 kfree(priv->ieee_rates);
236 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
237}
238
239static bool iwl_is_channel_extension(struct iwl_priv *priv,
240 enum ieee80211_band band,
241 u16 channel, u8 extension_chan_offset)
242{
243 const struct iwl_channel_info *ch_info;
244
245 ch_info = iwl_get_channel_info(priv, band, channel);
246 if (!is_channel_valid(ch_info))
247 return false;
248
249 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
250 return !(ch_info->ht40_extension_channel &
251 IEEE80211_CHAN_NO_HT40PLUS);
252 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
253 return !(ch_info->ht40_extension_channel &
254 IEEE80211_CHAN_NO_HT40MINUS);
255
256 return false;
257}
258
259bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
260 struct iwl_rxon_context *ctx,
261 struct ieee80211_sta_ht_cap *ht_cap)
262{
263 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
264 return false;
265
266 /*
267 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40;
268 * the bit will not be set in the pure 40 MHz case.
269 */
270 if (ht_cap && !ht_cap->ht_supported)
271 return false;
272
273#ifdef CONFIG_IWLWIFI_DEBUGFS
274 if (priv->disable_ht40)
275 return false;
276#endif
277
278 return iwl_is_channel_extension(priv, priv->band,
279 le16_to_cpu(ctx->staging.channel),
280 ctx->ht.extension_chan_offset);
281}
282
283static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
284{
285 u16 new_val;
286 u16 beacon_factor;
287
288 /*
289 * If mac80211 hasn't given us a beacon interval, program
290 * the default into the device (not checking this here
291 * would cause the adjustment below to return the maximum
292 * value, which may break PAN.)
293 */
294 if (!beacon_val)
295 return DEFAULT_BEACON_INTERVAL;
296
297 /*
298 * If the beacon interval we obtained from the peer
299 * is too large, we'll have to wake up more often
300 * (and in IBSS case, we'll beacon too much)
301 *
302 * For example, if max_beacon_val is 4096, and the
303 * requested beacon interval is 7000, we'll have to
304 * use 3500 to be able to wake up on the beacons.
305 *
306 * This could badly influence beacon detection stats.
307 */
308
309 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
310 new_val = beacon_val / beacon_factor;
311
312 if (!new_val)
313 new_val = max_beacon_val;
314
315 return new_val;
316}
317
318int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
319{
320 u64 tsf;
321 s32 interval_tm, rem;
322 struct ieee80211_conf *conf = NULL;
323 u16 beacon_int;
324 struct ieee80211_vif *vif = ctx->vif;
325
326 conf = ieee80211_get_hw_conf(priv->hw);
327
328 lockdep_assert_held(&priv->mutex);
329
330 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
331
332 ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
333 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
334
335 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
336
337 /*
338 * TODO: For IBSS we need to get atim_window from mac80211,
339 * for now just always use 0
340 */
341 ctx->timing.atim_window = 0;
342
343 if (ctx->ctxid == IWL_RXON_CTX_PAN &&
344 (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
345 iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
346 priv->contexts[IWL_RXON_CTX_BSS].vif &&
347 priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
348 ctx->timing.beacon_interval =
349 priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
350 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
351 } else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
352 iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
353 priv->contexts[IWL_RXON_CTX_PAN].vif &&
354 priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
355 (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
356 !ctx->vif->bss_conf.beacon_int)) {
357 ctx->timing.beacon_interval =
358 priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
359 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
360 } else {
361 beacon_int = iwl_adjust_beacon_interval(beacon_int,
362 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
363 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
364 }
365
366 ctx->beacon_int = beacon_int;
367
368	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
369 interval_tm = beacon_int * TIME_UNIT;
370 rem = do_div(tsf, interval_tm);
371 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
372
373 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
374
375 IWL_DEBUG_ASSOC(priv,
376 "beacon interval %d beacon timer %d beacon tim %d\n",
377 le16_to_cpu(ctx->timing.beacon_interval),
378 le32_to_cpu(ctx->timing.beacon_init_val),
379 le16_to_cpu(ctx->timing.atim_window));
380
381 return trans_send_cmd_pdu(&priv->trans, ctx->rxon_timing_cmd,
382 CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
383}
384
385void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
386 int hw_decrypt)
387{
388 struct iwl_rxon_cmd *rxon = &ctx->staging;
389
390 if (hw_decrypt)
391 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
392 else
393 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
394
395}
396
397/* validate RXON structure is valid */
398int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
399{
400 struct iwl_rxon_cmd *rxon = &ctx->staging;
401 u32 errors = 0;
402
403 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
404 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
405 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
406 errors |= BIT(0);
407 }
408 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
409 IWL_WARN(priv, "check 2.4G: wrong radar\n");
410 errors |= BIT(1);
411 }
412 } else {
413 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
414 IWL_WARN(priv, "check 5.2G: not short slot!\n");
415 errors |= BIT(2);
416 }
417 if (rxon->flags & RXON_FLG_CCK_MSK) {
418 IWL_WARN(priv, "check 5.2G: CCK!\n");
419 errors |= BIT(3);
420 }
421 }
422 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
423 IWL_WARN(priv, "mac/bssid mcast!\n");
424 errors |= BIT(4);
425 }
426
427 /* make sure basic rates 6Mbps and 1Mbps are supported */
428 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
429 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
430 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
431 errors |= BIT(5);
432 }
433
434 if (le16_to_cpu(rxon->assoc_id) > 2007) {
435 IWL_WARN(priv, "aid > 2007\n");
436 errors |= BIT(6);
437 }
438
439 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
440 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
441 IWL_WARN(priv, "CCK and short slot\n");
442 errors |= BIT(7);
443 }
444
445 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
446 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
447		IWL_WARN(priv, "CCK and auto detect\n");
448 errors |= BIT(8);
449 }
450
451 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
452 RXON_FLG_TGG_PROTECT_MSK)) ==
453 RXON_FLG_TGG_PROTECT_MSK) {
454 IWL_WARN(priv, "TGg but no auto-detect\n");
455 errors |= BIT(9);
456 }
457
458 if (rxon->channel == 0) {
459 IWL_WARN(priv, "zero channel is invalid\n");
460 errors |= BIT(10);
461 }
462
463 WARN(errors, "Invalid RXON (%#x), channel %d",
464 errors, le16_to_cpu(rxon->channel));
465
466 return errors ? -EINVAL : 0;
467}
468
469/**
470 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
471 * @priv: staging_rxon is compared to active_rxon
472 *
473 * If the RXON structure is changing enough to require a new tune,
474 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
475 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
476 */
477int iwl_full_rxon_required(struct iwl_priv *priv,
478 struct iwl_rxon_context *ctx)
479{
480 const struct iwl_rxon_cmd *staging = &ctx->staging;
481 const struct iwl_rxon_cmd *active = &ctx->active;
482
483#define CHK(cond) \
484 if ((cond)) { \
485 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
486 return 1; \
487 }
488
489#define CHK_NEQ(c1, c2) \
490 if ((c1) != (c2)) { \
491 IWL_DEBUG_INFO(priv, "need full RXON - " \
492 #c1 " != " #c2 " - %d != %d\n", \
493 (c1), (c2)); \
494 return 1; \
495 }
496
497 /* These items are only settable from the full RXON command */
498 CHK(!iwl_is_associated_ctx(ctx));
499 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
500 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
501 CHK(compare_ether_addr(staging->wlap_bssid_addr,
502 active->wlap_bssid_addr));
503 CHK_NEQ(staging->dev_type, active->dev_type);
504 CHK_NEQ(staging->channel, active->channel);
505 CHK_NEQ(staging->air_propagation, active->air_propagation);
506 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
507 active->ofdm_ht_single_stream_basic_rates);
508 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
509 active->ofdm_ht_dual_stream_basic_rates);
510 CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
511 active->ofdm_ht_triple_stream_basic_rates);
512 CHK_NEQ(staging->assoc_id, active->assoc_id);
513
514 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
515 * be updated with the RXON_ASSOC command -- however only some
516 * flag transitions are allowed using RXON_ASSOC */
517
518 /* Check if we are not switching bands */
519 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
520 active->flags & RXON_FLG_BAND_24G_MSK);
521
522 /* Check if we are switching association toggle */
523 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
524 active->filter_flags & RXON_FILTER_ASSOC_MSK);
525
526#undef CHK
527#undef CHK_NEQ
528
529 return 0;
530}
531
532static void _iwl_set_rxon_ht(struct iwl_priv *priv,
533 struct iwl_ht_config *ht_conf,
534 struct iwl_rxon_context *ctx)
535{
536 struct iwl_rxon_cmd *rxon = &ctx->staging;
537
538 if (!ctx->ht.enabled) {
539 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
540 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
541 RXON_FLG_HT40_PROT_MSK |
542 RXON_FLG_HT_PROT_MSK);
543 return;
544 }
545
546	/* FIXME: if the definition of ht.protection changes, a "translation"
547	 * will be needed for rxon->flags
548 */
549 rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
550
551 /* Set up channel bandwidth:
552 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
553 /* clear the HT channel mode before set the mode */
554 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
555 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
556 if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
557 /* pure ht40 */
558 if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
559 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
560 /* Note: control channel is opposite of extension channel */
561 switch (ctx->ht.extension_chan_offset) {
562 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
563 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
564 break;
565 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
566 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
567 break;
568 }
569 } else {
570 /* Note: control channel is opposite of extension channel */
571 switch (ctx->ht.extension_chan_offset) {
572 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
573 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
574 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
575 break;
576 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
577 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
578 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
579 break;
580 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
581 default:
582 /* channel location only valid if in Mixed mode */
583 IWL_ERR(priv, "invalid extension channel offset\n");
584 break;
585 }
586 }
587 } else {
588 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
589 }
590
591 iwlagn_set_rxon_chain(priv, ctx);
592
593 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
594 "extension channel offset 0x%x\n",
595 le32_to_cpu(rxon->flags), ctx->ht.protection,
596 ctx->ht.extension_chan_offset);
597}
598
599void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
600{
601 struct iwl_rxon_context *ctx;
602
603 for_each_context(priv, ctx)
604 _iwl_set_rxon_ht(priv, ht_conf, ctx);
605}
606
607/* Return valid, unused, channel for a passive scan to reset the RF */
608u8 iwl_get_single_channel_number(struct iwl_priv *priv,
609 enum ieee80211_band band)
610{
611 const struct iwl_channel_info *ch_info;
612 int i;
613 u8 channel = 0;
614 u8 min, max;
615 struct iwl_rxon_context *ctx;
616
617 if (band == IEEE80211_BAND_5GHZ) {
618 min = 14;
619 max = priv->channel_count;
620 } else {
621 min = 0;
622 max = 14;
623 }
624
625 for (i = min; i < max; i++) {
626 bool busy = false;
627
628 for_each_context(priv, ctx) {
629 busy = priv->channel_info[i].channel ==
630 le16_to_cpu(ctx->staging.channel);
631 if (busy)
632 break;
633 }
634
635 if (busy)
636 continue;
637
638 channel = priv->channel_info[i].channel;
639 ch_info = iwl_get_channel_info(priv, band, channel);
640 if (is_channel_valid(ch_info))
641 break;
642 }
643
644 return channel;
645}
646
647/**
648 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
649 * @ch: requested channel as a pointer to struct ieee80211_channel
650 *
651 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
652 * in the staging RXON flag structure based on the ch->band
653 */
654int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
655 struct iwl_rxon_context *ctx)
656{
657 enum ieee80211_band band = ch->band;
658 u16 channel = ch->hw_value;
659
660 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
661 (priv->band == band))
662 return 0;
663
664 ctx->staging.channel = cpu_to_le16(channel);
665 if (band == IEEE80211_BAND_5GHZ)
666 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
667 else
668 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
669
670 priv->band = band;
671
672 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
673
674 return 0;
675}
676
677void iwl_set_flags_for_band(struct iwl_priv *priv,
678 struct iwl_rxon_context *ctx,
679 enum ieee80211_band band,
680 struct ieee80211_vif *vif)
681{
682 if (band == IEEE80211_BAND_5GHZ) {
683 ctx->staging.flags &=
684 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
685 | RXON_FLG_CCK_MSK);
686 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
687 } else {
688 /* Copied from iwl_post_associate() */
689 if (vif && vif->bss_conf.use_short_slot)
690 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
691 else
692 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
693
694 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
695 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
696 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
697 }
698}
699
700/*
701 * initialize rxon structure with default values from eeprom
702 */
703void iwl_connection_init_rx_config(struct iwl_priv *priv,
704 struct iwl_rxon_context *ctx)
705{
706 const struct iwl_channel_info *ch_info;
707
708 memset(&ctx->staging, 0, sizeof(ctx->staging));
709
710 if (!ctx->vif) {
711 ctx->staging.dev_type = ctx->unused_devtype;
712 } else switch (ctx->vif->type) {
713 case NL80211_IFTYPE_AP:
714 ctx->staging.dev_type = ctx->ap_devtype;
715 break;
716
717 case NL80211_IFTYPE_STATION:
718 ctx->staging.dev_type = ctx->station_devtype;
719 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
720 break;
721
722 case NL80211_IFTYPE_ADHOC:
723 ctx->staging.dev_type = ctx->ibss_devtype;
724 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
725 ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
726 RXON_FILTER_ACCEPT_GRP_MSK;
727 break;
728
729 default:
730 IWL_ERR(priv, "Unsupported interface type %d\n",
731 ctx->vif->type);
732 break;
733 }
734
735#if 0
736 /* TODO: Figure out when short_preamble would be set and cache from
737 * that */
738 if (!hw_to_local(priv->hw)->short_preamble)
739 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
740 else
741 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
742#endif
743
744 ch_info = iwl_get_channel_info(priv, priv->band,
745 le16_to_cpu(ctx->active.channel));
746
747 if (!ch_info)
748 ch_info = &priv->channel_info[0];
749
750 ctx->staging.channel = cpu_to_le16(ch_info->channel);
751 priv->band = ch_info->band;
752
753 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
754
755 ctx->staging.ofdm_basic_rates =
756 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
757 ctx->staging.cck_basic_rates =
758 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
759
760 /* clear both MIX and PURE40 mode flag */
761 ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
762 RXON_FLG_CHANNEL_MODE_PURE_40);
763 if (ctx->vif)
764 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
765
766 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
767 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
768 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
769}
770
771void iwl_set_rate(struct iwl_priv *priv)
772{
773 const struct ieee80211_supported_band *hw = NULL;
774 struct ieee80211_rate *rate;
775 struct iwl_rxon_context *ctx;
776 int i;
777
778 hw = iwl_get_hw_mode(priv, priv->band);
779 if (!hw) {
780 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
781 return;
782 }
783
784 priv->active_rate = 0;
785
786 for (i = 0; i < hw->n_bitrates; i++) {
787 rate = &(hw->bitrates[i]);
788 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
789 priv->active_rate |= (1 << rate->hw_value);
790 }
791
792 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
793
794 for_each_context(priv, ctx) {
795 ctx->staging.cck_basic_rates =
796 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
797
798 ctx->staging.ofdm_basic_rates =
799 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
800 }
801}
802
803void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
804{
805 /*
806 * MULTI-FIXME
807 * See iwl_mac_channel_switch.
808 */
809 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
810
811 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
812 return;
813
814 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
815 ieee80211_chswitch_done(ctx->vif, is_success);
816}
817
818#ifdef CONFIG_IWLWIFI_DEBUG
819void iwl_print_rx_config_cmd(struct iwl_priv *priv,
820 struct iwl_rxon_context *ctx)
821{
822 struct iwl_rxon_cmd *rxon = &ctx->staging;
823
824 IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
825 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
826 IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
827 IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
828 IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
829 le32_to_cpu(rxon->filter_flags));
830 IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
831 IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
832 rxon->ofdm_basic_rates);
833 IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
834 IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
835 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
836 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
837}
838#endif
839
840static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
841{
842 unsigned long flags;
843 struct iwl_notification_wait *wait_entry;
844
845 spin_lock_irqsave(&priv->notif_wait_lock, flags);
846 list_for_each_entry(wait_entry, &priv->notif_waits, list)
847 wait_entry->aborted = true;
848 spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
849
850 wake_up_all(&priv->notif_waitq);
851}
852
853void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
854{
855 unsigned int reload_msec;
856 unsigned long reload_jiffies;
857
858 /* Set the FW error flag -- cleared on iwl_down */
859 set_bit(STATUS_FW_ERROR, &priv->status);
860
861 /* Cancel currently queued command. */
862 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
863
864 iwlagn_abort_notification_waits(priv);
865
866 /* Keep the restart process from trying to send host
867 * commands by clearing the ready bit */
868 clear_bit(STATUS_READY, &priv->status);
869
870 wake_up(&priv->wait_command_queue);
871
872 if (!ondemand) {
873 /*
874		 * If the firmware keeps reloading, it indicates something is
875		 * seriously wrong and the firmware is having trouble recovering
876		 * from it. Instead of retrying, which will fill the syslog
877		 * and hang the system, let's just stop it
878 */
879 reload_jiffies = jiffies;
880 reload_msec = jiffies_to_msecs((long) reload_jiffies -
881 (long) priv->reload_jiffies);
882 priv->reload_jiffies = reload_jiffies;
883 if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
884 priv->reload_count++;
885 if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
886 IWL_ERR(priv, "BUG_ON, Stop restarting\n");
887 return;
888 }
889 } else
890 priv->reload_count = 0;
891 }
892
893 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
894 if (iwlagn_mod_params.restart_fw) {
895 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
896 "Restarting adapter due to uCode error.\n");
897 queue_work(priv->workqueue, &priv->restart);
898 } else
899 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
900 "Detected FW error, but not restarting\n");
901 }
902}
903
904/**
905 * iwl_irq_handle_error - called for HW or SW error interrupt from card
906 */
907void iwl_irq_handle_error(struct iwl_priv *priv)
908{
909	/* W/A for WiFi/WiMAX coex when WiMAX owns the RF */
910 if (priv->cfg->internal_wimax_coex &&
911 (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
912 APMS_CLK_VAL_MRB_FUNC_MODE) ||
913 (iwl_read_prph(priv, APMG_PS_CTRL_REG) &
914 APMG_PS_CTRL_VAL_RESET_REQ))) {
915 /*
916 * Keep the restart process from trying to send host
917 * commands by clearing the ready bit.
918 */
919 clear_bit(STATUS_READY, &priv->status);
920 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
921 wake_up(&priv->wait_command_queue);
922 IWL_ERR(priv, "RF is used by WiMAX\n");
923 return;
924 }
925
926 IWL_ERR(priv, "Loaded firmware version: %s\n",
927 priv->hw->wiphy->fw_version);
928
929 iwl_dump_nic_error_log(priv);
930 iwl_dump_csr(priv);
931 iwl_dump_fh(priv, NULL, false);
932 iwl_dump_nic_event_log(priv, false, NULL, false);
933#ifdef CONFIG_IWLWIFI_DEBUG
934 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
935 iwl_print_rx_config_cmd(priv,
936 &priv->contexts[IWL_RXON_CTX_BSS]);
937#endif
938
939 iwlagn_fw_error(priv, false);
940}
941
942static int iwl_apm_stop_master(struct iwl_priv *priv)
943{
944 int ret = 0;
945
946 /* stop device's busmaster DMA activity */
947 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
948
949 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
950 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
951 if (ret)
952 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
953
954 IWL_DEBUG_INFO(priv, "stop master\n");
955
956 return ret;
957}
958
959void iwl_apm_stop(struct iwl_priv *priv)
960{
961 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
962
963 clear_bit(STATUS_DEVICE_ENABLED, &priv->status);
964
965 /* Stop device's DMA activity */
966 iwl_apm_stop_master(priv);
967
968 /* Reset the entire device */
969 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
970
971 udelay(10);
972
973 /*
974 * Clear "initialization complete" bit to move adapter from
975 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
976 */
977 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
978}
979
980
981/*
982 * Start up NIC's basic functionality after it has been reset
983 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
984 * NOTE: This does not load uCode nor start the embedded processor
985 */
986int iwl_apm_init(struct iwl_priv *priv)
987{
988 int ret = 0;
989 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
990
991 /*
992 * Use "set_bit" below rather than "write", to preserve any hardware
993 * bits already set by default after reset.
994 */
995
996 /* Disable L0S exit timer (platform NMI Work/Around) */
997 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
998 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
999
1000 /*
1001 * Disable L0s without affecting L1;
1002 * don't wait for ICH L0s (ICH bug W/A)
1003 */
1004 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1005 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1006
1007 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1008 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
1009
1010 /*
1011 * Enable HAP INTA (interrupt from management bus) to
1012 * wake device's PCI Express link L1a -> L0s
1013 */
1014 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1015 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1016
1017 bus_apm_config(priv->bus);
1018
1019 /* Configure analog phase-lock-loop before activating to D0A */
1020 if (priv->cfg->base_params->pll_cfg_val)
1021 iwl_set_bit(priv, CSR_ANA_PLL_CFG,
1022 priv->cfg->base_params->pll_cfg_val);
1023
1024 /*
1025 * Set "initialization complete" bit to move adapter from
1026 * D0U* --> D0A* (powered-up active) state.
1027 */
1028 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1029
1030 /*
1031 * Wait for clock stabilization; once stabilized, access to
1032 * device-internal resources is supported, e.g. iwl_write_prph()
1033 * and accesses to uCode SRAM.
1034 */
1035 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1036 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1037 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1038 if (ret < 0) {
1039 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1040 goto out;
1041 }
1042
1043 /*
1044 * Enable DMA clock and wait for it to stabilize.
1045 *
1046 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1047 * do not disable clocks. This preserves any hardware bits already
1048 * set by default in "CLK_CTRL_REG" after reset.
1049 */
1050 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
1051 udelay(20);
1052
1053 /* Disable L1-Active */
1054 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1055 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1056
1057 set_bit(STATUS_DEVICE_ENABLED, &priv->status);
1058
1059out:
1060 return ret;
1061}
1062
1063
1064int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1065{
1066 int ret;
1067 s8 prev_tx_power;
1068 bool defer;
1069 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1070
1071 lockdep_assert_held(&priv->mutex);
1072
1073 if (priv->tx_power_user_lmt == tx_power && !force)
1074 return 0;
1075
1076 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
1077 IWL_WARN(priv,
1078 "Requested user TXPOWER %d below lower limit %d.\n",
1079 tx_power,
1080 IWLAGN_TX_POWER_TARGET_POWER_MIN);
1081 return -EINVAL;
1082 }
1083
1084 if (tx_power > priv->tx_power_device_lmt) {
1085 IWL_WARN(priv,
1086 "Requested user TXPOWER %d above upper limit %d.\n",
1087 tx_power, priv->tx_power_device_lmt);
1088 return -EINVAL;
1089 }
1090
1091 if (!iwl_is_ready_rf(priv))
1092 return -EIO;
1093
1094	/* scan complete and commit_rxon use the tx_power_next value,
1095	 * so it always needs to be updated with the newest request */
1096 priv->tx_power_next = tx_power;
1097
1098 /* do not set tx power when scanning or channel changing */
1099 defer = test_bit(STATUS_SCANNING, &priv->status) ||
1100 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
1101 if (defer && !force) {
1102 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
1103 return 0;
1104 }
1105
1106 prev_tx_power = priv->tx_power_user_lmt;
1107 priv->tx_power_user_lmt = tx_power;
1108
1109 ret = iwlagn_send_tx_power(priv);
1110
1111	/* if we fail to set tx_power, restore the original tx power */
1112 if (ret) {
1113 priv->tx_power_user_lmt = prev_tx_power;
1114 priv->tx_power_next = prev_tx_power;
1115 }
1116 return ret;
1117}
1118
1119void iwl_send_bt_config(struct iwl_priv *priv)
1120{
1121 struct iwl_bt_cmd bt_cmd = {
1122 .lead_time = BT_LEAD_TIME_DEF,
1123 .max_kill = BT_MAX_KILL_DEF,
1124 .kill_ack_mask = 0,
1125 .kill_cts_mask = 0,
1126 };
1127
1128 if (!iwlagn_mod_params.bt_coex_active)
1129 bt_cmd.flags = BT_COEX_DISABLE;
1130 else
1131 bt_cmd.flags = BT_COEX_ENABLE;
1132
1133 priv->bt_enable_flag = bt_cmd.flags;
1134 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1135 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1136
1137 if (trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
1138 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
1139 IWL_ERR(priv, "failed to send BT Coex Config\n");
1140}
1141
1142int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1143{
1144 struct iwl_statistics_cmd statistics_cmd = {
1145 .configuration_flags =
1146 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1147 };
1148
1149 if (flags & CMD_ASYNC)
1150 return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
1151 CMD_ASYNC,
1152 sizeof(struct iwl_statistics_cmd),
1153 &statistics_cmd);
1154 else
1155 return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
1156 CMD_SYNC,
1157 sizeof(struct iwl_statistics_cmd),
1158 &statistics_cmd);
1159}
1160
1161void iwl_clear_isr_stats(struct iwl_priv *priv)
1162{
1163 memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
1164}
1165
1166int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1167 const struct ieee80211_tx_queue_params *params)
1168{
1169 struct iwl_priv *priv = hw->priv;
1170 struct iwl_rxon_context *ctx;
1171 unsigned long flags;
1172 int q;
1173
1174 IWL_DEBUG_MAC80211(priv, "enter\n");
1175
1176 if (!iwl_is_ready_rf(priv)) {
1177 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1178 return -EIO;
1179 }
1180
1181 if (queue >= AC_NUM) {
1182 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1183 return 0;
1184 }
1185
1186 q = AC_NUM - 1 - queue;
1187
1188 spin_lock_irqsave(&priv->lock, flags);
1189
1190 /*
1191 * MULTI-FIXME
1192 * This may need to be done per interface in nl80211/cfg80211/mac80211.
1193 */
1194 for_each_context(priv, ctx) {
1195 ctx->qos_data.def_qos_parm.ac[q].cw_min =
1196 cpu_to_le16(params->cw_min);
1197 ctx->qos_data.def_qos_parm.ac[q].cw_max =
1198 cpu_to_le16(params->cw_max);
1199 ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
1200 ctx->qos_data.def_qos_parm.ac[q].edca_txop =
1201 cpu_to_le16((params->txop * 32));
1202
1203 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1204 }
1205
1206 spin_unlock_irqrestore(&priv->lock, flags);
1207
1208 IWL_DEBUG_MAC80211(priv, "leave\n");
1209 return 0;
1210}
1211
1212int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1213{
1214 struct iwl_priv *priv = hw->priv;
1215
1216 return priv->ibss_manager == IWL_IBSS_MANAGER;
1217}
1218
1219static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1220{
1221 iwl_connection_init_rx_config(priv, ctx);
1222
1223 iwlagn_set_rxon_chain(priv, ctx);
1224
1225 return iwlagn_commit_rxon(priv, ctx);
1226}
1227
1228static int iwl_setup_interface(struct iwl_priv *priv,
1229 struct iwl_rxon_context *ctx)
1230{
1231 struct ieee80211_vif *vif = ctx->vif;
1232 int err;
1233
1234 lockdep_assert_held(&priv->mutex);
1235
1236 /*
1237 * This variable will be correct only when there's just
1238 * a single context, but all code using it is for hardware
1239 * that supports only one context.
1240 */
1241 priv->iw_mode = vif->type;
1242
1243 ctx->is_active = true;
1244
1245 err = iwl_set_mode(priv, ctx);
1246 if (err) {
1247 if (!ctx->always_active)
1248 ctx->is_active = false;
1249 return err;
1250 }
1251
1252 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
1253 vif->type == NL80211_IFTYPE_ADHOC) {
1254 /*
1255 * pretend to have high BT traffic as long as we
1256 * are operating in IBSS mode, as this will cause
1257 * the rate scaling etc. to behave as intended.
1258 */
1259 priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1260 }
1261
1262 return 0;
1263}
1264
1265int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1266{
1267 struct iwl_priv *priv = hw->priv;
1268 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1269 struct iwl_rxon_context *tmp, *ctx = NULL;
1270 int err;
1271 enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
1272
1273 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1274 viftype, vif->addr);
1275
1276 mutex_lock(&priv->mutex);
1277
1278 if (!iwl_is_ready_rf(priv)) {
1279 IWL_WARN(priv, "Try to add interface when device not ready\n");
1280 err = -EINVAL;
1281 goto out;
1282 }
1283
1284 for_each_context(priv, tmp) {
1285 u32 possible_modes =
1286 tmp->interface_modes | tmp->exclusive_interface_modes;
1287
1288 if (tmp->vif) {
1289 /* check if this busy context is exclusive */
1290 if (tmp->exclusive_interface_modes &
1291 BIT(tmp->vif->type)) {
1292 err = -EINVAL;
1293 goto out;
1294 }
1295 continue;
1296 }
1297
1298 if (!(possible_modes & BIT(viftype)))
1299 continue;
1300
1301 /* have maybe usable context w/o interface */
1302 ctx = tmp;
1303 break;
1304 }
1305
1306 if (!ctx) {
1307 err = -EOPNOTSUPP;
1308 goto out;
1309 }
1310
1311 vif_priv->ctx = ctx;
1312 ctx->vif = vif;
1313
1314 err = iwl_setup_interface(priv, ctx);
1315 if (!err)
1316 goto out;
1317
1318 ctx->vif = NULL;
1319 priv->iw_mode = NL80211_IFTYPE_STATION;
1320 out:
1321 mutex_unlock(&priv->mutex);
1322
1323 IWL_DEBUG_MAC80211(priv, "leave\n");
1324 return err;
1325}
1326
1327static void iwl_teardown_interface(struct iwl_priv *priv,
1328 struct ieee80211_vif *vif,
1329 bool mode_change)
1330{
1331 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1332
1333 lockdep_assert_held(&priv->mutex);
1334
1335 if (priv->scan_vif == vif) {
1336 iwl_scan_cancel_timeout(priv, 200);
1337 iwl_force_scan_end(priv);
1338 }
1339
1340 if (!mode_change) {
1341 iwl_set_mode(priv, ctx);
1342 if (!ctx->always_active)
1343 ctx->is_active = false;
1344 }
1345
1346 /*
1347 * When removing the IBSS interface, overwrite the
1348 * BT traffic load with the stored one from the last
1349 * notification, if any. If this is a device that
1350 * doesn't implement this, this has no effect since
1351 * both values are the same and zero.
1352 */
1353 if (vif->type == NL80211_IFTYPE_ADHOC)
1354 priv->bt_traffic_load = priv->last_bt_traffic_load;
1355}
1356
1357void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1358 struct ieee80211_vif *vif)
1359{
1360 struct iwl_priv *priv = hw->priv;
1361 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1362
1363 IWL_DEBUG_MAC80211(priv, "enter\n");
1364
1365 mutex_lock(&priv->mutex);
1366
1367 WARN_ON(ctx->vif != vif);
1368 ctx->vif = NULL;
1369
1370 iwl_teardown_interface(priv, vif, false);
1371
1372 mutex_unlock(&priv->mutex);
1373
1374 IWL_DEBUG_MAC80211(priv, "leave\n");
1375
1376}
1377
1378#ifdef CONFIG_IWLWIFI_DEBUGFS
1379
1380#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1381
1382void iwl_reset_traffic_log(struct iwl_priv *priv)
1383{
1384 priv->tx_traffic_idx = 0;
1385 priv->rx_traffic_idx = 0;
1386 if (priv->tx_traffic)
1387 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1388 if (priv->rx_traffic)
1389 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1390}
1391
1392int iwl_alloc_traffic_mem(struct iwl_priv *priv)
1393{
1394 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1395
1396 if (iwl_debug_level & IWL_DL_TX) {
1397 if (!priv->tx_traffic) {
1398 priv->tx_traffic =
1399 kzalloc(traffic_size, GFP_KERNEL);
1400 if (!priv->tx_traffic)
1401 return -ENOMEM;
1402 }
1403 }
1404 if (iwl_debug_level & IWL_DL_RX) {
1405 if (!priv->rx_traffic) {
1406 priv->rx_traffic =
1407 kzalloc(traffic_size, GFP_KERNEL);
1408 if (!priv->rx_traffic)
1409 return -ENOMEM;
1410 }
1411 }
1412 iwl_reset_traffic_log(priv);
1413 return 0;
1414}
1415
1416void iwl_free_traffic_mem(struct iwl_priv *priv)
1417{
1418 kfree(priv->tx_traffic);
1419 priv->tx_traffic = NULL;
1420
1421 kfree(priv->rx_traffic);
1422 priv->rx_traffic = NULL;
1423}
1424
1425void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1426 u16 length, struct ieee80211_hdr *header)
1427{
1428 __le16 fc;
1429 u16 len;
1430
1431 if (likely(!(iwl_debug_level & IWL_DL_TX)))
1432 return;
1433
1434 if (!priv->tx_traffic)
1435 return;
1436
1437 fc = header->frame_control;
1438 if (ieee80211_is_data(fc)) {
1439 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1440 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1441 memcpy((priv->tx_traffic +
1442 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1443 header, len);
1444 priv->tx_traffic_idx =
1445 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1446 }
1447}
1448
1449void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1450 u16 length, struct ieee80211_hdr *header)
1451{
1452 __le16 fc;
1453 u16 len;
1454
1455 if (likely(!(iwl_debug_level & IWL_DL_RX)))
1456 return;
1457
1458 if (!priv->rx_traffic)
1459 return;
1460
1461 fc = header->frame_control;
1462 if (ieee80211_is_data(fc)) {
1463 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1464 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1465 memcpy((priv->rx_traffic +
1466 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1467 header, len);
1468 priv->rx_traffic_idx =
1469 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1470 }
1471}
1472
1473const char *get_mgmt_string(int cmd)
1474{
1475 switch (cmd) {
1476 IWL_CMD(MANAGEMENT_ASSOC_REQ);
1477 IWL_CMD(MANAGEMENT_ASSOC_RESP);
1478 IWL_CMD(MANAGEMENT_REASSOC_REQ);
1479 IWL_CMD(MANAGEMENT_REASSOC_RESP);
1480 IWL_CMD(MANAGEMENT_PROBE_REQ);
1481 IWL_CMD(MANAGEMENT_PROBE_RESP);
1482 IWL_CMD(MANAGEMENT_BEACON);
1483 IWL_CMD(MANAGEMENT_ATIM);
1484 IWL_CMD(MANAGEMENT_DISASSOC);
1485 IWL_CMD(MANAGEMENT_AUTH);
1486 IWL_CMD(MANAGEMENT_DEAUTH);
1487 IWL_CMD(MANAGEMENT_ACTION);
1488 default:
1489 return "UNKNOWN";
1490
1491 }
1492}
1493
1494const char *get_ctrl_string(int cmd)
1495{
1496 switch (cmd) {
1497 IWL_CMD(CONTROL_BACK_REQ);
1498 IWL_CMD(CONTROL_BACK);
1499 IWL_CMD(CONTROL_PSPOLL);
1500 IWL_CMD(CONTROL_RTS);
1501 IWL_CMD(CONTROL_CTS);
1502 IWL_CMD(CONTROL_ACK);
1503 IWL_CMD(CONTROL_CFEND);
1504 IWL_CMD(CONTROL_CFENDACK);
1505 default:
1506 return "UNKNOWN";
1507
1508 }
1509}
1510
1511void iwl_clear_traffic_stats(struct iwl_priv *priv)
1512{
1513 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1514 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1515}
1516
1517/*
1518 * If CONFIG_IWLWIFI_DEBUGFS is defined, the iwl_update_stats function
1519 * records all MGMT, CTRL and DATA packets for both the TX and RX paths.
1520 * Use debugfs to display the rx/tx statistics.
1521 * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT and CTRL
1522 * information will be recorded, but DATA packets will still be recorded
1523 * because iwl_led.c needs to control the LED blinking based on the
1524 * number of tx and rx data frames.
1525 *
1526 */
1527void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1528{
1529 struct traffic_stats *stats;
1530
1531 if (is_tx)
1532 stats = &priv->tx_stats;
1533 else
1534 stats = &priv->rx_stats;
1535
1536 if (ieee80211_is_mgmt(fc)) {
1537 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1538 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
1539 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
1540 break;
1541 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
1542 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
1543 break;
1544 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
1545 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
1546 break;
1547 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
1548 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
1549 break;
1550 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
1551 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
1552 break;
1553 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
1554 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
1555 break;
1556 case cpu_to_le16(IEEE80211_STYPE_BEACON):
1557 stats->mgmt[MANAGEMENT_BEACON]++;
1558 break;
1559 case cpu_to_le16(IEEE80211_STYPE_ATIM):
1560 stats->mgmt[MANAGEMENT_ATIM]++;
1561 break;
1562 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
1563 stats->mgmt[MANAGEMENT_DISASSOC]++;
1564 break;
1565 case cpu_to_le16(IEEE80211_STYPE_AUTH):
1566 stats->mgmt[MANAGEMENT_AUTH]++;
1567 break;
1568 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
1569 stats->mgmt[MANAGEMENT_DEAUTH]++;
1570 break;
1571 case cpu_to_le16(IEEE80211_STYPE_ACTION):
1572 stats->mgmt[MANAGEMENT_ACTION]++;
1573 break;
1574 }
1575 } else if (ieee80211_is_ctl(fc)) {
1576 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1577 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
1578 stats->ctrl[CONTROL_BACK_REQ]++;
1579 break;
1580 case cpu_to_le16(IEEE80211_STYPE_BACK):
1581 stats->ctrl[CONTROL_BACK]++;
1582 break;
1583 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
1584 stats->ctrl[CONTROL_PSPOLL]++;
1585 break;
1586 case cpu_to_le16(IEEE80211_STYPE_RTS):
1587 stats->ctrl[CONTROL_RTS]++;
1588 break;
1589 case cpu_to_le16(IEEE80211_STYPE_CTS):
1590 stats->ctrl[CONTROL_CTS]++;
1591 break;
1592 case cpu_to_le16(IEEE80211_STYPE_ACK):
1593 stats->ctrl[CONTROL_ACK]++;
1594 break;
1595 case cpu_to_le16(IEEE80211_STYPE_CFEND):
1596 stats->ctrl[CONTROL_CFEND]++;
1597 break;
1598 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
1599 stats->ctrl[CONTROL_CFENDACK]++;
1600 break;
1601 }
1602 } else {
1603 /* data */
1604 stats->data_cnt++;
1605 stats->data_bytes += len;
1606 }
1607}
1608#endif
1609
1610static void iwl_force_rf_reset(struct iwl_priv *priv)
1611{
1612 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1613 return;
1614
1615 if (!iwl_is_any_associated(priv)) {
1616 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
1617 return;
1618 }
1619 /*
1620	 * There is no easy and better way to force-reset the radio;
1621	 * the only known method is switching channels, which forces the
1622	 * radio to be reset and retuned.
1623	 * Use an internal short scan (single channel) operation to
1624	 * achieve this objective.
1625	 * The driver should reset the radio when a number of consecutive
1626	 * missed beacons, or any other uCode error condition, is detected.
1627 */
1628 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
1629 iwl_internal_short_hw_scan(priv);
1630}
1631
1632
1633int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
1634{
1635 struct iwl_force_reset *force_reset;
1636
1637 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1638 return -EINVAL;
1639
1640 if (mode >= IWL_MAX_FORCE_RESET) {
1641 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
1642 return -EINVAL;
1643 }
1644 force_reset = &priv->force_reset[mode];
1645 force_reset->reset_request_count++;
1646 if (!external) {
1647 if (force_reset->last_force_reset_jiffies &&
1648 time_after(force_reset->last_force_reset_jiffies +
1649 force_reset->reset_duration, jiffies)) {
1650 IWL_DEBUG_INFO(priv, "force reset rejected\n");
1651 force_reset->reset_reject_count++;
1652 return -EAGAIN;
1653 }
1654 }
1655 force_reset->reset_success_count++;
1656 force_reset->last_force_reset_jiffies = jiffies;
1657 IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
1658 switch (mode) {
1659 case IWL_RF_RESET:
1660 iwl_force_rf_reset(priv);
1661 break;
1662 case IWL_FW_RESET:
1663 /*
1664		 * if the request is external (e.g. from debugfs), then
1665		 * always perform the request regardless of the module
1666		 * parameter setting;
1667		 * if the request is internal (uCode error or driver-detected
1668		 * failure), then the fw_restart module parameter needs to
1669		 * be checked before performing the firmware reload
1670 */
1671 if (!external && !iwlagn_mod_params.restart_fw) {
1672 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
1673 "module parameter setting\n");
1674 break;
1675 }
1676 IWL_ERR(priv, "On demand firmware reload\n");
1677 iwlagn_fw_error(priv, true);
1678 break;
1679 }
1680 return 0;
1681}
1682
1683int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1684 enum nl80211_iftype newtype, bool newp2p)
1685{
1686 struct iwl_priv *priv = hw->priv;
1687 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1688 struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1689 struct iwl_rxon_context *tmp;
1690 enum nl80211_iftype newviftype = newtype;
1691 u32 interface_modes;
1692 int err;
1693
1694 newtype = ieee80211_iftype_p2p(newtype, newp2p);
1695
1696 mutex_lock(&priv->mutex);
1697
1698 if (!ctx->vif || !iwl_is_ready_rf(priv)) {
1699 /*
1700 * Huh? But wait ... this can maybe happen when
1701 * we're in the middle of a firmware restart!
1702 */
1703 err = -EBUSY;
1704 goto out;
1705 }
1706
1707 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1708
1709 if (!(interface_modes & BIT(newtype))) {
1710 err = -EBUSY;
1711 goto out;
1712 }
1713
1714 /*
1715 * Refuse a change that should be done by moving from the PAN
1716 * context to the BSS context instead, if the BSS context is
1717 * available and can support the new interface type.
1718 */
1719 if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
1720 (bss_ctx->interface_modes & BIT(newtype) ||
1721 bss_ctx->exclusive_interface_modes & BIT(newtype))) {
1722 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
1723 err = -EBUSY;
1724 goto out;
1725 }
1726
1727 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1728 for_each_context(priv, tmp) {
1729 if (ctx == tmp)
1730 continue;
1731
1732 if (!tmp->vif)
1733 continue;
1734
1735 /*
1736 * The current mode switch would be exclusive, but
1737 * another context is active ... refuse the switch.
1738 */
1739 err = -EBUSY;
1740 goto out;
1741 }
1742 }
1743
1744 /* success */
1745 iwl_teardown_interface(priv, vif, true);
1746 vif->type = newviftype;
1747 vif->p2p = newp2p;
1748 err = iwl_setup_interface(priv, ctx);
1749 WARN_ON(err);
1750 /*
1751 * We've switched internally, but submitting to the
1752 * device may have failed for some reason. Mask this
1753 * error, because otherwise mac80211 will not switch
1754 * (and set the interface type back) and we'll be
1755 * out of sync with it.
1756 */
1757 err = 0;
1758
1759 out:
1760 mutex_unlock(&priv->mutex);
1761 return err;
1762}
1763
1764/*
1765 * On every watchdog tick we check the (latest) time stamp. If it has not
1766 * changed during the timeout period and the queue is not empty, we reset the firmware.
1767 */
1768static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
1769{
1770 struct iwl_tx_queue *txq = &priv->txq[cnt];
1771 struct iwl_queue *q = &txq->q;
1772 unsigned long timeout;
1773 int ret;
1774
1775 if (q->read_ptr == q->write_ptr) {
1776 txq->time_stamp = jiffies;
1777 return 0;
1778 }
1779
1780 timeout = txq->time_stamp +
1781 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1782
1783 if (time_after(jiffies, timeout)) {
1784 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1785 q->id, priv->cfg->base_params->wd_timeout);
1786 ret = iwl_force_reset(priv, IWL_FW_RESET, false);
1787 return (ret == -EAGAIN) ? 0 : 1;
1788 }
1789
1790 return 0;
1791}
1792
1793/*
1794 * Making the watchdog tick a quarter of the timeout assures we will
1795 * discover a hung queue between timeout and 1.25*timeout
1796 */
1797#define IWL_WD_TICK(timeout) ((timeout) / 4)
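/*
 * A minimal, hedged sketch (not part of the original driver) of the timing
 * bound stated above: with a hypothetical wd_timeout of 2000 ms the tick is
 * 500 ms, so a queue that stalls right after a check is declared stuck at
 * most 2000 + 500 = 2500 ms later, i.e. 1.25 * timeout.
 */
#if 0	/* illustration only, not compiled */
static void example_wd_tick_bound(void)
{
	unsigned int timeout = 2000;			/* hypothetical wd_timeout, in ms */
	unsigned int tick = IWL_WD_TICK(timeout);	/* 2000 / 4 = 500 ms */
	unsigned int worst_case = timeout + tick;	/* 2500 ms = 1.25 * timeout */

	(void)worst_case;	/* illustration only */
}
#endif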
1798
1799/*
1800 * Watchdog timer callback: we check each tx queue for being stuck; if hung,
1801 * we reset the firmware. If everything is fine, just rearm the timer.
1802 */
1803void iwl_bg_watchdog(unsigned long data)
1804{
1805 struct iwl_priv *priv = (struct iwl_priv *)data;
1806 int cnt;
1807 unsigned long timeout;
1808
1809 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1810 return;
1811
1812 timeout = priv->cfg->base_params->wd_timeout;
1813 if (timeout == 0)
1814 return;
1815
1816 /* monitor and check for stuck cmd queue */
1817 if (iwl_check_stuck_queue(priv, priv->cmd_queue))
1818 return;
1819
1820 /* monitor and check for other stuck queues */
1821 if (iwl_is_any_associated(priv)) {
1822 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1823 /* skip as we already checked the command queue */
1824 if (cnt == priv->cmd_queue)
1825 continue;
1826 if (iwl_check_stuck_queue(priv, cnt))
1827 return;
1828 }
1829 }
1830
1831 mod_timer(&priv->watchdog, jiffies +
1832 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1833}
1834
1835void iwl_setup_watchdog(struct iwl_priv *priv)
1836{
1837 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1838
1839 if (timeout && !iwlagn_mod_params.wd_disable)
1840 mod_timer(&priv->watchdog,
1841 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1842 else
1843 del_timer(&priv->watchdog);
1844}
1845
1846/*
1847 * extended beacon time format
1848 * time in usec will be changed into a 32-bit value in extended:internal format
1849 * the extended part is the beacon counts
1850 * the internal part is the time in usec within one beacon interval
1851 */
1852u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
1853{
1854 u32 quot;
1855 u32 rem;
1856 u32 interval = beacon_interval * TIME_UNIT;
1857
1858 if (!interval || !usec)
1859 return 0;
1860
1861 quot = (usec / interval) &
1862 (iwl_beacon_time_mask_high(priv,
1863 priv->hw_params.beacon_time_tsf_bits) >>
1864 priv->hw_params.beacon_time_tsf_bits);
1865 rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
1866 priv->hw_params.beacon_time_tsf_bits);
1867
1868 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
1869}
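/*
 * A minimal, hedged sketch (not part of the original driver) of the
 * extended:internal packing described above. It assumes a hypothetical
 * 22-bit "internal" (time in usec) field and a 100 TU beacon interval
 * (100 * TIME_UNIT = 102400 usec): 250000 usec splits into 2 full beacon
 * intervals (the extended part) plus a 45200 usec remainder (the internal
 * part). The real bit split comes from hw_params.beacon_time_tsf_bits.
 */
#if 0	/* illustration only, not compiled */
static u32 example_usecs_to_beacons(void)
{
	u32 usec = 250000;
	u32 interval = 100 * TIME_UNIT;	/* 102400 usec per beacon */
	u32 low_bits = 22;		/* hypothetical bit split */
	u32 quot = usec / interval;	/* 2 full beacon intervals */
	u32 rem = usec % interval;	/* 45200 usec into the current one */

	return (quot << low_bits) + rem;	/* extended:internal value */
}
#endif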
1870
1871/* base is usually what we get from the uCode with each received frame,
1872 * the same as the HW timer counter counting down
1873 */
1874__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
1875 u32 addon, u32 beacon_interval)
1876{
1877 u32 base_low = base & iwl_beacon_time_mask_low(priv,
1878 priv->hw_params.beacon_time_tsf_bits);
1879 u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
1880 priv->hw_params.beacon_time_tsf_bits);
1881 u32 interval = beacon_interval * TIME_UNIT;
1882 u32 res = (base & iwl_beacon_time_mask_high(priv,
1883 priv->hw_params.beacon_time_tsf_bits)) +
1884 (addon & iwl_beacon_time_mask_high(priv,
1885 priv->hw_params.beacon_time_tsf_bits));
1886
1887 if (base_low > addon_low)
1888 res += base_low - addon_low;
1889 else if (base_low < addon_low) {
1890 res += interval + base_low - addon_low;
1891 res += (1 << priv->hw_params.beacon_time_tsf_bits);
1892 } else
1893 res += (1 << priv->hw_params.beacon_time_tsf_bits);
1894
1895 return cpu_to_le32(res);
1896}
1897
1898#ifdef CONFIG_PM
1899
1900int iwl_suspend(struct iwl_priv *priv)
1901{
1902 /*
1903	 * This function is called when the system goes into the suspend state.
1904	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
1905	 * first, but since iwl_mac_stop() has no knowledge of who the caller is,
1906	 * it will not call apm_ops.stop() to stop the DMA operation.
1907	 * Call apm_ops.stop() here to make sure we stop the DMA.
1908 *
1909 * But of course ... if we have configured WoWLAN then we did other
1910 * things already :-)
1911 */
1912 if (!priv->wowlan)
1913 iwl_apm_stop(priv);
1914
1915 return 0;
1916}
1917
1918int iwl_resume(struct iwl_priv *priv)
1919{
1920 bool hw_rfkill = false;
1921
1922 iwl_enable_interrupts(priv);
1923
1924 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
1925 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
1926 hw_rfkill = true;
1927
1928 if (hw_rfkill)
1929 set_bit(STATUS_RF_KILL_HW, &priv->status);
1930 else
1931 clear_bit(STATUS_RF_KILL_HW, &priv->status);
1932
1933 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
1934
1935 return 0;
1936}
1937
1938#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
new file mode 100644
index 00000000000..02817a43855
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -0,0 +1,521 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_core_h__
64#define __iwl_core_h__
65
66#include "iwl-dev.h"
67
68/************************
69 * forward declarations *
70 ************************/
71struct iwl_host_cmd;
72struct iwl_cmd;
73
74
75#define IWLWIFI_VERSION "in-tree:"
76#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
77#define DRV_AUTHOR "<ilw@linux.intel.com>"
78
79#define TIME_UNIT 1024
80
81#define IWL_CMD(x) case x: return #x
82
83struct iwl_lib_ops {
84 /* set hw dependent parameters */
85 int (*set_hw_params)(struct iwl_priv *priv);
86 /* setup BT Rx handler */
87 void (*bt_rx_handler_setup)(struct iwl_priv *priv);
88 /* setup BT related deferred work */
89 void (*bt_setup_deferred_work)(struct iwl_priv *priv);
90 /* cancel deferred work */
91 void (*cancel_deferred_work)(struct iwl_priv *priv);
92 int (*set_channel_switch)(struct iwl_priv *priv,
93 struct ieee80211_channel_switch *ch_switch);
94 /* device specific configuration */
95 void (*nic_config)(struct iwl_priv *priv);
96
97 /* eeprom operations (as defined in iwl-eeprom.h) */
98 struct iwl_eeprom_ops eeprom_ops;
99
100 /* temperature */
101 void (*temperature)(struct iwl_priv *priv);
102};
103
104struct iwl_mod_params {
105 int sw_crypto; /* def: 0 = using hardware encryption */
106 int num_of_queues; /* def: HW dependent */
107 int disable_11n; /* def: 0 = 11n capabilities enabled */
108 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
109 int antenna; /* def: 0 = both antennas (use diversity) */
110 int restart_fw; /* def: 1 = restart firmware */
111 bool plcp_check; /* def: true = enable plcp health check */
112 bool ack_check; /* def: false = disable ack health check */
113 bool wd_disable; /* def: false = enable stuck queue check */
114 bool bt_coex_active; /* def: true = enable bt coex */
115 int led_mode; /* def: 0 = system default */
116 bool no_sleep_autoadjust; /* def: true = disable autoadjust */
117 bool power_save; /* def: false = disable power save */
118 int power_level; /* def: 1 = power level */
119};
120
121/*
122 * @max_ll_items: max number of OTP blocks
123 * @shadow_ram_support: shadow support for OTP memory
124 * @led_compensation: compensate the LED on/off time per HW according
125 *	to the deviation, to achieve the desired LED blink frequency.
126 *	The detailed algorithm is described in iwl-led.c
127 * @chain_noise_num_beacons: number of beacons used to compute chain noise
128 * @adv_thermal_throttle: support advanced thermal throttling
129 * @support_ct_kill_exit: support ct kill exit condition
130 * @support_wimax_coexist: support wimax/wifi co-exist
131 * @plcp_delta_threshold: plcp error rate threshold used to trigger
132 *	radio tuning when a high plcp error rate is received
133 * @chain_noise_scale: default chain noise scale used for gain computation
134 * @wd_timeout: TX queues watchdog timeout
135 * @temperature_kelvin: temperature reported by uCode in kelvin
136 * @max_event_log_size: size of the event log buffer for uCode event logging
137 * @shadow_reg_enable: HW shadow register bit
138 * @no_idle_support: do not support idle mode
139 */
140struct iwl_base_params {
141 int eeprom_size;
142 int num_of_queues; /* def: HW dependent */
143 int num_of_ampdu_queues;/* def: HW dependent */
144 /* for iwl_apm_init() */
145 u32 pll_cfg_val;
146
147 const u16 max_ll_items;
148 const bool shadow_ram_support;
149 u16 led_compensation;
150 int chain_noise_num_beacons;
151 bool adv_thermal_throttle;
152 bool support_ct_kill_exit;
153 const bool support_wimax_coexist;
154 u8 plcp_delta_threshold;
155 s32 chain_noise_scale;
156 unsigned int wd_timeout;
157 bool temperature_kelvin;
158 u32 max_event_log_size;
159 const bool shadow_reg_enable;
160 const bool no_idle_support;
161};
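/*
 * A hypothetical, hedged example (not a real device entry) showing how a
 * device family might populate iwl_base_params; every value below is made
 * up purely for illustration and does not describe any actual hardware.
 */
#if 0	/* illustration only, not compiled */
static struct iwl_base_params example_base_params = {
	.eeprom_size		= 2048,
	.num_of_queues		= 20,
	.pll_cfg_val		= 0,
	.led_compensation	= 51,
	.chain_noise_num_beacons = 250,
	.plcp_delta_threshold	= 50,
	.chain_noise_scale	= 1000,
	.wd_timeout		= 2000,
	.max_event_log_size	= 512,
	.shadow_ram_support	= true,
};
#endif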
162/*
163 * @advanced_bt_coexist: support advanced bt coexist
164 * @bt_init_traffic_load: specify initial bt traffic load
165 * @bt_prio_boost: default bt priority boost value
166 * @agg_time_limit: maximum number of uSec in aggregation
167 * @ampdu_factor: Maximum A-MPDU length factor
168 * @ampdu_density: Minimum A-MPDU spacing
169 * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
170 */
171struct iwl_bt_params {
172 bool advanced_bt_coexist;
173 u8 bt_init_traffic_load;
174 u8 bt_prio_boost;
175 u16 agg_time_limit;
176 u8 ampdu_factor;
177 u8 ampdu_density;
178 bool bt_sco_disable;
179 bool bt_session_2;
180};
181/*
182 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
183 */
184struct iwl_ht_params {
185 const bool ht_greenfield_support; /* if used set to true */
186 bool use_rts_for_aggregation;
187 enum ieee80211_smps_mode smps_mode;
188};
189
190/**
191 * struct iwl_cfg
192 * @name: Official name of the device
193 * @fw_name_pre: Firmware filename prefix. The api version and extension
194 * (.ucode) will be added to filename before loading from disk. The
195 * filename is constructed as fw_name_pre<api>.ucode.
196 * @ucode_api_max: Highest version of uCode API supported by driver.
197 * @ucode_api_min: Lowest version of uCode API supported by driver.
198 * @valid_tx_ant: valid transmit antenna
199 * @valid_rx_ant: valid receive antenna
200 * @sku: sku information from EEPROM
201 * @eeprom_ver: EEPROM version
202 * @eeprom_calib_ver: EEPROM calibration version
203 * @lib: pointer to the lib ops
204 * @additional_nic_config: additional nic configuration
205 * @base_params: pointer to basic parameters
206 * @ht_params: pointer to HT parameters
207 * @bt_params: pointer to bt parameters
208 * @pa_type: used by 6000 series only to identify the type of Power Amplifier
209 * @need_dc_calib: need to perform init dc calibration
210 * @need_temp_offset_calib: need to perform temperature offset calibration
211 * @scan_antennas: available antenna for scan operation
212 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
213 * @adv_pm: advanced power management
214 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
215 * @internal_wimax_coex: internal wifi/wimax combo device
216 * @iq_invert: I/Q inversion
217 *
218 * We enable the driver to be backward compatible wrt API version. The
219 * driver specifies which APIs it supports (with @ucode_api_max being the
220 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
221 * it has a supported API version. The firmware's API version will be
222 * stored in @iwl_priv, enabling the driver to make runtime changes based
223 * on firmware version used.
224 *
225 * For example,
226 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
227 * Driver interacts with Firmware API version >= 2.
228 * } else {
229 * Driver interacts with Firmware API version 1.
230 * }
231 *
232 * The ideal usage of this infrastructure is to treat a new ucode API
233 * release as a new hardware revision.
234 */
235struct iwl_cfg {
236 /* params specific to an individual device within a device family */
237 const char *name;
238 const char *fw_name_pre;
239 const unsigned int ucode_api_max;
240 const unsigned int ucode_api_min;
241 u8 valid_tx_ant;
242 u8 valid_rx_ant;
243 u16 sku;
244 u16 eeprom_ver;
245 u16 eeprom_calib_ver;
246 const struct iwl_lib_ops *lib;
247 void (*additional_nic_config)(struct iwl_priv *priv);
248 /* params not likely to change within a device family */
249 struct iwl_base_params *base_params;
250 /* params likely to change within a device family */
251 struct iwl_ht_params *ht_params;
252 struct iwl_bt_params *bt_params;
253 enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
254 const bool need_dc_calib; /* if used set to true */
255 const bool need_temp_offset_calib; /* if used set to true */
256 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
257 enum iwl_led_mode led_mode;
258 const bool adv_pm;
259 const bool rx_with_siso_diversity;
260 const bool internal_wimax_coex;
261 const bool iq_invert;
262};
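/*
 * Illustrative sketch, not part of the original header: the kernel-doc above
 * states that the firmware filename is built as fw_name_pre<api>.ucode,
 * starting from @ucode_api_max and falling back towards @ucode_api_min.
 * A minimal, hypothetical helper showing that construction (the function
 * name and buffer handling here are assumptions, not driver API):
 */
#if 0	/* example only -- not compiled */
static void example_build_fw_name(const struct iwl_cfg *cfg,
				  char *buf, size_t len)
{
	int api = cfg->ucode_api_max;	/* try the highest supported API first */

	/* e.g. fw_name_pre "iwlwifi-1000-" and api 5 -> "iwlwifi-1000-5.ucode" */
	snprintf(buf, len, "%s%d%s", cfg->fw_name_pre, api, ".ucode");
}
#endif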
263
264/***************************
265 * L i b *
266 ***************************/
267
268int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
269 const struct ieee80211_tx_queue_params *params);
270int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
271void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
272 int hw_decrypt);
273int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
274int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
275int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
276 struct iwl_rxon_context *ctx);
277void iwl_set_flags_for_band(struct iwl_priv *priv,
278 struct iwl_rxon_context *ctx,
279 enum ieee80211_band band,
280 struct ieee80211_vif *vif);
281u8 iwl_get_single_channel_number(struct iwl_priv *priv,
282 enum ieee80211_band band);
283void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
284bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
285 struct iwl_rxon_context *ctx,
286 struct ieee80211_sta_ht_cap *ht_cap);
287void iwl_connection_init_rx_config(struct iwl_priv *priv,
288 struct iwl_rxon_context *ctx);
289void iwl_set_rate(struct iwl_priv *priv);
290void iwl_irq_handle_error(struct iwl_priv *priv);
291int iwl_mac_add_interface(struct ieee80211_hw *hw,
292 struct ieee80211_vif *vif);
293void iwl_mac_remove_interface(struct ieee80211_hw *hw,
294 struct ieee80211_vif *vif);
295int iwl_mac_change_interface(struct ieee80211_hw *hw,
296 struct ieee80211_vif *vif,
297 enum nl80211_iftype newtype, bool newp2p);
298#ifdef CONFIG_IWLWIFI_DEBUGFS
299int iwl_alloc_traffic_mem(struct iwl_priv *priv);
300void iwl_free_traffic_mem(struct iwl_priv *priv);
301void iwl_reset_traffic_log(struct iwl_priv *priv);
302void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
303 u16 length, struct ieee80211_hdr *header);
304void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
305 u16 length, struct ieee80211_hdr *header);
306const char *get_mgmt_string(int cmd);
307const char *get_ctrl_string(int cmd);
308void iwl_clear_traffic_stats(struct iwl_priv *priv);
309void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
310 u16 len);
311#else
312static inline int iwl_alloc_traffic_mem(struct iwl_priv *priv)
313{
314 return 0;
315}
316static inline void iwl_free_traffic_mem(struct iwl_priv *priv)
317{
318}
319static inline void iwl_reset_traffic_log(struct iwl_priv *priv)
320{
321}
322static inline void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
323 u16 length, struct ieee80211_hdr *header)
324{
325}
326static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
327 u16 length, struct ieee80211_hdr *header)
328{
329}
330static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
331 __le16 fc, u16 len)
332{
333}
334#endif
335
336/*****************************************************
337* RX
338******************************************************/
339void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
340
341void iwl_setup_watchdog(struct iwl_priv *priv);
342/*****************************************************
343 * TX power
344 ****************************************************/
345int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
346
347/*******************************************************************************
348 * Scanning
349 ******************************************************************************/
350void iwl_init_scan_params(struct iwl_priv *priv);
351int iwl_scan_cancel(struct iwl_priv *priv);
352int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
353void iwl_force_scan_end(struct iwl_priv *priv);
354int iwl_mac_hw_scan(struct ieee80211_hw *hw,
355 struct ieee80211_vif *vif,
356 struct cfg80211_scan_request *req);
357void iwl_internal_short_hw_scan(struct iwl_priv *priv);
358int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
359u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
360 const u8 *ta, const u8 *ie, int ie_len, int left);
361void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
362u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
363 enum ieee80211_band band,
364 u8 n_probes);
365u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
366 enum ieee80211_band band,
367 struct ieee80211_vif *vif);
368void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
369void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
370int __must_check iwl_scan_initiate(struct iwl_priv *priv,
371 struct ieee80211_vif *vif,
372 enum iwl_scan_type scan_type,
373 enum ieee80211_band band);
374
375/* For faster active scanning, scan will move to the next channel if fewer than
376 * PLCP_QUIET_THRESH packets are heard on this channel within
377 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
378 * time if it's a quiet channel (nothing responded to our probe, and there's
379 * no other traffic).
380 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
381#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
382#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
383
384#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
385
386/*****************************************************
387 * S e n d i n g H o s t C o m m a n d s *
388 *****************************************************/
389
390const char *get_cmd_string(u8 cmd);
391void iwl_bg_watchdog(unsigned long data);
392u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
393__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
394 u32 addon, u32 beacon_interval);
395
396#ifdef CONFIG_PM
397int iwl_suspend(struct iwl_priv *priv);
398int iwl_resume(struct iwl_priv *priv);
399#endif /* CONFIG_PM */
400
401int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg);
402void __devexit iwl_remove(struct iwl_priv * priv);
403
404/*****************************************************
405* Error Handling Debugging
406******************************************************/
407void iwl_dump_nic_error_log(struct iwl_priv *priv);
408int iwl_dump_nic_event_log(struct iwl_priv *priv,
409 bool full_log, char **buf, bool display);
410void iwl_dump_csr(struct iwl_priv *priv);
411int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
412#ifdef CONFIG_IWLWIFI_DEBUG
413void iwl_print_rx_config_cmd(struct iwl_priv *priv,
414 struct iwl_rxon_context *ctx);
415#else
416static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
417 struct iwl_rxon_context *ctx)
418{
419}
420#endif
421
422void iwl_clear_isr_stats(struct iwl_priv *priv);
423
424/*****************************************************
425* GEOS
426******************************************************/
427int iwlcore_init_geos(struct iwl_priv *priv);
428void iwlcore_free_geos(struct iwl_priv *priv);
429
430/*************** DRIVER STATUS FUNCTIONS *****/
431
432#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
433/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
434#define STATUS_INT_ENABLED 2
435#define STATUS_RF_KILL_HW 3
436#define STATUS_CT_KILL 4
437#define STATUS_INIT 5
438#define STATUS_ALIVE 6
439#define STATUS_READY 7
440#define STATUS_TEMPERATURE 8
441#define STATUS_GEO_CONFIGURED 9
442#define STATUS_EXIT_PENDING 10
443#define STATUS_STATISTICS 12
444#define STATUS_SCANNING 13
445#define STATUS_SCAN_ABORTING 14
446#define STATUS_SCAN_HW 15
447#define STATUS_POWER_PMI 16
448#define STATUS_FW_ERROR 17
449#define STATUS_DEVICE_ENABLED 18
450#define STATUS_CHANNEL_SWITCH_PENDING 19
451
452
453static inline int iwl_is_ready(struct iwl_priv *priv)
454{
455 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
456 * set but EXIT_PENDING is not */
457 return test_bit(STATUS_READY, &priv->status) &&
458 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
459 !test_bit(STATUS_EXIT_PENDING, &priv->status);
460}
461
462static inline int iwl_is_alive(struct iwl_priv *priv)
463{
464 return test_bit(STATUS_ALIVE, &priv->status);
465}
466
467static inline int iwl_is_init(struct iwl_priv *priv)
468{
469 return test_bit(STATUS_INIT, &priv->status);
470}
471
472static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
473{
474 return test_bit(STATUS_RF_KILL_HW, &priv->status);
475}
476
477static inline int iwl_is_rfkill(struct iwl_priv *priv)
478{
479 return iwl_is_rfkill_hw(priv);
480}
481
482static inline int iwl_is_ctkill(struct iwl_priv *priv)
483{
484 return test_bit(STATUS_CT_KILL, &priv->status);
485}
486
487static inline int iwl_is_ready_rf(struct iwl_priv *priv)
488{
489
490 if (iwl_is_rfkill(priv))
491 return 0;
492
493 return iwl_is_ready(priv);
494}
495
496extern void iwl_send_bt_config(struct iwl_priv *priv);
497extern int iwl_send_statistics_request(struct iwl_priv *priv,
498 u8 flags, bool clear);
499void iwl_apm_stop(struct iwl_priv *priv);
500int iwl_apm_init(struct iwl_priv *priv);
501
502int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
503
504static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
505 struct iwl_priv *priv, enum ieee80211_band band)
506{
507 return priv->hw->wiphy->bands[band];
508}
509
510static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
511{
512 return priv->cfg->bt_params &&
513 priv->cfg->bt_params->advanced_bt_coexist;
514}
515
516extern bool bt_siso_mode;
517
518
519void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
520
521#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
new file mode 100644
index 00000000000..ec1485b2d3f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -0,0 +1,2750 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/slab.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/debugfs.h>
33
34#include <linux/ieee80211.h>
35#include <net/mac80211.h>
36
37
38#include "iwl-dev.h"
39#include "iwl-debug.h"
40#include "iwl-core.h"
41#include "iwl-io.h"
42#include "iwl-agn.h"
43
44/* create and remove of files */
45#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
46 if (!debugfs_create_file(#name, mode, parent, priv, \
47 &iwl_dbgfs_##name##_ops)) \
48 goto err; \
49} while (0)
50
51#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
52 struct dentry *__tmp; \
53 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
54 parent, ptr); \
55 if (IS_ERR(__tmp) || !__tmp) \
56 goto err; \
57} while (0)
58
59#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
60 struct dentry *__tmp; \
61 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
62 parent, ptr); \
63 if (IS_ERR(__tmp) || !__tmp) \
64 goto err; \
65} while (0)
66
67/* file operation */
68#define DEBUGFS_READ_FUNC(name) \
69static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
70 char __user *user_buf, \
71 size_t count, loff_t *ppos);
72
73#define DEBUGFS_WRITE_FUNC(name) \
74static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
75 const char __user *user_buf, \
76 size_t count, loff_t *ppos);
77
78
79static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
80{
81 file->private_data = inode->i_private;
82 return 0;
83}
84
85#define DEBUGFS_READ_FILE_OPS(name) \
86 DEBUGFS_READ_FUNC(name); \
87static const struct file_operations iwl_dbgfs_##name##_ops = { \
88 .read = iwl_dbgfs_##name##_read, \
89 .open = iwl_dbgfs_open_file_generic, \
90 .llseek = generic_file_llseek, \
91};
92
93#define DEBUGFS_WRITE_FILE_OPS(name) \
94 DEBUGFS_WRITE_FUNC(name); \
95static const struct file_operations iwl_dbgfs_##name##_ops = { \
96 .write = iwl_dbgfs_##name##_write, \
97 .open = iwl_dbgfs_open_file_generic, \
98 .llseek = generic_file_llseek, \
99};
100
101
102#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
103 DEBUGFS_READ_FUNC(name); \
104 DEBUGFS_WRITE_FUNC(name); \
105static const struct file_operations iwl_dbgfs_##name##_ops = { \
106 .write = iwl_dbgfs_##name##_write, \
107 .read = iwl_dbgfs_##name##_read, \
108 .open = iwl_dbgfs_open_file_generic, \
109 .llseek = generic_file_llseek, \
110};
111
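/*
 * Illustrative sketch, not part of the original file: the macros above
 * generate the read/write handlers' file_operations, and DEBUGFS_ADD_FILE()
 * later wires a file into a debugfs directory.  A hypothetical read-only
 * "example" entry (the name and its contents are assumptions) would follow
 * this pattern:
 */
#if 0	/* example only -- not compiled */
static ssize_t iwl_dbgfs_example_read(struct file *file,
				      char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[32];
	int pos = scnprintf(buf, sizeof(buf), "stations: %d\n",
			    priv->num_stations);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

DEBUGFS_READ_FILE_OPS(example);

/* and, in the directory-creation code where "priv" and a parent dentry
 * "dir_data" are in scope:
 *
 *	DEBUGFS_ADD_FILE(example, dir_data, S_IRUSR);
 */
#endif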
112static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
113 char __user *user_buf,
114 size_t count, loff_t *ppos) {
115
116 struct iwl_priv *priv = file->private_data;
117 char *buf;
118 int pos = 0;
119
120 int cnt;
121 ssize_t ret;
122 const size_t bufsz = 100 +
123 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
124 buf = kzalloc(bufsz, GFP_KERNEL);
125 if (!buf)
126 return -ENOMEM;
127 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
128 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
129 pos += scnprintf(buf + pos, bufsz - pos,
130 "\t%25s\t\t: %u\n",
131 get_mgmt_string(cnt),
132 priv->tx_stats.mgmt[cnt]);
133 }
134 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
135 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
136 pos += scnprintf(buf + pos, bufsz - pos,
137 "\t%25s\t\t: %u\n",
138 get_ctrl_string(cnt),
139 priv->tx_stats.ctrl[cnt]);
140 }
141 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
142 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
143 priv->tx_stats.data_cnt);
144 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
145 priv->tx_stats.data_bytes);
146 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
147 kfree(buf);
148 return ret;
149}
150
151static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file,
152 const char __user *user_buf,
153 size_t count, loff_t *ppos)
154{
155 struct iwl_priv *priv = file->private_data;
156 u32 clear_flag;
157 char buf[8];
158 int buf_size;
159
160 memset(buf, 0, sizeof(buf));
161 buf_size = min(count, sizeof(buf) - 1);
162 if (copy_from_user(buf, user_buf, buf_size))
163 return -EFAULT;
164 if (sscanf(buf, "%x", &clear_flag) != 1)
165 return -EFAULT;
166 iwl_clear_traffic_stats(priv);
167
168 return count;
169}
170
171static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
172 char __user *user_buf,
173 size_t count, loff_t *ppos) {
174
175 struct iwl_priv *priv = file->private_data;
176 char *buf;
177 int pos = 0;
178 int cnt;
179 ssize_t ret;
180 const size_t bufsz = 100 +
181 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
182 buf = kzalloc(bufsz, GFP_KERNEL);
183 if (!buf)
184 return -ENOMEM;
185
186 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
187 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
188 pos += scnprintf(buf + pos, bufsz - pos,
189 "\t%25s\t\t: %u\n",
190 get_mgmt_string(cnt),
191 priv->rx_stats.mgmt[cnt]);
192 }
193 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
194 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
195 pos += scnprintf(buf + pos, bufsz - pos,
196 "\t%25s\t\t: %u\n",
197 get_ctrl_string(cnt),
198 priv->rx_stats.ctrl[cnt]);
199 }
200 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
201 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
202 priv->rx_stats.data_cnt);
203 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
204 priv->rx_stats.data_bytes);
205
206 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
207 kfree(buf);
208 return ret;
209}
210
211static ssize_t iwl_dbgfs_sram_read(struct file *file,
212 char __user *user_buf,
213 size_t count, loff_t *ppos)
214{
215 u32 val = 0;
216 char *buf;
217 ssize_t ret;
218 int i = 0;
219 bool device_format = false;
220 int offset = 0;
221 int len = 0;
222 int pos = 0;
223 int sram;
224 struct iwl_priv *priv = file->private_data;
225 size_t bufsz;
226
227 /* default is to dump the entire data segment */
228 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
229 priv->dbgfs_sram_offset = 0x800000;
230 if (priv->ucode_type == IWL_UCODE_INIT)
231 priv->dbgfs_sram_len = priv->ucode_init.data.len;
232 else
233 priv->dbgfs_sram_len = priv->ucode_rt.data.len;
234 }
235 len = priv->dbgfs_sram_len;
236
237 if (len == -4) {
238 device_format = true;
239 len = 4;
240 }
241
242 bufsz = 50 + len * 4;
243 buf = kmalloc(bufsz, GFP_KERNEL);
244 if (!buf)
245 return -ENOMEM;
246
247 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
248 len);
249 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
250 priv->dbgfs_sram_offset);
251
252 /* adjust sram address since reads are only on even u32 boundaries */
253 offset = priv->dbgfs_sram_offset & 0x3;
254 sram = priv->dbgfs_sram_offset & ~0x3;
255
256 /* read the first u32 from sram */
257 val = iwl_read_targ_mem(priv, sram);
258
259 for (; len; len--) {
260 /* put the address at the start of every line */
261 if (i == 0)
262 pos += scnprintf(buf + pos, bufsz - pos,
263 "%08X: ", sram + offset);
264
265 if (device_format)
266 pos += scnprintf(buf + pos, bufsz - pos,
267 "%02x", (val >> (8 * (3 - offset))) & 0xff);
268 else
269 pos += scnprintf(buf + pos, bufsz - pos,
270 "%02x ", (val >> (8 * offset)) & 0xff);
271
272 /* if all bytes processed, read the next u32 from sram */
273 if (++offset == 4) {
274 sram += 4;
275 offset = 0;
276 val = iwl_read_targ_mem(priv, sram);
277 }
278
279 /* put in extra spaces and split lines for human readability */
280 if (++i == 16) {
281 i = 0;
282 pos += scnprintf(buf + pos, bufsz - pos, "\n");
283 } else if (!(i & 7)) {
284 pos += scnprintf(buf + pos, bufsz - pos, " ");
285 } else if (!(i & 3)) {
286 pos += scnprintf(buf + pos, bufsz - pos, " ");
287 }
288 }
289 if (i)
290 pos += scnprintf(buf + pos, bufsz - pos, "\n");
291
292 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
293 kfree(buf);
294 return ret;
295}
296
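/*
 * Added note (not in the original source): the "sram" debugfs entry below
 * accepts "<offset>,<len>" in hex to select a dump window, a single hex
 * "<offset>" to request one device-formatted word (stored as len == -4 and
 * handled specially by the read handler above), or any other input to reset
 * to the default of dumping the whole data segment.
 */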
297static ssize_t iwl_dbgfs_sram_write(struct file *file,
298 const char __user *user_buf,
299 size_t count, loff_t *ppos)
300{
301 struct iwl_priv *priv = file->private_data;
302 char buf[64];
303 int buf_size;
304 u32 offset, len;
305
306 memset(buf, 0, sizeof(buf));
307 buf_size = min(count, sizeof(buf) - 1);
308 if (copy_from_user(buf, user_buf, buf_size))
309 return -EFAULT;
310
311 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
312 priv->dbgfs_sram_offset = offset;
313 priv->dbgfs_sram_len = len;
314 } else if (sscanf(buf, "%x", &offset) == 1) {
315 priv->dbgfs_sram_offset = offset;
316 priv->dbgfs_sram_len = -4;
317 } else {
318 priv->dbgfs_sram_offset = 0;
319 priv->dbgfs_sram_len = 0;
320 }
321
322 return count;
323}
324
325static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
326 char __user *user_buf,
327 size_t count, loff_t *ppos)
328{
329 struct iwl_priv *priv = file->private_data;
330
331 if (!priv->wowlan_sram)
332 return -ENODATA;
333
334 return simple_read_from_buffer(user_buf, count, ppos,
335 priv->wowlan_sram,
336 priv->ucode_wowlan.data.len);
337}
338static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
339 size_t count, loff_t *ppos)
340{
341 struct iwl_priv *priv = file->private_data;
342 struct iwl_station_entry *station;
343 int max_sta = priv->hw_params.max_stations;
344 char *buf;
345 int i, j, pos = 0;
346 ssize_t ret;
347 /* Add 30 for initial string */
348 const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
349
350 buf = kmalloc(bufsz, GFP_KERNEL);
351 if (!buf)
352 return -ENOMEM;
353
354 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
355 priv->num_stations);
356
357 for (i = 0; i < max_sta; i++) {
358 station = &priv->stations[i];
359 if (!station->used)
360 continue;
361 pos += scnprintf(buf + pos, bufsz - pos,
362 "station %d - addr: %pM, flags: %#x\n",
363 i, station->sta.sta.addr,
364 station->sta.station_flags_msk);
365 pos += scnprintf(buf + pos, bufsz - pos,
366 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
367 pos += scnprintf(buf + pos, bufsz - pos,
368 "start_idx\tbitmap\t\t\trate_n_flags\n");
369
370 for (j = 0; j < MAX_TID_COUNT; j++) {
371 pos += scnprintf(buf + pos, bufsz - pos,
372 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
373 j, station->tid[j].seq_number,
374 station->tid[j].agg.txq_id,
375 station->tid[j].agg.frame_count,
376 station->tid[j].tfds_in_queue,
377 station->tid[j].agg.start_idx,
378 station->tid[j].agg.bitmap,
379 station->tid[j].agg.rate_n_flags);
380
381 if (station->tid[j].agg.wait_for_ba)
382 pos += scnprintf(buf + pos, bufsz - pos,
383 " - waitforba");
384 pos += scnprintf(buf + pos, bufsz - pos, "\n");
385 }
386
387 pos += scnprintf(buf + pos, bufsz - pos, "\n");
388 }
389
390 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
391 kfree(buf);
392 return ret;
393}
394
395static ssize_t iwl_dbgfs_nvm_read(struct file *file,
396 char __user *user_buf,
397 size_t count,
398 loff_t *ppos)
399{
400 ssize_t ret;
401 struct iwl_priv *priv = file->private_data;
402 int pos = 0, ofs = 0, buf_size = 0;
403 const u8 *ptr;
404 char *buf;
405 u16 eeprom_ver;
406 size_t eeprom_len = priv->cfg->base_params->eeprom_size;
407 buf_size = 4 * eeprom_len + 256;
408
409 if (eeprom_len % 16) {
410 IWL_ERR(priv, "NVM size is not multiple of 16.\n");
411 return -ENODATA;
412 }
413
414 ptr = priv->eeprom;
415 if (!ptr) {
416 IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
417 return -ENOMEM;
418 }
419
420 /* 4 characters for byte 0xYY */
421 buf = kzalloc(buf_size, GFP_KERNEL);
422 if (!buf) {
423 IWL_ERR(priv, "Can not allocate Buffer\n");
424 return -ENOMEM;
425 }
426 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
427 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
428 "version: 0x%x\n",
429 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
430 ? "OTP" : "EEPROM", eeprom_ver);
431 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
432 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
433 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
434 buf_size - pos, 0);
435 pos += strlen(buf + pos);
436 if (buf_size - pos > 0)
437 buf[pos++] = '\n';
438 }
439
440 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
441 kfree(buf);
442 return ret;
443}
444
445static ssize_t iwl_dbgfs_log_event_read(struct file *file,
446 char __user *user_buf,
447 size_t count, loff_t *ppos)
448{
449 struct iwl_priv *priv = file->private_data;
450 char *buf;
451 int pos = 0;
452 ssize_t ret = -ENOMEM;
453
454 ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
455 if (buf) {
456 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
457 kfree(buf);
458 }
459 return ret;
460}
461
462static ssize_t iwl_dbgfs_log_event_write(struct file *file,
463 const char __user *user_buf,
464 size_t count, loff_t *ppos)
465{
466 struct iwl_priv *priv = file->private_data;
467 u32 event_log_flag;
468 char buf[8];
469 int buf_size;
470
471 memset(buf, 0, sizeof(buf));
472 buf_size = min(count, sizeof(buf) - 1);
473 if (copy_from_user(buf, user_buf, buf_size))
474 return -EFAULT;
475 if (sscanf(buf, "%d", &event_log_flag) != 1)
476 return -EFAULT;
477 if (event_log_flag == 1)
478 iwl_dump_nic_event_log(priv, true, NULL, false);
479
480 return count;
481}
482
483
484
485static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
486 size_t count, loff_t *ppos)
487{
488 struct iwl_priv *priv = file->private_data;
489 struct ieee80211_channel *channels = NULL;
490 const struct ieee80211_supported_band *supp_band = NULL;
491 int pos = 0, i, bufsz = PAGE_SIZE;
492 char *buf;
493 ssize_t ret;
494
495 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
496 return -EAGAIN;
497
498 buf = kzalloc(bufsz, GFP_KERNEL);
499 if (!buf) {
500 IWL_ERR(priv, "Can not allocate Buffer\n");
501 return -ENOMEM;
502 }
503
504 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
505 if (supp_band) {
506 channels = supp_band->channels;
507
508 pos += scnprintf(buf + pos, bufsz - pos,
509			"Displaying %d channels in 2.4GHz band (802.11bg):\n",
510 supp_band->n_channels);
511
512 for (i = 0; i < supp_band->n_channels; i++)
513 pos += scnprintf(buf + pos, bufsz - pos,
514 "%d: %ddBm: BSS%s%s, %s.\n",
515 channels[i].hw_value,
516 channels[i].max_power,
517 channels[i].flags & IEEE80211_CHAN_RADAR ?
518 " (IEEE 802.11h required)" : "",
519 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
520 || (channels[i].flags &
521 IEEE80211_CHAN_RADAR)) ? "" :
522 ", IBSS",
523 channels[i].flags &
524 IEEE80211_CHAN_PASSIVE_SCAN ?
525 "passive only" : "active/passive");
526 }
527 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
528 if (supp_band) {
529 channels = supp_band->channels;
530
531 pos += scnprintf(buf + pos, bufsz - pos,
532			"Displaying %d channels in 5.2GHz band (802.11a):\n",
533 supp_band->n_channels);
534
535 for (i = 0; i < supp_band->n_channels; i++)
536 pos += scnprintf(buf + pos, bufsz - pos,
537 "%d: %ddBm: BSS%s%s, %s.\n",
538 channels[i].hw_value,
539 channels[i].max_power,
540 channels[i].flags & IEEE80211_CHAN_RADAR ?
541 " (IEEE 802.11h required)" : "",
542 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
543 || (channels[i].flags &
544 IEEE80211_CHAN_RADAR)) ? "" :
545 ", IBSS",
546 channels[i].flags &
547 IEEE80211_CHAN_PASSIVE_SCAN ?
548 "passive only" : "active/passive");
549 }
550 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
551 kfree(buf);
552 return ret;
553}
554
555static ssize_t iwl_dbgfs_status_read(struct file *file,
556 char __user *user_buf,
557 size_t count, loff_t *ppos) {
558
559 struct iwl_priv *priv = file->private_data;
560 char buf[512];
561 int pos = 0;
562 const size_t bufsz = sizeof(buf);
563
564 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
565 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
566 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
567 test_bit(STATUS_INT_ENABLED, &priv->status));
568 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
569 test_bit(STATUS_RF_KILL_HW, &priv->status));
570 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
571 test_bit(STATUS_CT_KILL, &priv->status));
572 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
573 test_bit(STATUS_INIT, &priv->status));
574 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
575 test_bit(STATUS_ALIVE, &priv->status));
576 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
577 test_bit(STATUS_READY, &priv->status));
578 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
579 test_bit(STATUS_TEMPERATURE, &priv->status));
580 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
581 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
582 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
583 test_bit(STATUS_EXIT_PENDING, &priv->status));
584 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
585 test_bit(STATUS_STATISTICS, &priv->status));
586 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
587 test_bit(STATUS_SCANNING, &priv->status));
588 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
589 test_bit(STATUS_SCAN_ABORTING, &priv->status));
590 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
591 test_bit(STATUS_SCAN_HW, &priv->status));
592 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
593 test_bit(STATUS_POWER_PMI, &priv->status));
594 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
595 test_bit(STATUS_FW_ERROR, &priv->status));
596 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
597}
598
599static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
600 char __user *user_buf,
601 size_t count, loff_t *ppos) {
602
603 struct iwl_priv *priv = file->private_data;
604 int pos = 0;
605 int cnt = 0;
606 char *buf;
607 int bufsz = 24 * 64; /* 24 items * 64 char per item */
608 ssize_t ret;
609
610 buf = kzalloc(bufsz, GFP_KERNEL);
611 if (!buf) {
612 IWL_ERR(priv, "Can not allocate Buffer\n");
613 return -ENOMEM;
614 }
615
616 pos += scnprintf(buf + pos, bufsz - pos,
617 "Interrupt Statistics Report:\n");
618
619 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
620 priv->isr_stats.hw);
621 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
622 priv->isr_stats.sw);
623 if (priv->isr_stats.sw || priv->isr_stats.hw) {
624 pos += scnprintf(buf + pos, bufsz - pos,
625 "\tLast Restarting Code: 0x%X\n",
626 priv->isr_stats.err_code);
627 }
628#ifdef CONFIG_IWLWIFI_DEBUG
629 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
630 priv->isr_stats.sch);
631 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
632 priv->isr_stats.alive);
633#endif
634 pos += scnprintf(buf + pos, bufsz - pos,
635 "HW RF KILL switch toggled:\t %u\n",
636 priv->isr_stats.rfkill);
637
638 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
639 priv->isr_stats.ctkill);
640
641 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
642 priv->isr_stats.wakeup);
643
644 pos += scnprintf(buf + pos, bufsz - pos,
645 "Rx command responses:\t\t %u\n",
646 priv->isr_stats.rx);
647 for (cnt = 0; cnt < REPLY_MAX; cnt++) {
648 if (priv->isr_stats.rx_handlers[cnt] > 0)
649 pos += scnprintf(buf + pos, bufsz - pos,
650 "\tRx handler[%36s]:\t\t %u\n",
651 get_cmd_string(cnt),
652 priv->isr_stats.rx_handlers[cnt]);
653 }
654
655 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
656 priv->isr_stats.tx);
657
658 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
659 priv->isr_stats.unhandled);
660
661 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
662 kfree(buf);
663 return ret;
664}
665
666static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
667 const char __user *user_buf,
668 size_t count, loff_t *ppos)
669{
670 struct iwl_priv *priv = file->private_data;
671 char buf[8];
672 int buf_size;
673 u32 reset_flag;
674
675 memset(buf, 0, sizeof(buf));
676 buf_size = min(count, sizeof(buf) - 1);
677 if (copy_from_user(buf, user_buf, buf_size))
678 return -EFAULT;
679 if (sscanf(buf, "%x", &reset_flag) != 1)
680 return -EFAULT;
681 if (reset_flag == 0)
682 iwl_clear_isr_stats(priv);
683
684 return count;
685}
686
687static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
688 size_t count, loff_t *ppos)
689{
690 struct iwl_priv *priv = file->private_data;
691 struct iwl_rxon_context *ctx;
692 int pos = 0, i;
693 char buf[256 * NUM_IWL_RXON_CTX];
694 const size_t bufsz = sizeof(buf);
695
696 for_each_context(priv, ctx) {
697 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
698 ctx->ctxid);
699 for (i = 0; i < AC_NUM; i++) {
700 pos += scnprintf(buf + pos, bufsz - pos,
701 "\tcw_min\tcw_max\taifsn\ttxop\n");
702 pos += scnprintf(buf + pos, bufsz - pos,
703 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
704 ctx->qos_data.def_qos_parm.ac[i].cw_min,
705 ctx->qos_data.def_qos_parm.ac[i].cw_max,
706 ctx->qos_data.def_qos_parm.ac[i].aifsn,
707 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
708 }
709 pos += scnprintf(buf + pos, bufsz - pos, "\n");
710 }
711 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
712}
713
714static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
715 char __user *user_buf,
716 size_t count, loff_t *ppos)
717{
718 struct iwl_priv *priv = file->private_data;
719 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
720 struct iwl_tt_restriction *restriction;
721 char buf[100];
722 int pos = 0;
723 const size_t bufsz = sizeof(buf);
724
725 pos += scnprintf(buf + pos, bufsz - pos,
726 "Thermal Throttling Mode: %s\n",
727			tt->advanced_tt ? "Advanced" : "Legacy");
728 pos += scnprintf(buf + pos, bufsz - pos,
729 "Thermal Throttling State: %d\n",
730 tt->state);
731 if (tt->advanced_tt) {
732 restriction = tt->restriction + tt->state;
733 pos += scnprintf(buf + pos, bufsz - pos,
734 "Tx mode: %d\n",
735 restriction->tx_stream);
736 pos += scnprintf(buf + pos, bufsz - pos,
737 "Rx mode: %d\n",
738 restriction->rx_stream);
739 pos += scnprintf(buf + pos, bufsz - pos,
740 "HT mode: %d\n",
741 restriction->is_ht);
742 }
743 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
744}
745
746static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
747 const char __user *user_buf,
748 size_t count, loff_t *ppos)
749{
750 struct iwl_priv *priv = file->private_data;
751 char buf[8];
752 int buf_size;
753 int ht40;
754
755 memset(buf, 0, sizeof(buf));
756 buf_size = min(count, sizeof(buf) - 1);
757 if (copy_from_user(buf, user_buf, buf_size))
758 return -EFAULT;
759 if (sscanf(buf, "%d", &ht40) != 1)
760 return -EFAULT;
761 if (!iwl_is_any_associated(priv))
762 priv->disable_ht40 = ht40 ? true : false;
763 else {
764 IWL_ERR(priv, "Sta associated with AP - "
765 "Change to 40MHz channel support is not allowed\n");
766 return -EINVAL;
767 }
768
769 return count;
770}
771
772static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
773 char __user *user_buf,
774 size_t count, loff_t *ppos)
775{
776 struct iwl_priv *priv = file->private_data;
777 char buf[100];
778 int pos = 0;
779 const size_t bufsz = sizeof(buf);
780
781 pos += scnprintf(buf + pos, bufsz - pos,
782 "11n 40MHz Mode: %s\n",
783 priv->disable_ht40 ? "Disabled" : "Enabled");
784 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
785}
786
787static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
788 const char __user *user_buf,
789 size_t count, loff_t *ppos)
790{
791 struct iwl_priv *priv = file->private_data;
792 char buf[8];
793 int buf_size;
794 int value;
795
796 memset(buf, 0, sizeof(buf));
797 buf_size = min(count, sizeof(buf) - 1);
798 if (copy_from_user(buf, user_buf, buf_size))
799 return -EFAULT;
800
801 if (sscanf(buf, "%d", &value) != 1)
802 return -EINVAL;
803
804 /*
805 * Our users expect 0 to be "CAM", but 0 isn't actually
806 * valid here. However, let's not confuse them and present
807 * IWL_POWER_INDEX_1 as "1", not "0".
808 */
809 if (value == 0)
810 return -EINVAL;
811 else if (value > 0)
812 value -= 1;
813
814 if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
815 return -EINVAL;
816
817 if (!iwl_is_ready_rf(priv))
818 return -EAGAIN;
819
820 priv->power_data.debug_sleep_level_override = value;
821
822 mutex_lock(&priv->mutex);
823 iwl_power_update_mode(priv, true);
824 mutex_unlock(&priv->mutex);
825
826 return count;
827}
828
829static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
830 char __user *user_buf,
831 size_t count, loff_t *ppos)
832{
833 struct iwl_priv *priv = file->private_data;
834 char buf[10];
835 int pos, value;
836 const size_t bufsz = sizeof(buf);
837
838 /* see the write function */
839 value = priv->power_data.debug_sleep_level_override;
840 if (value >= 0)
841 value += 1;
842
843 pos = scnprintf(buf, bufsz, "%d\n", value);
844 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
845}
846
847static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
848 char __user *user_buf,
849 size_t count, loff_t *ppos)
850{
851 struct iwl_priv *priv = file->private_data;
852 char buf[200];
853 int pos = 0, i;
854 const size_t bufsz = sizeof(buf);
855 struct iwl_powertable_cmd *cmd = &priv->power_data.sleep_cmd;
856
857 pos += scnprintf(buf + pos, bufsz - pos,
858 "flags: %#.2x\n", le16_to_cpu(cmd->flags));
859 pos += scnprintf(buf + pos, bufsz - pos,
860 "RX/TX timeout: %d/%d usec\n",
861 le32_to_cpu(cmd->rx_data_timeout),
862 le32_to_cpu(cmd->tx_data_timeout));
863 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
864 pos += scnprintf(buf + pos, bufsz - pos,
865 "sleep_interval[%d]: %d\n", i,
866 le32_to_cpu(cmd->sleep_interval[i]));
867
868 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
869}
870
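/*
 * Added note (not in the original source): each invocation below expands to
 * a const struct file_operations named iwl_dbgfs_<name>_ops, built from the
 * read and/or write handlers defined above; these are the ops referenced by
 * DEBUGFS_ADD_FILE() when the debugfs files are created.
 */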
871DEBUGFS_READ_WRITE_FILE_OPS(sram);
872DEBUGFS_READ_FILE_OPS(wowlan_sram);
873DEBUGFS_READ_WRITE_FILE_OPS(log_event);
874DEBUGFS_READ_FILE_OPS(nvm);
875DEBUGFS_READ_FILE_OPS(stations);
876DEBUGFS_READ_FILE_OPS(channels);
877DEBUGFS_READ_FILE_OPS(status);
878DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
879DEBUGFS_READ_FILE_OPS(qos);
880DEBUGFS_READ_FILE_OPS(thermal_throttling);
881DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
882DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
883DEBUGFS_READ_FILE_OPS(current_sleep_command);
884
885static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
886 char __user *user_buf,
887 size_t count, loff_t *ppos)
888{
889 struct iwl_priv *priv = file->private_data;
890 int pos = 0, ofs = 0;
891 int cnt = 0, entry;
892 struct iwl_tx_queue *txq;
893 struct iwl_queue *q;
894 struct iwl_rx_queue *rxq = &priv->rxq;
895 char *buf;
896 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
897 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
898 const u8 *ptr;
899 ssize_t ret;
900
901 if (!priv->txq) {
902 IWL_ERR(priv, "txq not ready\n");
903 return -EAGAIN;
904 }
905 buf = kzalloc(bufsz, GFP_KERNEL);
906 if (!buf) {
907 IWL_ERR(priv, "Can not allocate buffer\n");
908 return -ENOMEM;
909 }
910 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
911 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
912 txq = &priv->txq[cnt];
913 q = &txq->q;
914 pos += scnprintf(buf + pos, bufsz - pos,
915 "q[%d]: read_ptr: %u, write_ptr: %u\n",
916 cnt, q->read_ptr, q->write_ptr);
917 }
918 if (priv->tx_traffic && (iwl_debug_level & IWL_DL_TX)) {
919 ptr = priv->tx_traffic;
920 pos += scnprintf(buf + pos, bufsz - pos,
921 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
922 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
923 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
924 entry++, ofs += 16) {
925 pos += scnprintf(buf + pos, bufsz - pos,
926 "0x%.4x ", ofs);
927 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
928 buf + pos, bufsz - pos, 0);
929 pos += strlen(buf + pos);
930 if (bufsz - pos > 0)
931 buf[pos++] = '\n';
932 }
933 }
934 }
935
936 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
937 pos += scnprintf(buf + pos, bufsz - pos,
938 "read: %u, write: %u\n",
939 rxq->read, rxq->write);
940
941 if (priv->rx_traffic && (iwl_debug_level & IWL_DL_RX)) {
942 ptr = priv->rx_traffic;
943 pos += scnprintf(buf + pos, bufsz - pos,
944 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
945 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
946 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
947 entry++, ofs += 16) {
948 pos += scnprintf(buf + pos, bufsz - pos,
949 "0x%.4x ", ofs);
950 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
951 buf + pos, bufsz - pos, 0);
952 pos += strlen(buf + pos);
953 if (bufsz - pos > 0)
954 buf[pos++] = '\n';
955 }
956 }
957 }
958
959 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
960 kfree(buf);
961 return ret;
962}
963
964static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
965 const char __user *user_buf,
966 size_t count, loff_t *ppos)
967{
968 struct iwl_priv *priv = file->private_data;
969 char buf[8];
970 int buf_size;
971 int traffic_log;
972
973 memset(buf, 0, sizeof(buf));
974 buf_size = min(count, sizeof(buf) - 1);
975 if (copy_from_user(buf, user_buf, buf_size))
976 return -EFAULT;
977 if (sscanf(buf, "%d", &traffic_log) != 1)
978 return -EFAULT;
979 if (traffic_log == 0)
980 iwl_reset_traffic_log(priv);
981
982 return count;
983}
984
985static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
986 char __user *user_buf,
987 size_t count, loff_t *ppos) {
988
989 struct iwl_priv *priv = file->private_data;
990 struct iwl_tx_queue *txq;
991 struct iwl_queue *q;
992 char *buf;
993 int pos = 0;
994 int cnt;
995 int ret;
996 const size_t bufsz = sizeof(char) * 64 *
997 priv->cfg->base_params->num_of_queues;
998
999 if (!priv->txq) {
1000 IWL_ERR(priv, "txq not ready\n");
1001 return -EAGAIN;
1002 }
1003 buf = kzalloc(bufsz, GFP_KERNEL);
1004 if (!buf)
1005 return -ENOMEM;
1006
1007 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1008 txq = &priv->txq[cnt];
1009 q = &txq->q;
1010 pos += scnprintf(buf + pos, bufsz - pos,
1011 "hwq %.2d: read=%u write=%u stop=%d"
1012 " swq_id=%#.2x (ac %d/hwq %d)\n",
1013 cnt, q->read_ptr, q->write_ptr,
1014 !!test_bit(cnt, priv->queue_stopped),
1015 txq->swq_id, txq->swq_id & 3,
1016 (txq->swq_id >> 2) & 0x1f);
1017 if (cnt >= 4)
1018 continue;
1019 /* for the ACs, display the stop count too */
1020 pos += scnprintf(buf + pos, bufsz - pos,
1021 " stop-count: %d\n",
1022 atomic_read(&priv->queue_stop_count[cnt]));
1023 }
1024 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1025 kfree(buf);
1026 return ret;
1027}
1028
1029static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1030 char __user *user_buf,
1031 size_t count, loff_t *ppos) {
1032
1033 struct iwl_priv *priv = file->private_data;
1034 struct iwl_rx_queue *rxq = &priv->rxq;
1035 char buf[256];
1036 int pos = 0;
1037 const size_t bufsz = sizeof(buf);
1038
1039 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1040 rxq->read);
1041 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1042 rxq->write);
1043 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1044 rxq->free_count);
1045 if (rxq->rb_stts) {
1046 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1047 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1048 } else {
1049 pos += scnprintf(buf + pos, bufsz - pos,
1050 "closed_rb_num: Not Allocated\n");
1051 }
1052 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1053}
1054
1055static const char *fmt_value = " %-30s %10u\n";
1056static const char *fmt_hex = " %-30s 0x%02X\n";
1057static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
1058static const char *fmt_header =
1059 "%-32s current cumulative delta max\n";
1060
1061static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
1062{
1063 int p = 0;
1064 u32 flag;
1065
1066 flag = le32_to_cpu(priv->statistics.flag);
1067
1068 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
1069 if (flag & UCODE_STATISTICS_CLEAR_MSK)
1070 p += scnprintf(buf + p, bufsz - p,
1071 "\tStatistics have been cleared\n");
1072 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
1073 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
1074 ? "2.4 GHz" : "5.2 GHz");
1075 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
1076 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
1077 ? "enabled" : "disabled");
1078
1079 return p;
1080}
1081
1082static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1083 char __user *user_buf,
1084 size_t count, loff_t *ppos)
1085{
1086 struct iwl_priv *priv = file->private_data;
1087 int pos = 0;
1088 char *buf;
1089 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
1090 sizeof(struct statistics_rx_non_phy) * 40 +
1091 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
1092 ssize_t ret;
1093 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
1094 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
1095 struct statistics_rx_non_phy *general, *accum_general;
1096 struct statistics_rx_non_phy *delta_general, *max_general;
1097 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
1098
1099 if (!iwl_is_alive(priv))
1100 return -EAGAIN;
1101
1102 buf = kzalloc(bufsz, GFP_KERNEL);
1103 if (!buf) {
1104 IWL_ERR(priv, "Can not allocate Buffer\n");
1105 return -ENOMEM;
1106 }
1107
1108 /*
1109	 * the statistics information displayed here is based on
1110	 * the last statistics notification from uCode and
1111	 * might not reflect the current uCode activity
1112 */
1113 ofdm = &priv->statistics.rx_ofdm;
1114 cck = &priv->statistics.rx_cck;
1115 general = &priv->statistics.rx_non_phy;
1116 ht = &priv->statistics.rx_ofdm_ht;
1117 accum_ofdm = &priv->accum_stats.rx_ofdm;
1118 accum_cck = &priv->accum_stats.rx_cck;
1119 accum_general = &priv->accum_stats.rx_non_phy;
1120 accum_ht = &priv->accum_stats.rx_ofdm_ht;
1121 delta_ofdm = &priv->delta_stats.rx_ofdm;
1122 delta_cck = &priv->delta_stats.rx_cck;
1123 delta_general = &priv->delta_stats.rx_non_phy;
1124 delta_ht = &priv->delta_stats.rx_ofdm_ht;
1125 max_ofdm = &priv->max_delta_stats.rx_ofdm;
1126 max_cck = &priv->max_delta_stats.rx_cck;
1127 max_general = &priv->max_delta_stats.rx_non_phy;
1128 max_ht = &priv->max_delta_stats.rx_ofdm_ht;
1129
1130 pos += iwl_statistics_flag(priv, buf, bufsz);
1131 pos += scnprintf(buf + pos, bufsz - pos,
1132 fmt_header, "Statistics_Rx - OFDM:");
1133 pos += scnprintf(buf + pos, bufsz - pos,
1134 fmt_table, "ina_cnt:",
1135 le32_to_cpu(ofdm->ina_cnt),
1136 accum_ofdm->ina_cnt,
1137 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
1138 pos += scnprintf(buf + pos, bufsz - pos,
1139 fmt_table, "fina_cnt:",
1140 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
1141 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
1142 pos += scnprintf(buf + pos, bufsz - pos,
1143 fmt_table, "plcp_err:",
1144 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
1145 delta_ofdm->plcp_err, max_ofdm->plcp_err);
1146 pos += scnprintf(buf + pos, bufsz - pos,
1147 fmt_table, "crc32_err:",
1148 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
1149 delta_ofdm->crc32_err, max_ofdm->crc32_err);
1150 pos += scnprintf(buf + pos, bufsz - pos,
1151 fmt_table, "overrun_err:",
1152 le32_to_cpu(ofdm->overrun_err),
1153 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
1154 max_ofdm->overrun_err);
1155 pos += scnprintf(buf + pos, bufsz - pos,
1156 fmt_table, "early_overrun_err:",
1157 le32_to_cpu(ofdm->early_overrun_err),
1158 accum_ofdm->early_overrun_err,
1159 delta_ofdm->early_overrun_err,
1160 max_ofdm->early_overrun_err);
1161 pos += scnprintf(buf + pos, bufsz - pos,
1162 fmt_table, "crc32_good:",
1163 le32_to_cpu(ofdm->crc32_good),
1164 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
1165 max_ofdm->crc32_good);
1166 pos += scnprintf(buf + pos, bufsz - pos,
1167 fmt_table, "false_alarm_cnt:",
1168 le32_to_cpu(ofdm->false_alarm_cnt),
1169 accum_ofdm->false_alarm_cnt,
1170 delta_ofdm->false_alarm_cnt,
1171 max_ofdm->false_alarm_cnt);
1172 pos += scnprintf(buf + pos, bufsz - pos,
1173 fmt_table, "fina_sync_err_cnt:",
1174 le32_to_cpu(ofdm->fina_sync_err_cnt),
1175 accum_ofdm->fina_sync_err_cnt,
1176 delta_ofdm->fina_sync_err_cnt,
1177 max_ofdm->fina_sync_err_cnt);
1178 pos += scnprintf(buf + pos, bufsz - pos,
1179 fmt_table, "sfd_timeout:",
1180 le32_to_cpu(ofdm->sfd_timeout),
1181 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
1182 max_ofdm->sfd_timeout);
1183 pos += scnprintf(buf + pos, bufsz - pos,
1184 fmt_table, "fina_timeout:",
1185 le32_to_cpu(ofdm->fina_timeout),
1186 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
1187 max_ofdm->fina_timeout);
1188 pos += scnprintf(buf + pos, bufsz - pos,
1189 fmt_table, "unresponded_rts:",
1190 le32_to_cpu(ofdm->unresponded_rts),
1191 accum_ofdm->unresponded_rts,
1192 delta_ofdm->unresponded_rts,
1193 max_ofdm->unresponded_rts);
1194 pos += scnprintf(buf + pos, bufsz - pos,
1195 fmt_table, "rxe_frame_lmt_ovrun:",
1196 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
1197 accum_ofdm->rxe_frame_limit_overrun,
1198 delta_ofdm->rxe_frame_limit_overrun,
1199 max_ofdm->rxe_frame_limit_overrun);
1200 pos += scnprintf(buf + pos, bufsz - pos,
1201 fmt_table, "sent_ack_cnt:",
1202 le32_to_cpu(ofdm->sent_ack_cnt),
1203 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
1204 max_ofdm->sent_ack_cnt);
1205 pos += scnprintf(buf + pos, bufsz - pos,
1206 fmt_table, "sent_cts_cnt:",
1207 le32_to_cpu(ofdm->sent_cts_cnt),
1208 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
1209 max_ofdm->sent_cts_cnt);
1210 pos += scnprintf(buf + pos, bufsz - pos,
1211 fmt_table, "sent_ba_rsp_cnt:",
1212 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
1213 accum_ofdm->sent_ba_rsp_cnt,
1214 delta_ofdm->sent_ba_rsp_cnt,
1215 max_ofdm->sent_ba_rsp_cnt);
1216 pos += scnprintf(buf + pos, bufsz - pos,
1217 fmt_table, "dsp_self_kill:",
1218 le32_to_cpu(ofdm->dsp_self_kill),
1219 accum_ofdm->dsp_self_kill,
1220 delta_ofdm->dsp_self_kill,
1221 max_ofdm->dsp_self_kill);
1222 pos += scnprintf(buf + pos, bufsz - pos,
1223 fmt_table, "mh_format_err:",
1224 le32_to_cpu(ofdm->mh_format_err),
1225 accum_ofdm->mh_format_err,
1226 delta_ofdm->mh_format_err,
1227 max_ofdm->mh_format_err);
1228 pos += scnprintf(buf + pos, bufsz - pos,
1229 fmt_table, "re_acq_main_rssi_sum:",
1230 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
1231 accum_ofdm->re_acq_main_rssi_sum,
1232 delta_ofdm->re_acq_main_rssi_sum,
1233 max_ofdm->re_acq_main_rssi_sum);
1234
1235 pos += scnprintf(buf + pos, bufsz - pos,
1236 fmt_header, "Statistics_Rx - CCK:");
1237 pos += scnprintf(buf + pos, bufsz - pos,
1238 fmt_table, "ina_cnt:",
1239 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
1240 delta_cck->ina_cnt, max_cck->ina_cnt);
1241 pos += scnprintf(buf + pos, bufsz - pos,
1242 fmt_table, "fina_cnt:",
1243 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
1244 delta_cck->fina_cnt, max_cck->fina_cnt);
1245 pos += scnprintf(buf + pos, bufsz - pos,
1246 fmt_table, "plcp_err:",
1247 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
1248 delta_cck->plcp_err, max_cck->plcp_err);
1249 pos += scnprintf(buf + pos, bufsz - pos,
1250 fmt_table, "crc32_err:",
1251 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
1252 delta_cck->crc32_err, max_cck->crc32_err);
1253 pos += scnprintf(buf + pos, bufsz - pos,
1254 fmt_table, "overrun_err:",
1255 le32_to_cpu(cck->overrun_err),
1256 accum_cck->overrun_err, delta_cck->overrun_err,
1257 max_cck->overrun_err);
1258 pos += scnprintf(buf + pos, bufsz - pos,
1259 fmt_table, "early_overrun_err:",
1260 le32_to_cpu(cck->early_overrun_err),
1261 accum_cck->early_overrun_err,
1262 delta_cck->early_overrun_err,
1263 max_cck->early_overrun_err);
1264 pos += scnprintf(buf + pos, bufsz - pos,
1265 fmt_table, "crc32_good:",
1266 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
1267 delta_cck->crc32_good, max_cck->crc32_good);
1268 pos += scnprintf(buf + pos, bufsz - pos,
1269 fmt_table, "false_alarm_cnt:",
1270 le32_to_cpu(cck->false_alarm_cnt),
1271 accum_cck->false_alarm_cnt,
1272 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
1273 pos += scnprintf(buf + pos, bufsz - pos,
1274 fmt_table, "fina_sync_err_cnt:",
1275 le32_to_cpu(cck->fina_sync_err_cnt),
1276 accum_cck->fina_sync_err_cnt,
1277 delta_cck->fina_sync_err_cnt,
1278 max_cck->fina_sync_err_cnt);
1279 pos += scnprintf(buf + pos, bufsz - pos,
1280 fmt_table, "sfd_timeout:",
1281 le32_to_cpu(cck->sfd_timeout),
1282 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
1283 max_cck->sfd_timeout);
1284 pos += scnprintf(buf + pos, bufsz - pos,
1285 fmt_table, "fina_timeout:",
1286 le32_to_cpu(cck->fina_timeout),
1287 accum_cck->fina_timeout, delta_cck->fina_timeout,
1288 max_cck->fina_timeout);
1289 pos += scnprintf(buf + pos, bufsz - pos,
1290 fmt_table, "unresponded_rts:",
1291 le32_to_cpu(cck->unresponded_rts),
1292 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
1293 max_cck->unresponded_rts);
1294 pos += scnprintf(buf + pos, bufsz - pos,
1295 fmt_table, "rxe_frame_lmt_ovrun:",
1296 le32_to_cpu(cck->rxe_frame_limit_overrun),
1297 accum_cck->rxe_frame_limit_overrun,
1298 delta_cck->rxe_frame_limit_overrun,
1299 max_cck->rxe_frame_limit_overrun);
1300 pos += scnprintf(buf + pos, bufsz - pos,
1301 fmt_table, "sent_ack_cnt:",
1302 le32_to_cpu(cck->sent_ack_cnt),
1303 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
1304 max_cck->sent_ack_cnt);
1305 pos += scnprintf(buf + pos, bufsz - pos,
1306 fmt_table, "sent_cts_cnt:",
1307 le32_to_cpu(cck->sent_cts_cnt),
1308 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
1309 max_cck->sent_cts_cnt);
1310 pos += scnprintf(buf + pos, bufsz - pos,
1311 fmt_table, "sent_ba_rsp_cnt:",
1312 le32_to_cpu(cck->sent_ba_rsp_cnt),
1313 accum_cck->sent_ba_rsp_cnt,
1314 delta_cck->sent_ba_rsp_cnt,
1315 max_cck->sent_ba_rsp_cnt);
1316 pos += scnprintf(buf + pos, bufsz - pos,
1317 fmt_table, "dsp_self_kill:",
1318 le32_to_cpu(cck->dsp_self_kill),
1319 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
1320 max_cck->dsp_self_kill);
1321 pos += scnprintf(buf + pos, bufsz - pos,
1322 fmt_table, "mh_format_err:",
1323 le32_to_cpu(cck->mh_format_err),
1324 accum_cck->mh_format_err, delta_cck->mh_format_err,
1325 max_cck->mh_format_err);
1326 pos += scnprintf(buf + pos, bufsz - pos,
1327 fmt_table, "re_acq_main_rssi_sum:",
1328 le32_to_cpu(cck->re_acq_main_rssi_sum),
1329 accum_cck->re_acq_main_rssi_sum,
1330 delta_cck->re_acq_main_rssi_sum,
1331 max_cck->re_acq_main_rssi_sum);
1332
1333 pos += scnprintf(buf + pos, bufsz - pos,
1334 fmt_header, "Statistics_Rx - GENERAL:");
1335 pos += scnprintf(buf + pos, bufsz - pos,
1336 fmt_table, "bogus_cts:",
1337 le32_to_cpu(general->bogus_cts),
1338 accum_general->bogus_cts, delta_general->bogus_cts,
1339 max_general->bogus_cts);
1340 pos += scnprintf(buf + pos, bufsz - pos,
1341 fmt_table, "bogus_ack:",
1342 le32_to_cpu(general->bogus_ack),
1343 accum_general->bogus_ack, delta_general->bogus_ack,
1344 max_general->bogus_ack);
1345 pos += scnprintf(buf + pos, bufsz - pos,
1346 fmt_table, "non_bssid_frames:",
1347 le32_to_cpu(general->non_bssid_frames),
1348 accum_general->non_bssid_frames,
1349 delta_general->non_bssid_frames,
1350 max_general->non_bssid_frames);
1351 pos += scnprintf(buf + pos, bufsz - pos,
1352 fmt_table, "filtered_frames:",
1353 le32_to_cpu(general->filtered_frames),
1354 accum_general->filtered_frames,
1355 delta_general->filtered_frames,
1356 max_general->filtered_frames);
1357 pos += scnprintf(buf + pos, bufsz - pos,
1358 fmt_table, "non_channel_beacons:",
1359 le32_to_cpu(general->non_channel_beacons),
1360 accum_general->non_channel_beacons,
1361 delta_general->non_channel_beacons,
1362 max_general->non_channel_beacons);
1363 pos += scnprintf(buf + pos, bufsz - pos,
1364 fmt_table, "channel_beacons:",
1365 le32_to_cpu(general->channel_beacons),
1366 accum_general->channel_beacons,
1367 delta_general->channel_beacons,
1368 max_general->channel_beacons);
1369 pos += scnprintf(buf + pos, bufsz - pos,
1370 fmt_table, "num_missed_bcon:",
1371 le32_to_cpu(general->num_missed_bcon),
1372 accum_general->num_missed_bcon,
1373 delta_general->num_missed_bcon,
1374 max_general->num_missed_bcon);
1375 pos += scnprintf(buf + pos, bufsz - pos,
1376 fmt_table, "adc_rx_saturation_time:",
1377 le32_to_cpu(general->adc_rx_saturation_time),
1378 accum_general->adc_rx_saturation_time,
1379 delta_general->adc_rx_saturation_time,
1380 max_general->adc_rx_saturation_time);
1381 pos += scnprintf(buf + pos, bufsz - pos,
1382 fmt_table, "ina_detect_search_tm:",
1383 le32_to_cpu(general->ina_detection_search_time),
1384 accum_general->ina_detection_search_time,
1385 delta_general->ina_detection_search_time,
1386 max_general->ina_detection_search_time);
1387 pos += scnprintf(buf + pos, bufsz - pos,
1388 fmt_table, "beacon_silence_rssi_a:",
1389 le32_to_cpu(general->beacon_silence_rssi_a),
1390 accum_general->beacon_silence_rssi_a,
1391 delta_general->beacon_silence_rssi_a,
1392 max_general->beacon_silence_rssi_a);
1393 pos += scnprintf(buf + pos, bufsz - pos,
1394 fmt_table, "beacon_silence_rssi_b:",
1395 le32_to_cpu(general->beacon_silence_rssi_b),
1396 accum_general->beacon_silence_rssi_b,
1397 delta_general->beacon_silence_rssi_b,
1398 max_general->beacon_silence_rssi_b);
1399 pos += scnprintf(buf + pos, bufsz - pos,
1400 fmt_table, "beacon_silence_rssi_c:",
1401 le32_to_cpu(general->beacon_silence_rssi_c),
1402 accum_general->beacon_silence_rssi_c,
1403 delta_general->beacon_silence_rssi_c,
1404 max_general->beacon_silence_rssi_c);
1405 pos += scnprintf(buf + pos, bufsz - pos,
1406 fmt_table, "interference_data_flag:",
1407 le32_to_cpu(general->interference_data_flag),
1408 accum_general->interference_data_flag,
1409 delta_general->interference_data_flag,
1410 max_general->interference_data_flag);
1411 pos += scnprintf(buf + pos, bufsz - pos,
1412 fmt_table, "channel_load:",
1413 le32_to_cpu(general->channel_load),
1414 accum_general->channel_load,
1415 delta_general->channel_load,
1416 max_general->channel_load);
1417 pos += scnprintf(buf + pos, bufsz - pos,
1418 fmt_table, "dsp_false_alarms:",
1419 le32_to_cpu(general->dsp_false_alarms),
1420 accum_general->dsp_false_alarms,
1421 delta_general->dsp_false_alarms,
1422 max_general->dsp_false_alarms);
1423 pos += scnprintf(buf + pos, bufsz - pos,
1424 fmt_table, "beacon_rssi_a:",
1425 le32_to_cpu(general->beacon_rssi_a),
1426 accum_general->beacon_rssi_a,
1427 delta_general->beacon_rssi_a,
1428 max_general->beacon_rssi_a);
1429 pos += scnprintf(buf + pos, bufsz - pos,
1430 fmt_table, "beacon_rssi_b:",
1431 le32_to_cpu(general->beacon_rssi_b),
1432 accum_general->beacon_rssi_b,
1433 delta_general->beacon_rssi_b,
1434 max_general->beacon_rssi_b);
1435 pos += scnprintf(buf + pos, bufsz - pos,
1436 fmt_table, "beacon_rssi_c:",
1437 le32_to_cpu(general->beacon_rssi_c),
1438 accum_general->beacon_rssi_c,
1439 delta_general->beacon_rssi_c,
1440 max_general->beacon_rssi_c);
1441 pos += scnprintf(buf + pos, bufsz - pos,
1442 fmt_table, "beacon_energy_a:",
1443 le32_to_cpu(general->beacon_energy_a),
1444 accum_general->beacon_energy_a,
1445 delta_general->beacon_energy_a,
1446 max_general->beacon_energy_a);
1447 pos += scnprintf(buf + pos, bufsz - pos,
1448 fmt_table, "beacon_energy_b:",
1449 le32_to_cpu(general->beacon_energy_b),
1450 accum_general->beacon_energy_b,
1451 delta_general->beacon_energy_b,
1452 max_general->beacon_energy_b);
1453 pos += scnprintf(buf + pos, bufsz - pos,
1454 fmt_table, "beacon_energy_c:",
1455 le32_to_cpu(general->beacon_energy_c),
1456 accum_general->beacon_energy_c,
1457 delta_general->beacon_energy_c,
1458 max_general->beacon_energy_c);
1459
1460 pos += scnprintf(buf + pos, bufsz - pos,
1461 fmt_header, "Statistics_Rx - OFDM_HT:");
1462 pos += scnprintf(buf + pos, bufsz - pos,
1463 fmt_table, "plcp_err:",
1464 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
1465 delta_ht->plcp_err, max_ht->plcp_err);
1466 pos += scnprintf(buf + pos, bufsz - pos,
1467 fmt_table, "overrun_err:",
1468 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
1469 delta_ht->overrun_err, max_ht->overrun_err);
1470 pos += scnprintf(buf + pos, bufsz - pos,
1471 fmt_table, "early_overrun_err:",
1472 le32_to_cpu(ht->early_overrun_err),
1473 accum_ht->early_overrun_err,
1474 delta_ht->early_overrun_err,
1475 max_ht->early_overrun_err);
1476 pos += scnprintf(buf + pos, bufsz - pos,
1477 fmt_table, "crc32_good:",
1478 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
1479 delta_ht->crc32_good, max_ht->crc32_good);
1480 pos += scnprintf(buf + pos, bufsz - pos,
1481 fmt_table, "crc32_err:",
1482 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
1483 delta_ht->crc32_err, max_ht->crc32_err);
1484 pos += scnprintf(buf + pos, bufsz - pos,
1485 fmt_table, "mh_format_err:",
1486 le32_to_cpu(ht->mh_format_err),
1487 accum_ht->mh_format_err,
1488 delta_ht->mh_format_err, max_ht->mh_format_err);
1489 pos += scnprintf(buf + pos, bufsz - pos,
1490 fmt_table, "agg_crc32_good:",
1491 le32_to_cpu(ht->agg_crc32_good),
1492 accum_ht->agg_crc32_good,
1493 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
1494 pos += scnprintf(buf + pos, bufsz - pos,
1495 fmt_table, "agg_mpdu_cnt:",
1496 le32_to_cpu(ht->agg_mpdu_cnt),
1497 accum_ht->agg_mpdu_cnt,
1498 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
1499 pos += scnprintf(buf + pos, bufsz - pos,
1500 fmt_table, "agg_cnt:",
1501 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
1502 delta_ht->agg_cnt, max_ht->agg_cnt);
1503 pos += scnprintf(buf + pos, bufsz - pos,
1504 fmt_table, "unsupport_mcs:",
1505 le32_to_cpu(ht->unsupport_mcs),
1506 accum_ht->unsupport_mcs,
1507 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
1508
1509 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1510 kfree(buf);
1511 return ret;
1512}
1513
1514static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1515 char __user *user_buf,
1516 size_t count, loff_t *ppos)
1517{
1518 struct iwl_priv *priv = file->private_data;
1519 int pos = 0;
1520 char *buf;
1521 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
1522 ssize_t ret;
1523 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
1524
1525 if (!iwl_is_alive(priv))
1526 return -EAGAIN;
1527
1528 buf = kzalloc(bufsz, GFP_KERNEL);
1529 if (!buf) {
1530 IWL_ERR(priv, "Can not allocate Buffer\n");
1531 return -ENOMEM;
1532 }
1533
1534 /* the statistics information displayed here is based on
1535 * the last statistics notification from the uCode and
1536 * might not reflect the current uCode activity
1537 */
1538 tx = &priv->statistics.tx;
1539 accum_tx = &priv->accum_stats.tx;
1540 delta_tx = &priv->delta_stats.tx;
1541 max_tx = &priv->max_delta_stats.tx;
1542
1543 pos += iwl_statistics_flag(priv, buf, bufsz);
1544 pos += scnprintf(buf + pos, bufsz - pos,
1545 fmt_header, "Statistics_Tx:");
1546 pos += scnprintf(buf + pos, bufsz - pos,
1547 fmt_table, "preamble:",
1548 le32_to_cpu(tx->preamble_cnt),
1549 accum_tx->preamble_cnt,
1550 delta_tx->preamble_cnt, max_tx->preamble_cnt);
1551 pos += scnprintf(buf + pos, bufsz - pos,
1552 fmt_table, "rx_detected_cnt:",
1553 le32_to_cpu(tx->rx_detected_cnt),
1554 accum_tx->rx_detected_cnt,
1555 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
1556 pos += scnprintf(buf + pos, bufsz - pos,
1557 fmt_table, "bt_prio_defer_cnt:",
1558 le32_to_cpu(tx->bt_prio_defer_cnt),
1559 accum_tx->bt_prio_defer_cnt,
1560 delta_tx->bt_prio_defer_cnt,
1561 max_tx->bt_prio_defer_cnt);
1562 pos += scnprintf(buf + pos, bufsz - pos,
1563 fmt_table, "bt_prio_kill_cnt:",
1564 le32_to_cpu(tx->bt_prio_kill_cnt),
1565 accum_tx->bt_prio_kill_cnt,
1566 delta_tx->bt_prio_kill_cnt,
1567 max_tx->bt_prio_kill_cnt);
1568 pos += scnprintf(buf + pos, bufsz - pos,
1569 fmt_table, "few_bytes_cnt:",
1570 le32_to_cpu(tx->few_bytes_cnt),
1571 accum_tx->few_bytes_cnt,
1572 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
1573 pos += scnprintf(buf + pos, bufsz - pos,
1574 fmt_table, "cts_timeout:",
1575 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
1576 delta_tx->cts_timeout, max_tx->cts_timeout);
1577 pos += scnprintf(buf + pos, bufsz - pos,
1578 fmt_table, "ack_timeout:",
1579 le32_to_cpu(tx->ack_timeout),
1580 accum_tx->ack_timeout,
1581 delta_tx->ack_timeout, max_tx->ack_timeout);
1582 pos += scnprintf(buf + pos, bufsz - pos,
1583 fmt_table, "expected_ack_cnt:",
1584 le32_to_cpu(tx->expected_ack_cnt),
1585 accum_tx->expected_ack_cnt,
1586 delta_tx->expected_ack_cnt,
1587 max_tx->expected_ack_cnt);
1588 pos += scnprintf(buf + pos, bufsz - pos,
1589 fmt_table, "actual_ack_cnt:",
1590 le32_to_cpu(tx->actual_ack_cnt),
1591 accum_tx->actual_ack_cnt,
1592 delta_tx->actual_ack_cnt,
1593 max_tx->actual_ack_cnt);
1594 pos += scnprintf(buf + pos, bufsz - pos,
1595 fmt_table, "dump_msdu_cnt:",
1596 le32_to_cpu(tx->dump_msdu_cnt),
1597 accum_tx->dump_msdu_cnt,
1598 delta_tx->dump_msdu_cnt,
1599 max_tx->dump_msdu_cnt);
1600 pos += scnprintf(buf + pos, bufsz - pos,
1601 fmt_table, "abort_nxt_frame_mismatch:",
1602 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
1603 accum_tx->burst_abort_next_frame_mismatch_cnt,
1604 delta_tx->burst_abort_next_frame_mismatch_cnt,
1605 max_tx->burst_abort_next_frame_mismatch_cnt);
1606 pos += scnprintf(buf + pos, bufsz - pos,
1607 fmt_table, "abort_missing_nxt_frame:",
1608 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
1609 accum_tx->burst_abort_missing_next_frame_cnt,
1610 delta_tx->burst_abort_missing_next_frame_cnt,
1611 max_tx->burst_abort_missing_next_frame_cnt);
1612 pos += scnprintf(buf + pos, bufsz - pos,
1613 fmt_table, "cts_timeout_collision:",
1614 le32_to_cpu(tx->cts_timeout_collision),
1615 accum_tx->cts_timeout_collision,
1616 delta_tx->cts_timeout_collision,
1617 max_tx->cts_timeout_collision);
1618 pos += scnprintf(buf + pos, bufsz - pos,
1619 fmt_table, "ack_ba_timeout_collision:",
1620 le32_to_cpu(tx->ack_or_ba_timeout_collision),
1621 accum_tx->ack_or_ba_timeout_collision,
1622 delta_tx->ack_or_ba_timeout_collision,
1623 max_tx->ack_or_ba_timeout_collision);
1624 pos += scnprintf(buf + pos, bufsz - pos,
1625 fmt_table, "agg ba_timeout:",
1626 le32_to_cpu(tx->agg.ba_timeout),
1627 accum_tx->agg.ba_timeout,
1628 delta_tx->agg.ba_timeout,
1629 max_tx->agg.ba_timeout);
1630 pos += scnprintf(buf + pos, bufsz - pos,
1631 fmt_table, "agg ba_resched_frames:",
1632 le32_to_cpu(tx->agg.ba_reschedule_frames),
1633 accum_tx->agg.ba_reschedule_frames,
1634 delta_tx->agg.ba_reschedule_frames,
1635 max_tx->agg.ba_reschedule_frames);
1636 pos += scnprintf(buf + pos, bufsz - pos,
1637 fmt_table, "agg scd_query_agg_frame:",
1638 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
1639 accum_tx->agg.scd_query_agg_frame_cnt,
1640 delta_tx->agg.scd_query_agg_frame_cnt,
1641 max_tx->agg.scd_query_agg_frame_cnt);
1642 pos += scnprintf(buf + pos, bufsz - pos,
1643 fmt_table, "agg scd_query_no_agg:",
1644 le32_to_cpu(tx->agg.scd_query_no_agg),
1645 accum_tx->agg.scd_query_no_agg,
1646 delta_tx->agg.scd_query_no_agg,
1647 max_tx->agg.scd_query_no_agg);
1648 pos += scnprintf(buf + pos, bufsz - pos,
1649 fmt_table, "agg scd_query_agg:",
1650 le32_to_cpu(tx->agg.scd_query_agg),
1651 accum_tx->agg.scd_query_agg,
1652 delta_tx->agg.scd_query_agg,
1653 max_tx->agg.scd_query_agg);
1654 pos += scnprintf(buf + pos, bufsz - pos,
1655 fmt_table, "agg scd_query_mismatch:",
1656 le32_to_cpu(tx->agg.scd_query_mismatch),
1657 accum_tx->agg.scd_query_mismatch,
1658 delta_tx->agg.scd_query_mismatch,
1659 max_tx->agg.scd_query_mismatch);
1660 pos += scnprintf(buf + pos, bufsz - pos,
1661 fmt_table, "agg frame_not_ready:",
1662 le32_to_cpu(tx->agg.frame_not_ready),
1663 accum_tx->agg.frame_not_ready,
1664 delta_tx->agg.frame_not_ready,
1665 max_tx->agg.frame_not_ready);
1666 pos += scnprintf(buf + pos, bufsz - pos,
1667 fmt_table, "agg underrun:",
1668 le32_to_cpu(tx->agg.underrun),
1669 accum_tx->agg.underrun,
1670 delta_tx->agg.underrun, max_tx->agg.underrun);
1671 pos += scnprintf(buf + pos, bufsz - pos,
1672 fmt_table, "agg bt_prio_kill:",
1673 le32_to_cpu(tx->agg.bt_prio_kill),
1674 accum_tx->agg.bt_prio_kill,
1675 delta_tx->agg.bt_prio_kill,
1676 max_tx->agg.bt_prio_kill);
1677 pos += scnprintf(buf + pos, bufsz - pos,
1678 fmt_table, "agg rx_ba_rsp_cnt:",
1679 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
1680 accum_tx->agg.rx_ba_rsp_cnt,
1681 delta_tx->agg.rx_ba_rsp_cnt,
1682 max_tx->agg.rx_ba_rsp_cnt);
1683
1684 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
1685 pos += scnprintf(buf + pos, bufsz - pos,
1686 "tx power: (1/2 dB step)\n");
1687 if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
1688 pos += scnprintf(buf + pos, bufsz - pos,
1689 fmt_hex, "antenna A:",
1690 tx->tx_power.ant_a);
1691 if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
1692 pos += scnprintf(buf + pos, bufsz - pos,
1693 fmt_hex, "antenna B:",
1694 tx->tx_power.ant_b);
1695 if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
1696 pos += scnprintf(buf + pos, bufsz - pos,
1697 fmt_hex, "antenna C:",
1698 tx->tx_power.ant_c);
1699 }
1700 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1701 kfree(buf);
1702 return ret;
1703}
1704
1705static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1706 char __user *user_buf,
1707 size_t count, loff_t *ppos)
1708{
1709 struct iwl_priv *priv = file->private_data;
1710 int pos = 0;
1711 char *buf;
1712 int bufsz = sizeof(struct statistics_general) * 10 + 300;
1713 ssize_t ret;
1714 struct statistics_general_common *general, *accum_general;
1715 struct statistics_general_common *delta_general, *max_general;
1716 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
1717 struct statistics_div *div, *accum_div, *delta_div, *max_div;
1718
1719 if (!iwl_is_alive(priv))
1720 return -EAGAIN;
1721
1722 buf = kzalloc(bufsz, GFP_KERNEL);
1723 if (!buf) {
1724 IWL_ERR(priv, "Can not allocate Buffer\n");
1725 return -ENOMEM;
1726 }
1727
1728 /* the statistics information displayed here is based on
1729 * the last statistics notification from the uCode and
1730 * might not reflect the current uCode activity
1731 */
1732 general = &priv->statistics.common;
1733 dbg = &priv->statistics.common.dbg;
1734 div = &priv->statistics.common.div;
1735 accum_general = &priv->accum_stats.common;
1736 accum_dbg = &priv->accum_stats.common.dbg;
1737 accum_div = &priv->accum_stats.common.div;
1738 delta_general = &priv->delta_stats.common;
1739 max_general = &priv->max_delta_stats.common;
1740 delta_dbg = &priv->delta_stats.common.dbg;
1741 max_dbg = &priv->max_delta_stats.common.dbg;
1742 delta_div = &priv->delta_stats.common.div;
1743 max_div = &priv->max_delta_stats.common.div;
1744
1745 pos += iwl_statistics_flag(priv, buf, bufsz);
1746 pos += scnprintf(buf + pos, bufsz - pos,
1747 fmt_header, "Statistics_General:");
1748 pos += scnprintf(buf + pos, bufsz - pos,
1749 fmt_value, "temperature:",
1750 le32_to_cpu(general->temperature));
1751 pos += scnprintf(buf + pos, bufsz - pos,
1752 fmt_value, "temperature_m:",
1753 le32_to_cpu(general->temperature_m));
1754 pos += scnprintf(buf + pos, bufsz - pos,
1755 fmt_value, "ttl_timestamp:",
1756 le32_to_cpu(general->ttl_timestamp));
1757 pos += scnprintf(buf + pos, bufsz - pos,
1758 fmt_table, "burst_check:",
1759 le32_to_cpu(dbg->burst_check),
1760 accum_dbg->burst_check,
1761 delta_dbg->burst_check, max_dbg->burst_check);
1762 pos += scnprintf(buf + pos, bufsz - pos,
1763 fmt_table, "burst_count:",
1764 le32_to_cpu(dbg->burst_count),
1765 accum_dbg->burst_count,
1766 delta_dbg->burst_count, max_dbg->burst_count);
1767 pos += scnprintf(buf + pos, bufsz - pos,
1768 fmt_table, "wait_for_silence_timeout_count:",
1769 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
1770 accum_dbg->wait_for_silence_timeout_cnt,
1771 delta_dbg->wait_for_silence_timeout_cnt,
1772 max_dbg->wait_for_silence_timeout_cnt);
1773 pos += scnprintf(buf + pos, bufsz - pos,
1774 fmt_table, "sleep_time:",
1775 le32_to_cpu(general->sleep_time),
1776 accum_general->sleep_time,
1777 delta_general->sleep_time, max_general->sleep_time);
1778 pos += scnprintf(buf + pos, bufsz - pos,
1779 fmt_table, "slots_out:",
1780 le32_to_cpu(general->slots_out),
1781 accum_general->slots_out,
1782 delta_general->slots_out, max_general->slots_out);
1783 pos += scnprintf(buf + pos, bufsz - pos,
1784 fmt_table, "slots_idle:",
1785 le32_to_cpu(general->slots_idle),
1786 accum_general->slots_idle,
1787 delta_general->slots_idle, max_general->slots_idle);
1788 pos += scnprintf(buf + pos, bufsz - pos,
1789 fmt_table, "tx_on_a:",
1790 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
1791 delta_div->tx_on_a, max_div->tx_on_a);
1792 pos += scnprintf(buf + pos, bufsz - pos,
1793 fmt_table, "tx_on_b:",
1794 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
1795 delta_div->tx_on_b, max_div->tx_on_b);
1796 pos += scnprintf(buf + pos, bufsz - pos,
1797 fmt_table, "exec_time:",
1798 le32_to_cpu(div->exec_time), accum_div->exec_time,
1799 delta_div->exec_time, max_div->exec_time);
1800 pos += scnprintf(buf + pos, bufsz - pos,
1801 fmt_table, "probe_time:",
1802 le32_to_cpu(div->probe_time), accum_div->probe_time,
1803 delta_div->probe_time, max_div->probe_time);
1804 pos += scnprintf(buf + pos, bufsz - pos,
1805 fmt_table, "rx_enable_counter:",
1806 le32_to_cpu(general->rx_enable_counter),
1807 accum_general->rx_enable_counter,
1808 delta_general->rx_enable_counter,
1809 max_general->rx_enable_counter);
1810 pos += scnprintf(buf + pos, bufsz - pos,
1811 fmt_table, "num_of_sos_states:",
1812 le32_to_cpu(general->num_of_sos_states),
1813 accum_general->num_of_sos_states,
1814 delta_general->num_of_sos_states,
1815 max_general->num_of_sos_states);
1816 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1817 kfree(buf);
1818 return ret;
1819}
1820
1821static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1822 char __user *user_buf,
1823 size_t count, loff_t *ppos)
1824{
1825 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1826 int pos = 0;
1827 char *buf;
1828 int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200;
1829 ssize_t ret;
1830 struct statistics_bt_activity *bt, *accum_bt;
1831
1832 if (!iwl_is_alive(priv))
1833 return -EAGAIN;
1834
1835 if (!priv->bt_enable_flag)
1836 return -EINVAL;
1837
1838 /* make request to uCode to retrieve statistics information */
1839 mutex_lock(&priv->mutex);
1840 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1841 mutex_unlock(&priv->mutex);
1842
1843 if (ret) {
1844 IWL_ERR(priv,
1845 "Error sending statistics request: %zd\n", ret);
1846 return -EAGAIN;
1847 }
1848 buf = kzalloc(bufsz, GFP_KERNEL);
1849 if (!buf) {
1850 IWL_ERR(priv, "Can not allocate Buffer\n");
1851 return -ENOMEM;
1852 }
1853
1854 /*
1855 * the statistics information displayed here is based on
1856 * the last statistics notification from the uCode and
1857 * might not reflect the current uCode activity
1858 */
1859 bt = &priv->statistics.bt_activity;
1860 accum_bt = &priv->accum_stats.bt_activity;
1861
1862 pos += iwl_statistics_flag(priv, buf, bufsz);
1863 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n");
1864 pos += scnprintf(buf + pos, bufsz - pos,
1865 "\t\t\tcurrent\t\t\taccumulative\n");
1866 pos += scnprintf(buf + pos, bufsz - pos,
1867 "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
1868 le32_to_cpu(bt->hi_priority_tx_req_cnt),
1869 accum_bt->hi_priority_tx_req_cnt);
1870 pos += scnprintf(buf + pos, bufsz - pos,
1871 "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
1872 le32_to_cpu(bt->hi_priority_tx_denied_cnt),
1873 accum_bt->hi_priority_tx_denied_cnt);
1874 pos += scnprintf(buf + pos, bufsz - pos,
1875 "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
1876 le32_to_cpu(bt->lo_priority_tx_req_cnt),
1877 accum_bt->lo_priority_tx_req_cnt);
1878 pos += scnprintf(buf + pos, bufsz - pos,
1879 "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
1880 le32_to_cpu(bt->lo_priority_tx_denied_cnt),
1881 accum_bt->lo_priority_tx_denied_cnt);
1882 pos += scnprintf(buf + pos, bufsz - pos,
1883 "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
1884 le32_to_cpu(bt->hi_priority_rx_req_cnt),
1885 accum_bt->hi_priority_rx_req_cnt);
1886 pos += scnprintf(buf + pos, bufsz - pos,
1887 "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
1888 le32_to_cpu(bt->hi_priority_rx_denied_cnt),
1889 accum_bt->hi_priority_rx_denied_cnt);
1890 pos += scnprintf(buf + pos, bufsz - pos,
1891 "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
1892 le32_to_cpu(bt->lo_priority_rx_req_cnt),
1893 accum_bt->lo_priority_rx_req_cnt);
1894 pos += scnprintf(buf + pos, bufsz - pos,
1895 "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
1896 le32_to_cpu(bt->lo_priority_rx_denied_cnt),
1897 accum_bt->lo_priority_rx_denied_cnt);
1898
1899 pos += scnprintf(buf + pos, bufsz - pos,
1900 "(rx)num_bt_kills:\t\t%u\t\t\t%u\n",
1901 le32_to_cpu(priv->statistics.num_bt_kills),
1902 priv->statistics.accum_num_bt_kills);
1903
1904 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1905 kfree(buf);
1906 return ret;
1907}
1908
1909static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
1910 char __user *user_buf,
1911 size_t count, loff_t *ppos)
1912{
1913 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1914 int pos = 0;
1915 char *buf;
1916 int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) +
1917 (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
1918 ssize_t ret;
1919
1920 if (!iwl_is_alive(priv))
1921 return -EAGAIN;
1922
1923 buf = kzalloc(bufsz, GFP_KERNEL);
1924 if (!buf) {
1925 IWL_ERR(priv, "Can not allocate Buffer\n");
1926 return -ENOMEM;
1927 }
1928
1929 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
1930 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
1931 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
1932 priv->reply_tx_stats.pp_delay);
1933 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1934 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
1935 priv->reply_tx_stats.pp_few_bytes);
1936 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1937 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
1938 priv->reply_tx_stats.pp_bt_prio);
1939 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1940 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
1941 priv->reply_tx_stats.pp_quiet_period);
1942 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1943 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
1944 priv->reply_tx_stats.pp_calc_ttak);
1945 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1946 iwl_get_tx_fail_reason(
1947 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
1948 priv->reply_tx_stats.int_crossed_retry);
1949 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1950 iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
1951 priv->reply_tx_stats.short_limit);
1952 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1953 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
1954 priv->reply_tx_stats.long_limit);
1955 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1956 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
1957 priv->reply_tx_stats.fifo_underrun);
1958 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1959 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
1960 priv->reply_tx_stats.drain_flow);
1961 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1962 iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
1963 priv->reply_tx_stats.rfkill_flush);
1964 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1965 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
1966 priv->reply_tx_stats.life_expire);
1967 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1968 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
1969 priv->reply_tx_stats.dest_ps);
1970 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1971 iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
1972 priv->reply_tx_stats.host_abort);
1973 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1974 iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
1975 priv->reply_tx_stats.bt_retry);
1976 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1977 iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
1978 priv->reply_tx_stats.sta_invalid);
1979 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1980 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
1981 priv->reply_tx_stats.frag_drop);
1982 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1983 iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
1984 priv->reply_tx_stats.tid_disable);
1985 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1986 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
1987 priv->reply_tx_stats.fifo_flush);
1988 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1989 iwl_get_tx_fail_reason(
1990 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
1991 priv->reply_tx_stats.insuff_cf_poll);
1992 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1993 iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
1994 priv->reply_tx_stats.fail_hw_drop);
1995 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1996 iwl_get_tx_fail_reason(
1997 TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
1998 priv->reply_tx_stats.sta_color_mismatch);
1999 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
2000 priv->reply_tx_stats.unknown);
2001
2002 pos += scnprintf(buf + pos, bufsz - pos,
2003 "\nStatistics_Agg_TX_Error:\n");
2004
2005 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2006 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
2007 priv->reply_agg_tx_stats.underrun);
2008 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2009 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
2010 priv->reply_agg_tx_stats.bt_prio);
2011 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2012 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
2013 priv->reply_agg_tx_stats.few_bytes);
2014 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2015 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
2016 priv->reply_agg_tx_stats.abort);
2017 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
2018 iwl_get_agg_tx_fail_reason(
2019 AGG_TX_STATE_LAST_SENT_TTL_MSK),
2020 priv->reply_agg_tx_stats.last_sent_ttl);
2021 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
2022 iwl_get_agg_tx_fail_reason(
2023 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
2024 priv->reply_agg_tx_stats.last_sent_try);
2025 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
2026 iwl_get_agg_tx_fail_reason(
2027 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
2028 priv->reply_agg_tx_stats.last_sent_bt_kill);
2029 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2030 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
2031 priv->reply_agg_tx_stats.scd_query);
2032 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
2033 iwl_get_agg_tx_fail_reason(
2034 AGG_TX_STATE_TEST_BAD_CRC32_MSK),
2035 priv->reply_agg_tx_stats.bad_crc32);
2036 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2037 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
2038 priv->reply_agg_tx_stats.response);
2039 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2040 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
2041 priv->reply_agg_tx_stats.dump_tx);
2042 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2043 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
2044 priv->reply_agg_tx_stats.delay_tx);
2045 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
2046 priv->reply_agg_tx_stats.unknown);
2047
2048 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2049 kfree(buf);
2050 return ret;
2051}
2052
2053static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
2054 char __user *user_buf,
2055 size_t count, loff_t *ppos) {
2056
2057 struct iwl_priv *priv = file->private_data;
2058 int pos = 0;
2059 int cnt = 0;
2060 char *buf;
2061 int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
2062 ssize_t ret;
2063 struct iwl_sensitivity_data *data;
2064
2065 data = &priv->sensitivity_data;
2066 buf = kzalloc(bufsz, GFP_KERNEL);
2067 if (!buf) {
2068 IWL_ERR(priv, "Can not allocate Buffer\n");
2069 return -ENOMEM;
2070 }
2071
2072 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
2073 data->auto_corr_ofdm);
2074 pos += scnprintf(buf + pos, bufsz - pos,
2075 "auto_corr_ofdm_mrc:\t\t %u\n",
2076 data->auto_corr_ofdm_mrc);
2077 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
2078 data->auto_corr_ofdm_x1);
2079 pos += scnprintf(buf + pos, bufsz - pos,
2080 "auto_corr_ofdm_mrc_x1:\t\t %u\n",
2081 data->auto_corr_ofdm_mrc_x1);
2082 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
2083 data->auto_corr_cck);
2084 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
2085 data->auto_corr_cck_mrc);
2086 pos += scnprintf(buf + pos, bufsz - pos,
2087 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
2088 data->last_bad_plcp_cnt_ofdm);
2089 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
2090 data->last_fa_cnt_ofdm);
2091 pos += scnprintf(buf + pos, bufsz - pos,
2092 "last_bad_plcp_cnt_cck:\t\t %u\n",
2093 data->last_bad_plcp_cnt_cck);
2094 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
2095 data->last_fa_cnt_cck);
2096 pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
2097 data->nrg_curr_state);
2098 pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
2099 data->nrg_prev_state);
2100 pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
2101 for (cnt = 0; cnt < 10; cnt++) {
2102 pos += scnprintf(buf + pos, bufsz - pos, " %u",
2103 data->nrg_value[cnt]);
2104 }
2105 pos += scnprintf(buf + pos, bufsz - pos, "\n");
2106 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
2107 for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
2108 pos += scnprintf(buf + pos, bufsz - pos, " %u",
2109 data->nrg_silence_rssi[cnt]);
2110 }
2111 pos += scnprintf(buf + pos, bufsz - pos, "\n");
2112 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
2113 data->nrg_silence_ref);
2114 pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
2115 data->nrg_energy_idx);
2116 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
2117 data->nrg_silence_idx);
2118 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
2119 data->nrg_th_cck);
2120 pos += scnprintf(buf + pos, bufsz - pos,
2121 "nrg_auto_corr_silence_diff:\t %u\n",
2122 data->nrg_auto_corr_silence_diff);
2123 pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
2124 data->num_in_cck_no_fa);
2125 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
2126 data->nrg_th_ofdm);
2127
2128 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2129 kfree(buf);
2130 return ret;
2131}
2132
2133
2134static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
2135 char __user *user_buf,
2136 size_t count, loff_t *ppos) {
2137
2138 struct iwl_priv *priv = file->private_data;
2139 int pos = 0;
2140 int cnt = 0;
2141 char *buf;
2142 int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
2143 ssize_t ret;
2144 struct iwl_chain_noise_data *data;
2145
2146 data = &priv->chain_noise_data;
2147 buf = kzalloc(bufsz, GFP_KERNEL);
2148 if (!buf) {
2149 IWL_ERR(priv, "Can not allocate Buffer\n");
2150 return -ENOMEM;
2151 }
2152
2153 pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
2154 data->active_chains);
2155 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
2156 data->chain_noise_a);
2157 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
2158 data->chain_noise_b);
2159 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
2160 data->chain_noise_c);
2161 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
2162 data->chain_signal_a);
2163 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
2164 data->chain_signal_b);
2165 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
2166 data->chain_signal_c);
2167 pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
2168 data->beacon_count);
2169
2170 pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
2171 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
2172 pos += scnprintf(buf + pos, bufsz - pos, " %u",
2173 data->disconn_array[cnt]);
2174 }
2175 pos += scnprintf(buf + pos, bufsz - pos, "\n");
2176 pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
2177 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
2178 pos += scnprintf(buf + pos, bufsz - pos, " %u",
2179 data->delta_gain_code[cnt]);
2180 }
2181 pos += scnprintf(buf + pos, bufsz - pos, "\n");
2182 pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
2183 data->radio_write);
2184 pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
2185 data->state);
2186
2187 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2188 kfree(buf);
2189 return ret;
2190}
2191
2192static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
2193 char __user *user_buf,
2194 size_t count, loff_t *ppos)
2195{
2196 struct iwl_priv *priv = file->private_data;
2197 char buf[60];
2198 int pos = 0;
2199 const size_t bufsz = sizeof(buf);
2200 u32 pwrsave_status;
2201
2202 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
2203 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
2204
2205 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
2206 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
2207 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
2208 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
2209 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
2210 "error");
2211
2212 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2213}
2214
2215static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
2216 const char __user *user_buf,
2217 size_t count, loff_t *ppos)
2218{
2219 struct iwl_priv *priv = file->private_data;
2220 char buf[8];
2221 int buf_size;
2222 int clear;
2223
2224 memset(buf, 0, sizeof(buf));
2225 buf_size = min(count, sizeof(buf) - 1);
2226 if (copy_from_user(buf, user_buf, buf_size))
2227 return -EFAULT;
2228 if (sscanf(buf, "%d", &clear) != 1)
2229 return -EFAULT;
2230
2231 /* make request to uCode to retrieve statistics information */
2232 mutex_lock(&priv->mutex);
2233 iwl_send_statistics_request(priv, CMD_SYNC, true);
2234 mutex_unlock(&priv->mutex);
2235
2236 return count;
2237}
2238
2239static ssize_t iwl_dbgfs_csr_write(struct file *file,
2240 const char __user *user_buf,
2241 size_t count, loff_t *ppos)
2242{
2243 struct iwl_priv *priv = file->private_data;
2244 char buf[8];
2245 int buf_size;
2246 int csr;
2247
2248 memset(buf, 0, sizeof(buf));
2249 buf_size = min(count, sizeof(buf) - 1);
2250 if (copy_from_user(buf, user_buf, buf_size))
2251 return -EFAULT;
2252 if (sscanf(buf, "%d", &csr) != 1)
2253 return -EFAULT;
2254
2255 iwl_dump_csr(priv);
2256
2257 return count;
2258}
2259
2260static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
2261 char __user *user_buf,
2262 size_t count, loff_t *ppos) {
2263
2264 struct iwl_priv *priv = file->private_data;
2265 int pos = 0;
2266 char buf[128];
2267 const size_t bufsz = sizeof(buf);
2268
2269 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
2270 priv->event_log.ucode_trace ? "On" : "Off");
2271 pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
2272 priv->event_log.non_wraps_count);
2273 pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
2274 priv->event_log.wraps_once_count);
2275 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
2276 priv->event_log.wraps_more_count);
2277
2278 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2279}
2280
2281static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
2282 const char __user *user_buf,
2283 size_t count, loff_t *ppos)
2284{
2285 struct iwl_priv *priv = file->private_data;
2286 char buf[8];
2287 int buf_size;
2288 int trace;
2289
2290 memset(buf, 0, sizeof(buf));
2291 buf_size = min(count, sizeof(buf) - 1);
2292 if (copy_from_user(buf, user_buf, buf_size))
2293 return -EFAULT;
2294 if (sscanf(buf, "%d", &trace) != 1)
2295 return -EFAULT;
2296
2297 if (trace) {
2298 priv->event_log.ucode_trace = true;
2299 /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
2300 mod_timer(&priv->ucode_trace,
2301 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
2302 } else {
2303 priv->event_log.ucode_trace = false;
2304 del_timer_sync(&priv->ucode_trace);
2305 }
2306
2307 return count;
2308}
2309
2310static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
2311 char __user *user_buf,
2312 size_t count, loff_t *ppos) {
2313
2314 struct iwl_priv *priv = file->private_data;
2315 int len = 0;
2316 char buf[20];
2317
2318 len = sprintf(buf, "0x%04X\n",
2319 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
2320 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
2321}
2322
2323static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
2324 char __user *user_buf,
2325 size_t count, loff_t *ppos) {
2326
2327 struct iwl_priv *priv = file->private_data;
2328 int len = 0;
2329 char buf[20];
2330
2331 len = sprintf(buf, "0x%04X\n",
2332 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
2333 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
2334}
2335
2336static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2337 char __user *user_buf,
2338 size_t count, loff_t *ppos)
2339{
2340 struct iwl_priv *priv = file->private_data;
2341 char *buf;
2342 int pos = 0;
2343 ssize_t ret = -EFAULT;
2344
2345 ret = pos = iwl_dump_fh(priv, &buf, true);
2346 if (buf) {
2347 ret = simple_read_from_buffer(user_buf,
2348 count, ppos, buf, pos);
2349 kfree(buf);
2350 }
2351
2352 return ret;
2353}
2354
2355static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
2356 char __user *user_buf,
2357 size_t count, loff_t *ppos) {
2358
2359 struct iwl_priv *priv = file->private_data;
2360 int pos = 0;
2361 char buf[12];
2362 const size_t bufsz = sizeof(buf);
2363
2364 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
2365 priv->missed_beacon_threshold);
2366
2367 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2368}
2369
2370static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
2371 const char __user *user_buf,
2372 size_t count, loff_t *ppos)
2373{
2374 struct iwl_priv *priv = file->private_data;
2375 char buf[8];
2376 int buf_size;
2377 int missed;
2378
2379 memset(buf, 0, sizeof(buf));
2380 buf_size = min(count, sizeof(buf) - 1);
2381 if (copy_from_user(buf, user_buf, buf_size))
2382 return -EFAULT;
2383 if (sscanf(buf, "%d", &missed) != 1)
2384 return -EINVAL;
2385
2386 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
2387 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
2388 priv->missed_beacon_threshold =
2389 IWL_MISSED_BEACON_THRESHOLD_DEF;
2390 else
2391 priv->missed_beacon_threshold = missed;
2392
2393 return count;
2394}
2395
2396static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
2397 char __user *user_buf,
2398 size_t count, loff_t *ppos) {
2399
2400 struct iwl_priv *priv = file->private_data;
2401 int pos = 0;
2402 char buf[12];
2403 const size_t bufsz = sizeof(buf);
2404
2405 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
2406 priv->cfg->base_params->plcp_delta_threshold);
2407
2408 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2409}
2410
2411static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
2412 const char __user *user_buf,
2413 size_t count, loff_t *ppos) {
2414
2415 struct iwl_priv *priv = file->private_data;
2416 char buf[8];
2417 int buf_size;
2418 int plcp;
2419
2420 memset(buf, 0, sizeof(buf));
2421 buf_size = min(count, sizeof(buf) - 1);
2422 if (copy_from_user(buf, user_buf, buf_size))
2423 return -EFAULT;
2424 if (sscanf(buf, "%d", &plcp) != 1)
2425 return -EINVAL;
2426 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
2427 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
2428 priv->cfg->base_params->plcp_delta_threshold =
2429 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
2430 else
2431 priv->cfg->base_params->plcp_delta_threshold = plcp;
2432 return count;
2433}
2434
2435static ssize_t iwl_dbgfs_force_reset_read(struct file *file,
2436 char __user *user_buf,
2437 size_t count, loff_t *ppos) {
2438
2439 struct iwl_priv *priv = file->private_data;
2440 int i, pos = 0;
2441 char buf[300];
2442 const size_t bufsz = sizeof(buf);
2443 struct iwl_force_reset *force_reset;
2444
2445 for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
2446 force_reset = &priv->force_reset[i];
2447 pos += scnprintf(buf + pos, bufsz - pos,
2448 "Force reset method %d\n", i);
2449 pos += scnprintf(buf + pos, bufsz - pos,
2450 "\tnumber of reset request: %d\n",
2451 force_reset->reset_request_count);
2452 pos += scnprintf(buf + pos, bufsz - pos,
2453 "\tnumber of reset request success: %d\n",
2454 force_reset->reset_success_count);
2455 pos += scnprintf(buf + pos, bufsz - pos,
2456 "\tnumber of reset request reject: %d\n",
2457 force_reset->reset_reject_count);
2458 pos += scnprintf(buf + pos, bufsz - pos,
2459 "\treset duration: %lu\n",
2460 force_reset->reset_duration);
2461 }
2462 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2463}
2464
2465static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
2466 const char __user *user_buf,
2467 size_t count, loff_t *ppos) {
2468
2469 struct iwl_priv *priv = file->private_data;
2470 char buf[8];
2471 int buf_size;
2472 int reset, ret;
2473
2474 memset(buf, 0, sizeof(buf));
2475 buf_size = min(count, sizeof(buf) - 1);
2476 if (copy_from_user(buf, user_buf, buf_size))
2477 return -EFAULT;
2478 if (sscanf(buf, "%d", &reset) != 1)
2479 return -EINVAL;
2480 switch (reset) {
2481 case IWL_RF_RESET:
2482 case IWL_FW_RESET:
2483 ret = iwl_force_reset(priv, reset, true);
2484 break;
2485 default:
2486 return -EINVAL;
2487 }
2488 return ret ? ret : count;
2489}
2490
2491static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
2492 const char __user *user_buf,
2493 size_t count, loff_t *ppos) {
2494
2495 struct iwl_priv *priv = file->private_data;
2496 char buf[8];
2497 int buf_size;
2498 int flush;
2499
2500 memset(buf, 0, sizeof(buf));
2501 buf_size = min(count, sizeof(buf) - 1);
2502 if (copy_from_user(buf, user_buf, buf_size))
2503 return -EFAULT;
2504 if (sscanf(buf, "%d", &flush) != 1)
2505 return -EINVAL;
2506
2507 if (iwl_is_rfkill(priv))
2508 return -EFAULT;
2509
2510 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
2511
2512 return count;
2513}
2514
2515static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
2516 const char __user *user_buf,
2517 size_t count, loff_t *ppos) {
2518
2519 struct iwl_priv *priv = file->private_data;
2520 char buf[8];
2521 int buf_size;
2522 int timeout;
2523
2524 memset(buf, 0, sizeof(buf));
2525 buf_size = min(count, sizeof(buf) - 1);
2526 if (copy_from_user(buf, user_buf, buf_size))
2527 return -EFAULT;
2528 if (sscanf(buf, "%d", &timeout) != 1)
2529 return -EINVAL;
2530 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
2531 timeout = IWL_DEF_WD_TIMEOUT;
2532
2533 priv->cfg->base_params->wd_timeout = timeout;
2534 iwl_setup_watchdog(priv);
2535 return count;
2536}
2537
2538static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
2539 char __user *user_buf,
2540 size_t count, loff_t *ppos) {
2541
2542 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
2543 int pos = 0;
2544 char buf[200];
2545 const size_t bufsz = sizeof(buf);
2546
2547 if (!priv->bt_enable_flag) {
2548 pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n");
2549 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2550 }
2551 pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n",
2552 priv->bt_enable_flag);
2553 pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n",
2554 priv->bt_full_concurrent ? "full concurrency" : "3-wire");
2555 pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
2556 "last traffic notif: %d\n",
2557 priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
2558 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
2559 "kill_ack_mask: %x, kill_cts_mask: %x\n",
2560 priv->bt_ch_announce, priv->kill_ack_mask,
2561 priv->kill_cts_mask);
2562
2563 pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
2564 switch (priv->bt_traffic_load) {
2565 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
2566 pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n");
2567 break;
2568 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
2569 pos += scnprintf(buf + pos, bufsz - pos, "High\n");
2570 break;
2571 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
2572 pos += scnprintf(buf + pos, bufsz - pos, "Low\n");
2573 break;
2574 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
2575 default:
2576 pos += scnprintf(buf + pos, bufsz - pos, "None\n");
2577 break;
2578 }
2579
2580 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2581}
2582
2583static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
2584 char __user *user_buf,
2585 size_t count, loff_t *ppos)
2586{
2587 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
2588
2589 int pos = 0;
2590 char buf[40];
2591 const size_t bufsz = sizeof(buf);
2592
2593 if (priv->cfg->ht_params)
2594 pos += scnprintf(buf + pos, bufsz - pos,
2595 "use %s for aggregation\n",
2596 (priv->cfg->ht_params->use_rts_for_aggregation) ?
2597 "rts/cts" : "cts-to-self");
2598 else
2599 pos += scnprintf(buf + pos, bufsz - pos, "N/A");
2600
2601 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2602}
2603
2604static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
2605 const char __user *user_buf,
2606 size_t count, loff_t *ppos) {
2607
2608 struct iwl_priv *priv = file->private_data;
2609 char buf[8];
2610 int buf_size;
2611 int rts;
2612
2613 if (!priv->cfg->ht_params)
2614 return -EINVAL;
2615
2616 memset(buf, 0, sizeof(buf));
2617 buf_size = min(count, sizeof(buf) - 1);
2618 if (copy_from_user(buf, user_buf, buf_size))
2619 return -EFAULT;
2620 if (sscanf(buf, "%d", &rts) != 1)
2621 return -EINVAL;
2622 if (rts)
2623 priv->cfg->ht_params->use_rts_for_aggregation = true;
2624 else
2625 priv->cfg->ht_params->use_rts_for_aggregation = false;
2626 return count;
2627}
2628
2629DEBUGFS_READ_FILE_OPS(rx_statistics);
2630DEBUGFS_READ_FILE_OPS(tx_statistics);
2631DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
2632DEBUGFS_READ_FILE_OPS(rx_queue);
2633DEBUGFS_READ_FILE_OPS(tx_queue);
2634DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
2635DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
2636DEBUGFS_READ_FILE_OPS(ucode_general_stats);
2637DEBUGFS_READ_FILE_OPS(sensitivity);
2638DEBUGFS_READ_FILE_OPS(chain_noise);
2639DEBUGFS_READ_FILE_OPS(power_save_status);
2640DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
2641DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
2642DEBUGFS_WRITE_FILE_OPS(csr);
2643DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
2644DEBUGFS_READ_FILE_OPS(fh_reg);
2645DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
2646DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
2647DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
2648DEBUGFS_READ_FILE_OPS(rxon_flags);
2649DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
2650DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
2651DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
2652DEBUGFS_WRITE_FILE_OPS(wd_timeout);
2653DEBUGFS_READ_FILE_OPS(bt_traffic);
2654DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
2655DEBUGFS_READ_FILE_OPS(reply_tx_error);
2656
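/*
 * For reference: the DEBUGFS_*_FILE_OPS() helpers above and the
 * DEBUGFS_ADD_FILE()/DEBUGFS_ADD_BOOL() helpers used in iwl_dbgfs_register()
 * are defined earlier in this file. A minimal sketch of the usual pattern is
 * shown below; the exact expansion (including iwl_dbgfs_open_file_generic)
 * is an assumption for illustration, not the verbatim macro body:
 *
 *	#define DEBUGFS_READ_FILE_OPS(name)				\
 *	static const struct file_operations iwl_dbgfs_##name##_ops = {	\
 *		.read = iwl_dbgfs_##name##_read,			\
 *		.open = iwl_dbgfs_open_file_generic,			\
 *		.llseek = generic_file_llseek,				\
 *	};
 *
 * DEBUGFS_ADD_FILE(name, parent, mode) then amounts to a
 * debugfs_create_file(#name, mode, parent, priv, &iwl_dbgfs_##name##_ops)
 * call followed by an error check on the returned dentry.
 */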
2657/*
2658 * Create the debugfs files and directories
2659 *
2660 */
2661int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2662{
2663 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
2664 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
2665
2666 dir_drv = debugfs_create_dir(name, phyd);
2667 if (!dir_drv)
2668 return -ENOMEM;
2669
2670 priv->debugfs_dir = dir_drv;
2671
2672 dir_data = debugfs_create_dir("data", dir_drv);
2673 if (!dir_data)
2674 goto err;
2675 dir_rf = debugfs_create_dir("rf", dir_drv);
2676 if (!dir_rf)
2677 goto err;
2678 dir_debug = debugfs_create_dir("debug", dir_drv);
2679 if (!dir_debug)
2680 goto err;
2681
2682 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
2683 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
2684 DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR);
2685 DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
2686 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
2687 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
2688 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
2689 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
2690 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
2691 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
2692 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
2693 DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
2694 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
2695 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
2696 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
2697 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
2698 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
2699 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
2700 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
2701 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
2702 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
2703 DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
2704 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
2705 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
2706 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
2707 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
2708 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
2709 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
2710 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
2711 DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
2712 DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR);
2713
2714 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
2715 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
2716 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
2717 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
2718 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
2719 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
2720 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
2721 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
2722 if (iwl_advanced_bt_coexist(priv))
2723 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
2724 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
2725 &priv->disable_sens_cal);
2726 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
2727 &priv->disable_chain_noise_cal);
2728 return 0;
2729
2730err:
2731 IWL_ERR(priv, "Can't create the debugfs directory\n");
2732 iwl_dbgfs_unregister(priv);
2733 return -ENOMEM;
2734}
2735
2736/**
2737 * Remove the debugfs files and directories
2738 *
2739 */
2740void iwl_dbgfs_unregister(struct iwl_priv *priv)
2741{
2742 if (!priv->debugfs_dir)
2743 return;
2744
2745 debugfs_remove_recursive(priv->debugfs_dir);
2746 priv->debugfs_dir = NULL;
2747}
2748
2749
2750
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
new file mode 100644
index 00000000000..6c9790cac8d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -0,0 +1,1662 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions.
29 */
30
31#ifndef __iwl_dev_h__
32#define __iwl_dev_h__
33
34#include <linux/interrupt.h>
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <linux/wait.h>
38#include <linux/leds.h>
39#include <net/ieee80211_radiotap.h>
40
41#include "iwl-eeprom.h"
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-debug.h"
46#include "iwl-agn-hw.h"
47#include "iwl-led.h"
48#include "iwl-power.h"
49#include "iwl-agn-rs.h"
50#include "iwl-agn-tt.h"
51#include "iwl-bus.h"
52#include "iwl-trans.h"
53
54#define DRV_NAME "iwlagn"
55
56struct iwl_tx_queue;
57
58/* CT-KILL constants */
59#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
60#define CT_KILL_THRESHOLD 114 /* in Celsius */
61#define CT_KILL_EXIT_THRESHOLD 95 /* in Celsius */
62
63/* Default noise level to report when noise measurement is not available.
64 * This may be because we're:
65 * 1) Not associated (4965, no beacon statistics being sent to driver)
66 * 2) Scanning (noise measurement does not apply to associated channel)
67 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
68 * Use default noise value of -127 ... this is below the range of measurable
69 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
70 * Also, -127 works better than 0 when averaging frames with/without
71 * noise info (e.g. averaging might be done in app); measured dBm values are
72 * always negative ... using a negative value as the default keeps all
73 * averages within an s8's (used in some apps) range of negative values. */
74#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
75
76/*
77 * RTS threshold here is total size [2347] minus 4 FCS bytes
78 * Per spec:
79 * a value of 0 means RTS on all data/management packets
80 * a value > max MSDU size means no RTS
81 * else RTS for data/management frames where MPDU is larger
82 * than RTS value.
83 */
84#define DEFAULT_RTS_THRESHOLD 2347U
85#define MIN_RTS_THRESHOLD 0U
86#define MAX_RTS_THRESHOLD 2347U
87#define MAX_MSDU_SIZE 2304U
88#define MAX_MPDU_SIZE 2346U
89#define DEFAULT_BEACON_INTERVAL 200U
90#define DEFAULT_SHORT_RETRY_LIMIT 7U
91#define DEFAULT_LONG_RETRY_LIMIT 4U
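/*
 * Illustrative sketch only (not part of the driver): the RTS rule from the
 * comment above, written out as a helper. The function name and parameters
 * ("frame_len" for the MPDU length in bytes, "rts_threshold" for the
 * configured value) are made up for this example.
 */
static inline bool iwl_example_needs_rts(u32 frame_len, u32 rts_threshold)
{
	if (rts_threshold == MIN_RTS_THRESHOLD)	/* 0: RTS for all data/mgmt */
		return true;
	if (rts_threshold > MAX_MSDU_SIZE)	/* above max MSDU: never RTS */
		return false;
	return frame_len > rts_threshold;	/* RTS only for larger MPDUs */
}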
92
93struct iwl_rx_mem_buffer {
94 dma_addr_t page_dma;
95 struct page *page;
96 struct list_head list;
97};
98
99#define rxb_addr(r) page_address(r->page)
100
101/* defined below */
102struct iwl_device_cmd;
103
104struct iwl_cmd_meta {
105 /* only for SYNC commands, iff the reply skb is wanted */
106 struct iwl_host_cmd *source;
107 /*
108 * only for ASYNC commands
109 * (which is somewhat stupid -- look at iwl-sta.c, for instance,
110 * which duplicates a bunch of code because the callback isn't
111 * invoked for SYNC commands; if it were, and its result were
112 * passed through, it would be simpler...)
113 */
114 void (*callback)(struct iwl_priv *priv,
115 struct iwl_device_cmd *cmd,
116 struct iwl_rx_packet *pkt);
117
118 u32 flags;
119
120 DEFINE_DMA_UNMAP_ADDR(mapping);
121 DEFINE_DMA_UNMAP_LEN(len);
122};
123
124/*
125 * Generic queue structure
126 *
127 * Contains common data for Rx and Tx queues.
128 *
129 * Note the difference between n_bd and n_window: the hardware
130 * always assumes 256 descriptors, so n_bd is always 256 (unless
131 * future HW changes alter this). For the normal TX
132 * queues, n_window, which is the size of the software queue data,
133 * is also 256; however, for the command queue, n_window is only
134 * 32 since we don't need so many commands pending. Since the HW
135 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
136 * the software buffers (in the variables @meta, @txb in struct
137 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
138 * in the same struct) have 256.
139 * This means that we end up with the following:
140 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
141 * SW entries: | 0 | ... | 31 |
142 * where N is a number between 0 and 7. This means that the SW
143 * data is a window overlaid on the HW queue.
144 */
145struct iwl_queue {
146 int n_bd; /* number of BDs in this queue */
147 int write_ptr; /* 1-st empty entry (index) host_w*/
148 int read_ptr; /* last used entry (index) host_r*/
149 /* use for monitoring and recovering the stuck queue */
150 dma_addr_t dma_addr; /* physical addr for BD's */
151 int n_window; /* safe queue window */
152 u32 id;
153 int low_mark; /* low watermark, resume queue if free
154 * space more than this */
155 int high_mark; /* high watermark, stop queue if free
156 * space less than this */
157};
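/*
 * Illustrative sketch only (not part of the driver): mapping a HW ring
 * index onto the SW window described in the comment above. With n_bd = 256
 * and n_window = 32 (command queue), several HW indices share one SW slot.
 * The real driver keeps an equivalent helper elsewhere; the name below is
 * made up for this example.
 */
static inline int iwl_example_sw_index(const struct iwl_queue *q, u32 hw_index)
{
	/* valid because n_window is a power of two (32 or 256) */
	return hw_index & (q->n_window - 1);
}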
158
159/* One for each TFD */
160struct iwl_tx_info {
161 struct sk_buff *skb;
162 struct iwl_rxon_context *ctx;
163};
164
165/**
166 * struct iwl_tx_queue - Tx Queue for DMA
167 * @q: generic Rx/Tx queue descriptor
168 * @bd: base of circular buffer of TFDs
169 * @cmd: array of command/TX buffer pointers
170 * @meta: array of meta data for each command/tx buffer
171 * @dma_addr_cmd: physical address of cmd/tx buffer array
172 * @txb: array of per-TFD driver data
173 * @time_stamp: time (in jiffies) of last read_ptr change
174 * @need_update: indicates need to update read/write index
175 * @sched_retry: indicates the queue is used for HT aggregation (HT AGG)
176 *
177 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
178 * descriptors) and required locking structures.
179 */
180#define TFD_TX_CMD_SLOTS 256
181#define TFD_CMD_SLOTS 32
182
183struct iwl_tx_queue {
184 struct iwl_queue q;
185 struct iwl_tfd *tfds;
186 struct iwl_device_cmd **cmd;
187 struct iwl_cmd_meta *meta;
188 struct iwl_tx_info *txb;
189 unsigned long time_stamp;
190 u8 need_update;
191 u8 sched_retry;
192 u8 active;
193 u8 swq_id;
194};
195
196#define IWL_NUM_SCAN_RATES (2)
197
198/*
199 * One for each channel, holds all channel setup data
200 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
201 * with one another!
202 */
203struct iwl_channel_info {
204 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
205 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
206 * HT40 channel */
207
208 u8 channel; /* channel number */
209 u8 flags; /* flags copied from EEPROM */
210 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
211 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
212 s8 min_power; /* always 0 */
213 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
214
215 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
216 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
217 enum ieee80211_band band;
218
219 /* HT40 channel info */
220 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
221 u8 ht40_flags; /* flags copied from EEPROM */
222 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
223};
224
225#define IWL_TX_FIFO_BK 0 /* shared */
226#define IWL_TX_FIFO_BE 1
227#define IWL_TX_FIFO_VI 2 /* shared */
228#define IWL_TX_FIFO_VO 3
229#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
230#define IWL_TX_FIFO_BE_IPAN 4
231#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
232#define IWL_TX_FIFO_VO_IPAN 5
233#define IWL_TX_FIFO_UNUSED -1
234
235/* Minimum number of queues. MAX_NUM is defined in hw specific files.
236 * Set the minimum to accommodate the 4 standard TX queues, 1 command
237 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
238#define IWL_MIN_NUM_QUEUES 10
239
240/*
241 * Command queue depends on iPAN support.
242 */
243#define IWL_DEFAULT_CMD_QUEUE_NUM 4
244#define IWL_IPAN_CMD_QUEUE_NUM 9
245
246/*
247 * This queue number is required for proper operation
248 * because the ucode will stop/start the scheduler as
249 * required.
250 */
251#define IWL_IPAN_MCAST_QUEUE 8
252
253#define IEEE80211_DATA_LEN 2304
254#define IEEE80211_4ADDR_LEN 30
255#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
256#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
257
258
259#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
260#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
261#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
262
263enum {
264 CMD_SYNC = 0,
265 CMD_ASYNC = BIT(0),
266 CMD_WANT_SKB = BIT(1),
267 CMD_ON_DEMAND = BIT(2),
268};
269
270#define DEF_CMD_PAYLOAD_SIZE 320
271
272/**
273 * struct iwl_device_cmd
274 *
275 * For allocation of the command and tx queues, this establishes the overall
276 * size of the largest command we send to uCode, except for commands that
277 * aren't fully copied and use other TFD space.
278 */
279struct iwl_device_cmd {
280 struct iwl_cmd_header hdr; /* uCode API */
281 union {
282 u32 flags;
283 u8 val8;
284 u16 val16;
285 u32 val32;
286 struct iwl_tx_cmd tx;
287 struct iwl6000_channel_switch_cmd chswitch;
288 u8 payload[DEF_CMD_PAYLOAD_SIZE];
289 } __packed cmd;
290} __packed;
291
292#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
293
294#define IWL_MAX_CMD_TFDS 2
295
296enum iwl_hcmd_dataflag {
297 IWL_HCMD_DFL_NOCOPY = BIT(0),
298};
299
300/**
301 * struct iwl_host_cmd - Host command to the uCode
302 * @data: array of chunks that composes the data of the host command
303 * @reply_page: pointer to the page that holds the response to the host command
304 * @callback: called when the response arrives (ASYNC commands only)
305 * @flags: can be CMD_*; note CMD_WANT_SKB is incompatible with CMD_ASYNC
306 * @len: array of the lengths of the chunks in data
307 * @dataflags: per-chunk flags, see enum iwl_hcmd_dataflag
308 * @id: id of the host command
309 */
310struct iwl_host_cmd {
311 const void *data[IWL_MAX_CMD_TFDS];
312 unsigned long reply_page;
313 void (*callback)(struct iwl_priv *priv,
314 struct iwl_device_cmd *cmd,
315 struct iwl_rx_packet *pkt);
316 u32 flags;
317 u16 len[IWL_MAX_CMD_TFDS];
318 u8 dataflags[IWL_MAX_CMD_TFDS];
319 u8 id;
320};
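
/*
 * Minimal sketch of filling in a host command, using only the fields
 * defined above.  The payload argument and the way the command would
 * eventually be handed to the transport layer are assumptions made for
 * illustration; they are not dictated by this header.
 */
static inline void iwl_host_cmd_example(struct iwl_host_cmd *cmd,
					u8 id, const void *payload, u16 len)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->id = id;
	cmd->data[0] = payload;		/* first (and only) chunk */
	cmd->len[0] = len;
	cmd->dataflags[0] = 0;		/* chunk is copied into the command buffer */
	cmd->flags = CMD_SYNC;		/* CMD_WANT_SKB would also require CMD_SYNC */
}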
321
322#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
323#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
324#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
325
326/**
327 * struct iwl_rx_queue - Rx queue
328 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
329 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
330 * @read: Shared index to newest available Rx buffer
331 * @write: Shared index to oldest written Rx packet
332 * @free_count: Number of pre-allocated buffers in rx_free
333 * @rx_free: list of free SKBs for use
334 * @rx_used: List of Rx buffers with no SKB
335 * @need_update: flag to indicate we need to update read/write index
336 * @rb_stts: driver's pointer to receive buffer status
337 * @rb_stts_dma: bus address of receive buffer status
338 *
339 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
340 */
341struct iwl_rx_queue {
342 __le32 *bd;
343 dma_addr_t bd_dma;
344 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
345 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
346 u32 read;
347 u32 write;
348 u32 free_count;
349 u32 write_actual;
350 struct list_head rx_free;
351 struct list_head rx_used;
352 int need_update;
353 struct iwl_rb_status *rb_stts;
354 dma_addr_t rb_stts_dma;
355 spinlock_t lock;
356};
357
358#define IWL_SUPPORTED_RATES_IE_LEN 8
359
360#define MAX_TID_COUNT 9
361
362#define IWL_INVALID_RATE 0xFF
363#define IWL_INVALID_VALUE -1
364
365/**
366 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
367 * @txq_id: Tx queue used for Tx attempt
368 * @frame_count: # frames attempted by Tx command
369 * @wait_for_ba: Expect block-ack before next Tx reply
370 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
371 * @bitmap: one bit for each frame pending ACK in the Tx window
372 * (low and high halves stored together in a single u64)
373 * @rate_n_flags: Rate at which Tx was attempted
374 *
375 * If REPLY_TX indicates that aggregation was attempted, driver must wait
376 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
377 * until block ack arrives.
378 */
379struct iwl_ht_agg {
380 u16 txq_id;
381 u16 frame_count;
382 u16 wait_for_ba;
383 u16 start_idx;
384 u64 bitmap;
385 u32 rate_n_flags;
386#define IWL_AGG_OFF 0
387#define IWL_AGG_ON 1
388#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
389#define IWL_EMPTYING_HW_QUEUE_DELBA 3
390 u8 state;
391 u8 tx_fifo;
392};
393
394
395struct iwl_tid_data {
396 u16 seq_number; /* agn only */
397 u16 tfds_in_queue;
398 struct iwl_ht_agg agg;
399};
400
401union iwl_ht_rate_supp {
402 u16 rates;
403 struct {
404 u8 siso_rate;
405 u8 mimo_rate;
406 };
407};
408
409#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
410#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
411#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
412#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
413#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
414#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
415#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
416
417/*
418 * Maximal MPDU density for TX aggregation
419 * 4 - 2us density
420 * 5 - 4us density
421 * 6 - 8us density
422 * 7 - 16us density
423 */
424#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
425#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
426#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
427#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
428#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
429#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
430#define CFG_HT_MPDU_DENSITY_MIN (0x1)
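
/*
 * Worked example for the density table above: for the codes listed
 * (0x4..0x7) the spacing in microseconds is 1 << (code - 3), so the
 * default CFG_HT_MPDU_DENSITY_4USEC (0x5) gives 1 << 2 = 4 us.  The
 * helper is illustrative only and covers only those listed codes.
 */
static inline unsigned int ht_mpdu_density_to_usec(u8 density_code)
{
	return 1U << (density_code - 3);	/* valid for codes 0x4..0x7 */
}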
431
432struct iwl_ht_config {
433 bool single_chain_sufficient;
434 enum ieee80211_smps_mode smps; /* current smps mode */
435};
436
437/* QoS structures */
438struct iwl_qos_info {
439 int qos_active;
440 struct iwl_qosparam_cmd def_qos_parm;
441};
442
443/*
444 * Structure should be accessed with sta_lock held. When station addition
445 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
446 * the commands (iwl_addsta_cmd and iwl_link_quality_cmd) without sta_lock
447 * held.
448 */
449struct iwl_station_entry {
450 struct iwl_addsta_cmd sta;
451 struct iwl_tid_data tid[MAX_TID_COUNT];
452 u8 used, ctxid;
453 struct iwl_link_quality_cmd *lq;
454};
455
456struct iwl_station_priv_common {
457 struct iwl_rxon_context *ctx;
458 u8 sta_id;
459};
460
461/*
462 * iwl_station_priv: Driver's private station information
463 *
464 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
465 * in the structure for use by the driver. This structure is placed in
466 * that space.
467 */
468struct iwl_station_priv {
469 struct iwl_station_priv_common common;
470 struct iwl_lq_sta lq_sta;
471 atomic_t pending_frames;
472 bool client;
473 bool asleep;
474 u8 max_agg_bufsize;
475};
476
477/**
478 * struct iwl_vif_priv - driver's private per-interface information
479 *
480 * When mac80211 allocates a virtual interface, it can allocate
481 * space for us to put data into.
482 */
483struct iwl_vif_priv {
484 struct iwl_rxon_context *ctx;
485 u8 ibss_bssid_sta_id;
486};
487
488/* one for each uCode image (inst/data, boot/init/runtime) */
489struct fw_desc {
490 void *v_addr; /* access by driver */
491 dma_addr_t p_addr; /* access by card's busmaster DMA */
492 u32 len; /* bytes */
493};
494
495struct fw_img {
496 struct fw_desc code, data;
497};
498
499/* v1/v2 uCode file layout */
500struct iwl_ucode_header {
501 __le32 ver; /* major/minor/API/serial */
502 union {
503 struct {
504 __le32 inst_size; /* bytes of runtime code */
505 __le32 data_size; /* bytes of runtime data */
506 __le32 init_size; /* bytes of init code */
507 __le32 init_data_size; /* bytes of init data */
508 __le32 boot_size; /* bytes of bootstrap code */
509 u8 data[0]; /* in same order as sizes */
510 } v1;
511 struct {
512 __le32 build; /* build number */
513 __le32 inst_size; /* bytes of runtime code */
514 __le32 data_size; /* bytes of runtime data */
515 __le32 init_size; /* bytes of init code */
516 __le32 init_data_size; /* bytes of init data */
517 __le32 boot_size; /* bytes of bootstrap code */
518 u8 data[0]; /* in same order as sizes */
519 } v2;
520 } u;
521};
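
/*
 * Sketch (not driver code): computing the expected total file size of a
 * v1-layout image from the header above.  The v2 layout adds the build
 * number before the sizes but is otherwise the same calculation.
 */
static inline size_t iwl_v1_ucode_file_size(const struct iwl_ucode_header *hdr)
{
	return offsetof(struct iwl_ucode_header, u.v1.data) +
	       le32_to_cpu(hdr->u.v1.inst_size) +
	       le32_to_cpu(hdr->u.v1.data_size) +
	       le32_to_cpu(hdr->u.v1.init_size) +
	       le32_to_cpu(hdr->u.v1.init_data_size) +
	       le32_to_cpu(hdr->u.v1.boot_size);
}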
522
523/*
524 * new TLV uCode file layout
525 *
526 * The new TLV file format contains TLVs that each specify
527 * some piece of data. To facilitate "groups", for example
528 * different instruction images with different capabilities,
529 * bundled with the same init image, an alternative mechanism
530 * is provided:
531 * When the alternative field is 0, that means that the item
532 * is always valid. When it is non-zero, then it is only
533 * valid in conjunction with items of the same alternative,
534 * in which case the driver (user) selects one alternative
535 * to use.
536 */
537
538enum iwl_ucode_tlv_type {
539 IWL_UCODE_TLV_INVALID = 0, /* unused */
540 IWL_UCODE_TLV_INST = 1,
541 IWL_UCODE_TLV_DATA = 2,
542 IWL_UCODE_TLV_INIT = 3,
543 IWL_UCODE_TLV_INIT_DATA = 4,
544 IWL_UCODE_TLV_BOOT = 5,
545 IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
546 IWL_UCODE_TLV_PAN = 7,
547 IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
548 IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
549 IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
550 IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
551 IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
552 IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
553 IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
554 IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
555 IWL_UCODE_TLV_WOWLAN_INST = 16,
556 IWL_UCODE_TLV_WOWLAN_DATA = 17,
557 IWL_UCODE_TLV_FLAGS = 18,
558};
559
560/**
561 * enum iwl_ucode_tlv_flag - ucode API flags
562 * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
563 * was a separate TLV but moved here to save space.
564 * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
565 * treats good CRC threshold as a boolean
566 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
567 */
568enum iwl_ucode_tlv_flag {
569 IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
570 IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
571 IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
572};
573
574struct iwl_ucode_tlv {
575 __le16 type; /* see above */
576 __le16 alternative; /* see comment */
577 __le32 length; /* not including type/length fields */
578 u8 data[0];
579} __packed;
580
581#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
582
583struct iwl_tlv_ucode_header {
584 /*
585 * The TLV style ucode header is distinguished from
586 * the v1/v2 style header by the first four bytes being
587 * zero, since that is an invalid combination of
588 * major/minor/API/serial versions.
589 */
590 __le32 zero;
591 __le32 magic;
592 u8 human_readable[64];
593 __le32 ver; /* major/minor/API/serial */
594 __le32 build;
595 __le64 alternatives; /* bitmask of valid alternatives */
596 /*
597 * The data contained herein has a TLV layout,
598 * see above for the TLV header and types.
599 * Note that each TLV is padded to a length
600 * that is a multiple of 4 for alignment.
601 */
602 u8 data[0];
603};
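
/*
 * Sketch of stepping through the TLV data area described above
 * (illustration only; error handling and alternative selection are
 * omitted).  Each TLV is padded so the next one starts on a 4-byte
 * boundary.
 */
static inline const struct iwl_ucode_tlv *
iwl_next_tlv_example(const struct iwl_ucode_tlv *tlv)
{
	u32 len = le32_to_cpu(tlv->length);

	return (const void *)(tlv->data + ALIGN(len, 4));
}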
604
605struct iwl_sensitivity_ranges {
606 u16 min_nrg_cck;
607 u16 max_nrg_cck;
608
609 u16 nrg_th_cck;
610 u16 nrg_th_ofdm;
611
612 u16 auto_corr_min_ofdm;
613 u16 auto_corr_min_ofdm_mrc;
614 u16 auto_corr_min_ofdm_x1;
615 u16 auto_corr_min_ofdm_mrc_x1;
616
617 u16 auto_corr_max_ofdm;
618 u16 auto_corr_max_ofdm_mrc;
619 u16 auto_corr_max_ofdm_x1;
620 u16 auto_corr_max_ofdm_mrc_x1;
621
622 u16 auto_corr_max_cck;
623 u16 auto_corr_max_cck_mrc;
624 u16 auto_corr_min_cck;
625 u16 auto_corr_min_cck_mrc;
626
627 u16 barker_corr_th_min;
628 u16 barker_corr_th_min_mrc;
629 u16 nrg_th_cca;
630};
631
632
633#define KELVIN_TO_CELSIUS(x) ((x)-273)
634#define CELSIUS_TO_KELVIN(x) ((x)+273)
635
636
637/**
638 * struct iwl_hw_params
639 * @max_txq_num: Max # Tx queues supported
640 * @scd_bc_tbls_size: size of scheduler byte count tables
641 * @tfd_size: TFD size
642 * @tx/rx_chains_num: Number of TX/RX chains
643 * @valid_tx/rx_ant: usable antennas
644 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
645 * @max_rxq_log: Log-base-2 of max_rxq_size
646 * @rx_page_order: Rx buffer page order
647 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
648 * @max_stations: maximum number of stations supported
649 * @ht40_channel: bitmap of bands where 40MHz width is possible:
650 * BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ)
651 * @sw_crypto: 0 for hw, 1 for sw
652 * @max_inst_size/@max_data_size: maximum uCode image sizes
653 * @ct_kill_threshold: temperature threshold
654 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
655 * @calib_init_cfg: setup initial calibrations for the hw
656 * @calib_rt_cfg: setup runtime calibrations for the hw
657 * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
658 */
659struct iwl_hw_params {
660 u8 max_txq_num;
661 u16 scd_bc_tbls_size;
662 u32 tfd_size;
663 u8 tx_chains_num;
664 u8 rx_chains_num;
665 u8 valid_tx_ant;
666 u8 valid_rx_ant;
667 u16 max_rxq_size;
668 u16 max_rxq_log;
669 u32 rx_page_order;
670 u8 max_stations;
671 u8 ht40_channel;
672 u8 max_beacon_itrvl; /* in 1024 ms */
673 u32 max_inst_size;
674 u32 max_data_size;
675 u32 ct_kill_threshold; /* value in hw-dependent units */
676 u32 ct_kill_exit_threshold; /* value in hw-dependent units */
677 /* for 1000, 6000 series and up */
678 u16 beacon_time_tsf_bits;
679 u32 calib_init_cfg;
680 u32 calib_rt_cfg;
681 const struct iwl_sensitivity_ranges *sens;
682};
683
684
685/******************************************************************************
686 *
687 * Functions implemented in core module which are forward declared here
688 * for use by iwl-[4-5].c
689 *
690 * NOTE: The implementation of these functions are not hardware specific
691 * which is why they are in the core module files.
692 *
693 * Naming convention --
694 * iwl_ <-- Is part of iwlwifi
695 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
696 *
697 ****************************************************************************/
698extern void iwl_update_chain_flags(struct iwl_priv *priv);
699extern const u8 iwl_bcast_addr[ETH_ALEN];
700extern int iwl_queue_space(const struct iwl_queue *q);
701static inline int iwl_queue_used(const struct iwl_queue *q, int i)
702{
703 return q->write_ptr >= q->read_ptr ?
704 (i >= q->read_ptr && i < q->write_ptr) :
705 !(i < q->read_ptr && i >= q->write_ptr);
706}
707
708
709static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
710{
711 return index & (q->n_window - 1);
712}
713
714
715struct iwl_dma_ptr {
716 dma_addr_t dma;
717 void *addr;
718 size_t size;
719};
720
721#define IWL_OPERATION_MODE_AUTO 0
722#define IWL_OPERATION_MODE_HT_ONLY 1
723#define IWL_OPERATION_MODE_MIXED 2
724#define IWL_OPERATION_MODE_20MHZ 3
725
726#define IWL_TX_CRC_SIZE 4
727#define IWL_TX_DELIMITER_SIZE 4
728
729#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
730
731/* Sensitivity and chain noise calibration */
732#define INITIALIZATION_VALUE 0xFFFF
733#define IWL_CAL_NUM_BEACONS 16
734#define MAXIMUM_ALLOWED_PATHLOSS 15
735
736#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
737
738#define MAX_FA_OFDM 50
739#define MIN_FA_OFDM 5
740#define MAX_FA_CCK 50
741#define MIN_FA_CCK 5
742
743#define AUTO_CORR_STEP_OFDM 1
744
745#define AUTO_CORR_STEP_CCK 3
746#define AUTO_CORR_MAX_TH_CCK 160
747
748#define NRG_DIFF 2
749#define NRG_STEP_CCK 2
750#define NRG_MARGIN 8
751#define MAX_NUMBER_CCK_NO_FA 100
752
753#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
754
755#define CHAIN_A 0
756#define CHAIN_B 1
757#define CHAIN_C 2
758#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
759#define ALL_BAND_FILTER 0xFF00
760#define IN_BAND_FILTER 0xFF
761#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
762
763#define NRG_NUM_PREV_STAT_L 20
764#define NUM_RX_CHAINS 3
765
766enum iwlagn_false_alarm_state {
767 IWL_FA_TOO_MANY = 0,
768 IWL_FA_TOO_FEW = 1,
769 IWL_FA_GOOD_RANGE = 2,
770};
771
772enum iwlagn_chain_noise_state {
773 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
774 IWL_CHAIN_NOISE_ACCUMULATE,
775 IWL_CHAIN_NOISE_CALIBRATED,
776 IWL_CHAIN_NOISE_DONE,
777};
778
779
780/*
781 * enum iwl_calib
782 * defines the order in which results of initial calibrations
783 * should be sent to the runtime uCode
784 */
785enum iwl_calib {
786 IWL_CALIB_XTAL,
787 IWL_CALIB_DC,
788 IWL_CALIB_LO,
789 IWL_CALIB_TX_IQ,
790 IWL_CALIB_TX_IQ_PERD,
791 IWL_CALIB_BASE_BAND,
792 IWL_CALIB_TEMP_OFFSET,
793 IWL_CALIB_MAX
794};
795
796/* Opaque calibration results */
797struct iwl_calib_result {
798 void *buf;
799 size_t buf_len;
800};
801
802/* Sensitivity calib data */
803struct iwl_sensitivity_data {
804 u32 auto_corr_ofdm;
805 u32 auto_corr_ofdm_mrc;
806 u32 auto_corr_ofdm_x1;
807 u32 auto_corr_ofdm_mrc_x1;
808 u32 auto_corr_cck;
809 u32 auto_corr_cck_mrc;
810
811 u32 last_bad_plcp_cnt_ofdm;
812 u32 last_fa_cnt_ofdm;
813 u32 last_bad_plcp_cnt_cck;
814 u32 last_fa_cnt_cck;
815
816 u32 nrg_curr_state;
817 u32 nrg_prev_state;
818 u32 nrg_value[10];
819 u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
820 u32 nrg_silence_ref;
821 u32 nrg_energy_idx;
822 u32 nrg_silence_idx;
823 u32 nrg_th_cck;
824 s32 nrg_auto_corr_silence_diff;
825 u32 num_in_cck_no_fa;
826 u32 nrg_th_ofdm;
827
828 u16 barker_corr_th_min;
829 u16 barker_corr_th_min_mrc;
830 u16 nrg_th_cca;
831};
832
833/* Chain noise (differential Rx gain) calib data */
834struct iwl_chain_noise_data {
835 u32 active_chains;
836 u32 chain_noise_a;
837 u32 chain_noise_b;
838 u32 chain_noise_c;
839 u32 chain_signal_a;
840 u32 chain_signal_b;
841 u32 chain_signal_c;
842 u16 beacon_count;
843 u8 disconn_array[NUM_RX_CHAINS];
844 u8 delta_gain_code[NUM_RX_CHAINS];
845 u8 radio_write;
846 u8 state;
847};
848
849#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
850#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
851
852#define IWL_TRAFFIC_ENTRIES (256)
853#define IWL_TRAFFIC_ENTRY_SIZE (64)
854
855enum {
856 MEASUREMENT_READY = (1 << 0),
857 MEASUREMENT_ACTIVE = (1 << 1),
858};
859
860enum iwl_nvm_type {
861 NVM_DEVICE_TYPE_EEPROM = 0,
862 NVM_DEVICE_TYPE_OTP,
863};
864
865/*
866 * Two types of OTP memory access modes
867 * IWL_OTP_ACCESS_ABSOLUTE - absolute address mode,
868 * based on physical memory addressing
869 * IWL_OTP_ACCESS_RELATIVE - relative address mode,
870 * based on logical memory addressing
871 */
872enum iwl_access_mode {
873 IWL_OTP_ACCESS_ABSOLUTE,
874 IWL_OTP_ACCESS_RELATIVE,
875};
876
877/**
878 * enum iwl_pa_type - Power Amplifier type
879 * @IWL_PA_SYSTEM: based on uCode configuration
880 * @IWL_PA_INTERNAL: use Internal only
881 */
882enum iwl_pa_type {
883 IWL_PA_SYSTEM = 0,
884 IWL_PA_INTERNAL = 1,
885};
886
887/* interrupt statistics */
888struct isr_statistics {
889 u32 hw;
890 u32 sw;
891 u32 err_code;
892 u32 sch;
893 u32 alive;
894 u32 rfkill;
895 u32 ctkill;
896 u32 wakeup;
897 u32 rx;
898 u32 rx_handlers[REPLY_MAX];
899 u32 tx;
900 u32 unhandled;
901};
902
903/* reply_tx_statistics (for _agn devices) */
904struct reply_tx_error_statistics {
905 u32 pp_delay;
906 u32 pp_few_bytes;
907 u32 pp_bt_prio;
908 u32 pp_quiet_period;
909 u32 pp_calc_ttak;
910 u32 int_crossed_retry;
911 u32 short_limit;
912 u32 long_limit;
913 u32 fifo_underrun;
914 u32 drain_flow;
915 u32 rfkill_flush;
916 u32 life_expire;
917 u32 dest_ps;
918 u32 host_abort;
919 u32 bt_retry;
920 u32 sta_invalid;
921 u32 frag_drop;
922 u32 tid_disable;
923 u32 fifo_flush;
924 u32 insuff_cf_poll;
925 u32 fail_hw_drop;
926 u32 sta_color_mismatch;
927 u32 unknown;
928};
929
930/* reply_agg_tx_statistics (for _agn devices) */
931struct reply_agg_tx_error_statistics {
932 u32 underrun;
933 u32 bt_prio;
934 u32 few_bytes;
935 u32 abort;
936 u32 last_sent_ttl;
937 u32 last_sent_try;
938 u32 last_sent_bt_kill;
939 u32 scd_query;
940 u32 bad_crc32;
941 u32 response;
942 u32 dump_tx;
943 u32 delay_tx;
944 u32 unknown;
945};
946
947/* management statistics */
948enum iwl_mgmt_stats {
949 MANAGEMENT_ASSOC_REQ = 0,
950 MANAGEMENT_ASSOC_RESP,
951 MANAGEMENT_REASSOC_REQ,
952 MANAGEMENT_REASSOC_RESP,
953 MANAGEMENT_PROBE_REQ,
954 MANAGEMENT_PROBE_RESP,
955 MANAGEMENT_BEACON,
956 MANAGEMENT_ATIM,
957 MANAGEMENT_DISASSOC,
958 MANAGEMENT_AUTH,
959 MANAGEMENT_DEAUTH,
960 MANAGEMENT_ACTION,
961 MANAGEMENT_MAX,
962};
963/* control statistics */
964enum iwl_ctrl_stats {
965 CONTROL_BACK_REQ = 0,
966 CONTROL_BACK,
967 CONTROL_PSPOLL,
968 CONTROL_RTS,
969 CONTROL_CTS,
970 CONTROL_ACK,
971 CONTROL_CFEND,
972 CONTROL_CFENDACK,
973 CONTROL_MAX,
974};
975
976struct traffic_stats {
977#ifdef CONFIG_IWLWIFI_DEBUGFS
978 u32 mgmt[MANAGEMENT_MAX];
979 u32 ctrl[CONTROL_MAX];
980 u32 data_cnt;
981 u64 data_bytes;
982#endif
983};
984
985/*
986 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
987 * to perform continuous uCode event logging operation if enabled
988 */
989#define UCODE_TRACE_PERIOD (100)
990
991/*
992 * iwl_event_log: current uCode event log position
993 *
994 * @ucode_trace: enable/disable ucode continuous trace timer
995 * @num_wraps: how many times the event buffer wraps
996 * @next_entry: the entry just before the next one that uCode would fill
997 * @non_wraps_count: counter for no wrap detected when dumping ucode events
998 * @wraps_once_count: counter for wrap once detected when dumping ucode events
999 * @wraps_more_count: counter for wrap more than once detected
1000 * when dumping ucode events
1001 */
1002struct iwl_event_log {
1003 bool ucode_trace;
1004 u32 num_wraps;
1005 u32 next_entry;
1006 int non_wraps_count;
1007 int wraps_once_count;
1008 int wraps_more_count;
1009};
1010
1011/*
1012 * host interrupt timeout value
1013 * used when setting the interrupt coalescing timer;
1014 * the CSR_INT_COALESCING is an 8-bit register in 32-usec units
1015 *
1016 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
1017 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
1018 */
1019#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
1020#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
1021#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
1022#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
1023#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
1024#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
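
/*
 * Worked example for the values above: the register counts 32-usec
 * ticks, so the defaults work out to 0x40 * 32 = 2048 us and
 * 0x10 * 32 = 512 us.  The helper is illustrative only.
 */
static inline u32 iwl_coalescing_timeout_usec(u32 reg_val)
{
	return reg_val * 32;	/* register value in 32-usec units */
}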
1025
1026/*
1027 * This is the threshold value of plcp error rate per 100mSecs. It is
1028 * used to set and check for the validity of plcp_delta.
1029 */
1030#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
1031#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
1032#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
1033#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
1034#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
1035#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
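
/*
 * Sketch of the validity check implied above: a plcp_delta setting is
 * accepted when it is either the "disable" value or lies within the
 * min/max bounds.  The helper name and its use are hypothetical.
 */
static inline bool iwl_plcp_delta_is_valid(u32 plcp_delta)
{
	return plcp_delta == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE ||
	       (plcp_delta >= IWL_MAX_PLCP_ERR_THRESHOLD_MIN &&
		plcp_delta <= IWL_MAX_PLCP_ERR_THRESHOLD_MAX);
}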
1036
1037#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
1038#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1039
1040/* TX queue watchdog timeouts in mSecs */
1041#define IWL_DEF_WD_TIMEOUT (2000)
1042#define IWL_LONG_WD_TIMEOUT (10000)
1043#define IWL_MAX_WD_TIMEOUT (120000)
1044
1045/* BT Antenna Coupling Threshold (dB) */
1046#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
1047
1048/* Firmware reload counter and Timestamp */
1049#define IWL_MIN_RELOAD_DURATION 1000 /* 1000 ms */
1050#define IWL_MAX_CONTINUE_RELOAD_CNT 4
1051
1052
1053enum iwl_reset {
1054 IWL_RF_RESET = 0,
1055 IWL_FW_RESET,
1056 IWL_MAX_FORCE_RESET,
1057};
1058
1059struct iwl_force_reset {
1060 int reset_request_count;
1061 int reset_success_count;
1062 int reset_reject_count;
1063 unsigned long reset_duration;
1064 unsigned long last_force_reset_jiffies;
1065};
1066
1067/* extend beacon time format bit shifting */
1068/*
1069 * for _agn devices
1070 * bits 31:22 - extended
1071 * bits 21:0 - interval
1072 */
1073#define IWLAGN_EXT_BEACON_TIME_POS 22
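
/*
 * Sketch of the bit layout described above for _agn devices: the
 * extended part occupies bits 31:22 and the interval bits 21:0.
 * The packing helper is illustrative only, not driver code.
 */
static inline u32 iwlagn_pack_beacon_time(u32 extended, u32 interval)
{
	return (extended << IWLAGN_EXT_BEACON_TIME_POS) |
	       (interval & ((1U << IWLAGN_EXT_BEACON_TIME_POS) - 1));
}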
1074
1075/**
1076 * struct iwl_notification_wait - notification wait entry
1077 * @list: list head for global list
1078 * @fn: function called with the notification
1079 * @cmd: command ID
1080 *
1081 * This structure is not used directly; to wait for a
1082 * notification, declare it on the stack and call
1083 * iwlagn_init_notification_wait() with appropriate
1084 * parameters. Then do whatever will cause the ucode
1085 * to notify the driver, and to wait for that,
1086 * call iwlagn_wait_notification().
1087 *
1088 * Each notification is one-shot. If at some point we
1089 * need to support multi-shot notifications (which
1090 * can't be allocated on the stack) we need to modify
1091 * the code for them.
1092 */
1093struct iwl_notification_wait {
1094 struct list_head list;
1095
1096 void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
1097 void *data);
1098 void *fn_data;
1099
1100 u8 cmd;
1101 bool triggered, aborted;
1102};
1103
1104enum iwl_rxon_context_id {
1105 IWL_RXON_CTX_BSS,
1106 IWL_RXON_CTX_PAN,
1107
1108 NUM_IWL_RXON_CTX
1109};
1110
1111struct iwl_rxon_context {
1112 struct ieee80211_vif *vif;
1113
1114 const u8 *ac_to_fifo;
1115 const u8 *ac_to_queue;
1116 u8 mcast_queue;
1117
1118 /*
1119 * We could use the vif to indicate active, but we
1120 * also need it to be active during disabling when
1121 * we already removed the vif for type setting.
1122 */
1123 bool always_active, is_active;
1124
1125 bool ht_need_multiple_chains;
1126
1127 enum iwl_rxon_context_id ctxid;
1128
1129 u32 interface_modes, exclusive_interface_modes;
1130 u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
1131
1132 /*
1133 * We declare this const so it can only be
1134 * changed via explicit cast within the
1135 * routines that actually update the physical
1136 * hardware.
1137 */
1138 const struct iwl_rxon_cmd active;
1139 struct iwl_rxon_cmd staging;
1140
1141 struct iwl_rxon_time_cmd timing;
1142
1143 struct iwl_qos_info qos_data;
1144
1145 u8 bcast_sta_id, ap_sta_id;
1146
1147 u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
1148 u8 qos_cmd;
1149 u8 wep_key_cmd;
1150
1151 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1152 u8 key_mapping_keys;
1153
1154 __le32 station_flags;
1155
1156 int beacon_int;
1157
1158 struct {
1159 bool non_gf_sta_present;
1160 u8 protection;
1161 bool enabled, is_40mhz;
1162 u8 extension_chan_offset;
1163 } ht;
1164
1165 bool last_tx_rejected;
1166};
1167
1168enum iwl_scan_type {
1169 IWL_SCAN_NORMAL,
1170 IWL_SCAN_RADIO_RESET,
1171 IWL_SCAN_OFFCH_TX,
1172};
1173
1174enum iwlagn_ucode_type {
1175 IWL_UCODE_NONE,
1176 IWL_UCODE_REGULAR,
1177 IWL_UCODE_INIT,
1178 IWL_UCODE_WOWLAN,
1179};
1180
1181#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
1182struct iwl_testmode_trace {
1183 u32 buff_size;
1184 u32 total_size;
1185 u32 num_chunks;
1186 u8 *cpu_addr;
1187 u8 *trace_addr;
1188 dma_addr_t dma_addr;
1189 bool trace_enabled;
1190};
1191#endif
1192
1193/* uCode ownership */
1194#define IWL_OWNERSHIP_DRIVER 0
1195#define IWL_OWNERSHIP_TM 1
1196
1197struct iwl_priv {
1198
1199 /* ieee device used by generic ieee processing code */
1200 struct ieee80211_hw *hw;
1201 struct ieee80211_channel *ieee_channels;
1202 struct ieee80211_rate *ieee_rates;
1203 struct iwl_cfg *cfg;
1204
1205 enum ieee80211_band band;
1206
1207 void (*pre_rx_handler)(struct iwl_priv *priv,
1208 struct iwl_rx_mem_buffer *rxb);
1209 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
1210 struct iwl_rx_mem_buffer *rxb);
1211
1212 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
1213
1214 /* spectrum measurement report caching */
1215 struct iwl_spectrum_notification measure_report;
1216 u8 measurement_status;
1217
1218 /* ucode beacon time */
1219 u32 ucode_beacon_time;
1220 int missed_beacon_threshold;
1221
1222 /* track IBSS manager (last beacon) status */
1223 u32 ibss_manager;
1224
1225 /* jiffies when last recovery from statistics was performed */
1226 unsigned long rx_statistics_jiffies;
1227
1228 /* force reset */
1229 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1230
1231 /* firmware reload counter and timestamp */
1232 unsigned long reload_jiffies;
1233 int reload_count;
1234
1235	/* we allocate an array of iwl_channel_info for the NIC's valid channels;
1236	 * access them via channel # using the indirect index array */
1237 struct iwl_channel_info *channel_info; /* channel info array */
1238 u8 channel_count; /* # of channels */
1239
1240 /* thermal calibration */
1241 s32 temperature; /* degrees Kelvin */
1242 s32 last_temperature;
1243
1244 /* init calibration results */
1245 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1246
1247 /* Scan related variables */
1248 unsigned long scan_start;
1249 unsigned long scan_start_tsf;
1250 void *scan_cmd;
1251 enum ieee80211_band scan_band;
1252 struct cfg80211_scan_request *scan_request;
1253 struct ieee80211_vif *scan_vif;
1254 enum iwl_scan_type scan_type;
1255 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1256 u8 mgmt_tx_ant;
1257
1258 /* spinlock */
1259 spinlock_t lock; /* protect general shared data */
1260 spinlock_t hcmd_lock; /* protect hcmd */
1261 spinlock_t reg_lock; /* protect hw register access */
1262 struct mutex mutex;
1263
1264 struct iwl_bus *bus; /* bus specific data */
1265 struct iwl_trans trans;
1266
1267 /* microcode/device supports multiple contexts */
1268 u8 valid_contexts;
1269
1270 /* command queue number */
1271 u8 cmd_queue;
1272
1273 /* max number of station keys */
1274 u8 sta_key_max_num;
1275
1276 bool new_scan_threshold_behaviour;
1277
1278 /* EEPROM MAC addresses */
1279 struct mac_address addresses[2];
1280
1281 /* uCode images, save to reload in case of failure */
1282 int fw_index; /* firmware we're trying to load */
1283 u32 ucode_ver; /* version of ucode, copy of
1284 iwl_ucode.ver */
1285
1286 /* uCode owner: default: IWL_OWNERSHIP_DRIVER */
1287 u8 ucode_owner;
1288
1289 struct fw_img ucode_rt;
1290 struct fw_img ucode_init;
1291 struct fw_img ucode_wowlan;
1292
1293 enum iwlagn_ucode_type ucode_type;
1294 u8 ucode_write_complete; /* the image write is complete */
1295 char firmware_name[25];
1296
1297 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1298
1299 __le16 switch_channel;
1300
1301 struct {
1302 u32 error_event_table;
1303 u32 log_event_table;
1304 } device_pointers;
1305
1306 u16 active_rate;
1307
1308 u8 start_calib;
1309 struct iwl_sensitivity_data sensitivity_data;
1310 struct iwl_chain_noise_data chain_noise_data;
1311 bool enhance_sensitivity_table;
1312 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1313 __le16 enhance_sensitivity_tbl[ENHANCE_HD_TABLE_ENTRIES];
1314
1315 struct iwl_ht_config current_ht_config;
1316
1317 /* Rate scaling data */
1318 u8 retry_rate;
1319
1320 wait_queue_head_t wait_command_queue;
1321
1322 int activity_timer_active;
1323
1324 /* Rx and Tx DMA processing queues */
1325 struct iwl_rx_queue rxq;
1326 struct iwl_tx_queue *txq;
1327 unsigned long txq_ctx_active_msk;
1328 struct iwl_dma_ptr kw; /* keep warm address */
1329 struct iwl_dma_ptr scd_bc_tbls;
1330
1331 u32 scd_base_addr; /* scheduler sram base address */
1332
1333 unsigned long status;
1334
1335 /* counts mgmt, ctl, and data packets */
1336 struct traffic_stats tx_stats;
1337 struct traffic_stats rx_stats;
1338
1339 /* counts interrupts */
1340 struct isr_statistics isr_stats;
1341
1342 struct iwl_power_mgr power_data;
1343 struct iwl_tt_mgmt thermal_throttle;
1344
1345 /* station table variables */
1346
1347 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1348 spinlock_t sta_lock;
1349 int num_stations;
1350 struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
1351 unsigned long ucode_key_table;
1352
1353 /* queue refcounts */
1354#define IWL_MAX_HW_QUEUES 32
1355 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1356 /* for each AC */
1357 atomic_t queue_stop_count[4];
1358
1359 /* Indication if ieee80211_ops->open has been called */
1360 u8 is_open;
1361
1362 u8 mac80211_registered;
1363
1364 bool wowlan;
1365
1366 /* eeprom -- this is in the card's little endian byte order */
1367 u8 *eeprom;
1368 int nvm_device_type;
1369 struct iwl_eeprom_calib_info *calib_info;
1370
1371 enum nl80211_iftype iw_mode;
1372
1373 /* Last Rx'd beacon timestamp */
1374 u64 timestamp;
1375
1376 struct {
1377 __le32 flag;
1378 struct statistics_general_common common;
1379 struct statistics_rx_non_phy rx_non_phy;
1380 struct statistics_rx_phy rx_ofdm;
1381 struct statistics_rx_ht_phy rx_ofdm_ht;
1382 struct statistics_rx_phy rx_cck;
1383 struct statistics_tx tx;
1384#ifdef CONFIG_IWLWIFI_DEBUGFS
1385 struct statistics_bt_activity bt_activity;
1386 __le32 num_bt_kills, accum_num_bt_kills;
1387#endif
1388 } statistics;
1389#ifdef CONFIG_IWLWIFI_DEBUGFS
1390 struct {
1391 struct statistics_general_common common;
1392 struct statistics_rx_non_phy rx_non_phy;
1393 struct statistics_rx_phy rx_ofdm;
1394 struct statistics_rx_ht_phy rx_ofdm_ht;
1395 struct statistics_rx_phy rx_cck;
1396 struct statistics_tx tx;
1397 struct statistics_bt_activity bt_activity;
1398 } accum_stats, delta_stats, max_delta_stats;
1399#endif
1400
1401 /* INT ICT Table */
1402 __le32 *ict_tbl;
1403 void *ict_tbl_vir;
1404 dma_addr_t ict_tbl_dma;
1405 dma_addr_t aligned_ict_tbl_dma;
1406 int ict_index;
1407 u32 inta;
1408 bool use_ict;
1409 /*
1410	 * reports the number of TIDs that have aggregation (AGG) enabled;
1411	 * 0 means no aggregation
1412 */
1413 u8 agg_tids_count;
1414
1415 struct iwl_rx_phy_res last_phy_res;
1416 bool last_phy_res_valid;
1417
1418 struct completion firmware_loading_complete;
1419
1420 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
1421 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
1422
1423 /*
1424 * chain noise reset and gain commands are the
1425	 * two extra calibration commands that follow the standard
1426 * phy calibration commands
1427 */
1428 u8 phy_calib_chain_noise_reset_cmd;
1429 u8 phy_calib_chain_noise_gain_cmd;
1430
1431 /* counts reply_tx error */
1432 struct reply_tx_error_statistics reply_tx_stats;
1433 struct reply_agg_tx_error_statistics reply_agg_tx_stats;
1434 /* notification wait support */
1435 struct list_head notif_waits;
1436 spinlock_t notif_wait_lock;
1437 wait_queue_head_t notif_waitq;
1438
1439 /* remain-on-channel offload support */
1440 struct ieee80211_channel *hw_roc_channel;
1441 struct delayed_work hw_roc_work;
1442 enum nl80211_channel_type hw_roc_chantype;
1443 int hw_roc_duration;
1444 bool hw_roc_setup;
1445
1446 struct sk_buff *offchan_tx_skb;
1447 int offchan_tx_timeout;
1448 struct ieee80211_channel *offchan_tx_chan;
1449
1450 /* bt coex */
1451 u8 bt_enable_flag;
1452 u8 bt_status;
1453 u8 bt_traffic_load, last_bt_traffic_load;
1454 bool bt_ch_announce;
1455 bool bt_full_concurrent;
1456 bool bt_ant_couple_ok;
1457 __le32 kill_ack_mask;
1458 __le32 kill_cts_mask;
1459 __le16 bt_valid;
1460 u16 bt_on_thresh;
1461 u16 bt_duration;
1462 u16 dynamic_frag_thresh;
1463 u8 bt_ci_compliance;
1464 struct work_struct bt_traffic_change_work;
1465 bool bt_enable_pspoll;
1466 struct iwl_rxon_context *cur_rssi_ctx;
1467 bool bt_is_sco;
1468
1469 struct iwl_hw_params hw_params;
1470
1471 u32 inta_mask;
1472
1473 struct workqueue_struct *workqueue;
1474
1475 struct work_struct restart;
1476 struct work_struct scan_completed;
1477 struct work_struct rx_replenish;
1478 struct work_struct abort_scan;
1479
1480 struct work_struct beacon_update;
1481 struct iwl_rxon_context *beacon_ctx;
1482 struct sk_buff *beacon_skb;
1483 void *beacon_cmd;
1484
1485 struct work_struct tt_work;
1486 struct work_struct ct_enter;
1487 struct work_struct ct_exit;
1488 struct work_struct start_internal_scan;
1489 struct work_struct tx_flush;
1490 struct work_struct bt_full_concurrency;
1491 struct work_struct bt_runtime_config;
1492
1493 struct tasklet_struct irq_tasklet;
1494
1495 struct delayed_work scan_check;
1496
1497 /* TX Power */
1498 s8 tx_power_user_lmt;
1499 s8 tx_power_device_lmt;
1500 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
1501 s8 tx_power_next;
1502
1503
1504#ifdef CONFIG_IWLWIFI_DEBUG
1505 /* debugging info */
1506 u32 debug_level; /* per device debugging will override global
1507 iwl_debug_level if set */
1508#endif /* CONFIG_IWLWIFI_DEBUG */
1509#ifdef CONFIG_IWLWIFI_DEBUGFS
1510 /* debugfs */
1511 u16 tx_traffic_idx;
1512 u16 rx_traffic_idx;
1513 u8 *tx_traffic;
1514 u8 *rx_traffic;
1515 struct dentry *debugfs_dir;
1516 u32 dbgfs_sram_offset, dbgfs_sram_len;
1517 bool disable_ht40;
1518 void *wowlan_sram;
1519#endif /* CONFIG_IWLWIFI_DEBUGFS */
1520
1521 struct work_struct txpower_work;
1522 u32 disable_sens_cal;
1523 u32 disable_chain_noise_cal;
1524 struct work_struct run_time_calib_work;
1525 struct timer_list statistics_periodic;
1526 struct timer_list ucode_trace;
1527 struct timer_list watchdog;
1528
1529 struct iwl_event_log event_log;
1530
1531 struct led_classdev led;
1532 unsigned long blink_on, blink_off;
1533 bool led_registered;
1534#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
1535 struct iwl_testmode_trace testmode_trace;
1536 u32 tm_fixed_rate;
1537#endif
1538
1539 /* WoWLAN GTK rekey data */
1540 u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
1541 __le64 replay_ctr;
1542 __le16 last_seq_ctl;
1543 bool have_rekey_data;
1544}; /*iwl_priv */
1545
1546static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1547{
1548 set_bit(txq_id, &priv->txq_ctx_active_msk);
1549}
1550
1551static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1552{
1553 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1554}
1555
1556#ifdef CONFIG_IWLWIFI_DEBUG
1557/*
1558 * iwl_get_debug_level: Return active debug level for device
1559 *
1560 * Using sysfs it is possible to set a per-device debug level. This debug
1561 * level will be used if set; otherwise the global debug level, which can be
1562 * set via a module parameter, is used.
1563 */
1564static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
1565{
1566 if (priv->debug_level)
1567 return priv->debug_level;
1568 else
1569 return iwl_debug_level;
1570}
1571#else
1572static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
1573{
1574 return iwl_debug_level;
1575}
1576#endif
1577
1578
1579static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
1580 int txq_id, int idx)
1581{
1582 if (priv->txq[txq_id].txb[idx].skb)
1583 return (struct ieee80211_hdr *)priv->txq[txq_id].
1584 txb[idx].skb->data;
1585 return NULL;
1586}
1587
1588static inline struct iwl_rxon_context *
1589iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1590{
1591 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1592
1593 return vif_priv->ctx;
1594}
1595
1596#define for_each_context(priv, ctx) \
1597 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
1598 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
1599 if (priv->valid_contexts & BIT(ctx->ctxid))
1600
1601static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
1602{
1603 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1604}
1605
1606static inline int iwl_is_associated(struct iwl_priv *priv,
1607 enum iwl_rxon_context_id ctxid)
1608{
1609 return iwl_is_associated_ctx(&priv->contexts[ctxid]);
1610}
1611
1612static inline int iwl_is_any_associated(struct iwl_priv *priv)
1613{
1614 struct iwl_rxon_context *ctx;
1615 for_each_context(priv, ctx)
1616 if (iwl_is_associated_ctx(ctx))
1617 return true;
1618 return false;
1619}
1620
1621static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
1622{
1623 if (ch_info == NULL)
1624 return 0;
1625 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1626}
1627
1628static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
1629{
1630 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1631}
1632
1633static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info)
1634{
1635 return ch_info->band == IEEE80211_BAND_5GHZ;
1636}
1637
1638static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info)
1639{
1640 return ch_info->band == IEEE80211_BAND_2GHZ;
1641}
1642
1643static inline int is_channel_passive(const struct iwl_channel_info *ch)
1644{
1645 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1646}
1647
1648static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1649{
1650 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1651}
1652
1653static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
1654{
1655 __free_pages(page, priv->hw_params.rx_page_order);
1656}
1657
1658static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
1659{
1660 free_pages(page, priv->hw_params.rx_page_order);
1661}
1662#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
new file mode 100644
index 00000000000..19d31a5e32e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -0,0 +1,856 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwl_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/* 2.4 GHz */
110const u8 iwl_eeprom_band_1[14] = {
111 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
112};
113
114/* 5.2 GHz bands */
115static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
116 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
117};
118
119static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
120 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
121};
122
123static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
124 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
125};
126
127static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
128 145, 149, 153, 157, 161, 165
129};
130
131static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
132 1, 2, 3, 4, 5, 6, 7
133};
134
135static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145/*
146 * The device's EEPROM semaphore prevents conflicts between driver and uCode
147 * when accessing the EEPROM; each access is a series of pulses to/from the
148 * EEPROM chip, not a single event, so even reads could conflict if they
149 * weren't arbitrated by the semaphore.
150 */
151static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
152{
153 u16 count;
154 int ret;
155
156 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
157 /* Request semaphore */
158 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
159 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
160
161 /* See if we got it */
162 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
163 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
164 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
165 EEPROM_SEM_TIMEOUT);
166 if (ret >= 0) {
167 IWL_DEBUG_EEPROM(priv,
168 "Acquired semaphore after %d tries.\n",
169 count+1);
170 return ret;
171 }
172 }
173
174 return ret;
175}
176
177static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
178{
179 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
180 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
181
182}
183
184static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
185{
186 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
187 int ret = 0;
188
189 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
190 switch (gp) {
191 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
192 if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
193 IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
194 gp);
195 ret = -ENOENT;
196 }
197 break;
198 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
199 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
200 if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
201 IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
202 ret = -ENOENT;
203 }
204 break;
205 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
206 default:
207 IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
208 "EEPROM_GP=0x%08x\n",
209 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
210 ? "OTP" : "EEPROM", gp);
211 ret = -ENOENT;
212 break;
213 }
214 return ret;
215}
216
217static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
218{
219 iwl_read32(priv, CSR_OTP_GP_REG);
220
221 if (mode == IWL_OTP_ACCESS_ABSOLUTE)
222 iwl_clear_bit(priv, CSR_OTP_GP_REG,
223 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
224 else
225 iwl_set_bit(priv, CSR_OTP_GP_REG,
226 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
227}
228
229static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
230{
231 u32 otpgp;
232 int nvm_type;
233
234 /* OTP only valid for CP/PP and after */
235 switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
236 case CSR_HW_REV_TYPE_NONE:
237 IWL_ERR(priv, "Unknown hardware type\n");
238 return -ENOENT;
239 case CSR_HW_REV_TYPE_5300:
240 case CSR_HW_REV_TYPE_5350:
241 case CSR_HW_REV_TYPE_5100:
242 case CSR_HW_REV_TYPE_5150:
243 nvm_type = NVM_DEVICE_TYPE_EEPROM;
244 break;
245 default:
246 otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
247 if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
248 nvm_type = NVM_DEVICE_TYPE_OTP;
249 else
250 nvm_type = NVM_DEVICE_TYPE_EEPROM;
251 break;
252 }
253 return nvm_type;
254}
255
256static int iwl_init_otp_access(struct iwl_priv *priv)
257{
258 int ret;
259
260 /* Enable 40MHz radio clock */
261 iwl_write32(priv, CSR_GP_CNTRL,
262 iwl_read32(priv, CSR_GP_CNTRL) |
263 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
264
265 /* wait for clock to be ready */
266 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
267 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
268 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
269 25000);
270 if (ret < 0)
271 IWL_ERR(priv, "Time out access OTP\n");
272 else {
273 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
274 APMG_PS_CTRL_VAL_RESET_REQ);
275 udelay(5);
276 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
277 APMG_PS_CTRL_VAL_RESET_REQ);
278
279 /*
280 * CSR auto clock gate disable bit -
281 * this is only applicable for HW with OTP shadow RAM
282 */
283 if (priv->cfg->base_params->shadow_ram_support)
284 iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
285 CSR_RESET_LINK_PWR_MGMT_DISABLED);
286 }
287 return ret;
288}
289
290static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
291{
292 int ret = 0;
293 u32 r;
294 u32 otpgp;
295
296 iwl_write32(priv, CSR_EEPROM_REG,
297 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
298 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
299 CSR_EEPROM_REG_READ_VALID_MSK,
300 CSR_EEPROM_REG_READ_VALID_MSK,
301 IWL_EEPROM_ACCESS_TIMEOUT);
302 if (ret < 0) {
303 IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
304 return ret;
305 }
306 r = iwl_read32(priv, CSR_EEPROM_REG);
307 /* check for ECC errors: */
308 otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
309 if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
310 /* stop in this case */
311 /* set the uncorrectable OTP ECC bit for acknowledgement */
312 iwl_set_bit(priv, CSR_OTP_GP_REG,
313 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
314 IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
315 return -EINVAL;
316 }
317 if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
318 /* continue in this case */
319 /* set the correctable OTP ECC bit for acknowledgement */
320 iwl_set_bit(priv, CSR_OTP_GP_REG,
321 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
322 IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
323 }
324 *eeprom_data = cpu_to_le16(r >> 16);
325 return 0;
326}
327
328/*
329 * iwl_is_otp_empty: check for empty OTP
330 */
331static bool iwl_is_otp_empty(struct iwl_priv *priv)
332{
333 u16 next_link_addr = 0;
334 __le16 link_value;
335 bool is_empty = false;
336
337 /* locate the beginning of OTP link list */
338 if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) {
339 if (!link_value) {
340 IWL_ERR(priv, "OTP is empty\n");
341 is_empty = true;
342 }
343 } else {
344 IWL_ERR(priv, "Unable to read first block of OTP list.\n");
345 is_empty = true;
346 }
347
348 return is_empty;
349}
350
351
352/*
353 * iwl_find_otp_image: find EEPROM image in OTP
354 * finding the OTP block that contains the EEPROM image.
355 * the last valid block on the link list (the block _before_ the last block)
356 * is the block we should read and used to configure the device.
357 * If all the available OTP blocks are full, the last block will be the block
358 * we should read and used to configure the device.
359 * only perform this operation if shadow RAM is disabled
360 */
361static int iwl_find_otp_image(struct iwl_priv *priv,
362 u16 *validblockaddr)
363{
364 u16 next_link_addr = 0, valid_addr;
365 __le16 link_value = 0;
366 int usedblocks = 0;
367
368 /* set addressing mode to absolute to traverse the link list */
369 iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE);
370
371 /* checking for empty OTP or error */
372 if (iwl_is_otp_empty(priv))
373 return -EINVAL;
374
375 /*
376 * start traverse link list
377 * until reach the max number of OTP blocks
378 * different devices have different number of OTP blocks
379 */
380 do {
381 /* save current valid block address
382 * check for more block on the link list
383 */
384 valid_addr = next_link_addr;
385 next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
386 IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n",
387 usedblocks, next_link_addr);
388 if (iwl_read_otp_word(priv, next_link_addr, &link_value))
389 return -EINVAL;
390 if (!link_value) {
391 /*
392 * reach the end of link list, return success and
393 * set address point to the starting address
394 * of the image
395 */
396 *validblockaddr = valid_addr;
397 /* skip first 2 bytes (link list pointer) */
398 *validblockaddr += 2;
399 return 0;
400 }
401 /* more in the link list, continue */
402 usedblocks++;
403 } while (usedblocks <= priv->cfg->base_params->max_ll_items);
404
405 /* OTP has no valid blocks */
406 IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n");
407 return -EINVAL;
408}
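/*
 * Worked example of the traversal above (values are illustrative only):
 * if the link word at byte address 0 reads 0x0010, the next block starts at
 * byte address 0x0010 * sizeof(u16) = 0x20. When a block's link word reads 0,
 * the previously saved address is the valid block, and the EEPROM image data
 * starts 2 bytes past its link pointer (hence "*validblockaddr += 2").
 */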
409
410u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
411{
412 if (!priv->eeprom)
413 return 0;
414 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
415}
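/*
 * Example (illustrative): iwl_eeprom_query16(priv, EEPROM_VERSION) assembles
 * the bytes at offsets 0x88 and 0x89 (EEPROM_VERSION == 2*0x44) low byte
 * first, i.e. little-endian, matching the __le16 layout of the NVM image.
 */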
416
417/**
418 * iwl_eeprom_init - read EEPROM contents
419 *
420 * Load the EEPROM contents from adapter into priv->eeprom
421 *
422 * NOTE: This routine uses the non-debug IO access functions.
423 */
424int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
425{
426 __le16 *e;
427 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
428 int sz;
429 int ret;
430 u16 addr;
431 u16 validblockaddr = 0;
432 u16 cache_addr = 0;
433
434 priv->nvm_device_type = iwlcore_get_nvm_type(priv, hw_rev);
435 if (priv->nvm_device_type == -ENOENT)
436 return -ENOENT;
437 /* allocate eeprom */
438 sz = priv->cfg->base_params->eeprom_size;
439 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
440 priv->eeprom = kzalloc(sz, GFP_KERNEL);
441 if (!priv->eeprom) {
442 ret = -ENOMEM;
443 goto alloc_err;
444 }
445 e = (__le16 *)priv->eeprom;
446
447 iwl_apm_init(priv);
448
449 ret = iwl_eeprom_verify_signature(priv);
450 if (ret < 0) {
451 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
452 ret = -ENOENT;
453 goto err;
454 }
455
456 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
457 ret = iwl_eeprom_acquire_semaphore(priv);
458 if (ret < 0) {
459 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
460 ret = -ENOENT;
461 goto err;
462 }
463
464 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
465
466 ret = iwl_init_otp_access(priv);
467 if (ret) {
468 IWL_ERR(priv, "Failed to initialize OTP access.\n");
469 ret = -ENOENT;
470 goto done;
471 }
472 iwl_write32(priv, CSR_EEPROM_GP,
473 iwl_read32(priv, CSR_EEPROM_GP) &
474 ~CSR_EEPROM_GP_IF_OWNER_MSK);
475
476 iwl_set_bit(priv, CSR_OTP_GP_REG,
477 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
478 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
479		/* traverse the linked list if shadow RAM is not supported */
480 if (!priv->cfg->base_params->shadow_ram_support) {
481 if (iwl_find_otp_image(priv, &validblockaddr)) {
482 ret = -ENOENT;
483 goto done;
484 }
485 }
486 for (addr = validblockaddr; addr < validblockaddr + sz;
487 addr += sizeof(u16)) {
488 __le16 eeprom_data;
489
490 ret = iwl_read_otp_word(priv, addr, &eeprom_data);
491 if (ret)
492 goto done;
493 e[cache_addr / 2] = eeprom_data;
494 cache_addr += sizeof(u16);
495 }
496 } else {
497 /* eeprom is an array of 16bit values */
498 for (addr = 0; addr < sz; addr += sizeof(u16)) {
499 u32 r;
500
501 iwl_write32(priv, CSR_EEPROM_REG,
502 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
503
504 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
505 CSR_EEPROM_REG_READ_VALID_MSK,
506 CSR_EEPROM_REG_READ_VALID_MSK,
507 IWL_EEPROM_ACCESS_TIMEOUT);
508 if (ret < 0) {
509 IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
510 goto done;
511 }
512 r = iwl_read32(priv, CSR_EEPROM_REG);
513 e[addr / 2] = cpu_to_le16(r >> 16);
514 }
515 }
516
517 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
518 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
519 ? "OTP" : "EEPROM",
520 iwl_eeprom_query16(priv, EEPROM_VERSION));
521
522 ret = 0;
523done:
524 iwl_eeprom_release_semaphore(priv);
525
526err:
527 if (ret)
528 iwl_eeprom_free(priv);
529 /* Reset chip to save power until we load uCode during "up". */
530 iwl_apm_stop(priv);
531alloc_err:
532 return ret;
533}
534
535void iwl_eeprom_free(struct iwl_priv *priv)
536{
537 kfree(priv->eeprom);
538 priv->eeprom = NULL;
539}
540
541static void iwl_init_band_reference(const struct iwl_priv *priv,
542 int eep_band, int *eeprom_ch_count,
543 const struct iwl_eeprom_channel **eeprom_ch_info,
544 const u8 **eeprom_ch_index)
545{
546 u32 offset = priv->cfg->lib->
547 eeprom_ops.regulatory_bands[eep_band - 1];
548 switch (eep_band) {
549 case 1: /* 2.4GHz band */
550 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
551 *eeprom_ch_info = (struct iwl_eeprom_channel *)
552 iwl_eeprom_query_addr(priv, offset);
553 *eeprom_ch_index = iwl_eeprom_band_1;
554 break;
555 case 2: /* 4.9GHz band */
556 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
557 *eeprom_ch_info = (struct iwl_eeprom_channel *)
558 iwl_eeprom_query_addr(priv, offset);
559 *eeprom_ch_index = iwl_eeprom_band_2;
560 break;
561 case 3: /* 5.2GHz band */
562 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
563 *eeprom_ch_info = (struct iwl_eeprom_channel *)
564 iwl_eeprom_query_addr(priv, offset);
565 *eeprom_ch_index = iwl_eeprom_band_3;
566 break;
567 case 4: /* 5.5GHz band */
568 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
569 *eeprom_ch_info = (struct iwl_eeprom_channel *)
570 iwl_eeprom_query_addr(priv, offset);
571 *eeprom_ch_index = iwl_eeprom_band_4;
572 break;
573 case 5: /* 5.7GHz band */
574 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
575 *eeprom_ch_info = (struct iwl_eeprom_channel *)
576 iwl_eeprom_query_addr(priv, offset);
577 *eeprom_ch_index = iwl_eeprom_band_5;
578 break;
579 case 6: /* 2.4GHz ht40 channels */
580 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
581 *eeprom_ch_info = (struct iwl_eeprom_channel *)
582 iwl_eeprom_query_addr(priv, offset);
583 *eeprom_ch_index = iwl_eeprom_band_6;
584 break;
585 case 7: /* 5 GHz ht40 channels */
586 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
587 *eeprom_ch_info = (struct iwl_eeprom_channel *)
588 iwl_eeprom_query_addr(priv, offset);
589 *eeprom_ch_index = iwl_eeprom_band_7;
590 break;
591 default:
592 BUG();
593 return;
594 }
595}
596
597#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
598 ? # x " " : "")
599/**
600 * iwl_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
601 *
602 * Does not set up a command, or touch hardware.
603 */
604static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
605 enum ieee80211_band band, u16 channel,
606 const struct iwl_eeprom_channel *eeprom_ch,
607 u8 clear_ht40_extension_channel)
608{
609 struct iwl_channel_info *ch_info;
610
611 ch_info = (struct iwl_channel_info *)
612 iwl_get_channel_info(priv, band, channel);
613
614 if (!is_channel_valid(ch_info))
615 return -1;
616
617 IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
618 " Ad-Hoc %ssupported\n",
619 ch_info->channel,
620 is_channel_a_band(ch_info) ?
621 "5.2" : "2.4",
622 CHECK_AND_PRINT(IBSS),
623 CHECK_AND_PRINT(ACTIVE),
624 CHECK_AND_PRINT(RADAR),
625 CHECK_AND_PRINT(WIDE),
626 CHECK_AND_PRINT(DFS),
627 eeprom_ch->flags,
628 eeprom_ch->max_power_avg,
629 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
630 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
631 "" : "not ");
632
633 ch_info->ht40_eeprom = *eeprom_ch;
634 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
635 ch_info->ht40_flags = eeprom_ch->flags;
636 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
637 ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
638
639 return 0;
640}
641
642#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
643 ? # x " " : "")
644
645/**
646 * iwl_init_channel_map - Set up driver's info for all possible channels
647 */
648int iwl_init_channel_map(struct iwl_priv *priv)
649{
650 int eeprom_ch_count = 0;
651 const u8 *eeprom_ch_index = NULL;
652 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
653 int band, ch;
654 struct iwl_channel_info *ch_info;
655
656 if (priv->channel_count) {
657 IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
658 return 0;
659 }
660
661 IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
662
663 priv->channel_count =
664 ARRAY_SIZE(iwl_eeprom_band_1) +
665 ARRAY_SIZE(iwl_eeprom_band_2) +
666 ARRAY_SIZE(iwl_eeprom_band_3) +
667 ARRAY_SIZE(iwl_eeprom_band_4) +
668 ARRAY_SIZE(iwl_eeprom_band_5);
669
670 IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
671 priv->channel_count);
672
673 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
674 priv->channel_count, GFP_KERNEL);
675 if (!priv->channel_info) {
676 IWL_ERR(priv, "Could not allocate channel_info\n");
677 priv->channel_count = 0;
678 return -ENOMEM;
679 }
680
681 ch_info = priv->channel_info;
682
683	/* Loop through the 5 EEPROM bands, adding them in order to the
684	 * channel map we maintain (which holds more information than
685	 * just what is in the EEPROM) */
686 for (band = 1; band <= 5; band++) {
687
688 iwl_init_band_reference(priv, band, &eeprom_ch_count,
689 &eeprom_ch_info, &eeprom_ch_index);
690
691 /* Loop through each band adding each of the channels */
692 for (ch = 0; ch < eeprom_ch_count; ch++) {
693 ch_info->channel = eeprom_ch_index[ch];
694 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
695 IEEE80211_BAND_5GHZ;
696
697 /* permanently store EEPROM's channel regulatory flags
698 * and max power in channel info database. */
699 ch_info->eeprom = eeprom_ch_info[ch];
700
701 /* Copy the run-time flags so they are there even on
702 * invalid channels */
703 ch_info->flags = eeprom_ch_info[ch].flags;
704			/* First mark ht40 as not enabled, then enable
705			 * channels one by one */
706 ch_info->ht40_extension_channel =
707 IEEE80211_CHAN_NO_HT40;
708
709 if (!(is_channel_valid(ch_info))) {
710 IWL_DEBUG_EEPROM(priv,
711 "Ch. %d Flags %x [%sGHz] - "
712 "No traffic\n",
713 ch_info->channel,
714 ch_info->flags,
715 is_channel_a_band(ch_info) ?
716 "5.2" : "2.4");
717 ch_info++;
718 continue;
719 }
720
721 /* Initialize regulatory-based run-time data */
722 ch_info->max_power_avg = ch_info->curr_txpow =
723 eeprom_ch_info[ch].max_power_avg;
724 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
725 ch_info->min_power = 0;
726
727 IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
728 "%s%s%s%s%s%s(0x%02x %ddBm):"
729 " Ad-Hoc %ssupported\n",
730 ch_info->channel,
731 is_channel_a_band(ch_info) ?
732 "5.2" : "2.4",
733 CHECK_AND_PRINT_I(VALID),
734 CHECK_AND_PRINT_I(IBSS),
735 CHECK_AND_PRINT_I(ACTIVE),
736 CHECK_AND_PRINT_I(RADAR),
737 CHECK_AND_PRINT_I(WIDE),
738 CHECK_AND_PRINT_I(DFS),
739 eeprom_ch_info[ch].flags,
740 eeprom_ch_info[ch].max_power_avg,
741 ((eeprom_ch_info[ch].
742 flags & EEPROM_CHANNEL_IBSS)
743 && !(eeprom_ch_info[ch].
744 flags & EEPROM_CHANNEL_RADAR))
745 ? "" : "not ");
746
747 ch_info++;
748 }
749 }
750
751 /* Check if we do have HT40 channels */
752 if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] ==
753 EEPROM_REGULATORY_BAND_NO_HT40 &&
754 priv->cfg->lib->eeprom_ops.regulatory_bands[6] ==
755 EEPROM_REGULATORY_BAND_NO_HT40)
756 return 0;
757
758 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
759 for (band = 6; band <= 7; band++) {
760 enum ieee80211_band ieeeband;
761
762 iwl_init_band_reference(priv, band, &eeprom_ch_count,
763 &eeprom_ch_info, &eeprom_ch_index);
764
765 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
766 ieeeband =
767 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
768
769 /* Loop through each band adding each of the channels */
770 for (ch = 0; ch < eeprom_ch_count; ch++) {
771 /* Set up driver's info for lower half */
772 iwl_mod_ht40_chan_info(priv, ieeeband,
773 eeprom_ch_index[ch],
774 &eeprom_ch_info[ch],
775 IEEE80211_CHAN_NO_HT40PLUS);
776
777 /* Set up driver's info for upper half */
778 iwl_mod_ht40_chan_info(priv, ieeeband,
779 eeprom_ch_index[ch] + 4,
780 &eeprom_ch_info[ch],
781 IEEE80211_CHAN_NO_HT40MINUS);
782 }
783 }
784
785	/* For newer devices (6000 series and up) the
786	 * EEPROM contains enhanced tx power information;
787	 * the driver needs to process this additional information
788	 * to determine the max channel tx power limits
789	 */
790 if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower)
791 priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv);
792
793 return 0;
794}
795
796/*
797 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
798 */
799void iwl_free_channel_map(struct iwl_priv *priv)
800{
801 kfree(priv->channel_info);
802 priv->channel_count = 0;
803}
804
805/**
806 * iwl_get_channel_info - Find driver's private channel info
807 *
808 * Based on band and channel number.
809 */
810const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
811 enum ieee80211_band band, u16 channel)
812{
813 int i;
814
815 switch (band) {
816 case IEEE80211_BAND_5GHZ:
817 for (i = 14; i < priv->channel_count; i++) {
818 if (priv->channel_info[i].channel == channel)
819 return &priv->channel_info[i];
820 }
821 break;
822 case IEEE80211_BAND_2GHZ:
823 if (channel >= 1 && channel <= 14)
824 return &priv->channel_info[channel - 1];
825 break;
826 default:
827 BUG();
828 }
829
830 return NULL;
831}
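/*
 * Example (illustrative): for IEEE80211_BAND_2GHZ channel 6 this returns
 * &priv->channel_info[5] directly, since the 2.4 GHz channels 1-14 occupy
 * the first 14 slots; 5 GHz channels are found by scanning the entries from
 * index 14 onward.
 */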
832
833void iwl_rf_config(struct iwl_priv *priv)
834{
835 u16 radio_cfg;
836
837 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
838
839 /* write radio config values to register */
840 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
841 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
842 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
843 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
844 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
845 IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
846 EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
847 EEPROM_RF_CFG_STEP_MSK(radio_cfg),
848 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
849 } else
850 WARN_ON(1);
851
852 /* set CSR_HW_CONFIG_REG for uCode use */
853 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
854 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
855 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
856}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
new file mode 100644
index 00000000000..e4bf8ac5e64
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -0,0 +1,313 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
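/*
 * Illustrative sketch of the read sequence described above, mirroring what
 * iwl_read_otp_word()/iwl_eeprom_init() do (not a drop-in helper):
 *
 *	iwl_write32(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
 *	ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
 *			   CSR_EEPROM_REG_READ_VALID_MSK,
 *			   CSR_EEPROM_REG_READ_VALID_MSK,
 *			   IWL_EEPROM_ACCESS_TIMEOUT);
 *	if (ret >= 0)
 *		data = (u16)(iwl_read32(priv, CSR_EEPROM_REG) >> 16);
 */
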
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by this driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
111
112/* SKU Capabilities */
113#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
114#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
115#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
116#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
117#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
118
119/* *regulatory* channel data format in eeprom, one for each channel.
120 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
121struct iwl_eeprom_channel {
122 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
123 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
124} __packed;
125
126enum iwl_eeprom_enhanced_txpwr_flags {
127 IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
128 IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
129 IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
130 IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
131 IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
132 IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
133 IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
134 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
135};
136
137/**
138 * iwl_eeprom_enhanced_txpwr structure
139 * This structure describes the enhanced regulatory tx power limit layout
140 * in the EEPROM image.
141 * The enhanced regulatory tx power portion of the EEPROM image can be broken
142 * down into individual structures; each one is 8 bytes in size and contains
143 * the following information:
144 * @flags: entry flags
145 * @channel: channel number
146 * @chain_a_max: chain a max power in 1/2 dBm
147 * @chain_b_max: chain b max power in 1/2 dBm
148 * @chain_c_max: chain c max power in 1/2 dBm
149 * @delta_20_in_40: 20-in-40 deltas (hi/lo)
150 * @mimo2_max: mimo2 max power in 1/2 dBm
151 * @mimo3_max: mimo3 max power in 1/2 dBm
152 *
153 */
154struct iwl_eeprom_enhanced_txpwr {
155 u8 flags;
156 u8 channel;
157 s8 chain_a_max;
158 s8 chain_b_max;
159 s8 chain_c_max;
160 u8 delta_20_in_40;
161 s8 mimo2_max;
162 s8 mimo3_max;
163} __packed;
164
165/* calibration */
166#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
167#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
168
169/* temperature */
170#define EEPROM_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
171
172/* agn links */
173#define EEPROM_LINK_HOST (2*0x64)
174#define EEPROM_LINK_GENERAL (2*0x65)
175#define EEPROM_LINK_REGULATORY (2*0x66)
176#define EEPROM_LINK_CALIBRATION (2*0x67)
177#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
178#define EEPROM_LINK_OTHERS (2*0x69)
179#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
180#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
181
182/* agn regulatory - indirect access */
183#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
184 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
185#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
186 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
187#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
188 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
189#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
190 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
191#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
192 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
193#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
194 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
195#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
196 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
197
198/* 6000 regulatory - indirect access */
199#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
200 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
201
202/* 5000 Specific */
203#define EEPROM_5000_TX_POWER_VERSION (4)
204#define EEPROM_5000_EEPROM_VERSION (0x11A)
205
206/* 5050 Specific */
207#define EEPROM_5050_TX_POWER_VERSION (4)
208#define EEPROM_5050_EEPROM_VERSION (0x21E)
209
210/* 1000 Specific */
211#define EEPROM_1000_TX_POWER_VERSION (4)
212#define EEPROM_1000_EEPROM_VERSION (0x15C)
213
214/* 6x00 Specific */
215#define EEPROM_6000_TX_POWER_VERSION (4)
216#define EEPROM_6000_EEPROM_VERSION (0x423)
217
218/* 6x50 Specific */
219#define EEPROM_6050_TX_POWER_VERSION (4)
220#define EEPROM_6050_EEPROM_VERSION (0x532)
221
222/* 6150 Specific */
223#define EEPROM_6150_TX_POWER_VERSION (6)
224#define EEPROM_6150_EEPROM_VERSION (0x553)
225
226/* 6x05 Specific */
227#define EEPROM_6005_TX_POWER_VERSION (6)
228#define EEPROM_6005_EEPROM_VERSION (0x709)
229
230/* 6x30 Specific */
231#define EEPROM_6030_TX_POWER_VERSION (6)
232#define EEPROM_6030_EEPROM_VERSION (0x709)
233
234/* 2x00 Specific */
235#define EEPROM_2000_TX_POWER_VERSION (6)
236#define EEPROM_2000_EEPROM_VERSION (0x805)
237
238/* 6x35 Specific */
239#define EEPROM_6035_TX_POWER_VERSION (6)
240#define EEPROM_6035_EEPROM_VERSION (0x753)
241
242
243/* OTP */
244/* lower blocks contain EEPROM image and calibration data */
245#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
246/* high blocks contain PAPD data */
247#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */
248#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */
249#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
250#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
251#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
252#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
253
254/* 2.4 GHz */
255extern const u8 iwl_eeprom_band_1[14];
256
257#define ADDRESS_MSK 0x0000FFFF
258#define INDIRECT_TYPE_MSK 0x000F0000
259#define INDIRECT_HOST 0x00010000
260#define INDIRECT_GENERAL 0x00020000
261#define INDIRECT_REGULATORY 0x00030000
262#define INDIRECT_CALIBRATION 0x00040000
263#define INDIRECT_PROCESS_ADJST 0x00050000
264#define INDIRECT_OTHERS 0x00060000
265#define INDIRECT_TXP_LIMIT 0x00070000
266#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
267#define INDIRECT_ADDRESS 0x00100000
268
269/* General */
270#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
271#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
272#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
273#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
274#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
275#define EEPROM_VERSION (2*0x44) /* 2 bytes */
276#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
277#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
278#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
279#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
280
281/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
282#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
283#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
284#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
285#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
286#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
287#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
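/*
 * Example (illustrative): radio_cfg = 0x1234 decodes with the masks above as
 * type 0x0, step 0x1, dash 0x3, pnum 0x0, tx antenna mask 0x2 and
 * rx antenna mask 0x1.
 */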
288
289#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
290
291#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
292
293struct iwl_eeprom_ops {
294 const u32 regulatory_bands[7];
295 void (*update_enhanced_txpower) (struct iwl_priv *priv);
296};
297
298
299int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
300void iwl_eeprom_free(struct iwl_priv *priv);
301int iwl_eeprom_check_version(struct iwl_priv *priv);
302int iwl_eeprom_check_sku(struct iwl_priv *priv);
303const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
304int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
305u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
306int iwl_init_channel_map(struct iwl_priv *priv);
307void iwl_free_channel_map(struct iwl_priv *priv);
308const struct iwl_channel_info *iwl_get_channel_info(
309 const struct iwl_priv *priv,
310 enum ieee80211_band band, u16 channel);
311void iwl_rf_config(struct iwl_priv *priv);
312
313#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
new file mode 100644
index 00000000000..9d91552d13c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -0,0 +1,184 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_helpers_h__
31#define __iwl_helpers_h__
32
33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37
38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
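/* Example (illustrative): IWL_MASK(2, 5) = (1 << 5) | ((1 << 5) - (1 << 2))
 * = 0x3c, i.e. a mask covering bits 2 through 5 inclusive. */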
39
40
41static inline struct ieee80211_conf *ieee80211_get_hw_conf(
42 struct ieee80211_hw *hw)
43{
44 return &hw->conf;
45}
46
47/**
48 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
49 * @index -- current index
50 * @n_bd -- total number of entries in queue (must be power of 2)
51 */
52static inline int iwl_queue_inc_wrap(int index, int n_bd)
53{
54 return ++index & (n_bd - 1);
55}
56
57/**
58 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
59 * @index -- current index
60 * @n_bd -- total number of entries in queue (must be power of 2)
61 */
62static inline int iwl_queue_dec_wrap(int index, int n_bd)
63{
64 return --index & (n_bd - 1);
65}
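/* Example (illustrative): with n_bd = 256, iwl_queue_inc_wrap(255, 256)
 * wraps to 0 and iwl_queue_dec_wrap(0, 256) wraps to 255; the wrap relies
 * on n_bd being a power of 2. */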
66
67/*
68 * we have 8 bits used like this:
69 *
70 * 7 6 5 4 3 2 1 0
71 * | | | | | | | |
72 * | | | | | | +-+-------- AC queue (0-3)
73 * | | | | | |
74 * | +-+-+-+-+------------ HW queue ID
75 * |
76 * +---------------------- unused
77 */
78static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
79{
80 BUG_ON(ac > 3); /* only have 2 bits */
81 BUG_ON(hwq > 31); /* only use 5 bits */
82
83 txq->swq_id = (hwq << 2) | ac;
84}
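/* Example (illustrative): ac = 2 on HW queue 7 encodes as
 * swq_id = (7 << 2) | 2 = 0x1e; iwl_wake_queue()/iwl_stop_queue() below
 * recover ac = swq_id & 3 and hwq = (swq_id >> 2) & 0x1f. */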
85
86static inline void iwl_wake_queue(struct iwl_priv *priv,
87 struct iwl_tx_queue *txq)
88{
89 u8 queue = txq->swq_id;
90 u8 ac = queue & 3;
91 u8 hwq = (queue >> 2) & 0x1f;
92
93 if (test_and_clear_bit(hwq, priv->queue_stopped))
94 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
95 ieee80211_wake_queue(priv->hw, ac);
96}
97
98static inline void iwl_stop_queue(struct iwl_priv *priv,
99 struct iwl_tx_queue *txq)
100{
101 u8 queue = txq->swq_id;
102 u8 ac = queue & 3;
103 u8 hwq = (queue >> 2) & 0x1f;
104
105 if (!test_and_set_bit(hwq, priv->queue_stopped))
106 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
107 ieee80211_stop_queue(priv->hw, ac);
108}
109
110static inline void iwl_wake_any_queue(struct iwl_priv *priv,
111 struct iwl_rxon_context *ctx)
112{
113 u8 ac;
114
115 for (ac = 0; ac < AC_NUM; ac++) {
116 IWL_DEBUG_INFO(priv, "Queue Status: Q[%d] %s\n",
117 ac, (atomic_read(&priv->queue_stop_count[ac]) > 0)
118 ? "stopped" : "awake");
119 iwl_wake_queue(priv, &priv->txq[ctx->ac_to_queue[ac]]);
120 }
121}
122
123#ifdef ieee80211_stop_queue
124#undef ieee80211_stop_queue
125#endif
126
127#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
128
129#ifdef ieee80211_wake_queue
130#undef ieee80211_wake_queue
131#endif
132
133#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
134
135static inline void iwl_disable_interrupts(struct iwl_priv *priv)
136{
137 clear_bit(STATUS_INT_ENABLED, &priv->status);
138
139 /* disable interrupts from uCode/NIC to host */
140 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
141
142 /* acknowledge/clear/reset any interrupts still pending
143 * from uCode or flow handler (Rx/Tx DMA) */
144 iwl_write32(priv, CSR_INT, 0xffffffff);
145 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
146 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
147}
148
149static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
150{
151 IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
152 iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
153}
154
155static inline void iwl_enable_interrupts(struct iwl_priv *priv)
156{
157 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
158 set_bit(STATUS_INT_ENABLED, &priv->status);
159 iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
160}
161
162/**
163 * iwl_beacon_time_mask_low - mask for the low bits of the beacon time
164 * @priv -- pointer to iwl_priv data structure
165 * @tsf_bits -- number of low bits covered by the mask
166 */
167static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
168 u16 tsf_bits)
169{
170 return (1 << tsf_bits) - 1;
171}
172
173/**
174 * iwl_beacon_time_mask_high - mask for the high bits of the beacon time
175 * @priv -- pointer to iwl_priv data structure
176 * @tsf_bits -- number of low bits excluded from the mask
177 */
178static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
179 u16 tsf_bits)
180{
181 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
182}
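/*
 * Example (illustrative): with tsf_bits = 22, iwl_beacon_time_mask_low()
 * returns 0x003fffff (the low 22 bits) and iwl_beacon_time_mask_high()
 * returns 0xffc00000 (the remaining high 10 bits); together they cover the
 * full 32-bit beacon time value.
 */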
183
184#endif /* __iwl_helpers_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
new file mode 100644
index 00000000000..a67ae56d546
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -0,0 +1,223 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/delay.h>
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/wireless.h>
35#include <net/mac80211.h>
36#include <linux/etherdevice.h>
37#include <asm/unaligned.h>
38
39#include "iwl-dev.h"
40#include "iwl-core.h"
41#include "iwl-agn.h"
42#include "iwl-io.h"
43#include "iwl-trans.h"
44
45/* Throughput OFF time(ms) ON time (ms)
46 * >300 25 25
47 * >200 to 300 40 40
48 * >100 to 200 55 55
49 * >70 to 100 65 65
50 * >50 to 70 75 75
51 * >20 to 50 85 85
52 * >10 to 20 95 95
53 * >5 to 10 110 110
54 * >1 to 5 130 130
55 * >0 to 1 167 167
56 * <=0 SOLID ON
57 */
58static const struct ieee80211_tpt_blink iwl_blink[] = {
59 { .throughput = 0, .blink_time = 334 },
60 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
61 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
62 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
63 { .throughput = 20 * 1024 - 1, .blink_time = 170 },
64 { .throughput = 50 * 1024 - 1, .blink_time = 150 },
65 { .throughput = 70 * 1024 - 1, .blink_time = 130 },
66 { .throughput = 100 * 1024 - 1, .blink_time = 110 },
67 { .throughput = 200 * 1024 - 1, .blink_time = 80 },
68 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
69};
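/* Each blink_time above is the full on+off period from the table in the
 * comment, e.g. the ">100 to 200" row (55 ms on + 55 ms off) corresponds to
 * the 100 * 1024 - 1 entry with blink_time = 110. */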
70
71/* Turn the LED on (write CSR_LED_REG_TRUN_ON to the LED register) */
72void iwlagn_led_enable(struct iwl_priv *priv)
73{
74 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
75}
76
77/*
78 * Adjust the led blink rate to compensate for the MAC clock frequency
79 * difference on each HW. Led blink rate analysis showed an average
80 * deviation of 20% on 5000 series and up.
81 * The led on/off time is compensated per HW according to this deviation
82 * to achieve the desired led frequency.
83 * The calculation is: (100-averageDeviation)/100 * blinkTime
84 * For code efficiency the calculation will be:
85 * compensation = (100 - averageDeviation) * 64 / 100
86 * NewBlinkTime = (compensation * BlinkTime) / 64
87 */
88static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
89 u8 time, u16 compensation)
90{
91 if (!compensation) {
92 IWL_ERR(priv, "undefined blink compensation: "
93 "use pre-defined blinking time\n");
94 return time;
95 }
96
97 return (u8)((time * compensation) >> 6);
98}
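/*
 * Worked example (illustrative): with the 20% average deviation noted above,
 * compensation = (100 - 20) * 64 / 100 = 51, so a requested blink time of
 * 110 ms becomes (110 * 51) >> 6 = 87 ms.
 */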
99
100static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
101{
102 struct iwl_host_cmd cmd = {
103 .id = REPLY_LEDS_CMD,
104 .len = { sizeof(struct iwl_led_cmd), },
105 .data = { led_cmd, },
106 .flags = CMD_ASYNC,
107 .callback = NULL,
108 };
109 u32 reg;
110
111 reg = iwl_read32(priv, CSR_LED_REG);
112 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
113 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
114
115 return trans_send_cmd(&priv->trans, &cmd);
116}
117
118/* Set led pattern command */
119static int iwl_led_cmd(struct iwl_priv *priv,
120 unsigned long on,
121 unsigned long off)
122{
123 struct iwl_led_cmd led_cmd = {
124 .id = IWL_LED_LINK,
125 .interval = IWL_DEF_LED_INTRVL
126 };
127 int ret;
128
129 if (!test_bit(STATUS_READY, &priv->status))
130 return -EBUSY;
131
132 if (priv->blink_on == on && priv->blink_off == off)
133 return 0;
134
135 if (off == 0) {
136 /* led is SOLID_ON */
137 on = IWL_LED_SOLID;
138 }
139
140 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
141 priv->cfg->base_params->led_compensation);
142 led_cmd.on = iwl_blink_compensation(priv, on,
143 priv->cfg->base_params->led_compensation);
144 led_cmd.off = iwl_blink_compensation(priv, off,
145 priv->cfg->base_params->led_compensation);
146
147 ret = iwl_send_led_cmd(priv, &led_cmd);
148 if (!ret) {
149 priv->blink_on = on;
150 priv->blink_off = off;
151 }
152 return ret;
153}
154
155static void iwl_led_brightness_set(struct led_classdev *led_cdev,
156 enum led_brightness brightness)
157{
158 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
159 unsigned long on = 0;
160
161 if (brightness > 0)
162 on = IWL_LED_SOLID;
163
164 iwl_led_cmd(priv, on, 0);
165}
166
167static int iwl_led_blink_set(struct led_classdev *led_cdev,
168 unsigned long *delay_on,
169 unsigned long *delay_off)
170{
171 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
172
173 return iwl_led_cmd(priv, *delay_on, *delay_off);
174}
175
176void iwl_leds_init(struct iwl_priv *priv)
177{
178 int mode = iwlagn_mod_params.led_mode;
179 int ret;
180
181 if (mode == IWL_LED_DEFAULT)
182 mode = priv->cfg->led_mode;
183
184 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
185 wiphy_name(priv->hw->wiphy));
186 priv->led.brightness_set = iwl_led_brightness_set;
187 priv->led.blink_set = iwl_led_blink_set;
188 priv->led.max_brightness = 1;
189
190 switch (mode) {
191 case IWL_LED_DEFAULT:
192 WARN_ON(1);
193 break;
194 case IWL_LED_BLINK:
195 priv->led.default_trigger =
196 ieee80211_create_tpt_led_trigger(priv->hw,
197 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
198 iwl_blink, ARRAY_SIZE(iwl_blink));
199 break;
200 case IWL_LED_RF_STATE:
201 priv->led.default_trigger =
202 ieee80211_get_radio_led_name(priv->hw);
203 break;
204 }
205
206 ret = led_classdev_register(priv->bus->dev,
207 &priv->led);
208 if (ret) {
209 kfree(priv->led.name);
210 return;
211 }
212
213 priv->led_registered = true;
214}
215
216void iwl_leds_exit(struct iwl_priv *priv)
217{
218 if (!priv->led_registered)
219 return;
220
221 led_classdev_unregister(&priv->led);
222 kfree(priv->led.name);
223}
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
new file mode 100644
index 00000000000..1c93dfef693
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -0,0 +1,57 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_leds_h__
28#define __iwl_leds_h__
29
30
31struct iwl_priv;
32
33#define IWL_LED_SOLID 11
34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
35
36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1)
38
39/*
40 * LED mode
41 * IWL_LED_DEFAULT: use device default
42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
43 * LED ON = RF ON
44 * LED OFF = RF OFF
45 * IWL_LED_BLINK: adjust led blink rate based on blink table
46 */
47enum iwl_led_mode {
48 IWL_LED_DEFAULT,
49 IWL_LED_RF_STATE,
50 IWL_LED_BLINK,
51};
52
53void iwlagn_led_enable(struct iwl_priv *priv);
54void iwl_leds_init(struct iwl_priv *priv);
55void iwl_leds_exit(struct iwl_priv *priv);
56
57#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
new file mode 100644
index 00000000000..32d64e71861
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -0,0 +1,562 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/pci.h>
64#include <linux/pci-aspm.h>
65
66#include "iwl-bus.h"
67#include "iwl-agn.h"
68#include "iwl-core.h"
69#include "iwl-io.h"
70
71/* PCI registers */
72#define PCI_CFG_RETRY_TIMEOUT 0x041
73#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
74#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
75
76struct iwl_pci_bus {
77 /* basic pci-network driver stuff */
78 struct pci_dev *pci_dev;
79
80 /* pci hardware address support */
81 void __iomem *hw_base;
82};
83
84#define IWL_BUS_GET_PCI_BUS(_iwl_bus) \
85 ((struct iwl_pci_bus *) ((_iwl_bus)->bus_specific))
86
87#define IWL_BUS_GET_PCI_DEV(_iwl_bus) \
88 ((IWL_BUS_GET_PCI_BUS(_iwl_bus))->pci_dev)
89
90static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus)
91{
92 int pos;
93 u16 pci_lnk_ctl;
94 struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
95
96 pos = pci_pcie_cap(pci_dev);
97 pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
98 return pci_lnk_ctl;
99}
100
101static bool iwl_pci_is_pm_supported(struct iwl_bus *bus)
102{
103 u16 lctl = iwl_pciexp_link_ctrl(bus);
104
105 return !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
106}
107
108static void iwl_pci_apm_config(struct iwl_bus *bus)
109{
110 /*
111 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
112 * Check if BIOS (or OS) enabled L1-ASPM on this device.
113 * If so (likely), disable L0S, so device moves directly L0->L1;
114 * costs negligible amount of power savings.
115 * If not (unlikely), enable L0S, so there is at least some
116 * power savings, even without L1.
117 */
118 u16 lctl = iwl_pciexp_link_ctrl(bus);
119
120 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
121 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
122 /* L1-ASPM enabled; disable(!) L0S */
123 iwl_set_bit(bus->drv_data, CSR_GIO_REG,
124 CSR_GIO_REG_VAL_L0S_ENABLED);
125 dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
126 } else {
127 /* L1-ASPM disabled; enable(!) L0S */
128 iwl_clear_bit(bus->drv_data, CSR_GIO_REG,
129 CSR_GIO_REG_VAL_L0S_ENABLED);
130 dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
131 }
132}
133
134static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data)
135{
136 bus->drv_data = drv_data;
137 pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data);
138}
139
140static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
141 int buf_len)
142{
143 struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
144
145 snprintf(buf, buf_len, "PCI ID: 0x%04X:0x%04X", pci_dev->device,
146 pci_dev->subsystem_device);
147}
148
149static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
150{
151 iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
152}
153
154static void iwl_pci_write32(struct iwl_bus *bus, u32 ofs, u32 val)
155{
156 iowrite32(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
157}
158
159static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
160{
161 u32 val = ioread32(IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
162 return val;
163}
164
165static struct iwl_bus_ops pci_ops = {
166 .get_pm_support = iwl_pci_is_pm_supported,
167 .apm_config = iwl_pci_apm_config,
168 .set_drv_data = iwl_pci_set_drv_data,
169 .get_hw_id = iwl_pci_get_hw_id,
170 .write8 = iwl_pci_write8,
171 .write32 = iwl_pci_write32,
172 .read32 = iwl_pci_read32,
173};
174
175#define IWL_PCI_DEVICE(dev, subdev, cfg) \
176 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
177 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
178 .driver_data = (kernel_ulong_t)&(cfg)
179
180/* Hardware specific file defines the PCI IDs table for that hardware module */
181static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
182 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
183 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
184 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
185 {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
186 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
187 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
188 {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
189 {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
190 {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
191 {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
192 {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
193 {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
194 {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
195 {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
196 {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
197 {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
198 {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
199 {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
200 {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
201 {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
202 {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
203 {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
204 {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
205 {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
206
207/* 5300 Series WiFi */
208 {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
209 {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
210 {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
211 {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
212 {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
213 {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
214 {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
215 {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
216 {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
217 {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
218 {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
219 {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
220
221/* 5350 Series WiFi/WiMax */
222 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
223 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
224 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
225
226/* 5150 Series Wifi/WiMax */
227 {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
228 {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
229 {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
230 {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
231 {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
232 {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
233
234 {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
235 {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
236 {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
237 {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
238
239/* 6x00 Series */
240 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
241 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
242 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
243 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
244 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
245 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
246 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
247 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
248 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
249 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
250
251/* 6x05 Series */
252 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
253 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
254 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
255 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
256 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
257 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
258 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
259
260/* 6x30 Series */
261 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
262 {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
263 {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
264 {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
265 {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
266 {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
267 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
268 {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
269 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
270 {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
271 {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
272 {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
273 {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
274 {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
275 {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
276 {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
277
278/* 6x50 WiFi/WiMax Series */
279 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
280 {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
281 {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
282 {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
283 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
284 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
285
286/* 6150 WiFi/WiMax Series */
287 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
288 {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
289 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
290 {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
291 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
292 {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
293
294/* 1000 Series WiFi */
295 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
296 {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
297 {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
298 {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
299 {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
300 {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
301 {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
302 {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
303 {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
304 {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
305 {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
306 {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
307
308/* 100 Series WiFi */
309 {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
310 {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
311 {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
312 {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
313 {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
314 {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
315
316/* 130 Series WiFi */
317 {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
318 {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
319 {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
320 {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
321 {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
322 {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
323
324/* 2x00 Series */
325 {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
326 {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
327 {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
328 {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
329 {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
330 {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
331
332/* 2x30 Series */
333 {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
334 {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
335 {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
336 {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
337 {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
338 {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
339
340/* 6x35 Series */
341 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
342 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
343 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
344 {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
345 {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
346 {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
347 {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
348 {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
349 {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
350
351/* 105 Series */
352 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
353 {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
354 {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
355 {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
356 {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
357 {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
358
359/* 135 Series */
360 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
361 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
362 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
363 {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
364 {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
365 {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
366
367 {0}
368};
369MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
370
371static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
372{
373 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
374 struct iwl_bus *bus;
375 struct iwl_pci_bus *pci_bus;
376 u16 pci_cmd;
377 int err;
378
379 bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
380 if (!bus) {
381 dev_printk(KERN_ERR, &pdev->dev,
 382			"Couldn't allocate iwl_pci_bus\n");
383 err = -ENOMEM;
384 goto out_no_pci;
385 }
386
387 pci_bus = IWL_BUS_GET_PCI_BUS(bus);
388 pci_bus->pci_dev = pdev;
389
390 /* W/A - seems to solve weird behavior. We need to remove this if we
391 * don't want to stay in L1 all the time. This wastes a lot of power */
392 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
393 PCIE_LINK_STATE_CLKPM);
394
395 if (pci_enable_device(pdev)) {
396 err = -ENODEV;
397 goto out_no_pci;
398 }
399
400 pci_set_master(pdev);
401
402 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
403 if (!err)
404 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
405 if (err) {
406 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
407 if (!err)
408 err = pci_set_consistent_dma_mask(pdev,
409 DMA_BIT_MASK(32));
410 /* both attempts failed: */
411 if (err) {
412 dev_printk(KERN_ERR, bus->dev,
413 "No suitable DMA available.\n");
414 goto out_pci_disable_device;
415 }
416 }
417
418 err = pci_request_regions(pdev, DRV_NAME);
419 if (err) {
 420		dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed\n");
421 goto out_pci_disable_device;
422 }
423
424 pci_bus->hw_base = pci_iomap(pdev, 0, 0);
425 if (!pci_bus->hw_base) {
 426		dev_printk(KERN_ERR, bus->dev, "pci_iomap failed\n");
427 err = -ENODEV;
428 goto out_pci_release_regions;
429 }
430
431 dev_printk(KERN_INFO, &pdev->dev,
432 "pci_resource_len = 0x%08llx\n",
433 (unsigned long long) pci_resource_len(pdev, 0));
434 dev_printk(KERN_INFO, &pdev->dev,
435 "pci_resource_base = %p\n", pci_bus->hw_base);
436
437 dev_printk(KERN_INFO, &pdev->dev,
438 "HW Revision ID = 0x%X\n", pdev->revision);
439
440 /* We disable the RETRY_TIMEOUT register (0x41) to keep
441 * PCI Tx retries from interfering with C3 CPU state */
442 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
443
444 err = pci_enable_msi(pdev);
445 if (err)
446 dev_printk(KERN_ERR, &pdev->dev,
 447			"pci_enable_msi failed (0x%x)\n", err);
448
449 /* TODO: Move this away, not needed if not MSI */
450 /* enable rfkill interrupt: hw bug w/a */
451 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
452 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
453 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
454 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
455 }
456
457 bus->dev = &pdev->dev;
458 bus->irq = pdev->irq;
459 bus->ops = &pci_ops;
460
461 err = iwl_probe(bus, cfg);
462 if (err)
463 goto out_disable_msi;
464 return 0;
465
466out_disable_msi:
467 pci_disable_msi(pdev);
468 pci_iounmap(pdev, pci_bus->hw_base);
469out_pci_release_regions:
470 pci_set_drvdata(pdev, NULL);
471 pci_release_regions(pdev);
472out_pci_disable_device:
473 pci_disable_device(pdev);
474out_no_pci:
475 kfree(bus);
476 return err;
477}
478
479static void __devexit iwl_pci_remove(struct pci_dev *pdev)
480{
481 struct iwl_priv *priv = pci_get_drvdata(pdev);
482 struct iwl_bus *bus = priv->bus;
483 struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
484 struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
485
486 iwl_remove(priv);
487
488 pci_disable_msi(pci_dev);
489 pci_iounmap(pci_dev, pci_bus->hw_base);
490 pci_release_regions(pci_dev);
491 pci_disable_device(pci_dev);
492 pci_set_drvdata(pci_dev, NULL);
493
494 kfree(bus);
495}
496
497#ifdef CONFIG_PM
498
499static int iwl_pci_suspend(struct device *device)
500{
501 struct pci_dev *pdev = to_pci_dev(device);
502 struct iwl_priv *priv = pci_get_drvdata(pdev);
503
504 /* Before you put code here, think about WoWLAN. You cannot check here
505 * whether WoWLAN is enabled or not, and your code will run even if
506 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
507 */
508
509 return iwl_suspend(priv);
510}
511
512static int iwl_pci_resume(struct device *device)
513{
514 struct pci_dev *pdev = to_pci_dev(device);
515 struct iwl_priv *priv = pci_get_drvdata(pdev);
516
517 /* Before you put code here, think about WoWLAN. You cannot check here
518 * whether WoWLAN is enabled or not, and your code will run even if
519 * WoWLAN is enabled - the NIC may be alive.
520 */
521
522 /*
523 * We disable the RETRY_TIMEOUT register (0x41) to keep
524 * PCI Tx retries from interfering with C3 CPU state.
525 */
526 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
527
528 return iwl_resume(priv);
529}
530
531static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
532
533#define IWL_PM_OPS (&iwl_dev_pm_ops)
534
535#else
536
537#define IWL_PM_OPS NULL
538
539#endif
540
541static struct pci_driver iwl_pci_driver = {
542 .name = DRV_NAME,
543 .id_table = iwl_hw_card_ids,
544 .probe = iwl_pci_probe,
545 .remove = __devexit_p(iwl_pci_remove),
546 .driver.pm = IWL_PM_OPS,
547};
548
549int __must_check iwl_pci_register_driver(void)
550{
551 int ret;
552 ret = pci_register_driver(&iwl_pci_driver);
553 if (ret)
554 pr_err("Unable to initialize PCI module\n");
555
556 return ret;
557}
558
559void iwl_pci_unregister_driver(void)
560{
561 pci_unregister_driver(&iwl_pci_driver);
562}
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
new file mode 100644
index 00000000000..cd64df05f9e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -0,0 +1,444 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-agn.h"
40#include "iwl-core.h"
41#include "iwl-io.h"
42#include "iwl-commands.h"
43#include "iwl-debug.h"
44#include "iwl-power.h"
45#include "iwl-trans.h"
46
47/*
48 * Setting power level allows the card to go to sleep when not busy.
49 *
50 * We calculate a sleep command based on the required latency, which
51 * we get from mac80211. In order to handle thermal throttling, we can
52 * also use pre-defined power levels.
53 */
54
55/*
56 * This defines the old power levels. They are still used by default
57 * (level 1) and for thermal throttle (levels 3 through 5)
58 */
59
60struct iwl_power_vec_entry {
61 struct iwl_powertable_cmd cmd;
 62	u8 no_dtim;	/* number of DTIM periods to skip */
63};
64
65#define IWL_DTIM_RANGE_0_MAX 2
66#define IWL_DTIM_RANGE_1_MAX 10
67
68#define NOSLP cpu_to_le16(0), 0, 0
69#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
70#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK | \
71 IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \
72 IWL_POWER_ADVANCE_PM_ENA_MSK)
73#define ASLP_TOUT(T) cpu_to_le32(T)
74#define TU_TO_USEC 1024
75#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
76#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
77 cpu_to_le32(X1), \
78 cpu_to_le32(X2), \
79 cpu_to_le32(X3), \
80 cpu_to_le32(X4)}
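/*
 * Illustrative expansion (added note): SLP_TOUT(200) encodes a 200 TU
 * timeout as 200 * 1024 usec, and SLP_VEC(1, 1, 2, 2, 0xFF) fills the
 * five-entry sleep interval vector used by the tables below; a 0xFF in
 * the last slot is replaced by a computed listen interval in
 * iwl_static_sleep_cmd().
 */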
81/* default power management (not Tx power) table values */
82/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
83/* DTIM 0 - 2 */
84static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
85 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
86 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
87 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
88 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
89 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
90};
91
92
93/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
94/* DTIM 3 - 10 */
95static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
96 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
97 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
98 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
99 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
100 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
101};
102
103/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
104/* DTIM 11 - */
105static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
106 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
107 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
108 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
109 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
110 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
111};
112
 113/* advanced power management */
114/* DTIM 0 - 2 */
115static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = {
116 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
117 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
118 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
119 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
120 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
121 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
122 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
123 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
124 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
125 SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
126};
127
128
129/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
130/* DTIM 3 - 10 */
131static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = {
132 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
133 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
134 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
135 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
136 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
137 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
138 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
139 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
140 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
141 SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2}
142};
143
144/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
145/* DTIM 11 - */
146static const struct iwl_power_vec_entry apm_range_2[IWL_POWER_NUM] = {
147 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
148 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
149 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
150 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
151 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
152 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
153 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
154 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
155 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
156 SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
157};
158
159static void iwl_static_sleep_cmd(struct iwl_priv *priv,
160 struct iwl_powertable_cmd *cmd,
161 enum iwl_power_level lvl, int period)
162{
163 const struct iwl_power_vec_entry *table;
164 int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
165 int i;
166 u8 skip;
167 u32 slp_itrvl;
168
169 if (priv->cfg->adv_pm) {
170 table = apm_range_2;
171 if (period <= IWL_DTIM_RANGE_1_MAX)
172 table = apm_range_1;
173 if (period <= IWL_DTIM_RANGE_0_MAX)
174 table = apm_range_0;
175 } else {
176 table = range_2;
177 if (period <= IWL_DTIM_RANGE_1_MAX)
178 table = range_1;
179 if (period <= IWL_DTIM_RANGE_0_MAX)
180 table = range_0;
181 }
182
183 if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM))
184 memset(cmd, 0, sizeof(*cmd));
185 else
186 *cmd = table[lvl].cmd;
187
188 if (period == 0) {
189 skip = 0;
190 period = 1;
191 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
192 max_sleep[i] = 1;
193
194 } else {
195 skip = table[lvl].no_dtim;
196 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
197 max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
198 max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
199 }
200
201 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
202 /* figure out the listen interval based on dtim period and skip */
203 if (slp_itrvl == 0xFF)
204 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
205 cpu_to_le32(period * (skip + 1));
206
207 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
208 if (slp_itrvl > period)
209 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
210 cpu_to_le32((slp_itrvl / period) * period);
211
212 if (skip)
213 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
214 else
215 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
216
217 if (priv->cfg->base_params->shadow_reg_enable)
218 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
219 else
220 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
221
222 if (iwl_advanced_bt_coexist(priv)) {
223 if (!priv->cfg->bt_params->bt_sco_disable)
224 cmd->flags |= IWL_POWER_BT_SCO_ENA;
225 else
226 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
227 }
228
229
230 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
231 if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
232 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
233 cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);
234
235 /* enforce max sleep interval */
236 for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
237 if (le32_to_cpu(cmd->sleep_interval[i]) >
238 (max_sleep[i] * period))
239 cmd->sleep_interval[i] =
240 cpu_to_le32(max_sleep[i] * period);
241 if (i != (IWL_POWER_VEC_SIZE - 1)) {
242 if (le32_to_cpu(cmd->sleep_interval[i]) >
243 le32_to_cpu(cmd->sleep_interval[i+1]))
244 cmd->sleep_interval[i] =
245 cmd->sleep_interval[i+1];
246 }
247 }
248
249 if (priv->power_data.bus_pm)
250 cmd->flags |= IWL_POWER_PCI_PM_MSK;
251 else
252 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
253
254 IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
255 skip, period);
256 IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
257}
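/*
 * Worked example (illustrative values): with adv_pm off, lvl ==
 * IWL_POWER_INDEX_3 and a DTIM period of 3 (and a listen interval below
 * IWL_CONN_MAX_LISTEN_INTERVAL), the table is range_1, the initial
 * vector is SLP_VEC(2, 4, 6, 7, 9) and skip = no_dtim = 0, so
 * max_sleep = {2, 4, 6, 7, 1}. The last entry is aligned to the DTIM
 * period ((9 / 3) * 3 = 9), then capped to max_sleep[4] * period = 3;
 * the monotonic clamp finally yields a sleep vector of {2, 3, 3, 3, 3}.
 */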
258
259static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
260 struct iwl_powertable_cmd *cmd)
261{
262 memset(cmd, 0, sizeof(*cmd));
263
264 if (priv->power_data.bus_pm)
265 cmd->flags |= IWL_POWER_PCI_PM_MSK;
266
267 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
268}
269
270static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
271 struct iwl_powertable_cmd *cmd,
272 int dynps_ms, int wakeup_period)
273{
274 /*
 275	 * These are the original power level 3 sleep successions. The
 276	 * device may behave better with such a succession and was only
 277	 * tested with it. Just like the original sleep commands, the
 278	 * succession here is also adjusted to the wakeup_period below.
 279	 * The ranges are the same as for the sleep commands, 0-2, 3-10
 280	 * and >10; for the sleep commands the range is selected based on
 281	 * the DTIM period, but here we use the wakeup period since that
 282	 * is what matters for the latency requirements.
283 */
284 static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 };
285 static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 };
286 static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF };
287 const u8 *slp_succ = slp_succ_r0;
288 int i;
289
290 if (wakeup_period > IWL_DTIM_RANGE_0_MAX)
291 slp_succ = slp_succ_r1;
292 if (wakeup_period > IWL_DTIM_RANGE_1_MAX)
293 slp_succ = slp_succ_r2;
294
295 memset(cmd, 0, sizeof(*cmd));
296
297 cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
298 IWL_POWER_FAST_PD; /* no use seeing frames for others */
299
300 if (priv->power_data.bus_pm)
301 cmd->flags |= IWL_POWER_PCI_PM_MSK;
302
303 if (priv->cfg->base_params->shadow_reg_enable)
304 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
305 else
306 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
307
308 if (iwl_advanced_bt_coexist(priv)) {
309 if (!priv->cfg->bt_params->bt_sco_disable)
310 cmd->flags |= IWL_POWER_BT_SCO_ENA;
311 else
312 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
313 }
314
315 cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
316 cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);
317
318 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
319 cmd->sleep_interval[i] =
320 cpu_to_le32(min_t(int, slp_succ[i], wakeup_period));
321
322 IWL_DEBUG_POWER(priv, "Automatic sleep command\n");
323}
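/*
 * Illustrative example (arbitrary values): for dynps_ms = 100 and
 * wakeup_period = 5, rx/tx_data_timeout become 100 * 1000 usec and,
 * since 5 falls in the 3-10 range, slp_succ_r1 = {2, 4, 6, 7, 9} is
 * clamped to the wakeup period, giving a sleep vector of {2, 4, 5, 5, 5}.
 */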
324
325static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
326{
327 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
328 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
329 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
330 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
331 IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
332 le32_to_cpu(cmd->sleep_interval[0]),
333 le32_to_cpu(cmd->sleep_interval[1]),
334 le32_to_cpu(cmd->sleep_interval[2]),
335 le32_to_cpu(cmd->sleep_interval[3]),
336 le32_to_cpu(cmd->sleep_interval[4]));
337
338 return trans_send_cmd_pdu(&priv->trans, POWER_TABLE_CMD, CMD_SYNC,
339 sizeof(struct iwl_powertable_cmd), cmd);
340}
341
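/*
 * Pick the sleep command to use, in decreasing order of precedence:
 * WoWLAN, mac80211 idle, thermal throttling, CAM (PS disabled), the
 * debug sleep level override, a fixed module-parameter power level, or
 * the automatic latency-based command from iwl_power_fill_sleep_cmd().
 */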
342static void iwl_power_build_cmd(struct iwl_priv *priv,
343 struct iwl_powertable_cmd *cmd)
344{
345 bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
346 int dtimper;
347
348 dtimper = priv->hw->conf.ps_dtim_period ?: 1;
349
350 if (priv->wowlan)
351 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
352 else if (!priv->cfg->base_params->no_idle_support &&
353 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
354 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
355 else if (iwl_tt_is_low_power_state(priv)) {
356 /* in thermal throttling low power state */
357 iwl_static_sleep_cmd(priv, cmd,
358 iwl_tt_current_power_mode(priv), dtimper);
359 } else if (!enabled)
360 iwl_power_sleep_cam_cmd(priv, cmd);
361 else if (priv->power_data.debug_sleep_level_override >= 0)
362 iwl_static_sleep_cmd(priv, cmd,
363 priv->power_data.debug_sleep_level_override,
364 dtimper);
365 else if (iwlagn_mod_params.no_sleep_autoadjust) {
366 if (iwlagn_mod_params.power_level > IWL_POWER_INDEX_1 &&
367 iwlagn_mod_params.power_level <= IWL_POWER_INDEX_5)
368 iwl_static_sleep_cmd(priv, cmd,
369 iwlagn_mod_params.power_level, dtimper);
370 else
371 iwl_static_sleep_cmd(priv, cmd,
372 IWL_POWER_INDEX_1, dtimper);
373 } else
374 iwl_power_fill_sleep_cmd(priv, cmd,
375 priv->hw->conf.dynamic_ps_timeout,
376 priv->hw->conf.max_sleep_period);
377}
378
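/*
 * Send the sleep command to the device unless it is identical to the one
 * already in effect (and force is not set). While a scan is in progress
 * the command is only stored in sleep_cmd_next; STATUS_POWER_PMI tracks
 * whether the device may sleep, and the RX chain flags are refreshed
 * unless chain noise calibration is still running.
 */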
379int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
380 bool force)
381{
382 int ret;
383 bool update_chains;
384
385 lockdep_assert_held(&priv->mutex);
386
387 /* Don't update the RX chain when chain noise calibration is running */
388 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
389 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
390
391 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
392 return 0;
393
394 if (!iwl_is_ready_rf(priv))
395 return -EIO;
396
 397	/* sleep_cmd_next is used after scan completes, keep it updated */
398 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
399 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
400 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
401 return 0;
402 }
403
404 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
405 set_bit(STATUS_POWER_PMI, &priv->status);
406
407 ret = iwl_set_power(priv, cmd);
408 if (!ret) {
409 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
410 clear_bit(STATUS_POWER_PMI, &priv->status);
411
412 if (update_chains)
413 iwl_update_chain_flags(priv);
414 else
415 IWL_DEBUG_POWER(priv,
416 "Cannot update the power, chain noise "
417 "calibration running: %d\n",
418 priv->chain_noise_data.state);
419
420 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
421 } else
 422		IWL_ERR(priv, "set power failed, ret = %d\n", ret);
423
424 return ret;
425}
426
427int iwl_power_update_mode(struct iwl_priv *priv, bool force)
428{
429 struct iwl_powertable_cmd cmd;
430
431 iwl_power_build_cmd(priv, &cmd);
432 return iwl_power_set_mode(priv, &cmd, force);
433}
434
435/* initialize to default */
436void iwl_power_initialize(struct iwl_priv *priv)
437{
438 priv->power_data.bus_pm = bus_get_pm_support(priv->bus);
439
440 priv->power_data.debug_sleep_level_override = -1;
441
442 memset(&priv->power_data.sleep_cmd, 0,
443 sizeof(priv->power_data.sleep_cmd));
444}
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
new file mode 100644
index 00000000000..5f7b720cf1a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -0,0 +1,56 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_power_setting_h__
29#define __iwl_power_setting_h__
30
31#include "iwl-commands.h"
32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
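/*
 * sleep_cmd is the power command currently in effect and sleep_cmd_next
 * the one to apply once scanning completes; a debug_sleep_level_override
 * of -1 (the default set by iwl_power_initialize()) means no override.
 */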
42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
45 int debug_sleep_level_override;
46 bool bus_pm;
47};
48
49int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
50 bool force);
51int iwl_power_update_mode(struct iwl_priv *priv, bool force);
52void iwl_power_initialize(struct iwl_priv *priv);
53
54extern bool no_sleep_autoadjust;
55
56#endif /* __iwl_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
new file mode 100644
index 00000000000..732f01b565d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -0,0 +1,1029 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <linux/sched.h>
33#include <net/mac80211.h>
34#include <asm/unaligned.h>
35#include "iwl-eeprom.h"
36#include "iwl-dev.h"
37#include "iwl-core.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41#include "iwl-agn-calib.h"
42#include "iwl-agn.h"
43
44
45/******************************************************************************
46 *
47 * Generic RX handler implementations
48 *
49 ******************************************************************************/
50
51static void iwl_rx_reply_error(struct iwl_priv *priv,
52 struct iwl_rx_mem_buffer *rxb)
53{
54 struct iwl_rx_packet *pkt = rxb_addr(rxb);
55
56 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
57 "seq 0x%04X ser 0x%08X\n",
58 le32_to_cpu(pkt->u.err_resp.error_type),
59 get_cmd_string(pkt->u.err_resp.cmd_id),
60 pkt->u.err_resp.cmd_id,
61 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
62 le32_to_cpu(pkt->u.err_resp.error_info));
63}
64
65static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
66{
67 struct iwl_rx_packet *pkt = rxb_addr(rxb);
68 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
69 /*
70 * MULTI-FIXME
71 * See iwl_mac_channel_switch.
72 */
73 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
74 struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
75
76 if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
77 return;
78
79 if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
80 rxon->channel = csa->channel;
81 ctx->staging.channel = csa->channel;
82 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
83 le16_to_cpu(csa->channel));
84 iwl_chswitch_done(priv, true);
85 } else {
86 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
87 le16_to_cpu(csa->channel));
88 iwl_chswitch_done(priv, false);
89 }
90}
91
92
93static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
94 struct iwl_rx_mem_buffer *rxb)
95{
96 struct iwl_rx_packet *pkt = rxb_addr(rxb);
97 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
98
99 if (!report->state) {
100 IWL_DEBUG_11H(priv,
101 "Spectrum Measure Notification: Start\n");
102 return;
103 }
104
105 memcpy(&priv->measure_report, report, sizeof(*report));
106 priv->measurement_status |= MEASUREMENT_READY;
107}
108
109static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
110 struct iwl_rx_mem_buffer *rxb)
111{
112#ifdef CONFIG_IWLWIFI_DEBUG
113 struct iwl_rx_packet *pkt = rxb_addr(rxb);
114 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
115 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
116 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
117#endif
118}
119
120static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
121 struct iwl_rx_mem_buffer *rxb)
122{
123 struct iwl_rx_packet *pkt = rxb_addr(rxb);
124 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
125 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
126 "notification for %s:\n", len,
127 get_cmd_string(pkt->hdr.cmd));
128 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
129}
130
131static void iwl_rx_beacon_notif(struct iwl_priv *priv,
132 struct iwl_rx_mem_buffer *rxb)
133{
134 struct iwl_rx_packet *pkt = rxb_addr(rxb);
135 struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
136#ifdef CONFIG_IWLWIFI_DEBUG
137 u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
138 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
139
140 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
141 "tsf:0x%.8x%.8x rate:%d\n",
142 status & TX_STATUS_MSK,
143 beacon->beacon_notify_hdr.failure_frame,
144 le32_to_cpu(beacon->ibss_mgr_status),
145 le32_to_cpu(beacon->high_tsf),
146 le32_to_cpu(beacon->low_tsf), rate);
147#endif
148
149 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
150
151 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
152 queue_work(priv->workqueue, &priv->beacon_update);
153}
154
155/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
156#define ACK_CNT_RATIO (50)
157#define BA_TIMEOUT_CNT (5)
158#define BA_TIMEOUT_MAX (16)
159
160/**
161 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
162 *
 163 * When the ACK count ratio is low and the aggregated BA timeout retries
 164 * exceed BA_TIMEOUT_MAX, reload the firmware and bring the system back
 165 * to a normal operation state.
166 */
167static bool iwl_good_ack_health(struct iwl_priv *priv,
168 struct statistics_tx *cur)
169{
170 int actual_delta, expected_delta, ba_timeout_delta;
171 struct statistics_tx *old;
172
173 if (priv->agg_tids_count)
174 return true;
175
176 old = &priv->statistics.tx;
177
178 actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
179 le32_to_cpu(old->actual_ack_cnt);
180 expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
181 le32_to_cpu(old->expected_ack_cnt);
182
183 /* Values should not be negative, but we do not trust the firmware */
184 if (actual_delta <= 0 || expected_delta <= 0)
185 return true;
186
187 ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
188 le32_to_cpu(old->agg.ba_timeout);
189
190 if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
191 ba_timeout_delta > BA_TIMEOUT_CNT) {
192 IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
193 actual_delta, expected_delta, ba_timeout_delta);
194
195#ifdef CONFIG_IWLWIFI_DEBUGFS
196 /*
197 * This is ifdef'ed on DEBUGFS because otherwise the
198 * statistics aren't available. If DEBUGFS is set but
199 * DEBUG is not, these will just compile out.
200 */
201 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
202 priv->delta_stats.tx.rx_detected_cnt);
203 IWL_DEBUG_RADIO(priv,
204 "ack_or_ba_timeout_collision delta %d\n",
205 priv->delta_stats.tx.ack_or_ba_timeout_collision);
206#endif
207
208 if (ba_timeout_delta >= BA_TIMEOUT_MAX)
209 return false;
210 }
211
212 return true;
213}
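/*
 * Illustrative numbers (not taken from real traffic): if actual_delta =
 * 40 and expected_delta = 100, the ACK ratio is 40% < ACK_CNT_RATIO, and
 * a ba_timeout_delta of 20 (> BA_TIMEOUT_CNT and >= BA_TIMEOUT_MAX)
 * makes this return false, so the caller reloads the firmware.
 */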
214
215/**
 216 * iwl_good_plcp_health - checks for PLCP errors.
 217 *
 218 * When the PLCP error rate exceeds the threshold, reset the radio
 219 * to improve throughput.
220 */
221static bool iwl_good_plcp_health(struct iwl_priv *priv,
222 struct statistics_rx_phy *cur_ofdm,
223 struct statistics_rx_ht_phy *cur_ofdm_ht,
224 unsigned int msecs)
225{
226 int delta;
227 int threshold = priv->cfg->base_params->plcp_delta_threshold;
228
229 if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
230 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
231 return true;
232 }
233
234 delta = le32_to_cpu(cur_ofdm->plcp_err) -
235 le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
236 le32_to_cpu(cur_ofdm_ht->plcp_err) -
237 le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);
238
 239	/* Can be negative if the firmware reset its statistics */
240 if (delta <= 0)
241 return true;
242
243 if ((delta * 100 / msecs) > threshold) {
244 IWL_DEBUG_RADIO(priv,
245 "plcp health threshold %u delta %d msecs %u\n",
246 threshold, delta, msecs);
247 return false;
248 }
249
250 return true;
251}
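/*
 * Illustrative numbers (the threshold value is hypothetical): with a
 * plcp_delta_threshold of 50, a combined OFDM + OFDM-HT plcp_err delta
 * of 120 over msecs = 200 gives 120 * 100 / 200 = 60 > 50, so this
 * returns false and the caller performs an RF reset.
 */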
252
253static void iwl_recover_from_statistics(struct iwl_priv *priv,
254 struct statistics_rx_phy *cur_ofdm,
255 struct statistics_rx_ht_phy *cur_ofdm_ht,
256 struct statistics_tx *tx,
257 unsigned long stamp)
258{
259 unsigned int msecs;
260
261 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
262 return;
263
264 msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
265
 266	/* Recovery checks are only done while associated */
267 if (!iwl_is_any_associated(priv))
268 return;
269
 270	/* Do not check/recover when we do not have enough statistics data */
271 if (msecs < 99)
272 return;
273
274 if (iwlagn_mod_params.ack_check && !iwl_good_ack_health(priv, tx)) {
275 IWL_ERR(priv, "low ack count detected, restart firmware\n");
276 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
277 return;
278 }
279
280 if (iwlagn_mod_params.plcp_check &&
281 !iwl_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
282 iwl_force_reset(priv, IWL_RF_RESET, false);
283}
284
285/* Calculate noise level, based on measurements during network silence just
 286 * before the beacon arrives. This measurement can only be done if we know
 287 * exactly when to expect beacons, i.e. only when we're associated. */
288static void iwl_rx_calc_noise(struct iwl_priv *priv)
289{
290 struct statistics_rx_non_phy *rx_info;
291 int num_active_rx = 0;
292 int total_silence = 0;
293 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
294 int last_rx_noise;
295
296 rx_info = &priv->statistics.rx_non_phy;
297
298 bcn_silence_a =
299 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
300 bcn_silence_b =
301 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
302 bcn_silence_c =
303 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
304
305 if (bcn_silence_a) {
306 total_silence += bcn_silence_a;
307 num_active_rx++;
308 }
309 if (bcn_silence_b) {
310 total_silence += bcn_silence_b;
311 num_active_rx++;
312 }
313 if (bcn_silence_c) {
314 total_silence += bcn_silence_c;
315 num_active_rx++;
316 }
317
318 /* Average among active antennas */
319 if (num_active_rx)
320 last_rx_noise = (total_silence / num_active_rx) - 107;
321 else
322 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
323
324 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
325 bcn_silence_a, bcn_silence_b, bcn_silence_c,
326 last_rx_noise);
327}
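/*
 * Illustrative example (arbitrary values): beacon silence RSSI of 45 on
 * chain A, 40 on chain B and 0 (inactive) on chain C averages to
 * (45 + 40) / 2 - 107 = -65 dBm of in-band noise.
 */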
328
329#ifdef CONFIG_IWLWIFI_DEBUGFS
330/*
 331 * Based on the assumption that all statistics counters are 32-bit (DWORD).
 332 * FIXME: This function is for debugging only and does not handle
 333 * counter roll-over.
334 */
335static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
336 __le32 *max_delta, __le32 *accum, int size)
337{
338 int i;
339
340 for (i = 0;
341 i < size / sizeof(__le32);
342 i++, prev++, cur++, delta++, max_delta++, accum++) {
343 if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
344 *delta = cpu_to_le32(
345 le32_to_cpu(*cur) - le32_to_cpu(*prev));
346 le32_add_cpu(accum, le32_to_cpu(*delta));
347 if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
348 *max_delta = *delta;
349 }
350 }
351}
352
353static void
354iwl_accumulative_statistics(struct iwl_priv *priv,
355 struct statistics_general_common *common,
356 struct statistics_rx_non_phy *rx_non_phy,
357 struct statistics_rx_phy *rx_ofdm,
358 struct statistics_rx_ht_phy *rx_ofdm_ht,
359 struct statistics_rx_phy *rx_cck,
360 struct statistics_tx *tx,
361 struct statistics_bt_activity *bt_activity)
362{
363#define ACCUM(_name) \
364 accum_stats((__le32 *)&priv->statistics._name, \
365 (__le32 *)_name, \
366 (__le32 *)&priv->delta_stats._name, \
367 (__le32 *)&priv->max_delta_stats._name, \
368 (__le32 *)&priv->accum_stats._name, \
369 sizeof(*_name));
370
371 ACCUM(common);
372 ACCUM(rx_non_phy);
373 ACCUM(rx_ofdm);
374 ACCUM(rx_ofdm_ht);
375 ACCUM(rx_cck);
376 ACCUM(tx);
377 if (bt_activity)
378 ACCUM(bt_activity);
379#undef ACCUM
380}
381#else
382static inline void
383iwl_accumulative_statistics(struct iwl_priv *priv,
384 struct statistics_general_common *common,
385 struct statistics_rx_non_phy *rx_non_phy,
386 struct statistics_rx_phy *rx_ofdm,
387 struct statistics_rx_ht_phy *rx_ofdm_ht,
388 struct statistics_rx_phy *rx_cck,
389 struct statistics_tx *tx,
390 struct statistics_bt_activity *bt_activity)
391{
392}
393#endif
394
395static void iwl_rx_statistics(struct iwl_priv *priv,
396 struct iwl_rx_mem_buffer *rxb)
397{
398 unsigned long stamp = jiffies;
399 const int reg_recalib_period = 60;
400 int change;
401 struct iwl_rx_packet *pkt = rxb_addr(rxb);
402 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
403 __le32 *flag;
404 struct statistics_general_common *common;
405 struct statistics_rx_non_phy *rx_non_phy;
406 struct statistics_rx_phy *rx_ofdm;
407 struct statistics_rx_ht_phy *rx_ofdm_ht;
408 struct statistics_rx_phy *rx_cck;
409 struct statistics_tx *tx;
410 struct statistics_bt_activity *bt_activity;
411
412 len -= sizeof(struct iwl_cmd_header); /* skip header */
413
414 IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
415 len);
416
417 if (len == sizeof(struct iwl_bt_notif_statistics)) {
418 struct iwl_bt_notif_statistics *stats;
419 stats = &pkt->u.stats_bt;
420 flag = &stats->flag;
421 common = &stats->general.common;
422 rx_non_phy = &stats->rx.general.common;
423 rx_ofdm = &stats->rx.ofdm;
424 rx_ofdm_ht = &stats->rx.ofdm_ht;
425 rx_cck = &stats->rx.cck;
426 tx = &stats->tx;
427 bt_activity = &stats->general.activity;
428
429#ifdef CONFIG_IWLWIFI_DEBUGFS
430 /* handle this exception directly */
431 priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
432 le32_add_cpu(&priv->statistics.accum_num_bt_kills,
433 le32_to_cpu(stats->rx.general.num_bt_kills));
434#endif
435 } else if (len == sizeof(struct iwl_notif_statistics)) {
436 struct iwl_notif_statistics *stats;
437 stats = &pkt->u.stats;
438 flag = &stats->flag;
439 common = &stats->general.common;
440 rx_non_phy = &stats->rx.general;
441 rx_ofdm = &stats->rx.ofdm;
442 rx_ofdm_ht = &stats->rx.ofdm_ht;
443 rx_cck = &stats->rx.cck;
444 tx = &stats->tx;
445 bt_activity = NULL;
446 } else {
447 WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
448 len, sizeof(struct iwl_bt_notif_statistics),
449 sizeof(struct iwl_notif_statistics));
450 return;
451 }
452
453 change = common->temperature != priv->statistics.common.temperature ||
454 (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
455 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);
456
457 iwl_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
458 rx_ofdm_ht, rx_cck, tx, bt_activity);
459
460 iwl_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);
461
462 priv->statistics.flag = *flag;
463 memcpy(&priv->statistics.common, common, sizeof(*common));
464 memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
465 memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
466 memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
467 memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
468 memcpy(&priv->statistics.tx, tx, sizeof(*tx));
469#ifdef CONFIG_IWLWIFI_DEBUGFS
470 if (bt_activity)
471 memcpy(&priv->statistics.bt_activity, bt_activity,
472 sizeof(*bt_activity));
473#endif
474
475 priv->rx_statistics_jiffies = stamp;
476
477 set_bit(STATUS_STATISTICS, &priv->status);
478
479 /* Reschedule the statistics timer to occur in
480 * reg_recalib_period seconds to ensure we get a
481 * thermal update even if the uCode doesn't give
482 * us one */
483 mod_timer(&priv->statistics_periodic, jiffies +
484 msecs_to_jiffies(reg_recalib_period * 1000));
485
486 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
487 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
488 iwl_rx_calc_noise(priv);
489 queue_work(priv->workqueue, &priv->run_time_calib_work);
490 }
491 if (priv->cfg->lib->temperature && change)
492 priv->cfg->lib->temperature(priv);
493}
494
495static void iwl_rx_reply_statistics(struct iwl_priv *priv,
496 struct iwl_rx_mem_buffer *rxb)
497{
498 struct iwl_rx_packet *pkt = rxb_addr(rxb);
499
500 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
501#ifdef CONFIG_IWLWIFI_DEBUGFS
502 memset(&priv->accum_stats, 0,
503 sizeof(priv->accum_stats));
504 memset(&priv->delta_stats, 0,
505 sizeof(priv->delta_stats));
506 memset(&priv->max_delta_stats, 0,
507 sizeof(priv->max_delta_stats));
508#endif
509 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
510 }
511 iwl_rx_statistics(priv, rxb);
512}
513
514/* Handle notification from uCode that card's power state is changing
515 * due to software, hardware, or critical temperature RFKILL */
516static void iwl_rx_card_state_notif(struct iwl_priv *priv,
517 struct iwl_rx_mem_buffer *rxb)
518{
519 struct iwl_rx_packet *pkt = rxb_addr(rxb);
520 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
521 unsigned long status = priv->status;
522
523 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
524 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
525 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
526 (flags & CT_CARD_DISABLED) ?
527 "Reached" : "Not reached");
528
529 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
530 CT_CARD_DISABLED)) {
531
532 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
533 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
534
535 iwl_write_direct32(priv, HBUS_TARG_MBX_C,
536 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
537
538 if (!(flags & RXON_CARD_DISABLED)) {
539 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
540 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
541 iwl_write_direct32(priv, HBUS_TARG_MBX_C,
542 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
543 }
544 if (flags & CT_CARD_DISABLED)
545 iwl_tt_enter_ct_kill(priv);
546 }
547 if (!(flags & CT_CARD_DISABLED))
548 iwl_tt_exit_ct_kill(priv);
549
550 if (flags & HW_CARD_DISABLED)
551 set_bit(STATUS_RF_KILL_HW, &priv->status);
552 else
553 clear_bit(STATUS_RF_KILL_HW, &priv->status);
554
555
556 if (!(flags & RXON_CARD_DISABLED))
557 iwl_scan_cancel(priv);
558
559 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
560 test_bit(STATUS_RF_KILL_HW, &priv->status)))
561 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
562 test_bit(STATUS_RF_KILL_HW, &priv->status));
563 else
564 wake_up(&priv->wait_command_queue);
565}
566
567static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
568 struct iwl_rx_mem_buffer *rxb)
569
570{
571 struct iwl_rx_packet *pkt = rxb_addr(rxb);
572 struct iwl_missed_beacon_notif *missed_beacon;
573
574 missed_beacon = &pkt->u.missed_beacon;
575 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
576 priv->missed_beacon_threshold) {
577 IWL_DEBUG_CALIB(priv,
578 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
579 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
580 le32_to_cpu(missed_beacon->total_missed_becons),
581 le32_to_cpu(missed_beacon->num_recvd_beacons),
582 le32_to_cpu(missed_beacon->num_expected_beacons));
583 if (!test_bit(STATUS_SCANNING, &priv->status))
584 iwl_init_sensitivity(priv);
585 }
586}
587
588/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
589 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
590static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
591 struct iwl_rx_mem_buffer *rxb)
592{
593 struct iwl_rx_packet *pkt = rxb_addr(rxb);
594
595 priv->last_phy_res_valid = true;
596 memcpy(&priv->last_phy_res, pkt->u.raw,
597 sizeof(struct iwl_rx_phy_res));
598}
599
600/*
601 * returns non-zero if packet should be dropped
602 */
603static int iwl_set_decrypted_flag(struct iwl_priv *priv,
604 struct ieee80211_hdr *hdr,
605 u32 decrypt_res,
606 struct ieee80211_rx_status *stats)
607{
608 u16 fc = le16_to_cpu(hdr->frame_control);
609
610 /*
611 * All contexts have the same setting here due to it being
612 * a module parameter, so OK to check any context.
613 */
614 if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
615 RXON_FILTER_DIS_DECRYPT_MSK)
616 return 0;
617
618 if (!(fc & IEEE80211_FCTL_PROTECTED))
619 return 0;
620
621 IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
622 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
623 case RX_RES_STATUS_SEC_TYPE_TKIP:
 624		/* The uCode got a bad phase 1 key and passed the packet up
 625		 * anyway; decryption will be done in SW. */
626 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
627 RX_RES_STATUS_BAD_KEY_TTAK)
628 break;
629
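		/* fall through - TTAK is fine, continue with the checks below */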
630 case RX_RES_STATUS_SEC_TYPE_WEP:
631 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
632 RX_RES_STATUS_BAD_ICV_MIC) {
 633			/* bad ICV, the packet is destroyed since the
 634			 * decryption is done in place, drop it */
635 IWL_DEBUG_RX(priv, "Packet destroyed\n");
636 return -1;
637 }
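		/* fall through - ICV/MIC is fine, check the decrypt status below */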
638 case RX_RES_STATUS_SEC_TYPE_CCMP:
639 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
640 RX_RES_STATUS_DECRYPT_OK) {
641 IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
642 stats->flag |= RX_FLAG_DECRYPTED;
643 }
644 break;
645
646 default:
647 break;
648 }
649 return 0;
650}
651
652static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
653 struct ieee80211_hdr *hdr,
654 u16 len,
655 u32 ampdu_status,
656 struct iwl_rx_mem_buffer *rxb,
657 struct ieee80211_rx_status *stats)
658{
659 struct sk_buff *skb;
660 __le16 fc = hdr->frame_control;
661 struct iwl_rxon_context *ctx;
662
663 /* We only process data packets if the interface is open */
664 if (unlikely(!priv->is_open)) {
665 IWL_DEBUG_DROP_LIMIT(priv,
666 "Dropping packet while interface is not open.\n");
667 return;
668 }
669
670 /* In case of HW accelerated crypto and bad decryption, drop */
671 if (!iwlagn_mod_params.sw_crypto &&
672 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
673 return;
674
675 skb = dev_alloc_skb(128);
676 if (!skb) {
677 IWL_ERR(priv, "dev_alloc_skb failed\n");
678 return;
679 }
680
681 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
682
683 iwl_update_stats(priv, false, fc, len);
684
685 /*
686 * Wake any queues that were stopped due to a passive channel tx
687 * failure. This can happen because the regulatory enforcement in
688 * the device waits for a beacon before allowing transmission,
689 * sometimes even after already having transmitted frames for the
690 * association because the new RXON may reset the information.
691 */
692 if (unlikely(ieee80211_is_beacon(fc))) {
693 for_each_context(priv, ctx) {
694 if (!ctx->last_tx_rejected)
695 continue;
696 if (compare_ether_addr(hdr->addr3,
697 ctx->active.bssid_addr))
698 continue;
699 ctx->last_tx_rejected = false;
700 iwl_wake_any_queue(priv, ctx);
701 }
702 }
703
704 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
705
706 ieee80211_rx(priv->hw, skb);
707 rxb->page = NULL;
708}
709
710static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
711{
712 u32 decrypt_out = 0;
713
714 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
715 RX_RES_STATUS_STATION_FOUND)
716 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
717 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
718
719 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
720
721 /* packet was not encrypted */
722 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
723 RX_RES_STATUS_SEC_TYPE_NONE)
724 return decrypt_out;
725
726 /* packet was encrypted with unknown alg */
727 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
728 RX_RES_STATUS_SEC_TYPE_ERR)
729 return decrypt_out;
730
731 /* decryption was not done in HW */
732 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
733 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
734 return decrypt_out;
735
736 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
737
738 case RX_RES_STATUS_SEC_TYPE_CCMP:
739 /* alg is CCM: check MIC only */
740 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
741 /* Bad MIC */
742 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
743 else
744 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
745
746 break;
747
748 case RX_RES_STATUS_SEC_TYPE_TKIP:
749 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
750 /* Bad TTAK */
751 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
752 break;
753 }
754 /* fall through if TTAK OK */
755 default:
756 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
757 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
758 else
759 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
760 break;
761 }
762
763 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
764 decrypt_in, decrypt_out);
765
766 return decrypt_out;
767}
768
769/* Calc max signal level (dBm) among 3 possible receivers */
770static int iwlagn_calc_rssi(struct iwl_priv *priv,
771 struct iwl_rx_phy_res *rx_resp)
772{
773 /* data from PHY/DSP regarding signal strength, etc.,
774 * contents are always there, not configurable by host
775 */
776 struct iwlagn_non_cfg_phy *ncphy =
777 (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
778 u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
779 u8 agc;
780
781 val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
782 agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
783
784 /* Find max rssi among 3 possible receivers.
785 * These values are measured by the digital signal processor (DSP).
786 * They should stay fairly constant even as the signal strength varies,
787 * if the radio's automatic gain control (AGC) is working right.
788 * AGC value (see below) will provide the "interesting" info.
789 */
790 val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
791 rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
792 IWLAGN_OFDM_RSSI_A_BIT_POS;
793 rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
794 IWLAGN_OFDM_RSSI_B_BIT_POS;
795 val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
796 rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
797 IWLAGN_OFDM_RSSI_C_BIT_POS;
798
799 max_rssi = max_t(u32, rssi_a, rssi_b);
800 max_rssi = max_t(u32, max_rssi, rssi_c);
801
802 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
803 rssi_a, rssi_b, rssi_c, max_rssi, agc);
804
805 /* dBm = max_rssi dB - agc dB - constant.
806 * Higher AGC (higher radio gain) means lower signal. */
807 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
808}
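/*
 * Illustrative example (arbitrary values): rssi_a = 70, rssi_b = 68,
 * rssi_c = 60 and agc = 30 give max_rssi = 70, so the reported signal
 * level is 70 - 30 - IWLAGN_RSSI_OFFSET dBm.
 */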
809
810/* Called for REPLY_RX (legacy ABG frames), or
811 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
812static void iwl_rx_reply_rx(struct iwl_priv *priv,
813 struct iwl_rx_mem_buffer *rxb)
814{
815 struct ieee80211_hdr *header;
816 struct ieee80211_rx_status rx_status;
817 struct iwl_rx_packet *pkt = rxb_addr(rxb);
818 struct iwl_rx_phy_res *phy_res;
819 __le32 rx_pkt_status;
820 struct iwl_rx_mpdu_res_start *amsdu;
821 u32 len;
822 u32 ampdu_status;
823 u32 rate_n_flags;
824
825 /**
826 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
827 * REPLY_RX: physical layer info is in this buffer
828 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
829 * command and cached in priv->last_phy_res
830 *
831 * Here we set up local variables depending on which command is
832 * received.
833 */
834 if (pkt->hdr.cmd == REPLY_RX) {
835 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
836 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
837 + phy_res->cfg_phy_cnt);
838
839 len = le16_to_cpu(phy_res->byte_count);
840 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
841 phy_res->cfg_phy_cnt + len);
842 ampdu_status = le32_to_cpu(rx_pkt_status);
843 } else {
844 if (!priv->last_phy_res_valid) {
845 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
846 return;
847 }
848 phy_res = &priv->last_phy_res;
849 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
850 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
851 len = le16_to_cpu(amsdu->byte_count);
852 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
853 ampdu_status = iwl_translate_rx_status(priv,
854 le32_to_cpu(rx_pkt_status));
855 }
856
857 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
 858		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
859 phy_res->cfg_phy_cnt);
860 return;
861 }
862
863 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
864 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
865 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
866 le32_to_cpu(rx_pkt_status));
867 return;
868 }
869
870 /* This will be used in several places later */
871 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
872
873 /* rx_status carries information about the packet to mac80211 */
874 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
875 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
876 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
877 rx_status.freq =
878 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
879 rx_status.band);
880 rx_status.rate_idx =
881 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
882 rx_status.flag = 0;
883
 884	/* The TSF isn't reliable. In order to allow a smooth user experience,
 885	 * this W/A doesn't propagate it to mac80211 */
886 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
887
888 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
889
890 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
891 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
892
893 iwl_dbg_log_rx_data_frame(priv, len, header);
894 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
895 rx_status.signal, (unsigned long long)rx_status.mactime);
896
897 /*
898 * "antenna number"
899 *
900 * It seems that the antenna field in the phy flags value
901 * is actually a bit field. This is undefined by radiotap,
902 * it wants an actual antenna number but I always get "7"
903 * for most legacy frames I receive indicating that the
904 * same frame was received on all three RX chains.
905 *
906 * I think this field should be removed in favor of a
907 * new 802.11n radiotap field "RX chains" that is defined
908 * as a bitmask.
909 */
910 rx_status.antenna =
911 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
912 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
913
914 /* set the preamble flag if appropriate */
915 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
916 rx_status.flag |= RX_FLAG_SHORTPRE;
917
918 /* Set up the HT phy flags */
919 if (rate_n_flags & RATE_MCS_HT_MSK)
920 rx_status.flag |= RX_FLAG_HT;
921 if (rate_n_flags & RATE_MCS_HT40_MSK)
922 rx_status.flag |= RX_FLAG_40MHZ;
923 if (rate_n_flags & RATE_MCS_SGI_MSK)
924 rx_status.flag |= RX_FLAG_SHORT_GI;
925
926 iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
927 rxb, &rx_status);
928}
929
930/**
931 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
932 *
933 * Setup the RX handlers for each of the reply types sent from the uCode
934 * to the host.
935 */
936void iwl_setup_rx_handlers(struct iwl_priv *priv)
937{
938 void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
939
940 handlers = priv->rx_handlers;
941
942 handlers[REPLY_ERROR] = iwl_rx_reply_error;
943 handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
944 handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
945 handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
946 handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
947 handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
948
949 /*
950 * The same handler is used for both the REPLY to a discrete
951 * statistics request from the host as well as for the periodic
952 * statistics notifications (after received beacons) from the uCode.
953 */
954 handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
955 handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
956
957 iwl_setup_rx_scan_handlers(priv);
958
959 handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
960 handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
961
962 /* Rx handlers */
963 handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
964 handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
965
966 /* block ack */
967 handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
968
969 /* init calibration handlers */
970 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
971 iwlagn_rx_calib_result;
972 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
973
974 /* set up notification wait support */
975 spin_lock_init(&priv->notif_wait_lock);
976 INIT_LIST_HEAD(&priv->notif_waits);
977 init_waitqueue_head(&priv->notif_waitq);
978
979 /* Set up BT Rx handlers */
980 if (priv->cfg->lib->bt_rx_handler_setup)
981 priv->cfg->lib->bt_rx_handler_setup(priv);
982
983}
984
985void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
986{
987 struct iwl_rx_packet *pkt = rxb_addr(rxb);
988
989 /*
990 * Do the notification wait before RX handlers so
991 * even if the RX handler consumes the RXB we have
992 * access to it in the notification wait entry.
993 */
994 if (!list_empty(&priv->notif_waits)) {
995 struct iwl_notification_wait *w;
996
997 spin_lock(&priv->notif_wait_lock);
998 list_for_each_entry(w, &priv->notif_waits, list) {
999 if (w->cmd != pkt->hdr.cmd)
1000 continue;
1001 IWL_DEBUG_RX(priv,
1002 "Notif: %s, 0x%02x - wake the callers up\n",
1003 get_cmd_string(pkt->hdr.cmd),
1004 pkt->hdr.cmd);
1005 w->triggered = true;
1006 if (w->fn)
1007 w->fn(priv, pkt, w->fn_data);
1008 }
1009 spin_unlock(&priv->notif_wait_lock);
1010
1011 wake_up_all(&priv->notif_waitq);
1012 }
1013
1014 if (priv->pre_rx_handler)
1015 priv->pre_rx_handler(priv, rxb);
1016
1017	/* Based on the type of command response or notification,
1018	 * handle those that need handling via a function in the
1019	 * rx_handlers table. See iwl_setup_rx_handlers() */
1020 if (priv->rx_handlers[pkt->hdr.cmd]) {
1021 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1022 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1023 } else {
1024 /* No handling needed */
1025 IWL_DEBUG_RX(priv,
1026 "No handler needed for %s, 0x%02x\n",
1027 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1028 }
1029}
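iwl_rx_dispatch() above is the producer side of the notification-wait mechanism: it marks matching waiters as triggered, runs their optional callback, and wakes notif_waitq. Below is a minimal sketch of the consumer side of that pattern, using only the fields visible in the loop above (cmd, fn, triggered, list). The helper name, the bottom-half locking and the 2-second timeout are assumptions for illustration; the real driver provides its own wait helpers.

/* Hedged sketch of a waiter for the notification-wait mechanism above.
 * Assumes the RX path runs in bottom-half context, hence spin_lock_bh. */
static int example_wait_for_notif(struct iwl_priv *priv, u8 cmd_id)
{
	struct iwl_notification_wait w = {
		.cmd = cmd_id,
		.fn = NULL,		/* optional callback run from the RX path */
		.triggered = false,
	};

	spin_lock_bh(&priv->notif_wait_lock);
	list_add(&w.list, &priv->notif_waits);
	spin_unlock_bh(&priv->notif_wait_lock);

	/* ... send the host command expected to produce cmd_id here ... */

	wait_event_timeout(priv->notif_waitq, w.triggered, 2 * HZ);

	spin_lock_bh(&priv->notif_wait_lock);
	list_del(&w.list);
	spin_unlock_bh(&priv->notif_wait_lock);

	return w.triggered ? 0 : -ETIMEDOUT;
}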
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
new file mode 100644
index 00000000000..77e528f5db8
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -0,0 +1,629 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-agn.h"
40#include "iwl-trans.h"
41
42/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
43 * sending probe req. This should be set long enough to hear probe responses
44 * from more than one AP. */
45#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
46#define IWL_ACTIVE_DWELL_TIME_52 (20)
47
48#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
49#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
50
51/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
52 * Must be set longer than active dwell time.
53 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
54#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
55#define IWL_PASSIVE_DWELL_TIME_52 (10)
56#define IWL_PASSIVE_DWELL_BASE (100)
57#define IWL_CHANNEL_TUNE_TIME 5
58
59static int iwl_send_scan_abort(struct iwl_priv *priv)
60{
61 int ret;
62 struct iwl_rx_packet *pkt;
63 struct iwl_host_cmd cmd = {
64 .id = REPLY_SCAN_ABORT_CMD,
65 .flags = CMD_SYNC | CMD_WANT_SKB,
66 };
67
68 /* Exit instantly with error when device is not ready
69 * to receive scan abort command or it does not perform
70 * hardware scan currently */
71 if (!test_bit(STATUS_READY, &priv->status) ||
72 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
73 !test_bit(STATUS_SCAN_HW, &priv->status) ||
74 test_bit(STATUS_FW_ERROR, &priv->status) ||
75 test_bit(STATUS_EXIT_PENDING, &priv->status))
76 return -EIO;
77
78 ret = trans_send_cmd(&priv->trans, &cmd);
79 if (ret)
80 return ret;
81
82 pkt = (struct iwl_rx_packet *)cmd.reply_page;
83 if (pkt->u.status != CAN_ABORT_STATUS) {
84 /* The scan abort will return 1 for success or
85 * 2 for "failure". A failure condition can be
86 * due to simply not being in an active scan which
87 * can occur if we send the scan abort before we
88 * the microcode has notified us that a scan is
89 * completed. */
90 IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
91 ret = -EIO;
92 }
93
94 iwl_free_pages(priv, cmd.reply_page);
95 return ret;
96}
97
98static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
99{
100 /* check if scan was requested from mac80211 */
101 if (priv->scan_request) {
102 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
103 ieee80211_scan_completed(priv->hw, aborted);
104 }
105
106 priv->scan_type = IWL_SCAN_NORMAL;
107 priv->scan_vif = NULL;
108 priv->scan_request = NULL;
109}
110
111void iwl_force_scan_end(struct iwl_priv *priv)
112{
113 lockdep_assert_held(&priv->mutex);
114
115 if (!test_bit(STATUS_SCANNING, &priv->status)) {
116 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
117 return;
118 }
119
120 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
121 clear_bit(STATUS_SCANNING, &priv->status);
122 clear_bit(STATUS_SCAN_HW, &priv->status);
123 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
124 iwl_complete_scan(priv, true);
125}
126
127static void iwl_do_scan_abort(struct iwl_priv *priv)
128{
129 int ret;
130
131 lockdep_assert_held(&priv->mutex);
132
133 if (!test_bit(STATUS_SCANNING, &priv->status)) {
134 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
135 return;
136 }
137
138 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
139 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
140 return;
141 }
142
143 ret = iwl_send_scan_abort(priv);
144 if (ret) {
145 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
146 iwl_force_scan_end(priv);
147 } else
148 IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
149}
150
151/**
152 * iwl_scan_cancel - Cancel any currently executing HW scan
153 */
154int iwl_scan_cancel(struct iwl_priv *priv)
155{
156 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
157 queue_work(priv->workqueue, &priv->abort_scan);
158 return 0;
159}
160
161/**
162 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
163 * @ms: amount of time to wait (in milliseconds) for scan to abort
164 *
165 */
166int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
167{
168 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
169
170 lockdep_assert_held(&priv->mutex);
171
172 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
173
174 iwl_do_scan_abort(priv);
175
176 while (time_before_eq(jiffies, timeout)) {
177 if (!test_bit(STATUS_SCAN_HW, &priv->status))
178 break;
179 msleep(20);
180 }
181
182 return test_bit(STATUS_SCAN_HW, &priv->status);
183}
184
185/* Service response to REPLY_SCAN_CMD (0x80) */
186static void iwl_rx_reply_scan(struct iwl_priv *priv,
187 struct iwl_rx_mem_buffer *rxb)
188{
189#ifdef CONFIG_IWLWIFI_DEBUG
190 struct iwl_rx_packet *pkt = rxb_addr(rxb);
191 struct iwl_scanreq_notification *notif =
192 (struct iwl_scanreq_notification *)pkt->u.raw;
193
194 IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
195#endif
196}
197
198/* Service SCAN_START_NOTIFICATION (0x82) */
199static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
200 struct iwl_rx_mem_buffer *rxb)
201{
202 struct iwl_rx_packet *pkt = rxb_addr(rxb);
203 struct iwl_scanstart_notification *notif =
204 (struct iwl_scanstart_notification *)pkt->u.raw;
205 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
206 IWL_DEBUG_SCAN(priv, "Scan start: "
207 "%d [802.11%s] "
208 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
209 notif->channel,
210 notif->band ? "bg" : "a",
211 le32_to_cpu(notif->tsf_high),
212 le32_to_cpu(notif->tsf_low),
213 notif->status, notif->beacon_timer);
214}
215
216/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
217static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
218 struct iwl_rx_mem_buffer *rxb)
219{
220#ifdef CONFIG_IWLWIFI_DEBUG
221 struct iwl_rx_packet *pkt = rxb_addr(rxb);
222 struct iwl_scanresults_notification *notif =
223 (struct iwl_scanresults_notification *)pkt->u.raw;
224
225 IWL_DEBUG_SCAN(priv, "Scan ch.res: "
226 "%d [802.11%s] "
227 "(TSF: 0x%08X:%08X) - %d "
228 "elapsed=%lu usec\n",
229 notif->channel,
230 notif->band ? "bg" : "a",
231 le32_to_cpu(notif->tsf_high),
232 le32_to_cpu(notif->tsf_low),
233 le32_to_cpu(notif->statistics[0]),
234 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
235#endif
236}
237
238/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
239static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
240 struct iwl_rx_mem_buffer *rxb)
241{
242 struct iwl_rx_packet *pkt = rxb_addr(rxb);
243 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
244
245 IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
246 scan_notif->scanned_channels,
247 scan_notif->tsf_low,
248 scan_notif->tsf_high, scan_notif->status);
249
250 /* The HW is no longer scanning */
251 clear_bit(STATUS_SCAN_HW, &priv->status);
252
253 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
254 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
255 jiffies_to_msecs(jiffies - priv->scan_start));
256
257 queue_work(priv->workqueue, &priv->scan_completed);
258
259 if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
260 iwl_advanced_bt_coexist(priv) &&
261 priv->bt_status != scan_notif->bt_status) {
262 if (scan_notif->bt_status) {
263 /* BT on */
264 if (!priv->bt_ch_announce)
265 priv->bt_traffic_load =
266 IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
267 /*
268			 * otherwise, no traffic load information is provided,
269			 * so no changes are made
270 */
271 } else {
272 /* BT off */
273 priv->bt_traffic_load =
274 IWL_BT_COEX_TRAFFIC_LOAD_NONE;
275 }
276 priv->bt_status = scan_notif->bt_status;
277 queue_work(priv->workqueue, &priv->bt_traffic_change_work);
278 }
279}
280
281void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
282{
283 /* scan handlers */
284 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
285 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
286 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
287 iwl_rx_scan_results_notif;
288 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
289 iwl_rx_scan_complete_notif;
290}
291
292inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
293 enum ieee80211_band band,
294 u8 n_probes)
295{
296 if (band == IEEE80211_BAND_5GHZ)
297 return IWL_ACTIVE_DWELL_TIME_52 +
298 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
299 else
300 return IWL_ACTIVE_DWELL_TIME_24 +
301 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
302}
303
304u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
305 enum ieee80211_band band,
306 struct ieee80211_vif *vif)
307{
308 struct iwl_rxon_context *ctx;
309 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
310 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
311 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
312
313 if (iwl_is_any_associated(priv)) {
314 /*
315 * If we're associated, we clamp the maximum passive
316 * dwell time to be 98% of the smallest beacon interval
317 * (minus 2 * channel tune time)
318 */
319 for_each_context(priv, ctx) {
320 u16 value;
321
322 if (!iwl_is_associated_ctx(ctx))
323 continue;
324 value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
325 if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
326 value = IWL_PASSIVE_DWELL_BASE;
327 value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
328 passive = min(value, passive);
329 }
330 }
331
332 return passive;
333}
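Worked example for the clamp above, assuming one associated context with a typical beacon interval of 100: the interval is not above IWL_PASSIVE_DWELL_BASE, so value stays 100, and 100 * 98 / 100 - 2 * 5 gives 88, which then replaces the 120 msec (2.4 GHz) or 110 msec (5.2 GHz) default. The standalone snippet below simply replays that arithmetic.

/* Standalone sketch of the passive-dwell clamp, constants as defined above. */
#include <stdio.h>

#define IWL_PASSIVE_DWELL_TIME_24	20
#define IWL_PASSIVE_DWELL_BASE		100
#define IWL_CHANNEL_TUNE_TIME		5

int main(void)
{
	unsigned int passive = IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24;
	unsigned int beacon_int = 100;	/* assumed beacon interval */
	unsigned int value = beacon_int;

	if (value > IWL_PASSIVE_DWELL_BASE || !value)
		value = IWL_PASSIVE_DWELL_BASE;
	value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;

	if (value < passive)
		passive = value;
	printf("passive dwell while associated: %u msec\n", passive);	/* 88 */
	return 0;
}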
334
335void iwl_init_scan_params(struct iwl_priv *priv)
336{
337 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
338 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
339 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
340 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
341 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
342}
343
344int __must_check iwl_scan_initiate(struct iwl_priv *priv,
345 struct ieee80211_vif *vif,
346 enum iwl_scan_type scan_type,
347 enum ieee80211_band band)
348{
349 int ret;
350
351 lockdep_assert_held(&priv->mutex);
352
353 cancel_delayed_work(&priv->scan_check);
354
355 if (!iwl_is_ready_rf(priv)) {
356 IWL_WARN(priv, "Request scan called when driver not ready.\n");
357 return -EIO;
358 }
359
360 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
361 IWL_DEBUG_SCAN(priv,
362 "Multiple concurrent scan requests in parallel.\n");
363 return -EBUSY;
364 }
365
366 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
367 IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
368 return -EBUSY;
369 }
370
371 IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
372 scan_type == IWL_SCAN_NORMAL ? "" :
373 scan_type == IWL_SCAN_OFFCH_TX ? "offchan TX " :
374 "internal short ");
375
376 set_bit(STATUS_SCANNING, &priv->status);
377 priv->scan_type = scan_type;
378 priv->scan_start = jiffies;
379 priv->scan_band = band;
380
381 ret = iwlagn_request_scan(priv, vif);
382 if (ret) {
383 clear_bit(STATUS_SCANNING, &priv->status);
384 priv->scan_type = IWL_SCAN_NORMAL;
385 return ret;
386 }
387
388 queue_delayed_work(priv->workqueue, &priv->scan_check,
389 IWL_SCAN_CHECK_WATCHDOG);
390
391 return 0;
392}
393
394int iwl_mac_hw_scan(struct ieee80211_hw *hw,
395 struct ieee80211_vif *vif,
396 struct cfg80211_scan_request *req)
397{
398 struct iwl_priv *priv = hw->priv;
399 int ret;
400
401 IWL_DEBUG_MAC80211(priv, "enter\n");
402
403 if (req->n_channels == 0)
404 return -EINVAL;
405
406 mutex_lock(&priv->mutex);
407
408 /*
409 * If an internal scan is in progress, just set
410 * up the scan_request as per above.
411 */
412 if (priv->scan_type != IWL_SCAN_NORMAL) {
413 IWL_DEBUG_SCAN(priv,
414 "SCAN request during internal scan - defer\n");
415 priv->scan_request = req;
416 priv->scan_vif = vif;
417 ret = 0;
418 } else {
419 priv->scan_request = req;
420 priv->scan_vif = vif;
421 /*
422 * mac80211 will only ask for one band at a time
423 * so using channels[0] here is ok
424 */
425 ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
426 req->channels[0]->band);
427 if (ret) {
428 priv->scan_request = NULL;
429 priv->scan_vif = NULL;
430 }
431 }
432
433 IWL_DEBUG_MAC80211(priv, "leave\n");
434
435 mutex_unlock(&priv->mutex);
436
437 return ret;
438}
439
440/*
441 * internal short scan; this function should only be called while associated.
442 * It will reset and tune the radio to prevent possible RF related problem
443 */
444void iwl_internal_short_hw_scan(struct iwl_priv *priv)
445{
446 queue_work(priv->workqueue, &priv->start_internal_scan);
447}
448
449static void iwl_bg_start_internal_scan(struct work_struct *work)
450{
451 struct iwl_priv *priv =
452 container_of(work, struct iwl_priv, start_internal_scan);
453
454 IWL_DEBUG_SCAN(priv, "Start internal scan\n");
455
456 mutex_lock(&priv->mutex);
457
458 if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
459 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
460 goto unlock;
461 }
462
463 if (test_bit(STATUS_SCANNING, &priv->status)) {
464 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
465 goto unlock;
466 }
467
468 if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
469 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
470 unlock:
471 mutex_unlock(&priv->mutex);
472}
473
474static void iwl_bg_scan_check(struct work_struct *data)
475{
476 struct iwl_priv *priv =
477 container_of(data, struct iwl_priv, scan_check.work);
478
479 IWL_DEBUG_SCAN(priv, "Scan check work\n");
480
481	/* Since we got here, the firmware has not finished the scan and
482	 * is most likely in bad shape, so don't bother sending an abort
483	 * command; just force scan completion towards mac80211 */
484 mutex_lock(&priv->mutex);
485 iwl_force_scan_end(priv);
486 mutex_unlock(&priv->mutex);
487}
488
489/**
490 * iwl_fill_probe_req - fill in all required fields and IE for probe request
491 */
492
493u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
494 const u8 *ta, const u8 *ies, int ie_len, int left)
495{
496 int len = 0;
497 u8 *pos = NULL;
498
499 /* Make sure there is enough space for the probe request,
500 * two mandatory IEs and the data */
501 left -= 24;
502 if (left < 0)
503 return 0;
504
505 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
506 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
507 memcpy(frame->sa, ta, ETH_ALEN);
508 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
509 frame->seq_ctrl = 0;
510
511 len += 24;
512
513 /* ...next IE... */
514 pos = &frame->u.probe_req.variable[0];
515
516 /* fill in our indirect SSID IE */
517 left -= 2;
518 if (left < 0)
519 return 0;
520 *pos++ = WLAN_EID_SSID;
521 *pos++ = 0;
522
523 len += 2;
524
525 if (WARN_ON(left < ie_len))
526 return len;
527
528 if (ies && ie_len) {
529 memcpy(pos, ies, ie_len);
530 len += ie_len;
531 }
532
533 return (u16)len;
534}
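iwl_fill_probe_req() lays out a 24-byte management header, a 2-byte wildcard (zero-length) SSID IE, and then the caller-supplied IEs, returning the total length (or 0 when the buffer is too small). A hedged usage sketch follows; the buffer size and the absence of extra IEs are chosen purely for illustration, real callers build into the scan command's payload area.

/* Hedged usage sketch for iwl_fill_probe_req(); buffer size is arbitrary. */
static u16 example_build_probe_req(struct iwl_priv *priv, const u8 *own_addr)
{
	u8 buf[128];
	struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)buf;

	/* 24-byte header + 2-byte wildcard SSID IE, no supplemental IEs */
	return iwl_fill_probe_req(priv, frame, own_addr, NULL, 0, sizeof(buf));
}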
535
536static void iwl_bg_abort_scan(struct work_struct *work)
537{
538 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
539
540 IWL_DEBUG_SCAN(priv, "Abort scan work\n");
541
542	/* We keep the scan_check work queued in case the firmware does not
543	 * report back the scan completed notification */
544 mutex_lock(&priv->mutex);
545 iwl_scan_cancel_timeout(priv, 200);
546 mutex_unlock(&priv->mutex);
547}
548
549static void iwl_bg_scan_completed(struct work_struct *work)
550{
551 struct iwl_priv *priv =
552 container_of(work, struct iwl_priv, scan_completed);
553 bool aborted;
554
555 IWL_DEBUG_SCAN(priv, "Completed scan.\n");
556
557 cancel_delayed_work(&priv->scan_check);
558
559 mutex_lock(&priv->mutex);
560
561 aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
562 if (aborted)
563 IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
564
565 if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
566 IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
567 goto out_settings;
568 }
569
570 if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->offchan_tx_skb) {
571 ieee80211_tx_status_irqsafe(priv->hw,
572 priv->offchan_tx_skb);
573 priv->offchan_tx_skb = NULL;
574 }
575
576 if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
577 int err;
578
579 /* Check if mac80211 requested scan during our internal scan */
580 if (priv->scan_request == NULL)
581 goto out_complete;
582
583 /* If so request a new scan */
584 err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
585 priv->scan_request->channels[0]->band);
586 if (err) {
587 IWL_DEBUG_SCAN(priv,
588 "failed to initiate pending scan: %d\n", err);
589 aborted = true;
590 goto out_complete;
591 }
592
593 goto out;
594 }
595
596out_complete:
597 iwl_complete_scan(priv, aborted);
598
599out_settings:
600 /* Can we still talk to firmware ? */
601 if (!iwl_is_ready_rf(priv))
602 goto out;
603
604 iwlagn_post_scan(priv);
605
606out:
607 mutex_unlock(&priv->mutex);
608}
609
610void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
611{
612 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
613 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
614 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
615 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
616}
617
618void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
619{
620 cancel_work_sync(&priv->start_internal_scan);
621 cancel_work_sync(&priv->abort_scan);
622 cancel_work_sync(&priv->scan_completed);
623
624 if (cancel_delayed_work_sync(&priv->scan_check)) {
625 mutex_lock(&priv->mutex);
626 iwl_force_scan_end(priv);
627 mutex_unlock(&priv->mutex);
628 }
629}
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
new file mode 100644
index 00000000000..1ef3b7106ad
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -0,0 +1,832 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/lockdep.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-trans.h"
39#include "iwl-agn.h"
40
41/* priv->sta_lock must be held */
42static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
43{
44
45 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
46 IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u addr %pM\n",
47 sta_id, priv->stations[sta_id].sta.sta.addr);
48
49 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
50 IWL_DEBUG_ASSOC(priv,
51 "STA id %u addr %pM already present in uCode (according to driver)\n",
52 sta_id, priv->stations[sta_id].sta.sta.addr);
53 } else {
54 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
55 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
56 sta_id, priv->stations[sta_id].sta.sta.addr);
57 }
58}
59
60static int iwl_process_add_sta_resp(struct iwl_priv *priv,
61 struct iwl_addsta_cmd *addsta,
62 struct iwl_rx_packet *pkt,
63 bool sync)
64{
65 u8 sta_id = addsta->sta.sta_id;
66 unsigned long flags;
67 int ret = -EIO;
68
69 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
70 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
71 pkt->hdr.flags);
72 return ret;
73 }
74
75 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
76 sta_id);
77
78 spin_lock_irqsave(&priv->sta_lock, flags);
79
80 switch (pkt->u.add_sta.status) {
81 case ADD_STA_SUCCESS_MSK:
82 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
83 iwl_sta_ucode_activate(priv, sta_id);
84 ret = 0;
85 break;
86 case ADD_STA_NO_ROOM_IN_TABLE:
87 IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
88 sta_id);
89 break;
90 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
91 IWL_ERR(priv, "Adding station %d failed, no block ack resource.\n",
92 sta_id);
93 break;
94 case ADD_STA_MODIFY_NON_EXIST_STA:
95 IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
96 sta_id);
97 break;
98 default:
99 IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
100 pkt->u.add_sta.status);
101 break;
102 }
103
104 IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
105 priv->stations[sta_id].sta.mode ==
106 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
107 sta_id, priv->stations[sta_id].sta.sta.addr);
108
109 /*
110 * XXX: The MAC address in the command buffer is often changed from
111 * the original sent to the device. That is, the MAC address
112 * written to the command buffer often is not the same MAC address
113 * read from the command buffer when the command returns. This
114 * issue has not yet been resolved and this debugging is left to
115 * observe the problem.
116 */
117 IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
118 priv->stations[sta_id].sta.mode ==
119 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
120 addsta->sta.addr);
121 spin_unlock_irqrestore(&priv->sta_lock, flags);
122
123 return ret;
124}
125
126static void iwl_add_sta_callback(struct iwl_priv *priv,
127 struct iwl_device_cmd *cmd,
128 struct iwl_rx_packet *pkt)
129{
130 struct iwl_addsta_cmd *addsta =
131 (struct iwl_addsta_cmd *)cmd->cmd.payload;
132
133 iwl_process_add_sta_resp(priv, addsta, pkt, false);
134
135}
136
137static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
138{
139 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
140 struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
141 memcpy(addsta, cmd, size);
142	/* reserved in 5000 */
143 addsta->rate_n_flags = cpu_to_le16(0);
144 return size;
145}
146
147int iwl_send_add_sta(struct iwl_priv *priv,
148 struct iwl_addsta_cmd *sta, u8 flags)
149{
150 struct iwl_rx_packet *pkt = NULL;
151 int ret = 0;
152 u8 data[sizeof(*sta)];
153 struct iwl_host_cmd cmd = {
154 .id = REPLY_ADD_STA,
155 .flags = flags,
156 .data = { data, },
157 };
158 u8 sta_id __maybe_unused = sta->sta.sta_id;
159
160 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
161 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
162
163 if (flags & CMD_ASYNC)
164 cmd.callback = iwl_add_sta_callback;
165 else {
166 cmd.flags |= CMD_WANT_SKB;
167 might_sleep();
168 }
169
170 cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
171 ret = trans_send_cmd(&priv->trans, &cmd);
172
173 if (ret || (flags & CMD_ASYNC))
174 return ret;
175
176 if (ret == 0) {
177 pkt = (struct iwl_rx_packet *)cmd.reply_page;
178 ret = iwl_process_add_sta_resp(priv, sta, pkt, true);
179 }
180 iwl_free_pages(priv, cmd.reply_page);
181
182 return ret;
183}
184
185static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
186 struct ieee80211_sta *sta,
187 struct iwl_rxon_context *ctx)
188{
189 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
190 __le32 sta_flags;
191 u8 mimo_ps_mode;
192
193 if (!sta || !sta_ht_inf->ht_supported)
194 goto done;
195
196 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
197 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
198 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
199 "static" :
200 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
201 "dynamic" : "disabled");
202
203 sta_flags = priv->stations[index].sta.station_flags;
204
205 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
206
207 switch (mimo_ps_mode) {
208 case WLAN_HT_CAP_SM_PS_STATIC:
209 sta_flags |= STA_FLG_MIMO_DIS_MSK;
210 break;
211 case WLAN_HT_CAP_SM_PS_DYNAMIC:
212 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
213 break;
214 case WLAN_HT_CAP_SM_PS_DISABLED:
215 break;
216 default:
217 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
218 break;
219 }
220
221 sta_flags |= cpu_to_le32(
222 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
223
224 sta_flags |= cpu_to_le32(
225 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
226
227 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
228 sta_flags |= STA_FLG_HT40_EN_MSK;
229 else
230 sta_flags &= ~STA_FLG_HT40_EN_MSK;
231
232 priv->stations[index].sta.station_flags = sta_flags;
233 done:
234 return;
235}
236
237/**
238 * iwl_prep_station - Prepare station information for addition
239 *
240 * should be called with sta_lock held
241 */
242u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
243 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
244{
245 struct iwl_station_entry *station;
246 int i;
247 u8 sta_id = IWL_INVALID_STATION;
248
249 if (is_ap)
250 sta_id = ctx->ap_sta_id;
251 else if (is_broadcast_ether_addr(addr))
252 sta_id = ctx->bcast_sta_id;
253 else
254 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
255 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
256 addr)) {
257 sta_id = i;
258 break;
259 }
260
261 if (!priv->stations[i].used &&
262 sta_id == IWL_INVALID_STATION)
263 sta_id = i;
264 }
265
266 /*
267 * These two conditions have the same outcome, but keep them
268 * separate
269 */
270 if (unlikely(sta_id == IWL_INVALID_STATION))
271 return sta_id;
272
273 /*
274 * uCode is not able to deal with multiple requests to add a
275 * station. Keep track if one is in progress so that we do not send
276 * another.
277 */
278 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
279 IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
280 sta_id);
281 return sta_id;
282 }
283
284 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
285 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
286 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
287 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
288 sta_id, addr);
289 return sta_id;
290 }
291
292 station = &priv->stations[sta_id];
293 station->used = IWL_STA_DRIVER_ACTIVE;
294 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
295 sta_id, addr);
296 priv->num_stations++;
297
298 /* Set up the REPLY_ADD_STA command to send to device */
299 memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
300 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
301 station->sta.mode = 0;
302 station->sta.sta.sta_id = sta_id;
303 station->sta.station_flags = ctx->station_flags;
304 station->ctxid = ctx->ctxid;
305
306 if (sta) {
307 struct iwl_station_priv_common *sta_priv;
308
309 sta_priv = (void *)sta->drv_priv;
310 sta_priv->ctx = ctx;
311 }
312
313 /*
314 * OK to call unconditionally, since local stations (IBSS BSSID
315 * STA and broadcast STA) pass in a NULL sta, and mac80211
316 * doesn't allow HT IBSS.
317 */
318 iwl_set_ht_add_station(priv, sta_id, sta, ctx);
319
320 return sta_id;
321
322}
323
324#define STA_WAIT_TIMEOUT (HZ/2)
325
326/**
327 * iwl_add_station_common - add a station to the driver and uCode tables
328 */
329int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
330 const u8 *addr, bool is_ap,
331 struct ieee80211_sta *sta, u8 *sta_id_r)
332{
333 unsigned long flags_spin;
334 int ret = 0;
335 u8 sta_id;
336 struct iwl_addsta_cmd sta_cmd;
337
338 *sta_id_r = 0;
339 spin_lock_irqsave(&priv->sta_lock, flags_spin);
340 sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
341 if (sta_id == IWL_INVALID_STATION) {
342 IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
343 addr);
344 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
345 return -EINVAL;
346 }
347
348 /*
349 * uCode is not able to deal with multiple requests to add a
350 * station. Keep track if one is in progress so that we do not send
351 * another.
352 */
353 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
354 IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
355 sta_id);
356 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
357 return -EEXIST;
358 }
359
360 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
361 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
362 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
363 sta_id, addr);
364 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
365 return -EEXIST;
366 }
367
368 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
369 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
370 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
371
372 /* Add station to device's station table */
373 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
374 if (ret) {
375 spin_lock_irqsave(&priv->sta_lock, flags_spin);
376 IWL_ERR(priv, "Adding station %pM failed.\n",
377 priv->stations[sta_id].sta.sta.addr);
378 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
379 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
380 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
381 }
382 *sta_id_r = sta_id;
383 return ret;
384}
385
386/**
387 * iwl_sta_ucode_deactivate - deactivate ucode status for a station
388 *
389 * priv->sta_lock must be held
390 */
391static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
392{
393	/* uCode must be active and driver must not be active */
394 if ((priv->stations[sta_id].used &
395 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != IWL_STA_UCODE_ACTIVE)
396 IWL_ERR(priv, "removed non active STA %u\n", sta_id);
397
398 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
399
400 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
401 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
402}
403
404static int iwl_send_remove_station(struct iwl_priv *priv,
405 const u8 *addr, int sta_id,
406 bool temporary)
407{
408 struct iwl_rx_packet *pkt;
409 int ret;
410
411 unsigned long flags_spin;
412 struct iwl_rem_sta_cmd rm_sta_cmd;
413
414 struct iwl_host_cmd cmd = {
415 .id = REPLY_REMOVE_STA,
416 .len = { sizeof(struct iwl_rem_sta_cmd), },
417 .flags = CMD_SYNC,
418 .data = { &rm_sta_cmd, },
419 };
420
421 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
422 rm_sta_cmd.num_sta = 1;
423 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
424
425 cmd.flags |= CMD_WANT_SKB;
426
427 ret = trans_send_cmd(&priv->trans, &cmd);
428
429 if (ret)
430 return ret;
431
432 pkt = (struct iwl_rx_packet *)cmd.reply_page;
433 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
434 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
435 pkt->hdr.flags);
436 ret = -EIO;
437 }
438
439 if (!ret) {
440 switch (pkt->u.rem_sta.status) {
441 case REM_STA_SUCCESS_MSK:
442 if (!temporary) {
443 spin_lock_irqsave(&priv->sta_lock, flags_spin);
444 iwl_sta_ucode_deactivate(priv, sta_id);
445 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
446 }
447 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
448 break;
449 default:
450 ret = -EIO;
451 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
452 break;
453 }
454 }
455 iwl_free_pages(priv, cmd.reply_page);
456
457 return ret;
458}
459
460/**
461 * iwl_remove_station - Remove driver's knowledge of station.
462 */
463int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
464 const u8 *addr)
465{
466 unsigned long flags;
467
468 if (!iwl_is_ready(priv)) {
469 IWL_DEBUG_INFO(priv,
470 "Unable to remove station %pM, device not ready.\n",
471 addr);
472 /*
473 * It is typical for stations to be removed when we are
474 * going down. Return success since device will be down
475 * soon anyway
476 */
477 return 0;
478 }
479
480 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
481 sta_id, addr);
482
483 if (WARN_ON(sta_id == IWL_INVALID_STATION))
484 return -EINVAL;
485
486 spin_lock_irqsave(&priv->sta_lock, flags);
487
488 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
489 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
490 addr);
491 goto out_err;
492 }
493
494 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
495 IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
496 addr);
497 goto out_err;
498 }
499
500 if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
501 kfree(priv->stations[sta_id].lq);
502 priv->stations[sta_id].lq = NULL;
503 }
504
505 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
506
507 priv->num_stations--;
508
509 if (WARN_ON(priv->num_stations < 0))
510 priv->num_stations = 0;
511
512 spin_unlock_irqrestore(&priv->sta_lock, flags);
513
514 return iwl_send_remove_station(priv, addr, sta_id, false);
515out_err:
516 spin_unlock_irqrestore(&priv->sta_lock, flags);
517 return -EINVAL;
518}
519
520/**
521 * iwl_clear_ucode_stations - clear ucode station table bits
522 *
523 * This function clears all the bits in the driver indicating
524 * which stations are active in the ucode. Call when something
525 * other than explicit station management would cause this in
526 * the ucode, e.g. unassociated RXON.
527 */
528void iwl_clear_ucode_stations(struct iwl_priv *priv,
529 struct iwl_rxon_context *ctx)
530{
531 int i;
532 unsigned long flags_spin;
533 bool cleared = false;
534
535 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
536
537 spin_lock_irqsave(&priv->sta_lock, flags_spin);
538 for (i = 0; i < priv->hw_params.max_stations; i++) {
539 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
540 continue;
541
542 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
543 IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
544 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
545 cleared = true;
546 }
547 }
548 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
549
550 if (!cleared)
551 IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
552}
553
554/**
555 * iwl_restore_stations() - Restore driver known stations to device
556 *
557 * All stations considered active by the driver, but not present in the
558 * ucode, are restored.
559 *
560 * Function sleeps.
561 */
562void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
563{
564 struct iwl_addsta_cmd sta_cmd;
565 struct iwl_link_quality_cmd lq;
566 unsigned long flags_spin;
567 int i;
568 bool found = false;
569 int ret;
570 bool send_lq;
571
572 if (!iwl_is_ready(priv)) {
573 IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
574 return;
575 }
576
577 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
578 spin_lock_irqsave(&priv->sta_lock, flags_spin);
579 for (i = 0; i < priv->hw_params.max_stations; i++) {
580 if (ctx->ctxid != priv->stations[i].ctxid)
581 continue;
582 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
583 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
584 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
585 priv->stations[i].sta.sta.addr);
586 priv->stations[i].sta.mode = 0;
587 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
588 found = true;
589 }
590 }
591
592 for (i = 0; i < priv->hw_params.max_stations; i++) {
593 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
594 memcpy(&sta_cmd, &priv->stations[i].sta,
595 sizeof(struct iwl_addsta_cmd));
596 send_lq = false;
597 if (priv->stations[i].lq) {
598 memcpy(&lq, priv->stations[i].lq,
599 sizeof(struct iwl_link_quality_cmd));
600 send_lq = true;
601 }
602 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
603 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
604 if (ret) {
605 spin_lock_irqsave(&priv->sta_lock, flags_spin);
606 IWL_ERR(priv, "Adding station %pM failed.\n",
607 priv->stations[i].sta.sta.addr);
608 priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
609 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
610 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
611 }
612 /*
613 * Rate scaling has already been initialized, send
614 * current LQ command
615 */
616 if (send_lq)
617 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
618 spin_lock_irqsave(&priv->sta_lock, flags_spin);
619 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
620 }
621 }
622
623 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
624 if (!found)
625 IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n");
626 else
627 IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
628}
629
630void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
631{
632 unsigned long flags;
633 int sta_id = ctx->ap_sta_id;
634 int ret;
635 struct iwl_addsta_cmd sta_cmd;
636 struct iwl_link_quality_cmd lq;
637 bool active;
638
639 spin_lock_irqsave(&priv->sta_lock, flags);
640 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
641 spin_unlock_irqrestore(&priv->sta_lock, flags);
642 return;
643 }
644
645 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
646 sta_cmd.mode = 0;
647 memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
648
649 active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
650 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
651 spin_unlock_irqrestore(&priv->sta_lock, flags);
652
653 if (active) {
654 ret = iwl_send_remove_station(
655 priv, priv->stations[sta_id].sta.sta.addr,
656 sta_id, true);
657 if (ret)
658 IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
659 priv->stations[sta_id].sta.sta.addr, ret);
660 }
661 spin_lock_irqsave(&priv->sta_lock, flags);
662 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
663 spin_unlock_irqrestore(&priv->sta_lock, flags);
664
665 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
666 if (ret)
667 IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
668 priv->stations[sta_id].sta.sta.addr, ret);
669 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
670}
671
672int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
673{
674 int i;
675
676 for (i = 0; i < priv->sta_key_max_num; i++)
677 if (!test_and_set_bit(i, &priv->ucode_key_table))
678 return i;
679
680 return WEP_INVALID_OFFSET;
681}
682
683void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
684{
685 unsigned long flags;
686 int i;
687
688 spin_lock_irqsave(&priv->sta_lock, flags);
689 for (i = 0; i < priv->hw_params.max_stations; i++) {
690 if (!(priv->stations[i].used & IWL_STA_BCAST))
691 continue;
692
693 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
694 priv->num_stations--;
695 if (WARN_ON(priv->num_stations < 0))
696 priv->num_stations = 0;
697 kfree(priv->stations[i].lq);
698 priv->stations[i].lq = NULL;
699 }
700 spin_unlock_irqrestore(&priv->sta_lock, flags);
701}
702
703#ifdef CONFIG_IWLWIFI_DEBUG
704static void iwl_dump_lq_cmd(struct iwl_priv *priv,
705 struct iwl_link_quality_cmd *lq)
706{
707 int i;
708 IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
709 IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
710 lq->general_params.single_stream_ant_msk,
711 lq->general_params.dual_stream_ant_msk);
712
713 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
714 IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
715 i, lq->rs_table[i].rate_n_flags);
716}
717#else
718static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
719 struct iwl_link_quality_cmd *lq)
720{
721}
722#endif
723
724/**
725 * is_lq_table_valid() - Test one aspect of LQ cmd for validity
726 *
727 * It sometimes happens that when an HT rate has been in use and we
728 * lose connectivity with the AP, mac80211 will first tell us that the
729 * current channel is not HT anymore before removing the station. In such a
730 * scenario the RXON flags will be updated to indicate we are not
731 * communicating HT anymore, but the LQ command may still contain HT rates.
732 * Test for this to prevent driver from sending LQ command between the time
733 * RXON flags are updated and when LQ command is updated.
734 */
735static bool is_lq_table_valid(struct iwl_priv *priv,
736 struct iwl_rxon_context *ctx,
737 struct iwl_link_quality_cmd *lq)
738{
739 int i;
740
741 if (ctx->ht.enabled)
742 return true;
743
744 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
745 ctx->active.channel);
746 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
747 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
748 IWL_DEBUG_INFO(priv,
749 "index %d of LQ expects HT channel\n",
750 i);
751 return false;
752 }
753 }
754 return true;
755}
756
757/**
758 * iwl_send_lq_cmd() - Send link quality command
759 * @init: This command is sent as part of station initialization right
760 * after station has been added.
761 *
762 * The link quality command is sent as the last step of station creation.
763 * This is the special case in which init is set and we call a callback in
764 * this case to clear the state indicating that station creation is in
765 * progress.
766 */
767int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
768 struct iwl_link_quality_cmd *lq, u8 flags, bool init)
769{
770 int ret = 0;
771 unsigned long flags_spin;
772
773 struct iwl_host_cmd cmd = {
774 .id = REPLY_TX_LINK_QUALITY_CMD,
775 .len = { sizeof(struct iwl_link_quality_cmd), },
776 .flags = flags,
777 .data = { lq, },
778 };
779
780 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
781 return -EINVAL;
782
783
784 spin_lock_irqsave(&priv->sta_lock, flags_spin);
785 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
786 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
787 return -EINVAL;
788 }
789 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
790
791 iwl_dump_lq_cmd(priv, lq);
792 if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
793 return -EINVAL;
794
795 if (is_lq_table_valid(priv, ctx, lq))
796 ret = trans_send_cmd(&priv->trans, &cmd);
797 else
798 ret = -EINVAL;
799
800 if (cmd.flags & CMD_ASYNC)
801 return ret;
802
803 if (init) {
804 IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
805 lq->sta_id);
806 spin_lock_irqsave(&priv->sta_lock, flags_spin);
807 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
808 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
809 }
810 return ret;
811}
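The kernel-doc above describes the intended flow: the link quality command is the last step of station creation, and passing init = true lets iwl_send_lq_cmd() clear the in-progress bit once the command completes. Below is a hedged sketch of that flow; the construction of the iwl_link_quality_cmd itself is left to the caller, since that part is not shown here.

/* Hedged sketch: add a station, then send its initial LQ command with
 * init = true so IWL_STA_UCODE_INPROGRESS is cleared on completion. */
static int example_add_sta_with_lq(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx,
				   struct ieee80211_sta *sta,
				   struct iwl_link_quality_cmd *lq)
{
	u8 sta_id;
	int ret;

	ret = iwl_add_station_common(priv, ctx, sta->addr, false, sta, &sta_id);
	if (ret)
		return ret;

	lq->sta_id = sta_id;
	return iwl_send_lq_cmd(priv, ctx, lq, CMD_SYNC, true);
}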
812
813int iwl_mac_sta_remove(struct ieee80211_hw *hw,
814 struct ieee80211_vif *vif,
815 struct ieee80211_sta *sta)
816{
817 struct iwl_priv *priv = hw->priv;
818 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
819 int ret;
820
821 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
822 sta->addr);
823 mutex_lock(&priv->mutex);
824 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
825 sta->addr);
826 ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
827 if (ret)
828 IWL_ERR(priv, "Error removing station %pM\n",
829 sta->addr);
830 mutex_unlock(&priv->mutex);
831 return ret;
832}
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
new file mode 100644
index 00000000000..9a6768d6685
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -0,0 +1,138 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_sta_h__
30#define __iwl_sta_h__
31
32#include "iwl-dev.h"
33
34#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
35#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
36#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
37 being activated */
38#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
39 (this is for the IBSS BSSID stations) */
40#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
41
42
43void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
44void iwl_clear_ucode_stations(struct iwl_priv *priv,
45 struct iwl_rxon_context *ctx);
46void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
47int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
48int iwl_send_add_sta(struct iwl_priv *priv,
49 struct iwl_addsta_cmd *sta, u8 flags);
50int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
51 const u8 *addr, bool is_ap,
52 struct ieee80211_sta *sta, u8 *sta_id_r);
53int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
54 const u8 *addr);
55int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
56 struct ieee80211_sta *sta);
57
58u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
59 const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
60
61int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
62 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
63void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
64
65/**
66 * iwl_clear_driver_stations - clear knowledge of all stations from driver
67 * @priv: iwl priv struct
68 *
69 * This is called during iwl_down() to make sure that, in case
70 * we're coming here from a hardware restart, mac80211 will be
71 * able to reconfigure stations -- if we're getting here in the
72 * normal down flow then the stations will already be cleared.
73 */
74static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
75{
76 unsigned long flags;
77 struct iwl_rxon_context *ctx;
78
79 spin_lock_irqsave(&priv->sta_lock, flags);
80 memset(priv->stations, 0, sizeof(priv->stations));
81 priv->num_stations = 0;
82
83 priv->ucode_key_table = 0;
84
85 for_each_context(priv, ctx) {
86 /*
87 * Remove all key information that is not stored as part
88 * of station information since mac80211 may not have had
89 * a chance to remove all the keys. When device is
90 * reconfigured by mac80211 after an error all keys will
91 * be reconfigured.
92 */
93 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
94 ctx->key_mapping_keys = 0;
95 }
96
97 spin_unlock_irqrestore(&priv->sta_lock, flags);
98}
99
100static inline int iwl_sta_id(struct ieee80211_sta *sta)
101{
102 if (WARN_ON(!sta))
103 return IWL_INVALID_STATION;
104
105 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
106}
107
108/**
109 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
110 * @priv: iwl priv
111 * @context: the current context
112 * @sta: mac80211 station
113 *
114 * In certain circumstances mac80211 passes a station pointer
115 * that may be %NULL, for example during TX or key setup. In
116 * that case, we need to use the broadcast station, so this
117 * inline wraps that pattern.
118 */
119static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
120 struct iwl_rxon_context *context,
121 struct ieee80211_sta *sta)
122{
123 int sta_id;
124
125 if (!sta)
126 return context->bcast_sta_id;
127
128 sta_id = iwl_sta_id(sta);
129
130 /*
131 * mac80211 should not be passing a partially
132 * initialised station!
133 */
134 WARN_ON(sta_id == IWL_INVALID_STATION);
135
136 return sta_id;
137}
138#endif /* __iwl_sta_h__ */
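A short usage sketch for iwl_sta_id_or_broadcast() in a TX-style path where mac80211 may hand over a NULL station; the surrounding function and the -ENOENT fallback are illustrative only.

/* Hedged sketch: resolve a station id, falling back to the broadcast STA. */
static int example_resolve_tx_sta_id(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     struct ieee80211_sta *sta)
{
	int sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta);

	if (sta_id == IWL_INVALID_STATION)
		return -ENOENT;	/* partially initialised station from mac80211 */

	/* ... use sta_id when filling the TX command ... */
	return sta_id;
}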
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
new file mode 100644
index 00000000000..b11f60de4f1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
@@ -0,0 +1,754 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/init.h>
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <net/net_namespace.h>
67#include <linux/netdevice.h>
68#include <net/cfg80211.h>
69#include <net/mac80211.h>
70#include <net/netlink.h>
71
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-fh.h"
76#include "iwl-io.h"
77#include "iwl-agn.h"
78#include "iwl-testmode.h"
79#include "iwl-trans.h"
80
81/* The TLVs used in the gnl message policy between the kernel module and
82 * user space application. iwl_testmode_gnl_msg_policy is to be carried
83 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
84 * See iwl-testmode.h
85 */
86static
87struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
88 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
89
90 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
91 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
92
93 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
94 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
95 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
96
97 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
98 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
99
100 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
101
102 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
103 [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
104 [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
105
106 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
107
108 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
109};
110
111/*
112 * See the struct iwl_rx_packet in iwl-commands.h for the format of the
113 * received events from the device
114 */
115static inline int get_event_length(struct iwl_rx_mem_buffer *rxb)
116{
117 struct iwl_rx_packet *pkt = rxb_addr(rxb);
118 if (pkt)
119 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
120 else
121 return 0;
122}
123
124
125/*
126 * This function multicasts the spontaneous messages from the device to
127 * user space. It is invoked whenever a message is received
128 * from the device. This function is called within the ISR of the rx handlers
129 * in the iwlagn driver.
130 *
131 * The parsing of the message content is left to the user space application;
132 * the message content is treated as untouched raw data and is encapsulated
133 * in IWL_TM_ATTR_UCODE_RX_PKT when multicast to user space.
134 *
135 * @priv: the instance of iwlwifi device
136 * @rxb: pointer to rx data content received by the ISR
137 *
138 * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
139 * For the messages multicasting to the user application, the mandatory
140 * TLV fields are :
141 * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
142 * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
143 */
144
145static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
146 struct iwl_rx_mem_buffer *rxb)
147{
148 struct ieee80211_hw *hw = priv->hw;
149 struct sk_buff *skb;
150 void *data;
151 int length;
152
153 data = (void *)rxb_addr(rxb);
154 length = get_event_length(rxb);
155
156 if (!data || length == 0)
157 return;
158
159 skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
160 GFP_ATOMIC);
161 if (skb == NULL) {
162 IWL_DEBUG_INFO(priv,
163 "Run out of memory for messages to user space ?\n");
164 return;
165 }
166 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
167 NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data);
168 cfg80211_testmode_event(skb, GFP_ATOMIC);
169 return;
170
171nla_put_failure:
172 kfree_skb(skb);
173 IWL_DEBUG_INFO(priv, "Ouch, overran buffer, check allocation!\n");
174}
175
176void iwl_testmode_init(struct iwl_priv *priv)
177{
178 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
179 priv->testmode_trace.trace_enabled = false;
180}
181
182static void iwl_trace_cleanup(struct iwl_priv *priv)
183{
184 if (priv->testmode_trace.trace_enabled) {
185 if (priv->testmode_trace.cpu_addr &&
186 priv->testmode_trace.dma_addr)
187 dma_free_coherent(priv->bus->dev,
188 priv->testmode_trace.total_size,
189 priv->testmode_trace.cpu_addr,
190 priv->testmode_trace.dma_addr);
191 priv->testmode_trace.trace_enabled = false;
192 priv->testmode_trace.cpu_addr = NULL;
193 priv->testmode_trace.trace_addr = NULL;
194 priv->testmode_trace.dma_addr = 0;
195 priv->testmode_trace.buff_size = 0;
196 priv->testmode_trace.total_size = 0;
197 }
198}
199
200
201void iwl_testmode_cleanup(struct iwl_priv *priv)
202{
203 iwl_trace_cleanup(priv);
204}
205
206/*
207 * This function handles the user application commands to the ucode.
208 *
209 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
210 * IWL_TM_ATTR_UCODE_CMD_DATA and calls the handler to send the
211 * host command to the uCode.
212 *
213 * If any mandatory field is missing, -ENOMSG is replied to the user space
214 * application; otherwise, the actual execution result of the host command to
215 * ucode is replied.
216 *
217 * @hw: ieee80211_hw object that represents the device
218 * @tb: gnl message fields from the user space
219 */
220static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
221{
222 struct iwl_priv *priv = hw->priv;
223 struct iwl_host_cmd cmd;
224
225 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
226
227 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
228 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
229 IWL_DEBUG_INFO(priv,
230 "Error finding ucode command mandatory fields\n");
231 return -ENOMSG;
232 }
233
234 cmd.flags = CMD_ON_DEMAND;
235 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
236 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
237 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
238 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
239 IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
240 " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
241 /* ok, let's submit the command to ucode */
242 return trans_send_cmd(&priv->trans, &cmd);
243}
244
245
246/*
247 * This function handles the user application commands for register access.
248 *
249 * It retrieves the command ID carried in IWL_TM_ATTR_COMMAND and calls the
250 * respective handlers.
251 *
252 * If it's an unknown command ID, -ENOSYS is returned; -ENOMSG if the
253 * mandatory fields (IWL_TM_ATTR_REG_OFFSET, IWL_TM_ATTR_REG_VALUE32,
254 * IWL_TM_ATTR_REG_VALUE8) are missing; otherwise 0 is replied, indicating
255 * the success of the command execution.
256 *
257 * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
258 * value is returned with IWL_TM_ATTR_REG_VALUE32.
259 *
260 * @hw: ieee80211_hw object that represents the device
261 * @tb: gnl message fields from the user space
262 */
263static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
264{
265 struct iwl_priv *priv = hw->priv;
266 u32 ofs, val32;
267 u8 val8;
268 struct sk_buff *skb;
269 int status = 0;
270
271 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
272 IWL_DEBUG_INFO(priv, "Error finding register offset\n");
273 return -ENOMSG;
274 }
275 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
276 IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
277
278 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
279 case IWL_TM_CMD_APP2DEV_REG_READ32:
280 val32 = iwl_read32(priv, ofs);
281 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
282
283 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
284 if (!skb) {
285 IWL_DEBUG_INFO(priv, "Error allocating memory\n");
286 return -ENOMEM;
287 }
288 NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
289 status = cfg80211_testmode_reply(skb);
290 if (status < 0)
291 IWL_DEBUG_INFO(priv,
292 "Error sending msg : %d\n", status);
293 break;
294 case IWL_TM_CMD_APP2DEV_REG_WRITE32:
295 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
296 IWL_DEBUG_INFO(priv,
297 "Error finding value to write\n");
298 return -ENOMSG;
299 } else {
300 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
301 IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
302 iwl_write32(priv, ofs, val32);
303 }
304 break;
305 case IWL_TM_CMD_APP2DEV_REG_WRITE8:
306 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
307 IWL_DEBUG_INFO(priv, "Error finding value to write\n");
308 return -ENOMSG;
309 } else {
310 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
311 IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
312 iwl_write8(priv, ofs, val8);
313 }
314 break;
315 default:
316 IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
317 return -ENOSYS;
318 }
319
320 return status;
321
322nla_put_failure:
323 kfree_skb(skb);
324 return -EMSGSIZE;
325}
326
327
328static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
329{
330 struct iwl_notification_wait calib_wait;
331 int ret;
332
333 iwlagn_init_notification_wait(priv, &calib_wait,
334 CALIBRATION_COMPLETE_NOTIFICATION,
335 NULL, NULL);
336 ret = iwlagn_init_alive_start(priv);
337 if (ret) {
338 IWL_DEBUG_INFO(priv,
339 "Error configuring init calibration: %d\n", ret);
340 goto cfg_init_calib_error;
341 }
342
343 ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
344 if (ret)
345 IWL_DEBUG_INFO(priv, "Error detecting"
346 " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
347 return ret;
348
349cfg_init_calib_error:
350 iwlagn_remove_notification(priv, &calib_wait);
351 return ret;
352}
353
354/*
355 * This function handles the user application commands for the driver.
356 *
357 * It retrieves the command ID carried in IWL_TM_ATTR_COMMAND and calls the
358 * respective handlers.
359 *
360 * If it's an unknown command ID, -ENOSYS is replied; otherwise, the returned
361 * value of the actual command execution is replied to the user application.
362 *
363 * If there is any message responding to user space, IWL_TM_ATTR_SYNC_RSP
364 * is used to carry the message while IWL_TM_ATTR_COMMAND must be set to
365 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
366 *
367 * @hw: ieee80211_hw object that represents the device
368 * @tb: gnl message fields from the user space
369 */
370static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
371{
372 struct iwl_priv *priv = hw->priv;
373 struct sk_buff *skb;
374 unsigned char *rsp_data_ptr = NULL;
375 int status = 0, rsp_data_len = 0;
376
377 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
378 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
379 rsp_data_ptr = (unsigned char *)priv->cfg->name;
380 rsp_data_len = strlen(priv->cfg->name);
381 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
382 rsp_data_len + 20);
383 if (!skb) {
384 IWL_DEBUG_INFO(priv,
385 "Error allocating memory\n");
386 return -ENOMEM;
387 }
388 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
389 IWL_TM_CMD_DEV2APP_SYNC_RSP);
390 NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP,
391 rsp_data_len, rsp_data_ptr);
392 status = cfg80211_testmode_reply(skb);
393 if (status < 0)
394 IWL_DEBUG_INFO(priv, "Error sending msg : %d\n",
395 status);
396 break;
397
398 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
399 status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
400 IWL_UCODE_INIT);
401 if (status)
402 IWL_DEBUG_INFO(priv,
403 "Error loading init ucode: %d\n", status);
404 break;
405
406 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
407 iwl_testmode_cfg_init_calib(priv);
408 trans_stop_device(&priv->trans);
409 break;
410
411 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
412 status = iwlagn_load_ucode_wait_alive(priv,
413 &priv->ucode_rt,
414 IWL_UCODE_REGULAR);
415 if (status) {
416 IWL_DEBUG_INFO(priv,
417 "Error loading runtime ucode: %d\n", status);
418 break;
419 }
420 status = iwl_alive_start(priv);
421 if (status)
422 IWL_DEBUG_INFO(priv,
423 "Error starting the device: %d\n", status);
424 break;
425
426 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
427 if (priv->eeprom) {
428 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
429 priv->cfg->base_params->eeprom_size + 20);
430 if (!skb) {
431 IWL_DEBUG_INFO(priv,
432 "Error allocating memory\n");
433 return -ENOMEM;
434 }
435 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
436 IWL_TM_CMD_DEV2APP_EEPROM_RSP);
437 NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
438 priv->cfg->base_params->eeprom_size,
439 priv->eeprom);
440 status = cfg80211_testmode_reply(skb);
441 if (status < 0)
442 IWL_DEBUG_INFO(priv,
443 "Error sending msg : %d\n",
444 status);
445 } else
446 return -EFAULT;
447 break;
448
449 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
450 if (!tb[IWL_TM_ATTR_FIXRATE]) {
451 IWL_DEBUG_INFO(priv,
452 "Error finding fixrate setting\n");
453 return -ENOMSG;
454 }
455 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
456 break;
457
458 default:
459 IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
460 return -ENOSYS;
461 }
462 return status;
463
464nla_put_failure:
465 kfree_skb(skb);
466 return -EMSGSIZE;
467}
468
469
470/*
471 * This function handles the user application commands for uCode tracing.
472 *
473 * It retrieves the command ID carried in IWL_TM_ATTR_COMMAND and calls the
474 * respective handlers.
475 *
476 * If it's an unknown command ID, -ENOSYS is replied; otherwise, the returned
477 * value of the actual command execution is replied to the user application.
478 *
479 * @hw: ieee80211_hw object that represents the device
480 * @tb: gnl message fields from the user space
481 */
482static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
483{
484 struct iwl_priv *priv = hw->priv;
485 struct sk_buff *skb;
486 int status = 0;
487 struct device *dev = priv->bus->dev;
488
489 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
490 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
491 if (priv->testmode_trace.trace_enabled)
492 return -EBUSY;
493
494 if (!tb[IWL_TM_ATTR_TRACE_SIZE])
495 priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
496 else
497 priv->testmode_trace.buff_size =
498 nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
499 if (!priv->testmode_trace.buff_size)
500 return -EINVAL;
501 if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
502 priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
503 return -EINVAL;
504
505 priv->testmode_trace.total_size =
506 priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
507 priv->testmode_trace.cpu_addr =
508 dma_alloc_coherent(dev,
509 priv->testmode_trace.total_size,
510 &priv->testmode_trace.dma_addr,
511 GFP_KERNEL);
512 if (!priv->testmode_trace.cpu_addr)
513 return -ENOMEM;
514 priv->testmode_trace.trace_enabled = true;
515 priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
516 priv->testmode_trace.cpu_addr, 0x100);
517 memset(priv->testmode_trace.trace_addr, 0x03B,
518 priv->testmode_trace.buff_size);
519 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
520 sizeof(priv->testmode_trace.dma_addr) + 20);
521 if (!skb) {
522 IWL_DEBUG_INFO(priv,
523 "Error allocating memory\n");
524 iwl_trace_cleanup(priv);
525 return -ENOMEM;
526 }
527 NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR,
528 sizeof(priv->testmode_trace.dma_addr),
529 (u64 *)&priv->testmode_trace.dma_addr);
530 status = cfg80211_testmode_reply(skb);
531 if (status < 0) {
532 IWL_DEBUG_INFO(priv,
533 "Error sending msg : %d\n",
534 status);
535 }
536 priv->testmode_trace.num_chunks =
537 DIV_ROUND_UP(priv->testmode_trace.buff_size,
538 TRACE_CHUNK_SIZE);
539 break;
540
541 case IWL_TM_CMD_APP2DEV_END_TRACE:
542 iwl_trace_cleanup(priv);
543 break;
544 default:
545 IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n");
546 return -ENOSYS;
547 }
548 return status;
549
550nla_put_failure:
551 kfree_skb(skb);
552 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
553 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
554 iwl_trace_cleanup(priv);
555 return -EMSGSIZE;
556}
557
558static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
559 struct sk_buff *skb,
560 struct netlink_callback *cb)
561{
562 struct iwl_priv *priv = hw->priv;
563 int idx, length;
564
565 if (priv->testmode_trace.trace_enabled &&
566 priv->testmode_trace.trace_addr) {
567 idx = cb->args[4];
568 if (idx >= priv->testmode_trace.num_chunks)
569 return -ENOENT;
570 length = TRACE_CHUNK_SIZE;
571 if (((idx + 1) == priv->testmode_trace.num_chunks) &&
572 (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE))
573 length = priv->testmode_trace.buff_size %
574 TRACE_CHUNK_SIZE;
575
576 NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
577 priv->testmode_trace.trace_addr +
578 (TRACE_CHUNK_SIZE * idx));
579 idx++;
580 cb->args[4] = idx;
581 return 0;
582 } else
583 return -EFAULT;
584
585 nla_put_failure:
586 return -ENOBUFS;
587}
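/*
 * Editor's note (illustrative, not part of the driver): the trace buffer is
 * returned to user space in TRACE_CHUNK_SIZE pieces. With a hypothetical
 * chunk size of 0x1000 bytes and a buff_size of 0x2800 bytes,
 * num_chunks = DIV_ROUND_UP(0x2800, 0x1000) = 3, and the dump handler above
 * trims the last chunk to 0x2800 % 0x1000 = 0x800 bytes.
 */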
588
589/*
590 * This function handles the user application's request to switch uCode ownership.
591 *
592 * It retrieves the mandatory field IWL_TM_ATTR_UCODE_OWNER and
593 * decides who the current owner of the uCode is.
594 *
595 * If the current owner is OWNERSHIP_TM, then the only host commands
596 * that can be delivered to the uCode are those from testmode; all other
597 * host commands will be dropped.
598 *
599 * By default the driver is the owner of the uCode in normal operational mode.
600 *
601 * @hw: ieee80211_hw object that represents the device
602 * @tb: gnl message fields from the user space
603 */
604static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
605{
606 struct iwl_priv *priv = hw->priv;
607 u8 owner;
608
609 if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
610 IWL_DEBUG_INFO(priv, "Error finding ucode owner\n");
611 return -ENOMSG;
612 }
613
614 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
615 if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
616 priv->ucode_owner = owner;
617 else {
618 IWL_DEBUG_INFO(priv, "Invalid owner\n");
619 return -EINVAL;
620 }
621 return 0;
622}
623
624
625/* The testmode gnl message handler that takes the gnl message from the
626 * user space, parses it per the policy iwl_testmode_gnl_msg_policy, and then
627 * invokes the corresponding handlers.
628 *
629 * This function is invoked when a user space application sends a
630 * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
631 * by nl80211.
632 *
633 * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
634 * dispatching it to the corresponding handler.
635 *
636 * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
637 * -ENOSYS is replied to the user application if the command is unknown;
638 * Otherwise, the command is dispatched to the respective handler.
639 *
640 * @hw: ieee80211_hw object that represents the device
641 * @data: pointer to user space message
642 * @len: length in byte of @data
643 */
644int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
645{
646 struct nlattr *tb[IWL_TM_ATTR_MAX];
647 struct iwl_priv *priv = hw->priv;
648 int result;
649
650 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
651 iwl_testmode_gnl_msg_policy);
652 if (result != 0) {
653 IWL_DEBUG_INFO(priv,
654 "Error parsing the gnl message : %d\n", result);
655 return result;
656 }
657
658 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
659 if (!tb[IWL_TM_ATTR_COMMAND]) {
660 IWL_DEBUG_INFO(priv, "Error finding testmode command type\n");
661 return -ENOMSG;
662 }
663 /* in case multiple accesses to the device happen */
664 mutex_lock(&priv->mutex);
665
666 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
667 case IWL_TM_CMD_APP2DEV_UCODE:
668 IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
669 result = iwl_testmode_ucode(hw, tb);
670 break;
671 case IWL_TM_CMD_APP2DEV_REG_READ32:
672 case IWL_TM_CMD_APP2DEV_REG_WRITE32:
673 case IWL_TM_CMD_APP2DEV_REG_WRITE8:
674 IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
675 result = iwl_testmode_reg(hw, tb);
676 break;
677 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
678 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
679 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
680 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
681 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
682 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
683 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
684 result = iwl_testmode_driver(hw, tb);
685 break;
686
687 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
688 case IWL_TM_CMD_APP2DEV_END_TRACE:
689 case IWL_TM_CMD_APP2DEV_READ_TRACE:
690 IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
691 result = iwl_testmode_trace(hw, tb);
692 break;
693
694 case IWL_TM_CMD_APP2DEV_OWNERSHIP:
695 IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
696 result = iwl_testmode_ownership(hw, tb);
697 break;
698
699 default:
700 IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
701 result = -ENOSYS;
702 break;
703 }
704
705 mutex_unlock(&priv->mutex);
706 return result;
707}
708
709int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
710 struct netlink_callback *cb,
711 void *data, int len)
712{
713 struct nlattr *tb[IWL_TM_ATTR_MAX];
714 struct iwl_priv *priv = hw->priv;
715 int result;
716 u32 cmd;
717
718 if (cb->args[3]) {
719 /* offset by 1 since commands start at 0 */
720 cmd = cb->args[3] - 1;
721 } else {
722 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
723 iwl_testmode_gnl_msg_policy);
724 if (result) {
725 IWL_DEBUG_INFO(priv,
726 "Error parsing the gnl message : %d\n", result);
727 return result;
728 }
729
730 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
731 if (!tb[IWL_TM_ATTR_COMMAND]) {
732 IWL_DEBUG_INFO(priv,
733 "Error finding testmode command type\n");
734 return -ENOMSG;
735 }
736 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
737 cb->args[3] = cmd + 1;
738 }
739
740 /* in case multiple accesses to the device happen */
741 mutex_lock(&priv->mutex);
742 switch (cmd) {
743 case IWL_TM_CMD_APP2DEV_READ_TRACE:
744 IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
745 result = iwl_testmode_trace_dump(hw, tb, skb, cb);
746 break;
747 default:
748 result = -EINVAL;
749 break;
750 }
751
752 mutex_unlock(&priv->mutex);
753 return result;
754}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
new file mode 100644
index 00000000000..b79330d8418
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
@@ -0,0 +1,82 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_trans_int_pcie_h__
30#define __iwl_trans_int_pcie_h__
31
32/* This file includes the declarations that are internal to the
33 * trans_pcie layer */
34
35/*****************************************************
36* RX
37******************************************************/
38void iwl_bg_rx_replenish(struct work_struct *data);
39void iwl_irq_tasklet(struct iwl_priv *priv);
40void iwlagn_rx_replenish(struct iwl_priv *priv);
41void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
42 struct iwl_rx_queue *q);
43
44/*****************************************************
45* ICT
46******************************************************/
47int iwl_reset_ict(struct iwl_priv *priv);
48void iwl_disable_ict(struct iwl_priv *priv);
49int iwl_alloc_isr_ict(struct iwl_priv *priv);
50void iwl_free_isr_ict(struct iwl_priv *priv);
51irqreturn_t iwl_isr_ict(int irq, void *data);
52
53
54/*****************************************************
55* TX / HCMD
56******************************************************/
57void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
58void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
59 int index);
60int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
61 struct iwl_tx_queue *txq,
62 dma_addr_t addr, u16 len, u8 reset);
63int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
64 int count, int slots_num, u32 id);
65int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
66int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
67 u16 len, const void *data);
68void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
69void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
70 struct iwl_tx_queue *txq,
71 u16 byte_cnt);
72int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
73 u16 ssn_idx, u8 tx_fifo);
74void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
75 int txq_id, u32 index);
76void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
77 struct iwl_tx_queue *txq,
78 int tx_fifo_id, int scd_retry);
79void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
80 int frame_limit);
81
82#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
new file mode 100644
index 00000000000..f9f0df0cecb
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
@@ -0,0 +1,979 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/sched.h>
30#include <linux/wait.h>
31#include <linux/gfp.h>
32
33#include "iwl-dev.h"
34#include "iwl-agn.h"
35#include "iwl-core.h"
36#include "iwl-io.h"
37#include "iwl-helpers.h"
38#include "iwl-trans-int-pcie.h"
39
40/******************************************************************************
41 *
42 * RX path functions
43 *
44 ******************************************************************************/
45
46/*
47 * Rx theory of operation
48 *
49 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
50 * each of which point to Receive Buffers to be filled by the NIC. These get
51 * used not only for Rx frames, but for any command response or notification
52 * from the NIC. The driver and NIC manage the Rx buffers by means
53 * of indexes into the circular buffer.
54 *
55 * Rx Queue Indexes
56 * The host/firmware share two index registers for managing the Rx buffers.
57 *
58 * The READ index maps to the first position that the firmware may be writing
59 * to -- the driver can read up to (but not including) this position and get
60 * good data.
61 * The READ index is managed by the firmware once the card is enabled.
62 *
63 * The WRITE index maps to the last position the driver has read from -- the
64 * position preceding WRITE is the last slot the firmware can place a packet.
65 *
66 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
67 * WRITE = READ.
68 *
69 * During initialization, the host sets up the READ queue position to the first
70 * INDEX position, and WRITE to the last (READ - 1 wrapped)
71 *
72 * When the firmware places a packet in a buffer, it will advance the READ index
73 * and fire the RX interrupt. The driver can then query the READ index and
74 * process as many packets as possible, moving the WRITE index forward as it
75 * resets the Rx queue buffers with new memory.
76 *
77 * The management in the driver is as follows:
78 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
79 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
80 * to replenish the iwl->rxq->rx_free.
81 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
82 * iwl->rxq is replenished and the READ INDEX is updated (updating the
83 * 'processed' and 'read' driver indexes as well)
84 * + A received packet is processed and handed to the kernel network stack,
85 * detached from the iwl->rxq. The driver 'processed' index is updated.
86 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
87 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
88 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
89 * were enough free buffers and RX_STALLED is set it is cleared.
90 *
91 *
92 * Driver sequence:
93 *
94 * iwl_rx_queue_alloc() Allocates rx_free
95 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
96 * iwl_rx_queue_restock
97 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
98 * queue, updates firmware pointers, and updates
99 * the WRITE index. If insufficient rx_free buffers
100 * are available, schedules iwl_rx_replenish
101 *
102 * -- enable interrupts --
103 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
104 * READ INDEX, detaching the SKB from the pool.
105 * Moves the packet buffer from queue to rx_used.
106 * Calls iwl_rx_queue_restock to refill any empty
107 * slots.
108 * ...
109 *
110 */
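/*
 * Editor's worked example (illustrative, not from the original source):
 * assuming RX_QUEUE_SIZE is 256, with read = 10 and write = 250 the space
 * computation in iwl_rx_queue_space() below gives
 *   s = 10 - 250 = -240  ->  -240 + 256 = 16  ->  16 - 2 = 14
 * free slots; the "- 2" keeps a small gap so a full queue is never
 * mistaken for an empty one (see the comment inside the function).
 */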
111
112/**
113 * iwl_rx_queue_space - Return number of free slots available in queue.
114 */
115static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
116{
117 int s = q->read - q->write;
118 if (s <= 0)
119 s += RX_QUEUE_SIZE;
120 /* keep some buffer to not confuse full and empty queue */
121 s -= 2;
122 if (s < 0)
123 s = 0;
124 return s;
125}
126
127/**
128 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
129 */
130void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
131 struct iwl_rx_queue *q)
132{
133 unsigned long flags;
134 u32 reg;
135
136 spin_lock_irqsave(&q->lock, flags);
137
138 if (q->need_update == 0)
139 goto exit_unlock;
140
141 if (priv->cfg->base_params->shadow_reg_enable) {
142 /* shadow register enabled */
143 /* Device expects a multiple of 8 */
144 q->write_actual = (q->write & ~0x7);
145 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
146 } else {
147 /* If power-saving is in use, make sure device is awake */
148 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
149 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
150
151 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
152 IWL_DEBUG_INFO(priv,
153 "Rx queue requesting wakeup,"
154 " GP1 = 0x%x\n", reg);
155 iwl_set_bit(priv, CSR_GP_CNTRL,
156 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
157 goto exit_unlock;
158 }
159
160 q->write_actual = (q->write & ~0x7);
161 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
162 q->write_actual);
163
164 /* Else device is assumed to be awake */
165 } else {
166 /* Device expects a multiple of 8 */
167 q->write_actual = (q->write & ~0x7);
168 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
169 q->write_actual);
170 }
171 }
172 q->need_update = 0;
173
174 exit_unlock:
175 spin_unlock_irqrestore(&q->lock, flags);
176}
177
178/**
179 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
180 */
181static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
182 dma_addr_t dma_addr)
183{
184 return cpu_to_le32((u32)(dma_addr >> 8));
185}
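/*
 * Editor's note (illustrative): receive buffers are 256-byte aligned, so the
 * device only needs bits 8 and up of the DMA address. For example, a
 * (hypothetical) buffer at DMA address 0x12345600 is stored in the RBD as
 * 0x00123456, i.e. the address shifted right by 8 as done above.
 */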
186
187/**
188 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
189 *
190 * If there are slots in the RX queue that need to be restocked,
191 * and we have free pre-allocated buffers, fill the ranks as much
192 * as we can, pulling from rx_free.
193 *
194 * This moves the 'write' index forward to catch up with 'processed', and
195 * also updates the memory address in the firmware to reference the new
196 * target buffer.
197 */
198static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
199{
200 struct iwl_rx_queue *rxq = &priv->rxq;
201 struct list_head *element;
202 struct iwl_rx_mem_buffer *rxb;
203 unsigned long flags;
204
205 spin_lock_irqsave(&rxq->lock, flags);
206 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
207 /* The overwritten rxb must be a used one */
208 rxb = rxq->queue[rxq->write];
209 BUG_ON(rxb && rxb->page);
210
211 /* Get next free Rx buffer, remove from free list */
212 element = rxq->rx_free.next;
213 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
214 list_del(element);
215
216 /* Point to Rx buffer via next RBD in circular buffer */
217 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
218 rxb->page_dma);
219 rxq->queue[rxq->write] = rxb;
220 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
221 rxq->free_count--;
222 }
223 spin_unlock_irqrestore(&rxq->lock, flags);
224 /* If the pre-allocated buffer pool is dropping low, schedule to
225 * refill it */
226 if (rxq->free_count <= RX_LOW_WATERMARK)
227 queue_work(priv->workqueue, &priv->rx_replenish);
228
229
230 /* If we've added more space for the firmware to place data, tell it.
231 * Increment device's write pointer in multiples of 8. */
232 if (rxq->write_actual != (rxq->write & ~0x7)) {
233 spin_lock_irqsave(&rxq->lock, flags);
234 rxq->need_update = 1;
235 spin_unlock_irqrestore(&rxq->lock, flags);
236 iwl_rx_queue_update_write_ptr(priv, rxq);
237 }
238}
239
240/**
241 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
242 *
243 * When moving to rx_free an SKB is allocated for the slot.
244 *
245 * Also restock the Rx queue via iwl_rx_queue_restock.
246 * This is called as a scheduled work item (except for during initialization)
247 */
248static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
249{
250 struct iwl_rx_queue *rxq = &priv->rxq;
251 struct list_head *element;
252 struct iwl_rx_mem_buffer *rxb;
253 struct page *page;
254 unsigned long flags;
255 gfp_t gfp_mask = priority;
256
257 while (1) {
258 spin_lock_irqsave(&rxq->lock, flags);
259 if (list_empty(&rxq->rx_used)) {
260 spin_unlock_irqrestore(&rxq->lock, flags);
261 return;
262 }
263 spin_unlock_irqrestore(&rxq->lock, flags);
264
265 if (rxq->free_count > RX_LOW_WATERMARK)
266 gfp_mask |= __GFP_NOWARN;
267
268 if (priv->hw_params.rx_page_order > 0)
269 gfp_mask |= __GFP_COMP;
270
271 /* Alloc a new receive buffer */
272 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
273 if (!page) {
274 if (net_ratelimit())
275 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
276 "order: %d\n",
277 priv->hw_params.rx_page_order);
278
279 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
280 net_ratelimit())
281 IWL_CRIT(priv, "Failed to alloc_pages with %s."
282 "Only %u free buffers remaining.\n",
283 priority == GFP_ATOMIC ?
284 "GFP_ATOMIC" : "GFP_KERNEL",
285 rxq->free_count);
286 /* We don't reschedule replenish work here -- we will
287 * call the restock method and if it still needs
288 * more buffers it will schedule replenish */
289 return;
290 }
291
292 spin_lock_irqsave(&rxq->lock, flags);
293
294 if (list_empty(&rxq->rx_used)) {
295 spin_unlock_irqrestore(&rxq->lock, flags);
296 __free_pages(page, priv->hw_params.rx_page_order);
297 return;
298 }
299 element = rxq->rx_used.next;
300 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
301 list_del(element);
302
303 spin_unlock_irqrestore(&rxq->lock, flags);
304
305 BUG_ON(rxb->page);
306 rxb->page = page;
307 /* Get physical address of the RB */
308 rxb->page_dma = dma_map_page(priv->bus->dev, page, 0,
309 PAGE_SIZE << priv->hw_params.rx_page_order,
310 DMA_FROM_DEVICE);
311 /* dma address must be no more than 36 bits */
312 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
313 /* and also 256 byte aligned! */
314 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
315
316 spin_lock_irqsave(&rxq->lock, flags);
317
318 list_add_tail(&rxb->list, &rxq->rx_free);
319 rxq->free_count++;
320
321 spin_unlock_irqrestore(&rxq->lock, flags);
322 }
323}
324
325void iwlagn_rx_replenish(struct iwl_priv *priv)
326{
327 unsigned long flags;
328
329 iwlagn_rx_allocate(priv, GFP_KERNEL);
330
331 spin_lock_irqsave(&priv->lock, flags);
332 iwlagn_rx_queue_restock(priv);
333 spin_unlock_irqrestore(&priv->lock, flags);
334}
335
336static void iwlagn_rx_replenish_now(struct iwl_priv *priv)
337{
338 iwlagn_rx_allocate(priv, GFP_ATOMIC);
339
340 iwlagn_rx_queue_restock(priv);
341}
342
343void iwl_bg_rx_replenish(struct work_struct *data)
344{
345 struct iwl_priv *priv =
346 container_of(data, struct iwl_priv, rx_replenish);
347
348 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
349 return;
350
351 mutex_lock(&priv->mutex);
352 iwlagn_rx_replenish(priv);
353 mutex_unlock(&priv->mutex);
354}
355
356/**
357 * iwl_rx_handle - Main entry function for receiving responses from uCode
358 *
359 * Uses the priv->rx_handlers callback function array to invoke
360 * the appropriate handlers, including command responses,
361 * frame-received notifications, and other notifications.
362 */
363static void iwl_rx_handle(struct iwl_priv *priv)
364{
365 struct iwl_rx_mem_buffer *rxb;
366 struct iwl_rx_packet *pkt;
367 struct iwl_rx_queue *rxq = &priv->rxq;
368 u32 r, i;
369 int reclaim;
370 unsigned long flags;
371 u8 fill_rx = 0;
372 u32 count = 8;
373 int total_empty;
374
375 /* uCode's read index (stored in shared DRAM) indicates the last Rx
376 * buffer that the driver may process (last buffer filled by ucode). */
377 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
378 i = rxq->read;
379
380 /* Rx interrupt, but nothing sent from uCode */
381 if (i == r)
382 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
383
384 /* calculate how many frames need to be restocked after handling RX */
385 total_empty = r - rxq->write_actual;
386 if (total_empty < 0)
387 total_empty += RX_QUEUE_SIZE;
388
389 if (total_empty > (RX_QUEUE_SIZE / 2))
390 fill_rx = 1;
391
392 while (i != r) {
393 int len;
394
395 rxb = rxq->queue[i];
396
397 /* If an RXB doesn't have a Rx queue slot associated with it,
398 * then a bug has been introduced in the queue refilling
399 * routines -- catch it here */
400 if (WARN_ON(rxb == NULL)) {
401 i = (i + 1) & RX_QUEUE_MASK;
402 continue;
403 }
404
405 rxq->queue[i] = NULL;
406
407 dma_unmap_page(priv->bus->dev, rxb->page_dma,
408 PAGE_SIZE << priv->hw_params.rx_page_order,
409 DMA_FROM_DEVICE);
410 pkt = rxb_addr(rxb);
411
412 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
413 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
414
415 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
416 len += sizeof(u32); /* account for status word */
417 trace_iwlwifi_dev_rx(priv, pkt, len);
418
419 /* Reclaim a command buffer only if this packet is a response
420 * to a (driver-originated) command.
421 * If the packet (e.g. Rx frame) originated from uCode,
422 * there is no command buffer to reclaim.
423 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
424 * but apparently a few don't get set; catch them here. */
425 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
426 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
427 (pkt->hdr.cmd != REPLY_RX) &&
428 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
429 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
430 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
431 (pkt->hdr.cmd != REPLY_TX);
432
433 iwl_rx_dispatch(priv, rxb);
434
435 /*
436 * XXX: After here, we should always check rxb->page
437 * against NULL before touching it or its virtual
438 * memory (pkt). Because some rx_handler might have
439 * already taken or freed the pages.
440 */
441
442 if (reclaim) {
443 /* Invoke any callbacks, transfer the buffer to caller,
444 * and fire off the (possibly) blocking
445 * trans_send_cmd()
446 * as we reclaim the driver command queue */
447 if (rxb->page)
448 iwl_tx_cmd_complete(priv, rxb);
449 else
450 IWL_WARN(priv, "Claim null rxb?\n");
451 }
452
453 /* Reuse the page if possible. For notification packets and
454 * SKBs that fail to Rx correctly, add them back into the
455 * rx_free list for reuse later. */
456 spin_lock_irqsave(&rxq->lock, flags);
457 if (rxb->page != NULL) {
458 rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page,
459 0, PAGE_SIZE << priv->hw_params.rx_page_order,
460 DMA_FROM_DEVICE);
461 list_add_tail(&rxb->list, &rxq->rx_free);
462 rxq->free_count++;
463 } else
464 list_add_tail(&rxb->list, &rxq->rx_used);
465
466 spin_unlock_irqrestore(&rxq->lock, flags);
467
468 i = (i + 1) & RX_QUEUE_MASK;
469 /* If there are a lot of unused frames,
470 * restock the Rx queue so the uCode won't assert. */
471 if (fill_rx) {
472 count++;
473 if (count >= 8) {
474 rxq->read = i;
475 iwlagn_rx_replenish_now(priv);
476 count = 0;
477 }
478 }
479 }
480
481 /* Backtrack one entry */
482 rxq->read = i;
483 if (fill_rx)
484 iwlagn_rx_replenish_now(priv);
485 else
486 iwlagn_rx_queue_restock(priv);
487}
488
489/* tasklet for iwlagn interrupt */
490void iwl_irq_tasklet(struct iwl_priv *priv)
491{
492 u32 inta = 0;
493 u32 handled = 0;
494 unsigned long flags;
495 u32 i;
496#ifdef CONFIG_IWLWIFI_DEBUG
497 u32 inta_mask;
498#endif
499
500 spin_lock_irqsave(&priv->lock, flags);
501
502 /* Ack/clear/reset pending uCode interrupts.
503 * Note: Some bits in CSR_INT are the "OR" of bits in CSR_FH_INT_STATUS.
504 */
505 /* There is a hardware bug in the interrupt mask function that some
506 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
507 * they are disabled in the CSR_INT_MASK register. Furthermore the
508 * ICT interrupt handling mechanism has another bug that might cause
509 * these unmasked interrupts to fail to be detected. We work around the
510 * hardware bugs here by ACKing all the possible interrupts so that
511 * interrupt coalescing can still be achieved.
512 */
513 iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
514
515 inta = priv->inta;
516
517#ifdef CONFIG_IWLWIFI_DEBUG
518 if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
519 /* just for debug */
520 inta_mask = iwl_read32(priv, CSR_INT_MASK);
521 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
522 inta, inta_mask);
523 }
524#endif
525
526 spin_unlock_irqrestore(&priv->lock, flags);
527
528 /* the interrupt was saved in the inta variable; now we can reset priv->inta */
529 priv->inta = 0;
530
531 /* Now service all interrupt bits discovered above. */
532 if (inta & CSR_INT_BIT_HW_ERR) {
533 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
534
535 /* Tell the device to stop sending interrupts */
536 iwl_disable_interrupts(priv);
537
538 priv->isr_stats.hw++;
539 iwl_irq_handle_error(priv);
540
541 handled |= CSR_INT_BIT_HW_ERR;
542
543 return;
544 }
545
546#ifdef CONFIG_IWLWIFI_DEBUG
547 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
548 /* NIC fires this, but we don't use it, redundant with WAKEUP */
549 if (inta & CSR_INT_BIT_SCD) {
550 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
551 "the frame/frames.\n");
552 priv->isr_stats.sch++;
553 }
554
555 /* Alive notification via Rx interrupt will do the real work */
556 if (inta & CSR_INT_BIT_ALIVE) {
557 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
558 priv->isr_stats.alive++;
559 }
560 }
561#endif
562 /* Safely ignore these bits for debug checks below */
563 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
564
565 /* HW RF KILL switch toggled */
566 if (inta & CSR_INT_BIT_RF_KILL) {
567 int hw_rf_kill = 0;
568 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
569 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
570 hw_rf_kill = 1;
571
572 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
573 hw_rf_kill ? "disable radio" : "enable radio");
574
575 priv->isr_stats.rfkill++;
576
577 /* The driver only loads the uCode once, when setting the interface up.
578 * The driver allows loading the uCode even if the radio
579 * is killed. Hence update the killswitch state here. The
580 * rfkill handler will take care of restarting if needed.
581 */
582 if (!test_bit(STATUS_ALIVE, &priv->status)) {
583 if (hw_rf_kill)
584 set_bit(STATUS_RF_KILL_HW, &priv->status);
585 else
586 clear_bit(STATUS_RF_KILL_HW, &priv->status);
587 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
588 }
589
590 handled |= CSR_INT_BIT_RF_KILL;
591 }
592
593 /* Chip got too hot and stopped itself */
594 if (inta & CSR_INT_BIT_CT_KILL) {
595 IWL_ERR(priv, "Microcode CT kill error detected.\n");
596 priv->isr_stats.ctkill++;
597 handled |= CSR_INT_BIT_CT_KILL;
598 }
599
600 /* Error detected by uCode */
601 if (inta & CSR_INT_BIT_SW_ERR) {
602 IWL_ERR(priv, "Microcode SW error detected. "
603 " Restarting 0x%X.\n", inta);
604 priv->isr_stats.sw++;
605 iwl_irq_handle_error(priv);
606 handled |= CSR_INT_BIT_SW_ERR;
607 }
608
609 /* uCode wakes up after power-down sleep */
610 if (inta & CSR_INT_BIT_WAKEUP) {
611 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
612 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
613 for (i = 0; i < priv->hw_params.max_txq_num; i++)
614 iwl_txq_update_write_ptr(priv, &priv->txq[i]);
615
616 priv->isr_stats.wakeup++;
617
618 handled |= CSR_INT_BIT_WAKEUP;
619 }
620
621 /* All uCode command responses, including Tx command responses,
622 * Rx "responses" (frame-received notification), and other
623 * notifications from uCode come through here */
624 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
625 CSR_INT_BIT_RX_PERIODIC)) {
626 IWL_DEBUG_ISR(priv, "Rx interrupt\n");
627 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
628 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
629 iwl_write32(priv, CSR_FH_INT_STATUS,
630 CSR_FH_INT_RX_MASK);
631 }
632 if (inta & CSR_INT_BIT_RX_PERIODIC) {
633 handled |= CSR_INT_BIT_RX_PERIODIC;
634 iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
635 }
636 /* Sending an RX interrupt requires many steps to be done in
637 * the device:
638 * 1- write interrupt to current index in ICT table.
639 * 2- dma RX frame.
640 * 3- update RX shared data to indicate last write index.
641 * 4- send interrupt.
642 * This could lead to an RX race: the driver could receive an RX
643 * interrupt while the shared data does not yet reflect it; the
644 * periodic interrupt will detect any dangling Rx activity.
645 */
646
647 /* Disable periodic interrupt; we use it as just a one-shot. */
648 iwl_write8(priv, CSR_INT_PERIODIC_REG,
649 CSR_INT_PERIODIC_DIS);
650 iwl_rx_handle(priv);
651
652 /*
653 * Enable periodic interrupt in 8 msec only if we received
654 * real RX interrupt (instead of just periodic int), to catch
655 * any dangling Rx interrupt. If it was just the periodic
656 * interrupt, there was no dangling Rx activity, and no need
657 * to extend the periodic interrupt; one-shot is enough.
658 */
659 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
660 iwl_write8(priv, CSR_INT_PERIODIC_REG,
661 CSR_INT_PERIODIC_ENA);
662
663 priv->isr_stats.rx++;
664 }
665
666 /* This "Tx" DMA channel is used only for loading uCode */
667 if (inta & CSR_INT_BIT_FH_TX) {
668 iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
669 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
670 priv->isr_stats.tx++;
671 handled |= CSR_INT_BIT_FH_TX;
672 /* Wake up uCode load routine, now that load is complete */
673 priv->ucode_write_complete = 1;
674 wake_up(&priv->wait_command_queue);
675 }
676
677 if (inta & ~handled) {
678 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
679 priv->isr_stats.unhandled++;
680 }
681
682 if (inta & ~(priv->inta_mask)) {
683 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
684 inta & ~priv->inta_mask);
685 }
686
687 /* Re-enable all interrupts */
688 /* only Re-enable if disabled by irq */
689 if (test_bit(STATUS_INT_ENABLED, &priv->status))
690 iwl_enable_interrupts(priv);
691 /* Re-enable RF_KILL if it occurred */
692 else if (handled & CSR_INT_BIT_RF_KILL)
693 iwl_enable_rfkill_int(priv);
694}
695
696/******************************************************************************
697 *
698 * ICT functions
699 *
700 ******************************************************************************/
701#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
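/*
 * Editor's note (illustrative): on a typical 4 KiB-page system this yields
 * ICT_COUNT = 4096 / 4 = 1024 table entries, i.e. one u32 interrupt slot
 * per word of the page.
 */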
702
703/* Free dram table */
704void iwl_free_isr_ict(struct iwl_priv *priv)
705{
706 if (priv->ict_tbl_vir) {
707 dma_free_coherent(priv->bus->dev,
708 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
709 priv->ict_tbl_vir,
710 priv->ict_tbl_dma);
711 priv->ict_tbl_vir = NULL;
712 memset(&priv->ict_tbl_dma, 0,
713 sizeof(priv->ict_tbl_dma));
714 memset(&priv->aligned_ict_tbl_dma, 0,
715 sizeof(priv->aligned_ict_tbl_dma));
716 }
717}
718
719
720/* Allocate the DRAM shared table; it is PAGE_SIZE aligned.
721 * Also reset all data related to the ICT table interrupt.
722 */
723int iwl_alloc_isr_ict(struct iwl_priv *priv)
724{
725
726 /* allocate shared data table */
727 priv->ict_tbl_vir =
728 dma_alloc_coherent(priv->bus->dev,
729 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
730 &priv->ict_tbl_dma, GFP_KERNEL);
731 if (!priv->ict_tbl_vir)
732 return -ENOMEM;
733
734 /* align table to PAGE_SIZE boundary */
735 priv->aligned_ict_tbl_dma =
736 ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
737
738 IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
739 (unsigned long long)priv->ict_tbl_dma,
740 (unsigned long long)priv->aligned_ict_tbl_dma,
741 (int)(priv->aligned_ict_tbl_dma -
742 priv->ict_tbl_dma));
743
744 priv->ict_tbl = priv->ict_tbl_vir +
745 (priv->aligned_ict_tbl_dma -
746 priv->ict_tbl_dma);
747
748 IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
749 priv->ict_tbl, priv->ict_tbl_vir,
750 (int)(priv->aligned_ict_tbl_dma -
751 priv->ict_tbl_dma));
752
753 /* reset table and index to all 0 */
754 memset(priv->ict_tbl_vir, 0,
755 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
756 priv->ict_index = 0;
757
758 /* add periodic RX interrupt */
759 priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
760 return 0;
761}
762
763/* Device is going up: inform it that it will be using the ICT interrupt
764 * table; we also need to tell the driver to start using the ICT interrupt.
765 */
766int iwl_reset_ict(struct iwl_priv *priv)
767{
768 u32 val;
769 unsigned long flags;
770
771 if (!priv->ict_tbl_vir)
772 return 0;
773
774 spin_lock_irqsave(&priv->lock, flags);
775 iwl_disable_interrupts(priv);
776
777 memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
778
779 val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
780
781 val |= CSR_DRAM_INT_TBL_ENABLE;
782 val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
783
784 IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
785 "aligned dma address %Lx\n",
786 val,
787 (unsigned long long)priv->aligned_ict_tbl_dma);
788
789 iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
790 priv->use_ict = true;
791 priv->ict_index = 0;
792 iwl_write32(priv, CSR_INT, priv->inta_mask);
793 iwl_enable_interrupts(priv);
794 spin_unlock_irqrestore(&priv->lock, flags);
795
796 return 0;
797}
798
799/* Device is going down; disable ICT interrupt usage */
800void iwl_disable_ict(struct iwl_priv *priv)
801{
802 unsigned long flags;
803
804 spin_lock_irqsave(&priv->lock, flags);
805 priv->use_ict = false;
806 spin_unlock_irqrestore(&priv->lock, flags);
807}
808
809static irqreturn_t iwl_isr(int irq, void *data)
810{
811 struct iwl_priv *priv = data;
812 u32 inta, inta_mask;
813 unsigned long flags;
814#ifdef CONFIG_IWLWIFI_DEBUG
815 u32 inta_fh;
816#endif
817 if (!priv)
818 return IRQ_NONE;
819
820 spin_lock_irqsave(&priv->lock, flags);
821
822 /* Disable (but don't clear!) interrupts here to avoid
823 * back-to-back ISRs and sporadic interrupts from our NIC.
824 * If we have something to service, the tasklet will re-enable ints.
825 * If we *don't* have something, we'll re-enable before leaving here. */
826 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
827 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
828
829 /* Discover which interrupts are active/pending */
830 inta = iwl_read32(priv, CSR_INT);
831
832 /* Ignore interrupt if there's nothing in NIC to service.
833 * This may be due to IRQ shared with another device,
834 * or due to sporadic interrupts thrown from our NIC. */
835 if (!inta) {
836 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
837 goto none;
838 }
839
840 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
841 /* Hardware disappeared. It might have already raised
842 * an interrupt */
843 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
844 goto unplugged;
845 }
846
847#ifdef CONFIG_IWLWIFI_DEBUG
848 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
849 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
850 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
851 "fh 0x%08x\n", inta, inta_mask, inta_fh);
852 }
853#endif
854
855 priv->inta |= inta;
856 /* iwl_irq_tasklet() will service interrupts and re-enable them */
857 if (likely(inta))
858 tasklet_schedule(&priv->irq_tasklet);
859 else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
860 !priv->inta)
861 iwl_enable_interrupts(priv);
862
863 unplugged:
864 spin_unlock_irqrestore(&priv->lock, flags);
865 return IRQ_HANDLED;
866
867 none:
868 /* re-enable interrupts here since we don't have anything to service. */
869 /* only re-enable if disabled by irq and no tasklet was scheduled. */
870 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
871 iwl_enable_interrupts(priv);
872
873 spin_unlock_irqrestore(&priv->lock, flags);
874 return IRQ_NONE;
875}
876
877/* Interrupt handler using the ICT table. With this handler the driver stops
878 * using the INTA register to get the device's interrupts, since reading that
879 * register is expensive. Instead, the device writes interrupts into the ICT
880 * DRAM table and increments an index, then fires an interrupt to the driver.
881 * The driver ORs all ICT table entries from the current index up to the first
882 * entry with a 0 value; the result is the interrupt that needs to be serviced.
883 * The driver then sets the entries back to 0 and updates the index.
884 */
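/*
 * Editor's worked example (illustrative, not from the original source): the
 * ICT table packs the interrupt cause into two bytes that the handler below
 * folds back with  inta = (0xff & val) | ((0xff00 & val) << 16).  A
 * (hypothetical) accumulated value of 0x0000ab12 therefore becomes
 * inta = 0xab000012. If bit 18 or 19 of val is set, the workaround first
 * sets bit 15, which the fold then moves up to bit 31.
 */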
885irqreturn_t iwl_isr_ict(int irq, void *data)
886{
887 struct iwl_priv *priv = data;
888 u32 inta, inta_mask;
889 u32 val = 0;
890 unsigned long flags;
891
892 if (!priv)
893 return IRQ_NONE;
894
895 /* dram interrupt table not set yet,
896 * use legacy interrupt.
897 */
898 if (!priv->use_ict)
899 return iwl_isr(irq, data);
900
901 spin_lock_irqsave(&priv->lock, flags);
902
903 /* Disable (but don't clear!) interrupts here to avoid
904 * back-to-back ISRs and sporadic interrupts from our NIC.
905 * If we have something to service, the tasklet will re-enable ints.
906 * If we *don't* have something, we'll re-enable before leaving here.
907 */
908 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
909 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
910
911
912 /* Ignore interrupt if there's nothing in NIC to service.
913 * This may be due to IRQ shared with another device,
914 * or due to sporadic interrupts thrown from our NIC. */
915 if (!priv->ict_tbl[priv->ict_index]) {
916 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
917 goto none;
918 }
919
920 /* read all entries that are not 0, starting at ict_index */
921 while (priv->ict_tbl[priv->ict_index]) {
922
923 val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
924 IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
925 priv->ict_index,
926 le32_to_cpu(
927 priv->ict_tbl[priv->ict_index]));
928 priv->ict_tbl[priv->ict_index] = 0;
929 priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
930 ICT_COUNT);
931
932 }
933
934 /* We should not get this value, just ignore it. */
935 if (val == 0xffffffff)
936 val = 0;
937
938 /*
939 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
940 * (bit 15 before shifting it to 31) to clear when using interrupt
941 * coalescing. fortunately, bits 18 and 19 stay set when this happens
942 * so we use them to decide on the real state of the Rx bit.
943 * In other words, bit 15 is set if bit 18 or bit 19 is set.
944 */
945 if (val & 0xC0000)
946 val |= 0x8000;
947
948 inta = (0xff & val) | ((0xff00 & val) << 16);
949 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
950 inta, inta_mask, val);
951
952 inta &= priv->inta_mask;
953 priv->inta |= inta;
954
955 /* iwl_irq_tasklet() will service interrupts and re-enable them */
956 if (likely(inta))
957 tasklet_schedule(&priv->irq_tasklet);
958 else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
959 !priv->inta) {
960 /* Re-enable interrupts only if they were disabled by this handler
961 * and no tasklet was scheduled; if a tasklet was scheduled, it will
962 * re-enable them itself.
963 */
964 iwl_enable_interrupts(priv);
965 }
966
967 spin_unlock_irqrestore(&priv->lock, flags);
968 return IRQ_HANDLED;
969
970 none:
971 /* re-enable interrupts here since we don't have anything to service.
972 * only Re-enable if disabled by irq.
973 */
974 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
975 iwl_enable_interrupts(priv);
976
977 spin_unlock_irqrestore(&priv->lock, flags);
978 return IRQ_NONE;
979}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
new file mode 100644
index 00000000000..2bf3107be93
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -0,0 +1,1038 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/slab.h>
31#include <linux/sched.h>
32#include <net/mac80211.h>
33
34#include "iwl-agn.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-trans-int-pcie.h"
40
41/**
42 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
43 */
44void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
45 struct iwl_tx_queue *txq,
46 u16 byte_cnt)
47{
48 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
49 int write_ptr = txq->q.write_ptr;
50 int txq_id = txq->q.id;
51 u8 sec_ctl = 0;
52 u8 sta_id = 0;
53 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
54 __le16 bc_ent;
55
56 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
57
58 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
59 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
60
61 switch (sec_ctl & TX_CMD_SEC_MSK) {
62 case TX_CMD_SEC_CCM:
63 len += CCMP_MIC_LEN;
64 break;
65 case TX_CMD_SEC_TKIP:
66 len += TKIP_ICV_LEN;
67 break;
68 case TX_CMD_SEC_WEP:
69 len += WEP_IV_LEN + WEP_ICV_LEN;
70 break;
71 }
72
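	/*
	 * Byte-count entry layout, as used just below: the 12-bit length in
	 * the low bits and the station id in the top 4 bits. The first
	 * TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the end of the
	 * table, presumably so the scheduler can read a contiguous window
	 * across the wrap point.
	 */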
73 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
74
75 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
76
77 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
78 scd_bc_tbl[txq_id].
79 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
80}
81
82/**
83 * iwl_txq_update_write_ptr - Send new write index to hardware
84 */
85void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
86{
87 u32 reg = 0;
88 int txq_id = txq->q.id;
89
90 if (txq->need_update == 0)
91 return;
92
93 if (priv->cfg->base_params->shadow_reg_enable) {
94 /* shadow register enabled */
95 iwl_write32(priv, HBUS_TARG_WRPTR,
96 txq->q.write_ptr | (txq_id << 8));
97 } else {
98 /* if we're trying to save power */
99 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
100 /* wake up nic if it's powered down ...
101 * uCode will wake up, and interrupt us again, so next
102 * time we'll skip this part. */
103 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
104
105 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
106 IWL_DEBUG_INFO(priv,
107 "Tx queue %d requesting wakeup,"
108 " GP1 = 0x%x\n", txq_id, reg);
109 iwl_set_bit(priv, CSR_GP_CNTRL,
110 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
111 return;
112 }
113
114 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
115 txq->q.write_ptr | (txq_id << 8));
116
117 /*
118 * else not in power-save mode,
119 * uCode will never sleep when we're
120 * trying to tx (during RFKILL, we're not trying to tx).
121 */
122 } else
123 iwl_write32(priv, HBUS_TARG_WRPTR,
124 txq->q.write_ptr | (txq_id << 8));
125 }
126 txq->need_update = 0;
127}
128
129static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
130{
131 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
132
133 dma_addr_t addr = get_unaligned_le32(&tb->lo);
134 if (sizeof(dma_addr_t) > sizeof(u32))
135 addr |=
136 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
137
138 return addr;
139}
140
141static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
142{
143 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
144
145 return le16_to_cpu(tb->hi_n_len) >> 4;
146}
147
148static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
149 dma_addr_t addr, u16 len)
150{
151 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
152 u16 hi_n_len = len << 4;
153
154 put_unaligned_le32(addr, &tb->lo);
155 if (sizeof(dma_addr_t) > sizeof(u32))
156 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
157
158 tb->hi_n_len = cpu_to_le16(hi_n_len);
159
160 tfd->num_tbs = idx + 1;
161}
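/*
 * Illustrative sketch (not part of the original source): how the helpers
 * above pack a 36-bit DMA address and a 12-bit length into a TFD buffer
 * descriptor on systems where dma_addr_t is wider than 32 bits. For
 * example, addr = 0x9ABCD1234 and len = 0x123 become
 *
 *	lo       = 0xABCD1234			(low 32 address bits)
 *	hi_n_len = (0x123 << 4) | 0x9 = 0x1239	(length in bits 4..15,
 *						 top 4 address bits in 0..3)
 *
 * iwl_tfd_tb_get_addr() and iwl_tfd_tb_get_len() reverse this packing.
 */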
162
163static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
164{
165 return tfd->num_tbs & 0x1f;
166}
167
168static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
169 struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
170{
171 int i;
172 int num_tbs;
173
174 /* Sanity check on number of chunks */
175 num_tbs = iwl_tfd_get_num_tbs(tfd);
176
177 if (num_tbs >= IWL_NUM_OF_TBS) {
178 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
179		/* @todo issue fatal error, it is quite a serious situation */
180 return;
181 }
182
183 /* Unmap tx_cmd */
184 if (num_tbs)
185 dma_unmap_single(priv->bus->dev,
186 dma_unmap_addr(meta, mapping),
187 dma_unmap_len(meta, len),
188 DMA_BIDIRECTIONAL);
189
190 /* Unmap chunks, if any. */
191 for (i = 1; i < num_tbs; i++)
192 dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i),
193 iwl_tfd_tb_get_len(tfd, i), dma_dir);
194}
195
196/**
197 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
198 * @priv: driver private data
199 * @txq: tx queue
200 * @index: the index of the TFD to be freed
201 *
202 * Does NOT advance any TFD circular buffer read/write indexes
203 * Does NOT free the TFD itself (which is within circular buffer)
204 */
205void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
206 int index)
207{
208 struct iwl_tfd *tfd_tmp = txq->tfds;
209
210 iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
211 DMA_TO_DEVICE);
212
213 /* free SKB */
214 if (txq->txb) {
215 struct sk_buff *skb;
216
217 skb = txq->txb[index].skb;
218
219 /* can be called from irqs-disabled context */
220 if (skb) {
221 dev_kfree_skb_any(skb);
222 txq->txb[index].skb = NULL;
223 }
224 }
225}
226
227int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
228 struct iwl_tx_queue *txq,
229 dma_addr_t addr, u16 len,
230 u8 reset)
231{
232 struct iwl_queue *q;
233 struct iwl_tfd *tfd, *tfd_tmp;
234 u32 num_tbs;
235
236 q = &txq->q;
237 tfd_tmp = txq->tfds;
238 tfd = &tfd_tmp[q->write_ptr];
239
240 if (reset)
241 memset(tfd, 0, sizeof(*tfd));
242
243 num_tbs = iwl_tfd_get_num_tbs(tfd);
244
245	/* Each TFD can point to a maximum of 20 Tx buffers */
246 if (num_tbs >= IWL_NUM_OF_TBS) {
247 IWL_ERR(priv, "Error can not send more than %d chunks\n",
248 IWL_NUM_OF_TBS);
249 return -EINVAL;
250 }
251
252 if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
253 return -EINVAL;
254
255 if (unlikely(addr & ~IWL_TX_DMA_MASK))
256 IWL_ERR(priv, "Unaligned address = %llx\n",
257 (unsigned long long)addr);
258
259 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
260
261 return 0;
262}
263
264/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
265 * DMA services
266 *
267 * Theory of operation
268 *
269 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
270 * of buffer descriptors, each of which points to one or more data buffers for
271 * the device to read from or fill. Driver and device exchange status of each
272 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
273 * entries in each circular buffer, to protect against confusing empty and full
274 * queue states.
275 *
276 * The device reads or writes the data in the queues via the device's several
277 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
278 *
279 * For Tx queues, there are low mark and high mark limits. If, after queuing
280 * the packet for Tx, the free space becomes < low mark, the Tx queue is
281 * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
282 * space becomes > high mark, the Tx queue is resumed.
283 *
284 ***************************************************/
285
286int iwl_queue_space(const struct iwl_queue *q)
287{
288 int s = q->read_ptr - q->write_ptr;
289
290 if (q->read_ptr > q->write_ptr)
291 s -= q->n_bd;
292
293 if (s <= 0)
294 s += q->n_window;
295 /* keep some reserve to not confuse empty and full situations */
296 s -= 2;
297 if (s < 0)
298 s = 0;
299 return s;
300}
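/*
 * Worked example (illustrative, not part of the original source): with
 * n_bd = 256 and n_window = 64, an empty queue (read_ptr == write_ptr)
 * reports 64 - 2 = 62 usable slots; the two reserved entries keep the
 * "empty" and "full" states distinguishable, as described above.
 */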
301
302/**
303 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
304 */
305int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
306 int count, int slots_num, u32 id)
307{
308 q->n_bd = count;
309 q->n_window = slots_num;
310 q->id = id;
311
312 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
313 * and iwl_queue_dec_wrap are broken. */
314 if (WARN_ON(!is_power_of_2(count)))
315 return -EINVAL;
316
317 /* slots_num must be power-of-two size, otherwise
318 * get_cmd_index is broken. */
319 if (WARN_ON(!is_power_of_2(slots_num)))
320 return -EINVAL;
321
322 q->low_mark = q->n_window / 4;
323 if (q->low_mark < 4)
324 q->low_mark = 4;
325
326 q->high_mark = q->n_window / 8;
327 if (q->high_mark < 2)
328 q->high_mark = 2;
329
330 q->write_ptr = q->read_ptr = 0;
331
332 return 0;
333}
334
335/*TODO: this function should NOT be exported from the trans module - export it
336 * only until the reclaim flow is brought into the transport module too.
337 * Add a declaration to make sparse happy */
338void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
339 struct iwl_tx_queue *txq);
340
341void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
342 struct iwl_tx_queue *txq)
343{
344 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
345 int txq_id = txq->q.id;
346 int read_ptr = txq->q.read_ptr;
347 u8 sta_id = 0;
348 __le16 bc_ent;
349
350 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
351
352 if (txq_id != priv->cmd_queue)
353 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
354
355 bc_ent = cpu_to_le16(1 | (sta_id << 12));
356 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
357
358 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
359 scd_bc_tbl[txq_id].
360 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
361}
362
363static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
364 u16 txq_id)
365{
366 u32 tbl_dw_addr;
367 u32 tbl_dw;
368 u16 scd_q2ratid;
369
370 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
371
372 tbl_dw_addr = priv->scd_base_addr +
373 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
374
375 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
376
377 if (txq_id & 0x1)
378 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
379 else
380 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
381
382 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
383
384 return 0;
385}
386
387static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
388{
389 /* Simply stop the queue, but don't change any configuration;
390 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
391 iwl_write_prph(priv,
392 SCD_QUEUE_STATUS_BITS(txq_id),
393 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
394 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
395}
396
397void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
398 int txq_id, u32 index)
399{
400 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
401 (index & 0xff) | (txq_id << 8));
402 iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
403}
404
405void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
406 struct iwl_tx_queue *txq,
407 int tx_fifo_id, int scd_retry)
408{
409 int txq_id = txq->q.id;
410 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
411
412 iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id),
413 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
414 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
415 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
416 SCD_QUEUE_STTS_REG_MSK);
417
418 txq->sched_retry = scd_retry;
419
420 IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
421 active ? "Activate" : "Deactivate",
422 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
423}
424
425void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
426 int frame_limit)
427{
428 int tx_fifo, txq_id, ssn_idx;
429 u16 ra_tid;
430 unsigned long flags;
431 struct iwl_tid_data *tid_data;
432
433 if (WARN_ON(sta_id == IWL_INVALID_STATION))
434 return;
435 if (WARN_ON(tid >= MAX_TID_COUNT))
436 return;
437
438 spin_lock_irqsave(&priv->sta_lock, flags);
439 tid_data = &priv->stations[sta_id].tid[tid];
440 ssn_idx = SEQ_TO_SN(tid_data->seq_number);
441 txq_id = tid_data->agg.txq_id;
442 tx_fifo = tid_data->agg.tx_fifo;
443 spin_unlock_irqrestore(&priv->sta_lock, flags);
444
445 ra_tid = BUILD_RAxTID(sta_id, tid);
446
447 spin_lock_irqsave(&priv->lock, flags);
448
449 /* Stop this Tx queue before configuring it */
450 iwlagn_tx_queue_stop_scheduler(priv, txq_id);
451
452 /* Map receiver-address / traffic-ID to this queue */
453 iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
454
455 /* Set this queue as a chain-building queue */
456 iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
457
458 /* enable aggregations for the queue */
459 iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id));
460
461 /* Place first TFD at index corresponding to start sequence number.
462 * Assumes that ssn_idx is valid (!= 0xFFF) */
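	/* e.g. ssn_idx 0x123 & 0xff yields TFD index 0x23 in the 256-entry ring */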
463 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
464 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
465 iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
466
467 /* Set up Tx window size and frame limit for this queue */
468 iwl_write_targ_mem(priv, priv->scd_base_addr +
469 SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
470 sizeof(u32),
471 ((frame_limit <<
472 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
473 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
474 ((frame_limit <<
475 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
476 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
477
478 iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
479
480 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
481 iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
482
483 spin_unlock_irqrestore(&priv->lock, flags);
484}
485
486int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
487 u16 ssn_idx, u8 tx_fifo)
488{
489 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
490 (IWLAGN_FIRST_AMPDU_QUEUE +
491 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
492 IWL_ERR(priv,
493 "queue number out of range: %d, must be %d to %d\n",
494 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
495 IWLAGN_FIRST_AMPDU_QUEUE +
496 priv->cfg->base_params->num_of_ampdu_queues - 1);
497 return -EINVAL;
498 }
499
500 iwlagn_tx_queue_stop_scheduler(priv, txq_id);
501
502 iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));
503
504 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
505 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
506	/* assumes that ssn_idx is valid (!= 0xFFF) */
507 iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
508
509 iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
510 iwl_txq_ctx_deactivate(priv, txq_id);
511 iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
512
513 return 0;
514}
515
516/*************** HOST COMMAND QUEUE FUNCTIONS *****/
517
518/**
519 * iwl_enqueue_hcmd - enqueue a uCode command
520 * @priv: device private data pointer
521 * @cmd: a pointer to the uCode command structure
522 *
523 * The function returns a value < 0 to indicate that the operation
524 * failed. On success, it returns the index (>= 0) of the command in
525 * the command queue.
526 */
527static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
528{
529 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
530 struct iwl_queue *q = &txq->q;
531 struct iwl_device_cmd *out_cmd;
532 struct iwl_cmd_meta *out_meta;
533 dma_addr_t phys_addr;
534 unsigned long flags;
535 u32 idx;
536 u16 copy_size, cmd_size;
537 bool is_ct_kill = false;
538 bool had_nocopy = false;
539 int i;
540 u8 *cmd_dest;
541#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
542 const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
543 int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
544 int trace_idx;
545#endif
546
547 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
548 IWL_WARN(priv, "fw recovery, no hcmd send\n");
549 return -EIO;
550 }
551
552 if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
553 !(cmd->flags & CMD_ON_DEMAND)) {
554		IWL_DEBUG_HC(priv, "tm owns the uCode, no regular hcmd send\n");
555 return -EIO;
556 }
557
558 copy_size = sizeof(out_cmd->hdr);
559 cmd_size = sizeof(out_cmd->hdr);
560
561 /* need one for the header if the first is NOCOPY */
562 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
563
564 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
565 if (!cmd->len[i])
566 continue;
567 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
568 had_nocopy = true;
569 } else {
570 /* NOCOPY must not be followed by normal! */
571 if (WARN_ON(had_nocopy))
572 return -EINVAL;
573 copy_size += cmd->len[i];
574 }
575 cmd_size += cmd->len[i];
576 }
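	/*
	 * copy_size counts only the chunks copied into the command buffer
	 * (mapped as a single TFD entry below); cmd_size is the full logical
	 * command length including NOCOPY chunks, which get TFD entries of
	 * their own further down.
	 */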
577
578 /*
579 * If any of the command structures end up being larger than
580 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
581 * allocated into separate TFDs, then we will need to
582 * increase the size of the buffers.
583 */
584 if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
585 return -EINVAL;
586
587 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
588 IWL_WARN(priv, "Not sending command - %s KILL\n",
589 iwl_is_rfkill(priv) ? "RF" : "CT");
590 return -EIO;
591 }
592
593 spin_lock_irqsave(&priv->hcmd_lock, flags);
594
595 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
596 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
597
598 IWL_ERR(priv, "No space in command queue\n");
599 is_ct_kill = iwl_check_for_ct_kill(priv);
600 if (!is_ct_kill) {
601 IWL_ERR(priv, "Restarting adapter due to queue full\n");
602 iwlagn_fw_error(priv, false);
603 }
604 return -ENOSPC;
605 }
606
607 idx = get_cmd_index(q, q->write_ptr);
608 out_cmd = txq->cmd[idx];
609 out_meta = &txq->meta[idx];
610
611 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
612 if (cmd->flags & CMD_WANT_SKB)
613 out_meta->source = cmd;
614 if (cmd->flags & CMD_ASYNC)
615 out_meta->callback = cmd->callback;
616
617 /* set up the header */
618
619 out_cmd->hdr.cmd = cmd->id;
620 out_cmd->hdr.flags = 0;
621 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
622 INDEX_TO_SEQ(q->write_ptr));
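	/*
	 * The sequence field round-trips through the uCode: the response
	 * carries it back and iwl_tx_cmd_complete() decodes it with
	 * SEQ_TO_QUEUE()/SEQ_TO_INDEX() to find this entry again.
	 */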
623
624 /* and copy the data that needs to be copied */
625
626 cmd_dest = &out_cmd->cmd.payload[0];
627 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
628 if (!cmd->len[i])
629 continue;
630 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
631 break;
632 memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
633 cmd_dest += cmd->len[i];
634 }
635
636 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
637 "%d bytes at %d[%d]:%d\n",
638 get_cmd_string(out_cmd->hdr.cmd),
639 out_cmd->hdr.cmd,
640 le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
641 q->write_ptr, idx, priv->cmd_queue);
642
643 phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
644 DMA_BIDIRECTIONAL);
645 if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
646 idx = -ENOMEM;
647 goto out;
648 }
649
650 dma_unmap_addr_set(out_meta, mapping, phys_addr);
651 dma_unmap_len_set(out_meta, len, copy_size);
652
653 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
654#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
655 trace_bufs[0] = &out_cmd->hdr;
656 trace_lens[0] = copy_size;
657 trace_idx = 1;
658#endif
659
660 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
661 if (!cmd->len[i])
662 continue;
663 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
664 continue;
665 phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
666 cmd->len[i], DMA_BIDIRECTIONAL);
667 if (dma_mapping_error(priv->bus->dev, phys_addr)) {
668 iwlagn_unmap_tfd(priv, out_meta,
669 &txq->tfds[q->write_ptr],
670 DMA_BIDIRECTIONAL);
671 idx = -ENOMEM;
672 goto out;
673 }
674
675 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
676 cmd->len[i], 0);
677#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
678 trace_bufs[trace_idx] = cmd->data[i];
679 trace_lens[trace_idx] = cmd->len[i];
680 trace_idx++;
681#endif
682 }
683
684 out_meta->flags = cmd->flags;
685
686 txq->need_update = 1;
687
688 /* check that tracing gets all possible blocks */
689 BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
690#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
691 trace_iwlwifi_dev_hcmd(priv, cmd->flags,
692 trace_bufs[0], trace_lens[0],
693 trace_bufs[1], trace_lens[1],
694 trace_bufs[2], trace_lens[2]);
695#endif
696
697 /* Increment and update queue's write index */
698 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
699 iwl_txq_update_write_ptr(priv, txq);
700
701 out:
702 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
703 return idx;
704}
705
706/**
707 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
708 *
709 * When FW advances 'R' index, all entries between old and new 'R' index
710 * need to be reclaimed. As a result, some free space forms. If there is
711 * enough free space (> low mark), wake the stack that feeds us.
712 */
713static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
714{
715 struct iwl_tx_queue *txq = &priv->txq[txq_id];
716 struct iwl_queue *q = &txq->q;
717 int nfreed = 0;
718
719 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
720 IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
721 "index %d is out of range [0-%d] %d %d.\n", __func__,
722 txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
723 return;
724 }
725
726 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
727 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
728
729 if (nfreed++ > 0) {
730 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
731 q->write_ptr, q->read_ptr);
732 iwlagn_fw_error(priv, false);
733 }
734
735 }
736}
737
738/**
739 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
740 * @rxb: Rx buffer to reclaim
741 *
742 * If an Rx buffer has an async callback associated with it, the callback
743 * will be executed. The attached skb (if present) will only be freed
744 * if the callback returns 1.
745 */
746void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
747{
748 struct iwl_rx_packet *pkt = rxb_addr(rxb);
749 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
750 int txq_id = SEQ_TO_QUEUE(sequence);
751 int index = SEQ_TO_INDEX(sequence);
752 int cmd_index;
753 struct iwl_device_cmd *cmd;
754 struct iwl_cmd_meta *meta;
755 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
756 unsigned long flags;
757
758 /* If a Tx command is being handled and it isn't in the actual
759	 * command queue, then a command routing bug has been introduced
760 * in the queue management code. */
761 if (WARN(txq_id != priv->cmd_queue,
762 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
763 txq_id, priv->cmd_queue, sequence,
764 priv->txq[priv->cmd_queue].q.read_ptr,
765 priv->txq[priv->cmd_queue].q.write_ptr)) {
766 iwl_print_hex_error(priv, pkt, 32);
767 return;
768 }
769
770 cmd_index = get_cmd_index(&txq->q, index);
771 cmd = txq->cmd[cmd_index];
772 meta = &txq->meta[cmd_index];
773
774 txq->time_stamp = jiffies;
775
776 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
777
778 /* Input error checking is done when commands are added to queue. */
779 if (meta->flags & CMD_WANT_SKB) {
780 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
781 rxb->page = NULL;
782 } else if (meta->callback)
783 meta->callback(priv, cmd, pkt);
784
785 spin_lock_irqsave(&priv->hcmd_lock, flags);
786
787 iwl_hcmd_queue_reclaim(priv, txq_id, index);
788
789 if (!(meta->flags & CMD_ASYNC)) {
790 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
791 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
792 get_cmd_string(cmd->hdr.cmd));
793 wake_up(&priv->wait_command_queue);
794 }
795
796 meta->flags = 0;
797
798 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
799}
800
801const char *get_cmd_string(u8 cmd)
802{
803 switch (cmd) {
804 IWL_CMD(REPLY_ALIVE);
805 IWL_CMD(REPLY_ERROR);
806 IWL_CMD(REPLY_RXON);
807 IWL_CMD(REPLY_RXON_ASSOC);
808 IWL_CMD(REPLY_QOS_PARAM);
809 IWL_CMD(REPLY_RXON_TIMING);
810 IWL_CMD(REPLY_ADD_STA);
811 IWL_CMD(REPLY_REMOVE_STA);
812 IWL_CMD(REPLY_REMOVE_ALL_STA);
813 IWL_CMD(REPLY_TXFIFO_FLUSH);
814 IWL_CMD(REPLY_WEPKEY);
815 IWL_CMD(REPLY_TX);
816 IWL_CMD(REPLY_LEDS_CMD);
817 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
818 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
819 IWL_CMD(COEX_MEDIUM_NOTIFICATION);
820 IWL_CMD(COEX_EVENT_CMD);
821 IWL_CMD(REPLY_QUIET_CMD);
822 IWL_CMD(REPLY_CHANNEL_SWITCH);
823 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
824 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
825 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
826 IWL_CMD(POWER_TABLE_CMD);
827 IWL_CMD(PM_SLEEP_NOTIFICATION);
828 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
829 IWL_CMD(REPLY_SCAN_CMD);
830 IWL_CMD(REPLY_SCAN_ABORT_CMD);
831 IWL_CMD(SCAN_START_NOTIFICATION);
832 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
833 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
834 IWL_CMD(BEACON_NOTIFICATION);
835 IWL_CMD(REPLY_TX_BEACON);
836 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
837 IWL_CMD(QUIET_NOTIFICATION);
838 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
839 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
840 IWL_CMD(REPLY_BT_CONFIG);
841 IWL_CMD(REPLY_STATISTICS_CMD);
842 IWL_CMD(STATISTICS_NOTIFICATION);
843 IWL_CMD(REPLY_CARD_STATE_CMD);
844 IWL_CMD(CARD_STATE_NOTIFICATION);
845 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
846 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
847 IWL_CMD(SENSITIVITY_CMD);
848 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
849 IWL_CMD(REPLY_RX_PHY_CMD);
850 IWL_CMD(REPLY_RX_MPDU_CMD);
851 IWL_CMD(REPLY_RX);
852 IWL_CMD(REPLY_COMPRESSED_BA);
853 IWL_CMD(CALIBRATION_CFG_CMD);
854 IWL_CMD(CALIBRATION_RES_NOTIFICATION);
855 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
856 IWL_CMD(REPLY_TX_POWER_DBM_CMD);
857 IWL_CMD(TEMPERATURE_NOTIFICATION);
858 IWL_CMD(TX_ANT_CONFIGURATION_CMD);
859 IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
860 IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
861 IWL_CMD(REPLY_BT_COEX_PROT_ENV);
862 IWL_CMD(REPLY_WIPAN_PARAMS);
863 IWL_CMD(REPLY_WIPAN_RXON);
864 IWL_CMD(REPLY_WIPAN_RXON_TIMING);
865 IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
866 IWL_CMD(REPLY_WIPAN_QOS_PARAM);
867 IWL_CMD(REPLY_WIPAN_WEPKEY);
868 IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
869 IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
870 IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
871 IWL_CMD(REPLY_WOWLAN_PATTERNS);
872 IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
873 IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
874 IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
875 IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
876 IWL_CMD(REPLY_WOWLAN_GET_STATUS);
877 default:
878 return "UNKNOWN";
879
880 }
881}
882
883#define HOST_COMPLETE_TIMEOUT (2 * HZ)
884
885static void iwl_generic_cmd_callback(struct iwl_priv *priv,
886 struct iwl_device_cmd *cmd,
887 struct iwl_rx_packet *pkt)
888{
889 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
890 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
891 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
892 return;
893 }
894
895#ifdef CONFIG_IWLWIFI_DEBUG
896 switch (cmd->hdr.cmd) {
897 case REPLY_TX_LINK_QUALITY_CMD:
898 case SENSITIVITY_CMD:
899 IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
900 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
901 break;
902 default:
903 IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
904 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
905 }
906#endif
907}
908
909static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
910{
911 int ret;
912
913 /* An asynchronous command can not expect an SKB to be set. */
914 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
915 return -EINVAL;
916
917 /* Assign a generic callback if one is not provided */
918 if (!cmd->callback)
919 cmd->callback = iwl_generic_cmd_callback;
920
921 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
922 return -EBUSY;
923
924 ret = iwl_enqueue_hcmd(priv, cmd);
925 if (ret < 0) {
926 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
927 get_cmd_string(cmd->id), ret);
928 return ret;
929 }
930 return 0;
931}
932
933static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
934{
935 int cmd_idx;
936 int ret;
937
938 lockdep_assert_held(&priv->mutex);
939
940 /* A synchronous command can not have a callback set. */
941 if (WARN_ON(cmd->callback))
942 return -EINVAL;
943
944 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
945 get_cmd_string(cmd->id));
946
947 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
948 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
949 get_cmd_string(cmd->id));
950
951 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
952 if (cmd_idx < 0) {
953 ret = cmd_idx;
954 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
955 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
956 get_cmd_string(cmd->id), ret);
957 return ret;
958 }
959
960 ret = wait_event_timeout(priv->wait_command_queue,
961 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
962 HOST_COMPLETE_TIMEOUT);
963 if (!ret) {
964 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
965 IWL_ERR(priv,
966 "Error sending %s: time out after %dms.\n",
967 get_cmd_string(cmd->id),
968 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
969
970 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
971			IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command "
972 "%s\n", get_cmd_string(cmd->id));
973 ret = -ETIMEDOUT;
974 goto cancel;
975 }
976 }
977
978 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
979 IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
980 get_cmd_string(cmd->id));
981 ret = -ECANCELED;
982 goto fail;
983 }
984 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
985 IWL_ERR(priv, "Command %s failed: FW Error\n",
986 get_cmd_string(cmd->id));
987 ret = -EIO;
988 goto fail;
989 }
990 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
991 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
992 get_cmd_string(cmd->id));
993 ret = -EIO;
994 goto cancel;
995 }
996
997 return 0;
998
999cancel:
1000 if (cmd->flags & CMD_WANT_SKB) {
1001 /*
1002 * Cancel the CMD_WANT_SKB flag for the cmd in the
1003 * TX cmd queue. Otherwise in case the cmd comes
1004 * in later, it will possibly set an invalid
1005 * address (cmd->meta.source).
1006 */
1007 priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
1008 ~CMD_WANT_SKB;
1009 }
1010fail:
1011 if (cmd->reply_page) {
1012 iwl_free_pages(priv, cmd->reply_page);
1013 cmd->reply_page = 0;
1014 }
1015
1016 return ret;
1017}
1018
1019int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1020{
1021 if (cmd->flags & CMD_ASYNC)
1022 return iwl_send_cmd_async(priv, cmd);
1023
1024 return iwl_send_cmd_sync(priv, cmd);
1025}
1026
1027int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
1028 const void *data)
1029{
1030 struct iwl_host_cmd cmd = {
1031 .id = id,
1032 .len = { len, },
1033 .data = { data, },
1034 .flags = flags,
1035 };
1036
1037 return iwl_send_cmd(priv, &cmd);
1038}
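/*
 * Illustrative usage (not part of the original source): a typical caller
 * builds a payload and sends it synchronously, e.g.
 *
 *	struct iwl_bt_cmd bt_cmd = { ... };
 *
 *	ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, CMD_SYNC,
 *			       sizeof(bt_cmd), &bt_cmd);
 *
 * struct iwl_bt_cmd and CMD_SYNC are assumed from elsewhere in the driver;
 * asynchronous callers pass CMD_ASYNC and may supply their own callback.
 */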
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
new file mode 100644
index 00000000000..32eb4fe0432
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -0,0 +1,1170 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include "iwl-dev.h"
64#include "iwl-trans.h"
65#include "iwl-core.h"
66#include "iwl-helpers.h"
67#include "iwl-trans-int-pcie.h"
68/*TODO remove unneeded includes when the transport layer tx_free is here */
69#include "iwl-agn.h"
70#include "iwl-core.h"
71
72static int iwl_trans_rx_alloc(struct iwl_priv *priv)
73{
74 struct iwl_rx_queue *rxq = &priv->rxq;
75 struct device *dev = priv->bus->dev;
76
77 memset(&priv->rxq, 0, sizeof(priv->rxq));
78
79 spin_lock_init(&rxq->lock);
80 INIT_LIST_HEAD(&rxq->rx_free);
81 INIT_LIST_HEAD(&rxq->rx_used);
82
83 if (WARN_ON(rxq->bd || rxq->rb_stts))
84 return -EINVAL;
85
86 /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
87 rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
88 &rxq->bd_dma, GFP_KERNEL);
89 if (!rxq->bd)
90 goto err_bd;
91 memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
92
93 /*Allocate the driver's pointer to receive buffer status */
94 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
95 &rxq->rb_stts_dma, GFP_KERNEL);
96 if (!rxq->rb_stts)
97 goto err_rb_stts;
98 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
99
100 return 0;
101
102err_rb_stts:
103 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
104 rxq->bd, rxq->bd_dma);
105 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
106 rxq->bd = NULL;
107err_bd:
108 return -ENOMEM;
109}
110
111static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
112{
113 struct iwl_rx_queue *rxq = &priv->rxq;
114 int i;
115
116 /* Fill the rx_used queue with _all_ of the Rx buffers */
117 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
118 /* In the reset function, these buffers may have been allocated
119 * to an SKB, so we need to unmap and free potential storage */
120 if (rxq->pool[i].page != NULL) {
121 dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
122 PAGE_SIZE << priv->hw_params.rx_page_order,
123 DMA_FROM_DEVICE);
124 __iwl_free_pages(priv, rxq->pool[i].page);
125 rxq->pool[i].page = NULL;
126 }
127 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
128 }
129}
130
131static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
132 struct iwl_rx_queue *rxq)
133{
134 u32 rb_size;
135 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
136 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
137
138 rb_timeout = RX_RB_TIMEOUT;
139
140 if (iwlagn_mod_params.amsdu_size_8K)
141 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
142 else
143 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
144
145 /* Stop Rx DMA */
146 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
147
148 /* Reset driver's Rx queue write index */
149 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
150
151 /* Tell device where to find RBD circular buffer in DRAM */
152 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
153 (u32)(rxq->bd_dma >> 8));
154
155 /* Tell device where in DRAM to update its Rx status */
156 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
157 rxq->rb_stts_dma >> 4);
158
159 /* Enable Rx DMA
160 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
161 * the credit mechanism in 5000 HW RX FIFO
162 * Direct rx interrupts to hosts
163 * Rx buffer size 4 or 8k
164 * RB timeout 0x10
165 * 256 RBDs
166 */
167 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
168 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
169 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
170 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
171 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
172 rb_size|
173 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
174 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
175
176 /* Set interrupt coalescing timer to default (2048 usecs) */
177 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
178}
179
180static int iwl_rx_init(struct iwl_priv *priv)
181{
182 struct iwl_rx_queue *rxq = &priv->rxq;
183 int i, err;
184 unsigned long flags;
185
186 if (!rxq->bd) {
187 err = iwl_trans_rx_alloc(priv);
188 if (err)
189 return err;
190 }
191
192 spin_lock_irqsave(&rxq->lock, flags);
193 INIT_LIST_HEAD(&rxq->rx_free);
194 INIT_LIST_HEAD(&rxq->rx_used);
195
196 iwl_trans_rxq_free_rx_bufs(priv);
197
198 for (i = 0; i < RX_QUEUE_SIZE; i++)
199 rxq->queue[i] = NULL;
200
201 /* Set us so that we have processed and used all buffers, but have
202 * not restocked the Rx queue with fresh buffers */
203 rxq->read = rxq->write = 0;
204 rxq->write_actual = 0;
205 rxq->free_count = 0;
206 spin_unlock_irqrestore(&rxq->lock, flags);
207
208 iwlagn_rx_replenish(priv);
209
210 iwl_trans_rx_hw_init(priv, rxq);
211
212 spin_lock_irqsave(&priv->lock, flags);
213 rxq->need_update = 1;
214 iwl_rx_queue_update_write_ptr(priv, rxq);
215 spin_unlock_irqrestore(&priv->lock, flags);
216
217 return 0;
218}
219
220static void iwl_trans_rx_free(struct iwl_priv *priv)
221{
222 struct iwl_rx_queue *rxq = &priv->rxq;
223 unsigned long flags;
224
225 /*if rxq->bd is NULL, it means that nothing has been allocated,
226 * exit now */
227 if (!rxq->bd) {
228 IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
229 return;
230 }
231
232 spin_lock_irqsave(&rxq->lock, flags);
233 iwl_trans_rxq_free_rx_bufs(priv);
234 spin_unlock_irqrestore(&rxq->lock, flags);
235
236 dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
237 rxq->bd, rxq->bd_dma);
238 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
239 rxq->bd = NULL;
240
241 if (rxq->rb_stts)
242 dma_free_coherent(priv->bus->dev,
243 sizeof(struct iwl_rb_status),
244 rxq->rb_stts, rxq->rb_stts_dma);
245 else
246 IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
247 memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
248 rxq->rb_stts = NULL;
249}
250
251static int iwl_trans_rx_stop(struct iwl_priv *priv)
252{
253
254 /* stop Rx DMA */
255 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
256 return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
257 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
258}
259
260static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
261 struct iwl_dma_ptr *ptr, size_t size)
262{
263 if (WARN_ON(ptr->addr))
264 return -EINVAL;
265
266 ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
267 &ptr->dma, GFP_KERNEL);
268 if (!ptr->addr)
269 return -ENOMEM;
270 ptr->size = size;
271 return 0;
272}
273
274static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
275 struct iwl_dma_ptr *ptr)
276{
277 if (unlikely(!ptr->addr))
278 return;
279
280 dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
281 memset(ptr, 0, sizeof(*ptr));
282}
283
284static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
285 int slots_num, u32 txq_id)
286{
287 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
288 int i;
289
290 if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
291 return -EINVAL;
292
293 txq->q.n_window = slots_num;
294
295 txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
296 GFP_KERNEL);
297 txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
298 GFP_KERNEL);
299
300 if (!txq->meta || !txq->cmd)
301 goto error;
302
303 for (i = 0; i < slots_num; i++) {
304 txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
305 GFP_KERNEL);
306 if (!txq->cmd[i])
307 goto error;
308 }
309
310 /* Alloc driver data array and TFD circular buffer */
311 /* Driver private data, only for Tx (not command) queues,
312 * not shared with device. */
313 if (txq_id != priv->cmd_queue) {
314 txq->txb = kzalloc(sizeof(txq->txb[0]) *
315 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
316 if (!txq->txb) {
317 IWL_ERR(priv, "kmalloc for auxiliary BD "
318 "structures failed\n");
319 goto error;
320 }
321 } else {
322 txq->txb = NULL;
323 }
324
325 /* Circular buffer of transmit frame descriptors (TFDs),
326 * shared with device */
327 txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
328 GFP_KERNEL);
329 if (!txq->tfds) {
330 IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
331 goto error;
332 }
333 txq->q.id = txq_id;
334
335 return 0;
336error:
337 kfree(txq->txb);
338 txq->txb = NULL;
339 /* since txq->cmd has been zeroed,
340 * all non allocated cmd[i] will be NULL */
341 if (txq->cmd)
342 for (i = 0; i < slots_num; i++)
343 kfree(txq->cmd[i]);
344 kfree(txq->meta);
345 kfree(txq->cmd);
346 txq->meta = NULL;
347 txq->cmd = NULL;
348
349 return -ENOMEM;
350
351}
352
353static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
354 int slots_num, u32 txq_id)
355{
356 int ret;
357
358 txq->need_update = 0;
359 memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
360
361 /*
362 * For the default queues 0-3, set up the swq_id
363 * already -- all others need to get one later
364 * (if they need one at all).
365 */
366 if (txq_id < 4)
367 iwl_set_swq_id(txq, txq_id, txq_id);
368
369 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
370 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
371 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
372
373 /* Initialize queue's high/low-water marks, and head/tail indexes */
374 ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
375 txq_id);
376 if (ret)
377 return ret;
378
379 /*
380 * Tell nic where to find circular buffer of Tx Frame Descriptors for
381 * given Tx queue, and enable the DMA channel used for that queue.
382 * Circular buffer (TFD queue in DRAM) physical base address */
383 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
384 txq->q.dma_addr >> 8);
385
386 return 0;
387}
388
389/**
390 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
391 */
392static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
393{
394 struct iwl_tx_queue *txq = &priv->txq[txq_id];
395 struct iwl_queue *q = &txq->q;
396
397 if (!q->n_bd)
398 return;
399
400 while (q->write_ptr != q->read_ptr) {
401		/* The read_ptr needs to be bounded by q->n_window */
402 iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
403 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
404 }
405}
406
407/**
408 * iwl_tx_queue_free - Deallocate DMA queue.
409 * @txq: Transmit queue to deallocate.
410 *
411 * Empty queue by removing and destroying all BD's.
412 * Free all buffers.
413 * 0-fill, but do not free "txq" descriptor structure.
414 */
415static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
416{
417 struct iwl_tx_queue *txq = &priv->txq[txq_id];
418 struct device *dev = priv->bus->dev;
419 int i;
420 if (WARN_ON(!txq))
421 return;
422
423 iwl_tx_queue_unmap(priv, txq_id);
424
425 /* De-alloc array of command/tx buffers */
426 for (i = 0; i < txq->q.n_window; i++)
427 kfree(txq->cmd[i]);
428
429 /* De-alloc circular buffer of TFDs */
430 if (txq->q.n_bd) {
431 dma_free_coherent(dev, priv->hw_params.tfd_size *
432 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
433 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
434 }
435
436 /* De-alloc array of per-TFD driver data */
437 kfree(txq->txb);
438 txq->txb = NULL;
439
440 /* deallocate arrays */
441 kfree(txq->cmd);
442 kfree(txq->meta);
443 txq->cmd = NULL;
444 txq->meta = NULL;
445
446 /* 0-fill queue descriptor structure */
447 memset(txq, 0, sizeof(*txq));
448}
449
450/**
451 * iwl_trans_tx_free - Free TXQ Context
452 *
453 * Destroy all TX DMA queues and structures
454 */
455static void iwl_trans_tx_free(struct iwl_priv *priv)
456{
457 int txq_id;
458
459 /* Tx queues */
460 if (priv->txq) {
461 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
462 iwl_tx_queue_free(priv, txq_id);
463 }
464
465 kfree(priv->txq);
466 priv->txq = NULL;
467
468 iwlagn_free_dma_ptr(priv, &priv->kw);
469
470 iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
471}
472
473/**
474 * iwl_trans_tx_alloc - allocate TX context
475 * Allocate all Tx DMA structures and initialize them
476 *
477 * @priv: device private data
478 * @return: error code
479 */
480static int iwl_trans_tx_alloc(struct iwl_priv *priv)
481{
482 int ret;
483 int txq_id, slots_num;
484
485 /*It is not allowed to alloc twice, so warn when this happens.
486 * We cannot rely on the previous allocation, so free and fail */
487 if (WARN_ON(priv->txq)) {
488 ret = -EINVAL;
489 goto error;
490 }
491
492 ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
493 priv->hw_params.scd_bc_tbls_size);
494 if (ret) {
495 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
496 goto error;
497 }
498
499 /* Alloc keep-warm buffer */
500 ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
501 if (ret) {
502 IWL_ERR(priv, "Keep Warm allocation failed\n");
503 goto error;
504 }
505
506 priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
507 priv->cfg->base_params->num_of_queues, GFP_KERNEL);
508 if (!priv->txq) {
509 IWL_ERR(priv, "Not enough memory for txq\n");
510		ret = -ENOMEM;
511 goto error;
512 }
513
514 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
515 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
516 slots_num = (txq_id == priv->cmd_queue) ?
517 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
518 ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
519 txq_id);
520 if (ret) {
521 IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
522 goto error;
523 }
524 }
525
526 return 0;
527
528error:
529 trans_tx_free(&priv->trans);
530
531 return ret;
532}
533static int iwl_tx_init(struct iwl_priv *priv)
534{
535 int ret;
536 int txq_id, slots_num;
537 unsigned long flags;
538 bool alloc = false;
539
540 if (!priv->txq) {
541 ret = iwl_trans_tx_alloc(priv);
542 if (ret)
543 goto error;
544 alloc = true;
545 }
546
547 spin_lock_irqsave(&priv->lock, flags);
548
549 /* Turn off all Tx DMA fifos */
550 iwl_write_prph(priv, SCD_TXFACT, 0);
551
552 /* Tell NIC where to find the "keep warm" buffer */
553 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
554
555 spin_unlock_irqrestore(&priv->lock, flags);
556
557 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
558 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
559 slots_num = (txq_id == priv->cmd_queue) ?
560 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
561 ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
562 txq_id);
563 if (ret) {
564 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
565 goto error;
566 }
567 }
568
569 return 0;
570error:
571 /*Upon error, free only if we allocated something */
572 if (alloc)
573 trans_tx_free(&priv->trans);
574 return ret;
575}
576
577static void iwl_set_pwr_vmain(struct iwl_priv *priv)
578{
579/*
580 * (for documentation purposes)
581 * to set power to V_AUX, do:
582
583 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
584 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
585 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
586 ~APMG_PS_CTRL_MSK_PWR_SRC);
587 */
588
589 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
590 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
591 ~APMG_PS_CTRL_MSK_PWR_SRC);
592}
593
594static int iwl_nic_init(struct iwl_priv *priv)
595{
596 unsigned long flags;
597
598 /* nic_init */
599 spin_lock_irqsave(&priv->lock, flags);
600 iwl_apm_init(priv);
601
602 /* Set interrupt coalescing calibration timer to default (512 usecs) */
603 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
604
605 spin_unlock_irqrestore(&priv->lock, flags);
606
607 iwl_set_pwr_vmain(priv);
608
609 priv->cfg->lib->nic_config(priv);
610
611 /* Allocate the RX queue, or reset if it is already allocated */
612 iwl_rx_init(priv);
613
614 /* Allocate or reset and init all Tx and Command queues */
615 if (iwl_tx_init(priv))
616 return -ENOMEM;
617
618 if (priv->cfg->base_params->shadow_reg_enable) {
619 /* enable shadow regs in HW */
620 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
621 0x800FFFFF);
622 }
623
624 set_bit(STATUS_INIT, &priv->status);
625
626 return 0;
627}
628
629#define HW_READY_TIMEOUT (50)
630
631/* Note: returns poll_bit return value, which is >= 0 if success */
632static int iwl_set_hw_ready(struct iwl_priv *priv)
633{
634 int ret;
635
636 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
637 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
638
639 /* See if we got it */
640 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
641 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
642 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
643 HW_READY_TIMEOUT);
644
645 IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
646 return ret;
647}
648
649/* Note: returns standard 0/-ERROR code */
650static int iwl_trans_prepare_card_hw(struct iwl_priv *priv)
651{
652 int ret;
653
654 IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");
655
656 ret = iwl_set_hw_ready(priv);
657 if (ret >= 0)
658 return 0;
659
660 /* If HW is not ready, prepare the conditions to check again */
661 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
662 CSR_HW_IF_CONFIG_REG_PREPARE);
663
664 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
665 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
666 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
667
668 if (ret < 0)
669 return ret;
670
671 /* HW should be ready by now, check again. */
672 ret = iwl_set_hw_ready(priv);
673 if (ret >= 0)
674 return 0;
675 return ret;
676}
677
678static int iwl_trans_start_device(struct iwl_priv *priv)
679{
680 int ret;
681
682 priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
683
684 if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
685 iwl_trans_prepare_card_hw(priv)) {
686 IWL_WARN(priv, "Exit HW not ready\n");
687 return -EIO;
688 }
689
690 /* If platform's RF_KILL switch is NOT set to KILL */
691 if (iwl_read32(priv, CSR_GP_CNTRL) &
692 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
693 clear_bit(STATUS_RF_KILL_HW, &priv->status);
694 else
695 set_bit(STATUS_RF_KILL_HW, &priv->status);
696
697 if (iwl_is_rfkill(priv)) {
698 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
699 iwl_enable_interrupts(priv);
700 return -ERFKILL;
701 }
702
703 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
704
705 ret = iwl_nic_init(priv);
706 if (ret) {
707 IWL_ERR(priv, "Unable to init nic\n");
708 return ret;
709 }
710
711 /* make sure rfkill handshake bits are cleared */
712 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
713 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
714 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
715
716 /* clear (again), then enable host interrupts */
717 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
718 iwl_enable_interrupts(priv);
719
720 /* really make sure rfkill handshake bits are cleared */
721 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
722 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
723
724 return 0;
725}
726
727/*
728 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask.
729 * Must be called under priv->lock and with mac access granted.
730 */
731static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
732{
733 iwl_write_prph(priv, SCD_TXFACT, mask);
734}
735
736#define IWL_AC_UNSET -1
737
738struct queue_to_fifo_ac {
739 s8 fifo, ac;
740};
741
742static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
743 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
744 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
745 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
746 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
747 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
748 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
749 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
750 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
751 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
752 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
753};
754
755static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
756 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
757 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
758 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
759 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
760 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
761 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
762 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
763 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
764 { IWL_TX_FIFO_BE_IPAN, 2, },
765 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
766};
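/*
 * The default table above is used when only the BSS context is active;
 * iwl_trans_tx_start() below selects the IPAN table once additional (PAN)
 * contexts are in use, mapping the extra queues onto the *_IPAN FIFOs.
 */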
767static void iwl_trans_tx_start(struct iwl_priv *priv)
768{
769 const struct queue_to_fifo_ac *queue_to_fifo;
770 struct iwl_rxon_context *ctx;
771 u32 a;
772 unsigned long flags;
773 int i, chan;
774 u32 reg_val;
775
776 spin_lock_irqsave(&priv->lock, flags);
777
778 priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
779 a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
780	/* reset context data memory */
781 for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
782 a += 4)
783 iwl_write_targ_mem(priv, a, 0);
784 /* reset tx status memory */
785 for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
786 a += 4)
787 iwl_write_targ_mem(priv, a, 0);
788 for (; a < priv->scd_base_addr +
789 SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
790 iwl_write_targ_mem(priv, a, 0);
791
792 iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
793 priv->scd_bc_tbls.dma >> 10);
794
795 /* Enable DMA channel */
796 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
797 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
798 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
799 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
800
801 /* Update FH chicken bits */
802 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
803 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
804 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
805
806 iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
807 SCD_QUEUECHAIN_SEL_ALL(priv));
808 iwl_write_prph(priv, SCD_AGGR_SEL, 0);
809
810 /* initiate the queues */
811 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
812 iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
813 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
814 iwl_write_targ_mem(priv, priv->scd_base_addr +
815 SCD_CONTEXT_QUEUE_OFFSET(i), 0);
816 iwl_write_targ_mem(priv, priv->scd_base_addr +
817 SCD_CONTEXT_QUEUE_OFFSET(i) +
818 sizeof(u32),
819 ((SCD_WIN_SIZE <<
820 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
821 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
822 ((SCD_FRAME_LIMIT <<
823 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
824 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
825 }
826
827 iwl_write_prph(priv, SCD_INTERRUPT_MASK,
828 IWL_MASK(0, priv->hw_params.max_txq_num));
829
830 /* Activate all Tx DMA/FIFO channels */
831 iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
832
833 /* map queues to FIFOs */
834 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
835 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
836 else
837 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
838
839 iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0);
840
841	/* make sure all queues are not stopped */
842 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
843 for (i = 0; i < 4; i++)
844 atomic_set(&priv->queue_stop_count[i], 0);
845 for_each_context(priv, ctx)
846 ctx->last_tx_rejected = false;
847
848	/* reset to 0 to enable all the queues first */
849 priv->txq_ctx_active_msk = 0;
850
851 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
852 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
853
854 for (i = 0; i < 10; i++) {
855 int fifo = queue_to_fifo[i].fifo;
856 int ac = queue_to_fifo[i].ac;
857
858 iwl_txq_ctx_activate(priv, i);
859
860 if (fifo == IWL_TX_FIFO_UNUSED)
861 continue;
862
863 if (ac != IWL_AC_UNSET)
864 iwl_set_swq_id(&priv->txq[i], ac, i);
865 iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
866 }
867
868 spin_unlock_irqrestore(&priv->lock, flags);
869
870 /* Enable L1-Active */
871 iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
872 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
873}
874
875/**
876 * iwl_trans_tx_stop - Stop all Tx DMA channels
877 */
878static int iwl_trans_tx_stop(struct iwl_priv *priv)
879{
880 int ch, txq_id;
881 unsigned long flags;
882
883 /* Turn off all Tx DMA fifos */
884 spin_lock_irqsave(&priv->lock, flags);
885
886 iwl_trans_txq_set_sched(priv, 0);
887
888 /* Stop each Tx DMA channel, and wait for it to be idle */
889 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
890 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
891 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
892 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
893 1000))
894			IWL_ERR(priv, "Timed out stopping DMA channel %d "
895				"[0x%08x]\n", ch,
896				iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
897 }
898 spin_unlock_irqrestore(&priv->lock, flags);
899
900 if (!priv->txq) {
901 IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
902 return 0;
903 }
904
905 /* Unmap DMA from host system and free skb's */
906 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
907 iwl_tx_queue_unmap(priv, txq_id);
908
909 return 0;
910}
911
912static void iwl_trans_stop_device(struct iwl_priv *priv)
913{
914 unsigned long flags;
915
916 /* stop and reset the on-board processor */
917 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
918
919 /* tell the device to stop sending interrupts */
920 spin_lock_irqsave(&priv->lock, flags);
921 iwl_disable_interrupts(priv);
922 spin_unlock_irqrestore(&priv->lock, flags);
923 trans_sync_irq(&priv->trans);
924
925	/* device going down, stop using ICT table */
926 iwl_disable_ict(priv);
927
928 /*
929 * If a HW restart happens during firmware loading,
930 * then the firmware loading might call this function
931 * and later it might be called again due to the
932 * restart. So don't process again if the device is
933 * already dead.
934 */
935 if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
936 iwl_trans_tx_stop(priv);
937 iwl_trans_rx_stop(priv);
938
939 /* Power-down device's busmaster DMA clocks */
940 iwl_write_prph(priv, APMG_CLK_DIS_REG,
941 APMG_CLK_VAL_DMA_CLK_RQT);
942 udelay(5);
943 }
944
945	/* Make sure (redundantly) we've released our request to stay awake */
946 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
947
948 /* Stop the device, and put it in low power state */
949 iwl_apm_stop(priv);
950}
951
952static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
953 int txq_id)
954{
955 struct iwl_tx_queue *txq = &priv->txq[txq_id];
956 struct iwl_queue *q = &txq->q;
957 struct iwl_device_cmd *dev_cmd;
958
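	/*
	 * Refuse a new Tx command once free ring space drops below
	 * q->high_mark; the same threshold is what later stops the
	 * mac80211 queue (see iwl_stop_queue() in iwl_trans_tx() below).
	 */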
959 if (unlikely(iwl_queue_space(q) < q->high_mark))
960 return NULL;
961
962 /*
963 * Set up the Tx-command (not MAC!) header.
964 * Store the chosen Tx queue and TFD index within the sequence field;
965 * after Tx, uCode's Tx response will return this value so driver can
966 * locate the frame within the tx queue and do post-tx processing.
967 */
968 dev_cmd = txq->cmd[q->write_ptr];
969 memset(dev_cmd, 0, sizeof(*dev_cmd));
970 dev_cmd->hdr.cmd = REPLY_TX;
971 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
972 INDEX_TO_SEQ(q->write_ptr)));
973 return &dev_cmd->cmd.tx;
974}
975
976static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
977 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
978 struct iwl_rxon_context *ctx)
979{
980 struct iwl_tx_queue *txq = &priv->txq[txq_id];
981 struct iwl_queue *q = &txq->q;
982 struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
983 struct iwl_cmd_meta *out_meta;
984
985 dma_addr_t phys_addr = 0;
986 dma_addr_t txcmd_phys;
987 dma_addr_t scratch_phys;
988 u16 len, firstlen, secondlen;
989 u8 wait_write_ptr = 0;
990 u8 hdr_len = ieee80211_hdrlen(fc);
991
992 /* Set up driver data for this TFD */
993 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
994 txq->txb[q->write_ptr].skb = skb;
995 txq->txb[q->write_ptr].ctx = ctx;
996
997 /* Set up first empty entry in queue's array of Tx/cmd buffers */
998 out_meta = &txq->meta[q->write_ptr];
999
1000 /*
1001 * Use the first empty entry in this queue's command buffer array
1002 * to contain the Tx command and MAC header concatenated together
1003 * (payload data will be in another buffer).
1004 * Size of this varies, due to varying MAC header length.
1005 * If end is not dword aligned, we'll have 2 extra bytes at the end
1006 * of the MAC header (device reads on dword boundaries).
1007 * We'll tell device about this padding later.
1008 */
1009 len = sizeof(struct iwl_tx_cmd) +
1010 sizeof(struct iwl_cmd_header) + hdr_len;
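	/*
	 * Round len up to the next dword boundary, e.g. 334 -> 336; any
	 * pad bytes are reported to the device via TX_CMD_FLG_MH_PAD_MSK
	 * just below.
	 */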
1011 firstlen = (len + 3) & ~3;
1012
1013 /* Tell NIC about any 2-byte padding after MAC header */
1014 if (firstlen != len)
1015 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1016
1017 /* Physical address of this Tx command's header (not MAC header!),
1018 * within command buffer array. */
1019 txcmd_phys = dma_map_single(priv->bus->dev,
1020 &dev_cmd->hdr, firstlen,
1021 DMA_BIDIRECTIONAL);
1022 if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
1023 return -1;
1024 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1025 dma_unmap_len_set(out_meta, len, firstlen);
1026
1027 if (!ieee80211_has_morefrags(fc)) {
1028 txq->need_update = 1;
1029 } else {
1030 wait_write_ptr = 1;
1031 txq->need_update = 0;
1032 }
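	/*
	 * For a fragmented frame the doorbell is deferred (wait_write_ptr):
	 * the write pointer is only pushed once the queue is close to full,
	 * or by a later frame that sets need_update.
	 */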
1033
1034 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1035 * if any (802.11 null frames have no payload). */
1036 secondlen = skb->len - hdr_len;
1037 if (secondlen > 0) {
1038 phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
1039 secondlen, DMA_TO_DEVICE);
1040 if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
1041 dma_unmap_single(priv->bus->dev,
1042 dma_unmap_addr(out_meta, mapping),
1043 dma_unmap_len(out_meta, len),
1044 DMA_BIDIRECTIONAL);
1045 return -1;
1046 }
1047 }
1048
1049 /* Attach buffers to TFD */
1050 iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
1051 if (secondlen > 0)
1052 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
1053 secondlen, 0);
1054
1055 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1056 offsetof(struct iwl_tx_cmd, scratch);
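	/*
	 * scratch_phys is the DMA address of the scratch field inside the
	 * Tx command mapped above; it is handed to the device through the
	 * dram_lsb_ptr/dram_msb_ptr fields set just below.
	 */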
1057
1058 /* take back ownership of DMA buffer to enable update */
1059 dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
1060 DMA_BIDIRECTIONAL);
1061 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1062 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1063
1064 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
1065 le16_to_cpu(dev_cmd->hdr.sequence));
1066 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1067 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1068 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1069
1070 /* Set up entry for this TFD in Tx byte-count array */
1071 iwl_trans_txq_update_byte_cnt_tbl(priv, txq, le16_to_cpu(tx_cmd->len));
1072
1073 dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
1074 DMA_BIDIRECTIONAL);
1075
1076 trace_iwlwifi_dev_tx(priv,
1077 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
1078 sizeof(struct iwl_tfd),
1079 &dev_cmd->hdr, firstlen,
1080 skb->data + hdr_len, secondlen);
1081
1082 /* Tell device the write index *just past* this latest filled TFD */
1083 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1084 iwl_txq_update_write_ptr(priv, txq);
1085
1086 /*
1087 * At this point the frame is "transmitted" successfully
1088	 * and we will get a TX status notification eventually.
1089	 * All that is left to decide is whether the write pointer
1090	 * needs to be pushed to the device now or can be deferred.
1091 */
1092 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
1093 if (wait_write_ptr) {
1094 txq->need_update = 1;
1095 iwl_txq_update_write_ptr(priv, txq);
1096 } else {
1097 iwl_stop_queue(priv, txq);
1098 }
1099 }
1100 return 0;
1101}
1102
1103static void iwl_trans_kick_nic(struct iwl_priv *priv)
1104{
1105 /* Remove all resets to allow NIC to operate */
1106 iwl_write32(priv, CSR_RESET, 0);
1107}
1108
1109static void iwl_trans_sync_irq(struct iwl_priv *priv)
1110{
1111	/* wait to make sure we flush the pending tasklet */
1112 synchronize_irq(priv->bus->irq);
1113 tasklet_kill(&priv->irq_tasklet);
1114}
1115
1116static void iwl_trans_free(struct iwl_priv *priv)
1117{
1118 free_irq(priv->bus->irq, priv);
1119 iwl_free_isr_ict(priv);
1120}
1121
1122static const struct iwl_trans_ops trans_ops = {
1123 .start_device = iwl_trans_start_device,
1124 .prepare_card_hw = iwl_trans_prepare_card_hw,
1125 .stop_device = iwl_trans_stop_device,
1126
1127 .tx_start = iwl_trans_tx_start,
1128
1129 .rx_free = iwl_trans_rx_free,
1130 .tx_free = iwl_trans_tx_free,
1131
1132 .send_cmd = iwl_send_cmd,
1133 .send_cmd_pdu = iwl_send_cmd_pdu,
1134
1135 .get_tx_cmd = iwl_trans_get_tx_cmd,
1136 .tx = iwl_trans_tx,
1137
1138 .txq_agg_disable = iwl_trans_txq_agg_disable,
1139 .txq_agg_setup = iwl_trans_txq_agg_setup,
1140
1141 .kick_nic = iwl_trans_kick_nic,
1142
1143 .sync_irq = iwl_trans_sync_irq,
1144 .free = iwl_trans_free,
1145};
1146
1147int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv)
1148{
1149 int err;
1150
1151 priv->trans.ops = &trans_ops;
1152 priv->trans.priv = priv;
1153
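	/*
	 * Interrupt plumbing: the shared IRQ is serviced through the ICT
	 * (interrupt cause table) handler, which defers the real work to
	 * irq_tasklet; both are set up here before the IRQ is requested.
	 */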
1154 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
1155 iwl_irq_tasklet, (unsigned long)priv);
1156
1157 iwl_alloc_isr_ict(priv);
1158
1159 err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED,
1160 DRV_NAME, priv);
1161 if (err) {
1162 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
1163 iwl_free_isr_ict(priv);
1164 return err;
1165 }
1166
1167 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
1168
1169 return 0;
1170}