author     John W. Linville <linville@tuxdriver.com>  2010-04-15 16:21:34 -0400
committer  John W. Linville <linville@tuxdriver.com>  2010-04-15 16:21:34 -0400
commit     5c01d5669356e13f0fb468944c1dd4c6a7e978ad (patch)
tree       fa43345288d7b25fac92b3b35360a177c4947313 /drivers/net/wireless/iwlwifi
parent     fea069152614cdeefba4b2bf80afcddb9c217fc8 (diff)
parent     a5e944f1d955f3819503348426763e21e0413ba6 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem
Conflicts:
Documentation/feature-removal-schedule.txt
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/wl12xx/wl1271_main.c
Diffstat (limited to 'drivers/net/wireless/iwlwifi')
36 files changed, 3986 insertions, 3910 deletions
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index e31a5ccebea2..a684a72eb6e9 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -10,6 +10,8 @@ CFLAGS_iwl-devtrace.o := -I$(src) | |||
10 | # AGN | 10 | # AGN |
11 | obj-$(CONFIG_IWLAGN) += iwlagn.o | 11 | obj-$(CONFIG_IWLAGN) += iwlagn.o |
12 | iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o | 12 | iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o |
13 | iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o | ||
14 | iwlagn-objs += iwl-agn-lib.o | ||
13 | 15 | ||
14 | iwlagn-$(CONFIG_IWL4965) += iwl-4965.o | 16 | iwlagn-$(CONFIG_IWL4965) += iwl-4965.o |
15 | iwlagn-$(CONFIG_IWL5000) += iwl-5000.o | 17 | iwlagn-$(CONFIG_IWL5000) += iwl-5000.o |
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 9e392896005d..9a0191a5ea35 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -44,7 +44,7 @@ | |||
44 | #include "iwl-sta.h" | 44 | #include "iwl-sta.h" |
45 | #include "iwl-agn.h" | 45 | #include "iwl-agn.h" |
46 | #include "iwl-helpers.h" | 46 | #include "iwl-helpers.h" |
47 | #include "iwl-5000-hw.h" | 47 | #include "iwl-agn-hw.h" |
48 | #include "iwl-agn-led.h" | 48 | #include "iwl-agn-led.h" |
49 | 49 | ||
50 | /* Highest firmware API version supported */ | 50 | /* Highest firmware API version supported */ |
@@ -118,7 +118,7 @@ static struct iwl_sensitivity_ranges iwl1000_sensitivity = { | |||
118 | static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) | 118 | static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) |
119 | { | 119 | { |
120 | if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && | 120 | if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && |
121 | priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) | 121 | priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) |
122 | priv->cfg->num_of_queues = | 122 | priv->cfg->num_of_queues = |
123 | priv->cfg->mod_params->num_of_queues; | 123 | priv->cfg->mod_params->num_of_queues; |
124 | 124 | ||
@@ -126,13 +126,13 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) | |||
126 | priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; | 126 | priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; |
127 | priv->hw_params.scd_bc_tbls_size = | 127 | priv->hw_params.scd_bc_tbls_size = |
128 | priv->cfg->num_of_queues * | 128 | priv->cfg->num_of_queues * |
129 | sizeof(struct iwl5000_scd_bc_tbl); | 129 | sizeof(struct iwlagn_scd_bc_tbl); |
130 | priv->hw_params.tfd_size = sizeof(struct iwl_tfd); | 130 | priv->hw_params.tfd_size = sizeof(struct iwl_tfd); |
131 | priv->hw_params.max_stations = IWL5000_STATION_COUNT; | 131 | priv->hw_params.max_stations = IWL5000_STATION_COUNT; |
132 | priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; | 132 | priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; |
133 | 133 | ||
134 | priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; | 134 | priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; |
135 | priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE; | 135 | priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; |
136 | 136 | ||
137 | priv->hw_params.max_bsm_size = 0; | 137 | priv->hw_params.max_bsm_size = 0; |
138 | priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | | 138 | priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | |
@@ -162,25 +162,25 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) | |||
162 | 162 | ||
163 | static struct iwl_lib_ops iwl1000_lib = { | 163 | static struct iwl_lib_ops iwl1000_lib = { |
164 | .set_hw_params = iwl1000_hw_set_hw_params, | 164 | .set_hw_params = iwl1000_hw_set_hw_params, |
165 | .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, | 165 | .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, |
166 | .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, | 166 | .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, |
167 | .txq_set_sched = iwl5000_txq_set_sched, | 167 | .txq_set_sched = iwlagn_txq_set_sched, |
168 | .txq_agg_enable = iwl5000_txq_agg_enable, | 168 | .txq_agg_enable = iwlagn_txq_agg_enable, |
169 | .txq_agg_disable = iwl5000_txq_agg_disable, | 169 | .txq_agg_disable = iwlagn_txq_agg_disable, |
170 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, | 170 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, |
171 | .txq_free_tfd = iwl_hw_txq_free_tfd, | 171 | .txq_free_tfd = iwl_hw_txq_free_tfd, |
172 | .txq_init = iwl_hw_tx_queue_init, | 172 | .txq_init = iwl_hw_tx_queue_init, |
173 | .rx_handler_setup = iwl5000_rx_handler_setup, | 173 | .rx_handler_setup = iwlagn_rx_handler_setup, |
174 | .setup_deferred_work = iwl5000_setup_deferred_work, | 174 | .setup_deferred_work = iwlagn_setup_deferred_work, |
175 | .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, | 175 | .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, |
176 | .load_ucode = iwl5000_load_ucode, | 176 | .load_ucode = iwlagn_load_ucode, |
177 | .dump_nic_event_log = iwl_dump_nic_event_log, | 177 | .dump_nic_event_log = iwl_dump_nic_event_log, |
178 | .dump_nic_error_log = iwl_dump_nic_error_log, | 178 | .dump_nic_error_log = iwl_dump_nic_error_log, |
179 | .dump_csr = iwl_dump_csr, | 179 | .dump_csr = iwl_dump_csr, |
180 | .dump_fh = iwl_dump_fh, | 180 | .dump_fh = iwl_dump_fh, |
181 | .init_alive_start = iwl5000_init_alive_start, | 181 | .init_alive_start = iwlagn_init_alive_start, |
182 | .alive_notify = iwl5000_alive_notify, | 182 | .alive_notify = iwlagn_alive_notify, |
183 | .send_tx_power = iwl5000_send_tx_power, | 183 | .send_tx_power = iwlagn_send_tx_power, |
184 | .update_chain_flags = iwl_update_chain_flags, | 184 | .update_chain_flags = iwl_update_chain_flags, |
185 | .apm_ops = { | 185 | .apm_ops = { |
186 | .init = iwl_apm_init, | 186 | .init = iwl_apm_init, |
@@ -190,25 +190,25 @@ static struct iwl_lib_ops iwl1000_lib = { | |||
190 | }, | 190 | }, |
191 | .eeprom_ops = { | 191 | .eeprom_ops = { |
192 | .regulatory_bands = { | 192 | .regulatory_bands = { |
193 | EEPROM_5000_REG_BAND_1_CHANNELS, | 193 | EEPROM_REG_BAND_1_CHANNELS, |
194 | EEPROM_5000_REG_BAND_2_CHANNELS, | 194 | EEPROM_REG_BAND_2_CHANNELS, |
195 | EEPROM_5000_REG_BAND_3_CHANNELS, | 195 | EEPROM_REG_BAND_3_CHANNELS, |
196 | EEPROM_5000_REG_BAND_4_CHANNELS, | 196 | EEPROM_REG_BAND_4_CHANNELS, |
197 | EEPROM_5000_REG_BAND_5_CHANNELS, | 197 | EEPROM_REG_BAND_5_CHANNELS, |
198 | EEPROM_5000_REG_BAND_24_HT40_CHANNELS, | 198 | EEPROM_REG_BAND_24_HT40_CHANNELS, |
199 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS | 199 | EEPROM_REG_BAND_52_HT40_CHANNELS |
200 | }, | 200 | }, |
201 | .verify_signature = iwlcore_eeprom_verify_signature, | 201 | .verify_signature = iwlcore_eeprom_verify_signature, |
202 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, | 202 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, |
203 | .release_semaphore = iwlcore_eeprom_release_semaphore, | 203 | .release_semaphore = iwlcore_eeprom_release_semaphore, |
204 | .calib_version = iwl5000_eeprom_calib_version, | 204 | .calib_version = iwlagn_eeprom_calib_version, |
205 | .query_addr = iwl5000_eeprom_query_addr, | 205 | .query_addr = iwlagn_eeprom_query_addr, |
206 | }, | 206 | }, |
207 | .post_associate = iwl_post_associate, | 207 | .post_associate = iwl_post_associate, |
208 | .isr = iwl_isr_ict, | 208 | .isr = iwl_isr_ict, |
209 | .config_ap = iwl_config_ap, | 209 | .config_ap = iwl_config_ap, |
210 | .temp_ops = { | 210 | .temp_ops = { |
211 | .temperature = iwl5000_temperature, | 211 | .temperature = iwlagn_temperature, |
212 | .set_ct_kill = iwl1000_set_ct_threshold, | 212 | .set_ct_kill = iwl1000_set_ct_threshold, |
213 | }, | 213 | }, |
214 | .add_bcast_station = iwl_add_bcast_station, | 214 | .add_bcast_station = iwl_add_bcast_station, |
@@ -218,10 +218,10 @@ static struct iwl_lib_ops iwl1000_lib = { | |||
218 | }; | 218 | }; |
219 | 219 | ||
220 | static const struct iwl_ops iwl1000_ops = { | 220 | static const struct iwl_ops iwl1000_ops = { |
221 | .ucode = &iwl5000_ucode, | 221 | .ucode = &iwlagn_ucode, |
222 | .lib = &iwl1000_lib, | 222 | .lib = &iwl1000_lib, |
223 | .hcmd = &iwl5000_hcmd, | 223 | .hcmd = &iwlagn_hcmd, |
224 | .utils = &iwl5000_hcmd_utils, | 224 | .utils = &iwlagn_hcmd_utils, |
225 | .led = &iwlagn_led_ops, | 225 | .led = &iwlagn_led_ops, |
226 | }; | 226 | }; |
227 | 227 | ||
@@ -234,10 +234,10 @@ struct iwl_cfg iwl1000_bgn_cfg = { | |||
234 | .ops = &iwl1000_ops, | 234 | .ops = &iwl1000_ops, |
235 | .eeprom_size = OTP_LOW_IMAGE_SIZE, | 235 | .eeprom_size = OTP_LOW_IMAGE_SIZE, |
236 | .eeprom_ver = EEPROM_1000_EEPROM_VERSION, | 236 | .eeprom_ver = EEPROM_1000_EEPROM_VERSION, |
237 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 237 | .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, |
238 | .num_of_queues = IWL50_NUM_QUEUES, | 238 | .num_of_queues = IWLAGN_NUM_QUEUES, |
239 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 239 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
240 | .mod_params = &iwl50_mod_params, | 240 | .mod_params = &iwlagn_mod_params, |
241 | .valid_tx_ant = ANT_A, | 241 | .valid_tx_ant = ANT_A, |
242 | .valid_rx_ant = ANT_AB, | 242 | .valid_rx_ant = ANT_AB, |
243 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, | 243 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, |
@@ -253,6 +253,7 @@ struct iwl_cfg iwl1000_bgn_cfg = { | |||
253 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, | 253 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, |
254 | .chain_noise_scale = 1000, | 254 | .chain_noise_scale = 1000, |
255 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 255 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
256 | .max_event_log_size = 128, | ||
256 | }; | 257 | }; |
257 | 258 | ||
258 | struct iwl_cfg iwl1000_bg_cfg = { | 259 | struct iwl_cfg iwl1000_bg_cfg = { |
@@ -264,10 +265,10 @@ struct iwl_cfg iwl1000_bg_cfg = { | |||
264 | .ops = &iwl1000_ops, | 265 | .ops = &iwl1000_ops, |
265 | .eeprom_size = OTP_LOW_IMAGE_SIZE, | 266 | .eeprom_size = OTP_LOW_IMAGE_SIZE, |
266 | .eeprom_ver = EEPROM_1000_EEPROM_VERSION, | 267 | .eeprom_ver = EEPROM_1000_EEPROM_VERSION, |
267 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 268 | .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, |
268 | .num_of_queues = IWL50_NUM_QUEUES, | 269 | .num_of_queues = IWLAGN_NUM_QUEUES, |
269 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 270 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
270 | .mod_params = &iwl50_mod_params, | 271 | .mod_params = &iwlagn_mod_params, |
271 | .valid_tx_ant = ANT_A, | 272 | .valid_tx_ant = ANT_A, |
272 | .valid_rx_ant = ANT_AB, | 273 | .valid_rx_ant = ANT_AB, |
273 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, | 274 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, |
@@ -282,6 +283,7 @@ struct iwl_cfg iwl1000_bg_cfg = { | |||
282 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, | 283 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, |
283 | .chain_noise_scale = 1000, | 284 | .chain_noise_scale = 1000, |
284 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 285 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
286 | .max_event_log_size = 128, | ||
285 | }; | 287 | }; |
286 | 288 | ||
287 | MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); | 289 | MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); |
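Most of the churn in iwl-1000.c above lands inside iwl1000_lib and iwl1000_ops, which are tables of function pointers; renaming the shared helpers from iwl5000_* to iwlagn_* therefore only retargets initializers, and core code that calls through the ops table never changes. A minimal, self-contained sketch of that ops-table idiom (hypothetical names, not the driver's real structs):

#include <stdio.h>

/* Hypothetical, trimmed-down version of the driver's ops-table pattern:
 * hardware-specific code fills a struct of function pointers and the core
 * only calls through it, so renaming a helper is one edit per table entry. */
struct demo_lib_ops {
	int  (*set_hw_params)(int queues);
	void (*send_tx_power)(int dbm);
};

static int demo_set_hw_params(int queues)
{
	printf("configuring %d queues\n", queues);
	return 0;
}

static void demo_send_tx_power(int dbm)
{
	printf("tx power set to %d dBm\n", dbm);
}

static const struct demo_lib_ops demo_lib = {
	.set_hw_params = demo_set_hw_params,   /* swap the target here ...        */
	.send_tx_power = demo_send_tx_power,   /* ... and callers stay untouched  */
};

int main(void)
{
	demo_lib.set_hw_params(20);
	demo_lib.send_tx_power(16);
	return 0;
}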
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 074f42a7dcad..91bcb4e3cdfb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,10 +71,6 @@ | |||
71 | 71 | ||
72 | #include "iwl-eeprom.h" | 72 | #include "iwl-eeprom.h" |
73 | 73 | ||
74 | /* Time constants */ | ||
75 | #define SHORT_SLOT_TIME 9 | ||
76 | #define LONG_SLOT_TIME 20 | ||
77 | |||
78 | /* RSSI to dBm */ | 74 | /* RSSI to dBm */ |
79 | #define IWL39_RSSI_OFFSET 95 | 75 | #define IWL39_RSSI_OFFSET 95 |
80 | 76 | ||
@@ -230,7 +226,6 @@ struct iwl3945_eeprom { | |||
230 | 226 | ||
231 | /* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ | 227 | /* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ |
232 | #define IWL39_NUM_QUEUES 5 | 228 | #define IWL39_NUM_QUEUES 5 |
233 | #define IWL_NUM_SCAN_RATES (2) | ||
234 | 229 | ||
235 | #define IWL_DEFAULT_TX_RETRY 15 | 230 | #define IWL_DEFAULT_TX_RETRY 15 |
236 | 231 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 8f85a0db5c39..32eb4709acac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -342,7 +342,7 @@ void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 s | |||
342 | struct ieee80211_supported_band *sband; | 342 | struct ieee80211_supported_band *sband; |
343 | int i; | 343 | int i; |
344 | 344 | ||
345 | IWL_DEBUG_INFO(priv, "enter \n"); | 345 | IWL_DEBUG_INFO(priv, "enter\n"); |
346 | if (sta_id == priv->hw_params.bcast_sta_id) | 346 | if (sta_id == priv->hw_params.bcast_sta_id) |
347 | goto out; | 347 | goto out; |
348 | 348 | ||
@@ -648,7 +648,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, | |||
648 | unsigned long flags; | 648 | unsigned long flags; |
649 | u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0; | 649 | u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0; |
650 | s8 max_rate_idx = -1; | 650 | s8 max_rate_idx = -1; |
651 | struct iwl_priv *priv = (struct iwl_priv *)priv_r; | 651 | struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; |
652 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 652 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
653 | 653 | ||
654 | IWL_DEBUG_RATE(priv, "enter\n"); | 654 | IWL_DEBUG_RATE(priv, "enter\n"); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 7ac6cec006d0..bde3b4cbab9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -192,12 +192,12 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp) | |||
192 | } | 192 | } |
193 | 193 | ||
194 | #ifdef CONFIG_IWLWIFI_DEBUG | 194 | #ifdef CONFIG_IWLWIFI_DEBUG |
195 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x | 195 | #define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x |
196 | 196 | ||
197 | static const char *iwl3945_get_tx_fail_reason(u32 status) | 197 | static const char *iwl3945_get_tx_fail_reason(u32 status) |
198 | { | 198 | { |
199 | switch (status & TX_STATUS_MSK) { | 199 | switch (status & TX_STATUS_MSK) { |
200 | case TX_STATUS_SUCCESS: | 200 | case TX_3945_STATUS_SUCCESS: |
201 | return "SUCCESS"; | 201 | return "SUCCESS"; |
202 | TX_STATUS_ENTRY(SHORT_LIMIT); | 202 | TX_STATUS_ENTRY(SHORT_LIMIT); |
203 | TX_STATUS_ENTRY(LONG_LIMIT); | 203 | TX_STATUS_ENTRY(LONG_LIMIT); |
@@ -487,7 +487,7 @@ static void _iwl3945_dbg_report_frame(struct iwl_priv *priv, | |||
487 | * but you can hack it to show more, if you'd like to. */ | 487 | * but you can hack it to show more, if you'd like to. */ |
488 | if (dataframe) | 488 | if (dataframe) |
489 | IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " | 489 | IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " |
490 | "len=%u, rssi=%d, chnl=%d, rate=%d, \n", | 490 | "len=%u, rssi=%d, chnl=%d, rate=%d,\n", |
491 | title, le16_to_cpu(fc), header->addr1[5], | 491 | title, le16_to_cpu(fc), header->addr1[5], |
492 | length, rssi, channel, rate); | 492 | length, rssi, channel, rate); |
493 | else { | 493 | else { |
@@ -549,7 +549,6 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv, | |||
549 | struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); | 549 | struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); |
550 | u16 len = le16_to_cpu(rx_hdr->len); | 550 | u16 len = le16_to_cpu(rx_hdr->len); |
551 | struct sk_buff *skb; | 551 | struct sk_buff *skb; |
552 | int ret; | ||
553 | __le16 fc = hdr->frame_control; | 552 | __le16 fc = hdr->frame_control; |
554 | 553 | ||
555 | /* We received data from the HW, so stop the watchdog */ | 554 | /* We received data from the HW, so stop the watchdog */ |
@@ -566,9 +565,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv, | |||
566 | return; | 565 | return; |
567 | } | 566 | } |
568 | 567 | ||
569 | skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC); | 568 | skb = dev_alloc_skb(128); |
570 | if (!skb) { | 569 | if (!skb) { |
571 | IWL_ERR(priv, "alloc_skb failed\n"); | 570 | IWL_ERR(priv, "dev_alloc_skb failed\n"); |
572 | return; | 571 | return; |
573 | } | 572 | } |
574 | 573 | ||
@@ -577,37 +576,13 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv, | |||
577 | (struct ieee80211_hdr *)rxb_addr(rxb), | 576 | (struct ieee80211_hdr *)rxb_addr(rxb), |
578 | le32_to_cpu(rx_end->status), stats); | 577 | le32_to_cpu(rx_end->status), stats); |
579 | 578 | ||
580 | skb_reserve(skb, IWL_LINK_HDR_MAX); | ||
581 | skb_add_rx_frag(skb, 0, rxb->page, | 579 | skb_add_rx_frag(skb, 0, rxb->page, |
582 | (void *)rx_hdr->payload - (void *)pkt, len); | 580 | (void *)rx_hdr->payload - (void *)pkt, len); |
583 | 581 | ||
584 | /* mac80211 currently doesn't support paged SKB. Convert it to | ||
585 | * linear SKB for management frame and data frame requires | ||
586 | * software decryption or software defragementation. */ | ||
587 | if (ieee80211_is_mgmt(fc) || | ||
588 | ieee80211_has_protected(fc) || | ||
589 | ieee80211_has_morefrags(fc) || | ||
590 | le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) | ||
591 | ret = skb_linearize(skb); | ||
592 | else | ||
593 | ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? | ||
594 | 0 : -ENOMEM; | ||
595 | |||
596 | if (ret) { | ||
597 | kfree_skb(skb); | ||
598 | goto out; | ||
599 | } | ||
600 | |||
601 | /* | ||
602 | * XXX: We cannot touch the page and its virtual memory (pkt) after | ||
603 | * here. It might have already been freed by the above skb change. | ||
604 | */ | ||
605 | |||
606 | iwl_update_stats(priv, false, fc, len); | 582 | iwl_update_stats(priv, false, fc, len); |
607 | memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); | 583 | memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); |
608 | 584 | ||
609 | ieee80211_rx(priv->hw, skb); | 585 | ieee80211_rx(priv->hw, skb); |
610 | out: | ||
611 | priv->alloc_rxb_page--; | 586 | priv->alloc_rxb_page--; |
612 | rxb->page = NULL; | 587 | rxb->page = NULL; |
613 | } | 588 | } |
@@ -623,9 +598,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, | |||
623 | struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); | 598 | struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); |
624 | struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); | 599 | struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); |
625 | struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); | 600 | struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); |
626 | int snr; | 601 | u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg); |
627 | u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg); | 602 | u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff); |
628 | u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff); | ||
629 | u8 network_packet; | 603 | u8 network_packet; |
630 | 604 | ||
631 | rx_status.flag = 0; | 605 | rx_status.flag = 0; |
@@ -663,43 +637,19 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, | |||
663 | /* Convert 3945's rssi indicator to dBm */ | 637 | /* Convert 3945's rssi indicator to dBm */ |
664 | rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET; | 638 | rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET; |
665 | 639 | ||
666 | /* Set default noise value to -127 */ | 640 | IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n", |
667 | if (priv->last_rx_noise == 0) | 641 | rx_status.signal, rx_stats_sig_avg, |
668 | priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | 642 | rx_stats_noise_diff); |
669 | |||
670 | /* 3945 provides noise info for OFDM frames only. | ||
671 | * sig_avg and noise_diff are measured by the 3945's digital signal | ||
672 | * processor (DSP), and indicate linear levels of signal level and | ||
673 | * distortion/noise within the packet preamble after | ||
674 | * automatic gain control (AGC). sig_avg should stay fairly | ||
675 | * constant if the radio's AGC is working well. | ||
676 | * Since these values are linear (not dB or dBm), linear | ||
677 | * signal-to-noise ratio (SNR) is (sig_avg / noise_diff). | ||
678 | * Convert linear SNR to dB SNR, then subtract that from rssi dBm | ||
679 | * to obtain noise level in dBm. | ||
680 | * Calculate rx_status.signal (quality indicator in %) based on SNR. */ | ||
681 | if (rx_stats_noise_diff) { | ||
682 | snr = rx_stats_sig_avg / rx_stats_noise_diff; | ||
683 | rx_status.noise = rx_status.signal - | ||
684 | iwl3945_calc_db_from_ratio(snr); | ||
685 | } else { | ||
686 | rx_status.noise = priv->last_rx_noise; | ||
687 | } | ||
688 | |||
689 | |||
690 | IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n", | ||
691 | rx_status.signal, rx_status.noise, | ||
692 | rx_stats_sig_avg, rx_stats_noise_diff); | ||
693 | 643 | ||
694 | header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); | 644 | header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); |
695 | 645 | ||
696 | network_packet = iwl3945_is_network_packet(priv, header); | 646 | network_packet = iwl3945_is_network_packet(priv, header); |
697 | 647 | ||
698 | IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n", | 648 | IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n", |
699 | network_packet ? '*' : ' ', | 649 | network_packet ? '*' : ' ', |
700 | le16_to_cpu(rx_hdr->channel), | 650 | le16_to_cpu(rx_hdr->channel), |
701 | rx_status.signal, rx_status.signal, | 651 | rx_status.signal, rx_status.signal, |
702 | rx_status.noise, rx_status.rate_idx); | 652 | rx_status.rate_idx); |
703 | 653 | ||
704 | /* Set "1" to report good data frames in groups of 100 */ | 654 | /* Set "1" to report good data frames in groups of 100 */ |
705 | iwl3945_dbg_report_frame(priv, pkt, header, 1); | 655 | iwl3945_dbg_report_frame(priv, pkt, header, 1); |
@@ -710,7 +660,6 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, | |||
710 | le32_to_cpu(rx_end->beacon_timestamp); | 660 | le32_to_cpu(rx_end->beacon_timestamp); |
711 | priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp); | 661 | priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp); |
712 | priv->_3945.last_rx_rssi = rx_status.signal; | 662 | priv->_3945.last_rx_rssi = rx_status.signal; |
713 | priv->last_rx_noise = rx_status.noise; | ||
714 | } | 663 | } |
715 | 664 | ||
716 | iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); | 665 | iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); |
@@ -1050,7 +999,7 @@ static void iwl3945_nic_config(struct iwl_priv *priv) | |||
1050 | IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); | 999 | IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); |
1051 | 1000 | ||
1052 | if (rev_id & PCI_CFG_REV_ID_BIT_RTP) | 1001 | if (rev_id & PCI_CFG_REV_ID_BIT_RTP) |
1053 | IWL_DEBUG_INFO(priv, "RTP type \n"); | 1002 | IWL_DEBUG_INFO(priv, "RTP type\n"); |
1054 | else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { | 1003 | else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { |
1055 | IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); | 1004 | IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); |
1056 | iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, | 1005 | iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, |
@@ -2822,6 +2771,7 @@ static struct iwl_cfg iwl3945_bg_cfg = { | |||
2822 | .broken_powersave = true, | 2771 | .broken_powersave = true, |
2823 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 2772 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
2824 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 2773 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
2774 | .max_event_log_size = 512, | ||
2825 | }; | 2775 | }; |
2826 | 2776 | ||
2827 | static struct iwl_cfg iwl3945_abg_cfg = { | 2777 | static struct iwl_cfg iwl3945_abg_cfg = { |
@@ -2841,6 +2791,7 @@ static struct iwl_cfg iwl3945_abg_cfg = { | |||
2841 | .broken_powersave = true, | 2791 | .broken_powersave = true, |
2842 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 2792 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
2843 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 2793 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
2794 | .max_event_log_size = 512, | ||
2844 | }; | 2795 | }; |
2845 | 2796 | ||
2846 | DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { | 2797 | DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { |
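The largest removal in iwl-3945.c above drops the receive-noise estimate (rx_status.noise and priv->last_rx_noise are gone, and the debug prints lose their noise field). The deleted comment spelled out the math: linear SNR is sig_avg / noise_diff, converted to dB and subtracted from the RSSI in dBm. A standalone sketch of that dropped calculation, assuming iwl3945_calc_db_from_ratio() behaved roughly like 10*log10() (userspace floating point is used here purely for illustration):

#include <math.h>
#include <stdio.h>

#define NOISE_NOT_AVAILABLE (-127)	/* placeholder when no estimate exists */

/* sig_avg and noise_diff are linear DSP readings from the packet preamble;
 * the OFDM-only estimate was: noise(dBm) = signal(dBm) - SNR(dB). */
static int noise_dbm(int signal_dbm, unsigned int sig_avg, unsigned int noise_diff)
{
	if (!noise_diff)		/* e.g. frames carrying no noise info */
		return NOISE_NOT_AVAILABLE;
	return signal_dbm - (int)(10.0 * log10((double)sig_avg / noise_diff));
}

int main(void)
{
	/* -60 dBm signal with a 20:1 linear SNR -> roughly a -73 dBm noise floor */
	printf("noise ~ %d dBm\n", noise_dbm(-60, 200, 10));
	return 0;
}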
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 67ef562e8db1..cd4b61ae25b7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -81,26 +81,6 @@ | |||
81 | */ | 81 | */ |
82 | #define IWL49_FIRST_AMPDU_QUEUE 7 | 82 | #define IWL49_FIRST_AMPDU_QUEUE 7 |
83 | 83 | ||
84 | /* Time constants */ | ||
85 | #define SHORT_SLOT_TIME 9 | ||
86 | #define LONG_SLOT_TIME 20 | ||
87 | |||
88 | /* RSSI to dBm */ | ||
89 | #define IWL49_RSSI_OFFSET 44 | ||
90 | |||
91 | |||
92 | /* PCI registers */ | ||
93 | #define PCI_CFG_RETRY_TIMEOUT 0x041 | ||
94 | |||
95 | /* PCI register values */ | ||
96 | #define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 | ||
97 | #define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 | ||
98 | |||
99 | #define IWL_NUM_SCAN_RATES (2) | ||
100 | |||
101 | #define IWL_DEFAULT_TX_RETRY 15 | ||
102 | |||
103 | |||
104 | /* Sizes and addresses for instruction and data memory (SRAM) in | 84 | /* Sizes and addresses for instruction and data memory (SRAM) in |
105 | * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ | 85 | * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ |
106 | #define IWL49_RTC_INST_LOWER_BOUND (0x000000) | 86 | #define IWL49_RTC_INST_LOWER_BOUND (0x000000) |
@@ -393,10 +373,6 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr) | |||
393 | * location(s) in command (struct iwl4965_txpowertable_cmd). | 373 | * location(s) in command (struct iwl4965_txpowertable_cmd). |
394 | */ | 374 | */ |
395 | 375 | ||
396 | /* Limit range of txpower output target to be between these values */ | ||
397 | #define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */ | ||
398 | #define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */ | ||
399 | |||
400 | /** | 376 | /** |
401 | * When MIMO is used (2 transmitters operating simultaneously), driver should | 377 | * When MIMO is used (2 transmitters operating simultaneously), driver should |
402 | * limit each transmitter to deliver a max of 3 dB below the regulatory limit | 378 | * limit each transmitter to deliver a max of 3 dB below the regulatory limit |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 89aba76e4a2c..2e3cda75f3ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -46,6 +46,7 @@ | |||
46 | #include "iwl-calib.h" | 46 | #include "iwl-calib.h" |
47 | #include "iwl-sta.h" | 47 | #include "iwl-sta.h" |
48 | #include "iwl-agn-led.h" | 48 | #include "iwl-agn-led.h" |
49 | #include "iwl-agn.h" | ||
49 | 50 | ||
50 | static int iwl4965_send_tx_power(struct iwl_priv *priv); | 51 | static int iwl4965_send_tx_power(struct iwl_priv *priv); |
51 | static int iwl4965_hw_get_temperature(struct iwl_priv *priv); | 52 | static int iwl4965_hw_get_temperature(struct iwl_priv *priv); |
@@ -60,14 +61,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv); | |||
60 | #define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode" | 61 | #define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode" |
61 | #define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api) | 62 | #define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api) |
62 | 63 | ||
63 | |||
64 | /* module parameters */ | ||
65 | static struct iwl_mod_params iwl4965_mod_params = { | ||
66 | .amsdu_size_8K = 1, | ||
67 | .restart_fw = 1, | ||
68 | /* the rest are 0 by default */ | ||
69 | }; | ||
70 | |||
71 | /* check contents of special bootstrap uCode SRAM */ | 64 | /* check contents of special bootstrap uCode SRAM */ |
72 | static int iwl4965_verify_bsm(struct iwl_priv *priv) | 65 | static int iwl4965_verify_bsm(struct iwl_priv *priv) |
73 | { | 66 | { |
@@ -417,7 +410,7 @@ static void iwl4965_gain_computation(struct iwl_priv *priv, | |||
417 | sizeof(cmd), &cmd); | 410 | sizeof(cmd), &cmd); |
418 | if (ret) | 411 | if (ret) |
419 | IWL_DEBUG_CALIB(priv, "fail sending cmd " | 412 | IWL_DEBUG_CALIB(priv, "fail sending cmd " |
420 | "REPLY_PHY_CALIBRATION_CMD \n"); | 413 | "REPLY_PHY_CALIBRATION_CMD\n"); |
421 | 414 | ||
422 | /* TODO we might want recalculate | 415 | /* TODO we might want recalculate |
423 | * rx_chain in rxon cmd */ | 416 | * rx_chain in rxon cmd */ |
@@ -1619,19 +1612,19 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv) | |||
1619 | 1612 | ||
1620 | /* get absolute value */ | 1613 | /* get absolute value */ |
1621 | if (temp_diff < 0) { | 1614 | if (temp_diff < 0) { |
1622 | IWL_DEBUG_POWER(priv, "Getting cooler, delta %d, \n", temp_diff); | 1615 | IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff); |
1623 | temp_diff = -temp_diff; | 1616 | temp_diff = -temp_diff; |
1624 | } else if (temp_diff == 0) | 1617 | } else if (temp_diff == 0) |
1625 | IWL_DEBUG_POWER(priv, "Same temp, \n"); | 1618 | IWL_DEBUG_POWER(priv, "Temperature unchanged\n"); |
1626 | else | 1619 | else |
1627 | IWL_DEBUG_POWER(priv, "Getting warmer, delta %d, \n", temp_diff); | 1620 | IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff); |
1628 | 1621 | ||
1629 | if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { | 1622 | if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { |
1630 | IWL_DEBUG_POWER(priv, "Thermal txpower calib not needed\n"); | 1623 | IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n"); |
1631 | return 0; | 1624 | return 0; |
1632 | } | 1625 | } |
1633 | 1626 | ||
1634 | IWL_DEBUG_POWER(priv, "Thermal txpower calib needed\n"); | 1627 | IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n"); |
1635 | 1628 | ||
1636 | return 1; | 1629 | return 1; |
1637 | } | 1630 | } |
@@ -1880,7 +1873,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, | |||
1880 | info->status.rates[0].count = tx_resp->failure_frame + 1; | 1873 | info->status.rates[0].count = tx_resp->failure_frame + 1; |
1881 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; | 1874 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; |
1882 | info->flags |= iwl_tx_status_to_mac80211(status); | 1875 | info->flags |= iwl_tx_status_to_mac80211(status); |
1883 | iwl_hwrate_to_tx_control(priv, rate_n_flags, info); | 1876 | iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info); |
1884 | /* FIXME: code repetition end */ | 1877 | /* FIXME: code repetition end */ |
1885 | 1878 | ||
1886 | IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", | 1879 | IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", |
@@ -2020,7 +2013,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2020 | index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); | 2013 | index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); |
2021 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " | 2014 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " |
2022 | "%d index %d\n", scd_ssn , index); | 2015 | "%d index %d\n", scd_ssn , index); |
2023 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2016 | freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); |
2024 | if (qc) | 2017 | if (qc) |
2025 | iwl_free_tfds_in_queue(priv, sta_id, | 2018 | iwl_free_tfds_in_queue(priv, sta_id, |
2026 | tid, freed); | 2019 | tid, freed); |
@@ -2037,7 +2030,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2037 | } else { | 2030 | } else { |
2038 | info->status.rates[0].count = tx_resp->failure_frame + 1; | 2031 | info->status.rates[0].count = tx_resp->failure_frame + 1; |
2039 | info->flags |= iwl_tx_status_to_mac80211(status); | 2032 | info->flags |= iwl_tx_status_to_mac80211(status); |
2040 | iwl_hwrate_to_tx_control(priv, | 2033 | iwlagn_hwrate_to_tx_control(priv, |
2041 | le32_to_cpu(tx_resp->rate_n_flags), | 2034 | le32_to_cpu(tx_resp->rate_n_flags), |
2042 | info); | 2035 | info); |
2043 | 2036 | ||
@@ -2048,7 +2041,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2048 | le32_to_cpu(tx_resp->rate_n_flags), | 2041 | le32_to_cpu(tx_resp->rate_n_flags), |
2049 | tx_resp->failure_frame); | 2042 | tx_resp->failure_frame); |
2050 | 2043 | ||
2051 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2044 | freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); |
2052 | if (qc && likely(sta_id != IWL_INVALID_STATION)) | 2045 | if (qc && likely(sta_id != IWL_INVALID_STATION)) |
2053 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | 2046 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
2054 | else if (sta_id == IWL_INVALID_STATION) | 2047 | else if (sta_id == IWL_INVALID_STATION) |
@@ -2059,10 +2052,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2059 | iwl_wake_queue(priv, txq_id); | 2052 | iwl_wake_queue(priv, txq_id); |
2060 | } | 2053 | } |
2061 | if (qc && likely(sta_id != IWL_INVALID_STATION)) | 2054 | if (qc && likely(sta_id != IWL_INVALID_STATION)) |
2062 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | 2055 | iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); |
2063 | 2056 | ||
2064 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) | 2057 | iwl_check_abort_status(priv, tx_resp->frame_count, status); |
2065 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); | ||
2066 | } | 2058 | } |
2067 | 2059 | ||
2068 | static int iwl4965_calc_rssi(struct iwl_priv *priv, | 2060 | static int iwl4965_calc_rssi(struct iwl_priv *priv, |
@@ -2096,7 +2088,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv, | |||
2096 | 2088 | ||
2097 | /* dBm = max_rssi dB - agc dB - constant. | 2089 | /* dBm = max_rssi dB - agc dB - constant. |
2098 | * Higher AGC (higher radio gain) means lower signal. */ | 2090 | * Higher AGC (higher radio gain) means lower signal. */ |
2099 | return max_rssi - agc - IWL49_RSSI_OFFSET; | 2091 | return max_rssi - agc - IWLAGN_RSSI_OFFSET; |
2100 | } | 2092 | } |
2101 | 2093 | ||
2102 | 2094 | ||
@@ -2104,7 +2096,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv, | |||
2104 | static void iwl4965_rx_handler_setup(struct iwl_priv *priv) | 2096 | static void iwl4965_rx_handler_setup(struct iwl_priv *priv) |
2105 | { | 2097 | { |
2106 | /* Legacy Rx frames */ | 2098 | /* Legacy Rx frames */ |
2107 | priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx; | 2099 | priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx; |
2108 | /* Tx response */ | 2100 | /* Tx response */ |
2109 | priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; | 2101 | priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; |
2110 | } | 2102 | } |
@@ -2247,7 +2239,7 @@ struct iwl_cfg iwl4965_agn_cfg = { | |||
2247 | .ops = &iwl4965_ops, | 2239 | .ops = &iwl4965_ops, |
2248 | .num_of_queues = IWL49_NUM_QUEUES, | 2240 | .num_of_queues = IWL49_NUM_QUEUES, |
2249 | .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, | 2241 | .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, |
2250 | .mod_params = &iwl4965_mod_params, | 2242 | .mod_params = &iwlagn_mod_params, |
2251 | .valid_tx_ant = ANT_AB, | 2243 | .valid_tx_ant = ANT_AB, |
2252 | .valid_rx_ant = ANT_ABC, | 2244 | .valid_rx_ant = ANT_ABC, |
2253 | .pll_cfg_val = 0, | 2245 | .pll_cfg_val = 0, |
@@ -2260,27 +2252,11 @@ struct iwl_cfg iwl4965_agn_cfg = { | |||
2260 | .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, | 2252 | .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, |
2261 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 2253 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
2262 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 2254 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
2255 | .temperature_kelvin = true, | ||
2256 | .off_channel_workaround = true, | ||
2257 | .max_event_log_size = 512, | ||
2263 | }; | 2258 | }; |
2264 | 2259 | ||
2265 | /* Module firmware */ | 2260 | /* Module firmware */ |
2266 | MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); | 2261 | MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); |
2267 | 2262 | ||
2268 | module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO); | ||
2269 | MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); | ||
2270 | module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO); | ||
2271 | MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); | ||
2272 | module_param_named( | ||
2273 | disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO); | ||
2274 | MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); | ||
2275 | |||
2276 | module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO); | ||
2277 | MODULE_PARM_DESC(queues_num, "number of hw queues."); | ||
2278 | /* 11n */ | ||
2279 | module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO); | ||
2280 | MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); | ||
2281 | module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, | ||
2282 | int, S_IRUGO); | ||
2283 | MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); | ||
2284 | |||
2285 | module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO); | ||
2286 | MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error"); | ||
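The only arithmetic touched in iwl4965_calc_rssi() is the constant: the per-device IWL49_RSSI_OFFSET (44, deleted from iwl-4965-hw.h above) gives way to the shared IWLAGN_RSSI_OFFSET, while the formula described by the surviving in-code comment stays the same. A trivial sketch of that conversion, with the offset passed in as a parameter instead of taken from a header:

#include <stdio.h>

/* dBm = max_rssi(dB) - agc(dB) - offset; higher AGC gain means the radio
 * had to amplify more, i.e. the incoming signal was weaker. */
static int calc_rssi_dbm(int max_rssi, int agc, int offset)
{
	return max_rssi - agc - offset;
}

int main(void)
{
	/* using the 4965's historical offset of 44 */
	printf("rssi = %d dBm\n", calc_rssi_dbm(50, 60, 44));	/* -54 dBm */
	return 0;
}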
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 714e032f6217..146e6431ae95 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -68,25 +68,6 @@ | |||
68 | #ifndef __iwl_5000_hw_h__ | 68 | #ifndef __iwl_5000_hw_h__ |
69 | #define __iwl_5000_hw_h__ | 69 | #define __iwl_5000_hw_h__ |
70 | 70 | ||
71 | #define IWL50_RTC_INST_LOWER_BOUND (0x000000) | ||
72 | #define IWL50_RTC_INST_UPPER_BOUND (0x020000) | ||
73 | |||
74 | #define IWL50_RTC_DATA_LOWER_BOUND (0x800000) | ||
75 | #define IWL50_RTC_DATA_UPPER_BOUND (0x80C000) | ||
76 | |||
77 | #define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - \ | ||
78 | IWL50_RTC_INST_LOWER_BOUND) | ||
79 | #define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - \ | ||
80 | IWL50_RTC_DATA_LOWER_BOUND) | ||
81 | |||
82 | /* EEPROM */ | ||
83 | #define IWL_5000_EEPROM_IMG_SIZE 2048 | ||
84 | |||
85 | #define IWL50_CMD_FIFO_NUM 7 | ||
86 | #define IWL50_NUM_QUEUES 20 | ||
87 | #define IWL50_NUM_AMPDU_QUEUES 10 | ||
88 | #define IWL50_FIRST_AMPDU_QUEUE 10 | ||
89 | |||
90 | /* 5150 only */ | 71 | /* 5150 only */ |
91 | #define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) | 72 | #define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) |
92 | 73 | ||
@@ -103,19 +84,5 @@ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) | |||
103 | return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); | 84 | return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); |
104 | } | 85 | } |
105 | 86 | ||
106 | /* Fixed (non-configurable) rx data from phy */ | ||
107 | |||
108 | /** | ||
109 | * struct iwl5000_schedq_bc_tbl scheduler byte count table | ||
110 | * base physical address of iwl5000_shared | ||
111 | * is provided to SCD_DRAM_BASE_ADDR | ||
112 | * @tfd_offset 0-12 - tx command byte count | ||
113 | * 12-16 - station index | ||
114 | */ | ||
115 | struct iwl5000_scd_bc_tbl { | ||
116 | __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; | ||
117 | } __attribute__ ((packed)); | ||
118 | |||
119 | |||
120 | #endif /* __iwl_5000_hw_h__ */ | 87 | #endif /* __iwl_5000_hw_h__ */ |
121 | 88 | ||
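Among the definitions leaving iwl-5000-hw.h is the scheduler byte-count table; the iwl-1000.c hunk earlier already sizes its DMA allocation from sizeof(struct iwlagn_scd_bc_tbl) instead. A compact sketch of how such a packed little-endian table is sized per queue, with hypothetical names and constants standing in for the driver's real TFD_QUEUE_BC_SIZE and queue count:

#include <stdint.h>
#include <stdio.h>

#define DEMO_QUEUE_BC_SIZE 320	/* hypothetical entries per queue */
#define DEMO_NUM_QUEUES     20	/* hypothetical TX queue count */

/* Packed array of little-endian 16-bit entries; the low bits carry the TX
 * command byte count and the upper bits a station index, as the deleted
 * kernel-doc above described. */
struct demo_scd_bc_tbl {
	uint16_t tfd_offset[DEMO_QUEUE_BC_SIZE];
} __attribute__((packed));

int main(void)
{
	/* the driver allocates one table per TX queue in DMA-able memory */
	size_t total = DEMO_NUM_QUEUES * sizeof(struct demo_scd_bc_tbl);
	printf("scheduler byte-count tables: %zu bytes\n", total);
	return 0;
}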
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 2267cad49cbf..e967cfcac224 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -19,6 +19,7 @@ | |||
19 | * file called LICENSE. | 19 | * file called LICENSE. |
20 | * | 20 | * |
21 | * Contact Information: | 21 | * Contact Information: |
22 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
22 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | 23 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
23 | * | 24 | * |
24 | *****************************************************************************/ | 25 | *****************************************************************************/ |
@@ -45,8 +46,8 @@ | |||
45 | #include "iwl-helpers.h" | 46 | #include "iwl-helpers.h" |
46 | #include "iwl-agn.h" | 47 | #include "iwl-agn.h" |
47 | #include "iwl-agn-led.h" | 48 | #include "iwl-agn-led.h" |
49 | #include "iwl-agn-hw.h" | ||
48 | #include "iwl-5000-hw.h" | 50 | #include "iwl-5000-hw.h" |
49 | #include "iwl-6000-hw.h" | ||
50 | 51 | ||
51 | /* Highest firmware API version supported */ | 52 | /* Highest firmware API version supported */ |
52 | #define IWL5000_UCODE_API_MAX 2 | 53 | #define IWL5000_UCODE_API_MAX 2 |
@@ -64,21 +65,8 @@ | |||
64 | #define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode" | 65 | #define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode" |
65 | #define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api) | 66 | #define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api) |
66 | 67 | ||
67 | static const s8 iwl5000_default_queue_to_tx_fifo[] = { | ||
68 | IWL_TX_FIFO_VO, | ||
69 | IWL_TX_FIFO_VI, | ||
70 | IWL_TX_FIFO_BE, | ||
71 | IWL_TX_FIFO_BK, | ||
72 | IWL50_CMD_FIFO_NUM, | ||
73 | IWL_TX_FIFO_UNUSED, | ||
74 | IWL_TX_FIFO_UNUSED, | ||
75 | IWL_TX_FIFO_UNUSED, | ||
76 | IWL_TX_FIFO_UNUSED, | ||
77 | IWL_TX_FIFO_UNUSED, | ||
78 | }; | ||
79 | |||
80 | /* NIC configuration for 5000 series */ | 68 | /* NIC configuration for 5000 series */ |
81 | void iwl5000_nic_config(struct iwl_priv *priv) | 69 | static void iwl5000_nic_config(struct iwl_priv *priv) |
82 | { | 70 | { |
83 | unsigned long flags; | 71 | unsigned long flags; |
84 | u16 radio_cfg; | 72 | u16 radio_cfg; |
@@ -111,162 +99,6 @@ void iwl5000_nic_config(struct iwl_priv *priv) | |||
111 | spin_unlock_irqrestore(&priv->lock, flags); | 99 | spin_unlock_irqrestore(&priv->lock, flags); |
112 | } | 100 | } |
113 | 101 | ||
114 | |||
115 | /* | ||
116 | * EEPROM | ||
117 | */ | ||
118 | static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) | ||
119 | { | ||
120 | u16 offset = 0; | ||
121 | |||
122 | if ((address & INDIRECT_ADDRESS) == 0) | ||
123 | return address; | ||
124 | |||
125 | switch (address & INDIRECT_TYPE_MSK) { | ||
126 | case INDIRECT_HOST: | ||
127 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST); | ||
128 | break; | ||
129 | case INDIRECT_GENERAL: | ||
130 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL); | ||
131 | break; | ||
132 | case INDIRECT_REGULATORY: | ||
133 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY); | ||
134 | break; | ||
135 | case INDIRECT_CALIBRATION: | ||
136 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION); | ||
137 | break; | ||
138 | case INDIRECT_PROCESS_ADJST: | ||
139 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST); | ||
140 | break; | ||
141 | case INDIRECT_OTHERS: | ||
142 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS); | ||
143 | break; | ||
144 | default: | ||
145 | IWL_ERR(priv, "illegal indirect type: 0x%X\n", | ||
146 | address & INDIRECT_TYPE_MSK); | ||
147 | break; | ||
148 | } | ||
149 | |||
150 | /* translate the offset from words to byte */ | ||
151 | return (address & ADDRESS_MSK) + (offset << 1); | ||
152 | } | ||
153 | |||
154 | u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv) | ||
155 | { | ||
156 | struct iwl_eeprom_calib_hdr { | ||
157 | u8 version; | ||
158 | u8 pa_type; | ||
159 | u16 voltage; | ||
160 | } *hdr; | ||
161 | |||
162 | hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, | ||
163 | EEPROM_5000_CALIB_ALL); | ||
164 | return hdr->version; | ||
165 | |||
166 | } | ||
167 | |||
168 | static void iwl5000_gain_computation(struct iwl_priv *priv, | ||
169 | u32 average_noise[NUM_RX_CHAINS], | ||
170 | u16 min_average_noise_antenna_i, | ||
171 | u32 min_average_noise, | ||
172 | u8 default_chain) | ||
173 | { | ||
174 | int i; | ||
175 | s32 delta_g; | ||
176 | struct iwl_chain_noise_data *data = &priv->chain_noise_data; | ||
177 | |||
178 | /* | ||
179 | * Find Gain Code for the chains based on "default chain" | ||
180 | */ | ||
181 | for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) { | ||
182 | if ((data->disconn_array[i])) { | ||
183 | data->delta_gain_code[i] = 0; | ||
184 | continue; | ||
185 | } | ||
186 | |||
187 | delta_g = (priv->cfg->chain_noise_scale * | ||
188 | ((s32)average_noise[default_chain] - | ||
189 | (s32)average_noise[i])) / 1500; | ||
190 | |||
191 | /* bound gain by 2 bits value max, 3rd bit is sign */ | ||
192 | data->delta_gain_code[i] = | ||
193 | min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); | ||
194 | |||
195 | if (delta_g < 0) | ||
196 | /* | ||
197 | * set negative sign ... | ||
198 | * note to Intel developers: This is uCode API format, | ||
199 | * not the format of any internal device registers. | ||
200 | * Do not change this format for e.g. 6050 or similar | ||
201 | * devices. Change format only if more resolution | ||
202 | * (i.e. more than 2 bits magnitude) is needed. | ||
203 | */ | ||
204 | data->delta_gain_code[i] |= (1 << 2); | ||
205 | } | ||
206 | |||
207 | IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n", | ||
208 | data->delta_gain_code[1], data->delta_gain_code[2]); | ||
209 | |||
210 | if (!data->radio_write) { | ||
211 | struct iwl_calib_chain_noise_gain_cmd cmd; | ||
212 | |||
213 | memset(&cmd, 0, sizeof(cmd)); | ||
214 | |||
215 | cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD; | ||
216 | cmd.hdr.first_group = 0; | ||
217 | cmd.hdr.groups_num = 1; | ||
218 | cmd.hdr.data_valid = 1; | ||
219 | cmd.delta_gain_1 = data->delta_gain_code[1]; | ||
220 | cmd.delta_gain_2 = data->delta_gain_code[2]; | ||
221 | iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, | ||
222 | sizeof(cmd), &cmd, NULL); | ||
223 | |||
224 | data->radio_write = 1; | ||
225 | data->state = IWL_CHAIN_NOISE_CALIBRATED; | ||
226 | } | ||
227 | |||
228 | data->chain_noise_a = 0; | ||
229 | data->chain_noise_b = 0; | ||
230 | data->chain_noise_c = 0; | ||
231 | data->chain_signal_a = 0; | ||
232 | data->chain_signal_b = 0; | ||
233 | data->chain_signal_c = 0; | ||
234 | data->beacon_count = 0; | ||
235 | } | ||
236 | |||
237 | static void iwl5000_chain_noise_reset(struct iwl_priv *priv) | ||
238 | { | ||
239 | struct iwl_chain_noise_data *data = &priv->chain_noise_data; | ||
240 | int ret; | ||
241 | |||
242 | if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { | ||
243 | struct iwl_calib_chain_noise_reset_cmd cmd; | ||
244 | memset(&cmd, 0, sizeof(cmd)); | ||
245 | |||
246 | cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD; | ||
247 | cmd.hdr.first_group = 0; | ||
248 | cmd.hdr.groups_num = 1; | ||
249 | cmd.hdr.data_valid = 1; | ||
250 | ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, | ||
251 | sizeof(cmd), &cmd); | ||
252 | if (ret) | ||
253 | IWL_ERR(priv, | ||
254 | "Could not send REPLY_PHY_CALIBRATION_CMD\n"); | ||
255 | data->state = IWL_CHAIN_NOISE_ACCUMULATE; | ||
256 | IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); | ||
257 | } | ||
258 | } | ||
259 | |||
260 | void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info, | ||
261 | __le32 *tx_flags) | ||
262 | { | ||
263 | if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || | ||
264 | (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) | ||
265 | *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; | ||
266 | else | ||
267 | *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK; | ||
268 | } | ||
269 | |||
270 | static struct iwl_sensitivity_ranges iwl5000_sensitivity = { | 102 | static struct iwl_sensitivity_ranges iwl5000_sensitivity = { |
271 | .min_nrg_cck = 95, | 103 | .min_nrg_cck = 95, |
272 | .max_nrg_cck = 0, /* not used, set to 0 */ | 104 | .max_nrg_cck = 0, /* not used, set to 0 */ |
@@ -318,14 +150,6 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = { | |||
318 | .nrg_th_cca = 62, | 150 | .nrg_th_cca = 62, |
319 | }; | 151 | }; |
320 | 152 | ||
321 | const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, | ||
322 | size_t offset) | ||
323 | { | ||
324 | u32 address = eeprom_indirect_address(priv, offset); | ||
325 | BUG_ON(address >= priv->cfg->eeprom_size); | ||
326 | return &priv->eeprom[address]; | ||
327 | } | ||
328 | |||
329 | static void iwl5150_set_ct_threshold(struct iwl_priv *priv) | 153 | static void iwl5150_set_ct_threshold(struct iwl_priv *priv) |
330 | { | 154 | { |
331 | const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; | 155 | const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; |
@@ -341,351 +165,10 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv) | |||
341 | priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; | 165 | priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; |
342 | } | 166 | } |
343 | 167 | ||
344 | /* | 168 | static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) |
345 | * Calibration | ||
346 | */ | ||
347 | static int iwl5000_set_Xtal_calib(struct iwl_priv *priv) | ||
348 | { | ||
349 | struct iwl_calib_xtal_freq_cmd cmd; | ||
350 | __le16 *xtal_calib = | ||
351 | (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); | ||
352 | |||
353 | cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; | ||
354 | cmd.hdr.first_group = 0; | ||
355 | cmd.hdr.groups_num = 1; | ||
356 | cmd.hdr.data_valid = 1; | ||
357 | cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); | ||
358 | cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); | ||
359 | return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], | ||
360 | (u8 *)&cmd, sizeof(cmd)); | ||
361 | } | ||
362 | |||
363 | static int iwl5000_send_calib_cfg(struct iwl_priv *priv) | ||
364 | { | ||
365 | struct iwl_calib_cfg_cmd calib_cfg_cmd; | ||
366 | struct iwl_host_cmd cmd = { | ||
367 | .id = CALIBRATION_CFG_CMD, | ||
368 | .len = sizeof(struct iwl_calib_cfg_cmd), | ||
369 | .data = &calib_cfg_cmd, | ||
370 | }; | ||
371 | |||
372 | memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); | ||
373 | calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL; | ||
374 | calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL; | ||
375 | calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL; | ||
376 | calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL; | ||
377 | |||
378 | return iwl_send_cmd(priv, &cmd); | ||
379 | } | ||
380 | |||
381 | static void iwl5000_rx_calib_result(struct iwl_priv *priv, | ||
382 | struct iwl_rx_mem_buffer *rxb) | ||
383 | { | ||
384 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
385 | struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw; | ||
386 | int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; | ||
387 | int index; | ||
388 | |||
389 | /* reduce the size of the length field itself */ | ||
390 | len -= 4; | ||
391 | |||
392 | /* Define the order in which the results will be sent to the runtime | ||
393 | * uCode. iwl_send_calib_results sends them in a row according to their | ||
394 | * index. We sort them here */ | ||
395 | switch (hdr->op_code) { | ||
396 | case IWL_PHY_CALIBRATE_DC_CMD: | ||
397 | index = IWL_CALIB_DC; | ||
398 | break; | ||
399 | case IWL_PHY_CALIBRATE_LO_CMD: | ||
400 | index = IWL_CALIB_LO; | ||
401 | break; | ||
402 | case IWL_PHY_CALIBRATE_TX_IQ_CMD: | ||
403 | index = IWL_CALIB_TX_IQ; | ||
404 | break; | ||
405 | case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD: | ||
406 | index = IWL_CALIB_TX_IQ_PERD; | ||
407 | break; | ||
408 | case IWL_PHY_CALIBRATE_BASE_BAND_CMD: | ||
409 | index = IWL_CALIB_BASE_BAND; | ||
410 | break; | ||
411 | default: | ||
412 | IWL_ERR(priv, "Unknown calibration notification %d\n", | ||
413 | hdr->op_code); | ||
414 | return; | ||
415 | } | ||
416 | iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len); | ||
417 | } | ||
418 | |||
419 | static void iwl5000_rx_calib_complete(struct iwl_priv *priv, | ||
420 | struct iwl_rx_mem_buffer *rxb) | ||
421 | { | ||
422 | IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n"); | ||
423 | queue_work(priv->workqueue, &priv->restart); | ||
424 | } | ||
425 | |||
426 | /* | ||
427 | * ucode | ||
428 | */ | ||
429 | static int iwl5000_load_section(struct iwl_priv *priv, const char *name, | ||
430 | struct fw_desc *image, u32 dst_addr) | ||
431 | { | ||
432 | dma_addr_t phy_addr = image->p_addr; | ||
433 | u32 byte_cnt = image->len; | ||
434 | int ret; | ||
435 | |||
436 | priv->ucode_write_complete = 0; | ||
437 | |||
438 | iwl_write_direct32(priv, | ||
439 | FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), | ||
440 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); | ||
441 | |||
442 | iwl_write_direct32(priv, | ||
443 | FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); | ||
444 | |||
445 | iwl_write_direct32(priv, | ||
446 | FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), | ||
447 | phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); | ||
448 | |||
449 | iwl_write_direct32(priv, | ||
450 | FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), | ||
451 | (iwl_get_dma_hi_addr(phy_addr) | ||
452 | << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); | ||
453 | |||
454 | iwl_write_direct32(priv, | ||
455 | FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), | ||
456 | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | | ||
457 | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | | ||
458 | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); | ||
459 | |||
460 | iwl_write_direct32(priv, | ||
461 | FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), | ||
462 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
463 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | | ||
464 | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); | ||
465 | |||
466 | IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name); | ||
467 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | ||
468 | priv->ucode_write_complete, 5 * HZ); | ||
469 | if (ret == -ERESTARTSYS) { | ||
470 | IWL_ERR(priv, "Could not load the %s uCode section due " | ||
471 | "to interrupt\n", name); | ||
472 | return ret; | ||
473 | } | ||
474 | if (!ret) { | ||
475 | IWL_ERR(priv, "Could not load the %s uCode section\n", | ||
476 | name); | ||
477 | return -ETIMEDOUT; | ||
478 | } | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | static int iwl5000_load_given_ucode(struct iwl_priv *priv, | ||
484 | struct fw_desc *inst_image, | ||
485 | struct fw_desc *data_image) | ||
486 | { | ||
487 | int ret = 0; | ||
488 | |||
489 | ret = iwl5000_load_section(priv, "INST", inst_image, | ||
490 | IWL50_RTC_INST_LOWER_BOUND); | ||
491 | if (ret) | ||
492 | return ret; | ||
493 | |||
494 | return iwl5000_load_section(priv, "DATA", data_image, | ||
495 | IWL50_RTC_DATA_LOWER_BOUND); | ||
496 | } | ||
497 | |||
498 | int iwl5000_load_ucode(struct iwl_priv *priv) | ||
499 | { | ||
500 | int ret = 0; | ||
501 | |||
502 | /* check whether init ucode should be loaded, or rather runtime ucode */ | ||
503 | if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) { | ||
504 | IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n"); | ||
505 | ret = iwl5000_load_given_ucode(priv, | ||
506 | &priv->ucode_init, &priv->ucode_init_data); | ||
507 | if (!ret) { | ||
508 | IWL_DEBUG_INFO(priv, "Init ucode load complete.\n"); | ||
509 | priv->ucode_type = UCODE_INIT; | ||
510 | } | ||
511 | } else { | ||
512 | IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. " | ||
513 | "Loading runtime ucode...\n"); | ||
514 | ret = iwl5000_load_given_ucode(priv, | ||
515 | &priv->ucode_code, &priv->ucode_data); | ||
516 | if (!ret) { | ||
517 | IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n"); | ||
518 | priv->ucode_type = UCODE_RT; | ||
519 | } | ||
520 | } | ||
521 | |||
522 | return ret; | ||
523 | } | ||
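
The image selection above boils down to a small state decision: load the init (calibration) image exactly once, when one is present and nothing has been loaded yet, and the runtime image in every other case. A minimal standalone sketch of that decision, using illustrative names rather than the driver's types:

#include <stdbool.h>
#include <stdio.h>

enum ucode_type { UCODE_NONE, UCODE_INIT, UCODE_RT };

/* Which image should be loaded next, given what is already loaded? */
static enum ucode_type pick_ucode_image(bool have_init_image,
                                        enum ucode_type loaded)
{
        if (have_init_image && loaded == UCODE_NONE)
                return UCODE_INIT;   /* first pass: init/calibration image */
        return UCODE_RT;             /* otherwise: runtime image */
}

int main(void)
{
        printf("%d\n", pick_ucode_image(true, UCODE_NONE));  /* 1 = init */
        printf("%d\n", pick_ucode_image(true, UCODE_INIT));  /* 2 = runtime */
        printf("%d\n", pick_ucode_image(false, UCODE_NONE)); /* 2 = runtime */
        return 0;
}
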
524 | |||
525 | void iwl5000_init_alive_start(struct iwl_priv *priv) | ||
526 | { | ||
527 | int ret = 0; | ||
528 | |||
529 | /* Check alive response for "valid" sign from uCode */ | ||
530 | if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { | ||
531 | /* We had an error bringing up the hardware, so take it | ||
532 | * all the way back down so we can try again */ | ||
533 | IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); | ||
534 | goto restart; | ||
535 | } | ||
536 | |||
537 | /* initialize uCode was loaded... verify inst image. | ||
538 | * This is a paranoid check, because we would not have gotten the | ||
539 | * "initialize" alive if code weren't properly loaded. */ | ||
540 | if (iwl_verify_ucode(priv)) { | ||
541 | /* Runtime instruction load was bad; | ||
542 | * take it all the way back down so we can try again */ | ||
543 | IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); | ||
544 | goto restart; | ||
545 | } | ||
546 | |||
547 | ret = priv->cfg->ops->lib->alive_notify(priv); | ||
548 | if (ret) { | ||
549 | IWL_WARN(priv, | ||
550 | "Could not complete ALIVE transition: %d\n", ret); | ||
551 | goto restart; | ||
552 | } | ||
553 | |||
554 | iwl5000_send_calib_cfg(priv); | ||
555 | return; | ||
556 | |||
557 | restart: | ||
558 | /* real restart (first load init_ucode) */ | ||
559 | queue_work(priv->workqueue, &priv->restart); | ||
560 | } | ||
561 | |||
562 | static void iwl5000_set_wr_ptrs(struct iwl_priv *priv, | ||
563 | int txq_id, u32 index) | ||
564 | { | ||
565 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, | ||
566 | (index & 0xff) | (txq_id << 8)); | ||
567 | iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index); | ||
568 | } | ||
569 | |||
570 | static void iwl5000_tx_queue_set_status(struct iwl_priv *priv, | ||
571 | struct iwl_tx_queue *txq, | ||
572 | int tx_fifo_id, int scd_retry) | ||
573 | { | ||
574 | int txq_id = txq->q.id; | ||
575 | int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; | ||
576 | |||
577 | iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id), | ||
578 | (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) | | ||
579 | (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) | | ||
580 | (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) | | ||
581 | IWL50_SCD_QUEUE_STTS_REG_MSK); | ||
582 | |||
583 | txq->sched_retry = scd_retry; | ||
584 | |||
585 | IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n", | ||
586 | active ? "Activate" : "Deactivate", | ||
587 | scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id); | ||
588 | } | ||
589 | |||
590 | int iwl5000_alive_notify(struct iwl_priv *priv) | ||
591 | { | ||
592 | u32 a; | ||
593 | unsigned long flags; | ||
594 | int i, chan; | ||
595 | u32 reg_val; | ||
596 | |||
597 | spin_lock_irqsave(&priv->lock, flags); | ||
598 | |||
599 | priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR); | ||
600 | a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET; | ||
601 | for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET; | ||
602 | a += 4) | ||
603 | iwl_write_targ_mem(priv, a, 0); | ||
604 | for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET; | ||
605 | a += 4) | ||
606 | iwl_write_targ_mem(priv, a, 0); | ||
607 | for (; a < priv->scd_base_addr + | ||
608 | IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) | ||
609 | iwl_write_targ_mem(priv, a, 0); | ||
610 | |||
611 | iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR, | ||
612 | priv->scd_bc_tbls.dma >> 10); | ||
613 | |||
614 | /* Enable DMA channel */ | ||
615 | for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++) | ||
616 | iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | ||
617 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
618 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); | ||
619 | |||
620 | /* Update FH chicken bits */ | ||
621 | reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); | ||
622 | iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, | ||
623 | reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); | ||
624 | |||
625 | iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, | ||
626 | IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num)); | ||
627 | iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0); | ||
628 | |||
629 | /* initialize the queues */ | ||
630 | for (i = 0; i < priv->hw_params.max_txq_num; i++) { | ||
631 | iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0); | ||
632 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); | ||
633 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
634 | IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0); | ||
635 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
636 | IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) + | ||
637 | sizeof(u32), | ||
638 | ((SCD_WIN_SIZE << | ||
639 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & | ||
640 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | | ||
641 | ((SCD_FRAME_LIMIT << | ||
642 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | ||
643 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); | ||
644 | } | ||
645 | |||
646 | iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK, | ||
647 | IWL_MASK(0, priv->hw_params.max_txq_num)); | ||
648 | |||
649 | /* Activate all Tx DMA/FIFO channels */ | ||
650 | priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); | ||
651 | |||
652 | iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); | ||
653 | |||
654 | /* make sure all queues are not stopped */ | ||
655 | memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); | ||
656 | for (i = 0; i < 4; i++) | ||
657 | atomic_set(&priv->queue_stop_count[i], 0); | ||
658 | |||
659 | /* reset to 0 to enable all queues first */ | ||
660 | priv->txq_ctx_active_msk = 0; | ||
661 | /* map qos queues to fifos one-to-one */ | ||
662 | BUILD_BUG_ON(ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo) != 10); | ||
663 | |||
664 | for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) { | ||
665 | int ac = iwl5000_default_queue_to_tx_fifo[i]; | ||
666 | |||
667 | iwl_txq_ctx_activate(priv, i); | ||
668 | |||
669 | if (ac == IWL_TX_FIFO_UNUSED) | ||
670 | continue; | ||
671 | |||
672 | iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0); | ||
673 | } | ||
674 | |||
675 | spin_unlock_irqrestore(&priv->lock, flags); | ||
676 | |||
677 | iwl_send_wimax_coex(priv); | ||
678 | |||
679 | iwl5000_set_Xtal_calib(priv); | ||
680 | iwl_send_calib_results(priv); | ||
681 | |||
682 | return 0; | ||
683 | } | ||
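
Each per-queue SRAM context written above combines the scheduler window size and frame limit into one 32-bit word. A standalone model of that packing; the shift and mask values mirror the IWL50_SCD_QUEUE_CTX_REG2_* definitions as assumed here for illustration, and SCD_WIN_SIZE / SCD_FRAME_LIMIT are taken to be 64:

#include <stdint.h>
#include <stdio.h>

#define WIN_SIZE_POS     0
#define WIN_SIZE_MSK     0x0000007FU   /* assumed */
#define FRAME_LIMIT_POS  16
#define FRAME_LIMIT_MSK  0x007F0000U   /* assumed */

static uint32_t scd_ctx_reg2(uint32_t win_size, uint32_t frame_limit)
{
        return ((win_size << WIN_SIZE_POS) & WIN_SIZE_MSK) |
               ((frame_limit << FRAME_LIMIT_POS) & FRAME_LIMIT_MSK);
}

int main(void)
{
        /* window size 64, frame limit 64 -> 0x00400040 */
        printf("0x%08x\n", (unsigned)scd_ctx_reg2(64, 64));
        return 0;
}
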
684 | |||
685 | int iwl5000_hw_set_hw_params(struct iwl_priv *priv) | ||
686 | { | 169 | { |
687 | if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && | 170 | if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && |
688 | priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) | 171 | priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) |
689 | priv->cfg->num_of_queues = | 172 | priv->cfg->num_of_queues = |
690 | priv->cfg->mod_params->num_of_queues; | 173 | priv->cfg->mod_params->num_of_queues; |
691 | 174 | ||
@@ -693,13 +176,13 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv) | |||
693 | priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; | 176 | priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; |
694 | priv->hw_params.scd_bc_tbls_size = | 177 | priv->hw_params.scd_bc_tbls_size = |
695 | priv->cfg->num_of_queues * | 178 | priv->cfg->num_of_queues * |
696 | sizeof(struct iwl5000_scd_bc_tbl); | 179 | sizeof(struct iwlagn_scd_bc_tbl); |
697 | priv->hw_params.tfd_size = sizeof(struct iwl_tfd); | 180 | priv->hw_params.tfd_size = sizeof(struct iwl_tfd); |
698 | priv->hw_params.max_stations = IWL5000_STATION_COUNT; | 181 | priv->hw_params.max_stations = IWL5000_STATION_COUNT; |
699 | priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; | 182 | priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; |
700 | 183 | ||
701 | priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; | 184 | priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; |
702 | priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE; | 185 | priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; |
703 | 186 | ||
704 | priv->hw_params.max_bsm_size = 0; | 187 | priv->hw_params.max_bsm_size = 0; |
705 | priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | | 188 | priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | |
@@ -740,547 +223,6 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv) | |||
740 | return 0; | 223 | return 0; |
741 | } | 224 | } |
742 | 225 | ||
743 | /** | ||
744 | * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array | ||
745 | */ | ||
746 | void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv, | ||
747 | struct iwl_tx_queue *txq, | ||
748 | u16 byte_cnt) | ||
749 | { | ||
750 | struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; | ||
751 | int write_ptr = txq->q.write_ptr; | ||
752 | int txq_id = txq->q.id; | ||
753 | u8 sec_ctl = 0; | ||
754 | u8 sta_id = 0; | ||
755 | u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; | ||
756 | __le16 bc_ent; | ||
757 | |||
758 | WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); | ||
759 | |||
760 | if (txq_id != IWL_CMD_QUEUE_NUM) { | ||
761 | sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; | ||
762 | sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; | ||
763 | |||
764 | switch (sec_ctl & TX_CMD_SEC_MSK) { | ||
765 | case TX_CMD_SEC_CCM: | ||
766 | len += CCMP_MIC_LEN; | ||
767 | break; | ||
768 | case TX_CMD_SEC_TKIP: | ||
769 | len += TKIP_ICV_LEN; | ||
770 | break; | ||
771 | case TX_CMD_SEC_WEP: | ||
772 | len += WEP_IV_LEN + WEP_ICV_LEN; | ||
773 | break; | ||
774 | } | ||
775 | } | ||
776 | |||
777 | bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); | ||
778 | |||
779 | scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; | ||
780 | |||
781 | if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
782 | scd_bc_tbl[txq_id]. | ||
783 | tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; | ||
784 | } | ||
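
The entry written to the scheduler byte-count table is a 16-bit word: the frame length, padded with CRC, delimiter and per-cipher overhead, occupies bits 0..11, and the station id occupies bits 12..15. A standalone sketch of that computation (the overhead constants are assumed values, named after the driver's macros, for illustration only):

#include <stdint.h>
#include <stdio.h>

#define TX_CRC_SIZE        4   /* assumed: IWL_TX_CRC_SIZE */
#define TX_DELIMITER_SIZE  4   /* assumed: IWL_TX_DELIMITER_SIZE */
#define CCMP_MIC_LEN       8   /* assumed */
#define TKIP_ICV_LEN       4   /* assumed */
#define WEP_IV_LEN         4   /* assumed */
#define WEP_ICV_LEN        4   /* assumed */

enum cipher { CIPHER_NONE, CIPHER_CCM, CIPHER_TKIP, CIPHER_WEP };

static uint16_t scd_bc_entry(uint16_t byte_cnt, uint8_t sta_id, enum cipher c)
{
        uint16_t len = byte_cnt + TX_CRC_SIZE + TX_DELIMITER_SIZE;

        switch (c) {
        case CIPHER_CCM:  len += CCMP_MIC_LEN;             break;
        case CIPHER_TKIP: len += TKIP_ICV_LEN;             break;
        case CIPHER_WEP:  len += WEP_IV_LEN + WEP_ICV_LEN; break;
        default:                                           break;
        }
        return (uint16_t)((len & 0xFFF) | (sta_id << 12));
}

int main(void)
{
        /* 100-byte frame, station 3, CCMP: (100+4+4+8) | (3 << 12) = 0x3074 */
        printf("0x%04x\n", (unsigned)scd_bc_entry(100, 3, CIPHER_CCM));
        return 0;
}
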
785 | |||
786 | void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, | ||
787 | struct iwl_tx_queue *txq) | ||
788 | { | ||
789 | struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; | ||
790 | int txq_id = txq->q.id; | ||
791 | int read_ptr = txq->q.read_ptr; | ||
792 | u8 sta_id = 0; | ||
793 | __le16 bc_ent; | ||
794 | |||
795 | WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); | ||
796 | |||
797 | if (txq_id != IWL_CMD_QUEUE_NUM) | ||
798 | sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; | ||
799 | |||
800 | bc_ent = cpu_to_le16(1 | (sta_id << 12)); | ||
801 | scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; | ||
802 | |||
803 | if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
804 | scd_bc_tbl[txq_id]. | ||
805 | tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; | ||
806 | } | ||
807 | |||
808 | static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, | ||
809 | u16 txq_id) | ||
810 | { | ||
811 | u32 tbl_dw_addr; | ||
812 | u32 tbl_dw; | ||
813 | u16 scd_q2ratid; | ||
814 | |||
815 | scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; | ||
816 | |||
817 | tbl_dw_addr = priv->scd_base_addr + | ||
818 | IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); | ||
819 | |||
820 | tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); | ||
821 | |||
822 | if (txq_id & 0x1) | ||
823 | tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); | ||
824 | else | ||
825 | tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); | ||
826 | |||
827 | iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); | ||
828 | |||
829 | return 0; | ||
830 | } | ||
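
The translation table packs two 16-bit RA/TID entries into each 32-bit SRAM word, and the low bit of the queue number selects which half gets overwritten. A self-contained model of that read-modify-write merge (the RA/TID mask is an assumed value for illustration):

#include <stdint.h>
#include <stdio.h>

#define RA_TID_MAP_RATID_MSK 0x01FF   /* assumed */

static uint32_t q2ratid_merge(uint32_t old_dw, uint16_t ra_tid, uint16_t txq_id)
{
        uint16_t entry = ra_tid & RA_TID_MAP_RATID_MSK;

        if (txq_id & 0x1)                       /* odd queue: upper half */
                return ((uint32_t)entry << 16) | (old_dw & 0x0000FFFF);
        return entry | (old_dw & 0xFFFF0000);   /* even queue: lower half */
}

int main(void)
{
        uint32_t dw = 0xAAAABBBB;

        printf("0x%08x\n", (unsigned)q2ratid_merge(dw, 0x0123, 10)); /* 0xAAAA0123 */
        printf("0x%08x\n", (unsigned)q2ratid_merge(dw, 0x0123, 11)); /* 0x0123BBBB */
        return 0;
}
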
831 | static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) | ||
832 | { | ||
833 | /* Simply stop the queue, but don't change any configuration; | ||
834 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ | ||
835 | iwl_write_prph(priv, | ||
836 | IWL50_SCD_QUEUE_STATUS_BITS(txq_id), | ||
837 | (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)| | ||
838 | (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); | ||
839 | } | ||
840 | |||
841 | int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id, | ||
842 | int tx_fifo, int sta_id, int tid, u16 ssn_idx) | ||
843 | { | ||
844 | unsigned long flags; | ||
845 | u16 ra_tid; | ||
846 | |||
847 | if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || | ||
848 | (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues | ||
849 | <= txq_id)) { | ||
850 | IWL_WARN(priv, | ||
851 | "queue number out of range: %d, must be %d to %d\n", | ||
852 | txq_id, IWL50_FIRST_AMPDU_QUEUE, | ||
853 | IWL50_FIRST_AMPDU_QUEUE + | ||
854 | priv->cfg->num_of_ampdu_queues - 1); | ||
855 | return -EINVAL; | ||
856 | } | ||
857 | |||
858 | ra_tid = BUILD_RAxTID(sta_id, tid); | ||
859 | |||
860 | /* Modify device's station table to Tx this TID */ | ||
861 | iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); | ||
862 | |||
863 | spin_lock_irqsave(&priv->lock, flags); | ||
864 | |||
865 | /* Stop this Tx queue before configuring it */ | ||
866 | iwl5000_tx_queue_stop_scheduler(priv, txq_id); | ||
867 | |||
868 | /* Map receiver-address / traffic-ID to this queue */ | ||
869 | iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id); | ||
870 | |||
871 | /* Set this queue as a chain-building queue */ | ||
872 | iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id)); | ||
873 | |||
874 | /* enable aggregations for the queue */ | ||
875 | iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id)); | ||
876 | |||
877 | /* Place first TFD at index corresponding to start sequence number. | ||
878 | * Assumes that ssn_idx is valid (!= 0xFFF) */ | ||
879 | priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); | ||
880 | priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); | ||
881 | iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx); | ||
882 | |||
883 | /* Set up Tx window size and frame limit for this queue */ | ||
884 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
885 | IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + | ||
886 | sizeof(u32), | ||
887 | ((SCD_WIN_SIZE << | ||
888 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & | ||
889 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | | ||
890 | ((SCD_FRAME_LIMIT << | ||
891 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | ||
892 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); | ||
893 | |||
894 | iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id)); | ||
895 | |||
896 | /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ | ||
897 | iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); | ||
898 | |||
899 | spin_unlock_irqrestore(&priv->lock, flags); | ||
900 | |||
901 | return 0; | ||
902 | } | ||
903 | |||
904 | int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, | ||
905 | u16 ssn_idx, u8 tx_fifo) | ||
906 | { | ||
907 | if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || | ||
908 | (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues | ||
909 | <= txq_id)) { | ||
910 | IWL_ERR(priv, | ||
911 | "queue number out of range: %d, must be %d to %d\n", | ||
912 | txq_id, IWL50_FIRST_AMPDU_QUEUE, | ||
913 | IWL50_FIRST_AMPDU_QUEUE + | ||
914 | priv->cfg->num_of_ampdu_queues - 1); | ||
915 | return -EINVAL; | ||
916 | } | ||
917 | |||
918 | iwl5000_tx_queue_stop_scheduler(priv, txq_id); | ||
919 | |||
920 | iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id)); | ||
921 | |||
922 | priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); | ||
923 | priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); | ||
924 | /* supposes that ssn_idx is valid (!= 0xFFF) */ | ||
925 | iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx); | ||
926 | |||
927 | iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id)); | ||
928 | iwl_txq_ctx_deactivate(priv, txq_id); | ||
929 | iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); | ||
930 | |||
931 | return 0; | ||
932 | } | ||
933 | |||
934 | u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) | ||
935 | { | ||
936 | u16 size = (u16)sizeof(struct iwl_addsta_cmd); | ||
937 | struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data; | ||
938 | memcpy(addsta, cmd, size); | ||
940 | /* reserved in 5000 */ | ||
940 | addsta->rate_n_flags = cpu_to_le16(0); | ||
941 | return size; | ||
942 | } | ||
943 | |||
944 | |||
945 | /* | ||
946 | * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask | ||
947 | * must be called under priv->lock and mac access | ||
948 | */ | ||
949 | void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask) | ||
950 | { | ||
951 | iwl_write_prph(priv, IWL50_SCD_TXFACT, mask); | ||
952 | } | ||
953 | |||
954 | |||
955 | static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) | ||
956 | { | ||
957 | return le32_to_cpup((__le32 *)&tx_resp->status + | ||
958 | tx_resp->frame_count) & MAX_SN; | ||
959 | } | ||
960 | |||
961 | static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv, | ||
962 | struct iwl_ht_agg *agg, | ||
963 | struct iwl5000_tx_resp *tx_resp, | ||
964 | int txq_id, u16 start_idx) | ||
965 | { | ||
966 | u16 status; | ||
967 | struct agg_tx_status *frame_status = &tx_resp->status; | ||
968 | struct ieee80211_tx_info *info = NULL; | ||
969 | struct ieee80211_hdr *hdr = NULL; | ||
970 | u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); | ||
971 | int i, sh, idx; | ||
972 | u16 seq; | ||
973 | |||
974 | if (agg->wait_for_ba) | ||
975 | IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n"); | ||
976 | |||
977 | agg->frame_count = tx_resp->frame_count; | ||
978 | agg->start_idx = start_idx; | ||
979 | agg->rate_n_flags = rate_n_flags; | ||
980 | agg->bitmap = 0; | ||
981 | |||
982 | /* # frames attempted by Tx command */ | ||
983 | if (agg->frame_count == 1) { | ||
984 | /* Only one frame was attempted; no block-ack will arrive */ | ||
985 | status = le16_to_cpu(frame_status[0].status); | ||
986 | idx = start_idx; | ||
987 | |||
988 | /* FIXME: code repetition */ | ||
989 | IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", | ||
990 | agg->frame_count, agg->start_idx, idx); | ||
991 | |||
992 | info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); | ||
993 | info->status.rates[0].count = tx_resp->failure_frame + 1; | ||
994 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; | ||
995 | info->flags |= iwl_tx_status_to_mac80211(status); | ||
996 | iwl_hwrate_to_tx_control(priv, rate_n_flags, info); | ||
997 | |||
998 | /* FIXME: code repetition end */ | ||
999 | |||
1000 | IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", | ||
1001 | status & 0xff, tx_resp->failure_frame); | ||
1002 | IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags); | ||
1003 | |||
1004 | agg->wait_for_ba = 0; | ||
1005 | } else { | ||
1006 | /* Two or more frames were attempted; expect block-ack */ | ||
1007 | u64 bitmap = 0; | ||
1008 | int start = agg->start_idx; | ||
1009 | |||
1010 | /* Construct bit-map of pending frames within Tx window */ | ||
1011 | for (i = 0; i < agg->frame_count; i++) { | ||
1012 | u16 sc; | ||
1013 | status = le16_to_cpu(frame_status[i].status); | ||
1014 | seq = le16_to_cpu(frame_status[i].sequence); | ||
1015 | idx = SEQ_TO_INDEX(seq); | ||
1016 | txq_id = SEQ_TO_QUEUE(seq); | ||
1017 | |||
1018 | if (status & (AGG_TX_STATE_FEW_BYTES_MSK | | ||
1019 | AGG_TX_STATE_ABORT_MSK)) | ||
1020 | continue; | ||
1021 | |||
1022 | IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", | ||
1023 | agg->frame_count, txq_id, idx); | ||
1024 | |||
1025 | hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); | ||
1026 | if (!hdr) { | ||
1027 | IWL_ERR(priv, | ||
1028 | "BUG_ON idx doesn't point to valid skb" | ||
1029 | " idx=%d, txq_id=%d\n", idx, txq_id); | ||
1030 | return -1; | ||
1031 | } | ||
1032 | |||
1033 | sc = le16_to_cpu(hdr->seq_ctrl); | ||
1034 | if (idx != (SEQ_TO_SN(sc) & 0xff)) { | ||
1035 | IWL_ERR(priv, | ||
1036 | "BUG_ON idx doesn't match seq control" | ||
1037 | " idx=%d, seq_idx=%d, seq=%d\n", | ||
1038 | idx, SEQ_TO_SN(sc), | ||
1039 | hdr->seq_ctrl); | ||
1040 | return -1; | ||
1041 | } | ||
1042 | |||
1043 | IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", | ||
1044 | i, idx, SEQ_TO_SN(sc)); | ||
1045 | |||
1046 | sh = idx - start; | ||
1047 | if (sh > 64) { | ||
1048 | sh = (start - idx) + 0xff; | ||
1049 | bitmap = bitmap << sh; | ||
1050 | sh = 0; | ||
1051 | start = idx; | ||
1052 | } else if (sh < -64) | ||
1053 | sh = 0xff - (start - idx); | ||
1054 | else if (sh < 0) { | ||
1055 | sh = start - idx; | ||
1056 | start = idx; | ||
1057 | bitmap = bitmap << sh; | ||
1058 | sh = 0; | ||
1059 | } | ||
1060 | bitmap |= 1ULL << sh; | ||
1061 | IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", | ||
1062 | start, (unsigned long long)bitmap); | ||
1063 | } | ||
1064 | |||
1065 | agg->bitmap = bitmap; | ||
1066 | agg->start_idx = start; | ||
1067 | IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", | ||
1068 | agg->frame_count, agg->start_idx, | ||
1069 | (unsigned long long)agg->bitmap); | ||
1070 | |||
1071 | if (bitmap) | ||
1072 | agg->wait_for_ba = 1; | ||
1073 | } | ||
1074 | return 0; | ||
1075 | } | ||
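
The loop above builds a 64-bit bitmap of attempted frames relative to a sliding window start, re-basing the window (and shifting the bitmap) whenever an index falls before the start or wraps far ahead, since indices wrap modulo 256. A standalone model of that construction follows; it is a reading of the removed code above, not the driver itself, and it inherits the same caveat that shifts of 64 or more are undefined in portable C:

#include <stdint.h>
#include <stdio.h>

static uint64_t build_agg_bitmap(const int *idx, int n, int start)
{
        uint64_t bitmap = 0;
        int i;

        for (i = 0; i < n; i++) {
                int sh = idx[i] - start;

                if (sh > 64) {                  /* wrapped far ahead of start */
                        sh = (start - idx[i]) + 0xff;
                        bitmap <<= sh;
                        sh = 0;
                        start = idx[i];
                } else if (sh < -64) {          /* wrapped behind start */
                        sh = 0xff - (start - idx[i]);
                } else if (sh < 0) {            /* re-base the window */
                        sh = start - idx[i];
                        start = idx[i];
                        bitmap <<= sh;
                        sh = 0;
                }
                bitmap |= 1ULL << sh;
        }
        return bitmap;
}

int main(void)
{
        int idx[] = { 5, 6, 8 };

        /* frames at offsets 0, 1 and 3 from start = 5 -> bits 0, 1, 3 */
        printf("0x%llx\n", (unsigned long long)build_agg_bitmap(idx, 3, 5)); /* 0xb */
        return 0;
}
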
1076 | |||
1077 | static void iwl5000_rx_reply_tx(struct iwl_priv *priv, | ||
1078 | struct iwl_rx_mem_buffer *rxb) | ||
1079 | { | ||
1080 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1081 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | ||
1082 | int txq_id = SEQ_TO_QUEUE(sequence); | ||
1083 | int index = SEQ_TO_INDEX(sequence); | ||
1084 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
1085 | struct ieee80211_tx_info *info; | ||
1086 | struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; | ||
1087 | u32 status = le16_to_cpu(tx_resp->status.status); | ||
1088 | int tid; | ||
1089 | int sta_id; | ||
1090 | int freed; | ||
1091 | |||
1092 | if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { | ||
1093 | IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " | ||
1094 | "is out of range [0-%d] %d %d\n", txq_id, | ||
1095 | index, txq->q.n_bd, txq->q.write_ptr, | ||
1096 | txq->q.read_ptr); | ||
1097 | return; | ||
1098 | } | ||
1099 | |||
1100 | info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); | ||
1101 | memset(&info->status, 0, sizeof(info->status)); | ||
1102 | |||
1103 | tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS; | ||
1104 | sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS; | ||
1105 | |||
1106 | if (txq->sched_retry) { | ||
1107 | const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp); | ||
1108 | struct iwl_ht_agg *agg = NULL; | ||
1109 | |||
1110 | agg = &priv->stations[sta_id].tid[tid].agg; | ||
1111 | |||
1112 | iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); | ||
1113 | |||
1114 | /* check if BAR is needed */ | ||
1115 | if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) | ||
1116 | info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; | ||
1117 | |||
1118 | if (txq->q.read_ptr != (scd_ssn & 0xff)) { | ||
1119 | index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); | ||
1120 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim " | ||
1121 | "scd_ssn=%d idx=%d txq=%d swq=%d\n", | ||
1122 | scd_ssn , index, txq_id, txq->swq_id); | ||
1123 | |||
1124 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | ||
1125 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | ||
1126 | |||
1127 | if (priv->mac80211_registered && | ||
1128 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | ||
1129 | (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { | ||
1130 | if (agg->state == IWL_AGG_OFF) | ||
1131 | iwl_wake_queue(priv, txq_id); | ||
1132 | else | ||
1133 | iwl_wake_queue(priv, txq->swq_id); | ||
1134 | } | ||
1135 | } | ||
1136 | } else { | ||
1137 | BUG_ON(txq_id != txq->swq_id); | ||
1138 | |||
1139 | info->status.rates[0].count = tx_resp->failure_frame + 1; | ||
1140 | info->flags |= iwl_tx_status_to_mac80211(status); | ||
1141 | iwl_hwrate_to_tx_control(priv, | ||
1142 | le32_to_cpu(tx_resp->rate_n_flags), | ||
1143 | info); | ||
1144 | |||
1145 | IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags " | ||
1146 | "0x%x retries %d\n", | ||
1147 | txq_id, | ||
1148 | iwl_get_tx_fail_reason(status), status, | ||
1149 | le32_to_cpu(tx_resp->rate_n_flags), | ||
1150 | tx_resp->failure_frame); | ||
1151 | |||
1152 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | ||
1153 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | ||
1154 | |||
1155 | if (priv->mac80211_registered && | ||
1156 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) | ||
1157 | iwl_wake_queue(priv, txq_id); | ||
1158 | } | ||
1159 | |||
1160 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | ||
1161 | |||
1162 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) | ||
1163 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); | ||
1164 | } | ||
1165 | |||
1166 | /* Currently 5000 is the superset of everything */ | ||
1167 | u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len) | ||
1168 | { | ||
1169 | return len; | ||
1170 | } | ||
1171 | |||
1172 | void iwl5000_setup_deferred_work(struct iwl_priv *priv) | ||
1173 | { | ||
1174 | /* in 5000 the tx power calibration is done in uCode */ | ||
1175 | priv->disable_tx_power_cal = 1; | ||
1176 | } | ||
1177 | |||
1178 | void iwl5000_rx_handler_setup(struct iwl_priv *priv) | ||
1179 | { | ||
1180 | /* init calibration handlers */ | ||
1181 | priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = | ||
1182 | iwl5000_rx_calib_result; | ||
1183 | priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = | ||
1184 | iwl5000_rx_calib_complete; | ||
1185 | priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx; | ||
1186 | } | ||
1187 | |||
1188 | |||
1189 | int iwl5000_hw_valid_rtc_data_addr(u32 addr) | ||
1190 | { | ||
1191 | return (addr >= IWL50_RTC_DATA_LOWER_BOUND) && | ||
1192 | (addr < IWL50_RTC_DATA_UPPER_BOUND); | ||
1193 | } | ||
1194 | |||
1195 | static int iwl5000_send_rxon_assoc(struct iwl_priv *priv) | ||
1196 | { | ||
1197 | int ret = 0; | ||
1198 | struct iwl5000_rxon_assoc_cmd rxon_assoc; | ||
1199 | const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; | ||
1200 | const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; | ||
1201 | |||
1202 | if ((rxon1->flags == rxon2->flags) && | ||
1203 | (rxon1->filter_flags == rxon2->filter_flags) && | ||
1204 | (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && | ||
1205 | (rxon1->ofdm_ht_single_stream_basic_rates == | ||
1206 | rxon2->ofdm_ht_single_stream_basic_rates) && | ||
1207 | (rxon1->ofdm_ht_dual_stream_basic_rates == | ||
1208 | rxon2->ofdm_ht_dual_stream_basic_rates) && | ||
1209 | (rxon1->ofdm_ht_triple_stream_basic_rates == | ||
1210 | rxon2->ofdm_ht_triple_stream_basic_rates) && | ||
1211 | (rxon1->acquisition_data == rxon2->acquisition_data) && | ||
1212 | (rxon1->rx_chain == rxon2->rx_chain) && | ||
1213 | (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { | ||
1214 | IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); | ||
1215 | return 0; | ||
1216 | } | ||
1217 | |||
1218 | rxon_assoc.flags = priv->staging_rxon.flags; | ||
1219 | rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; | ||
1220 | rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; | ||
1221 | rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; | ||
1222 | rxon_assoc.reserved1 = 0; | ||
1223 | rxon_assoc.reserved2 = 0; | ||
1224 | rxon_assoc.reserved3 = 0; | ||
1225 | rxon_assoc.ofdm_ht_single_stream_basic_rates = | ||
1226 | priv->staging_rxon.ofdm_ht_single_stream_basic_rates; | ||
1227 | rxon_assoc.ofdm_ht_dual_stream_basic_rates = | ||
1228 | priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; | ||
1229 | rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; | ||
1230 | rxon_assoc.ofdm_ht_triple_stream_basic_rates = | ||
1231 | priv->staging_rxon.ofdm_ht_triple_stream_basic_rates; | ||
1232 | rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data; | ||
1233 | |||
1234 | ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, | ||
1235 | sizeof(rxon_assoc), &rxon_assoc, NULL); | ||
1236 | if (ret) | ||
1237 | return ret; | ||
1238 | |||
1239 | return ret; | ||
1240 | } | ||
1241 | int iwl5000_send_tx_power(struct iwl_priv *priv) | ||
1242 | { | ||
1243 | struct iwl5000_tx_power_dbm_cmd tx_power_cmd; | ||
1244 | u8 tx_ant_cfg_cmd; | ||
1245 | |||
1246 | /* half dBm need to multiply */ | ||
1247 | tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); | ||
1248 | |||
1249 | if (priv->tx_power_lmt_in_half_dbm && | ||
1250 | priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) { | ||
1251 | /* | ||
1252 | * Newer devices that use the enhanced/extended tx power | ||
1253 | * table in EEPROM store the limit in half dBm, and the | ||
1254 | * driver must convert it to dBm before reporting it to | ||
1255 | * mac80211. That conversion can lose 1/2 dBm of | ||
1256 | * resolution: the driver rounds up before reporting, | ||
1257 | * which can push the tx power 1/2 dBm over the | ||
1258 | * regulatory limit. Check for that here: if | ||
1259 | * "tx_power_user_lmt" is higher than the EEPROM value (in | ||
1260 | * half-dBm format), lower the tx power based on the | ||
1261 | * EEPROM limit. | ||
1262 | tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm; | ||
1263 | } | ||
1264 | tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED; | ||
1265 | tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO; | ||
1266 | |||
1267 | if (IWL_UCODE_API(priv->ucode_ver) == 1) | ||
1268 | tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1; | ||
1269 | else | ||
1270 | tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; | ||
1271 | |||
1272 | return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd, | ||
1273 | sizeof(tx_power_cmd), &tx_power_cmd, | ||
1274 | NULL); | ||
1275 | } | ||
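
The limit selection above works in half-dBm units: the user limit (in dBm) is doubled, and when the EEPROM carries an enhanced tx power table, its already half-dBm value caps the result so that rounding up on report cannot exceed the regulatory limit. A minimal sketch:

#include <stdio.h>

static signed char tx_power_half_dbm(signed char user_lmt_dbm,
                                     signed char eeprom_lmt_half_dbm)
{
        signed char global = (signed char)(2 * user_lmt_dbm);

        /* a zero EEPROM value means no enhanced table limit is present */
        if (eeprom_lmt_half_dbm && eeprom_lmt_half_dbm < global)
                global = eeprom_lmt_half_dbm;
        return global;
}

int main(void)
{
        printf("%d\n", tx_power_half_dbm(15, 0));   /* 30: no EEPROM limit */
        printf("%d\n", tx_power_half_dbm(15, 29));  /* 29: clamped to EEPROM */
        return 0;
}
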
1276 | |||
1277 | void iwl5000_temperature(struct iwl_priv *priv) | ||
1278 | { | ||
1279 | /* store temperature from statistics (in Celsius) */ | ||
1280 | priv->temperature = le32_to_cpu(priv->statistics.general.temperature); | ||
1281 | iwl_tt_handler(priv); | ||
1282 | } | ||
1283 | |||
1284 | static void iwl5150_temperature(struct iwl_priv *priv) | 226 | static void iwl5150_temperature(struct iwl_priv *priv) |
1285 | { | 227 | { |
1286 | u32 vt = 0; | 228 | u32 vt = 0; |
@@ -1293,100 +235,6 @@ static void iwl5150_temperature(struct iwl_priv *priv) | |||
1293 | iwl_tt_handler(priv); | 235 | iwl_tt_handler(priv); |
1294 | } | 236 | } |
1295 | 237 | ||
1296 | /* Calc max signal level (dBm) among 3 possible receivers */ | ||
1297 | int iwl5000_calc_rssi(struct iwl_priv *priv, | ||
1298 | struct iwl_rx_phy_res *rx_resp) | ||
1299 | { | ||
1300 | /* data from PHY/DSP regarding signal strength, etc., | ||
1301 | * contents are always there, not configurable by host | ||
1302 | */ | ||
1303 | struct iwl5000_non_cfg_phy *ncphy = | ||
1304 | (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf; | ||
1305 | u32 val, rssi_a, rssi_b, rssi_c, max_rssi; | ||
1306 | u8 agc; | ||
1307 | |||
1308 | val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]); | ||
1309 | agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS; | ||
1310 | |||
1311 | /* Find max rssi among 3 possible receivers. | ||
1312 | * These values are measured by the digital signal processor (DSP). | ||
1313 | * They should stay fairly constant even as the signal strength varies, | ||
1314 | * if the radio's automatic gain control (AGC) is working right. | ||
1315 | * AGC value (see below) will provide the "interesting" info. | ||
1316 | */ | ||
1317 | val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]); | ||
1318 | rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS; | ||
1319 | rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS; | ||
1320 | val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]); | ||
1321 | rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS; | ||
1322 | |||
1323 | max_rssi = max_t(u32, rssi_a, rssi_b); | ||
1324 | max_rssi = max_t(u32, max_rssi, rssi_c); | ||
1325 | |||
1326 | IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", | ||
1327 | rssi_a, rssi_b, rssi_c, max_rssi, agc); | ||
1328 | |||
1329 | /* dBm = max_rssi dB - agc dB - constant. | ||
1330 | * Higher AGC (higher radio gain) means lower signal. */ | ||
1331 | return max_rssi - agc - IWL49_RSSI_OFFSET; | ||
1332 | } | ||
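
The conversion above reduces to taking the strongest of the three per-chain DSP readings and subtracting the AGC value plus a fixed offset. A standalone version, with the offset as an assumed illustrative constant (the driver uses IWL49_RSSI_OFFSET):

#include <stdio.h>

#define RSSI_OFFSET 44   /* assumed illustrative value */

static int max3(unsigned a, unsigned b, unsigned c)
{
        unsigned m = a > b ? a : b;
        return (int)(m > c ? m : c);
}

static int rssi_dbm(unsigned rssi_a, unsigned rssi_b, unsigned rssi_c,
                    unsigned agc)
{
        /* Higher AGC (more radio gain) means a weaker received signal. */
        return max3(rssi_a, rssi_b, rssi_c) - (int)agc - RSSI_OFFSET;
}

int main(void)
{
        printf("%d dBm\n", rssi_dbm(97, 94, 88, 100)); /* -47 dBm */
        return 0;
}
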
1333 | |||
1334 | static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) | ||
1335 | { | ||
1336 | struct iwl_tx_ant_config_cmd tx_ant_cmd = { | ||
1337 | .valid = cpu_to_le32(valid_tx_ant), | ||
1338 | }; | ||
1339 | |||
1340 | if (IWL_UCODE_API(priv->ucode_ver) > 1) { | ||
1341 | IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); | ||
1342 | return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, | ||
1343 | sizeof(struct iwl_tx_ant_config_cmd), | ||
1344 | &tx_ant_cmd); | ||
1345 | } else { | ||
1346 | IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n"); | ||
1347 | return -EOPNOTSUPP; | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | |||
1352 | #define IWL5000_UCODE_GET(item) \ | ||
1353 | static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\ | ||
1354 | u32 api_ver) \ | ||
1355 | { \ | ||
1356 | if (api_ver <= 2) \ | ||
1357 | return le32_to_cpu(ucode->u.v1.item); \ | ||
1358 | return le32_to_cpu(ucode->u.v2.item); \ | ||
1359 | } | ||
1360 | |||
1361 | static u32 iwl5000_ucode_get_header_size(u32 api_ver) | ||
1362 | { | ||
1363 | if (api_ver <= 2) | ||
1364 | return UCODE_HEADER_SIZE(1); | ||
1365 | return UCODE_HEADER_SIZE(2); | ||
1366 | } | ||
1367 | |||
1368 | static u32 iwl5000_ucode_get_build(const struct iwl_ucode_header *ucode, | ||
1369 | u32 api_ver) | ||
1370 | { | ||
1371 | if (api_ver <= 2) | ||
1372 | return 0; | ||
1373 | return le32_to_cpu(ucode->u.v2.build); | ||
1374 | } | ||
1375 | |||
1376 | static u8 *iwl5000_ucode_get_data(const struct iwl_ucode_header *ucode, | ||
1377 | u32 api_ver) | ||
1378 | { | ||
1379 | if (api_ver <= 2) | ||
1380 | return (u8 *) ucode->u.v1.data; | ||
1381 | return (u8 *) ucode->u.v2.data; | ||
1382 | } | ||
1383 | |||
1384 | IWL5000_UCODE_GET(inst_size); | ||
1385 | IWL5000_UCODE_GET(data_size); | ||
1386 | IWL5000_UCODE_GET(init_size); | ||
1387 | IWL5000_UCODE_GET(init_data_size); | ||
1388 | IWL5000_UCODE_GET(boot_size); | ||
1389 | |||
1390 | static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel) | 238 | static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel) |
1391 | { | 239 | { |
1392 | struct iwl5000_channel_switch_cmd cmd; | 240 | struct iwl5000_channel_switch_cmd cmd; |
@@ -1419,54 +267,27 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel) | |||
1419 | return iwl_send_cmd_sync(priv, &hcmd); | 267 | return iwl_send_cmd_sync(priv, &hcmd); |
1420 | } | 268 | } |
1421 | 269 | ||
1422 | struct iwl_hcmd_ops iwl5000_hcmd = { | 270 | static struct iwl_lib_ops iwl5000_lib = { |
1423 | .rxon_assoc = iwl5000_send_rxon_assoc, | ||
1424 | .commit_rxon = iwl_commit_rxon, | ||
1425 | .set_rxon_chain = iwl_set_rxon_chain, | ||
1426 | .set_tx_ant = iwl5000_send_tx_ant_config, | ||
1427 | }; | ||
1428 | |||
1429 | struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = { | ||
1430 | .get_hcmd_size = iwl5000_get_hcmd_size, | ||
1431 | .build_addsta_hcmd = iwl5000_build_addsta_hcmd, | ||
1432 | .gain_computation = iwl5000_gain_computation, | ||
1433 | .chain_noise_reset = iwl5000_chain_noise_reset, | ||
1434 | .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag, | ||
1435 | .calc_rssi = iwl5000_calc_rssi, | ||
1436 | }; | ||
1437 | |||
1438 | struct iwl_ucode_ops iwl5000_ucode = { | ||
1439 | .get_header_size = iwl5000_ucode_get_header_size, | ||
1440 | .get_build = iwl5000_ucode_get_build, | ||
1441 | .get_inst_size = iwl5000_ucode_get_inst_size, | ||
1442 | .get_data_size = iwl5000_ucode_get_data_size, | ||
1443 | .get_init_size = iwl5000_ucode_get_init_size, | ||
1444 | .get_init_data_size = iwl5000_ucode_get_init_data_size, | ||
1445 | .get_boot_size = iwl5000_ucode_get_boot_size, | ||
1446 | .get_data = iwl5000_ucode_get_data, | ||
1447 | }; | ||
1448 | |||
1449 | struct iwl_lib_ops iwl5000_lib = { | ||
1450 | .set_hw_params = iwl5000_hw_set_hw_params, | 271 | .set_hw_params = iwl5000_hw_set_hw_params, |
1451 | .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, | 272 | .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, |
1452 | .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, | 273 | .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, |
1453 | .txq_set_sched = iwl5000_txq_set_sched, | 274 | .txq_set_sched = iwlagn_txq_set_sched, |
1454 | .txq_agg_enable = iwl5000_txq_agg_enable, | 275 | .txq_agg_enable = iwlagn_txq_agg_enable, |
1455 | .txq_agg_disable = iwl5000_txq_agg_disable, | 276 | .txq_agg_disable = iwlagn_txq_agg_disable, |
1456 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, | 277 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, |
1457 | .txq_free_tfd = iwl_hw_txq_free_tfd, | 278 | .txq_free_tfd = iwl_hw_txq_free_tfd, |
1458 | .txq_init = iwl_hw_tx_queue_init, | 279 | .txq_init = iwl_hw_tx_queue_init, |
1459 | .rx_handler_setup = iwl5000_rx_handler_setup, | 280 | .rx_handler_setup = iwlagn_rx_handler_setup, |
1460 | .setup_deferred_work = iwl5000_setup_deferred_work, | 281 | .setup_deferred_work = iwlagn_setup_deferred_work, |
1461 | .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, | 282 | .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, |
1462 | .dump_nic_event_log = iwl_dump_nic_event_log, | 283 | .dump_nic_event_log = iwl_dump_nic_event_log, |
1463 | .dump_nic_error_log = iwl_dump_nic_error_log, | 284 | .dump_nic_error_log = iwl_dump_nic_error_log, |
1464 | .dump_csr = iwl_dump_csr, | 285 | .dump_csr = iwl_dump_csr, |
1465 | .dump_fh = iwl_dump_fh, | 286 | .dump_fh = iwl_dump_fh, |
1466 | .load_ucode = iwl5000_load_ucode, | 287 | .load_ucode = iwlagn_load_ucode, |
1467 | .init_alive_start = iwl5000_init_alive_start, | 288 | .init_alive_start = iwlagn_init_alive_start, |
1468 | .alive_notify = iwl5000_alive_notify, | 289 | .alive_notify = iwlagn_alive_notify, |
1469 | .send_tx_power = iwl5000_send_tx_power, | 290 | .send_tx_power = iwlagn_send_tx_power, |
1470 | .update_chain_flags = iwl_update_chain_flags, | 291 | .update_chain_flags = iwl_update_chain_flags, |
1471 | .set_channel_switch = iwl5000_hw_channel_switch, | 292 | .set_channel_switch = iwl5000_hw_channel_switch, |
1472 | .apm_ops = { | 293 | .apm_ops = { |
@@ -1477,25 +298,25 @@ struct iwl_lib_ops iwl5000_lib = { | |||
1477 | }, | 298 | }, |
1478 | .eeprom_ops = { | 299 | .eeprom_ops = { |
1479 | .regulatory_bands = { | 300 | .regulatory_bands = { |
1480 | EEPROM_5000_REG_BAND_1_CHANNELS, | 301 | EEPROM_REG_BAND_1_CHANNELS, |
1481 | EEPROM_5000_REG_BAND_2_CHANNELS, | 302 | EEPROM_REG_BAND_2_CHANNELS, |
1482 | EEPROM_5000_REG_BAND_3_CHANNELS, | 303 | EEPROM_REG_BAND_3_CHANNELS, |
1483 | EEPROM_5000_REG_BAND_4_CHANNELS, | 304 | EEPROM_REG_BAND_4_CHANNELS, |
1484 | EEPROM_5000_REG_BAND_5_CHANNELS, | 305 | EEPROM_REG_BAND_5_CHANNELS, |
1485 | EEPROM_5000_REG_BAND_24_HT40_CHANNELS, | 306 | EEPROM_REG_BAND_24_HT40_CHANNELS, |
1486 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS | 307 | EEPROM_REG_BAND_52_HT40_CHANNELS |
1487 | }, | 308 | }, |
1488 | .verify_signature = iwlcore_eeprom_verify_signature, | 309 | .verify_signature = iwlcore_eeprom_verify_signature, |
1489 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, | 310 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, |
1490 | .release_semaphore = iwlcore_eeprom_release_semaphore, | 311 | .release_semaphore = iwlcore_eeprom_release_semaphore, |
1491 | .calib_version = iwl5000_eeprom_calib_version, | 312 | .calib_version = iwlagn_eeprom_calib_version, |
1492 | .query_addr = iwl5000_eeprom_query_addr, | 313 | .query_addr = iwlagn_eeprom_query_addr, |
1493 | }, | 314 | }, |
1494 | .post_associate = iwl_post_associate, | 315 | .post_associate = iwl_post_associate, |
1495 | .isr = iwl_isr_ict, | 316 | .isr = iwl_isr_ict, |
1496 | .config_ap = iwl_config_ap, | 317 | .config_ap = iwl_config_ap, |
1497 | .temp_ops = { | 318 | .temp_ops = { |
1498 | .temperature = iwl5000_temperature, | 319 | .temperature = iwlagn_temperature, |
1499 | .set_ct_kill = iwl5000_set_ct_threshold, | 320 | .set_ct_kill = iwl5000_set_ct_threshold, |
1500 | }, | 321 | }, |
1501 | .add_bcast_station = iwl_add_bcast_station, | 322 | .add_bcast_station = iwl_add_bcast_station, |
@@ -1506,24 +327,24 @@ struct iwl_lib_ops iwl5000_lib = { | |||
1506 | 327 | ||
1507 | static struct iwl_lib_ops iwl5150_lib = { | 328 | static struct iwl_lib_ops iwl5150_lib = { |
1508 | .set_hw_params = iwl5000_hw_set_hw_params, | 329 | .set_hw_params = iwl5000_hw_set_hw_params, |
1509 | .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, | 330 | .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, |
1510 | .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, | 331 | .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, |
1511 | .txq_set_sched = iwl5000_txq_set_sched, | 332 | .txq_set_sched = iwlagn_txq_set_sched, |
1512 | .txq_agg_enable = iwl5000_txq_agg_enable, | 333 | .txq_agg_enable = iwlagn_txq_agg_enable, |
1513 | .txq_agg_disable = iwl5000_txq_agg_disable, | 334 | .txq_agg_disable = iwlagn_txq_agg_disable, |
1514 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, | 335 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, |
1515 | .txq_free_tfd = iwl_hw_txq_free_tfd, | 336 | .txq_free_tfd = iwl_hw_txq_free_tfd, |
1516 | .txq_init = iwl_hw_tx_queue_init, | 337 | .txq_init = iwl_hw_tx_queue_init, |
1517 | .rx_handler_setup = iwl5000_rx_handler_setup, | 338 | .rx_handler_setup = iwlagn_rx_handler_setup, |
1518 | .setup_deferred_work = iwl5000_setup_deferred_work, | 339 | .setup_deferred_work = iwlagn_setup_deferred_work, |
1519 | .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, | 340 | .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, |
1520 | .dump_nic_event_log = iwl_dump_nic_event_log, | 341 | .dump_nic_event_log = iwl_dump_nic_event_log, |
1521 | .dump_nic_error_log = iwl_dump_nic_error_log, | 342 | .dump_nic_error_log = iwl_dump_nic_error_log, |
1522 | .dump_csr = iwl_dump_csr, | 343 | .dump_csr = iwl_dump_csr, |
1523 | .load_ucode = iwl5000_load_ucode, | 344 | .load_ucode = iwlagn_load_ucode, |
1524 | .init_alive_start = iwl5000_init_alive_start, | 345 | .init_alive_start = iwlagn_init_alive_start, |
1525 | .alive_notify = iwl5000_alive_notify, | 346 | .alive_notify = iwlagn_alive_notify, |
1526 | .send_tx_power = iwl5000_send_tx_power, | 347 | .send_tx_power = iwlagn_send_tx_power, |
1527 | .update_chain_flags = iwl_update_chain_flags, | 348 | .update_chain_flags = iwl_update_chain_flags, |
1528 | .set_channel_switch = iwl5000_hw_channel_switch, | 349 | .set_channel_switch = iwl5000_hw_channel_switch, |
1529 | .apm_ops = { | 350 | .apm_ops = { |
@@ -1534,19 +355,19 @@ static struct iwl_lib_ops iwl5150_lib = { | |||
1534 | }, | 355 | }, |
1535 | .eeprom_ops = { | 356 | .eeprom_ops = { |
1536 | .regulatory_bands = { | 357 | .regulatory_bands = { |
1537 | EEPROM_5000_REG_BAND_1_CHANNELS, | 358 | EEPROM_REG_BAND_1_CHANNELS, |
1538 | EEPROM_5000_REG_BAND_2_CHANNELS, | 359 | EEPROM_REG_BAND_2_CHANNELS, |
1539 | EEPROM_5000_REG_BAND_3_CHANNELS, | 360 | EEPROM_REG_BAND_3_CHANNELS, |
1540 | EEPROM_5000_REG_BAND_4_CHANNELS, | 361 | EEPROM_REG_BAND_4_CHANNELS, |
1541 | EEPROM_5000_REG_BAND_5_CHANNELS, | 362 | EEPROM_REG_BAND_5_CHANNELS, |
1542 | EEPROM_5000_REG_BAND_24_HT40_CHANNELS, | 363 | EEPROM_REG_BAND_24_HT40_CHANNELS, |
1543 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS | 364 | EEPROM_REG_BAND_52_HT40_CHANNELS |
1544 | }, | 365 | }, |
1545 | .verify_signature = iwlcore_eeprom_verify_signature, | 366 | .verify_signature = iwlcore_eeprom_verify_signature, |
1546 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, | 367 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, |
1547 | .release_semaphore = iwlcore_eeprom_release_semaphore, | 368 | .release_semaphore = iwlcore_eeprom_release_semaphore, |
1548 | .calib_version = iwl5000_eeprom_calib_version, | 369 | .calib_version = iwlagn_eeprom_calib_version, |
1549 | .query_addr = iwl5000_eeprom_query_addr, | 370 | .query_addr = iwlagn_eeprom_query_addr, |
1550 | }, | 371 | }, |
1551 | .post_associate = iwl_post_associate, | 372 | .post_associate = iwl_post_associate, |
1552 | .isr = iwl_isr_ict, | 373 | .isr = iwl_isr_ict, |
@@ -1562,28 +383,21 @@ static struct iwl_lib_ops iwl5150_lib = { | |||
1562 | }; | 383 | }; |
1563 | 384 | ||
1564 | static const struct iwl_ops iwl5000_ops = { | 385 | static const struct iwl_ops iwl5000_ops = { |
1565 | .ucode = &iwl5000_ucode, | 386 | .ucode = &iwlagn_ucode, |
1566 | .lib = &iwl5000_lib, | 387 | .lib = &iwl5000_lib, |
1567 | .hcmd = &iwl5000_hcmd, | 388 | .hcmd = &iwlagn_hcmd, |
1568 | .utils = &iwl5000_hcmd_utils, | 389 | .utils = &iwlagn_hcmd_utils, |
1569 | .led = &iwlagn_led_ops, | 390 | .led = &iwlagn_led_ops, |
1570 | }; | 391 | }; |
1571 | 392 | ||
1572 | static const struct iwl_ops iwl5150_ops = { | 393 | static const struct iwl_ops iwl5150_ops = { |
1573 | .ucode = &iwl5000_ucode, | 394 | .ucode = &iwlagn_ucode, |
1574 | .lib = &iwl5150_lib, | 395 | .lib = &iwl5150_lib, |
1575 | .hcmd = &iwl5000_hcmd, | 396 | .hcmd = &iwlagn_hcmd, |
1576 | .utils = &iwl5000_hcmd_utils, | 397 | .utils = &iwlagn_hcmd_utils, |
1577 | .led = &iwlagn_led_ops, | 398 | .led = &iwlagn_led_ops, |
1578 | }; | 399 | }; |
1579 | 400 | ||
1580 | struct iwl_mod_params iwl50_mod_params = { | ||
1581 | .amsdu_size_8K = 1, | ||
1582 | .restart_fw = 1, | ||
1583 | /* the rest are 0 by default */ | ||
1584 | }; | ||
1585 | |||
1586 | |||
1587 | struct iwl_cfg iwl5300_agn_cfg = { | 401 | struct iwl_cfg iwl5300_agn_cfg = { |
1588 | .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", | 402 | .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", |
1589 | .fw_name_pre = IWL5000_FW_PRE, | 403 | .fw_name_pre = IWL5000_FW_PRE, |
@@ -1591,12 +405,12 @@ struct iwl_cfg iwl5300_agn_cfg = { | |||
1591 | .ucode_api_min = IWL5000_UCODE_API_MIN, | 405 | .ucode_api_min = IWL5000_UCODE_API_MIN, |
1592 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, | 406 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, |
1593 | .ops = &iwl5000_ops, | 407 | .ops = &iwl5000_ops, |
1594 | .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, | 408 | .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, |
1595 | .eeprom_ver = EEPROM_5000_EEPROM_VERSION, | 409 | .eeprom_ver = EEPROM_5000_EEPROM_VERSION, |
1596 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 410 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, |
1597 | .num_of_queues = IWL50_NUM_QUEUES, | 411 | .num_of_queues = IWLAGN_NUM_QUEUES, |
1598 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 412 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
1599 | .mod_params = &iwl50_mod_params, | 413 | .mod_params = &iwlagn_mod_params, |
1600 | .valid_tx_ant = ANT_ABC, | 414 | .valid_tx_ant = ANT_ABC, |
1601 | .valid_rx_ant = ANT_ABC, | 415 | .valid_rx_ant = ANT_ABC, |
1602 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, | 416 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, |
@@ -1609,6 +423,7 @@ struct iwl_cfg iwl5300_agn_cfg = { | |||
1609 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 423 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
1610 | .chain_noise_scale = 1000, | 424 | .chain_noise_scale = 1000, |
1611 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 425 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
426 | .max_event_log_size = 512, | ||
1612 | }; | 427 | }; |
1613 | 428 | ||
1614 | struct iwl_cfg iwl5100_bgn_cfg = { | 429 | struct iwl_cfg iwl5100_bgn_cfg = { |
@@ -1618,12 +433,12 @@ struct iwl_cfg iwl5100_bgn_cfg = { | |||
1618 | .ucode_api_min = IWL5000_UCODE_API_MIN, | 433 | .ucode_api_min = IWL5000_UCODE_API_MIN, |
1619 | .sku = IWL_SKU_G|IWL_SKU_N, | 434 | .sku = IWL_SKU_G|IWL_SKU_N, |
1620 | .ops = &iwl5000_ops, | 435 | .ops = &iwl5000_ops, |
1621 | .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, | 436 | .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, |
1622 | .eeprom_ver = EEPROM_5000_EEPROM_VERSION, | 437 | .eeprom_ver = EEPROM_5000_EEPROM_VERSION, |
1623 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 438 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, |
1624 | .num_of_queues = IWL50_NUM_QUEUES, | 439 | .num_of_queues = IWLAGN_NUM_QUEUES, |
1625 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 440 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
1626 | .mod_params = &iwl50_mod_params, | 441 | .mod_params = &iwlagn_mod_params, |
1627 | .valid_tx_ant = ANT_B, | 442 | .valid_tx_ant = ANT_B, |
1628 | .valid_rx_ant = ANT_AB, | 443 | .valid_rx_ant = ANT_AB, |
1629 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, | 444 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, |
@@ -1636,6 +451,7 @@ struct iwl_cfg iwl5100_bgn_cfg = { | |||
1636 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 451 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
1637 | .chain_noise_scale = 1000, | 452 | .chain_noise_scale = 1000, |
1638 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 453 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
454 | .max_event_log_size = 512, | ||
1639 | }; | 455 | }; |
1640 | 456 | ||
1641 | struct iwl_cfg iwl5100_abg_cfg = { | 457 | struct iwl_cfg iwl5100_abg_cfg = { |
@@ -1645,12 +461,12 @@ struct iwl_cfg iwl5100_abg_cfg = { | |||
1645 | .ucode_api_min = IWL5000_UCODE_API_MIN, | 461 | .ucode_api_min = IWL5000_UCODE_API_MIN, |
1646 | .sku = IWL_SKU_A|IWL_SKU_G, | 462 | .sku = IWL_SKU_A|IWL_SKU_G, |
1647 | .ops = &iwl5000_ops, | 463 | .ops = &iwl5000_ops, |
1648 | .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, | 464 | .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, |
1649 | .eeprom_ver = EEPROM_5000_EEPROM_VERSION, | 465 | .eeprom_ver = EEPROM_5000_EEPROM_VERSION, |
1650 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 466 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, |
1651 | .num_of_queues = IWL50_NUM_QUEUES, | 467 | .num_of_queues = IWLAGN_NUM_QUEUES, |
1652 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 468 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
1653 | .mod_params = &iwl50_mod_params, | 469 | .mod_params = &iwlagn_mod_params, |
1654 | .valid_tx_ant = ANT_B, | 470 | .valid_tx_ant = ANT_B, |
1655 | .valid_rx_ant = ANT_AB, | 471 | .valid_rx_ant = ANT_AB, |
1656 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, | 472 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, |
@@ -1661,6 +477,7 @@ struct iwl_cfg iwl5100_abg_cfg = { | |||
1661 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 477 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
1662 | .chain_noise_scale = 1000, | 478 | .chain_noise_scale = 1000, |
1663 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 479 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
480 | .max_event_log_size = 512, | ||
1664 | }; | 481 | }; |
1665 | 482 | ||
1666 | struct iwl_cfg iwl5100_agn_cfg = { | 483 | struct iwl_cfg iwl5100_agn_cfg = { |
@@ -1670,12 +487,12 @@ struct iwl_cfg iwl5100_agn_cfg = { | |||
1670 | .ucode_api_min = IWL5000_UCODE_API_MIN, | 487 | .ucode_api_min = IWL5000_UCODE_API_MIN, |
1671 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, | 488 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, |
1672 | .ops = &iwl5000_ops, | 489 | .ops = &iwl5000_ops, |
1673 | .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, | 490 | .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, |
1674 | .eeprom_ver = EEPROM_5000_EEPROM_VERSION, | 491 | .eeprom_ver = EEPROM_5000_EEPROM_VERSION, |
1675 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 492 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, |
1676 | .num_of_queues = IWL50_NUM_QUEUES, | 493 | .num_of_queues = IWLAGN_NUM_QUEUES, |
1677 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 494 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
1678 | .mod_params = &iwl50_mod_params, | 495 | .mod_params = &iwlagn_mod_params, |
1679 | .valid_tx_ant = ANT_B, | 496 | .valid_tx_ant = ANT_B, |
1680 | .valid_rx_ant = ANT_AB, | 497 | .valid_rx_ant = ANT_AB, |
1681 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, | 498 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, |
@@ -1688,6 +505,7 @@ struct iwl_cfg iwl5100_agn_cfg = { | |||
1688 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 505 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
1689 | .chain_noise_scale = 1000, | 506 | .chain_noise_scale = 1000, |
1690 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 507 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
508 | .max_event_log_size = 512, | ||
1691 | }; | 509 | }; |
1692 | 510 | ||
1693 | struct iwl_cfg iwl5350_agn_cfg = { | 511 | struct iwl_cfg iwl5350_agn_cfg = { |
@@ -1697,12 +515,12 @@ struct iwl_cfg iwl5350_agn_cfg = { | |||
1697 | .ucode_api_min = IWL5000_UCODE_API_MIN, | 515 | .ucode_api_min = IWL5000_UCODE_API_MIN, |
1698 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, | 516 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, |
1699 | .ops = &iwl5000_ops, | 517 | .ops = &iwl5000_ops, |
1700 | .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, | 518 | .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, |
1701 | .eeprom_ver = EEPROM_5050_EEPROM_VERSION, | 519 | .eeprom_ver = EEPROM_5050_EEPROM_VERSION, |
1702 | .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, | 520 | .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, |
1703 | .num_of_queues = IWL50_NUM_QUEUES, | 521 | .num_of_queues = IWLAGN_NUM_QUEUES, |
1704 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 522 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
1705 | .mod_params = &iwl50_mod_params, | 523 | .mod_params = &iwlagn_mod_params, |
1706 | .valid_tx_ant = ANT_ABC, | 524 | .valid_tx_ant = ANT_ABC, |
1707 | .valid_rx_ant = ANT_ABC, | 525 | .valid_rx_ant = ANT_ABC, |
1708 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, | 526 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, |
@@ -1715,6 +533,7 @@ struct iwl_cfg iwl5350_agn_cfg = { | |||
1715 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 533 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
1716 | .chain_noise_scale = 1000, | 534 | .chain_noise_scale = 1000, |
1717 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 535 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
536 | .max_event_log_size = 512, | ||
1718 | }; | 537 | }; |
1719 | 538 | ||
1720 | struct iwl_cfg iwl5150_agn_cfg = { | 539 | struct iwl_cfg iwl5150_agn_cfg = { |
@@ -1724,12 +543,12 @@ struct iwl_cfg iwl5150_agn_cfg = { | |||
1724 | .ucode_api_min = IWL5150_UCODE_API_MIN, | 543 | .ucode_api_min = IWL5150_UCODE_API_MIN, |
1725 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, | 544 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, |
1726 | .ops = &iwl5150_ops, | 545 | .ops = &iwl5150_ops, |
1727 | .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, | 546 | .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, |
1728 | .eeprom_ver = EEPROM_5050_EEPROM_VERSION, | 547 | .eeprom_ver = EEPROM_5050_EEPROM_VERSION, |
1729 | .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, | 548 | .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, |
1730 | .num_of_queues = IWL50_NUM_QUEUES, | 549 | .num_of_queues = IWLAGN_NUM_QUEUES, |
1731 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 550 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
1732 | .mod_params = &iwl50_mod_params, | 551 | .mod_params = &iwlagn_mod_params, |
1733 | .valid_tx_ant = ANT_A, | 552 | .valid_tx_ant = ANT_A, |
1734 | .valid_rx_ant = ANT_AB, | 553 | .valid_rx_ant = ANT_AB, |
1735 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, | 554 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, |
@@ -1742,6 +561,7 @@ struct iwl_cfg iwl5150_agn_cfg = { | |||
1742 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 561 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
1743 | .chain_noise_scale = 1000, | 562 | .chain_noise_scale = 1000, |
1744 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 563 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
564 | .max_event_log_size = 512, | ||
1745 | }; | 565 | }; |
1746 | 566 | ||
1747 | struct iwl_cfg iwl5150_abg_cfg = { | 567 | struct iwl_cfg iwl5150_abg_cfg = { |
@@ -1751,12 +571,12 @@ struct iwl_cfg iwl5150_abg_cfg = { | |||
1751 | .ucode_api_min = IWL5150_UCODE_API_MIN, | 571 | .ucode_api_min = IWL5150_UCODE_API_MIN, |
1752 | .sku = IWL_SKU_A|IWL_SKU_G, | 572 | .sku = IWL_SKU_A|IWL_SKU_G, |
1753 | .ops = &iwl5150_ops, | 573 | .ops = &iwl5150_ops, |
1754 | .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, | 574 | .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, |
1755 | .eeprom_ver = EEPROM_5050_EEPROM_VERSION, | 575 | .eeprom_ver = EEPROM_5050_EEPROM_VERSION, |
1756 | .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, | 576 | .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, |
1757 | .num_of_queues = IWL50_NUM_QUEUES, | 577 | .num_of_queues = IWLAGN_NUM_QUEUES, |
1758 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 578 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
1759 | .mod_params = &iwl50_mod_params, | 579 | .mod_params = &iwlagn_mod_params, |
1760 | .valid_tx_ant = ANT_A, | 580 | .valid_tx_ant = ANT_A, |
1761 | .valid_rx_ant = ANT_AB, | 581 | .valid_rx_ant = ANT_AB, |
1762 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, | 582 | .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, |
@@ -1767,20 +587,8 @@ struct iwl_cfg iwl5150_abg_cfg = { | |||
1767 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 587 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
1768 | .chain_noise_scale = 1000, | 588 | .chain_noise_scale = 1000, |
1769 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 589 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
590 | .max_event_log_size = 512, | ||
1770 | }; | 591 | }; |
1771 | 592 | ||
1772 | MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); | 593 | MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); |
1773 | MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); | 594 | MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); |
1774 | |||
1775 | module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO); | ||
1776 | MODULE_PARM_DESC(swcrypto50, | ||
1777 | "using software crypto engine (default 0 [hardware])\n"); | ||
1778 | module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO); | ||
1779 | MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series"); | ||
1780 | module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO); | ||
1781 | MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality"); | ||
1782 | module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, | ||
1783 | int, S_IRUGO); | ||
1784 | MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series"); | ||
1785 | module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO); | ||
1786 | MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error"); | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index d75799946a7e..dd03384432f4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include "iwl-sta.h" | 44 | #include "iwl-sta.h" |
45 | #include "iwl-agn.h" | 45 | #include "iwl-agn.h" |
46 | #include "iwl-helpers.h" | 46 | #include "iwl-helpers.h" |
47 | #include "iwl-5000-hw.h" | 47 | #include "iwl-agn-hw.h" |
48 | #include "iwl-6000-hw.h" | 48 | #include "iwl-6000-hw.h" |
49 | #include "iwl-agn-led.h" | 49 | #include "iwl-agn-led.h" |
50 | 50 | ||
@@ -57,6 +57,7 @@ | |||
57 | #define IWL6050_UCODE_API_MIN 4 | 57 | #define IWL6050_UCODE_API_MIN 4 |
58 | 58 | ||
59 | #define IWL6000_FW_PRE "iwlwifi-6000-" | 59 | #define IWL6000_FW_PRE "iwlwifi-6000-" |
60 | #define IWL6000_G2_FW_PRE "iwlwifi-6005-" | ||
60 | #define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" | 61 | #define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" |
61 | #define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api) | 62 | #define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api) |
62 | 63 | ||
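The two-level _IWL6000_MODULE_FIRMWARE/IWL6000_MODULE_FIRMWARE pair above exists so the API number is macro-expanded before being stringized into the firmware file name. A minimal user-space sketch of that expansion follows; the API value of 4 is assumed purely for illustration (the real IWL6000_UCODE_API_MAX is defined elsewhere in this file).

#include <stdio.h>

#define IWL6000_FW_PRE "iwlwifi-6000-"
#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
#define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api)

#define EXAMPLE_UCODE_API_MAX 4		/* assumed value, for illustration only */

int main(void)
{
	/* the extra indirection stringizes 4, not the macro name */
	printf("%s\n", IWL6000_MODULE_FIRMWARE(EXAMPLE_UCODE_API_MAX));
	/* prints: iwlwifi-6000-4.ucode */
	return 0;
}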
@@ -137,7 +138,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = { | |||
137 | static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) | 138 | static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) |
138 | { | 139 | { |
139 | if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && | 140 | if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && |
140 | priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) | 141 | priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) |
141 | priv->cfg->num_of_queues = | 142 | priv->cfg->num_of_queues = |
142 | priv->cfg->mod_params->num_of_queues; | 143 | priv->cfg->mod_params->num_of_queues; |
143 | 144 | ||
@@ -145,7 +146,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) | |||
145 | priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; | 146 | priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; |
146 | priv->hw_params.scd_bc_tbls_size = | 147 | priv->hw_params.scd_bc_tbls_size = |
147 | priv->cfg->num_of_queues * | 148 | priv->cfg->num_of_queues * |
148 | sizeof(struct iwl5000_scd_bc_tbl); | 149 | sizeof(struct iwlagn_scd_bc_tbl); |
149 | priv->hw_params.tfd_size = sizeof(struct iwl_tfd); | 150 | priv->hw_params.tfd_size = sizeof(struct iwl_tfd); |
150 | priv->hw_params.max_stations = IWL5000_STATION_COUNT; | 151 | priv->hw_params.max_stations = IWL5000_STATION_COUNT; |
151 | priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; | 152 | priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; |
@@ -226,25 +227,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel) | |||
226 | 227 | ||
227 | static struct iwl_lib_ops iwl6000_lib = { | 228 | static struct iwl_lib_ops iwl6000_lib = { |
228 | .set_hw_params = iwl6000_hw_set_hw_params, | 229 | .set_hw_params = iwl6000_hw_set_hw_params, |
229 | .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, | 230 | .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, |
230 | .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, | 231 | .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, |
231 | .txq_set_sched = iwl5000_txq_set_sched, | 232 | .txq_set_sched = iwlagn_txq_set_sched, |
232 | .txq_agg_enable = iwl5000_txq_agg_enable, | 233 | .txq_agg_enable = iwlagn_txq_agg_enable, |
233 | .txq_agg_disable = iwl5000_txq_agg_disable, | 234 | .txq_agg_disable = iwlagn_txq_agg_disable, |
234 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, | 235 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, |
235 | .txq_free_tfd = iwl_hw_txq_free_tfd, | 236 | .txq_free_tfd = iwl_hw_txq_free_tfd, |
236 | .txq_init = iwl_hw_tx_queue_init, | 237 | .txq_init = iwl_hw_tx_queue_init, |
237 | .rx_handler_setup = iwl5000_rx_handler_setup, | 238 | .rx_handler_setup = iwlagn_rx_handler_setup, |
238 | .setup_deferred_work = iwl5000_setup_deferred_work, | 239 | .setup_deferred_work = iwlagn_setup_deferred_work, |
239 | .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, | 240 | .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, |
240 | .load_ucode = iwl5000_load_ucode, | 241 | .load_ucode = iwlagn_load_ucode, |
241 | .dump_nic_event_log = iwl_dump_nic_event_log, | 242 | .dump_nic_event_log = iwl_dump_nic_event_log, |
242 | .dump_nic_error_log = iwl_dump_nic_error_log, | 243 | .dump_nic_error_log = iwl_dump_nic_error_log, |
243 | .dump_csr = iwl_dump_csr, | 244 | .dump_csr = iwl_dump_csr, |
244 | .dump_fh = iwl_dump_fh, | 245 | .dump_fh = iwl_dump_fh, |
245 | .init_alive_start = iwl5000_init_alive_start, | 246 | .init_alive_start = iwlagn_init_alive_start, |
246 | .alive_notify = iwl5000_alive_notify, | 247 | .alive_notify = iwlagn_alive_notify, |
247 | .send_tx_power = iwl5000_send_tx_power, | 248 | .send_tx_power = iwlagn_send_tx_power, |
248 | .update_chain_flags = iwl_update_chain_flags, | 249 | .update_chain_flags = iwl_update_chain_flags, |
249 | .set_channel_switch = iwl6000_hw_channel_switch, | 250 | .set_channel_switch = iwl6000_hw_channel_switch, |
250 | .apm_ops = { | 251 | .apm_ops = { |
@@ -255,26 +256,26 @@ static struct iwl_lib_ops iwl6000_lib = { | |||
255 | }, | 256 | }, |
256 | .eeprom_ops = { | 257 | .eeprom_ops = { |
257 | .regulatory_bands = { | 258 | .regulatory_bands = { |
258 | EEPROM_5000_REG_BAND_1_CHANNELS, | 259 | EEPROM_REG_BAND_1_CHANNELS, |
259 | EEPROM_5000_REG_BAND_2_CHANNELS, | 260 | EEPROM_REG_BAND_2_CHANNELS, |
260 | EEPROM_5000_REG_BAND_3_CHANNELS, | 261 | EEPROM_REG_BAND_3_CHANNELS, |
261 | EEPROM_5000_REG_BAND_4_CHANNELS, | 262 | EEPROM_REG_BAND_4_CHANNELS, |
262 | EEPROM_5000_REG_BAND_5_CHANNELS, | 263 | EEPROM_REG_BAND_5_CHANNELS, |
263 | EEPROM_5000_REG_BAND_24_HT40_CHANNELS, | 264 | EEPROM_REG_BAND_24_HT40_CHANNELS, |
264 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS | 265 | EEPROM_REG_BAND_52_HT40_CHANNELS |
265 | }, | 266 | }, |
266 | .verify_signature = iwlcore_eeprom_verify_signature, | 267 | .verify_signature = iwlcore_eeprom_verify_signature, |
267 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, | 268 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, |
268 | .release_semaphore = iwlcore_eeprom_release_semaphore, | 269 | .release_semaphore = iwlcore_eeprom_release_semaphore, |
269 | .calib_version = iwl5000_eeprom_calib_version, | 270 | .calib_version = iwlagn_eeprom_calib_version, |
270 | .query_addr = iwl5000_eeprom_query_addr, | 271 | .query_addr = iwlagn_eeprom_query_addr, |
271 | .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, | 272 | .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, |
272 | }, | 273 | }, |
273 | .post_associate = iwl_post_associate, | 274 | .post_associate = iwl_post_associate, |
274 | .isr = iwl_isr_ict, | 275 | .isr = iwl_isr_ict, |
275 | .config_ap = iwl_config_ap, | 276 | .config_ap = iwl_config_ap, |
276 | .temp_ops = { | 277 | .temp_ops = { |
277 | .temperature = iwl5000_temperature, | 278 | .temperature = iwlagn_temperature, |
278 | .set_ct_kill = iwl6000_set_ct_threshold, | 279 | .set_ct_kill = iwl6000_set_ct_threshold, |
279 | }, | 280 | }, |
280 | .add_bcast_station = iwl_add_bcast_station, | 281 | .add_bcast_station = iwl_add_bcast_station, |
@@ -284,34 +285,34 @@ static struct iwl_lib_ops iwl6000_lib = { | |||
284 | }; | 285 | }; |
285 | 286 | ||
286 | static const struct iwl_ops iwl6000_ops = { | 287 | static const struct iwl_ops iwl6000_ops = { |
287 | .ucode = &iwl5000_ucode, | 288 | .ucode = &iwlagn_ucode, |
288 | .lib = &iwl6000_lib, | 289 | .lib = &iwl6000_lib, |
289 | .hcmd = &iwl5000_hcmd, | 290 | .hcmd = &iwlagn_hcmd, |
290 | .utils = &iwl5000_hcmd_utils, | 291 | .utils = &iwlagn_hcmd_utils, |
291 | .led = &iwlagn_led_ops, | 292 | .led = &iwlagn_led_ops, |
292 | }; | 293 | }; |
293 | 294 | ||
294 | static struct iwl_lib_ops iwl6050_lib = { | 295 | static struct iwl_lib_ops iwl6050_lib = { |
295 | .set_hw_params = iwl6000_hw_set_hw_params, | 296 | .set_hw_params = iwl6000_hw_set_hw_params, |
296 | .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, | 297 | .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, |
297 | .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, | 298 | .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, |
298 | .txq_set_sched = iwl5000_txq_set_sched, | 299 | .txq_set_sched = iwlagn_txq_set_sched, |
299 | .txq_agg_enable = iwl5000_txq_agg_enable, | 300 | .txq_agg_enable = iwlagn_txq_agg_enable, |
300 | .txq_agg_disable = iwl5000_txq_agg_disable, | 301 | .txq_agg_disable = iwlagn_txq_agg_disable, |
301 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, | 302 | .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, |
302 | .txq_free_tfd = iwl_hw_txq_free_tfd, | 303 | .txq_free_tfd = iwl_hw_txq_free_tfd, |
303 | .txq_init = iwl_hw_tx_queue_init, | 304 | .txq_init = iwl_hw_tx_queue_init, |
304 | .rx_handler_setup = iwl5000_rx_handler_setup, | 305 | .rx_handler_setup = iwlagn_rx_handler_setup, |
305 | .setup_deferred_work = iwl5000_setup_deferred_work, | 306 | .setup_deferred_work = iwlagn_setup_deferred_work, |
306 | .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, | 307 | .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, |
307 | .load_ucode = iwl5000_load_ucode, | 308 | .load_ucode = iwlagn_load_ucode, |
308 | .dump_nic_event_log = iwl_dump_nic_event_log, | 309 | .dump_nic_event_log = iwl_dump_nic_event_log, |
309 | .dump_nic_error_log = iwl_dump_nic_error_log, | 310 | .dump_nic_error_log = iwl_dump_nic_error_log, |
310 | .dump_csr = iwl_dump_csr, | 311 | .dump_csr = iwl_dump_csr, |
311 | .dump_fh = iwl_dump_fh, | 312 | .dump_fh = iwl_dump_fh, |
312 | .init_alive_start = iwl5000_init_alive_start, | 313 | .init_alive_start = iwlagn_init_alive_start, |
313 | .alive_notify = iwl5000_alive_notify, | 314 | .alive_notify = iwlagn_alive_notify, |
314 | .send_tx_power = iwl5000_send_tx_power, | 315 | .send_tx_power = iwlagn_send_tx_power, |
315 | .update_chain_flags = iwl_update_chain_flags, | 316 | .update_chain_flags = iwl_update_chain_flags, |
316 | .set_channel_switch = iwl6000_hw_channel_switch, | 317 | .set_channel_switch = iwl6000_hw_channel_switch, |
317 | .apm_ops = { | 318 | .apm_ops = { |
@@ -322,26 +323,26 @@ static struct iwl_lib_ops iwl6050_lib = { | |||
322 | }, | 323 | }, |
323 | .eeprom_ops = { | 324 | .eeprom_ops = { |
324 | .regulatory_bands = { | 325 | .regulatory_bands = { |
325 | EEPROM_5000_REG_BAND_1_CHANNELS, | 326 | EEPROM_REG_BAND_1_CHANNELS, |
326 | EEPROM_5000_REG_BAND_2_CHANNELS, | 327 | EEPROM_REG_BAND_2_CHANNELS, |
327 | EEPROM_5000_REG_BAND_3_CHANNELS, | 328 | EEPROM_REG_BAND_3_CHANNELS, |
328 | EEPROM_5000_REG_BAND_4_CHANNELS, | 329 | EEPROM_REG_BAND_4_CHANNELS, |
329 | EEPROM_5000_REG_BAND_5_CHANNELS, | 330 | EEPROM_REG_BAND_5_CHANNELS, |
330 | EEPROM_5000_REG_BAND_24_HT40_CHANNELS, | 331 | EEPROM_REG_BAND_24_HT40_CHANNELS, |
331 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS | 332 | EEPROM_REG_BAND_52_HT40_CHANNELS |
332 | }, | 333 | }, |
333 | .verify_signature = iwlcore_eeprom_verify_signature, | 334 | .verify_signature = iwlcore_eeprom_verify_signature, |
334 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, | 335 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, |
335 | .release_semaphore = iwlcore_eeprom_release_semaphore, | 336 | .release_semaphore = iwlcore_eeprom_release_semaphore, |
336 | .calib_version = iwl5000_eeprom_calib_version, | 337 | .calib_version = iwlagn_eeprom_calib_version, |
337 | .query_addr = iwl5000_eeprom_query_addr, | 338 | .query_addr = iwlagn_eeprom_query_addr, |
338 | .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, | 339 | .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, |
339 | }, | 340 | }, |
340 | .post_associate = iwl_post_associate, | 341 | .post_associate = iwl_post_associate, |
341 | .isr = iwl_isr_ict, | 342 | .isr = iwl_isr_ict, |
342 | .config_ap = iwl_config_ap, | 343 | .config_ap = iwl_config_ap, |
343 | .temp_ops = { | 344 | .temp_ops = { |
344 | .temperature = iwl5000_temperature, | 345 | .temperature = iwlagn_temperature, |
345 | .set_ct_kill = iwl6000_set_ct_threshold, | 346 | .set_ct_kill = iwl6000_set_ct_threshold, |
346 | .set_calib_version = iwl6050_set_calib_version, | 347 | .set_calib_version = iwl6050_set_calib_version, |
347 | }, | 348 | }, |
@@ -352,16 +353,50 @@ static struct iwl_lib_ops iwl6050_lib = { | |||
352 | }; | 353 | }; |
353 | 354 | ||
354 | static const struct iwl_ops iwl6050_ops = { | 355 | static const struct iwl_ops iwl6050_ops = { |
355 | .ucode = &iwl5000_ucode, | 356 | .ucode = &iwlagn_ucode, |
356 | .lib = &iwl6050_lib, | 357 | .lib = &iwl6050_lib, |
357 | .hcmd = &iwl5000_hcmd, | 358 | .hcmd = &iwlagn_hcmd, |
358 | .utils = &iwl5000_hcmd_utils, | 359 | .utils = &iwlagn_hcmd_utils, |
359 | .led = &iwlagn_led_ops, | 360 | .led = &iwlagn_led_ops, |
360 | }; | 361 | }; |
361 | 362 | ||
362 | /* | 363 | /* |
363 | * "i": Internal configuration, use internal Power Amplifier | 364 | * "i": Internal configuration, use internal Power Amplifier |
364 | */ | 365 | */ |
366 | struct iwl_cfg iwl6000i_g2_2agn_cfg = { | ||
367 | .name = "6000 Series 2x2 AGN Gen2", | ||
368 | .fw_name_pre = IWL6000_G2_FW_PRE, | ||
369 | .ucode_api_max = IWL6000_UCODE_API_MAX, | ||
370 | .ucode_api_min = IWL6000_UCODE_API_MIN, | ||
371 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, | ||
372 | .ops = &iwl6000_ops, | ||
373 | .eeprom_size = OTP_LOW_IMAGE_SIZE, | ||
374 | .eeprom_ver = EEPROM_6000_EEPROM_VERSION, | ||
375 | .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, | ||
376 | .num_of_queues = IWLAGN_NUM_QUEUES, | ||
377 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, | ||
378 | .mod_params = &iwlagn_mod_params, | ||
379 | .valid_tx_ant = ANT_AB, | ||
380 | .valid_rx_ant = ANT_AB, | ||
381 | .pll_cfg_val = 0, | ||
382 | .set_l0s = true, | ||
383 | .use_bsm = false, | ||
384 | .pa_type = IWL_PA_INTERNAL, | ||
385 | .max_ll_items = OTP_MAX_LL_ITEMS_6x00, | ||
386 | .shadow_ram_support = true, | ||
387 | .ht_greenfield_support = true, | ||
388 | .led_compensation = 51, | ||
389 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
390 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | ||
391 | .supports_idle = true, | ||
392 | .adv_thermal_throttle = true, | ||
393 | .support_ct_kill_exit = true, | ||
394 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | ||
395 | .chain_noise_scale = 1000, | ||
396 | .monitor_recover_period = IWL_MONITORING_PERIOD, | ||
397 | .max_event_log_size = 1024, | ||
398 | }; | ||
399 | |||
365 | struct iwl_cfg iwl6000i_2agn_cfg = { | 400 | struct iwl_cfg iwl6000i_2agn_cfg = { |
366 | .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", | 401 | .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", |
367 | .fw_name_pre = IWL6000_FW_PRE, | 402 | .fw_name_pre = IWL6000_FW_PRE, |
@@ -371,10 +406,10 @@ struct iwl_cfg iwl6000i_2agn_cfg = { | |||
371 | .ops = &iwl6000_ops, | 406 | .ops = &iwl6000_ops, |
372 | .eeprom_size = OTP_LOW_IMAGE_SIZE, | 407 | .eeprom_size = OTP_LOW_IMAGE_SIZE, |
373 | .eeprom_ver = EEPROM_6000_EEPROM_VERSION, | 408 | .eeprom_ver = EEPROM_6000_EEPROM_VERSION, |
374 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 409 | .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, |
375 | .num_of_queues = IWL50_NUM_QUEUES, | 410 | .num_of_queues = IWLAGN_NUM_QUEUES, |
376 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 411 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
377 | .mod_params = &iwl50_mod_params, | 412 | .mod_params = &iwlagn_mod_params, |
378 | .valid_tx_ant = ANT_BC, | 413 | .valid_tx_ant = ANT_BC, |
379 | .valid_rx_ant = ANT_BC, | 414 | .valid_rx_ant = ANT_BC, |
380 | .pll_cfg_val = 0, | 415 | .pll_cfg_val = 0, |
@@ -393,6 +428,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = { | |||
393 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 428 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
394 | .chain_noise_scale = 1000, | 429 | .chain_noise_scale = 1000, |
395 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 430 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
431 | .max_event_log_size = 1024, | ||
396 | }; | 432 | }; |
397 | 433 | ||
398 | struct iwl_cfg iwl6000i_2abg_cfg = { | 434 | struct iwl_cfg iwl6000i_2abg_cfg = { |
@@ -404,10 +440,10 @@ struct iwl_cfg iwl6000i_2abg_cfg = { | |||
404 | .ops = &iwl6000_ops, | 440 | .ops = &iwl6000_ops, |
405 | .eeprom_size = OTP_LOW_IMAGE_SIZE, | 441 | .eeprom_size = OTP_LOW_IMAGE_SIZE, |
406 | .eeprom_ver = EEPROM_6000_EEPROM_VERSION, | 442 | .eeprom_ver = EEPROM_6000_EEPROM_VERSION, |
407 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 443 | .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, |
408 | .num_of_queues = IWL50_NUM_QUEUES, | 444 | .num_of_queues = IWLAGN_NUM_QUEUES, |
409 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 445 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
410 | .mod_params = &iwl50_mod_params, | 446 | .mod_params = &iwlagn_mod_params, |
411 | .valid_tx_ant = ANT_BC, | 447 | .valid_tx_ant = ANT_BC, |
412 | .valid_rx_ant = ANT_BC, | 448 | .valid_rx_ant = ANT_BC, |
413 | .pll_cfg_val = 0, | 449 | .pll_cfg_val = 0, |
@@ -425,6 +461,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = { | |||
425 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 461 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
426 | .chain_noise_scale = 1000, | 462 | .chain_noise_scale = 1000, |
427 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 463 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
464 | .max_event_log_size = 1024, | ||
428 | }; | 465 | }; |
429 | 466 | ||
430 | struct iwl_cfg iwl6000i_2bg_cfg = { | 467 | struct iwl_cfg iwl6000i_2bg_cfg = { |
@@ -436,10 +473,10 @@ struct iwl_cfg iwl6000i_2bg_cfg = { | |||
436 | .ops = &iwl6000_ops, | 473 | .ops = &iwl6000_ops, |
437 | .eeprom_size = OTP_LOW_IMAGE_SIZE, | 474 | .eeprom_size = OTP_LOW_IMAGE_SIZE, |
438 | .eeprom_ver = EEPROM_6000_EEPROM_VERSION, | 475 | .eeprom_ver = EEPROM_6000_EEPROM_VERSION, |
439 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 476 | .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, |
440 | .num_of_queues = IWL50_NUM_QUEUES, | 477 | .num_of_queues = IWLAGN_NUM_QUEUES, |
441 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 478 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
442 | .mod_params = &iwl50_mod_params, | 479 | .mod_params = &iwlagn_mod_params, |
443 | .valid_tx_ant = ANT_BC, | 480 | .valid_tx_ant = ANT_BC, |
444 | .valid_rx_ant = ANT_BC, | 481 | .valid_rx_ant = ANT_BC, |
445 | .pll_cfg_val = 0, | 482 | .pll_cfg_val = 0, |
@@ -457,6 +494,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = { | |||
457 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 494 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
458 | .chain_noise_scale = 1000, | 495 | .chain_noise_scale = 1000, |
459 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 496 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
497 | .max_event_log_size = 1024, | ||
460 | }; | 498 | }; |
461 | 499 | ||
462 | struct iwl_cfg iwl6050_2agn_cfg = { | 500 | struct iwl_cfg iwl6050_2agn_cfg = { |
@@ -468,10 +506,10 @@ struct iwl_cfg iwl6050_2agn_cfg = { | |||
468 | .ops = &iwl6050_ops, | 506 | .ops = &iwl6050_ops, |
469 | .eeprom_size = OTP_LOW_IMAGE_SIZE, | 507 | .eeprom_size = OTP_LOW_IMAGE_SIZE, |
470 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, | 508 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, |
471 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 509 | .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, |
472 | .num_of_queues = IWL50_NUM_QUEUES, | 510 | .num_of_queues = IWLAGN_NUM_QUEUES, |
473 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 511 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
474 | .mod_params = &iwl50_mod_params, | 512 | .mod_params = &iwlagn_mod_params, |
475 | .valid_tx_ant = ANT_AB, | 513 | .valid_tx_ant = ANT_AB, |
476 | .valid_rx_ant = ANT_AB, | 514 | .valid_rx_ant = ANT_AB, |
477 | .pll_cfg_val = 0, | 515 | .pll_cfg_val = 0, |
@@ -490,6 +528,7 @@ struct iwl_cfg iwl6050_2agn_cfg = { | |||
490 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 528 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
491 | .chain_noise_scale = 1500, | 529 | .chain_noise_scale = 1500, |
492 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 530 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
531 | .max_event_log_size = 1024, | ||
493 | }; | 532 | }; |
494 | 533 | ||
495 | struct iwl_cfg iwl6050_2abg_cfg = { | 534 | struct iwl_cfg iwl6050_2abg_cfg = { |
@@ -501,10 +540,10 @@ struct iwl_cfg iwl6050_2abg_cfg = { | |||
501 | .ops = &iwl6050_ops, | 540 | .ops = &iwl6050_ops, |
502 | .eeprom_size = OTP_LOW_IMAGE_SIZE, | 541 | .eeprom_size = OTP_LOW_IMAGE_SIZE, |
503 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, | 542 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, |
504 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 543 | .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, |
505 | .num_of_queues = IWL50_NUM_QUEUES, | 544 | .num_of_queues = IWLAGN_NUM_QUEUES, |
506 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 545 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
507 | .mod_params = &iwl50_mod_params, | 546 | .mod_params = &iwlagn_mod_params, |
508 | .valid_tx_ant = ANT_AB, | 547 | .valid_tx_ant = ANT_AB, |
509 | .valid_rx_ant = ANT_AB, | 548 | .valid_rx_ant = ANT_AB, |
510 | .pll_cfg_val = 0, | 549 | .pll_cfg_val = 0, |
@@ -522,6 +561,7 @@ struct iwl_cfg iwl6050_2abg_cfg = { | |||
522 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 561 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
523 | .chain_noise_scale = 1500, | 562 | .chain_noise_scale = 1500, |
524 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 563 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
564 | .max_event_log_size = 1024, | ||
525 | }; | 565 | }; |
526 | 566 | ||
527 | struct iwl_cfg iwl6000_3agn_cfg = { | 567 | struct iwl_cfg iwl6000_3agn_cfg = { |
@@ -533,10 +573,10 @@ struct iwl_cfg iwl6000_3agn_cfg = { | |||
533 | .ops = &iwl6000_ops, | 573 | .ops = &iwl6000_ops, |
534 | .eeprom_size = OTP_LOW_IMAGE_SIZE, | 574 | .eeprom_size = OTP_LOW_IMAGE_SIZE, |
535 | .eeprom_ver = EEPROM_6000_EEPROM_VERSION, | 575 | .eeprom_ver = EEPROM_6000_EEPROM_VERSION, |
536 | .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, | 576 | .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, |
537 | .num_of_queues = IWL50_NUM_QUEUES, | 577 | .num_of_queues = IWLAGN_NUM_QUEUES, |
538 | .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, | 578 | .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, |
539 | .mod_params = &iwl50_mod_params, | 579 | .mod_params = &iwlagn_mod_params, |
540 | .valid_tx_ant = ANT_ABC, | 580 | .valid_tx_ant = ANT_ABC, |
541 | .valid_rx_ant = ANT_ABC, | 581 | .valid_rx_ant = ANT_ABC, |
542 | .pll_cfg_val = 0, | 582 | .pll_cfg_val = 0, |
@@ -555,6 +595,7 @@ struct iwl_cfg iwl6000_3agn_cfg = { | |||
555 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 595 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
556 | .chain_noise_scale = 1000, | 596 | .chain_noise_scale = 1000, |
557 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 597 | .monitor_recover_period = IWL_MONITORING_PERIOD, |
598 | .max_event_log_size = 1024, | ||
558 | }; | 599 | }; |
559 | 600 | ||
560 | MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); | 601 | MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c new file mode 100644 index 000000000000..28bc8f8ba981 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c | |||
@@ -0,0 +1,274 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * GPL LICENSE SUMMARY | ||
4 | * | ||
5 | * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of version 2 of the GNU General Public License as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
19 | * USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution | ||
22 | * in the file called LICENSE.GPL. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/sched.h> | ||
34 | |||
35 | #include "iwl-dev.h" | ||
36 | #include "iwl-core.h" | ||
37 | #include "iwl-io.h" | ||
38 | #include "iwl-agn.h" | ||
39 | |||
40 | static int iwlagn_send_rxon_assoc(struct iwl_priv *priv) | ||
41 | { | ||
42 | int ret = 0; | ||
43 | struct iwl5000_rxon_assoc_cmd rxon_assoc; | ||
44 | const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; | ||
45 | const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; | ||
46 | |||
47 | if ((rxon1->flags == rxon2->flags) && | ||
48 | (rxon1->filter_flags == rxon2->filter_flags) && | ||
49 | (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && | ||
50 | (rxon1->ofdm_ht_single_stream_basic_rates == | ||
51 | rxon2->ofdm_ht_single_stream_basic_rates) && | ||
52 | (rxon1->ofdm_ht_dual_stream_basic_rates == | ||
53 | rxon2->ofdm_ht_dual_stream_basic_rates) && | ||
54 | (rxon1->ofdm_ht_triple_stream_basic_rates == | ||
55 | rxon2->ofdm_ht_triple_stream_basic_rates) && | ||
56 | (rxon1->acquisition_data == rxon2->acquisition_data) && | ||
57 | (rxon1->rx_chain == rxon2->rx_chain) && | ||
58 | (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { | ||
59 | IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | rxon_assoc.flags = priv->staging_rxon.flags; | ||
64 | rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; | ||
65 | rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; | ||
66 | rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; | ||
67 | rxon_assoc.reserved1 = 0; | ||
68 | rxon_assoc.reserved2 = 0; | ||
69 | rxon_assoc.reserved3 = 0; | ||
70 | rxon_assoc.ofdm_ht_single_stream_basic_rates = | ||
71 | priv->staging_rxon.ofdm_ht_single_stream_basic_rates; | ||
72 | rxon_assoc.ofdm_ht_dual_stream_basic_rates = | ||
73 | priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; | ||
74 | rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; | ||
75 | rxon_assoc.ofdm_ht_triple_stream_basic_rates = | ||
76 | priv->staging_rxon.ofdm_ht_triple_stream_basic_rates; | ||
77 | rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data; | ||
78 | |||
79 | ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, | ||
80 | sizeof(rxon_assoc), &rxon_assoc, NULL); | ||
81 | if (ret) | ||
82 | return ret; | ||
83 | |||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) | ||
88 | { | ||
89 | struct iwl_tx_ant_config_cmd tx_ant_cmd = { | ||
90 | .valid = cpu_to_le32(valid_tx_ant), | ||
91 | }; | ||
92 | |||
93 | if (IWL_UCODE_API(priv->ucode_ver) > 1) { | ||
94 | IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); | ||
95 | return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, | ||
96 | sizeof(struct iwl_tx_ant_config_cmd), | ||
97 | &tx_ant_cmd); | ||
98 | } else { | ||
99 | IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n"); | ||
100 | return -EOPNOTSUPP; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | /* Currently this is the superset of everything */ | ||
105 | static u16 iwlagn_get_hcmd_size(u8 cmd_id, u16 len) | ||
106 | { | ||
107 | return len; | ||
108 | } | ||
109 | |||
110 | static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) | ||
111 | { | ||
112 | u16 size = (u16)sizeof(struct iwl_addsta_cmd); | ||
113 | struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data; | ||
114 | memcpy(addsta, cmd, size); | ||
115 | /* reserved in 5000 */ | ||
116 | addsta->rate_n_flags = cpu_to_le16(0); | ||
117 | return size; | ||
118 | } | ||
119 | |||
120 | static void iwlagn_gain_computation(struct iwl_priv *priv, | ||
121 | u32 average_noise[NUM_RX_CHAINS], | ||
122 | u16 min_average_noise_antenna_i, | ||
123 | u32 min_average_noise, | ||
124 | u8 default_chain) | ||
125 | { | ||
126 | int i; | ||
127 | s32 delta_g; | ||
128 | struct iwl_chain_noise_data *data = &priv->chain_noise_data; | ||
129 | |||
130 | /* | ||
131 | * Find Gain Code for the chains based on "default chain" | ||
132 | */ | ||
133 | for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) { | ||
134 | if ((data->disconn_array[i])) { | ||
135 | data->delta_gain_code[i] = 0; | ||
136 | continue; | ||
137 | } | ||
138 | |||
139 | delta_g = (priv->cfg->chain_noise_scale * | ||
140 | ((s32)average_noise[default_chain] - | ||
141 | (s32)average_noise[i])) / 1500; | ||
142 | |||
143 | /* bound gain magnitude to a 2-bit max; 3rd bit is sign */ | ||
144 | data->delta_gain_code[i] = | ||
145 | min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); | ||
146 | |||
147 | if (delta_g < 0) | ||
148 | /* | ||
149 | * set negative sign ... | ||
150 | * note to Intel developers: This is uCode API format, | ||
151 | * not the format of any internal device registers. | ||
152 | * Do not change this format for e.g. 6050 or similar | ||
153 | * devices. Change format only if more resolution | ||
154 | * (i.e. more than 2 bits magnitude) is needed. | ||
155 | */ | ||
156 | data->delta_gain_code[i] |= (1 << 2); | ||
157 | } | ||
158 | |||
159 | IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n", | ||
160 | data->delta_gain_code[1], data->delta_gain_code[2]); | ||
161 | |||
162 | if (!data->radio_write) { | ||
163 | struct iwl_calib_chain_noise_gain_cmd cmd; | ||
164 | |||
165 | memset(&cmd, 0, sizeof(cmd)); | ||
166 | |||
167 | cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD; | ||
168 | cmd.hdr.first_group = 0; | ||
169 | cmd.hdr.groups_num = 1; | ||
170 | cmd.hdr.data_valid = 1; | ||
171 | cmd.delta_gain_1 = data->delta_gain_code[1]; | ||
172 | cmd.delta_gain_2 = data->delta_gain_code[2]; | ||
173 | iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, | ||
174 | sizeof(cmd), &cmd, NULL); | ||
175 | |||
176 | data->radio_write = 1; | ||
177 | data->state = IWL_CHAIN_NOISE_CALIBRATED; | ||
178 | } | ||
179 | |||
180 | data->chain_noise_a = 0; | ||
181 | data->chain_noise_b = 0; | ||
182 | data->chain_noise_c = 0; | ||
183 | data->chain_signal_a = 0; | ||
184 | data->chain_signal_b = 0; | ||
185 | data->chain_signal_c = 0; | ||
186 | data->beacon_count = 0; | ||
187 | } | ||
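The delta-gain encoding above packs a 2-bit magnitude plus a sign bit (bit 2) into each per-chain gain code sent to the uCode. Below is a standalone sketch of that arithmetic with invented noise figures; the value 3 for CHAIN_NOISE_MAX_DELTA_GAIN_CODE is an assumption here.

#include <stdio.h>
#include <stdlib.h>

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3	/* assumed value of the driver constant */

static unsigned int delta_gain_code(long chain_noise_scale,
				    long noise_default, long noise_other)
{
	long delta_g = chain_noise_scale * (noise_default - noise_other) / 1500;
	unsigned int code = labs(delta_g);

	if (code > CHAIN_NOISE_MAX_DELTA_GAIN_CODE)
		code = CHAIN_NOISE_MAX_DELTA_GAIN_CODE;	/* bound magnitude to 2 bits */
	if (delta_g < 0)
		code |= (1 << 2);			/* sign bit, uCode API format */
	return code;
}

int main(void)
{
	/* e.g. default chain noisier than the other chain by 3 units, scale 1000 */
	printf("code = 0x%x\n", delta_gain_code(1000, 103, 100));	/* prints: code = 0x2 */
	printf("code = 0x%x\n", delta_gain_code(1000, 100, 103));	/* prints: code = 0x6 */
	return 0;
}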
188 | |||
189 | static void iwlagn_chain_noise_reset(struct iwl_priv *priv) | ||
190 | { | ||
191 | struct iwl_chain_noise_data *data = &priv->chain_noise_data; | ||
192 | int ret; | ||
193 | |||
194 | if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { | ||
195 | struct iwl_calib_chain_noise_reset_cmd cmd; | ||
196 | memset(&cmd, 0, sizeof(cmd)); | ||
197 | |||
198 | cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD; | ||
199 | cmd.hdr.first_group = 0; | ||
200 | cmd.hdr.groups_num = 1; | ||
201 | cmd.hdr.data_valid = 1; | ||
202 | ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, | ||
203 | sizeof(cmd), &cmd); | ||
204 | if (ret) | ||
205 | IWL_ERR(priv, | ||
206 | "Could not send REPLY_PHY_CALIBRATION_CMD\n"); | ||
207 | data->state = IWL_CHAIN_NOISE_ACCUMULATE; | ||
208 | IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info, | ||
213 | __le32 *tx_flags) | ||
214 | { | ||
215 | if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || | ||
216 | (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) | ||
217 | *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; | ||
218 | else | ||
219 | *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK; | ||
220 | } | ||
221 | |||
222 | /* Calc max signal level (dBm) among 3 possible receivers */ | ||
223 | static int iwlagn_calc_rssi(struct iwl_priv *priv, | ||
224 | struct iwl_rx_phy_res *rx_resp) | ||
225 | { | ||
226 | /* data from PHY/DSP regarding signal strength, etc., | ||
227 | * contents are always there, not configurable by host | ||
228 | */ | ||
229 | struct iwl5000_non_cfg_phy *ncphy = | ||
230 | (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf; | ||
231 | u32 val, rssi_a, rssi_b, rssi_c, max_rssi; | ||
232 | u8 agc; | ||
233 | |||
234 | val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]); | ||
235 | agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS; | ||
236 | |||
237 | /* Find max rssi among 3 possible receivers. | ||
238 | * These values are measured by the digital signal processor (DSP). | ||
239 | * They should stay fairly constant even as the signal strength varies, | ||
240 | * if the radio's automatic gain control (AGC) is working right. | ||
241 | * AGC value (see below) will provide the "interesting" info. | ||
242 | */ | ||
243 | val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]); | ||
244 | rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS; | ||
245 | rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS; | ||
246 | val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]); | ||
247 | rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS; | ||
248 | |||
249 | max_rssi = max_t(u32, rssi_a, rssi_b); | ||
250 | max_rssi = max_t(u32, max_rssi, rssi_c); | ||
251 | |||
252 | IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", | ||
253 | rssi_a, rssi_b, rssi_c, max_rssi, agc); | ||
254 | |||
255 | /* dBm = max_rssi dB - agc dB - constant. | ||
256 | * Higher AGC (higher radio gain) means lower signal. */ | ||
257 | return max_rssi - agc - IWLAGN_RSSI_OFFSET; | ||
258 | } | ||
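The conversion above reduces to dBm = max(rssi_a, rssi_b, rssi_c) - agc - IWLAGN_RSSI_OFFSET. A tiny sketch with invented DSP readings (the offset value 44 comes from iwl-agn-hw.h, added further below):

#include <stdio.h>

#define IWLAGN_RSSI_OFFSET 44	/* RSSI to dBm, see iwl-agn-hw.h */

int main(void)
{
	int rssi_a = 35, rssi_b = 40, rssi_c = 30, agc = 50;	/* invented readings */
	int max_rssi = rssi_a;

	if (rssi_b > max_rssi)
		max_rssi = rssi_b;
	if (rssi_c > max_rssi)
		max_rssi = rssi_c;

	/* higher AGC (more radio gain applied) means a weaker incoming signal */
	printf("signal = %d dBm\n", max_rssi - agc - IWLAGN_RSSI_OFFSET);
	/* prints: signal = -54 dBm */
	return 0;
}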
259 | |||
260 | struct iwl_hcmd_ops iwlagn_hcmd = { | ||
261 | .rxon_assoc = iwlagn_send_rxon_assoc, | ||
262 | .commit_rxon = iwl_commit_rxon, | ||
263 | .set_rxon_chain = iwl_set_rxon_chain, | ||
264 | .set_tx_ant = iwlagn_send_tx_ant_config, | ||
265 | }; | ||
266 | |||
267 | struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = { | ||
268 | .get_hcmd_size = iwlagn_get_hcmd_size, | ||
269 | .build_addsta_hcmd = iwlagn_build_addsta_hcmd, | ||
270 | .gain_computation = iwlagn_gain_computation, | ||
271 | .chain_noise_reset = iwlagn_chain_noise_reset, | ||
272 | .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag, | ||
273 | .calc_rssi = iwlagn_calc_rssi, | ||
274 | }; | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h new file mode 100644 index 000000000000..f9a3fbb6338f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h | |||
@@ -0,0 +1,118 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
4 | * redistributing this file, you may do so under either license. | ||
5 | * | ||
6 | * GPL LICENSE SUMMARY | ||
7 | * | ||
8 | * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
22 | * USA | ||
23 | * | ||
24 | * The full GNU General Public License is included in this distribution | ||
25 | * in the file called LICENSE.GPL. | ||
26 | * | ||
27 | * Contact Information: | ||
28 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
29 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
30 | * | ||
31 | * BSD LICENSE | ||
32 | * | ||
33 | * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. | ||
34 | * All rights reserved. | ||
35 | * | ||
36 | * Redistribution and use in source and binary forms, with or without | ||
37 | * modification, are permitted provided that the following conditions | ||
38 | * are met: | ||
39 | * | ||
40 | * * Redistributions of source code must retain the above copyright | ||
41 | * notice, this list of conditions and the following disclaimer. | ||
42 | * * Redistributions in binary form must reproduce the above copyright | ||
43 | * notice, this list of conditions and the following disclaimer in | ||
44 | * the documentation and/or other materials provided with the | ||
45 | * distribution. | ||
46 | * * Neither the name Intel Corporation nor the names of its | ||
47 | * contributors may be used to endorse or promote products derived | ||
48 | * from this software without specific prior written permission. | ||
49 | * | ||
50 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
51 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
52 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
53 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
54 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
55 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
56 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
57 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
58 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
59 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
60 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
61 | * | ||
62 | *****************************************************************************/ | ||
63 | /* | ||
64 | * Please use this file (iwl-agn-hw.h) only for hardware-related definitions. | ||
65 | */ | ||
66 | |||
67 | #ifndef __iwl_agn_hw_h__ | ||
68 | #define __iwl_agn_hw_h__ | ||
69 | |||
70 | #define IWLAGN_RTC_INST_LOWER_BOUND (0x000000) | ||
71 | #define IWLAGN_RTC_INST_UPPER_BOUND (0x020000) | ||
72 | |||
73 | #define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000) | ||
74 | #define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000) | ||
75 | |||
76 | #define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \ | ||
77 | IWLAGN_RTC_INST_LOWER_BOUND) | ||
78 | #define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \ | ||
79 | IWLAGN_RTC_DATA_LOWER_BOUND) | ||
80 | |||
81 | /* RSSI to dBm */ | ||
82 | #define IWLAGN_RSSI_OFFSET 44 | ||
83 | |||
84 | /* PCI registers */ | ||
85 | #define PCI_CFG_RETRY_TIMEOUT 0x041 | ||
86 | |||
87 | /* PCI register values */ | ||
88 | #define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 | ||
89 | #define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 | ||
90 | |||
91 | #define IWLAGN_DEFAULT_TX_RETRY 15 | ||
92 | |||
93 | /* Limit range of txpower output target to be between these values */ | ||
94 | #define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */ | ||
95 | #define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */ | ||
96 | |||
97 | /* EEPROM */ | ||
98 | #define IWLAGN_EEPROM_IMG_SIZE 2048 | ||
99 | |||
100 | #define IWLAGN_CMD_FIFO_NUM 7 | ||
101 | #define IWLAGN_NUM_QUEUES 20 | ||
102 | #define IWLAGN_NUM_AMPDU_QUEUES 10 | ||
103 | #define IWLAGN_FIRST_AMPDU_QUEUE 10 | ||
104 | |||
105 | /* Fixed (non-configurable) rx data from phy */ | ||
106 | |||
107 | /** | ||
108 | * struct iwlagn_scd_bc_tbl scheduler byte count table | ||
109 | * base physical address provided by SCD_DRAM_BASE_ADDR | ||
110 | * @tfd_offset 0-12 - tx command byte count | ||
111 | * 12-16 - station index | ||
112 | */ | ||
113 | struct iwlagn_scd_bc_tbl { | ||
114 | __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; | ||
115 | } __attribute__ ((packed)); | ||
116 | |||
117 | |||
118 | #endif /* __iwl_agn_hw_h__ */ | ||
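A quick sanity-check sketch of the sizes these definitions imply, including the scheduler byte-count table allocation that the .set_hw_params hooks compute as num_of_queues * sizeof(struct iwlagn_scd_bc_tbl). TFD_QUEUE_BC_SIZE is defined elsewhere in the driver; its value of 320 is an assumption here.

#include <stdio.h>

#define IWLAGN_RTC_INST_LOWER_BOUND	(0x000000)
#define IWLAGN_RTC_INST_UPPER_BOUND	(0x020000)
#define IWLAGN_RTC_DATA_LOWER_BOUND	(0x800000)
#define IWLAGN_RTC_DATA_UPPER_BOUND	(0x80C000)
#define IWLAGN_NUM_QUEUES		20

#define TFD_QUEUE_BC_SIZE		320	/* assumed; defined elsewhere in the driver */

struct iwlagn_scd_bc_tbl {
	unsigned short tfd_offset[TFD_QUEUE_BC_SIZE];
};

int main(void)
{
	printf("inst size: 0x%x bytes\n",
	       IWLAGN_RTC_INST_UPPER_BOUND - IWLAGN_RTC_INST_LOWER_BOUND);	/* 0x20000 */
	printf("data size: 0x%x bytes\n",
	       IWLAGN_RTC_DATA_UPPER_BOUND - IWLAGN_RTC_DATA_LOWER_BOUND);	/* 0xc000 */
	printf("scd_bc_tbls_size: %zu bytes\n",
	       IWLAGN_NUM_QUEUES * sizeof(struct iwlagn_scd_bc_tbl));		/* 12800 */
	return 0;
}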
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c index 4c5395eae956..a273e373b7b0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c | |||
@@ -141,13 +141,14 @@ static irqreturn_t iwl_isr(int irq, void *data) | |||
141 | { | 141 | { |
142 | struct iwl_priv *priv = data; | 142 | struct iwl_priv *priv = data; |
143 | u32 inta, inta_mask; | 143 | u32 inta, inta_mask; |
144 | unsigned long flags; | ||
144 | #ifdef CONFIG_IWLWIFI_DEBUG | 145 | #ifdef CONFIG_IWLWIFI_DEBUG |
145 | u32 inta_fh; | 146 | u32 inta_fh; |
146 | #endif | 147 | #endif |
147 | if (!priv) | 148 | if (!priv) |
148 | return IRQ_NONE; | 149 | return IRQ_NONE; |
149 | 150 | ||
150 | spin_lock(&priv->lock); | 151 | spin_lock_irqsave(&priv->lock, flags); |
151 | 152 | ||
152 | /* Disable (but don't clear!) interrupts here to avoid | 153 | /* Disable (but don't clear!) interrupts here to avoid |
153 | * back-to-back ISRs and sporadic interrupts from our NIC. | 154 | * back-to-back ISRs and sporadic interrupts from our NIC. |
@@ -190,7 +191,7 @@ static irqreturn_t iwl_isr(int irq, void *data) | |||
190 | iwl_enable_interrupts(priv); | 191 | iwl_enable_interrupts(priv); |
191 | 192 | ||
192 | unplugged: | 193 | unplugged: |
193 | spin_unlock(&priv->lock); | 194 | spin_unlock_irqrestore(&priv->lock, flags); |
194 | return IRQ_HANDLED; | 195 | return IRQ_HANDLED; |
195 | 196 | ||
196 | none: | 197 | none: |
@@ -199,7 +200,7 @@ static irqreturn_t iwl_isr(int irq, void *data) | |||
199 | if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) | 200 | if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) |
200 | iwl_enable_interrupts(priv); | 201 | iwl_enable_interrupts(priv); |
201 | 202 | ||
202 | spin_unlock(&priv->lock); | 203 | spin_unlock_irqrestore(&priv->lock, flags); |
203 | return IRQ_NONE; | 204 | return IRQ_NONE; |
204 | } | 205 | } |
205 | 206 | ||
@@ -216,6 +217,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) | |||
216 | struct iwl_priv *priv = data; | 217 | struct iwl_priv *priv = data; |
217 | u32 inta, inta_mask; | 218 | u32 inta, inta_mask; |
218 | u32 val = 0; | 219 | u32 val = 0; |
220 | unsigned long flags; | ||
219 | 221 | ||
220 | if (!priv) | 222 | if (!priv) |
221 | return IRQ_NONE; | 223 | return IRQ_NONE; |
@@ -226,7 +228,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) | |||
226 | if (!priv->_agn.use_ict) | 228 | if (!priv->_agn.use_ict) |
227 | return iwl_isr(irq, data); | 229 | return iwl_isr(irq, data); |
228 | 230 | ||
229 | spin_lock(&priv->lock); | 231 | spin_lock_irqsave(&priv->lock, flags); |
230 | 232 | ||
231 | /* Disable (but don't clear!) interrupts here to avoid | 233 | /* Disable (but don't clear!) interrupts here to avoid |
232 | * back-to-back ISRs and sporadic interrupts from our NIC. | 234 | * back-to-back ISRs and sporadic interrupts from our NIC. |
@@ -290,7 +292,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) | |||
290 | iwl_enable_interrupts(priv); | 292 | iwl_enable_interrupts(priv); |
291 | } | 293 | } |
292 | 294 | ||
293 | spin_unlock(&priv->lock); | 295 | spin_unlock_irqrestore(&priv->lock, flags); |
294 | return IRQ_HANDLED; | 296 | return IRQ_HANDLED; |
295 | 297 | ||
296 | none: | 298 | none: |
@@ -300,6 +302,6 @@ irqreturn_t iwl_isr_ict(int irq, void *data) | |||
300 | if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) | 302 | if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) |
301 | iwl_enable_interrupts(priv); | 303 | iwl_enable_interrupts(priv); |
302 | 304 | ||
303 | spin_unlock(&priv->lock); | 305 | spin_unlock_irqrestore(&priv->lock, flags); |
304 | return IRQ_NONE; | 306 | return IRQ_NONE; |
305 | } | 307 | } |
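The hunks above replace spin_lock()/spin_unlock() in the interrupt handlers with the irqsave/irqrestore variants, which save and restore the local interrupt flags around the critical section. A minimal kernel-style sketch of that pattern with a hypothetical lock and counter (not driver code):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static unsigned int example_events;	/* hypothetical state shared with process context */

static irqreturn_t example_isr(int irq, void *data)
{
	unsigned long flags;

	/* safe regardless of the interrupt state of the calling context */
	spin_lock_irqsave(&example_lock, flags);
	example_events++;
	spin_unlock_irqrestore(&example_lock, flags);

	return IRQ_HANDLED;
}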
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c new file mode 100644 index 000000000000..c465c8590833 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c | |||
@@ -0,0 +1,1113 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * GPL LICENSE SUMMARY | ||
4 | * | ||
5 | * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of version 2 of the GNU General Public License as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
19 | * USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution | ||
22 | * in the file called LICENSE.GPL. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | #include <linux/etherdevice.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/sched.h> | ||
34 | |||
35 | #include "iwl-dev.h" | ||
36 | #include "iwl-core.h" | ||
37 | #include "iwl-io.h" | ||
38 | #include "iwl-helpers.h" | ||
39 | #include "iwl-agn-hw.h" | ||
40 | #include "iwl-agn.h" | ||
41 | |||
42 | static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) | ||
43 | { | ||
44 | return le32_to_cpup((__le32 *)&tx_resp->status + | ||
45 | tx_resp->frame_count) & MAX_SN; | ||
46 | } | ||
47 | |||
48 | static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv, | ||
49 | struct iwl_ht_agg *agg, | ||
50 | struct iwl5000_tx_resp *tx_resp, | ||
51 | int txq_id, u16 start_idx) | ||
52 | { | ||
53 | u16 status; | ||
54 | struct agg_tx_status *frame_status = &tx_resp->status; | ||
55 | struct ieee80211_tx_info *info = NULL; | ||
56 | struct ieee80211_hdr *hdr = NULL; | ||
57 | u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); | ||
58 | int i, sh, idx; | ||
59 | u16 seq; | ||
60 | |||
61 | if (agg->wait_for_ba) | ||
62 | IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n"); | ||
63 | |||
64 | agg->frame_count = tx_resp->frame_count; | ||
65 | agg->start_idx = start_idx; | ||
66 | agg->rate_n_flags = rate_n_flags; | ||
67 | agg->bitmap = 0; | ||
68 | |||
69 | /* # frames attempted by Tx command */ | ||
70 | if (agg->frame_count == 1) { | ||
71 | /* Only one frame was attempted; no block-ack will arrive */ | ||
72 | status = le16_to_cpu(frame_status[0].status); | ||
73 | idx = start_idx; | ||
74 | |||
75 | /* FIXME: code repetition */ | ||
76 | IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", | ||
77 | agg->frame_count, agg->start_idx, idx); | ||
78 | |||
79 | info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); | ||
80 | info->status.rates[0].count = tx_resp->failure_frame + 1; | ||
81 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; | ||
82 | info->flags |= iwl_tx_status_to_mac80211(status); | ||
83 | iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info); | ||
84 | |||
85 | /* FIXME: code repetition end */ | ||
86 | |||
87 | IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", | ||
88 | status & 0xff, tx_resp->failure_frame); | ||
89 | IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags); | ||
90 | |||
91 | agg->wait_for_ba = 0; | ||
92 | } else { | ||
93 | /* Two or more frames were attempted; expect block-ack */ | ||
94 | u64 bitmap = 0; | ||
95 | int start = agg->start_idx; | ||
96 | |||
97 | /* Construct bit-map of pending frames within Tx window */ | ||
98 | for (i = 0; i < agg->frame_count; i++) { | ||
99 | u16 sc; | ||
100 | status = le16_to_cpu(frame_status[i].status); | ||
101 | seq = le16_to_cpu(frame_status[i].sequence); | ||
102 | idx = SEQ_TO_INDEX(seq); | ||
103 | txq_id = SEQ_TO_QUEUE(seq); | ||
104 | |||
105 | if (status & (AGG_TX_STATE_FEW_BYTES_MSK | | ||
106 | AGG_TX_STATE_ABORT_MSK)) | ||
107 | continue; | ||
108 | |||
109 | IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", | ||
110 | agg->frame_count, txq_id, idx); | ||
111 | |||
112 | hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); | ||
113 | if (!hdr) { | ||
114 | IWL_ERR(priv, | ||
115 | "BUG_ON idx doesn't point to valid skb" | ||
116 | " idx=%d, txq_id=%d\n", idx, txq_id); | ||
117 | return -1; | ||
118 | } | ||
119 | |||
120 | sc = le16_to_cpu(hdr->seq_ctrl); | ||
121 | if (idx != (SEQ_TO_SN(sc) & 0xff)) { | ||
122 | IWL_ERR(priv, | ||
123 | "BUG_ON idx doesn't match seq control" | ||
124 | " idx=%d, seq_idx=%d, seq=%d\n", | ||
125 | idx, SEQ_TO_SN(sc), | ||
126 | hdr->seq_ctrl); | ||
127 | return -1; | ||
128 | } | ||
129 | |||
130 | IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", | ||
131 | i, idx, SEQ_TO_SN(sc)); | ||
132 | |||
133 | sh = idx - start; | ||
134 | if (sh > 64) { | ||
135 | sh = (start - idx) + 0xff; | ||
136 | bitmap = bitmap << sh; | ||
137 | sh = 0; | ||
138 | start = idx; | ||
139 | } else if (sh < -64) | ||
140 | sh = 0xff - (start - idx); | ||
141 | else if (sh < 0) { | ||
142 | sh = start - idx; | ||
143 | start = idx; | ||
144 | bitmap = bitmap << sh; | ||
145 | sh = 0; | ||
146 | } | ||
147 | bitmap |= 1ULL << sh; | ||
148 | IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", | ||
149 | start, (unsigned long long)bitmap); | ||
150 | } | ||
151 | |||
152 | agg->bitmap = bitmap; | ||
153 | agg->start_idx = start; | ||
154 | IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", | ||
155 | agg->frame_count, agg->start_idx, | ||
156 | (unsigned long long)agg->bitmap); | ||
157 | |||
158 | if (bitmap) | ||
159 | agg->wait_for_ba = 1; | ||
160 | } | ||
161 | return 0; | ||
162 | } | ||
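The loop above folds each attempted frame into a 64-bit bitmap at its offset from the aggregation window start, re-anchoring the window when a shift would fall outside it. A simplified standalone sketch of the basic offset/bitmap idea (indices wrap modulo 256; this illustrates the concept only, not the exact re-anchoring logic above):

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t bitmap = 0;
	int start = 250;			/* aggregation window start (queue index) */
	int idx[] = { 250, 251, 254, 2 };	/* attempted frames, wrapping past 255 */
	size_t i;

	for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++) {
		int sh = (idx[i] - start) & 0xff;	/* offset within the 256-entry ring */
		bitmap |= 1ULL << sh;
	}

	printf("bitmap = 0x%" PRIx64 "\n", bitmap);	/* prints: bitmap = 0x113 */
	return 0;
}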
163 | |||
164 | void iwl_check_abort_status(struct iwl_priv *priv, | ||
165 | u8 frame_count, u32 status) | ||
166 | { | ||
167 | if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { | ||
168 | IWL_ERR(priv, "TODO: Implement Tx flush command!!!\n"); | ||
169 | } | ||
170 | } | ||
171 | |||
172 | static void iwlagn_rx_reply_tx(struct iwl_priv *priv, | ||
173 | struct iwl_rx_mem_buffer *rxb) | ||
174 | { | ||
175 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
176 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | ||
177 | int txq_id = SEQ_TO_QUEUE(sequence); | ||
178 | int index = SEQ_TO_INDEX(sequence); | ||
179 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
180 | struct ieee80211_tx_info *info; | ||
181 | struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; | ||
182 | u32 status = le16_to_cpu(tx_resp->status.status); | ||
183 | int tid; | ||
184 | int sta_id; | ||
185 | int freed; | ||
186 | |||
187 | if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { | ||
188 | IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " | ||
189 | "is out of range [0-%d] %d %d\n", txq_id, | ||
190 | index, txq->q.n_bd, txq->q.write_ptr, | ||
191 | txq->q.read_ptr); | ||
192 | return; | ||
193 | } | ||
194 | |||
195 | info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); | ||
196 | memset(&info->status, 0, sizeof(info->status)); | ||
197 | |||
198 | tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS; | ||
199 | sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS; | ||
200 | |||
201 | if (txq->sched_retry) { | ||
202 | const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp); | ||
203 | struct iwl_ht_agg *agg = NULL; | ||
204 | |||
205 | agg = &priv->stations[sta_id].tid[tid].agg; | ||
206 | |||
207 | iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); | ||
208 | |||
209 | /* check if BAR is needed */ | ||
210 | if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) | ||
211 | info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; | ||
212 | |||
213 | if (txq->q.read_ptr != (scd_ssn & 0xff)) { | ||
214 | index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); | ||
215 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim " | ||
216 | "scd_ssn=%d idx=%d txq=%d swq=%d\n", | ||
217 | scd_ssn , index, txq_id, txq->swq_id); | ||
218 | |||
219 | freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); | ||
220 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | ||
221 | |||
222 | if (priv->mac80211_registered && | ||
223 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | ||
224 | (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { | ||
225 | if (agg->state == IWL_AGG_OFF) | ||
226 | iwl_wake_queue(priv, txq_id); | ||
227 | else | ||
228 | iwl_wake_queue(priv, txq->swq_id); | ||
229 | } | ||
230 | } | ||
231 | } else { | ||
232 | BUG_ON(txq_id != txq->swq_id); | ||
233 | |||
234 | info->status.rates[0].count = tx_resp->failure_frame + 1; | ||
235 | info->flags |= iwl_tx_status_to_mac80211(status); | ||
236 | iwlagn_hwrate_to_tx_control(priv, | ||
237 | le32_to_cpu(tx_resp->rate_n_flags), | ||
238 | info); | ||
239 | |||
240 | IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags " | ||
241 | "0x%x retries %d\n", | ||
242 | txq_id, | ||
243 | iwl_get_tx_fail_reason(status), status, | ||
244 | le32_to_cpu(tx_resp->rate_n_flags), | ||
245 | tx_resp->failure_frame); | ||
246 | |||
247 | freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); | ||
248 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | ||
249 | |||
250 | if (priv->mac80211_registered && | ||
251 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) | ||
252 | iwl_wake_queue(priv, txq_id); | ||
253 | } | ||
254 | |||
255 | iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); | ||
256 | |||
257 | iwl_check_abort_status(priv, tx_resp->frame_count, status); | ||
258 | } | ||
259 | |||
260 | void iwlagn_rx_handler_setup(struct iwl_priv *priv) | ||
261 | { | ||
262 | /* init calibration handlers */ | ||
263 | priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = | ||
264 | iwlagn_rx_calib_result; | ||
265 | priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = | ||
266 | iwlagn_rx_calib_complete; | ||
267 | priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; | ||
268 | } | ||
269 | |||
270 | void iwlagn_setup_deferred_work(struct iwl_priv *priv) | ||
271 | { | ||
272 | /* in agn, the tx power calibration is done in uCode */ | ||
273 | priv->disable_tx_power_cal = 1; | ||
274 | } | ||
275 | |||
276 | int iwlagn_hw_valid_rtc_data_addr(u32 addr) | ||
277 | { | ||
278 | return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) && | ||
279 | (addr < IWLAGN_RTC_DATA_UPPER_BOUND); | ||
280 | } | ||
281 | |||
282 | int iwlagn_send_tx_power(struct iwl_priv *priv) | ||
283 | { | ||
284 | struct iwl5000_tx_power_dbm_cmd tx_power_cmd; | ||
285 | u8 tx_ant_cfg_cmd; | ||
286 | |||
287 | /* user limit is in dBm; the command wants half-dBm, so multiply by 2 */ | ||
288 | tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); | ||
289 | |||
290 | if (priv->tx_power_lmt_in_half_dbm && | ||
291 | priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) { | ||
292 | /* | ||
293 | * The newer devices use an enhanced/extended tx power table in | ||
294 | * EEPROM whose format is half dBm. The driver needs to convert | ||
295 | * it to dBm before reporting to mac80211, which may lose 1/2 dBm | ||
296 | * of resolution. The driver performs a "round-up" operation | ||
297 | * before reporting, but that can push the tx power 1/2 dBm over | ||
298 | * the regulatory limit. Perform the check here: if | ||
299 | * "tx_power_user_lmt" is higher than the EEPROM value (in | ||
300 | * half-dBm format), lower the tx power based on the EEPROM | ||
301 | * limit. | ||
302 | */ | ||
303 | tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm; | ||
304 | } | ||
305 | tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED; | ||
306 | tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO; | ||
307 | |||
308 | if (IWL_UCODE_API(priv->ucode_ver) == 1) | ||
309 | tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1; | ||
310 | else | ||
311 | tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; | ||
312 | |||
313 | return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd, | ||
314 | sizeof(tx_power_cmd), &tx_power_cmd, | ||
315 | NULL); | ||
316 | } | ||
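The command above wants its limit in half-dBm units: the user limit (dBm) is doubled, then capped at the EEPROM half-dBm limit when one exists. Below is a standalone sketch of just that arithmetic, outside the driver; clamp_tx_power and its parameters are illustrative names, not iwlwifi symbols.

	#include <stdio.h>

	/* Standalone sketch of the half-dBm clamping done in iwlagn_send_tx_power();
	 * names are illustrative, not the driver's. */
	static signed char clamp_tx_power(int user_lmt_dbm, signed char eeprom_lmt_half_dbm)
	{
		/* the command wants half-dBm, the user limit is in dBm */
		signed char global_lmt = (signed char)(2 * user_lmt_dbm);

		/* honor a (lower) EEPROM limit, already in half-dBm */
		if (eeprom_lmt_half_dbm && eeprom_lmt_half_dbm < global_lmt)
			global_lmt = eeprom_lmt_half_dbm;

		return global_lmt;
	}

	int main(void)
	{
		/* user asks for 16 dBm but EEPROM only allows 31 half-dBm (15.5 dBm) */
		printf("%d half-dBm\n", clamp_tx_power(16, 31));	/* prints 31 */
		return 0;
	}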
317 | |||
318 | void iwlagn_temperature(struct iwl_priv *priv) | ||
319 | { | ||
320 | /* store temperature from statistics (in Celsius) */ | ||
321 | priv->temperature = le32_to_cpu(priv->statistics.general.temperature); | ||
322 | iwl_tt_handler(priv); | ||
323 | } | ||
324 | |||
325 | u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv) | ||
326 | { | ||
327 | struct iwl_eeprom_calib_hdr { | ||
328 | u8 version; | ||
329 | u8 pa_type; | ||
330 | u16 voltage; | ||
331 | } *hdr; | ||
332 | |||
333 | hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, | ||
334 | EEPROM_5000_CALIB_ALL); | ||
335 | return hdr->version; | ||
336 | |||
337 | } | ||
338 | |||
339 | /* | ||
340 | * EEPROM | ||
341 | */ | ||
342 | static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) | ||
343 | { | ||
344 | u16 offset = 0; | ||
345 | |||
346 | if ((address & INDIRECT_ADDRESS) == 0) | ||
347 | return address; | ||
348 | |||
349 | switch (address & INDIRECT_TYPE_MSK) { | ||
350 | case INDIRECT_HOST: | ||
351 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST); | ||
352 | break; | ||
353 | case INDIRECT_GENERAL: | ||
354 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL); | ||
355 | break; | ||
356 | case INDIRECT_REGULATORY: | ||
357 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY); | ||
358 | break; | ||
359 | case INDIRECT_CALIBRATION: | ||
360 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION); | ||
361 | break; | ||
362 | case INDIRECT_PROCESS_ADJST: | ||
363 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST); | ||
364 | break; | ||
365 | case INDIRECT_OTHERS: | ||
366 | offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS); | ||
367 | break; | ||
368 | default: | ||
369 | IWL_ERR(priv, "illegal indirect type: 0x%X\n", | ||
370 | address & INDIRECT_TYPE_MSK); | ||
371 | break; | ||
372 | } | ||
373 | |||
374 | /* translate the offset from words to bytes */ | ||
375 | return (address & ADDRESS_MSK) + (offset << 1); | ||
376 | } | ||
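eeprom_indirect_address() resolves an indirect EEPROM address through a link cell that stores an offset in 16-bit words, shifting it left by one to get bytes and adding the masked address. A standalone sketch of that translation follows; the SK_* constants are made up for illustration, the real INDIRECT_* / ADDRESS_MSK values live in the iwlwifi EEPROM headers.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative constants only. */
	#define SK_INDIRECT_ADDRESS	0x00100000
	#define SK_ADDRESS_MSK		0x0000FFFF

	static uint32_t resolve(uint32_t address, uint16_t link_offset_words)
	{
		if (!(address & SK_INDIRECT_ADDRESS))
			return address;		/* already a direct byte address */

		/* the link cell stores a word offset; words -> bytes is << 1 */
		return (address & SK_ADDRESS_MSK) + ((uint32_t)link_offset_words << 1);
	}

	int main(void)
	{
		/* indirect offset 0x10, link table says the block starts at word 0x200 */
		printf("0x%X\n", resolve(SK_INDIRECT_ADDRESS | 0x10, 0x200));	/* 0x410 */
		return 0;
	}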
377 | |||
378 | const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, | ||
379 | size_t offset) | ||
380 | { | ||
381 | u32 address = eeprom_indirect_address(priv, offset); | ||
382 | BUG_ON(address >= priv->cfg->eeprom_size); | ||
383 | return &priv->eeprom[address]; | ||
384 | } | ||
385 | |||
386 | struct iwl_mod_params iwlagn_mod_params = { | ||
387 | .amsdu_size_8K = 1, | ||
388 | .restart_fw = 1, | ||
389 | /* the rest are 0 by default */ | ||
390 | }; | ||
391 | |||
392 | void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
393 | { | ||
394 | unsigned long flags; | ||
395 | int i; | ||
396 | spin_lock_irqsave(&rxq->lock, flags); | ||
397 | INIT_LIST_HEAD(&rxq->rx_free); | ||
398 | INIT_LIST_HEAD(&rxq->rx_used); | ||
399 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
400 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
401 | /* In the reset function, these buffers may have been allocated | ||
402 | * to an SKB, so we need to unmap and free potential storage */ | ||
403 | if (rxq->pool[i].page != NULL) { | ||
404 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
405 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
406 | PCI_DMA_FROMDEVICE); | ||
407 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
408 | rxq->pool[i].page = NULL; | ||
409 | } | ||
410 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
411 | } | ||
412 | |||
413 | for (i = 0; i < RX_QUEUE_SIZE; i++) | ||
414 | rxq->queue[i] = NULL; | ||
415 | |||
416 | /* Set us so that we have processed and used all buffers, but have | ||
417 | * not restocked the Rx queue with fresh buffers */ | ||
418 | rxq->read = rxq->write = 0; | ||
419 | rxq->write_actual = 0; | ||
420 | rxq->free_count = 0; | ||
421 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
422 | } | ||
423 | |||
424 | int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
425 | { | ||
426 | u32 rb_size; | ||
427 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | ||
428 | u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ | ||
429 | |||
430 | if (!priv->cfg->use_isr_legacy) | ||
431 | rb_timeout = RX_RB_TIMEOUT; | ||
432 | |||
433 | if (priv->cfg->mod_params->amsdu_size_8K) | ||
434 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; | ||
435 | else | ||
436 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | ||
437 | |||
438 | /* Stop Rx DMA */ | ||
439 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
440 | |||
441 | /* Reset driver's Rx queue write index */ | ||
442 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | ||
443 | |||
444 | /* Tell device where to find RBD circular buffer in DRAM */ | ||
445 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | ||
446 | (u32)(rxq->dma_addr >> 8)); | ||
447 | |||
448 | /* Tell device where in DRAM to update its Rx status */ | ||
449 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, | ||
450 | rxq->rb_stts_dma >> 4); | ||
451 | |||
452 | /* Enable Rx DMA | ||
453 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | ||
454 | * the credit mechanism in 5000 HW RX FIFO | ||
455 | * Direct rx interrupts to hosts | ||
456 | * Rx buffer size 4 or 8k | ||
457 | * RB timeout 0x10 | ||
458 | * 256 RBDs | ||
459 | */ | ||
460 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, | ||
461 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | ||
462 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | ||
463 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | ||
464 | FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | | ||
465 | rb_size| | ||
466 | (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| | ||
467 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | ||
468 | |||
469 | /* Set interrupt coalescing timer to default (2048 usecs) */ | ||
470 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | int iwlagn_hw_nic_init(struct iwl_priv *priv) | ||
476 | { | ||
477 | unsigned long flags; | ||
478 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
479 | int ret; | ||
480 | |||
481 | /* nic_init */ | ||
482 | spin_lock_irqsave(&priv->lock, flags); | ||
483 | priv->cfg->ops->lib->apm_ops.init(priv); | ||
484 | |||
485 | /* Set interrupt coalescing calibration timer to default (512 usecs) */ | ||
486 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); | ||
487 | |||
488 | spin_unlock_irqrestore(&priv->lock, flags); | ||
489 | |||
490 | ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); | ||
491 | |||
492 | priv->cfg->ops->lib->apm_ops.config(priv); | ||
493 | |||
494 | /* Allocate the RX queue, or reset if it is already allocated */ | ||
495 | if (!rxq->bd) { | ||
496 | ret = iwl_rx_queue_alloc(priv); | ||
497 | if (ret) { | ||
498 | IWL_ERR(priv, "Unable to initialize Rx queue\n"); | ||
499 | return -ENOMEM; | ||
500 | } | ||
501 | } else | ||
502 | iwlagn_rx_queue_reset(priv, rxq); | ||
503 | |||
504 | iwlagn_rx_replenish(priv); | ||
505 | |||
506 | iwlagn_rx_init(priv, rxq); | ||
507 | |||
508 | spin_lock_irqsave(&priv->lock, flags); | ||
509 | |||
510 | rxq->need_update = 1; | ||
511 | iwl_rx_queue_update_write_ptr(priv, rxq); | ||
512 | |||
513 | spin_unlock_irqrestore(&priv->lock, flags); | ||
514 | |||
515 | /* Allocate or reset and init all Tx and Command queues */ | ||
516 | if (!priv->txq) { | ||
517 | ret = iwlagn_txq_ctx_alloc(priv); | ||
518 | if (ret) | ||
519 | return ret; | ||
520 | } else | ||
521 | iwlagn_txq_ctx_reset(priv); | ||
522 | |||
523 | set_bit(STATUS_INIT, &priv->status); | ||
524 | |||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | /** | ||
529 | * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | ||
530 | */ | ||
531 | static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv, | ||
532 | dma_addr_t dma_addr) | ||
533 | { | ||
534 | return cpu_to_le32((u32)(dma_addr >> 8)); | ||
535 | } | ||
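iwlagn_dma_addr2rbd_ptr() works because receive buffers are 256-byte aligned: the low 8 bits of the DMA address are zero, so the address can be stored shifted right by 8 and reconstructed losslessly. A standalone round-trip sketch of that encoding:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t dma_addr = 0x0000000123456700ULL;	/* 256-byte aligned, < 2^36 */

		assert((dma_addr & 0xFF) == 0);			/* alignment makes >> 8 lossless */

		uint32_t rbd  = (uint32_t)(dma_addr >> 8);	/* value stored in the RBD */
		uint64_t back = (uint64_t)rbd << 8;		/* address the device reconstructs */

		assert(back == dma_addr);
		return 0;
	}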
536 | |||
537 | /** | ||
538 | * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool | ||
539 | * | ||
540 | * If there are slots in the RX queue that need to be restocked, | ||
541 | * and we have free pre-allocated buffers, fill the ranks as much | ||
542 | * as we can, pulling from rx_free. | ||
543 | * | ||
544 | * This moves the 'write' index forward to catch up with 'processed', and | ||
545 | * also updates the memory address in the firmware to reference the new | ||
546 | * target buffer. | ||
547 | */ | ||
548 | void iwlagn_rx_queue_restock(struct iwl_priv *priv) | ||
549 | { | ||
550 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
551 | struct list_head *element; | ||
552 | struct iwl_rx_mem_buffer *rxb; | ||
553 | unsigned long flags; | ||
554 | |||
555 | spin_lock_irqsave(&rxq->lock, flags); | ||
556 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
557 | /* The overwritten rxb must be a used one */ | ||
558 | rxb = rxq->queue[rxq->write]; | ||
559 | BUG_ON(rxb && rxb->page); | ||
560 | |||
561 | /* Get next free Rx buffer, remove from free list */ | ||
562 | element = rxq->rx_free.next; | ||
563 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
564 | list_del(element); | ||
565 | |||
566 | /* Point to Rx buffer via next RBD in circular buffer */ | ||
567 | rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv, | ||
568 | rxb->page_dma); | ||
569 | rxq->queue[rxq->write] = rxb; | ||
570 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
571 | rxq->free_count--; | ||
572 | } | ||
573 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
574 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
575 | * refill it */ | ||
576 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
577 | queue_work(priv->workqueue, &priv->rx_replenish); | ||
578 | |||
579 | |||
580 | /* If we've added more space for the firmware to place data, tell it. | ||
581 | * Increment device's write pointer in multiples of 8. */ | ||
582 | if (rxq->write_actual != (rxq->write & ~0x7)) { | ||
583 | spin_lock_irqsave(&rxq->lock, flags); | ||
584 | rxq->need_update = 1; | ||
585 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
586 | iwl_rx_queue_update_write_ptr(priv, rxq); | ||
587 | } | ||
588 | } | ||
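The restock path only tells the device about new buffers in multiples of 8, which is what the comparison of write_actual against rxq->write & ~0x7 implements above. A small standalone sketch of that rounding:

	#include <stdio.h>

	int main(void)
	{
		/* the device write pointer is only advanced in steps of 8 RBDs */
		for (unsigned int write = 0; write <= 20; write += 5)
			printf("write=%2u -> tell device %2u\n", write, write & ~0x7u);
		/* prints 0->0, 5->0, 10->8, 15->8, 20->16 */
		return 0;
	}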
589 | |||
590 | /** | ||
591 | * iwlagn_rx_allocate - Move all used packets from rx_used to rx_free | ||
592 | * | ||
593 | * When a buffer is moved to rx_free, a receive page is allocated for it. | ||
594 | * | ||
595 | * The caller then restocks the Rx queue via iwlagn_rx_queue_restock. | ||
596 | * This is called as a scheduled work item (except for during initialization) | ||
597 | */ | ||
598 | void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority) | ||
599 | { | ||
600 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
601 | struct list_head *element; | ||
602 | struct iwl_rx_mem_buffer *rxb; | ||
603 | struct page *page; | ||
604 | unsigned long flags; | ||
605 | gfp_t gfp_mask = priority; | ||
606 | |||
607 | while (1) { | ||
608 | spin_lock_irqsave(&rxq->lock, flags); | ||
609 | if (list_empty(&rxq->rx_used)) { | ||
610 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
611 | return; | ||
612 | } | ||
613 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
614 | |||
615 | if (rxq->free_count > RX_LOW_WATERMARK) | ||
616 | gfp_mask |= __GFP_NOWARN; | ||
617 | |||
618 | if (priv->hw_params.rx_page_order > 0) | ||
619 | gfp_mask |= __GFP_COMP; | ||
620 | |||
621 | /* Alloc a new receive buffer */ | ||
622 | page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); | ||
623 | if (!page) { | ||
624 | if (net_ratelimit()) | ||
625 | IWL_DEBUG_INFO(priv, "alloc_pages failed, " | ||
626 | "order: %d\n", | ||
627 | priv->hw_params.rx_page_order); | ||
628 | |||
629 | if ((rxq->free_count <= RX_LOW_WATERMARK) && | ||
630 | net_ratelimit()) | ||
631 | IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n", | ||
632 | priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", | ||
633 | rxq->free_count); | ||
634 | /* We don't reschedule replenish work here -- we will | ||
635 | * call the restock method and if it still needs | ||
636 | * more buffers it will schedule replenish */ | ||
637 | return; | ||
638 | } | ||
639 | |||
640 | spin_lock_irqsave(&rxq->lock, flags); | ||
641 | |||
642 | if (list_empty(&rxq->rx_used)) { | ||
643 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
644 | __free_pages(page, priv->hw_params.rx_page_order); | ||
645 | return; | ||
646 | } | ||
647 | element = rxq->rx_used.next; | ||
648 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
649 | list_del(element); | ||
650 | |||
651 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
652 | |||
653 | BUG_ON(rxb->page); | ||
654 | rxb->page = page; | ||
655 | /* Get physical address of the RB */ | ||
656 | rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, | ||
657 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
658 | PCI_DMA_FROMDEVICE); | ||
659 | /* dma address must be no more than 36 bits */ | ||
660 | BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); | ||
661 | /* and also 256 byte aligned! */ | ||
662 | BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); | ||
663 | |||
664 | spin_lock_irqsave(&rxq->lock, flags); | ||
665 | |||
666 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
667 | rxq->free_count++; | ||
668 | priv->alloc_rxb_page++; | ||
669 | |||
670 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
671 | } | ||
672 | } | ||
673 | |||
674 | void iwlagn_rx_replenish(struct iwl_priv *priv) | ||
675 | { | ||
676 | unsigned long flags; | ||
677 | |||
678 | iwlagn_rx_allocate(priv, GFP_KERNEL); | ||
679 | |||
680 | spin_lock_irqsave(&priv->lock, flags); | ||
681 | iwlagn_rx_queue_restock(priv); | ||
682 | spin_unlock_irqrestore(&priv->lock, flags); | ||
683 | } | ||
684 | |||
685 | void iwlagn_rx_replenish_now(struct iwl_priv *priv) | ||
686 | { | ||
687 | iwlagn_rx_allocate(priv, GFP_ATOMIC); | ||
688 | |||
689 | iwlagn_rx_queue_restock(priv); | ||
690 | } | ||
691 | |||
692 | /* Assumes that the page field of the buffers in 'pool' is kept accurate. | ||
693 | * If a page has been detached, the pool entry needs to have its page set to NULL. | ||
694 | * This free routine walks the list of pool entries and, if the page is | ||
695 | * non-NULL, unmaps and frees it. | ||
696 | */ | ||
697 | void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
698 | { | ||
699 | int i; | ||
700 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | ||
701 | if (rxq->pool[i].page != NULL) { | ||
702 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
703 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
704 | PCI_DMA_FROMDEVICE); | ||
705 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
706 | rxq->pool[i].page = NULL; | ||
707 | } | ||
708 | } | ||
709 | |||
710 | dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, | ||
711 | rxq->dma_addr); | ||
712 | dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), | ||
713 | rxq->rb_stts, rxq->rb_stts_dma); | ||
714 | rxq->bd = NULL; | ||
715 | rxq->rb_stts = NULL; | ||
716 | } | ||
717 | |||
718 | int iwlagn_rxq_stop(struct iwl_priv *priv) | ||
719 | { | ||
720 | |||
721 | /* stop Rx DMA */ | ||
722 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
723 | iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, | ||
724 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | ||
725 | |||
726 | return 0; | ||
727 | } | ||
728 | |||
729 | int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) | ||
730 | { | ||
731 | int idx = 0; | ||
732 | int band_offset = 0; | ||
733 | |||
734 | /* HT rate format: mac80211 wants an MCS number, which is just LSB */ | ||
735 | if (rate_n_flags & RATE_MCS_HT_MSK) { | ||
736 | idx = (rate_n_flags & 0xff); | ||
737 | return idx; | ||
738 | /* Legacy rate format, search for match in table */ | ||
739 | } else { | ||
740 | if (band == IEEE80211_BAND_5GHZ) | ||
741 | band_offset = IWL_FIRST_OFDM_RATE; | ||
742 | for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) | ||
743 | if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) | ||
744 | return idx - band_offset; | ||
745 | } | ||
746 | |||
747 | return -1; | ||
748 | } | ||
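For HT frames the MCS index handed to mac80211 is simply the low byte of rate_n_flags; legacy frames instead need a PLCP lookup in iwl_rates with a band offset. A standalone sketch of the HT branch only, using an illustrative SK_RATE_MCS_HT flag bit rather than the driver's RATE_MCS_HT_MSK value:

	#include <stdint.h>
	#include <stdio.h>

	#define SK_RATE_MCS_HT	0x00000100	/* illustrative HT flag bit, not the driver's mask */

	static int hwrate_to_mcs(uint32_t rate_n_flags)
	{
		if (rate_n_flags & SK_RATE_MCS_HT)
			return rate_n_flags & 0xff;	/* HT: the MCS number is just the LSB */
		return -1;				/* legacy: would need the PLCP rate table */
	}

	int main(void)
	{
		printf("%d\n", hwrate_to_mcs(SK_RATE_MCS_HT | 7));	/* 7  (MCS 7) */
		printf("%d\n", hwrate_to_mcs(0x0d));			/* -1 (legacy PLCP) */
		return 0;
	}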
749 | |||
750 | /* Calc max signal level (dBm) among 3 possible receivers */ | ||
751 | static inline int iwlagn_calc_rssi(struct iwl_priv *priv, | ||
752 | struct iwl_rx_phy_res *rx_resp) | ||
753 | { | ||
754 | return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); | ||
755 | } | ||
756 | |||
757 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
758 | /** | ||
759 | * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions | ||
760 | * | ||
761 | * You may hack this function to show different aspects of received frames, | ||
762 | * including selective frame dumps. | ||
763 | * group100 parameter selects whether to show 1 out of 100 good data frames. | ||
764 | * All beacon and probe response frames are printed. | ||
765 | */ | ||
766 | static void iwlagn_dbg_report_frame(struct iwl_priv *priv, | ||
767 | struct iwl_rx_phy_res *phy_res, u16 length, | ||
768 | struct ieee80211_hdr *header, int group100) | ||
769 | { | ||
770 | u32 to_us; | ||
771 | u32 print_summary = 0; | ||
772 | u32 print_dump = 0; /* set to 1 to dump all frames' contents */ | ||
773 | u32 hundred = 0; | ||
774 | u32 dataframe = 0; | ||
775 | __le16 fc; | ||
776 | u16 seq_ctl; | ||
777 | u16 channel; | ||
778 | u16 phy_flags; | ||
779 | u32 rate_n_flags; | ||
780 | u32 tsf_low; | ||
781 | int rssi; | ||
782 | |||
783 | if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX))) | ||
784 | return; | ||
785 | |||
786 | /* MAC header */ | ||
787 | fc = header->frame_control; | ||
788 | seq_ctl = le16_to_cpu(header->seq_ctrl); | ||
789 | |||
790 | /* metadata */ | ||
791 | channel = le16_to_cpu(phy_res->channel); | ||
792 | phy_flags = le16_to_cpu(phy_res->phy_flags); | ||
793 | rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); | ||
794 | |||
795 | /* signal statistics */ | ||
796 | rssi = iwlagn_calc_rssi(priv, phy_res); | ||
797 | tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff; | ||
798 | |||
799 | to_us = !compare_ether_addr(header->addr1, priv->mac_addr); | ||
800 | |||
801 | /* if data frame is to us and all is good, | ||
802 | * (optionally) print summary for only 1 out of every 100 */ | ||
803 | if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) == | ||
804 | cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { | ||
805 | dataframe = 1; | ||
806 | if (!group100) | ||
807 | print_summary = 1; /* print each frame */ | ||
808 | else if (priv->framecnt_to_us < 100) { | ||
809 | priv->framecnt_to_us++; | ||
810 | print_summary = 0; | ||
811 | } else { | ||
812 | priv->framecnt_to_us = 0; | ||
813 | print_summary = 1; | ||
814 | hundred = 1; | ||
815 | } | ||
816 | } else { | ||
817 | /* print summary for all other frames */ | ||
818 | print_summary = 1; | ||
819 | } | ||
820 | |||
821 | if (print_summary) { | ||
822 | char *title; | ||
823 | int rate_idx; | ||
824 | u32 bitrate; | ||
825 | |||
826 | if (hundred) | ||
827 | title = "100Frames"; | ||
828 | else if (ieee80211_has_retry(fc)) | ||
829 | title = "Retry"; | ||
830 | else if (ieee80211_is_assoc_resp(fc)) | ||
831 | title = "AscRsp"; | ||
832 | else if (ieee80211_is_reassoc_resp(fc)) | ||
833 | title = "RasRsp"; | ||
834 | else if (ieee80211_is_probe_resp(fc)) { | ||
835 | title = "PrbRsp"; | ||
836 | print_dump = 1; /* dump frame contents */ | ||
837 | } else if (ieee80211_is_beacon(fc)) { | ||
838 | title = "Beacon"; | ||
839 | print_dump = 1; /* dump frame contents */ | ||
840 | } else if (ieee80211_is_atim(fc)) | ||
841 | title = "ATIM"; | ||
842 | else if (ieee80211_is_auth(fc)) | ||
843 | title = "Auth"; | ||
844 | else if (ieee80211_is_deauth(fc)) | ||
845 | title = "DeAuth"; | ||
846 | else if (ieee80211_is_disassoc(fc)) | ||
847 | title = "DisAssoc"; | ||
848 | else | ||
849 | title = "Frame"; | ||
850 | |||
851 | rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); | ||
852 | if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) { | ||
853 | bitrate = 0; | ||
854 | WARN_ON_ONCE(1); | ||
855 | } else { | ||
856 | bitrate = iwl_rates[rate_idx].ieee / 2; | ||
857 | } | ||
858 | |||
859 | /* print frame summary. | ||
860 | * MAC addresses show just the last byte (for brevity), | ||
861 | * but you can hack it to show more, if you'd like to. */ | ||
862 | if (dataframe) | ||
863 | IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " | ||
864 | "len=%u, rssi=%d, chnl=%d, rate=%u,\n", | ||
865 | title, le16_to_cpu(fc), header->addr1[5], | ||
866 | length, rssi, channel, bitrate); | ||
867 | else { | ||
868 | /* src/dst addresses assume managed mode */ | ||
869 | IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, " | ||
870 | "len=%u, rssi=%d, tim=%lu usec, " | ||
871 | "phy=0x%02x, chnl=%d\n", | ||
872 | title, le16_to_cpu(fc), header->addr1[5], | ||
873 | header->addr3[5], length, rssi, | ||
874 | tsf_low - priv->scan_start_tsf, | ||
875 | phy_flags, channel); | ||
876 | } | ||
877 | } | ||
878 | if (print_dump) | ||
879 | iwl_print_hex_dump(priv, IWL_DL_RX, header, length); | ||
880 | } | ||
881 | #endif | ||
882 | |||
883 | static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) | ||
884 | { | ||
885 | u32 decrypt_out = 0; | ||
886 | |||
887 | if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == | ||
888 | RX_RES_STATUS_STATION_FOUND) | ||
889 | decrypt_out |= (RX_RES_STATUS_STATION_FOUND | | ||
890 | RX_RES_STATUS_NO_STATION_INFO_MISMATCH); | ||
891 | |||
892 | decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); | ||
893 | |||
894 | /* packet was not encrypted */ | ||
895 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | ||
896 | RX_RES_STATUS_SEC_TYPE_NONE) | ||
897 | return decrypt_out; | ||
898 | |||
899 | /* packet was encrypted with unknown alg */ | ||
900 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | ||
901 | RX_RES_STATUS_SEC_TYPE_ERR) | ||
902 | return decrypt_out; | ||
903 | |||
904 | /* decryption was not done in HW */ | ||
905 | if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != | ||
906 | RX_MPDU_RES_STATUS_DEC_DONE_MSK) | ||
907 | return decrypt_out; | ||
908 | |||
909 | switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { | ||
910 | |||
911 | case RX_RES_STATUS_SEC_TYPE_CCMP: | ||
912 | /* alg is CCM: check MIC only */ | ||
913 | if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) | ||
914 | /* Bad MIC */ | ||
915 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | ||
916 | else | ||
917 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | ||
918 | |||
919 | break; | ||
920 | |||
921 | case RX_RES_STATUS_SEC_TYPE_TKIP: | ||
922 | if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { | ||
923 | /* Bad TTAK */ | ||
924 | decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; | ||
925 | break; | ||
926 | } | ||
927 | /* fall through if TTAK OK */ | ||
928 | default: | ||
929 | if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) | ||
930 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | ||
931 | else | ||
932 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | ||
933 | break; | ||
934 | } | ||
935 | |||
936 | IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", | ||
937 | decrypt_in, decrypt_out); | ||
938 | |||
939 | return decrypt_out; | ||
940 | } | ||
941 | |||
942 | static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, | ||
943 | struct ieee80211_hdr *hdr, | ||
944 | u16 len, | ||
945 | u32 ampdu_status, | ||
946 | struct iwl_rx_mem_buffer *rxb, | ||
947 | struct ieee80211_rx_status *stats) | ||
948 | { | ||
949 | struct sk_buff *skb; | ||
950 | __le16 fc = hdr->frame_control; | ||
951 | |||
952 | /* We only process data packets if the interface is open */ | ||
953 | if (unlikely(!priv->is_open)) { | ||
954 | IWL_DEBUG_DROP_LIMIT(priv, | ||
955 | "Dropping packet while interface is not open.\n"); | ||
956 | return; | ||
957 | } | ||
958 | |||
959 | /* In case of HW accelerated crypto and bad decryption, drop */ | ||
960 | if (!priv->cfg->mod_params->sw_crypto && | ||
961 | iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) | ||
962 | return; | ||
963 | |||
964 | skb = dev_alloc_skb(128); | ||
965 | if (!skb) { | ||
966 | IWL_ERR(priv, "dev_alloc_skb failed\n"); | ||
967 | return; | ||
968 | } | ||
969 | |||
970 | skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); | ||
971 | |||
972 | iwl_update_stats(priv, false, fc, len); | ||
973 | memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); | ||
974 | |||
975 | ieee80211_rx(priv->hw, skb); | ||
976 | priv->alloc_rxb_page--; | ||
977 | rxb->page = NULL; | ||
978 | } | ||
979 | |||
980 | /* Called for REPLY_RX (legacy ABG frames), or | ||
981 | * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ | ||
982 | void iwlagn_rx_reply_rx(struct iwl_priv *priv, | ||
983 | struct iwl_rx_mem_buffer *rxb) | ||
984 | { | ||
985 | struct ieee80211_hdr *header; | ||
986 | struct ieee80211_rx_status rx_status; | ||
987 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
988 | struct iwl_rx_phy_res *phy_res; | ||
989 | __le32 rx_pkt_status; | ||
990 | struct iwl4965_rx_mpdu_res_start *amsdu; | ||
991 | u32 len; | ||
992 | u32 ampdu_status; | ||
993 | u32 rate_n_flags; | ||
994 | |||
995 | /** | ||
996 | * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. | ||
997 | * REPLY_RX: physical layer info is in this buffer | ||
998 | * REPLY_RX_MPDU_CMD: physical layer info was sent in separate | ||
999 | * command and cached in priv->last_phy_res | ||
1000 | * | ||
1001 | * Here we set up local variables depending on which command is | ||
1002 | * received. | ||
1003 | */ | ||
1004 | if (pkt->hdr.cmd == REPLY_RX) { | ||
1005 | phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; | ||
1006 | header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) | ||
1007 | + phy_res->cfg_phy_cnt); | ||
1008 | |||
1009 | len = le16_to_cpu(phy_res->byte_count); | ||
1010 | rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + | ||
1011 | phy_res->cfg_phy_cnt + len); | ||
1012 | ampdu_status = le32_to_cpu(rx_pkt_status); | ||
1013 | } else { | ||
1014 | if (!priv->_agn.last_phy_res_valid) { | ||
1015 | IWL_ERR(priv, "MPDU frame without cached PHY data\n"); | ||
1016 | return; | ||
1017 | } | ||
1018 | phy_res = &priv->_agn.last_phy_res; | ||
1019 | amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; | ||
1020 | header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); | ||
1021 | len = le16_to_cpu(amsdu->byte_count); | ||
1022 | rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); | ||
1023 | ampdu_status = iwlagn_translate_rx_status(priv, | ||
1024 | le32_to_cpu(rx_pkt_status)); | ||
1025 | } | ||
1026 | |||
1027 | if ((unlikely(phy_res->cfg_phy_cnt > 20))) { | ||
1028 | IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n", | ||
1029 | phy_res->cfg_phy_cnt); | ||
1030 | return; | ||
1031 | } | ||
1032 | |||
1033 | if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || | ||
1034 | !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { | ||
1035 | IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", | ||
1036 | le32_to_cpu(rx_pkt_status)); | ||
1037 | return; | ||
1038 | } | ||
1039 | |||
1040 | /* This will be used in several places later */ | ||
1041 | rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); | ||
1042 | |||
1043 | /* rx_status carries information about the packet to mac80211 */ | ||
1044 | rx_status.mactime = le64_to_cpu(phy_res->timestamp); | ||
1045 | rx_status.freq = | ||
1046 | ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel)); | ||
1047 | rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? | ||
1048 | IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; | ||
1049 | rx_status.rate_idx = | ||
1050 | iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); | ||
1051 | rx_status.flag = 0; | ||
1052 | |||
1053 | /* The TSF isn't reliable, so as a workaround don't propagate it | ||
1054 | * to mac80211; this keeps the user experience smooth. */ | ||
1055 | /*rx_status.flag |= RX_FLAG_TSFT;*/ | ||
1056 | |||
1057 | priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); | ||
1058 | |||
1059 | /* Find max signal strength (dBm) among 3 antenna/receiver chains */ | ||
1060 | rx_status.signal = iwlagn_calc_rssi(priv, phy_res); | ||
1061 | |||
1062 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1063 | /* Set "1" to report good data frames in groups of 100 */ | ||
1064 | if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX)) | ||
1065 | iwlagn_dbg_report_frame(priv, phy_res, len, header, 1); | ||
1066 | #endif | ||
1067 | iwl_dbg_log_rx_data_frame(priv, len, header); | ||
1068 | IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", | ||
1069 | rx_status.signal, (unsigned long long)rx_status.mactime); | ||
1070 | |||
1071 | /* | ||
1072 | * "antenna number" | ||
1073 | * | ||
1074 | * It seems that the antenna field in the phy flags value | ||
1075 | * is actually a bit field. This is undefined by radiotap, | ||
1076 | * it wants an actual antenna number but I always get "7" | ||
1077 | * for most legacy frames I receive indicating that the | ||
1078 | * same frame was received on all three RX chains. | ||
1079 | * | ||
1080 | * I think this field should be removed in favor of a | ||
1081 | * new 802.11n radiotap field "RX chains" that is defined | ||
1082 | * as a bitmask. | ||
1083 | */ | ||
1084 | rx_status.antenna = | ||
1085 | (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) | ||
1086 | >> RX_RES_PHY_FLAGS_ANTENNA_POS; | ||
1087 | |||
1088 | /* set the preamble flag if appropriate */ | ||
1089 | if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) | ||
1090 | rx_status.flag |= RX_FLAG_SHORTPRE; | ||
1091 | |||
1092 | /* Set up the HT phy flags */ | ||
1093 | if (rate_n_flags & RATE_MCS_HT_MSK) | ||
1094 | rx_status.flag |= RX_FLAG_HT; | ||
1095 | if (rate_n_flags & RATE_MCS_HT40_MSK) | ||
1096 | rx_status.flag |= RX_FLAG_40MHZ; | ||
1097 | if (rate_n_flags & RATE_MCS_SGI_MSK) | ||
1098 | rx_status.flag |= RX_FLAG_SHORT_GI; | ||
1099 | |||
1100 | iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, | ||
1101 | rxb, &rx_status); | ||
1102 | } | ||
1103 | |||
1104 | /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). | ||
1105 | * This will be used later in iwlagn_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ | ||
1106 | void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, | ||
1107 | struct iwl_rx_mem_buffer *rxb) | ||
1108 | { | ||
1109 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1110 | priv->_agn.last_phy_res_valid = true; | ||
1111 | memcpy(&priv->_agn.last_phy_res, pkt->u.raw, | ||
1112 | sizeof(struct iwl_rx_phy_res)); | ||
1113 | } | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 0de80914be77..f7d85a2173c8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c | |||
@@ -2003,7 +2003,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, | |||
2003 | /* rates available for this association, and for modulation mode */ | 2003 | /* rates available for this association, and for modulation mode */ |
2004 | rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); | 2004 | rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); |
2005 | 2005 | ||
2006 | IWL_DEBUG_RATE(priv, "mask 0x%04X \n", rate_mask); | 2006 | IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask); |
2007 | 2007 | ||
2008 | /* mask with station rate restriction */ | 2008 | /* mask with station rate restriction */ |
2009 | if (is_legacy(tbl->lq_type)) { | 2009 | if (is_legacy(tbl->lq_type)) { |
@@ -2410,7 +2410,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, | |||
2410 | 2410 | ||
2411 | struct sk_buff *skb = txrc->skb; | 2411 | struct sk_buff *skb = txrc->skb; |
2412 | struct ieee80211_supported_band *sband = txrc->sband; | 2412 | struct ieee80211_supported_band *sband = txrc->sband; |
2413 | struct iwl_priv *priv = (struct iwl_priv *)priv_r; | 2413 | struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; |
2414 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 2414 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
2415 | struct iwl_lq_sta *lq_sta = priv_sta; | 2415 | struct iwl_lq_sta *lq_sta = priv_sta; |
2416 | int rate_idx; | 2416 | int rate_idx; |
@@ -2934,8 +2934,6 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, | |||
2934 | desc += sprintf(buff+desc, | 2934 | desc += sprintf(buff+desc, |
2935 | "Bit Rate= %d Mb/s\n", | 2935 | "Bit Rate= %d Mb/s\n", |
2936 | iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); | 2936 | iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); |
2937 | desc += sprintf(buff+desc, "Noise Level= %d dBm\n", | ||
2938 | priv->last_rx_noise); | ||
2939 | 2937 | ||
2940 | ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); | 2938 | ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); |
2941 | return ret; | 2939 | return ret; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c new file mode 100644 index 000000000000..3077eac58880 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c | |||
@@ -0,0 +1,1333 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * GPL LICENSE SUMMARY | ||
4 | * | ||
5 | * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of version 2 of the GNU General Public License as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
19 | * USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution | ||
22 | * in the file called LICENSE.GPL. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/sched.h> | ||
34 | |||
35 | #include "iwl-dev.h" | ||
36 | #include "iwl-core.h" | ||
37 | #include "iwl-sta.h" | ||
38 | #include "iwl-io.h" | ||
39 | #include "iwl-helpers.h" | ||
40 | #include "iwl-agn-hw.h" | ||
41 | #include "iwl-agn.h" | ||
42 | |||
43 | /* | ||
44 | * mac80211 queues, ACs, hardware queues, FIFOs. | ||
45 | * | ||
46 | * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues | ||
47 | * | ||
48 | * Mac80211 uses the following numbers, which we get from it | ||
49 | * by way of skb_get_queue_mapping(skb): | ||
50 | * | ||
51 | * VO 0 | ||
52 | * VI 1 | ||
53 | * BE 2 | ||
54 | * BK 3 | ||
55 | * | ||
56 | * | ||
57 | * Regular (not A-MPDU) frames are put into hardware queues corresponding | ||
58 | * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their | ||
59 | * own queue per aggregation session (RA/TID combination); such queues are | ||
60 | * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In | ||
61 | * order to map frames to the right queue, we also need an AC->hw queue | ||
62 | * mapping. This is implemented here. | ||
63 | * | ||
64 | * Due to the way hw queues are set up (by the hw specific modules like | ||
65 | * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity | ||
66 | * mapping. | ||
67 | */ | ||
68 | |||
69 | static const u8 tid_to_ac[] = { | ||
70 | /* this matches the mac80211 numbers */ | ||
71 | 2, 3, 3, 2, 1, 1, 0, 0 | ||
72 | }; | ||
73 | |||
74 | static const u8 ac_to_fifo[] = { | ||
75 | IWL_TX_FIFO_VO, | ||
76 | IWL_TX_FIFO_VI, | ||
77 | IWL_TX_FIFO_BE, | ||
78 | IWL_TX_FIFO_BK, | ||
79 | }; | ||
80 | |||
81 | static inline int get_fifo_from_ac(u8 ac) | ||
82 | { | ||
83 | return ac_to_fifo[ac]; | ||
84 | } | ||
85 | |||
86 | static inline int get_fifo_from_tid(u16 tid) | ||
87 | { | ||
88 | if (likely(tid < ARRAY_SIZE(tid_to_ac))) | ||
89 | return get_fifo_from_ac(tid_to_ac[tid]); | ||
90 | |||
91 | /* no support for TIDs 8-15 yet */ | ||
92 | return -EINVAL; | ||
93 | } | ||
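The two tables above compose: a TID is first mapped to an access category, and the access category to a hardware FIFO. A standalone sketch of that composition, with plain integers standing in for the IWL_TX_FIFO_* constants (not their real values):

	#include <stdio.h>

	/* same TID->AC table as above; FIFO numbers are placeholders for
	 * IWL_TX_FIFO_VO/VI/BE/BK */
	static const unsigned char tid_to_ac[] = { 2, 3, 3, 2, 1, 1, 0, 0 };
	static const unsigned char ac_to_fifo[] = { 3 /* VO */, 2 /* VI */, 1 /* BE */, 0 /* BK */ };

	int main(void)
	{
		for (unsigned int tid = 0; tid < 8; tid++)
			printf("TID %u -> AC %u -> FIFO %u\n",
			       tid, tid_to_ac[tid], ac_to_fifo[tid_to_ac[tid]]);
		return 0;
	}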
94 | |||
95 | /** | ||
96 | * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array | ||
97 | */ | ||
98 | void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv, | ||
99 | struct iwl_tx_queue *txq, | ||
100 | u16 byte_cnt) | ||
101 | { | ||
102 | struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; | ||
103 | int write_ptr = txq->q.write_ptr; | ||
104 | int txq_id = txq->q.id; | ||
105 | u8 sec_ctl = 0; | ||
106 | u8 sta_id = 0; | ||
107 | u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; | ||
108 | __le16 bc_ent; | ||
109 | |||
110 | WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); | ||
111 | |||
112 | if (txq_id != IWL_CMD_QUEUE_NUM) { | ||
113 | sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; | ||
114 | sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; | ||
115 | |||
116 | switch (sec_ctl & TX_CMD_SEC_MSK) { | ||
117 | case TX_CMD_SEC_CCM: | ||
118 | len += CCMP_MIC_LEN; | ||
119 | break; | ||
120 | case TX_CMD_SEC_TKIP: | ||
121 | len += TKIP_ICV_LEN; | ||
122 | break; | ||
123 | case TX_CMD_SEC_WEP: | ||
124 | len += WEP_IV_LEN + WEP_ICV_LEN; | ||
125 | break; | ||
126 | } | ||
127 | } | ||
128 | |||
129 | bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); | ||
130 | |||
131 | scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; | ||
132 | |||
133 | if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
134 | scd_bc_tbl[txq_id]. | ||
135 | tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; | ||
136 | } | ||
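Each byte-count entry packs a 12-bit length with the station id in the upper bits (shifted by 12), and entries near the start of the ring are mirrored past TFD_QUEUE_SIZE_MAX so the scheduler can read a contiguous window. A standalone sketch of the packing only, with an illustrative length that already includes the CRC/delimiter overhead:

	#include <stdint.h>
	#include <stdio.h>

	/* pack one byte-count entry: 12-bit length, station id in the upper bits */
	static uint16_t pack_bc_ent(uint16_t len, uint8_t sta_id)
	{
		return (uint16_t)((len & 0xFFF) | ((uint16_t)sta_id << 12));
	}

	int main(void)
	{
		/* 1508 = frame bytes plus CRC/delimiter overhead (illustrative) */
		uint16_t ent = pack_bc_ent(1508, 3);

		printf("entry=0x%04x len=%u sta=%u\n", ent, ent & 0xFFF, ent >> 12);
		return 0;
	}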
137 | |||
138 | void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, | ||
139 | struct iwl_tx_queue *txq) | ||
140 | { | ||
141 | struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; | ||
142 | int txq_id = txq->q.id; | ||
143 | int read_ptr = txq->q.read_ptr; | ||
144 | u8 sta_id = 0; | ||
145 | __le16 bc_ent; | ||
146 | |||
147 | WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); | ||
148 | |||
149 | if (txq_id != IWL_CMD_QUEUE_NUM) | ||
150 | sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; | ||
151 | |||
152 | bc_ent = cpu_to_le16(1 | (sta_id << 12)); | ||
153 | scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; | ||
154 | |||
155 | if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
156 | scd_bc_tbl[txq_id]. | ||
157 | tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; | ||
158 | } | ||
159 | |||
160 | static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, | ||
161 | u16 txq_id) | ||
162 | { | ||
163 | u32 tbl_dw_addr; | ||
164 | u32 tbl_dw; | ||
165 | u16 scd_q2ratid; | ||
166 | |||
167 | scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; | ||
168 | |||
169 | tbl_dw_addr = priv->scd_base_addr + | ||
170 | IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); | ||
171 | |||
172 | tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); | ||
173 | |||
174 | if (txq_id & 0x1) | ||
175 | tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); | ||
176 | else | ||
177 | tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); | ||
178 | |||
179 | iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); | ||
180 | |||
181 | return 0; | ||
182 | } | ||
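iwlagn_tx_queue_set_q2ratid() stores two 16-bit RA/TID values per 32-bit word of the translate table, using the queue number's parity to pick which half to update. A standalone sketch of that read-modify-write:

	#include <stdint.h>
	#include <stdio.h>

	/* two queues share each 32-bit translate-table word: odd queue numbers
	 * take the upper 16 bits, even queue numbers the lower 16 bits */
	static uint32_t set_q2ratid(uint32_t tbl_dw, unsigned int txq_id, uint16_t ra_tid)
	{
		if (txq_id & 0x1)
			return ((uint32_t)ra_tid << 16) | (tbl_dw & 0x0000FFFF);
		return ra_tid | (tbl_dw & 0xFFFF0000);
	}

	int main(void)
	{
		uint32_t dw = 0;

		dw = set_q2ratid(dw, 10, 0x0123);	/* even queue -> low half */
		dw = set_q2ratid(dw, 11, 0x0456);	/* odd queue  -> high half */
		printf("0x%08x\n", dw);			/* 0x04560123 */
		return 0;
	}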
183 | |||
184 | static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) | ||
185 | { | ||
186 | /* Simply stop the queue, but don't change any configuration; | ||
187 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ | ||
188 | iwl_write_prph(priv, | ||
189 | IWL50_SCD_QUEUE_STATUS_BITS(txq_id), | ||
190 | (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)| | ||
191 | (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); | ||
192 | } | ||
193 | |||
194 | void iwlagn_set_wr_ptrs(struct iwl_priv *priv, | ||
195 | int txq_id, u32 index) | ||
196 | { | ||
197 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, | ||
198 | (index & 0xff) | (txq_id << 8)); | ||
199 | iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index); | ||
200 | } | ||
201 | |||
202 | void iwlagn_tx_queue_set_status(struct iwl_priv *priv, | ||
203 | struct iwl_tx_queue *txq, | ||
204 | int tx_fifo_id, int scd_retry) | ||
205 | { | ||
206 | int txq_id = txq->q.id; | ||
207 | int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; | ||
208 | |||
209 | iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id), | ||
210 | (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) | | ||
211 | (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) | | ||
212 | (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) | | ||
213 | IWL50_SCD_QUEUE_STTS_REG_MSK); | ||
214 | |||
215 | txq->sched_retry = scd_retry; | ||
216 | |||
217 | IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n", | ||
218 | active ? "Activate" : "Deactivate", | ||
219 | scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id); | ||
220 | } | ||
221 | |||
222 | int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, | ||
223 | int tx_fifo, int sta_id, int tid, u16 ssn_idx) | ||
224 | { | ||
225 | unsigned long flags; | ||
226 | u16 ra_tid; | ||
227 | |||
228 | if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || | ||
229 | (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues | ||
230 | <= txq_id)) { | ||
231 | IWL_WARN(priv, | ||
232 | "queue number out of range: %d, must be %d to %d\n", | ||
233 | txq_id, IWLAGN_FIRST_AMPDU_QUEUE, | ||
234 | IWLAGN_FIRST_AMPDU_QUEUE + | ||
235 | priv->cfg->num_of_ampdu_queues - 1); | ||
236 | return -EINVAL; | ||
237 | } | ||
238 | |||
239 | ra_tid = BUILD_RAxTID(sta_id, tid); | ||
240 | |||
241 | /* Modify device's station table to Tx this TID */ | ||
242 | iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); | ||
243 | |||
244 | spin_lock_irqsave(&priv->lock, flags); | ||
245 | |||
246 | /* Stop this Tx queue before configuring it */ | ||
247 | iwlagn_tx_queue_stop_scheduler(priv, txq_id); | ||
248 | |||
249 | /* Map receiver-address / traffic-ID to this queue */ | ||
250 | iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id); | ||
251 | |||
252 | /* Set this queue as a chain-building queue */ | ||
253 | iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id)); | ||
254 | |||
255 | /* enable aggregations for the queue */ | ||
256 | iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id)); | ||
257 | |||
258 | /* Place first TFD at index corresponding to start sequence number. | ||
259 | * Assumes that ssn_idx is valid (!= 0xFFF) */ | ||
260 | priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); | ||
261 | priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); | ||
262 | iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx); | ||
263 | |||
264 | /* Set up Tx window size and frame limit for this queue */ | ||
265 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
266 | IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + | ||
267 | sizeof(u32), | ||
268 | ((SCD_WIN_SIZE << | ||
269 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & | ||
270 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | | ||
271 | ((SCD_FRAME_LIMIT << | ||
272 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | ||
273 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); | ||
274 | |||
275 | iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id)); | ||
276 | |||
277 | /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ | ||
278 | iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); | ||
279 | |||
280 | spin_unlock_irqrestore(&priv->lock, flags); | ||
281 | |||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, | ||
286 | u16 ssn_idx, u8 tx_fifo) | ||
287 | { | ||
288 | if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || | ||
289 | (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues | ||
290 | <= txq_id)) { | ||
291 | IWL_ERR(priv, | ||
292 | "queue number out of range: %d, must be %d to %d\n", | ||
293 | txq_id, IWLAGN_FIRST_AMPDU_QUEUE, | ||
294 | IWLAGN_FIRST_AMPDU_QUEUE + | ||
295 | priv->cfg->num_of_ampdu_queues - 1); | ||
296 | return -EINVAL; | ||
297 | } | ||
298 | |||
299 | iwlagn_tx_queue_stop_scheduler(priv, txq_id); | ||
300 | |||
301 | iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id)); | ||
302 | |||
303 | priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); | ||
304 | priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); | ||
305 | /* assumes that ssn_idx is valid (!= 0xFFF) */ | ||
306 | iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx); | ||
307 | |||
308 | iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id)); | ||
309 | iwl_txq_ctx_deactivate(priv, txq_id); | ||
310 | iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); | ||
311 | |||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask. | ||
317 | * Must be called under priv->lock and with MAC access held. | ||
318 | */ | ||
319 | void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask) | ||
320 | { | ||
321 | iwl_write_prph(priv, IWL50_SCD_TXFACT, mask); | ||
322 | } | ||
323 | |||
324 | static inline int get_queue_from_ac(u16 ac) | ||
325 | { | ||
326 | return ac; | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * Build the basic part of the REPLY_TX command. | ||
331 | */ | ||
332 | static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, | ||
333 | struct iwl_tx_cmd *tx_cmd, | ||
334 | struct ieee80211_tx_info *info, | ||
335 | struct ieee80211_hdr *hdr, | ||
336 | u8 std_id) | ||
337 | { | ||
338 | __le16 fc = hdr->frame_control; | ||
339 | __le32 tx_flags = tx_cmd->tx_flags; | ||
340 | |||
341 | tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
342 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { | ||
343 | tx_flags |= TX_CMD_FLG_ACK_MSK; | ||
344 | if (ieee80211_is_mgmt(fc)) | ||
345 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
346 | if (ieee80211_is_probe_resp(fc) && | ||
347 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) | ||
348 | tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
349 | } else { | ||
350 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | ||
351 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
352 | } | ||
353 | |||
354 | if (ieee80211_is_back_req(fc)) | ||
355 | tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; | ||
356 | |||
357 | |||
358 | tx_cmd->sta_id = std_id; | ||
359 | if (ieee80211_has_morefrags(fc)) | ||
360 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; | ||
361 | |||
362 | if (ieee80211_is_data_qos(fc)) { | ||
363 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
364 | tx_cmd->tid_tspec = qc[0] & 0xf; | ||
365 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | ||
366 | } else { | ||
367 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
368 | } | ||
369 | |||
370 | priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); | ||
371 | |||
372 | if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) | ||
373 | tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; | ||
374 | |||
375 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | ||
376 | if (ieee80211_is_mgmt(fc)) { | ||
377 | if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) | ||
378 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); | ||
379 | else | ||
380 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); | ||
381 | } else { | ||
382 | tx_cmd->timeout.pm_frame_timeout = 0; | ||
383 | } | ||
384 | |||
385 | tx_cmd->driver_txop = 0; | ||
386 | tx_cmd->tx_flags = tx_flags; | ||
387 | tx_cmd->next_frame_len = 0; | ||
388 | } | ||
389 | |||
390 | #define RTS_DFAULT_RETRY_LIMIT 60 | ||
391 | |||
392 | static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, | ||
393 | struct iwl_tx_cmd *tx_cmd, | ||
394 | struct ieee80211_tx_info *info, | ||
395 | __le16 fc) | ||
396 | { | ||
397 | u32 rate_flags; | ||
398 | int rate_idx; | ||
399 | u8 rts_retry_limit; | ||
400 | u8 data_retry_limit; | ||
401 | u8 rate_plcp; | ||
402 | |||
403 | /* Set retry limit on DATA packets and Probe Responses */ | ||
404 | if (ieee80211_is_probe_resp(fc)) | ||
405 | data_retry_limit = 3; | ||
406 | else | ||
407 | data_retry_limit = IWLAGN_DEFAULT_TX_RETRY; | ||
408 | tx_cmd->data_retry_limit = data_retry_limit; | ||
409 | |||
410 | /* Set retry limit on RTS packets */ | ||
411 | rts_retry_limit = RTS_DFAULT_RETRY_LIMIT; | ||
412 | if (data_retry_limit < rts_retry_limit) | ||
413 | rts_retry_limit = data_retry_limit; | ||
414 | tx_cmd->rts_retry_limit = rts_retry_limit; | ||
415 | |||
416 | /* DATA packets will use the uCode station table for rate/antenna | ||
417 | * selection */ | ||
418 | if (ieee80211_is_data(fc)) { | ||
419 | tx_cmd->initial_rate_index = 0; | ||
420 | tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; | ||
421 | return; | ||
422 | } | ||
423 | |||
424 | /** | ||
425 | * If the current TX rate stored in mac80211 has the MCS bit set, it's | ||
426 | * not really a TX rate. Thus, we use the lowest supported rate for | ||
427 | * this band. Also use the lowest supported rate if the stored rate | ||
428 | * index is invalid. | ||
429 | */ | ||
430 | rate_idx = info->control.rates[0].idx; | ||
431 | if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || | ||
432 | (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) | ||
433 | rate_idx = rate_lowest_index(&priv->bands[info->band], | ||
434 | info->control.sta); | ||
435 | /* For 5 GHz band, remap mac80211 rate indices into driver indices */ | ||
436 | if (info->band == IEEE80211_BAND_5GHZ) | ||
437 | rate_idx += IWL_FIRST_OFDM_RATE; | ||
438 | /* Get PLCP rate for tx_cmd->rate_n_flags */ | ||
439 | rate_plcp = iwl_rates[rate_idx].plcp; | ||
440 | /* Zero out flags for this packet */ | ||
441 | rate_flags = 0; | ||
442 | |||
443 | /* Set CCK flag as needed */ | ||
444 | if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) | ||
445 | rate_flags |= RATE_MCS_CCK_MSK; | ||
446 | |||
447 | /* Set up RTS and CTS flags for certain packets */ | ||
448 | switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { | ||
449 | case cpu_to_le16(IEEE80211_STYPE_AUTH): | ||
450 | case cpu_to_le16(IEEE80211_STYPE_DEAUTH): | ||
451 | case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): | ||
452 | case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): | ||
453 | if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) { | ||
454 | tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK; | ||
455 | tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK; | ||
456 | } | ||
457 | break; | ||
458 | default: | ||
459 | break; | ||
460 | } | ||
461 | |||
462 | /* Set up antennas */ | ||
463 | priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); | ||
464 | rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); | ||
465 | |||
466 | /* Set the rate in the TX cmd */ | ||
467 | tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); | ||
468 | } | ||
469 | |||
470 | static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv, | ||
471 | struct ieee80211_tx_info *info, | ||
472 | struct iwl_tx_cmd *tx_cmd, | ||
473 | struct sk_buff *skb_frag, | ||
474 | int sta_id) | ||
475 | { | ||
476 | struct ieee80211_key_conf *keyconf = info->control.hw_key; | ||
477 | |||
478 | switch (keyconf->alg) { | ||
479 | case ALG_CCMP: | ||
480 | tx_cmd->sec_ctl = TX_CMD_SEC_CCM; | ||
481 | memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); | ||
482 | if (info->flags & IEEE80211_TX_CTL_AMPDU) | ||
483 | tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; | ||
484 | IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); | ||
485 | break; | ||
486 | |||
487 | case ALG_TKIP: | ||
488 | tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; | ||
489 | ieee80211_get_tkip_key(keyconf, skb_frag, | ||
490 | IEEE80211_TKIP_P2_KEY, tx_cmd->key); | ||
491 | IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); | ||
492 | break; | ||
493 | |||
494 | case ALG_WEP: | ||
495 | tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | | ||
496 | (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); | ||
497 | |||
498 | if (keyconf->keylen == WEP_KEY_LEN_128) | ||
499 | tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; | ||
500 | |||
501 | memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); | ||
502 | |||
503 | IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " | ||
504 | "with key %d\n", keyconf->keyidx); | ||
505 | break; | ||
506 | |||
507 | default: | ||
508 | IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg); | ||
509 | break; | ||
510 | } | ||
511 | } | ||
512 | |||
513 | /* | ||
514 | * start REPLY_TX command process | ||
515 | */ | ||
516 | int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | ||
517 | { | ||
518 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
519 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
520 | struct ieee80211_sta *sta = info->control.sta; | ||
521 | struct iwl_station_priv *sta_priv = NULL; | ||
522 | struct iwl_tx_queue *txq; | ||
523 | struct iwl_queue *q; | ||
524 | struct iwl_device_cmd *out_cmd; | ||
525 | struct iwl_cmd_meta *out_meta; | ||
526 | struct iwl_tx_cmd *tx_cmd; | ||
527 | int swq_id, txq_id; | ||
528 | dma_addr_t phys_addr; | ||
529 | dma_addr_t txcmd_phys; | ||
530 | dma_addr_t scratch_phys; | ||
531 | u16 len, len_org, firstlen, secondlen; | ||
532 | u16 seq_number = 0; | ||
533 | __le16 fc; | ||
534 | u8 hdr_len; | ||
535 | u8 sta_id; | ||
536 | u8 wait_write_ptr = 0; | ||
537 | u8 tid = 0; | ||
538 | u8 *qc = NULL; | ||
539 | unsigned long flags; | ||
540 | |||
541 | spin_lock_irqsave(&priv->lock, flags); | ||
542 | if (iwl_is_rfkill(priv)) { | ||
543 | IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); | ||
544 | goto drop_unlock; | ||
545 | } | ||
546 | |||
547 | fc = hdr->frame_control; | ||
548 | |||
549 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
550 | if (ieee80211_is_auth(fc)) | ||
551 | IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); | ||
552 | else if (ieee80211_is_assoc_req(fc)) | ||
553 | IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); | ||
554 | else if (ieee80211_is_reassoc_req(fc)) | ||
555 | IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); | ||
556 | #endif | ||
557 | |||
558 | hdr_len = ieee80211_hdrlen(fc); | ||
559 | |||
560 | /* Find (or create) index into station table for destination station */ | ||
561 | if (info->flags & IEEE80211_TX_CTL_INJECTED) | ||
562 | sta_id = priv->hw_params.bcast_sta_id; | ||
563 | else | ||
564 | sta_id = iwl_get_sta_id(priv, hdr); | ||
565 | if (sta_id == IWL_INVALID_STATION) { | ||
566 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", | ||
567 | hdr->addr1); | ||
568 | goto drop_unlock; | ||
569 | } | ||
570 | |||
571 | IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); | ||
572 | |||
573 | if (sta) | ||
574 | sta_priv = (void *)sta->drv_priv; | ||
575 | |||
576 | if (sta_priv && sta_id != priv->hw_params.bcast_sta_id && | ||
577 | sta_priv->asleep) { | ||
578 | WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)); | ||
579 | /* | ||
580 | * This sends an asynchronous command to the device, | ||
581 | * but we can rely on it being processed before the | ||
582 | * next frame is processed -- and the next frame to | ||
583 | * this station is the one that will consume this | ||
584 | * counter. | ||
585 | * For now set the counter to just 1 since we do not | ||
586 | * support uAPSD yet. | ||
587 | */ | ||
588 | iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); | ||
589 | } | ||
590 | |||
591 | txq_id = get_queue_from_ac(skb_get_queue_mapping(skb)); | ||
592 | if (ieee80211_is_data_qos(fc)) { | ||
593 | qc = ieee80211_get_qos_ctl(hdr); | ||
594 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; | ||
595 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
596 | goto drop_unlock; | ||
597 | seq_number = priv->stations[sta_id].tid[tid].seq_number; | ||
598 | seq_number &= IEEE80211_SCTL_SEQ; | ||
599 | hdr->seq_ctrl = hdr->seq_ctrl & | ||
600 | cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
601 | hdr->seq_ctrl |= cpu_to_le16(seq_number); | ||
602 | seq_number += 0x10; | ||
603 | /* aggregation is on for this <sta,tid> */ | ||
604 | if (info->flags & IEEE80211_TX_CTL_AMPDU && | ||
605 | priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { | ||
606 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; | ||
607 | } | ||
608 | } | ||
609 | |||
610 | txq = &priv->txq[txq_id]; | ||
611 | swq_id = txq->swq_id; | ||
612 | q = &txq->q; | ||
613 | |||
614 | if (unlikely(iwl_queue_space(q) < q->high_mark)) | ||
615 | goto drop_unlock; | ||
616 | |||
617 | if (ieee80211_is_data_qos(fc)) | ||
618 | priv->stations[sta_id].tid[tid].tfds_in_queue++; | ||
619 | |||
620 | /* Set up driver data for this TFD */ | ||
621 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | ||
622 | txq->txb[q->write_ptr].skb[0] = skb; | ||
623 | |||
624 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | ||
625 | out_cmd = txq->cmd[q->write_ptr]; | ||
626 | out_meta = &txq->meta[q->write_ptr]; | ||
627 | tx_cmd = &out_cmd->cmd.tx; | ||
628 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | ||
629 | memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); | ||
630 | |||
631 | /* | ||
632 | * Set up the Tx-command (not MAC!) header. | ||
633 | * Store the chosen Tx queue and TFD index within the sequence field; | ||
634 | * after Tx, uCode's Tx response will return this value so driver can | ||
635 | * locate the frame within the tx queue and do post-tx processing. | ||
636 | */ | ||
637 | out_cmd->hdr.cmd = REPLY_TX; | ||
638 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
639 | INDEX_TO_SEQ(q->write_ptr))); | ||
640 | |||
641 | /* Copy MAC header from skb into command buffer */ | ||
642 | memcpy(tx_cmd->hdr, hdr, hdr_len); | ||
643 | |||
644 | |||
645 | /* Total # bytes to be transmitted */ | ||
646 | len = (u16)skb->len; | ||
647 | tx_cmd->len = cpu_to_le16(len); | ||
648 | |||
649 | if (info->control.hw_key) | ||
650 | iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); | ||
651 | |||
652 | /* TODO need this for burst mode later on */ | ||
653 | iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); | ||
654 | iwl_dbg_log_tx_data_frame(priv, len, hdr); | ||
655 | |||
656 | iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); | ||
657 | |||
658 | iwl_update_stats(priv, true, fc, len); | ||
659 | /* | ||
660 | * Use the first empty entry in this queue's command buffer array | ||
661 | * to contain the Tx command and MAC header concatenated together | ||
662 | * (payload data will be in another buffer). | ||
663 | * Size of this varies, due to varying MAC header length. | ||
664 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
665 | * of the MAC header (device reads on dword boundaries). | ||
666 | * We'll tell device about this padding later. | ||
667 | */ | ||
668 | len = sizeof(struct iwl_tx_cmd) + | ||
669 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
670 | |||
671 | len_org = len; | ||
672 | firstlen = len = (len + 3) & ~3; | ||
673 | |||
674 | if (len_org != len) | ||
675 | len_org = 1; | ||
676 | else | ||
677 | len_org = 0; | ||
678 | |||
679 | /* Tell NIC about any 2-byte padding after MAC header */ | ||
680 | if (len_org) | ||
681 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
682 | |||
683 | /* Physical address of this Tx command's header (not MAC header!), | ||
684 | * within command buffer array. */ | ||
685 | txcmd_phys = pci_map_single(priv->pci_dev, | ||
686 | &out_cmd->hdr, len, | ||
687 | PCI_DMA_BIDIRECTIONAL); | ||
688 | pci_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
689 | pci_unmap_len_set(out_meta, len, len); | ||
690 | /* Add buffer containing Tx command and MAC(!) header to TFD's | ||
691 | * first entry */ | ||
692 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
693 | txcmd_phys, len, 1, 0); | ||
694 | |||
695 | if (!ieee80211_has_morefrags(hdr->frame_control)) { | ||
696 | txq->need_update = 1; | ||
697 | if (qc) | ||
698 | priv->stations[sta_id].tid[tid].seq_number = seq_number; | ||
699 | } else { | ||
700 | wait_write_ptr = 1; | ||
701 | txq->need_update = 0; | ||
702 | } | ||
703 | |||
704 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | ||
705 | * if any (802.11 null frames have no payload). */ | ||
706 | secondlen = len = skb->len - hdr_len; | ||
707 | if (len) { | ||
708 | phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, | ||
709 | len, PCI_DMA_TODEVICE); | ||
710 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
711 | phys_addr, len, | ||
712 | 0, 0); | ||
713 | } | ||
714 | |||
715 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + | ||
716 | offsetof(struct iwl_tx_cmd, scratch); | ||
717 | |||
718 | len = sizeof(struct iwl_tx_cmd) + | ||
719 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
720 | /* take back ownership of DMA buffer to enable update */ | ||
721 | pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, | ||
722 | len, PCI_DMA_BIDIRECTIONAL); | ||
723 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
724 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
725 | |||
726 | IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", | ||
727 | le16_to_cpu(out_cmd->hdr.sequence)); | ||
728 | IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); | ||
729 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); | ||
730 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); | ||
731 | |||
732 | /* Set up entry for this TFD in Tx byte-count array */ | ||
733 | if (info->flags & IEEE80211_TX_CTL_AMPDU) | ||
734 | priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, | ||
735 | le16_to_cpu(tx_cmd->len)); | ||
736 | |||
737 | pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, | ||
738 | len, PCI_DMA_BIDIRECTIONAL); | ||
739 | |||
740 | trace_iwlwifi_dev_tx(priv, | ||
741 | &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], | ||
742 | sizeof(struct iwl_tfd), | ||
743 | &out_cmd->hdr, firstlen, | ||
744 | skb->data + hdr_len, secondlen); | ||
745 | |||
746 | /* Tell device the write index *just past* this latest filled TFD */ | ||
747 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
748 | iwl_txq_update_write_ptr(priv, txq); | ||
749 | spin_unlock_irqrestore(&priv->lock, flags); | ||
750 | |||
751 | /* | ||
752 | * At this point the frame is "transmitted" successfully | ||
753 | * and we will get a TX status notification eventually, | ||
754 | * regardless of what happens below; the remaining logic only | ||
755 | * decides whether or not to update the write pointer now. | ||
756 | */ | ||
757 | |||
758 | /* avoid atomic ops if it isn't an associated client */ | ||
759 | if (sta_priv && sta_priv->client) | ||
760 | atomic_inc(&sta_priv->pending_frames); | ||
761 | |||
762 | if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { | ||
763 | if (wait_write_ptr) { | ||
764 | spin_lock_irqsave(&priv->lock, flags); | ||
765 | txq->need_update = 1; | ||
766 | iwl_txq_update_write_ptr(priv, txq); | ||
767 | spin_unlock_irqrestore(&priv->lock, flags); | ||
768 | } else { | ||
769 | iwl_stop_queue(priv, txq->swq_id); | ||
770 | } | ||
771 | } | ||
772 | |||
773 | return 0; | ||
774 | |||
775 | drop_unlock: | ||
776 | spin_unlock_irqrestore(&priv->lock, flags); | ||
777 | return -1; | ||
778 | } | ||
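The first TFD buffer length above is rounded up to a 4-byte (dword) boundary with (len + 3) & ~3, and TX_CMD_FLG_MH_PAD_MSK is set when that rounding added pad bytes. A small worked example of the same arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int len = 26;			/* example: Tx cmd header + MAC header length */
	unsigned int firstlen = (len + 3) & ~3u;	/* round up to a dword boundary: 26 -> 28 */
	int mh_pad = (firstlen != len);		/* 1 -> two pad bytes follow the MAC header,
						 *      which the device must be told to skip */

	printf("len=%u firstlen=%u mh_pad=%d\n", len, firstlen, mh_pad);
	return 0;
}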
779 | |||
780 | static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv, | ||
781 | struct iwl_dma_ptr *ptr, size_t size) | ||
782 | { | ||
783 | ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, | ||
784 | GFP_KERNEL); | ||
785 | if (!ptr->addr) | ||
786 | return -ENOMEM; | ||
787 | ptr->size = size; | ||
788 | return 0; | ||
789 | } | ||
790 | |||
791 | static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv, | ||
792 | struct iwl_dma_ptr *ptr) | ||
793 | { | ||
794 | if (unlikely(!ptr->addr)) | ||
795 | return; | ||
796 | |||
797 | dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); | ||
798 | memset(ptr, 0, sizeof(*ptr)); | ||
799 | } | ||
800 | |||
801 | /** | ||
802 | * iwlagn_hw_txq_ctx_free - Free TXQ Context | ||
803 | * | ||
804 | * Destroy all TX DMA queues and structures | ||
805 | */ | ||
806 | void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv) | ||
807 | { | ||
808 | int txq_id; | ||
809 | |||
810 | /* Tx queues */ | ||
811 | if (priv->txq) { | ||
812 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | ||
813 | if (txq_id == IWL_CMD_QUEUE_NUM) | ||
814 | iwl_cmd_queue_free(priv); | ||
815 | else | ||
816 | iwl_tx_queue_free(priv, txq_id); | ||
817 | } | ||
818 | iwlagn_free_dma_ptr(priv, &priv->kw); | ||
819 | |||
820 | iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); | ||
821 | |||
822 | /* free tx queue structure */ | ||
823 | iwl_free_txq_mem(priv); | ||
824 | } | ||
825 | |||
826 | /** | ||
827 | * iwlagn_txq_ctx_alloc - allocate TX queue context | ||
828 | * Allocate all Tx DMA structures and initialize them | ||
829 | * | ||
830 | * @param priv | ||
831 | * @return error code | ||
832 | */ | ||
833 | int iwlagn_txq_ctx_alloc(struct iwl_priv *priv) | ||
834 | { | ||
835 | int ret; | ||
836 | int txq_id, slots_num; | ||
837 | unsigned long flags; | ||
838 | |||
839 | /* Free all tx/cmd queues and keep-warm buffer */ | ||
840 | iwlagn_hw_txq_ctx_free(priv); | ||
841 | |||
842 | ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls, | ||
843 | priv->hw_params.scd_bc_tbls_size); | ||
844 | if (ret) { | ||
845 | IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); | ||
846 | goto error_bc_tbls; | ||
847 | } | ||
848 | /* Alloc keep-warm buffer */ | ||
849 | ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); | ||
850 | if (ret) { | ||
851 | IWL_ERR(priv, "Keep Warm allocation failed\n"); | ||
852 | goto error_kw; | ||
853 | } | ||
854 | |||
855 | /* allocate tx queue structure */ | ||
856 | ret = iwl_alloc_txq_mem(priv); | ||
857 | if (ret) | ||
858 | goto error; | ||
859 | |||
860 | spin_lock_irqsave(&priv->lock, flags); | ||
861 | |||
862 | /* Turn off all Tx DMA fifos */ | ||
863 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
864 | |||
865 | /* Tell NIC where to find the "keep warm" buffer */ | ||
866 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | ||
867 | |||
868 | spin_unlock_irqrestore(&priv->lock, flags); | ||
869 | |||
870 | /* Alloc and init all Tx queues, including the command queue (#4) */ | ||
871 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
872 | slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? | ||
873 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
874 | ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, | ||
875 | txq_id); | ||
876 | if (ret) { | ||
877 | IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); | ||
878 | goto error; | ||
879 | } | ||
880 | } | ||
881 | |||
882 | return ret; | ||
883 | |||
884 | error: | ||
885 | iwlagn_hw_txq_ctx_free(priv); | ||
886 | iwlagn_free_dma_ptr(priv, &priv->kw); | ||
887 | error_kw: | ||
888 | iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); | ||
889 | error_bc_tbls: | ||
890 | return ret; | ||
891 | } | ||
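iwlagn_txq_ctx_alloc above follows the common kernel error-unwind idiom: on failure, jump to a label that frees everything allocated so far, in reverse order. A compact, self-contained sketch of the pattern, with plain malloc/free standing in for the driver's DMA allocations:

#include <stdlib.h>

struct ex_ctx { void *bc_tbls; void *kw; void *txq; };

static int ex_ctx_alloc(struct ex_ctx *ctx)
{
	ctx->bc_tbls = malloc(64);
	if (!ctx->bc_tbls)
		goto err_bc_tbls;
	ctx->kw = malloc(64);
	if (!ctx->kw)
		goto err_kw;
	ctx->txq = malloc(64);
	if (!ctx->txq)
		goto err_txq;
	return 0;

err_txq:
	free(ctx->kw);		/* unwind in reverse order of allocation */
err_kw:
	free(ctx->bc_tbls);
err_bc_tbls:
	return -1;
}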
892 | |||
893 | void iwlagn_txq_ctx_reset(struct iwl_priv *priv) | ||
894 | { | ||
895 | int txq_id, slots_num; | ||
896 | unsigned long flags; | ||
897 | |||
898 | spin_lock_irqsave(&priv->lock, flags); | ||
899 | |||
900 | /* Turn off all Tx DMA fifos */ | ||
901 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
902 | |||
903 | /* Tell NIC where to find the "keep warm" buffer */ | ||
904 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | ||
905 | |||
906 | spin_unlock_irqrestore(&priv->lock, flags); | ||
907 | |||
908 | /* Alloc and init all Tx queues, including the command queue (#4) */ | ||
909 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
910 | slots_num = txq_id == IWL_CMD_QUEUE_NUM ? | ||
911 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
912 | iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); | ||
913 | } | ||
914 | } | ||
915 | |||
916 | /** | ||
917 | * iwlagn_txq_ctx_stop - Stop all Tx DMA channels | ||
918 | */ | ||
919 | void iwlagn_txq_ctx_stop(struct iwl_priv *priv) | ||
920 | { | ||
921 | int ch; | ||
922 | unsigned long flags; | ||
923 | |||
924 | /* Turn off all Tx DMA fifos */ | ||
925 | spin_lock_irqsave(&priv->lock, flags); | ||
926 | |||
927 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
928 | |||
929 | /* Stop each Tx DMA channel, and wait for it to be idle */ | ||
930 | for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { | ||
931 | iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | ||
932 | iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, | ||
933 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), | ||
934 | 1000); | ||
935 | } | ||
936 | spin_unlock_irqrestore(&priv->lock, flags); | ||
937 | } | ||
938 | |||
939 | /* | ||
940 | * Find first available (lowest unused) Tx Queue, mark it "active". | ||
941 | * Called only when finding queue for aggregation. | ||
942 | * Should never return anything < 7, because they should already | ||
943 | * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) | ||
944 | */ | ||
945 | static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv) | ||
946 | { | ||
947 | int txq_id; | ||
948 | |||
949 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | ||
950 | if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) | ||
951 | return txq_id; | ||
952 | return -1; | ||
953 | } | ||
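The allocator above claims the lowest unused queue by scanning the context-active bitmask with test_and_set_bit. A non-atomic userspace sketch of the same "find the lowest clear bit and claim it" search:

static int ex_claim_lowest_free_queue(unsigned long *mask, int num_queues)
{
	int txq_id;

	for (txq_id = 0; txq_id < num_queues; txq_id++) {
		if (!(*mask & (1UL << txq_id))) {	/* bit clear: queue unused */
			*mask |= 1UL << txq_id;		/* mark it active */
			return txq_id;
		}
	}
	return -1;					/* no free queue */
}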
954 | |||
955 | int iwlagn_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) | ||
956 | { | ||
957 | int sta_id; | ||
958 | int tx_fifo; | ||
959 | int txq_id; | ||
960 | int ret; | ||
961 | unsigned long flags; | ||
962 | struct iwl_tid_data *tid_data; | ||
963 | |||
964 | tx_fifo = get_fifo_from_tid(tid); | ||
965 | if (unlikely(tx_fifo < 0)) | ||
966 | return tx_fifo; | ||
967 | |||
968 | IWL_WARN(priv, "%s on ra = %pM tid = %d\n", | ||
969 | __func__, ra, tid); | ||
970 | |||
971 | sta_id = iwl_find_station(priv, ra); | ||
972 | if (sta_id == IWL_INVALID_STATION) { | ||
973 | IWL_ERR(priv, "Start AGG on invalid station\n"); | ||
974 | return -ENXIO; | ||
975 | } | ||
976 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
977 | return -EINVAL; | ||
978 | |||
979 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { | ||
980 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); | ||
981 | return -ENXIO; | ||
982 | } | ||
983 | |||
984 | txq_id = iwlagn_txq_ctx_activate_free(priv); | ||
985 | if (txq_id == -1) { | ||
986 | IWL_ERR(priv, "No free aggregation queue available\n"); | ||
987 | return -ENXIO; | ||
988 | } | ||
989 | |||
990 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
991 | tid_data = &priv->stations[sta_id].tid[tid]; | ||
992 | *ssn = SEQ_TO_SN(tid_data->seq_number); | ||
993 | tid_data->agg.txq_id = txq_id; | ||
994 | priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id); | ||
995 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
996 | |||
997 | ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, | ||
998 | sta_id, tid, *ssn); | ||
999 | if (ret) | ||
1000 | return ret; | ||
1001 | |||
1002 | if (tid_data->tfds_in_queue == 0) { | ||
1003 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); | ||
1004 | tid_data->agg.state = IWL_AGG_ON; | ||
1005 | ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid); | ||
1006 | } else { | ||
1007 | IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n", | ||
1008 | tid_data->tfds_in_queue); | ||
1009 | tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; | ||
1010 | } | ||
1011 | return ret; | ||
1012 | } | ||
1013 | |||
1014 | int iwlagn_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid) | ||
1015 | { | ||
1016 | int tx_fifo_id, txq_id, sta_id, ssn = -1; | ||
1017 | struct iwl_tid_data *tid_data; | ||
1018 | int write_ptr, read_ptr; | ||
1019 | unsigned long flags; | ||
1020 | |||
1021 | if (!ra) { | ||
1022 | IWL_ERR(priv, "ra = NULL\n"); | ||
1023 | return -EINVAL; | ||
1024 | } | ||
1025 | |||
1026 | tx_fifo_id = get_fifo_from_tid(tid); | ||
1027 | if (unlikely(tx_fifo_id < 0)) | ||
1028 | return tx_fifo_id; | ||
1029 | |||
1030 | sta_id = iwl_find_station(priv, ra); | ||
1031 | |||
1032 | if (sta_id == IWL_INVALID_STATION) { | ||
1033 | IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); | ||
1034 | return -ENXIO; | ||
1035 | } | ||
1036 | |||
1037 | if (priv->stations[sta_id].tid[tid].agg.state == | ||
1038 | IWL_EMPTYING_HW_QUEUE_ADDBA) { | ||
1039 | IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); | ||
1040 | ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid); | ||
1041 | priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; | ||
1042 | return 0; | ||
1043 | } | ||
1044 | |||
1045 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) | ||
1046 | IWL_WARN(priv, "Stopping AGG while state not ON or starting\n"); | ||
1047 | |||
1048 | tid_data = &priv->stations[sta_id].tid[tid]; | ||
1049 | ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; | ||
1050 | txq_id = tid_data->agg.txq_id; | ||
1051 | write_ptr = priv->txq[txq_id].q.write_ptr; | ||
1052 | read_ptr = priv->txq[txq_id].q.read_ptr; | ||
1053 | |||
1054 | /* The queue is not empty */ | ||
1055 | if (write_ptr != read_ptr) { | ||
1056 | IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); | ||
1057 | priv->stations[sta_id].tid[tid].agg.state = | ||
1058 | IWL_EMPTYING_HW_QUEUE_DELBA; | ||
1059 | return 0; | ||
1060 | } | ||
1061 | |||
1062 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); | ||
1063 | priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; | ||
1064 | |||
1065 | spin_lock_irqsave(&priv->lock, flags); | ||
1066 | /* | ||
1067 | * the only reason this call can fail is queue number out of range, | ||
1068 | * which can happen if uCode is reloaded and all the station | ||
1069 | * information is lost. If it is outside the range, there is no need | ||
1070 | * to deactivate the uCode queue, just return "success" to allow | ||
1071 | * mac80211 to clean up its own data. | ||
1072 | */ | ||
1073 | priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn, | ||
1074 | tx_fifo_id); | ||
1075 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1076 | |||
1077 | ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid); | ||
1078 | |||
1079 | return 0; | ||
1080 | } | ||
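Both SEQ_TO_SN above and the (seq_number & IEEE80211_SCTL_SEQ) >> 4 expression rely on the 802.11 Sequence Control layout: fragment number in bits 0..3, sequence number in bits 4..15. A standalone sketch of that split (the EX_* masks mirror the standard layout, not the driver's headers):

#include <stdint.h>

#define EX_SCTL_FRAG	0x000fu		/* fragment number: bits 0..3  */
#define EX_SCTL_SEQ	0xfff0u		/* sequence number: bits 4..15 */

static uint16_t ex_seq_to_sn(uint16_t seq_ctrl)
{
	return (seq_ctrl & EX_SCTL_SEQ) >> 4;	/* what a SEQ_TO_SN-style macro computes */
}

static uint16_t ex_sn_to_seq(uint16_t sn)
{
	return (uint16_t)((sn << 4) & EX_SCTL_SEQ);
}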
1081 | |||
1082 | int iwlagn_txq_check_empty(struct iwl_priv *priv, | ||
1083 | int sta_id, u8 tid, int txq_id) | ||
1084 | { | ||
1085 | struct iwl_queue *q = &priv->txq[txq_id].q; | ||
1086 | u8 *addr = priv->stations[sta_id].sta.sta.addr; | ||
1087 | struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; | ||
1088 | |||
1089 | switch (priv->stations[sta_id].tid[tid].agg.state) { | ||
1090 | case IWL_EMPTYING_HW_QUEUE_DELBA: | ||
1091 | /* We are reclaiming the last packet of the */ | ||
1092 | /* aggregated HW queue */ | ||
1093 | if ((txq_id == tid_data->agg.txq_id) && | ||
1094 | (q->read_ptr == q->write_ptr)) { | ||
1095 | u16 ssn = SEQ_TO_SN(tid_data->seq_number); | ||
1096 | int tx_fifo = get_fifo_from_tid(tid); | ||
1097 | IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); | ||
1098 | priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, | ||
1099 | ssn, tx_fifo); | ||
1100 | tid_data->agg.state = IWL_AGG_OFF; | ||
1101 | ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid); | ||
1102 | } | ||
1103 | break; | ||
1104 | case IWL_EMPTYING_HW_QUEUE_ADDBA: | ||
1105 | /* We are reclaiming the last packet of the queue */ | ||
1106 | if (tid_data->tfds_in_queue == 0) { | ||
1107 | IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n"); | ||
1108 | tid_data->agg.state = IWL_AGG_ON; | ||
1109 | ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid); | ||
1110 | } | ||
1111 | break; | ||
1112 | } | ||
1113 | return 0; | ||
1114 | } | ||
1115 | |||
1116 | static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb) | ||
1117 | { | ||
1118 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
1119 | struct ieee80211_sta *sta; | ||
1120 | struct iwl_station_priv *sta_priv; | ||
1121 | |||
1122 | sta = ieee80211_find_sta(priv->vif, hdr->addr1); | ||
1123 | if (sta) { | ||
1124 | sta_priv = (void *)sta->drv_priv; | ||
1125 | /* avoid atomic ops if this isn't a client */ | ||
1126 | if (sta_priv->client && | ||
1127 | atomic_dec_return(&sta_priv->pending_frames) == 0) | ||
1128 | ieee80211_sta_block_awake(priv->hw, sta, false); | ||
1129 | } | ||
1130 | |||
1131 | ieee80211_tx_status_irqsafe(priv->hw, skb); | ||
1132 | } | ||
1133 | |||
1134 | int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) | ||
1135 | { | ||
1136 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
1137 | struct iwl_queue *q = &txq->q; | ||
1138 | struct iwl_tx_info *tx_info; | ||
1139 | int nfreed = 0; | ||
1140 | struct ieee80211_hdr *hdr; | ||
1141 | |||
1142 | if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { | ||
1143 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " | ||
1144 | "is out of range [0-%d] %d %d.\n", txq_id, | ||
1145 | index, q->n_bd, q->write_ptr, q->read_ptr); | ||
1146 | return 0; | ||
1147 | } | ||
1148 | |||
1149 | for (index = iwl_queue_inc_wrap(index, q->n_bd); | ||
1150 | q->read_ptr != index; | ||
1151 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
1152 | |||
1153 | tx_info = &txq->txb[txq->q.read_ptr]; | ||
1154 | iwlagn_tx_status(priv, tx_info->skb[0]); | ||
1155 | |||
1156 | hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; | ||
1157 | if (hdr && ieee80211_is_data_qos(hdr->frame_control)) | ||
1158 | nfreed++; | ||
1159 | tx_info->skb[0] = NULL; | ||
1160 | |||
1161 | if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) | ||
1162 | priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); | ||
1163 | |||
1164 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); | ||
1165 | } | ||
1166 | return nfreed; | ||
1167 | } | ||
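The reclaim loop above advances read_ptr with iwl_queue_inc_wrap, a circular increment that wraps to zero at the end of the descriptor ring. A minimal sketch of that style of ring walk (the driver's helper may differ in detail):

/* Advance a ring index by one, wrapping at n_bd descriptors. */
static int ex_queue_inc_wrap(int index, int n_bd)
{
	return (index + 1 < n_bd) ? index + 1 : 0;
}

/* Walk read_ptr forward until it reaches 'until', counting released entries. */
static int ex_reclaim(int read_ptr, int until, int n_bd)
{
	int freed = 0;

	while (read_ptr != until) {
		/* ...release the buffer behind read_ptr here... */
		read_ptr = ex_queue_inc_wrap(read_ptr, n_bd);
		freed++;
	}
	return freed;
}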
1168 | |||
1169 | /** | ||
1170 | * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack | ||
1171 | * | ||
1172 | * Go through block-ack's bitmap of ACK'd frames, update driver's record of | ||
1173 | * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. | ||
1174 | */ | ||
1175 | static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv, | ||
1176 | struct iwl_ht_agg *agg, | ||
1177 | struct iwl_compressed_ba_resp *ba_resp) | ||
1178 | |||
1179 | { | ||
1180 | int i, sh, ack; | ||
1181 | u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); | ||
1182 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | ||
1183 | u64 bitmap; | ||
1184 | int successes = 0; | ||
1185 | struct ieee80211_tx_info *info; | ||
1186 | |||
1187 | if (unlikely(!agg->wait_for_ba)) { | ||
1188 | IWL_ERR(priv, "Received BA when not expected\n"); | ||
1189 | return -EINVAL; | ||
1190 | } | ||
1191 | |||
1192 | /* Mark that the expected block-ack response arrived */ | ||
1193 | agg->wait_for_ba = 0; | ||
1194 | IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); | ||
1195 | |||
1196 | /* Calculate shift to align block-ack bits with our Tx window bits */ | ||
1197 | sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); | ||
1198 | if (sh < 0) /* tbw something is wrong with indices */ | ||
1199 | sh += 0x100; | ||
1200 | |||
1201 | /* don't use 64-bit values for now */ | ||
1202 | bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; | ||
1203 | |||
1204 | if (agg->frame_count > (64 - sh)) { | ||
1205 | IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); | ||
1206 | return -1; | ||
1207 | } | ||
1208 | |||
1209 | /* check for success or failure according to the | ||
1210 | * transmitted bitmap and block-ack bitmap */ | ||
1211 | bitmap &= agg->bitmap; | ||
1212 | |||
1213 | /* For each frame attempted in aggregation, | ||
1214 | * update driver's record of tx frame's status. */ | ||
1215 | for (i = 0; i < agg->frame_count ; i++) { | ||
1216 | ack = bitmap & (1ULL << i); | ||
1217 | successes += !!ack; | ||
1218 | IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", | ||
1219 | ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, | ||
1220 | agg->start_idx + i); | ||
1221 | } | ||
1222 | |||
1223 | info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); | ||
1224 | memset(&info->status, 0, sizeof(info->status)); | ||
1225 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
1226 | info->flags |= IEEE80211_TX_STAT_AMPDU; | ||
1227 | info->status.ampdu_ack_map = successes; | ||
1228 | info->status.ampdu_ack_len = agg->frame_count; | ||
1229 | iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info); | ||
1230 | |||
1231 | IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap); | ||
1232 | |||
1233 | return 0; | ||
1234 | } | ||
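The handler above aligns the firmware's 64-bit block-ack bitmap with the driver's Tx window, masks it against the frames actually sent, and counts the ACKed ones. A standalone sketch of that counting step:

#include <stdint.h>

/* Count ACKed frames once the BA bitmap has been shifted into window alignment. */
static int ex_count_ba_successes(uint64_t ba_bitmap, uint64_t sent_bitmap,
				 int frame_count)
{
	uint64_t acked = ba_bitmap & sent_bitmap;
	int i, successes = 0;

	for (i = 0; i < frame_count; i++)
		successes += !!(acked & (1ULL << i));
	return successes;
}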
1235 | |||
1236 | /** | ||
1237 | * translate ucode response to mac80211 tx status control values | ||
1238 | */ | ||
1239 | void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, | ||
1240 | struct ieee80211_tx_info *info) | ||
1241 | { | ||
1242 | struct ieee80211_tx_rate *r = &info->control.rates[0]; | ||
1243 | |||
1244 | info->antenna_sel_tx = | ||
1245 | ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); | ||
1246 | if (rate_n_flags & RATE_MCS_HT_MSK) | ||
1247 | r->flags |= IEEE80211_TX_RC_MCS; | ||
1248 | if (rate_n_flags & RATE_MCS_GF_MSK) | ||
1249 | r->flags |= IEEE80211_TX_RC_GREEN_FIELD; | ||
1250 | if (rate_n_flags & RATE_MCS_HT40_MSK) | ||
1251 | r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; | ||
1252 | if (rate_n_flags & RATE_MCS_DUP_MSK) | ||
1253 | r->flags |= IEEE80211_TX_RC_DUP_DATA; | ||
1254 | if (rate_n_flags & RATE_MCS_SGI_MSK) | ||
1255 | r->flags |= IEEE80211_TX_RC_SHORT_GI; | ||
1256 | r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band); | ||
1257 | } | ||
1258 | |||
1259 | /** | ||
1260 | * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA | ||
1261 | * | ||
1262 | * Handles block-acknowledge notification from device, which reports success | ||
1263 | * of frames sent via aggregation. | ||
1264 | */ | ||
1265 | void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, | ||
1266 | struct iwl_rx_mem_buffer *rxb) | ||
1267 | { | ||
1268 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1269 | struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; | ||
1270 | struct iwl_tx_queue *txq = NULL; | ||
1271 | struct iwl_ht_agg *agg; | ||
1272 | int index; | ||
1273 | int sta_id; | ||
1274 | int tid; | ||
1275 | |||
1276 | /* "flow" corresponds to Tx queue */ | ||
1277 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | ||
1278 | |||
1279 | /* "ssn" is start of block-ack Tx window, corresponds to index | ||
1280 | * (in Tx queue's circular buffer) of first TFD/frame in window */ | ||
1281 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | ||
1282 | |||
1283 | if (scd_flow >= priv->hw_params.max_txq_num) { | ||
1284 | IWL_ERR(priv, | ||
1285 | "BUG_ON scd_flow is bigger than number of queues\n"); | ||
1286 | return; | ||
1287 | } | ||
1288 | |||
1289 | txq = &priv->txq[scd_flow]; | ||
1290 | sta_id = ba_resp->sta_id; | ||
1291 | tid = ba_resp->tid; | ||
1292 | agg = &priv->stations[sta_id].tid[tid].agg; | ||
1293 | |||
1294 | /* Find index just before block-ack window */ | ||
1295 | index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); | ||
1296 | |||
1297 | /* TODO: Need to get this copy more safely - now good for debug */ | ||
1298 | |||
1299 | IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " | ||
1300 | "sta_id = %d\n", | ||
1301 | agg->wait_for_ba, | ||
1302 | (u8 *) &ba_resp->sta_addr_lo32, | ||
1303 | ba_resp->sta_id); | ||
1304 | IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " | ||
1305 | "%d, scd_ssn = %d\n", | ||
1306 | ba_resp->tid, | ||
1307 | ba_resp->seq_ctl, | ||
1308 | (unsigned long long)le64_to_cpu(ba_resp->bitmap), | ||
1309 | ba_resp->scd_flow, | ||
1310 | ba_resp->scd_ssn); | ||
1311 | IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n", | ||
1312 | agg->start_idx, | ||
1313 | (unsigned long long)agg->bitmap); | ||
1314 | |||
1315 | /* Update driver's record of ACK vs. not for each frame in window */ | ||
1316 | iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp); | ||
1317 | |||
1318 | /* Release all TFDs before the SSN, i.e. all TFDs in front of | ||
1319 | * block-ack window (we assume that they've been successfully | ||
1320 | * transmitted ... if not, it's too late anyway). */ | ||
1321 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | ||
1322 | /* calculate mac80211 ampdu sw queue to wake */ | ||
1323 | int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index); | ||
1324 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | ||
1325 | |||
1326 | if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && | ||
1327 | priv->mac80211_registered && | ||
1328 | (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) | ||
1329 | iwl_wake_queue(priv, txq->swq_id); | ||
1330 | |||
1331 | iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow); | ||
1332 | } | ||
1333 | } | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c new file mode 100644 index 000000000000..52ae157968b2 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c | |||
@@ -0,0 +1,416 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * GPL LICENSE SUMMARY | ||
4 | * | ||
5 | * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of version 2 of the GNU General Public License as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
19 | * USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution | ||
22 | * in the file called LICENSE.GPL. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/sched.h> | ||
34 | |||
35 | #include "iwl-dev.h" | ||
36 | #include "iwl-core.h" | ||
37 | #include "iwl-io.h" | ||
38 | #include "iwl-helpers.h" | ||
39 | #include "iwl-agn-hw.h" | ||
40 | #include "iwl-agn.h" | ||
41 | |||
42 | static const s8 iwlagn_default_queue_to_tx_fifo[] = { | ||
43 | IWL_TX_FIFO_VO, | ||
44 | IWL_TX_FIFO_VI, | ||
45 | IWL_TX_FIFO_BE, | ||
46 | IWL_TX_FIFO_BK, | ||
47 | IWLAGN_CMD_FIFO_NUM, | ||
48 | IWL_TX_FIFO_UNUSED, | ||
49 | IWL_TX_FIFO_UNUSED, | ||
50 | IWL_TX_FIFO_UNUSED, | ||
51 | IWL_TX_FIFO_UNUSED, | ||
52 | IWL_TX_FIFO_UNUSED, | ||
53 | }; | ||
54 | |||
55 | /* | ||
56 | * ucode | ||
57 | */ | ||
58 | static int iwlagn_load_section(struct iwl_priv *priv, const char *name, | ||
59 | struct fw_desc *image, u32 dst_addr) | ||
60 | { | ||
61 | dma_addr_t phy_addr = image->p_addr; | ||
62 | u32 byte_cnt = image->len; | ||
63 | int ret; | ||
64 | |||
65 | priv->ucode_write_complete = 0; | ||
66 | |||
67 | iwl_write_direct32(priv, | ||
68 | FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), | ||
69 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); | ||
70 | |||
71 | iwl_write_direct32(priv, | ||
72 | FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); | ||
73 | |||
74 | iwl_write_direct32(priv, | ||
75 | FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), | ||
76 | phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); | ||
77 | |||
78 | iwl_write_direct32(priv, | ||
79 | FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), | ||
80 | (iwl_get_dma_hi_addr(phy_addr) | ||
81 | << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); | ||
82 | |||
83 | iwl_write_direct32(priv, | ||
84 | FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), | ||
85 | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | | ||
86 | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | | ||
87 | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); | ||
88 | |||
89 | iwl_write_direct32(priv, | ||
90 | FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), | ||
91 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
92 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | | ||
93 | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); | ||
94 | |||
95 | IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name); | ||
96 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | ||
97 | priv->ucode_write_complete, 5 * HZ); | ||
98 | if (ret == -ERESTARTSYS) { | ||
99 | IWL_ERR(priv, "Could not load the %s uCode section due " | ||
100 | "to interrupt\n", name); | ||
101 | return ret; | ||
102 | } | ||
103 | if (!ret) { | ||
104 | IWL_ERR(priv, "Could not load the %s uCode section\n", | ||
105 | name); | ||
106 | return -ETIMEDOUT; | ||
107 | } | ||
108 | |||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int iwlagn_load_given_ucode(struct iwl_priv *priv, | ||
113 | struct fw_desc *inst_image, | ||
114 | struct fw_desc *data_image) | ||
115 | { | ||
116 | int ret = 0; | ||
117 | |||
118 | ret = iwlagn_load_section(priv, "INST", inst_image, | ||
119 | IWLAGN_RTC_INST_LOWER_BOUND); | ||
120 | if (ret) | ||
121 | return ret; | ||
122 | |||
123 | return iwlagn_load_section(priv, "DATA", data_image, | ||
124 | IWLAGN_RTC_DATA_LOWER_BOUND); | ||
125 | } | ||
126 | |||
127 | int iwlagn_load_ucode(struct iwl_priv *priv) | ||
128 | { | ||
129 | int ret = 0; | ||
130 | |||
131 | /* check whether init ucode should be loaded, or rather runtime ucode */ | ||
132 | if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) { | ||
133 | IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n"); | ||
134 | ret = iwlagn_load_given_ucode(priv, | ||
135 | &priv->ucode_init, &priv->ucode_init_data); | ||
136 | if (!ret) { | ||
137 | IWL_DEBUG_INFO(priv, "Init ucode load complete.\n"); | ||
138 | priv->ucode_type = UCODE_INIT; | ||
139 | } | ||
140 | } else { | ||
141 | IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. " | ||
142 | "Loading runtime ucode...\n"); | ||
143 | ret = iwlagn_load_given_ucode(priv, | ||
144 | &priv->ucode_code, &priv->ucode_data); | ||
145 | if (!ret) { | ||
146 | IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n"); | ||
147 | priv->ucode_type = UCODE_RT; | ||
148 | } | ||
149 | } | ||
150 | |||
151 | return ret; | ||
152 | } | ||
153 | |||
154 | #define IWL_UCODE_GET(item) \ | ||
155 | static u32 iwlagn_ucode_get_##item(const struct iwl_ucode_header *ucode,\ | ||
156 | u32 api_ver) \ | ||
157 | { \ | ||
158 | if (api_ver <= 2) \ | ||
159 | return le32_to_cpu(ucode->u.v1.item); \ | ||
160 | return le32_to_cpu(ucode->u.v2.item); \ | ||
161 | } | ||
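IWL_UCODE_GET uses token pasting to generate one accessor per firmware-header field, each selecting the v1 or v2 layout by API version. A self-contained sketch of the same macro technique with invented structures:

#include <stdint.h>

struct ex_hdr_v1 { uint32_t inst_size; uint32_t data_size; };
struct ex_hdr_v2 { uint32_t build; uint32_t inst_size; uint32_t data_size; };

struct ex_header {
	union {
		struct ex_hdr_v1 v1;
		struct ex_hdr_v2 v2;
	} u;
};

#define EX_UCODE_GET(item)						\
static uint32_t ex_get_##item(const struct ex_header *hdr, uint32_t api_ver) \
{									\
	if (api_ver <= 2)						\
		return hdr->u.v1.item;					\
	return hdr->u.v2.item;						\
}

EX_UCODE_GET(inst_size)
EX_UCODE_GET(data_size)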
162 | |||
163 | static u32 iwlagn_ucode_get_header_size(u32 api_ver) | ||
164 | { | ||
165 | if (api_ver <= 2) | ||
166 | return UCODE_HEADER_SIZE(1); | ||
167 | return UCODE_HEADER_SIZE(2); | ||
168 | } | ||
169 | |||
170 | static u32 iwlagn_ucode_get_build(const struct iwl_ucode_header *ucode, | ||
171 | u32 api_ver) | ||
172 | { | ||
173 | if (api_ver <= 2) | ||
174 | return 0; | ||
175 | return le32_to_cpu(ucode->u.v2.build); | ||
176 | } | ||
177 | |||
178 | static u8 *iwlagn_ucode_get_data(const struct iwl_ucode_header *ucode, | ||
179 | u32 api_ver) | ||
180 | { | ||
181 | if (api_ver <= 2) | ||
182 | return (u8 *) ucode->u.v1.data; | ||
183 | return (u8 *) ucode->u.v2.data; | ||
184 | } | ||
185 | |||
186 | IWL_UCODE_GET(inst_size); | ||
187 | IWL_UCODE_GET(data_size); | ||
188 | IWL_UCODE_GET(init_size); | ||
189 | IWL_UCODE_GET(init_data_size); | ||
190 | IWL_UCODE_GET(boot_size); | ||
191 | |||
192 | struct iwl_ucode_ops iwlagn_ucode = { | ||
193 | .get_header_size = iwlagn_ucode_get_header_size, | ||
194 | .get_build = iwlagn_ucode_get_build, | ||
195 | .get_inst_size = iwlagn_ucode_get_inst_size, | ||
196 | .get_data_size = iwlagn_ucode_get_data_size, | ||
197 | .get_init_size = iwlagn_ucode_get_init_size, | ||
198 | .get_init_data_size = iwlagn_ucode_get_init_data_size, | ||
199 | .get_boot_size = iwlagn_ucode_get_boot_size, | ||
200 | .get_data = iwlagn_ucode_get_data, | ||
201 | }; | ||
202 | |||
203 | /* | ||
204 | * Calibration | ||
205 | */ | ||
206 | static int iwlagn_set_Xtal_calib(struct iwl_priv *priv) | ||
207 | { | ||
208 | struct iwl_calib_xtal_freq_cmd cmd; | ||
209 | __le16 *xtal_calib = | ||
210 | (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); | ||
211 | |||
212 | cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; | ||
213 | cmd.hdr.first_group = 0; | ||
214 | cmd.hdr.groups_num = 1; | ||
215 | cmd.hdr.data_valid = 1; | ||
216 | cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); | ||
217 | cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); | ||
218 | return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], | ||
219 | (u8 *)&cmd, sizeof(cmd)); | ||
220 | } | ||
221 | |||
222 | static int iwlagn_send_calib_cfg(struct iwl_priv *priv) | ||
223 | { | ||
224 | struct iwl_calib_cfg_cmd calib_cfg_cmd; | ||
225 | struct iwl_host_cmd cmd = { | ||
226 | .id = CALIBRATION_CFG_CMD, | ||
227 | .len = sizeof(struct iwl_calib_cfg_cmd), | ||
228 | .data = &calib_cfg_cmd, | ||
229 | }; | ||
230 | |||
231 | memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); | ||
232 | calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL; | ||
233 | calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL; | ||
234 | calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL; | ||
235 | calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL; | ||
236 | |||
237 | return iwl_send_cmd(priv, &cmd); | ||
238 | } | ||
239 | |||
240 | void iwlagn_rx_calib_result(struct iwl_priv *priv, | ||
241 | struct iwl_rx_mem_buffer *rxb) | ||
242 | { | ||
243 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
244 | struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw; | ||
245 | int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; | ||
246 | int index; | ||
247 | |||
248 | /* reduce len by the size of the length field itself */ | ||
249 | len -= 4; | ||
250 | |||
251 | /* Define the order in which the results will be sent to the runtime | ||
252 | * uCode. iwl_send_calib_results sends them in a row according to | ||
253 | * their index. We sort them here | ||
254 | */ | ||
255 | switch (hdr->op_code) { | ||
256 | case IWL_PHY_CALIBRATE_DC_CMD: | ||
257 | index = IWL_CALIB_DC; | ||
258 | break; | ||
259 | case IWL_PHY_CALIBRATE_LO_CMD: | ||
260 | index = IWL_CALIB_LO; | ||
261 | break; | ||
262 | case IWL_PHY_CALIBRATE_TX_IQ_CMD: | ||
263 | index = IWL_CALIB_TX_IQ; | ||
264 | break; | ||
265 | case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD: | ||
266 | index = IWL_CALIB_TX_IQ_PERD; | ||
267 | break; | ||
268 | case IWL_PHY_CALIBRATE_BASE_BAND_CMD: | ||
269 | index = IWL_CALIB_BASE_BAND; | ||
270 | break; | ||
271 | default: | ||
272 | IWL_ERR(priv, "Unknown calibration notification %d\n", | ||
273 | hdr->op_code); | ||
274 | return; | ||
275 | } | ||
276 | iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len); | ||
277 | } | ||
278 | |||
279 | void iwlagn_rx_calib_complete(struct iwl_priv *priv, | ||
280 | struct iwl_rx_mem_buffer *rxb) | ||
281 | { | ||
282 | IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n"); | ||
283 | queue_work(priv->workqueue, &priv->restart); | ||
284 | } | ||
285 | |||
286 | void iwlagn_init_alive_start(struct iwl_priv *priv) | ||
287 | { | ||
288 | int ret = 0; | ||
289 | |||
290 | /* Check alive response for "valid" sign from uCode */ | ||
291 | if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { | ||
292 | /* We had an error bringing up the hardware, so take it | ||
293 | * all the way back down so we can try again */ | ||
294 | IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); | ||
295 | goto restart; | ||
296 | } | ||
297 | |||
298 | /* initialize uCode was loaded... verify inst image. | ||
299 | * This is a paranoid check, because we would not have gotten the | ||
300 | * "initialize" alive if code weren't properly loaded. */ | ||
301 | if (iwl_verify_ucode(priv)) { | ||
302 | /* The "initialize" instruction load was bad; | ||
303 | * take it all the way back down so we can try again */ | ||
304 | IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); | ||
305 | goto restart; | ||
306 | } | ||
307 | |||
308 | ret = priv->cfg->ops->lib->alive_notify(priv); | ||
309 | if (ret) { | ||
310 | IWL_WARN(priv, | ||
311 | "Could not complete ALIVE transition: %d\n", ret); | ||
312 | goto restart; | ||
313 | } | ||
314 | |||
315 | iwlagn_send_calib_cfg(priv); | ||
316 | return; | ||
317 | |||
318 | restart: | ||
319 | /* real restart (first load init_ucode) */ | ||
320 | queue_work(priv->workqueue, &priv->restart); | ||
321 | } | ||
322 | |||
323 | int iwlagn_alive_notify(struct iwl_priv *priv) | ||
324 | { | ||
325 | u32 a; | ||
326 | unsigned long flags; | ||
327 | int i, chan; | ||
328 | u32 reg_val; | ||
329 | |||
330 | spin_lock_irqsave(&priv->lock, flags); | ||
331 | |||
332 | priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR); | ||
333 | a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET; | ||
334 | for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET; | ||
335 | a += 4) | ||
336 | iwl_write_targ_mem(priv, a, 0); | ||
337 | for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET; | ||
338 | a += 4) | ||
339 | iwl_write_targ_mem(priv, a, 0); | ||
340 | for (; a < priv->scd_base_addr + | ||
341 | IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) | ||
342 | iwl_write_targ_mem(priv, a, 0); | ||
343 | |||
344 | iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR, | ||
345 | priv->scd_bc_tbls.dma >> 10); | ||
346 | |||
347 | /* Enable DMA channel */ | ||
348 | for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++) | ||
349 | iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | ||
350 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
351 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); | ||
352 | |||
353 | /* Update FH chicken bits */ | ||
354 | reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); | ||
355 | iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, | ||
356 | reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); | ||
357 | |||
358 | iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, | ||
359 | IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num)); | ||
360 | iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0); | ||
361 | |||
362 | /* initialize the queues */ | ||
363 | for (i = 0; i < priv->hw_params.max_txq_num; i++) { | ||
364 | iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0); | ||
365 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); | ||
366 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
367 | IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0); | ||
368 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
369 | IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) + | ||
370 | sizeof(u32), | ||
371 | ((SCD_WIN_SIZE << | ||
372 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & | ||
373 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | | ||
374 | ((SCD_FRAME_LIMIT << | ||
375 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | ||
376 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); | ||
377 | } | ||
378 | |||
379 | iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK, | ||
380 | IWL_MASK(0, priv->hw_params.max_txq_num)); | ||
381 | |||
382 | /* Activate all Tx DMA/FIFO channels */ | ||
383 | priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); | ||
384 | |||
385 | iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); | ||
386 | |||
387 | /* make sure all queues are not stopped */ | ||
388 | memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); | ||
389 | for (i = 0; i < 4; i++) | ||
390 | atomic_set(&priv->queue_stop_count[i], 0); | ||
391 | |||
392 | /* reset to 0 to enable all the queues first */ | ||
393 | priv->txq_ctx_active_msk = 0; | ||
394 | /* map qos queues to fifos one-to-one */ | ||
395 | BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10); | ||
396 | |||
397 | for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) { | ||
398 | int ac = iwlagn_default_queue_to_tx_fifo[i]; | ||
399 | |||
400 | iwl_txq_ctx_activate(priv, i); | ||
401 | |||
402 | if (ac == IWL_TX_FIFO_UNUSED) | ||
403 | continue; | ||
404 | |||
405 | iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0); | ||
406 | } | ||
407 | |||
408 | spin_unlock_irqrestore(&priv->lock, flags); | ||
409 | |||
410 | iwl_send_wimax_coex(priv); | ||
411 | |||
412 | iwlagn_set_Xtal_calib(priv); | ||
413 | iwl_send_calib_results(priv); | ||
414 | |||
415 | return 0; | ||
416 | } | ||
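The per-queue scheduler context written near the end of iwlagn_alive_notify packs a window size and frame limit into one 32-bit word with shift-and-mask pairs. A small sketch of that packing style; the EX_* positions and masks are illustrative, not the IWL50_SCD_* definitions:

#include <stdint.h>

#define EX_WIN_SIZE_POS		0
#define EX_WIN_SIZE_MSK		0x0000007fu	/* illustrative 7-bit field */
#define EX_FRAME_LIMIT_POS	16
#define EX_FRAME_LIMIT_MSK	0x007f0000u	/* illustrative 7-bit field */

static uint32_t ex_queue_ctx_reg2(uint32_t win_size, uint32_t frame_limit)
{
	return ((win_size << EX_WIN_SIZE_POS) & EX_WIN_SIZE_MSK) |
	       ((frame_limit << EX_FRAME_LIMIT_POS) & EX_FRAME_LIMIT_MSK);
}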
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index fe4cec61bdec..0b497d4bc659 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -84,13 +84,6 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); | |||
84 | MODULE_LICENSE("GPL"); | 84 | MODULE_LICENSE("GPL"); |
85 | MODULE_ALIAS("iwl4965"); | 85 | MODULE_ALIAS("iwl4965"); |
86 | 86 | ||
87 | /*************** STATION TABLE MANAGEMENT **** | ||
88 | * mac80211 should be examined to determine if sta_info is duplicating | ||
89 | * the functionality provided here | ||
90 | */ | ||
91 | |||
92 | /**************************************************************/ | ||
93 | |||
94 | /** | 87 | /** |
95 | * iwl_commit_rxon - commit staging_rxon to hardware | 88 | * iwl_commit_rxon - commit staging_rxon to hardware |
96 | * | 89 | * |
@@ -166,6 +159,11 @@ int iwl_commit_rxon(struct iwl_priv *priv) | |||
166 | } | 159 | } |
167 | iwl_clear_ucode_stations(priv, false); | 160 | iwl_clear_ucode_stations(priv, false); |
168 | iwl_restore_stations(priv); | 161 | iwl_restore_stations(priv); |
162 | ret = iwl_restore_default_wep_keys(priv); | ||
163 | if (ret) { | ||
164 | IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); | ||
165 | return ret; | ||
166 | } | ||
169 | } | 167 | } |
170 | 168 | ||
171 | IWL_DEBUG_INFO(priv, "Sending RXON\n" | 169 | IWL_DEBUG_INFO(priv, "Sending RXON\n" |
@@ -189,10 +187,15 @@ int iwl_commit_rxon(struct iwl_priv *priv) | |||
189 | IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); | 187 | IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); |
190 | return ret; | 188 | return ret; |
191 | } | 189 | } |
192 | IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON. \n"); | 190 | IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n"); |
193 | memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); | 191 | memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); |
194 | iwl_clear_ucode_stations(priv, false); | 192 | iwl_clear_ucode_stations(priv, false); |
195 | iwl_restore_stations(priv); | 193 | iwl_restore_stations(priv); |
194 | ret = iwl_restore_default_wep_keys(priv); | ||
195 | if (ret) { | ||
196 | IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); | ||
197 | return ret; | ||
198 | } | ||
196 | } | 199 | } |
197 | 200 | ||
198 | priv->start_calib = 0; | 201 | priv->start_calib = 0; |
@@ -885,10 +888,10 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv) | |||
885 | priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = | 888 | priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = |
886 | iwl_rx_missed_beacon_notif; | 889 | iwl_rx_missed_beacon_notif; |
887 | /* Rx handlers */ | 890 | /* Rx handlers */ |
888 | priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy; | 891 | priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy; |
889 | priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx; | 892 | priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx; |
890 | /* block ack */ | 893 | /* block ack */ |
891 | priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba; | 894 | priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba; |
892 | /* Set up hardware specific Rx handlers */ | 895 | /* Set up hardware specific Rx handlers */ |
893 | priv->cfg->ops->lib->rx_handler_setup(priv); | 896 | priv->cfg->ops->lib->rx_handler_setup(priv); |
894 | } | 897 | } |
@@ -1016,7 +1019,7 @@ void iwl_rx_handle(struct iwl_priv *priv) | |||
1016 | count++; | 1019 | count++; |
1017 | if (count >= 8) { | 1020 | if (count >= 8) { |
1018 | rxq->read = i; | 1021 | rxq->read = i; |
1019 | iwl_rx_replenish_now(priv); | 1022 | iwlagn_rx_replenish_now(priv); |
1020 | count = 0; | 1023 | count = 0; |
1021 | } | 1024 | } |
1022 | } | 1025 | } |
@@ -1025,9 +1028,9 @@ void iwl_rx_handle(struct iwl_priv *priv) | |||
1025 | /* Backtrack one entry */ | 1028 | /* Backtrack one entry */ |
1026 | rxq->read = i; | 1029 | rxq->read = i; |
1027 | if (fill_rx) | 1030 | if (fill_rx) |
1028 | iwl_rx_replenish_now(priv); | 1031 | iwlagn_rx_replenish_now(priv); |
1029 | else | 1032 | else |
1030 | iwl_rx_queue_restock(priv); | 1033 | iwlagn_rx_queue_restock(priv); |
1031 | } | 1034 | } |
1032 | 1035 | ||
1033 | /* call this function to flush any scheduled tasklet */ | 1036 | /* call this function to flush any scheduled tasklet */ |
@@ -1426,6 +1429,60 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) | |||
1426 | iwl_enable_interrupts(priv); | 1429 | iwl_enable_interrupts(priv); |
1427 | } | 1430 | } |
1428 | 1431 | ||
1432 | /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ | ||
1433 | #define ACK_CNT_RATIO (50) | ||
1434 | #define BA_TIMEOUT_CNT (5) | ||
1435 | #define BA_TIMEOUT_MAX (16) | ||
1436 | |||
1437 | /** | ||
1438 | * iwl_good_ack_health - checks the ACK count ratio and BA timeout retries. | ||
1439 | * | ||
1440 | * When the ACK count ratio is 0 and the aggregated BA timeout retry count | ||
1441 | * exceeds BA_TIMEOUT_MAX, reload the firmware and bring the system back to | ||
1442 | * normal operation state. | ||
1443 | */ | ||
1444 | bool iwl_good_ack_health(struct iwl_priv *priv, | ||
1445 | struct iwl_rx_packet *pkt) | ||
1446 | { | ||
1447 | bool rc = true; | ||
1448 | int actual_ack_cnt_delta, expected_ack_cnt_delta; | ||
1449 | int ba_timeout_delta; | ||
1450 | |||
1451 | actual_ack_cnt_delta = | ||
1452 | le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - | ||
1453 | le32_to_cpu(priv->statistics.tx.actual_ack_cnt); | ||
1454 | expected_ack_cnt_delta = | ||
1455 | le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - | ||
1456 | le32_to_cpu(priv->statistics.tx.expected_ack_cnt); | ||
1457 | ba_timeout_delta = | ||
1458 | le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - | ||
1459 | le32_to_cpu(priv->statistics.tx.agg.ba_timeout); | ||
1460 | if ((priv->_agn.agg_tids_count > 0) && | ||
1461 | (expected_ack_cnt_delta > 0) && | ||
1462 | (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) | ||
1463 | < ACK_CNT_RATIO) && | ||
1464 | (ba_timeout_delta > BA_TIMEOUT_CNT)) { | ||
1465 | IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," | ||
1466 | " expected_ack_cnt = %d\n", | ||
1467 | actual_ack_cnt_delta, expected_ack_cnt_delta); | ||
1468 | |||
1469 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1470 | IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", | ||
1471 | priv->delta_statistics.tx.rx_detected_cnt); | ||
1472 | IWL_DEBUG_RADIO(priv, | ||
1473 | "ack_or_ba_timeout_collision delta = %d\n", | ||
1474 | priv->delta_statistics.tx. | ||
1475 | ack_or_ba_timeout_collision); | ||
1476 | #endif | ||
1477 | IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", | ||
1478 | ba_timeout_delta); | ||
1479 | if (!actual_ack_cnt_delta && | ||
1480 | (ba_timeout_delta >= BA_TIMEOUT_MAX)) | ||
1481 | rc = false; | ||
1482 | } | ||
1483 | return rc; | ||
1484 | } | ||
1485 | |||
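The new health check fires when fewer than ACK_CNT_RATIO percent of the expected ACKs arrived while aggregation BA timeouts keep piling up. A worked standalone example of the same ratio test, using made-up sample numbers:

#include <stdio.h>

#define EX_ACK_CNT_RATIO	50	/* matches the 50% threshold in the patch */
#define EX_BA_TIMEOUT_CNT	5

int main(void)
{
	int actual_ack_delta   = 12;	/* example: 12 ACKs seen this period   */
	int expected_ack_delta = 40;	/* example: 40 ACKs were expected      */
	int ba_timeout_delta   = 7;	/* example: 7 new aggregation timeouts */

	int ratio = (actual_ack_delta * 100) / expected_ack_delta;	/* 30% */
	int unhealthy = (expected_ack_delta > 0) &&
			(ratio < EX_ACK_CNT_RATIO) &&
			(ba_timeout_delta > EX_BA_TIMEOUT_CNT);

	printf("ack ratio = %d%%, unhealthy = %d\n", ratio, unhealthy);
	return 0;
}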
1429 | 1486 | ||
1430 | /****************************************************************************** | 1487 | /****************************************************************************** |
1431 | * | 1488 | * |
@@ -1787,6 +1844,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv) | |||
1787 | u32 data2, line; | 1844 | u32 data2, line; |
1788 | u32 desc, time, count, base, data1; | 1845 | u32 desc, time, count, base, data1; |
1789 | u32 blink1, blink2, ilink1, ilink2; | 1846 | u32 blink1, blink2, ilink1, ilink2; |
1847 | u32 pc, hcmd; | ||
1790 | 1848 | ||
1791 | if (priv->ucode_type == UCODE_INIT) | 1849 | if (priv->ucode_type == UCODE_INIT) |
1792 | base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); | 1850 | base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); |
@@ -1809,6 +1867,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv) | |||
1809 | } | 1867 | } |
1810 | 1868 | ||
1811 | desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); | 1869 | desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); |
1870 | pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32)); | ||
1812 | blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); | 1871 | blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); |
1813 | blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); | 1872 | blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); |
1814 | ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32)); | 1873 | ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32)); |
@@ -1817,6 +1876,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv) | |||
1817 | data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32)); | 1876 | data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32)); |
1818 | line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); | 1877 | line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); |
1819 | time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); | 1878 | time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); |
1879 | hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32)); | ||
1820 | 1880 | ||
1821 | trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, | 1881 | trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, |
1822 | blink1, blink2, ilink1, ilink2); | 1882 | blink1, blink2, ilink1, ilink2); |
@@ -1825,10 +1885,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv) | |||
1825 | "data1 data2 line\n"); | 1885 | "data1 data2 line\n"); |
1826 | IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", | 1886 | IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", |
1827 | desc_lookup(desc), desc, time, data1, data2, line); | 1887 | desc_lookup(desc), desc, time, data1, data2, line); |
1828 | IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n"); | 1888 | IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); |
1829 | IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, | 1889 | IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", |
1830 | ilink1, ilink2); | 1890 | pc, blink1, blink2, ilink1, ilink2, hcmd); |
1831 | |||
1832 | } | 1891 | } |
1833 | 1892 | ||
1834 | #define EVENT_START_OFFSET (4 * sizeof(u32)) | 1893 | #define EVENT_START_OFFSET (4 * sizeof(u32)) |
@@ -1944,9 +2003,6 @@ static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity, | |||
1944 | return pos; | 2003 | return pos; |
1945 | } | 2004 | } |
1946 | 2005 | ||
1947 | /* For sanity check only. Actual size is determined by uCode, typ. 512 */ | ||
1948 | #define MAX_EVENT_LOG_SIZE (512) | ||
1949 | |||
1950 | #define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) | 2006 | #define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) |
1951 | 2007 | ||
1952 | int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, | 2008 | int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, |
@@ -1979,16 +2035,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, | |||
1979 | num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); | 2035 | num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); |
1980 | next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); | 2036 | next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); |
1981 | 2037 | ||
1982 | if (capacity > MAX_EVENT_LOG_SIZE) { | 2038 | if (capacity > priv->cfg->max_event_log_size) { |
1983 | IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", | 2039 | IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", |
1984 | capacity, MAX_EVENT_LOG_SIZE); | 2040 | capacity, priv->cfg->max_event_log_size); |
1985 | capacity = MAX_EVENT_LOG_SIZE; | 2041 | capacity = priv->cfg->max_event_log_size; |
1986 | } | 2042 | } |
1987 | 2043 | ||
1988 | if (next_entry > MAX_EVENT_LOG_SIZE) { | 2044 | if (next_entry > priv->cfg->max_event_log_size) { |
1989 | IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", | 2045 | IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", |
1990 | next_entry, MAX_EVENT_LOG_SIZE); | 2046 | next_entry, priv->cfg->max_event_log_size); |
1991 | next_entry = MAX_EVENT_LOG_SIZE; | 2047 | next_entry = priv->cfg->max_event_log_size; |
1992 | } | 2048 | } |
1993 | 2049 | ||
1994 | size = num_wraps ? capacity : next_entry; | 2050 | size = num_wraps ? capacity : next_entry; |
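The hunk above replaces the fixed MAX_EVENT_LOG_SIZE sanity limit with the per-device priv->cfg->max_event_log_size, so each configuration declares the uCode event log capacity it actually has. A minimal standalone sketch of the clamp, using simplified stand-in types rather than the driver's own structures:

#include <stdio.h>

struct cfg { unsigned int max_event_log_size; };

static unsigned int clamp_log_value(unsigned int val, const struct cfg *cfg,
                                    const char *what)
{
        if (val > cfg->max_event_log_size) {
                fprintf(stderr, "%s %u is bogus, limit to %u entries\n",
                        what, val, cfg->max_event_log_size);
                val = cfg->max_event_log_size;
        }
        return val;
}

int main(void)
{
        struct cfg c = { .max_event_log_size = 512 };   /* illustrative capacity */
        unsigned int capacity   = clamp_log_value(9999, &c, "Log capacity");
        unsigned int next_entry = clamp_log_value(100,  &c, "Log write index");

        printf("capacity=%u next_entry=%u\n", capacity, next_entry);
        return 0;
}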
@@ -2204,8 +2260,8 @@ static void __iwl_down(struct iwl_priv *priv) | |||
2204 | /* device going down, Stop using ICT table */ | 2260 | /* device going down, Stop using ICT table */ |
2205 | iwl_disable_ict(priv); | 2261 | iwl_disable_ict(priv); |
2206 | 2262 | ||
2207 | iwl_txq_ctx_stop(priv); | 2263 | iwlagn_txq_ctx_stop(priv); |
2208 | iwl_rxq_stop(priv); | 2264 | iwlagn_rxq_stop(priv); |
2209 | 2265 | ||
2210 | /* Power-down device's busmaster DMA clocks */ | 2266 | /* Power-down device's busmaster DMA clocks */ |
2211 | iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); | 2267 | iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); |
@@ -2265,7 +2321,7 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv) | |||
2265 | { | 2321 | { |
2266 | int ret = 0; | 2322 | int ret = 0; |
2267 | 2323 | ||
2268 | IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n"); | 2324 | IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n"); |
2269 | 2325 | ||
2270 | ret = iwl_set_hw_ready(priv); | 2326 | ret = iwl_set_hw_ready(priv); |
2271 | if (priv->hw_ready) | 2327 | if (priv->hw_ready) |
@@ -2326,7 +2382,7 @@ static int __iwl_up(struct iwl_priv *priv) | |||
2326 | 2382 | ||
2327 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | 2383 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); |
2328 | 2384 | ||
2329 | ret = iwl_hw_nic_init(priv); | 2385 | ret = iwlagn_hw_nic_init(priv); |
2330 | if (ret) { | 2386 | if (ret) { |
2331 | IWL_ERR(priv, "Unable to init nic\n"); | 2387 | IWL_ERR(priv, "Unable to init nic\n"); |
2332 | return ret; | 2388 | return ret; |
@@ -2476,7 +2532,7 @@ static void iwl_bg_rx_replenish(struct work_struct *data) | |||
2476 | return; | 2532 | return; |
2477 | 2533 | ||
2478 | mutex_lock(&priv->mutex); | 2534 | mutex_lock(&priv->mutex); |
2479 | iwl_rx_replenish(priv); | 2535 | iwlagn_rx_replenish(priv); |
2480 | mutex_unlock(&priv->mutex); | 2536 | mutex_unlock(&priv->mutex); |
2481 | } | 2537 | } |
2482 | 2538 | ||
@@ -2486,7 +2542,6 @@ void iwl_post_associate(struct iwl_priv *priv) | |||
2486 | { | 2542 | { |
2487 | struct ieee80211_conf *conf = NULL; | 2543 | struct ieee80211_conf *conf = NULL; |
2488 | int ret = 0; | 2544 | int ret = 0; |
2489 | unsigned long flags; | ||
2490 | 2545 | ||
2491 | if (priv->iw_mode == NL80211_IFTYPE_AP) { | 2546 | if (priv->iw_mode == NL80211_IFTYPE_AP) { |
2492 | IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); | 2547 | IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); |
@@ -2567,10 +2622,6 @@ void iwl_post_associate(struct iwl_priv *priv) | |||
2567 | break; | 2622 | break; |
2568 | } | 2623 | } |
2569 | 2624 | ||
2570 | spin_lock_irqsave(&priv->lock, flags); | ||
2571 | iwl_activate_qos(priv, 0); | ||
2572 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2573 | |||
2574 | /* the chain noise calibration will enabled PM upon completion | 2625 | /* the chain noise calibration will enabled PM upon completion |
2575 | * If chain noise has already been run, then we need to enable | 2626 | * If chain noise has already been run, then we need to enable |
2576 | * power management here */ | 2627 | * power management here */ |
@@ -2737,7 +2788,7 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
2737 | IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, | 2788 | IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, |
2738 | ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); | 2789 | ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); |
2739 | 2790 | ||
2740 | if (iwl_tx_skb(priv, skb)) | 2791 | if (iwlagn_tx_skb(priv, skb)) |
2741 | dev_kfree_skb_any(skb); | 2792 | dev_kfree_skb_any(skb); |
2742 | 2793 | ||
2743 | IWL_DEBUG_MACDUMP(priv, "leave\n"); | 2794 | IWL_DEBUG_MACDUMP(priv, "leave\n"); |
@@ -2747,7 +2798,6 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
2747 | void iwl_config_ap(struct iwl_priv *priv) | 2798 | void iwl_config_ap(struct iwl_priv *priv) |
2748 | { | 2799 | { |
2749 | int ret = 0; | 2800 | int ret = 0; |
2750 | unsigned long flags; | ||
2751 | 2801 | ||
2752 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | 2802 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
2753 | return; | 2803 | return; |
@@ -2799,10 +2849,6 @@ void iwl_config_ap(struct iwl_priv *priv) | |||
2799 | /* restore RXON assoc */ | 2849 | /* restore RXON assoc */ |
2800 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; | 2850 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; |
2801 | iwlcore_commit_rxon(priv); | 2851 | iwlcore_commit_rxon(priv); |
2802 | iwl_reset_qos(priv); | ||
2803 | spin_lock_irqsave(&priv->lock, flags); | ||
2804 | iwl_activate_qos(priv, 1); | ||
2805 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2806 | iwl_add_bcast_station(priv); | 2852 | iwl_add_bcast_station(priv); |
2807 | } | 2853 | } |
2808 | iwl_send_beacon_cmd(priv); | 2854 | iwl_send_beacon_cmd(priv); |
@@ -2858,12 +2904,13 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
2858 | mutex_lock(&priv->mutex); | 2904 | mutex_lock(&priv->mutex); |
2859 | iwl_scan_cancel_timeout(priv, 100); | 2905 | iwl_scan_cancel_timeout(priv, 100); |
2860 | 2906 | ||
2861 | /* If we are getting WEP group key and we didn't receive any key mapping | 2907 | /* |
2908 | * If we are getting WEP group key and we didn't receive any key mapping | ||
2862 | * so far, we are in legacy wep mode (group key only), otherwise we are | 2909 | * so far, we are in legacy wep mode (group key only), otherwise we are |
2863 | * in 1X mode. | 2910 | * in 1X mode. |
2864 | * In legacy wep mode, we use another host command to the uCode */ | 2911 | * In legacy wep mode, we use another host command to the uCode. |
2865 | if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id && | 2912 | */ |
2866 | priv->iw_mode != NL80211_IFTYPE_AP) { | 2913 | if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) { |
2867 | if (cmd == SET_KEY) | 2914 | if (cmd == SET_KEY) |
2868 | is_default_wep_key = !priv->key_mapping_key; | 2915 | is_default_wep_key = !priv->key_mapping_key; |
2869 | else | 2916 | else |
@@ -2925,7 +2972,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, | |||
2925 | return ret; | 2972 | return ret; |
2926 | case IEEE80211_AMPDU_TX_START: | 2973 | case IEEE80211_AMPDU_TX_START: |
2927 | IWL_DEBUG_HT(priv, "start Tx\n"); | 2974 | IWL_DEBUG_HT(priv, "start Tx\n"); |
2928 | ret = iwl_tx_agg_start(priv, sta->addr, tid, ssn); | 2975 | ret = iwlagn_tx_agg_start(priv, sta->addr, tid, ssn); |
2929 | if (ret == 0) { | 2976 | if (ret == 0) { |
2930 | priv->_agn.agg_tids_count++; | 2977 | priv->_agn.agg_tids_count++; |
2931 | IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", | 2978 | IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", |
@@ -2934,7 +2981,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, | |||
2934 | return ret; | 2981 | return ret; |
2935 | case IEEE80211_AMPDU_TX_STOP: | 2982 | case IEEE80211_AMPDU_TX_STOP: |
2936 | IWL_DEBUG_HT(priv, "stop Tx\n"); | 2983 | IWL_DEBUG_HT(priv, "stop Tx\n"); |
2937 | ret = iwl_tx_agg_stop(priv, sta->addr, tid); | 2984 | ret = iwlagn_tx_agg_stop(priv, sta->addr, tid); |
2938 | if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) { | 2985 | if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) { |
2939 | priv->_agn.agg_tids_count--; | 2986 | priv->_agn.agg_tids_count--; |
2940 | IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", | 2987 | IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", |
@@ -2997,19 +3044,6 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw, | |||
2997 | } | 3044 | } |
2998 | } | 3045 | } |
2999 | 3046 | ||
3000 | /** | ||
3001 | * iwl_restore_wepkeys - Restore WEP keys to device | ||
3002 | */ | ||
3003 | static void iwl_restore_wepkeys(struct iwl_priv *priv) | ||
3004 | { | ||
3005 | mutex_lock(&priv->mutex); | ||
3006 | if (priv->iw_mode == NL80211_IFTYPE_STATION && | ||
3007 | priv->default_wep_key && | ||
3008 | iwl_send_static_wepkey_cmd(priv, 0)) | ||
3009 | IWL_ERR(priv, "Could not send WEP static key\n"); | ||
3010 | mutex_unlock(&priv->mutex); | ||
3011 | } | ||
3012 | |||
3013 | static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, | 3047 | static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, |
3014 | struct ieee80211_vif *vif, | 3048 | struct ieee80211_vif *vif, |
3015 | struct ieee80211_sta *sta) | 3049 | struct ieee80211_sta *sta) |
@@ -3036,10 +3070,8 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, | |||
3036 | return ret; | 3070 | return ret; |
3037 | } | 3071 | } |
3038 | 3072 | ||
3039 | iwl_restore_wepkeys(priv); | ||
3040 | |||
3041 | /* Initialize rate scaling */ | 3073 | /* Initialize rate scaling */ |
3042 | IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM \n", | 3074 | IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", |
3043 | sta->addr); | 3075 | sta->addr); |
3044 | iwl_rs_rate_init(priv, sta, sta_id); | 3076 | iwl_rs_rate_init(priv, sta, sta_id); |
3045 | 3077 | ||
@@ -3337,15 +3369,10 @@ static int iwl_init_drv(struct iwl_priv *priv) | |||
3337 | 3369 | ||
3338 | iwl_init_scan_params(priv); | 3370 | iwl_init_scan_params(priv); |
3339 | 3371 | ||
3340 | iwl_reset_qos(priv); | ||
3341 | |||
3342 | priv->qos_data.qos_active = 0; | ||
3343 | priv->qos_data.qos_cap.val = 0; | ||
3344 | |||
3345 | /* Set the tx_power_user_lmt to the lowest power level | 3372 | /* Set the tx_power_user_lmt to the lowest power level |
3346 | * this value will get overwritten by channel max power avg | 3373 | * this value will get overwritten by channel max power avg |
3347 | * from eeprom */ | 3374 | * from eeprom */ |
3348 | priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN; | 3375 | priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN; |
3349 | 3376 | ||
3350 | ret = iwl_init_channel_map(priv); | 3377 | ret = iwl_init_channel_map(priv); |
3351 | if (ret) { | 3378 | if (ret) { |
@@ -3692,8 +3719,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) | |||
3692 | iwl_dealloc_ucode_pci(priv); | 3719 | iwl_dealloc_ucode_pci(priv); |
3693 | 3720 | ||
3694 | if (priv->rxq.bd) | 3721 | if (priv->rxq.bd) |
3695 | iwl_rx_queue_free(priv, &priv->rxq); | 3722 | iwlagn_rx_queue_free(priv, &priv->rxq); |
3696 | iwl_hw_txq_ctx_free(priv); | 3723 | iwlagn_hw_txq_ctx_free(priv); |
3697 | 3724 | ||
3698 | iwl_eeprom_free(priv); | 3725 | iwl_eeprom_free(priv); |
3699 | 3726 | ||
@@ -3808,6 +3835,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | |||
3808 | {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, | 3835 | {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, |
3809 | {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, | 3836 | {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, |
3810 | {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, | 3837 | {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, |
3838 | {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000i_g2_2agn_cfg)}, | ||
3811 | 3839 | ||
3812 | /* 6x50 WiFi/WiMax Series */ | 3840 | /* 6x50 WiFi/WiMax Series */ |
3813 | {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, | 3841 | {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, |
@@ -3890,3 +3918,33 @@ module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); | |||
3890 | MODULE_PARM_DESC(debug, "debug output mask"); | 3918 | MODULE_PARM_DESC(debug, "debug output mask"); |
3891 | #endif | 3919 | #endif |
3892 | 3920 | ||
3921 | module_param_named(swcrypto50, iwlagn_mod_params.sw_crypto, bool, S_IRUGO); | ||
3922 | MODULE_PARM_DESC(swcrypto50, | ||
3923 | "using crypto in software (default 0 [hardware]) (deprecated)"); | ||
3924 | module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO); | ||
3925 | MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); | ||
3926 | module_param_named(queues_num50, | ||
3927 | iwlagn_mod_params.num_of_queues, int, S_IRUGO); | ||
3928 | MODULE_PARM_DESC(queues_num50, | ||
3929 | "number of hw queues in 50xx series (deprecated)"); | ||
3930 | module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO); | ||
3931 | MODULE_PARM_DESC(queues_num, "number of hw queues."); | ||
3932 | module_param_named(11n_disable50, iwlagn_mod_params.disable_11n, int, S_IRUGO); | ||
3933 | MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality (deprecated)"); | ||
3934 | module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO); | ||
3935 | MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); | ||
3936 | module_param_named(amsdu_size_8K50, iwlagn_mod_params.amsdu_size_8K, | ||
3937 | int, S_IRUGO); | ||
3938 | MODULE_PARM_DESC(amsdu_size_8K50, | ||
3939 | "enable 8K amsdu size in 50XX series (deprecated)"); | ||
3940 | module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K, | ||
3941 | int, S_IRUGO); | ||
3942 | MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); | ||
3943 | module_param_named(fw_restart50, iwlagn_mod_params.restart_fw, int, S_IRUGO); | ||
3944 | MODULE_PARM_DESC(fw_restart50, | ||
3945 | "restart firmware in case of error (deprecated)"); | ||
3946 | module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO); | ||
3947 | MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); | ||
3948 | module_param_named( | ||
3949 | disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO); | ||
3950 | MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); | ||
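Every entry in the parameter block above follows the same pattern: the deprecated "...50"-suffixed name and its new generic replacement are bound to the same iwlagn_mod_params field, so either spelling on the modprobe command line lands in the same storage. A condensed sketch of that aliasing, with one illustrative pair and a local backing variable standing in for the real structure:

#include <linux/module.h>
#include <linux/stat.h>

static int restart_fw;  /* stand-in for iwlagn_mod_params.restart_fw */

module_param_named(fw_restart50, restart_fw, int, S_IRUGO);   /* legacy name */
MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error (deprecated)");

module_param_named(fw_restart, restart_fw, int, S_IRUGO);     /* preferred name */
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");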
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index 26eeb586ee00..5d3142287e14 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h | |||
@@ -65,10 +65,110 @@ | |||
65 | 65 | ||
66 | #include "iwl-dev.h" | 66 | #include "iwl-dev.h" |
67 | 67 | ||
68 | extern struct iwl_mod_params iwlagn_mod_params; | ||
69 | extern struct iwl_ucode_ops iwlagn_ucode; | ||
70 | extern struct iwl_hcmd_ops iwlagn_hcmd; | ||
71 | extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils; | ||
72 | |||
68 | int iwl_reset_ict(struct iwl_priv *priv); | 73 | int iwl_reset_ict(struct iwl_priv *priv); |
69 | void iwl_disable_ict(struct iwl_priv *priv); | 74 | void iwl_disable_ict(struct iwl_priv *priv); |
70 | int iwl_alloc_isr_ict(struct iwl_priv *priv); | 75 | int iwl_alloc_isr_ict(struct iwl_priv *priv); |
71 | void iwl_free_isr_ict(struct iwl_priv *priv); | 76 | void iwl_free_isr_ict(struct iwl_priv *priv); |
72 | irqreturn_t iwl_isr_ict(int irq, void *data); | 77 | irqreturn_t iwl_isr_ict(int irq, void *data); |
78 | bool iwl_good_ack_health(struct iwl_priv *priv, | ||
79 | struct iwl_rx_packet *pkt); | ||
80 | |||
81 | /* tx queue */ | ||
82 | void iwlagn_set_wr_ptrs(struct iwl_priv *priv, | ||
83 | int txq_id, u32 index); | ||
84 | void iwlagn_tx_queue_set_status(struct iwl_priv *priv, | ||
85 | struct iwl_tx_queue *txq, | ||
86 | int tx_fifo_id, int scd_retry); | ||
87 | void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv, | ||
88 | struct iwl_tx_queue *txq, | ||
89 | u16 byte_cnt); | ||
90 | void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, | ||
91 | struct iwl_tx_queue *txq); | ||
92 | int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, | ||
93 | int tx_fifo, int sta_id, int tid, u16 ssn_idx); | ||
94 | int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, | ||
95 | u16 ssn_idx, u8 tx_fifo); | ||
96 | void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask); | ||
97 | |||
98 | /* uCode */ | ||
99 | int iwlagn_load_ucode(struct iwl_priv *priv); | ||
100 | void iwlagn_rx_calib_result(struct iwl_priv *priv, | ||
101 | struct iwl_rx_mem_buffer *rxb); | ||
102 | void iwlagn_rx_calib_complete(struct iwl_priv *priv, | ||
103 | struct iwl_rx_mem_buffer *rxb); | ||
104 | void iwlagn_init_alive_start(struct iwl_priv *priv); | ||
105 | int iwlagn_alive_notify(struct iwl_priv *priv); | ||
106 | |||
107 | /* lib */ | ||
108 | void iwl_check_abort_status(struct iwl_priv *priv, | ||
109 | u8 frame_count, u32 status); | ||
110 | void iwlagn_rx_handler_setup(struct iwl_priv *priv); | ||
111 | void iwlagn_setup_deferred_work(struct iwl_priv *priv); | ||
112 | int iwlagn_hw_valid_rtc_data_addr(u32 addr); | ||
113 | int iwlagn_send_tx_power(struct iwl_priv *priv); | ||
114 | void iwlagn_temperature(struct iwl_priv *priv); | ||
115 | u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv); | ||
116 | const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, | ||
117 | size_t offset); | ||
118 | void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
119 | int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
120 | int iwlagn_hw_nic_init(struct iwl_priv *priv); | ||
121 | |||
122 | /* rx */ | ||
123 | void iwlagn_rx_queue_restock(struct iwl_priv *priv); | ||
124 | void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority); | ||
125 | void iwlagn_rx_replenish(struct iwl_priv *priv); | ||
126 | void iwlagn_rx_replenish_now(struct iwl_priv *priv); | ||
127 | void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
128 | int iwlagn_rxq_stop(struct iwl_priv *priv); | ||
129 | int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); | ||
130 | void iwlagn_rx_reply_rx(struct iwl_priv *priv, | ||
131 | struct iwl_rx_mem_buffer *rxb); | ||
132 | void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, | ||
133 | struct iwl_rx_mem_buffer *rxb); | ||
134 | |||
135 | /* tx */ | ||
136 | void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, | ||
137 | struct ieee80211_tx_info *info); | ||
138 | int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); | ||
139 | int iwlagn_tx_agg_start(struct iwl_priv *priv, | ||
140 | const u8 *ra, u16 tid, u16 *ssn); | ||
141 | int iwlagn_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); | ||
142 | int iwlagn_txq_check_empty(struct iwl_priv *priv, | ||
143 | int sta_id, u8 tid, int txq_id); | ||
144 | void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, | ||
145 | struct iwl_rx_mem_buffer *rxb); | ||
146 | int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); | ||
147 | void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv); | ||
148 | int iwlagn_txq_ctx_alloc(struct iwl_priv *priv); | ||
149 | void iwlagn_txq_ctx_reset(struct iwl_priv *priv); | ||
150 | void iwlagn_txq_ctx_stop(struct iwl_priv *priv); | ||
151 | |||
152 | static inline u32 iwl_tx_status_to_mac80211(u32 status) | ||
153 | { | ||
154 | status &= TX_STATUS_MSK; | ||
155 | |||
156 | switch (status) { | ||
157 | case TX_STATUS_SUCCESS: | ||
158 | case TX_STATUS_DIRECT_DONE: | ||
159 | return IEEE80211_TX_STAT_ACK; | ||
160 | case TX_STATUS_FAIL_DEST_PS: | ||
161 | return IEEE80211_TX_STAT_TX_FILTERED; | ||
162 | default: | ||
163 | return 0; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | static inline bool iwl_is_tx_success(u32 status) | ||
168 | { | ||
169 | status &= TX_STATUS_MSK; | ||
170 | return (status == TX_STATUS_SUCCESS) || | ||
171 | (status == TX_STATUS_DIRECT_DONE); | ||
172 | } | ||
73 | 173 | ||
74 | #endif /* __iwl_agn_h__ */ | 174 | #endif /* __iwl_agn_h__ */ |
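The two static inlines added to iwl-agn.h here replace the copies removed from iwl-commands.h later in this diff. A hedged sketch of how a TX-reply path would typically consume them; the helper name, the combined flags handling, and the IWL_DEBUG_TX_REPLY macro are assumptions for illustration, not code introduced by this patch.

/* Hypothetical caller; types come from the mac80211 and iwlwifi headers. */
static void iwlagn_fill_tx_info(struct iwl_priv *priv,
                                struct ieee80211_tx_info *info, u32 status)
{
        /* Translate the uCode status into mac80211 flags (ACK, filtered, or none). */
        info->flags |= iwl_tx_status_to_mac80211(status);

        if (!iwl_is_tx_success(status))
                IWL_DEBUG_TX_REPLY(priv, "TX failed, status 0x%08x\n",
                                   status & TX_STATUS_MSK);
}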
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c index de3b3f403d1f..0471c3f8713e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-calib.c | |||
@@ -593,7 +593,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, | |||
593 | IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); | 593 | IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); |
594 | 594 | ||
595 | if (!rx_enable_time) { | 595 | if (!rx_enable_time) { |
596 | IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0! \n"); | 596 | IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); |
597 | return; | 597 | return; |
598 | } | 598 | } |
599 | 599 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 6383d9f8c9b3..d830086ca195 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h | |||
@@ -106,7 +106,7 @@ enum { | |||
106 | REPLY_TX = 0x1c, | 106 | REPLY_TX = 0x1c, |
107 | REPLY_RATE_SCALE = 0x47, /* 3945 only */ | 107 | REPLY_RATE_SCALE = 0x47, /* 3945 only */ |
108 | REPLY_LEDS_CMD = 0x48, | 108 | REPLY_LEDS_CMD = 0x48, |
109 | REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ | 109 | REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ |
110 | 110 | ||
111 | /* WiMAX coexistence */ | 111 | /* WiMAX coexistence */ |
112 | COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */ | 112 | COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */ |
@@ -512,8 +512,9 @@ struct iwl_init_alive_resp { | |||
512 | * | 512 | * |
513 | * Entries without timestamps contain only event_id and data. | 513 | * Entries without timestamps contain only event_id and data. |
514 | * | 514 | * |
515 | * | ||
515 | * 2) error_event_table_ptr indicates base of the error log. This contains | 516 | * 2) error_event_table_ptr indicates base of the error log. This contains |
516 | * information about any uCode error that occurs. For 4965, the format | 517 | * information about any uCode error that occurs. For agn, the format |
517 | * of the error log is: | 518 | * of the error log is: |
518 | * | 519 | * |
519 | * __le32 valid; (nonzero) valid, (0) log is empty | 520 | * __le32 valid; (nonzero) valid, (0) log is empty |
@@ -529,6 +530,30 @@ struct iwl_init_alive_resp { | |||
529 | * __le32 bcon_time; beacon timer | 530 | * __le32 bcon_time; beacon timer |
530 | * __le32 tsf_low; network timestamp function timer | 531 | * __le32 tsf_low; network timestamp function timer |
531 | * __le32 tsf_hi; network timestamp function timer | 532 | * __le32 tsf_hi; network timestamp function timer |
533 | * __le32 gp1; GP1 timer register | ||
534 | * __le32 gp2; GP2 timer register | ||
535 | * __le32 gp3; GP3 timer register | ||
536 | * __le32 ucode_ver; uCode version | ||
537 | * __le32 hw_ver; HW Silicon version | ||
538 | * __le32 brd_ver; HW board version | ||
539 | * __le32 log_pc; log program counter | ||
540 | * __le32 frame_ptr; frame pointer | ||
541 | * __le32 stack_ptr; stack pointer | ||
542 | * __le32 hcmd; last host command | ||
543 | * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag | ||
544 | * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag | ||
545 | * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag | ||
546 | * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag | ||
547 | * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt | ||
548 | * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT | ||
549 | * __le32 wait_event; wait event() caller address | ||
550 | * __le32 l2p_control; L2pControlField | ||
551 | * __le32 l2p_duration; L2pDurationField | ||
552 | * __le32 l2p_mhvalid; L2pMhValidBits | ||
553 | * __le32 l2p_addr_match; L2pAddrMatchStat | ||
554 | * __le32 lmpm_pmg_sel; indicates which clocks are turned on (LMPM_PMG_SEL) | ||
555 | * __le32 u_timestamp; indicates the date and time of the compilation | ||
556 | * __le32 reserved; | ||
532 | * | 557 | * |
533 | * The Linux driver can print both logs to the system log when a uCode error | 558 | * The Linux driver can print both logs to the system log when a uCode error |
534 | * occurs. | 559 | * occurs. |
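The newly documented words above sit at the tail of the error event table: consecutive little-endian 32-bit values starting at error_event_table_ptr, with valid, error_id, pc, the blink/ilink registers, data1/data2, line and the timers ahead of them. That ordering is why iwl_dump_nic_error_log() earlier in this diff reads pc at word offset 2 and hcmd at word offset 22. Below is an illustrative layout of just the documented tail; the struct name and grouping are assumptions, not a type defined by this patch.

struct iwl_error_event_table_tail {
        __le32 gp1, gp2, gp3;           /* GP timer registers */
        __le32 ucode_ver;               /* uCode version */
        __le32 hw_ver;                  /* HW silicon version */
        __le32 brd_ver;                 /* HW board version */
        __le32 log_pc;                  /* log program counter */
        __le32 frame_ptr;               /* frame pointer */
        __le32 stack_ptr;               /* stack pointer */
        __le32 hcmd;                    /* last host command */
        __le32 isr0, isr1, isr2, isr3, isr4, isr_pref;  /* ISR status registers */
        __le32 wait_event;              /* wait_event() caller address */
        __le32 l2p_control;
        __le32 l2p_duration;
        __le32 l2p_mhvalid;
        __le32 l2p_addr_match;
        __le32 lmpm_pmg_sel;            /* which clocks are turned on */
        __le32 u_timestamp;             /* compilation date and time */
        __le32 reserved;
} __attribute__ ((packed));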
@@ -1637,7 +1662,7 @@ struct iwl_tx_cmd { | |||
1637 | struct ieee80211_hdr hdr[0]; | 1662 | struct ieee80211_hdr hdr[0]; |
1638 | } __attribute__ ((packed)); | 1663 | } __attribute__ ((packed)); |
1639 | 1664 | ||
1640 | /* TX command response is sent after *all* transmission attempts. | 1665 | /* TX command response is sent after *3945* transmission attempts. |
1641 | * | 1666 | * |
1642 | * NOTES: | 1667 | * NOTES: |
1643 | * | 1668 | * |
@@ -1665,24 +1690,65 @@ struct iwl_tx_cmd { | |||
1665 | * control line. Receiving is still allowed in this case. | 1690 | * control line. Receiving is still allowed in this case. |
1666 | */ | 1691 | */ |
1667 | enum { | 1692 | enum { |
1693 | TX_3945_STATUS_SUCCESS = 0x01, | ||
1694 | TX_3945_STATUS_DIRECT_DONE = 0x02, | ||
1695 | TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82, | ||
1696 | TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83, | ||
1697 | TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84, | ||
1698 | TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85, | ||
1699 | TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86, | ||
1700 | TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87, | ||
1701 | TX_3945_STATUS_FAIL_DEST_PS = 0x88, | ||
1702 | TX_3945_STATUS_FAIL_ABORTED = 0x89, | ||
1703 | TX_3945_STATUS_FAIL_BT_RETRY = 0x8a, | ||
1704 | TX_3945_STATUS_FAIL_STA_INVALID = 0x8b, | ||
1705 | TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c, | ||
1706 | TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d, | ||
1707 | TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e, | ||
1708 | TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, | ||
1709 | TX_3945_STATUS_FAIL_TX_LOCKED = 0x90, | ||
1710 | TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, | ||
1711 | }; | ||
1712 | |||
1713 | /* | ||
1714 | * TX command response is sent after *agn* transmission attempts. | ||
1715 | * | ||
1716 | * Both postpone and abort status are expected behavior from uCode; there is | ||
1717 | * no special operation required from the driver, except for RFKILL_FLUSH, | ||
1718 | * which requires the tx flush host command to flush all the tx frames in the queues | ||
1719 | */ | ||
1720 | enum { | ||
1668 | TX_STATUS_SUCCESS = 0x01, | 1721 | TX_STATUS_SUCCESS = 0x01, |
1669 | TX_STATUS_DIRECT_DONE = 0x02, | 1722 | TX_STATUS_DIRECT_DONE = 0x02, |
1723 | /* postpone TX */ | ||
1724 | TX_STATUS_POSTPONE_DELAY = 0x40, | ||
1725 | TX_STATUS_POSTPONE_FEW_BYTES = 0x41, | ||
1726 | TX_STATUS_POSTPONE_BT_PRIO = 0x42, | ||
1727 | TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, | ||
1728 | TX_STATUS_POSTPONE_CALC_TTAK = 0x44, | ||
1729 | /* abort TX */ | ||
1730 | TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, | ||
1670 | TX_STATUS_FAIL_SHORT_LIMIT = 0x82, | 1731 | TX_STATUS_FAIL_SHORT_LIMIT = 0x82, |
1671 | TX_STATUS_FAIL_LONG_LIMIT = 0x83, | 1732 | TX_STATUS_FAIL_LONG_LIMIT = 0x83, |
1672 | TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, | 1733 | TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, |
1673 | TX_STATUS_FAIL_MGMNT_ABORT = 0x85, | 1734 | TX_STATUS_FAIL_DRAIN_FLOW = 0x85, |
1674 | TX_STATUS_FAIL_NEXT_FRAG = 0x86, | 1735 | TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, |
1675 | TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, | 1736 | TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, |
1676 | TX_STATUS_FAIL_DEST_PS = 0x88, | 1737 | TX_STATUS_FAIL_DEST_PS = 0x88, |
1677 | TX_STATUS_FAIL_ABORTED = 0x89, | 1738 | TX_STATUS_FAIL_HOST_ABORTED = 0x89, |
1678 | TX_STATUS_FAIL_BT_RETRY = 0x8a, | 1739 | TX_STATUS_FAIL_BT_RETRY = 0x8a, |
1679 | TX_STATUS_FAIL_STA_INVALID = 0x8b, | 1740 | TX_STATUS_FAIL_STA_INVALID = 0x8b, |
1680 | TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, | 1741 | TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, |
1681 | TX_STATUS_FAIL_TID_DISABLE = 0x8d, | 1742 | TX_STATUS_FAIL_TID_DISABLE = 0x8d, |
1682 | TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e, | 1743 | TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, |
1683 | TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, | 1744 | TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, |
1684 | TX_STATUS_FAIL_TX_LOCKED = 0x90, | 1745 | /* uCode drop due to FW drop request */ |
1685 | TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, | 1746 | TX_STATUS_FAIL_FW_DROP = 0x90, |
1747 | /* | ||
1748 | * uCode drop due to station color mismatch | ||
1749 | * between tx command and station table | ||
1750 | */ | ||
1751 | TX_STATUS_FAIL_STA_COLOR_MISMATCH_DROP = 0x91, | ||
1686 | }; | 1752 | }; |
1687 | 1753 | ||
1688 | #define TX_PACKET_MODE_REGULAR 0x0000 | 1754 | #define TX_PACKET_MODE_REGULAR 0x0000 |
@@ -1704,30 +1770,6 @@ enum { | |||
1704 | TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ | 1770 | TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ |
1705 | }; | 1771 | }; |
1706 | 1772 | ||
1707 | static inline u32 iwl_tx_status_to_mac80211(u32 status) | ||
1708 | { | ||
1709 | status &= TX_STATUS_MSK; | ||
1710 | |||
1711 | switch (status) { | ||
1712 | case TX_STATUS_SUCCESS: | ||
1713 | case TX_STATUS_DIRECT_DONE: | ||
1714 | return IEEE80211_TX_STAT_ACK; | ||
1715 | case TX_STATUS_FAIL_DEST_PS: | ||
1716 | return IEEE80211_TX_STAT_TX_FILTERED; | ||
1717 | default: | ||
1718 | return 0; | ||
1719 | } | ||
1720 | } | ||
1721 | |||
1722 | static inline bool iwl_is_tx_success(u32 status) | ||
1723 | { | ||
1724 | status &= TX_STATUS_MSK; | ||
1725 | return (status == TX_STATUS_SUCCESS) || | ||
1726 | (status == TX_STATUS_DIRECT_DONE); | ||
1727 | } | ||
1728 | |||
1729 | |||
1730 | |||
1731 | /* ******************************* | 1773 | /* ******************************* |
1732 | * TX aggregation status | 1774 | * TX aggregation status |
1733 | ******************************* */ | 1775 | ******************************* */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 0ee8cc296e48..2a89747d3473 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -66,7 +66,7 @@ MODULE_LICENSE("GPL"); | |||
66 | */ | 66 | */ |
67 | static bool bt_coex_active = true; | 67 | static bool bt_coex_active = true; |
68 | module_param(bt_coex_active, bool, S_IRUGO); | 68 | module_param(bt_coex_active, bool, S_IRUGO); |
69 | MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n"); | 69 | MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); |
70 | 70 | ||
71 | static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { | 71 | static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { |
72 | {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP, | 72 | {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP, |
@@ -141,30 +141,6 @@ const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = { | |||
141 | }; | 141 | }; |
142 | EXPORT_SYMBOL(iwl_rates); | 142 | EXPORT_SYMBOL(iwl_rates); |
143 | 143 | ||
144 | /** | ||
145 | * translate ucode response to mac80211 tx status control values | ||
146 | */ | ||
147 | void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, | ||
148 | struct ieee80211_tx_info *info) | ||
149 | { | ||
150 | struct ieee80211_tx_rate *r = &info->control.rates[0]; | ||
151 | |||
152 | info->antenna_sel_tx = | ||
153 | ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); | ||
154 | if (rate_n_flags & RATE_MCS_HT_MSK) | ||
155 | r->flags |= IEEE80211_TX_RC_MCS; | ||
156 | if (rate_n_flags & RATE_MCS_GF_MSK) | ||
157 | r->flags |= IEEE80211_TX_RC_GREEN_FIELD; | ||
158 | if (rate_n_flags & RATE_MCS_HT40_MSK) | ||
159 | r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; | ||
160 | if (rate_n_flags & RATE_MCS_DUP_MSK) | ||
161 | r->flags |= IEEE80211_TX_RC_DUP_DATA; | ||
162 | if (rate_n_flags & RATE_MCS_SGI_MSK) | ||
163 | r->flags |= IEEE80211_TX_RC_SHORT_GI; | ||
164 | r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band); | ||
165 | } | ||
166 | EXPORT_SYMBOL(iwl_hwrate_to_tx_control); | ||
167 | |||
168 | int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) | 144 | int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) |
169 | { | 145 | { |
170 | int idx = 0; | 146 | int idx = 0; |
@@ -196,27 +172,6 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) | |||
196 | } | 172 | } |
197 | EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx); | 173 | EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx); |
198 | 174 | ||
199 | int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) | ||
200 | { | ||
201 | int idx = 0; | ||
202 | int band_offset = 0; | ||
203 | |||
204 | /* HT rate format: mac80211 wants an MCS number, which is just LSB */ | ||
205 | if (rate_n_flags & RATE_MCS_HT_MSK) { | ||
206 | idx = (rate_n_flags & 0xff); | ||
207 | return idx; | ||
208 | /* Legacy rate format, search for match in table */ | ||
209 | } else { | ||
210 | if (band == IEEE80211_BAND_5GHZ) | ||
211 | band_offset = IWL_FIRST_OFDM_RATE; | ||
212 | for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) | ||
213 | if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) | ||
214 | return idx - band_offset; | ||
215 | } | ||
216 | |||
217 | return -1; | ||
218 | } | ||
219 | |||
220 | u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant) | 175 | u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant) |
221 | { | 176 | { |
222 | int i; | 177 | int i; |
@@ -266,74 +221,16 @@ void iwl_hw_detect(struct iwl_priv *priv) | |||
266 | } | 221 | } |
267 | EXPORT_SYMBOL(iwl_hw_detect); | 222 | EXPORT_SYMBOL(iwl_hw_detect); |
268 | 223 | ||
269 | int iwl_hw_nic_init(struct iwl_priv *priv) | ||
270 | { | ||
271 | unsigned long flags; | ||
272 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
273 | int ret; | ||
274 | |||
275 | /* nic_init */ | ||
276 | spin_lock_irqsave(&priv->lock, flags); | ||
277 | priv->cfg->ops->lib->apm_ops.init(priv); | ||
278 | |||
279 | /* Set interrupt coalescing calibration timer to default (512 usecs) */ | ||
280 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); | ||
281 | |||
282 | spin_unlock_irqrestore(&priv->lock, flags); | ||
283 | |||
284 | ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); | ||
285 | |||
286 | priv->cfg->ops->lib->apm_ops.config(priv); | ||
287 | |||
288 | /* Allocate the RX queue, or reset if it is already allocated */ | ||
289 | if (!rxq->bd) { | ||
290 | ret = iwl_rx_queue_alloc(priv); | ||
291 | if (ret) { | ||
292 | IWL_ERR(priv, "Unable to initialize Rx queue\n"); | ||
293 | return -ENOMEM; | ||
294 | } | ||
295 | } else | ||
296 | iwl_rx_queue_reset(priv, rxq); | ||
297 | |||
298 | iwl_rx_replenish(priv); | ||
299 | |||
300 | iwl_rx_init(priv, rxq); | ||
301 | |||
302 | spin_lock_irqsave(&priv->lock, flags); | ||
303 | |||
304 | rxq->need_update = 1; | ||
305 | iwl_rx_queue_update_write_ptr(priv, rxq); | ||
306 | |||
307 | spin_unlock_irqrestore(&priv->lock, flags); | ||
308 | |||
309 | /* Allocate or reset and init all Tx and Command queues */ | ||
310 | if (!priv->txq) { | ||
311 | ret = iwl_txq_ctx_alloc(priv); | ||
312 | if (ret) | ||
313 | return ret; | ||
314 | } else | ||
315 | iwl_txq_ctx_reset(priv); | ||
316 | |||
317 | set_bit(STATUS_INIT, &priv->status); | ||
318 | |||
319 | return 0; | ||
320 | } | ||
321 | EXPORT_SYMBOL(iwl_hw_nic_init); | ||
322 | |||
323 | /* | 224 | /* |
324 | * QoS support | 225 | * QoS support |
325 | */ | 226 | */ |
326 | void iwl_activate_qos(struct iwl_priv *priv, u8 force) | 227 | static void iwl_update_qos(struct iwl_priv *priv) |
327 | { | 228 | { |
328 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | 229 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
329 | return; | 230 | return; |
330 | 231 | ||
331 | priv->qos_data.def_qos_parm.qos_flags = 0; | 232 | priv->qos_data.def_qos_parm.qos_flags = 0; |
332 | 233 | ||
333 | if (priv->qos_data.qos_cap.q_AP.queue_request && | ||
334 | !priv->qos_data.qos_cap.q_AP.txop_request) | ||
335 | priv->qos_data.def_qos_parm.qos_flags |= | ||
336 | QOS_PARAM_FLG_TXOP_TYPE_MSK; | ||
337 | if (priv->qos_data.qos_active) | 234 | if (priv->qos_data.qos_active) |
338 | priv->qos_data.def_qos_parm.qos_flags |= | 235 | priv->qos_data.def_qos_parm.qos_flags |= |
339 | QOS_PARAM_FLG_UPDATE_EDCA_MSK; | 236 | QOS_PARAM_FLG_UPDATE_EDCA_MSK; |
@@ -341,118 +238,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force) | |||
341 | if (priv->current_ht_config.is_ht) | 238 | if (priv->current_ht_config.is_ht) |
342 | priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; | 239 | priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; |
343 | 240 | ||
344 | if (force || iwl_is_associated(priv)) { | 241 | IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", |
345 | IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", | 242 | priv->qos_data.qos_active, |
346 | priv->qos_data.qos_active, | 243 | priv->qos_data.def_qos_parm.qos_flags); |
347 | priv->qos_data.def_qos_parm.qos_flags); | ||
348 | |||
349 | iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM, | ||
350 | sizeof(struct iwl_qosparam_cmd), | ||
351 | &priv->qos_data.def_qos_parm, NULL); | ||
352 | } | ||
353 | } | ||
354 | EXPORT_SYMBOL(iwl_activate_qos); | ||
355 | |||
356 | /* | ||
357 | * AC CWmin CW max AIFSN TXOP Limit TXOP Limit | ||
358 | * (802.11b) (802.11a/g) | ||
359 | * AC_BK 15 1023 7 0 0 | ||
360 | * AC_BE 15 1023 3 0 0 | ||
361 | * AC_VI 7 15 2 6.016ms 3.008ms | ||
362 | * AC_VO 3 7 2 3.264ms 1.504ms | ||
363 | */ | ||
364 | void iwl_reset_qos(struct iwl_priv *priv) | ||
365 | { | ||
366 | u16 cw_min = 15; | ||
367 | u16 cw_max = 1023; | ||
368 | u8 aifs = 2; | ||
369 | bool is_legacy = false; | ||
370 | unsigned long flags; | ||
371 | int i; | ||
372 | |||
373 | spin_lock_irqsave(&priv->lock, flags); | ||
374 | /* QoS always active in AP and ADHOC mode | ||
375 | * In STA mode wait for association | ||
376 | */ | ||
377 | if (priv->iw_mode == NL80211_IFTYPE_ADHOC || | ||
378 | priv->iw_mode == NL80211_IFTYPE_AP) | ||
379 | priv->qos_data.qos_active = 1; | ||
380 | else | ||
381 | priv->qos_data.qos_active = 0; | ||
382 | |||
383 | /* check for legacy mode */ | ||
384 | if ((priv->iw_mode == NL80211_IFTYPE_ADHOC && | ||
385 | (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) || | ||
386 | (priv->iw_mode == NL80211_IFTYPE_STATION && | ||
387 | (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) { | ||
388 | cw_min = 31; | ||
389 | is_legacy = 1; | ||
390 | } | ||
391 | |||
392 | if (priv->qos_data.qos_active) | ||
393 | aifs = 3; | ||
394 | |||
395 | /* AC_BE */ | ||
396 | priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); | ||
397 | priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); | ||
398 | priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; | ||
399 | priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; | ||
400 | priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; | ||
401 | |||
402 | if (priv->qos_data.qos_active) { | ||
403 | /* AC_BK */ | ||
404 | i = 1; | ||
405 | priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); | ||
406 | priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); | ||
407 | priv->qos_data.def_qos_parm.ac[i].aifsn = 7; | ||
408 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; | ||
409 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
410 | |||
411 | /* AC_VI */ | ||
412 | i = 2; | ||
413 | priv->qos_data.def_qos_parm.ac[i].cw_min = | ||
414 | cpu_to_le16((cw_min + 1) / 2 - 1); | ||
415 | priv->qos_data.def_qos_parm.ac[i].cw_max = | ||
416 | cpu_to_le16(cw_min); | ||
417 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; | ||
418 | if (is_legacy) | ||
419 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
420 | cpu_to_le16(6016); | ||
421 | else | ||
422 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
423 | cpu_to_le16(3008); | ||
424 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
425 | |||
426 | /* AC_VO */ | ||
427 | i = 3; | ||
428 | priv->qos_data.def_qos_parm.ac[i].cw_min = | ||
429 | cpu_to_le16((cw_min + 1) / 4 - 1); | ||
430 | priv->qos_data.def_qos_parm.ac[i].cw_max = | ||
431 | cpu_to_le16((cw_min + 1) / 2 - 1); | ||
432 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; | ||
433 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
434 | if (is_legacy) | ||
435 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
436 | cpu_to_le16(3264); | ||
437 | else | ||
438 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
439 | cpu_to_le16(1504); | ||
440 | } else { | ||
441 | for (i = 1; i < 4; i++) { | ||
442 | priv->qos_data.def_qos_parm.ac[i].cw_min = | ||
443 | cpu_to_le16(cw_min); | ||
444 | priv->qos_data.def_qos_parm.ac[i].cw_max = | ||
445 | cpu_to_le16(cw_max); | ||
446 | priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; | ||
447 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; | ||
448 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
449 | } | ||
450 | } | ||
451 | IWL_DEBUG_QOS(priv, "set QoS to default \n"); | ||
452 | 244 | ||
453 | spin_unlock_irqrestore(&priv->lock, flags); | 245 | iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM, |
246 | sizeof(struct iwl_qosparam_cmd), | ||
247 | &priv->qos_data.def_qos_parm, NULL); | ||
454 | } | 248 | } |
455 | EXPORT_SYMBOL(iwl_reset_qos); | ||
456 | 249 | ||
457 | #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ | 250 | #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ |
458 | #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ | 251 | #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ |
@@ -1092,12 +885,12 @@ void iwl_set_rxon_chain(struct iwl_priv *priv) | |||
1092 | rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; | 885 | rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; |
1093 | 886 | ||
1094 | /* copied from 'iwl_bg_request_scan()' */ | 887 | /* copied from 'iwl_bg_request_scan()' */ |
1095 | /* Force use of chains B and C (0x6) for Rx for 4965 | 888 | /* Force use of chains B and C (0x6) for Rx |
1096 | * Avoid A (0x1) because of its off-channel reception on A-band. | 889 | * Avoid A (0x1) because the device has off-channel reception on A-band. |
1097 | * MIMO is not used here, but value is required */ | 890 | * MIMO is not used here, but value is required */ |
1098 | if (iwl_is_monitor_mode(priv) && | 891 | if (iwl_is_monitor_mode(priv) && |
1099 | !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) && | 892 | !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) && |
1100 | ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) { | 893 | priv->cfg->off_channel_workaround) { |
1101 | rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS; | 894 | rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS; |
1102 | rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS; | 895 | rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS; |
1103 | rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; | 896 | rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; |
@@ -1584,10 +1377,11 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) | |||
1584 | int ret = 0; | 1377 | int ret = 0; |
1585 | s8 prev_tx_power = priv->tx_power_user_lmt; | 1378 | s8 prev_tx_power = priv->tx_power_user_lmt; |
1586 | 1379 | ||
1587 | if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) { | 1380 | if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) { |
1588 | IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n", | 1381 | IWL_WARN(priv, |
1382 | "Requested user TXPOWER %d below lower limit %d.\n", | ||
1589 | tx_power, | 1383 | tx_power, |
1590 | IWL_TX_POWER_TARGET_POWER_MIN); | 1384 | IWLAGN_TX_POWER_TARGET_POWER_MIN); |
1591 | return -EINVAL; | 1385 | return -EINVAL; |
1592 | } | 1386 | } |
1593 | 1387 | ||
@@ -1631,10 +1425,11 @@ irqreturn_t iwl_isr_legacy(int irq, void *data) | |||
1631 | struct iwl_priv *priv = data; | 1425 | struct iwl_priv *priv = data; |
1632 | u32 inta, inta_mask; | 1426 | u32 inta, inta_mask; |
1633 | u32 inta_fh; | 1427 | u32 inta_fh; |
1428 | unsigned long flags; | ||
1634 | if (!priv) | 1429 | if (!priv) |
1635 | return IRQ_NONE; | 1430 | return IRQ_NONE; |
1636 | 1431 | ||
1637 | spin_lock(&priv->lock); | 1432 | spin_lock_irqsave(&priv->lock, flags); |
1638 | 1433 | ||
1639 | /* Disable (but don't clear!) interrupts here to avoid | 1434 | /* Disable (but don't clear!) interrupts here to avoid |
1640 | * back-to-back ISRs and sporadic interrupts from our NIC. | 1435 | * back-to-back ISRs and sporadic interrupts from our NIC. |
@@ -1672,7 +1467,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data) | |||
1672 | tasklet_schedule(&priv->irq_tasklet); | 1467 | tasklet_schedule(&priv->irq_tasklet); |
1673 | 1468 | ||
1674 | unplugged: | 1469 | unplugged: |
1675 | spin_unlock(&priv->lock); | 1470 | spin_unlock_irqrestore(&priv->lock, flags); |
1676 | return IRQ_HANDLED; | 1471 | return IRQ_HANDLED; |
1677 | 1472 | ||
1678 | none: | 1473 | none: |
@@ -1680,7 +1475,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data) | |||
1680 | /* only Re-enable if disabled by irq */ | 1475 | /* only Re-enable if disabled by irq */ |
1681 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | 1476 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) |
1682 | iwl_enable_interrupts(priv); | 1477 | iwl_enable_interrupts(priv); |
1683 | spin_unlock(&priv->lock); | 1478 | spin_unlock_irqrestore(&priv->lock, flags); |
1684 | return IRQ_NONE; | 1479 | return IRQ_NONE; |
1685 | } | 1480 | } |
1686 | EXPORT_SYMBOL(iwl_isr_legacy); | 1481 | EXPORT_SYMBOL(iwl_isr_legacy); |
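The change above switches iwl_isr_legacy() from spin_lock()/spin_unlock() to the irqsave variants, so the handler no longer assumes it is always entered with interrupts disabled. A generic sketch of that pattern; the device type and the work done under the lock are placeholders, not driver code:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Placeholder device context; the real driver uses struct iwl_priv. */
struct demo_dev {
        spinlock_t lock;
};

static irqreturn_t demo_isr(int irq, void *data)
{
        struct demo_dev *dev = data;
        unsigned long flags;

        /* Save and restore the interrupt state instead of assuming the
         * handler always runs with interrupts already disabled. */
        spin_lock_irqsave(&dev->lock, flags);
        /* ... read and mask the interrupt status registers here ... */
        spin_unlock_irqrestore(&dev->lock, flags);

        return IRQ_HANDLED;
}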
@@ -1993,12 +1788,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, | |||
1993 | cpu_to_le16((params->txop * 32)); | 1788 | cpu_to_le16((params->txop * 32)); |
1994 | 1789 | ||
1995 | priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; | 1790 | priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; |
1996 | priv->qos_data.qos_active = 1; | ||
1997 | |||
1998 | if (priv->iw_mode == NL80211_IFTYPE_AP) | ||
1999 | iwl_activate_qos(priv, 1); | ||
2000 | else if (priv->assoc_id && iwl_is_associated(priv)) | ||
2001 | iwl_activate_qos(priv, 0); | ||
2002 | 1791 | ||
2003 | spin_unlock_irqrestore(&priv->lock, flags); | 1792 | spin_unlock_irqrestore(&priv->lock, flags); |
2004 | 1793 | ||
@@ -2013,7 +1802,7 @@ static void iwl_ht_conf(struct iwl_priv *priv, | |||
2013 | struct iwl_ht_config *ht_conf = &priv->current_ht_config; | 1802 | struct iwl_ht_config *ht_conf = &priv->current_ht_config; |
2014 | struct ieee80211_sta *sta; | 1803 | struct ieee80211_sta *sta; |
2015 | 1804 | ||
2016 | IWL_DEBUG_MAC80211(priv, "enter: \n"); | 1805 | IWL_DEBUG_MAC80211(priv, "enter:\n"); |
2017 | 1806 | ||
2018 | if (!ht_conf->is_ht) | 1807 | if (!ht_conf->is_ht) |
2019 | return; | 1808 | return; |
@@ -2269,11 +2058,8 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
2269 | IWL_DEBUG_MAC80211(priv, "leave\n"); | 2058 | IWL_DEBUG_MAC80211(priv, "leave\n"); |
2270 | spin_unlock_irqrestore(&priv->lock, flags); | 2059 | spin_unlock_irqrestore(&priv->lock, flags); |
2271 | 2060 | ||
2272 | iwl_reset_qos(priv); | ||
2273 | |||
2274 | priv->cfg->ops->lib->post_associate(priv); | 2061 | priv->cfg->ops->lib->post_associate(priv); |
2275 | 2062 | ||
2276 | |||
2277 | return 0; | 2063 | return 0; |
2278 | } | 2064 | } |
2279 | EXPORT_SYMBOL(iwl_mac_beacon_update); | 2065 | EXPORT_SYMBOL(iwl_mac_beacon_update); |
@@ -2495,6 +2281,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) | |||
2495 | iwl_set_tx_power(priv, conf->power_level, false); | 2281 | iwl_set_tx_power(priv, conf->power_level, false); |
2496 | } | 2282 | } |
2497 | 2283 | ||
2284 | if (changed & IEEE80211_CONF_CHANGE_QOS) { | ||
2285 | bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS); | ||
2286 | |||
2287 | spin_lock_irqsave(&priv->lock, flags); | ||
2288 | priv->qos_data.qos_active = qos_active; | ||
2289 | iwl_update_qos(priv); | ||
2290 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2291 | } | ||
2292 | |||
2498 | if (!iwl_is_ready(priv)) { | 2293 | if (!iwl_is_ready(priv)) { |
2499 | IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); | 2294 | IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); |
2500 | goto out; | 2295 | goto out; |
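Taken together with the iwl-core.c changes above, QoS handling is now driven entirely by mac80211: the per-AC parameters arrive through iwl_mac_conf_tx(), the removed iwl_reset_qos() defaults are no longer needed, and activation is signalled by the IEEE80211_CONF_QOS flag handled here. A condensed view of the new activation path, paraphrased from the hunks above rather than additional driver code:

if (changed & IEEE80211_CONF_CHANGE_QOS) {
        spin_lock_irqsave(&priv->lock, flags);
        /* Mirror mac80211's view of QoS, then push the EDCA parameters. */
        priv->qos_data.qos_active = !!(conf->flags & IEEE80211_CONF_QOS);
        iwl_update_qos(priv);   /* sends REPLY_QOS_PARAM asynchronously */
        spin_unlock_irqrestore(&priv->lock, flags);
}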
@@ -2529,8 +2324,6 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw) | |||
2529 | memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); | 2324 | memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); |
2530 | spin_unlock_irqrestore(&priv->lock, flags); | 2325 | spin_unlock_irqrestore(&priv->lock, flags); |
2531 | 2326 | ||
2532 | iwl_reset_qos(priv); | ||
2533 | |||
2534 | spin_lock_irqsave(&priv->lock, flags); | 2327 | spin_lock_irqsave(&priv->lock, flags); |
2535 | priv->assoc_id = 0; | 2328 | priv->assoc_id = 0; |
2536 | priv->assoc_capability = 0; | 2329 | priv->assoc_capability = 0; |
@@ -2574,7 +2367,7 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv) | |||
2574 | sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues, | 2367 | sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues, |
2575 | GFP_KERNEL); | 2368 | GFP_KERNEL); |
2576 | if (!priv->txq) { | 2369 | if (!priv->txq) { |
2577 | IWL_ERR(priv, "Not enough memory for txq \n"); | 2370 | IWL_ERR(priv, "Not enough memory for txq\n"); |
2578 | return -ENOMEM; | 2371 | return -ENOMEM; |
2579 | } | 2372 | } |
2580 | return 0; | 2373 | return 0; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index f3b6c72d82cd..d89755f5031a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h | |||
@@ -305,6 +305,9 @@ struct iwl_cfg { | |||
305 | s32 chain_noise_scale; | 305 | s32 chain_noise_scale; |
306 | /* timer period for monitor the driver queues */ | 306 | /* timer period for monitor the driver queues */ |
307 | u32 monitor_recover_period; | 307 | u32 monitor_recover_period; |
308 | bool temperature_kelvin; | ||
309 | bool off_channel_workaround; | ||
310 | u32 max_event_log_size; | ||
308 | }; | 311 | }; |
309 | 312 | ||
310 | /*************************** | 313 | /*************************** |
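The three fields added to struct iwl_cfg let each device configuration state whether its uCode reports temperature in Kelvin, whether the off-channel monitor-mode RX workaround applies (used by iwl_set_rxon_chain() earlier in this diff), and how large the uCode event log is. A hypothetical initializer fragment follows; the values and the config name are illustrative, not taken from a real iwlXXXX_cfg in this patch:

struct iwl_cfg iwl_example_cfg = {
        /* ... existing fields elided ... */
        .temperature_kelvin      = true,   /* uCode reports temperature in Kelvin */
        .off_channel_workaround  = true,   /* force chains B+C in monitor mode on A-band */
        .max_event_log_size      = 512,    /* uCode event log capacity, in entries */
};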
@@ -314,8 +317,7 @@ struct iwl_cfg { | |||
314 | struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, | 317 | struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, |
315 | struct ieee80211_ops *hw_ops); | 318 | struct ieee80211_ops *hw_ops); |
316 | void iwl_hw_detect(struct iwl_priv *priv); | 319 | void iwl_hw_detect(struct iwl_priv *priv); |
317 | void iwl_reset_qos(struct iwl_priv *priv); | 320 | void iwl_activate_qos(struct iwl_priv *priv); |
318 | void iwl_activate_qos(struct iwl_priv *priv, u8 force); | ||
319 | int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, | 321 | int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, |
320 | const struct ieee80211_tx_queue_params *params); | 322 | const struct ieee80211_tx_queue_params *params); |
321 | void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt); | 323 | void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt); |
@@ -336,7 +338,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv); | |||
336 | void iwl_configure_filter(struct ieee80211_hw *hw, | 338 | void iwl_configure_filter(struct ieee80211_hw *hw, |
337 | unsigned int changed_flags, | 339 | unsigned int changed_flags, |
338 | unsigned int *total_flags, u64 multicast); | 340 | unsigned int *total_flags, u64 multicast); |
339 | int iwl_hw_nic_init(struct iwl_priv *priv); | ||
340 | int iwl_set_hw_params(struct iwl_priv *priv); | 341 | int iwl_set_hw_params(struct iwl_priv *priv); |
341 | bool iwl_is_monitor_mode(struct iwl_priv *priv); | 342 | bool iwl_is_monitor_mode(struct iwl_priv *priv); |
342 | void iwl_post_associate(struct iwl_priv *priv); | 343 | void iwl_post_associate(struct iwl_priv *priv); |
@@ -420,21 +421,13 @@ void iwl_rx_reply_error(struct iwl_priv *priv, | |||
420 | /***************************************************** | 421 | /***************************************************** |
421 | * RX | 422 | * RX |
422 | ******************************************************/ | 423 | ******************************************************/ |
423 | void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
424 | void iwl_cmd_queue_free(struct iwl_priv *priv); | 424 | void iwl_cmd_queue_free(struct iwl_priv *priv); |
425 | int iwl_rx_queue_alloc(struct iwl_priv *priv); | 425 | int iwl_rx_queue_alloc(struct iwl_priv *priv); |
426 | void iwl_rx_handle(struct iwl_priv *priv); | 426 | void iwl_rx_handle(struct iwl_priv *priv); |
427 | void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, | 427 | void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, |
428 | struct iwl_rx_queue *q); | 428 | struct iwl_rx_queue *q); |
429 | void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
430 | void iwl_rx_replenish(struct iwl_priv *priv); | ||
431 | void iwl_rx_replenish_now(struct iwl_priv *priv); | ||
432 | int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
433 | void iwl_rx_queue_restock(struct iwl_priv *priv); | ||
434 | int iwl_rx_queue_space(const struct iwl_rx_queue *q); | 429 | int iwl_rx_queue_space(const struct iwl_rx_queue *q); |
435 | void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority); | ||
436 | void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); | 430 | void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); |
437 | int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); | ||
438 | /* Handlers */ | 431 | /* Handlers */ |
439 | void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, | 432 | void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, |
440 | struct iwl_rx_mem_buffer *rxb); | 433 | struct iwl_rx_mem_buffer *rxb); |
@@ -455,14 +448,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); | |||
455 | /***************************************************** | 448 | /***************************************************** |
456 | * TX | 449 | * TX |
457 | ******************************************************/ | 450 | ******************************************************/ |
458 | int iwl_txq_ctx_alloc(struct iwl_priv *priv); | ||
459 | void iwl_txq_ctx_reset(struct iwl_priv *priv); | ||
460 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 451 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
461 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, | 452 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, |
462 | struct iwl_tx_queue *txq, | 453 | struct iwl_tx_queue *txq, |
463 | dma_addr_t addr, u16 len, u8 reset, u8 pad); | 454 | dma_addr_t addr, u16 len, u8 reset, u8 pad); |
464 | int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); | ||
465 | void iwl_hw_txq_ctx_free(struct iwl_priv *priv); | ||
466 | int iwl_hw_tx_queue_init(struct iwl_priv *priv, | 455 | int iwl_hw_tx_queue_init(struct iwl_priv *priv, |
467 | struct iwl_tx_queue *txq); | 456 | struct iwl_tx_queue *txq); |
468 | void iwl_free_tfds_in_queue(struct iwl_priv *priv, | 457 | void iwl_free_tfds_in_queue(struct iwl_priv *priv, |
@@ -473,9 +462,6 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, | |||
473 | void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, | 462 | void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, |
474 | int slots_num, u32 txq_id); | 463 | int slots_num, u32 txq_id); |
475 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); | 464 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); |
476 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); | ||
477 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); | ||
478 | int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id); | ||
479 | /***************************************************** | 465 | /***************************************************** |
480 | * TX power | 466 | * TX power |
481 | ****************************************************/ | 467 | ****************************************************/ |
@@ -485,10 +471,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force); | |||
485 | * Rate | 471 | * Rate |
486 | ******************************************************************************/ | 472 | ******************************************************************************/ |
487 | 473 | ||
488 | void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, | ||
489 | struct ieee80211_tx_info *info); | ||
490 | int iwl_hwrate_to_plcp_idx(u32 rate_n_flags); | 474 | int iwl_hwrate_to_plcp_idx(u32 rate_n_flags); |
491 | int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); | ||
492 | 475 | ||
493 | u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv); | 476 | u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv); |
494 | 477 | ||
@@ -688,12 +671,6 @@ extern int iwl_send_statistics_request(struct iwl_priv *priv, | |||
688 | extern int iwl_verify_ucode(struct iwl_priv *priv); | 671 | extern int iwl_verify_ucode(struct iwl_priv *priv); |
689 | extern int iwl_send_lq_cmd(struct iwl_priv *priv, | 672 | extern int iwl_send_lq_cmd(struct iwl_priv *priv, |
690 | struct iwl_link_quality_cmd *lq, u8 flags, bool init); | 673 | struct iwl_link_quality_cmd *lq, u8 flags, bool init); |
691 | extern void iwl_rx_reply_rx(struct iwl_priv *priv, | ||
692 | struct iwl_rx_mem_buffer *rxb); | ||
693 | extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv, | ||
694 | struct iwl_rx_mem_buffer *rxb); | ||
695 | void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | ||
696 | struct iwl_rx_mem_buffer *rxb); | ||
697 | void iwl_apm_stop(struct iwl_priv *priv); | 674 | void iwl_apm_stop(struct iwl_priv *priv); |
698 | int iwl_apm_init(struct iwl_priv *priv); | 675 | int iwl_apm_init(struct iwl_priv *priv); |
699 | 676 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index e847e6197a3d..9466e909f553 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h | |||
@@ -43,6 +43,7 @@ | |||
43 | #include "iwl-debug.h" | 43 | #include "iwl-debug.h" |
44 | #include "iwl-4965-hw.h" | 44 | #include "iwl-4965-hw.h" |
45 | #include "iwl-3945-hw.h" | 45 | #include "iwl-3945-hw.h" |
46 | #include "iwl-agn-hw.h" | ||
46 | #include "iwl-led.h" | 47 | #include "iwl-led.h" |
47 | #include "iwl-power.h" | 48 | #include "iwl-power.h" |
48 | #include "iwl-agn-rs.h" | 49 | #include "iwl-agn-rs.h" |
@@ -57,6 +58,7 @@ extern struct iwl_cfg iwl5100_abg_cfg; | |||
57 | extern struct iwl_cfg iwl5150_agn_cfg; | 58 | extern struct iwl_cfg iwl5150_agn_cfg; |
58 | extern struct iwl_cfg iwl5150_abg_cfg; | 59 | extern struct iwl_cfg iwl5150_abg_cfg; |
59 | extern struct iwl_cfg iwl6000i_2agn_cfg; | 60 | extern struct iwl_cfg iwl6000i_2agn_cfg; |
61 | extern struct iwl_cfg iwl6000i_g2_2agn_cfg; | ||
60 | extern struct iwl_cfg iwl6000i_2abg_cfg; | 62 | extern struct iwl_cfg iwl6000i_2abg_cfg; |
61 | extern struct iwl_cfg iwl6000i_2bg_cfg; | 63 | extern struct iwl_cfg iwl6000i_2bg_cfg; |
62 | extern struct iwl_cfg iwl6000_3agn_cfg; | 64 | extern struct iwl_cfg iwl6000_3agn_cfg; |
@@ -67,45 +69,6 @@ extern struct iwl_cfg iwl1000_bg_cfg; | |||
67 | 69 | ||
68 | struct iwl_tx_queue; | 70 | struct iwl_tx_queue; |
69 | 71 | ||
70 | /* shared structures from iwl-5000.c */ | ||
71 | extern struct iwl_mod_params iwl50_mod_params; | ||
72 | extern struct iwl_ucode_ops iwl5000_ucode; | ||
73 | extern struct iwl_lib_ops iwl5000_lib; | ||
74 | extern struct iwl_hcmd_ops iwl5000_hcmd; | ||
75 | extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils; | ||
76 | |||
77 | /* shared functions from iwl-5000.c */ | ||
78 | extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len); | ||
79 | extern u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, | ||
80 | u8 *data); | ||
81 | extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info, | ||
82 | __le32 *tx_flags); | ||
83 | extern int iwl5000_calc_rssi(struct iwl_priv *priv, | ||
84 | struct iwl_rx_phy_res *rx_resp); | ||
85 | extern void iwl5000_nic_config(struct iwl_priv *priv); | ||
86 | extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv); | ||
87 | extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, | ||
88 | size_t offset); | ||
89 | extern void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv, | ||
90 | struct iwl_tx_queue *txq, | ||
91 | u16 byte_cnt); | ||
92 | extern void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, | ||
93 | struct iwl_tx_queue *txq); | ||
94 | extern int iwl5000_load_ucode(struct iwl_priv *priv); | ||
95 | extern void iwl5000_init_alive_start(struct iwl_priv *priv); | ||
96 | extern int iwl5000_alive_notify(struct iwl_priv *priv); | ||
97 | extern int iwl5000_hw_set_hw_params(struct iwl_priv *priv); | ||
98 | extern int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id, | ||
99 | int tx_fifo, int sta_id, int tid, u16 ssn_idx); | ||
100 | extern int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, | ||
101 | u16 ssn_idx, u8 tx_fifo); | ||
102 | extern void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask); | ||
103 | extern void iwl5000_setup_deferred_work(struct iwl_priv *priv); | ||
104 | extern void iwl5000_rx_handler_setup(struct iwl_priv *priv); | ||
105 | extern int iwl5000_hw_valid_rtc_data_addr(u32 addr); | ||
106 | extern int iwl5000_send_tx_power(struct iwl_priv *priv); | ||
107 | extern void iwl5000_temperature(struct iwl_priv *priv); | ||
108 | |||
109 | /* CT-KILL constants */ | 72 | /* CT-KILL constants */ |
110 | #define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ | 73 | #define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ |
111 | #define CT_KILL_THRESHOLD 114 /* in Celsius */ | 74 | #define CT_KILL_THRESHOLD 114 /* in Celsius */ |
@@ -363,13 +326,6 @@ enum { | |||
363 | 326 | ||
364 | #define DEF_CMD_PAYLOAD_SIZE 320 | 327 | #define DEF_CMD_PAYLOAD_SIZE 320 |
365 | 328 | ||
366 | /* | ||
367 | * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header, | ||
368 | * SNAP header and alignment. It should also be big enough for 802.11 | ||
369 | * control frames. | ||
370 | */ | ||
371 | #define IWL_LINK_HDR_MAX 64 | ||
372 | |||
373 | /** | 329 | /** |
374 | * struct iwl_device_cmd | 330 | * struct iwl_device_cmd |
375 | * | 331 | * |
@@ -521,30 +477,9 @@ struct iwl_ht_config { | |||
521 | u8 non_GF_STA_present; | 477 | u8 non_GF_STA_present; |
522 | }; | 478 | }; |
523 | 479 | ||
524 | union iwl_qos_capabity { | ||
525 | struct { | ||
526 | u8 edca_count:4; /* bit 0-3 */ | ||
527 | u8 q_ack:1; /* bit 4 */ | ||
528 | u8 queue_request:1; /* bit 5 */ | ||
529 | u8 txop_request:1; /* bit 6 */ | ||
530 | u8 reserved:1; /* bit 7 */ | ||
531 | } q_AP; | ||
532 | struct { | ||
533 | u8 acvo_APSD:1; /* bit 0 */ | ||
534 | u8 acvi_APSD:1; /* bit 1 */ | ||
535 | u8 ac_bk_APSD:1; /* bit 2 */ | ||
536 | u8 ac_be_APSD:1; /* bit 3 */ | ||
537 | u8 q_ack:1; /* bit 4 */ | ||
538 | u8 max_len:2; /* bit 5-6 */ | ||
539 | u8 more_data_ack:1; /* bit 7 */ | ||
540 | } q_STA; | ||
541 | u8 val; | ||
542 | }; | ||
543 | |||
544 | /* QoS structures */ | 480 | /* QoS structures */ |
545 | struct iwl_qos_info { | 481 | struct iwl_qos_info { |
546 | int qos_active; | 482 | int qos_active; |
547 | union iwl_qos_capabity qos_cap; | ||
548 | struct iwl_qosparam_cmd def_qos_parm; | 483 | struct iwl_qosparam_cmd def_qos_parm; |
549 | }; | 484 | }; |
550 | 485 | ||
@@ -1185,7 +1120,6 @@ struct iwl_priv { | |||
1185 | __le16 sensitivity_tbl[HD_TABLE_SIZE]; | 1120 | __le16 sensitivity_tbl[HD_TABLE_SIZE]; |
1186 | 1121 | ||
1187 | struct iwl_ht_config current_ht_config; | 1122 | struct iwl_ht_config current_ht_config; |
1188 | u8 last_phy_res[100]; | ||
1189 | 1123 | ||
1190 | /* Rate scaling data */ | 1124 | /* Rate scaling data */ |
1191 | u8 retry_rate; | 1125 | u8 retry_rate; |
@@ -1205,8 +1139,6 @@ struct iwl_priv { | |||
1205 | 1139 | ||
1206 | unsigned long status; | 1140 | unsigned long status; |
1207 | 1141 | ||
1208 | int last_rx_noise; /* From beacon statistics */ | ||
1209 | |||
1210 | /* counts mgmt, ctl, and data packets */ | 1142 | /* counts mgmt, ctl, and data packets */ |
1211 | struct traffic_stats tx_stats; | 1143 | struct traffic_stats tx_stats; |
1212 | struct traffic_stats rx_stats; | 1144 | struct traffic_stats rx_stats; |
@@ -1234,7 +1166,6 @@ struct iwl_priv { | |||
1234 | int num_stations; | 1166 | int num_stations; |
1235 | struct iwl_station_entry stations[IWL_STATION_COUNT]; | 1167 | struct iwl_station_entry stations[IWL_STATION_COUNT]; |
1236 | struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */ | 1168 | struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */ |
1237 | u8 default_wep_key; | ||
1238 | u8 key_mapping_key; | 1169 | u8 key_mapping_key; |
1239 | unsigned long ucode_key_table; | 1170 | unsigned long ucode_key_table; |
1240 | 1171 | ||
@@ -1305,6 +1236,9 @@ struct iwl_priv { | |||
1305 | * no AGGREGATION | 1236 | * no AGGREGATION |
1306 | */ | 1237 | */ |
1307 | u8 agg_tids_count; | 1238 | u8 agg_tids_count; |
1239 | |||
1240 | struct iwl_rx_phy_res last_phy_res; | ||
1241 | bool last_phy_res_valid; | ||
1308 | } _agn; | 1242 | } _agn; |
1309 | #endif | 1243 | #endif |
1310 | }; | 1244 | }; |
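Note on the iwl-dev.h hunk above: the driver-private _agn block now carries a properly typed PHY-result cache plus an explicit validity flag, replacing the old u8 last_phy_res[100] scratch buffer whose first byte doubled as the flag. A minimal sketch of how such a cache gets filled when a PHY notification arrives; the helper name is illustrative, only the two fields come from the hunk above:

	/* Cache PHY data for a later MPDU frame (sketch; helper name is
	 * hypothetical, the fields are the ones added to struct iwl_priv) */
	static void iwl_cache_phy_res(struct iwl_priv *priv,
				      struct iwl_rx_phy_res *phy_res)
	{
		memcpy(&priv->_agn.last_phy_res, phy_res,
		       sizeof(struct iwl_rx_phy_res));
		priv->_agn.last_phy_res_valid = true;
	}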
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c index 2ffc2edbf4f0..4a487639d932 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c | |||
@@ -37,6 +37,7 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8); | |||
37 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); | 37 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); |
38 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); | 38 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); |
39 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); | 39 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); |
40 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx); | ||
40 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); | 41 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); |
41 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); | 42 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); |
42 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); | 43 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 4e1ba824dc50..cb6d50b78140 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h | |||
@@ -188,19 +188,19 @@ struct iwl_eeprom_enhanced_txpwr { | |||
188 | /* 5000 regulatory - indirect access */ | 188 | /* 5000 regulatory - indirect access */ |
189 | #define EEPROM_5000_REG_SKU_ID ((0x02)\ | 189 | #define EEPROM_5000_REG_SKU_ID ((0x02)\ |
190 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */ | 190 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */ |
191 | #define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\ | 191 | #define EEPROM_REG_BAND_1_CHANNELS ((0x08)\ |
192 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */ | 192 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */ |
193 | #define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\ | 193 | #define EEPROM_REG_BAND_2_CHANNELS ((0x26)\ |
194 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */ | 194 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */ |
195 | #define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\ | 195 | #define EEPROM_REG_BAND_3_CHANNELS ((0x42)\ |
196 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ | 196 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ |
197 | #define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\ | 197 | #define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\ |
198 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ | 198 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ |
199 | #define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\ | 199 | #define EEPROM_REG_BAND_5_CHANNELS ((0x74)\ |
200 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */ | 200 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */ |
201 | #define EEPROM_5000_REG_BAND_24_HT40_CHANNELS ((0x82)\ | 201 | #define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\ |
202 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ | 202 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ |
203 | #define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\ | 203 | #define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\ |
204 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ | 204 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ |
205 | 205 | ||
206 | /* 6000 and up regulatory tx power - indirect access */ | 206 | /* 6000 and up regulatory tx power - indirect access */ |
@@ -261,12 +261,15 @@ struct iwl_eeprom_enhanced_txpwr { | |||
261 | #define EEPROM_5050_EEPROM_VERSION (0x21E) | 261 | #define EEPROM_5050_EEPROM_VERSION (0x21E) |
262 | 262 | ||
263 | /* 1000 Specific */ | 263 | /* 1000 Specific */ |
264 | #define EEPROM_1000_TX_POWER_VERSION (4) | ||
264 | #define EEPROM_1000_EEPROM_VERSION (0x15C) | 265 | #define EEPROM_1000_EEPROM_VERSION (0x15C) |
265 | 266 | ||
266 | /* 6x00 Specific */ | 267 | /* 6x00 Specific */ |
268 | #define EEPROM_6000_TX_POWER_VERSION (4) | ||
267 | #define EEPROM_6000_EEPROM_VERSION (0x434) | 269 | #define EEPROM_6000_EEPROM_VERSION (0x434) |
268 | 270 | ||
269 | /* 6x50 Specific */ | 271 | /* 6x50 Specific */ |
272 | #define EEPROM_6050_TX_POWER_VERSION (4) | ||
270 | #define EEPROM_6050_EEPROM_VERSION (0x532) | 273 | #define EEPROM_6050_EEPROM_VERSION (0x532) |
271 | 274 | ||
272 | /* OTP */ | 275 | /* OTP */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c index 73681c4fefe7..51f89e7ba681 100644 --- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c +++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c | |||
@@ -169,7 +169,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
169 | mutex_lock(&priv->sync_cmd_mutex); | 169 | mutex_lock(&priv->sync_cmd_mutex); |
170 | 170 | ||
171 | set_bit(STATUS_HCMD_ACTIVE, &priv->status); | 171 | set_bit(STATUS_HCMD_ACTIVE, &priv->status); |
172 | IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n", | 172 | IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", |
173 | get_cmd_string(cmd->id)); | 173 | get_cmd_string(cmd->id)); |
174 | 174 | ||
175 | cmd_idx = iwl_enqueue_hcmd(priv, cmd); | 175 | cmd_idx = iwl_enqueue_hcmd(priv, cmd); |
@@ -191,7 +191,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
191 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); | 191 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); |
192 | 192 | ||
193 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | 193 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); |
194 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n", | 194 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", |
195 | get_cmd_string(cmd->id)); | 195 | get_cmd_string(cmd->id)); |
196 | ret = -ETIMEDOUT; | 196 | ret = -ETIMEDOUT; |
197 | goto cancel; | 197 | goto cancel; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h index 16eb3ced9b30..0203a3bbf872 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/iwlwifi/iwl-io.h | |||
@@ -298,7 +298,7 @@ static inline u32 __iwl_read_direct32(const char *f, u32 l, | |||
298 | struct iwl_priv *priv, u32 reg) | 298 | struct iwl_priv *priv, u32 reg) |
299 | { | 299 | { |
300 | u32 value = _iwl_read_direct32(priv, reg); | 300 | u32 value = _iwl_read_direct32(priv, reg); |
301 | IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value, | 301 | IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value, |
302 | f, l); | 302 | f, l); |
303 | return value; | 303 | return value; |
304 | } | 304 | } |
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c index a6f9c918aabc..db5bfcb036ca 100644 --- a/drivers/net/wireless/iwlwifi/iwl-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-led.c | |||
@@ -46,7 +46,7 @@ | |||
46 | static int led_mode; | 46 | static int led_mode; |
47 | module_param(led_mode, int, S_IRUGO); | 47 | module_param(led_mode, int, S_IRUGO); |
48 | MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), " | 48 | MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), " |
49 | "(default 0)\n"); | 49 | "(default 0)"); |
50 | 50 | ||
51 | 51 | ||
52 | static const struct { | 52 | static const struct { |
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 548dac2f6a96..581c683a8507 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c | |||
@@ -384,10 +384,10 @@ EXPORT_SYMBOL(iwl_ht_enabled); | |||
384 | 384 | ||
385 | bool iwl_within_ct_kill_margin(struct iwl_priv *priv) | 385 | bool iwl_within_ct_kill_margin(struct iwl_priv *priv) |
386 | { | 386 | { |
387 | s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */ | 387 | s32 temp = priv->temperature; /* degrees CELSIUS except specified */ |
388 | bool within_margin = false; | 388 | bool within_margin = false; |
389 | 389 | ||
390 | if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) | 390 | if (priv->cfg->temperature_kelvin) |
391 | temp = KELVIN_TO_CELSIUS(priv->temperature); | 391 | temp = KELVIN_TO_CELSIUS(priv->temperature); |
392 | 392 | ||
393 | if (!priv->thermal_throttle.advanced_tt) | 393 | if (!priv->thermal_throttle.advanced_tt) |
@@ -840,12 +840,12 @@ EXPORT_SYMBOL(iwl_tt_exit_ct_kill); | |||
840 | static void iwl_bg_tt_work(struct work_struct *work) | 840 | static void iwl_bg_tt_work(struct work_struct *work) |
841 | { | 841 | { |
842 | struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work); | 842 | struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work); |
843 | s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */ | 843 | s32 temp = priv->temperature; /* degrees CELSIUS except specified */ |
844 | 844 | ||
845 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | 845 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
846 | return; | 846 | return; |
847 | 847 | ||
848 | if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) | 848 | if (priv->cfg->temperature_kelvin) |
849 | temp = KELVIN_TO_CELSIUS(priv->temperature); | 849 | temp = KELVIN_TO_CELSIUS(priv->temperature); |
850 | 850 | ||
851 | if (!priv->thermal_throttle.advanced_tt) | 851 | if (!priv->thermal_throttle.advanced_tt) |
@@ -875,7 +875,7 @@ void iwl_tt_initialize(struct iwl_priv *priv) | |||
875 | int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1); | 875 | int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1); |
876 | struct iwl_tt_trans *transaction; | 876 | struct iwl_tt_trans *transaction; |
877 | 877 | ||
878 | IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling \n"); | 878 | IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n"); |
879 | 879 | ||
880 | memset(tt, 0, sizeof(struct iwl_tt_mgmt)); | 880 | memset(tt, 0, sizeof(struct iwl_tt_mgmt)); |
881 | 881 | ||
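Both thermal-throttle paths above now ask the per-device configuration whether the firmware reports temperature in Kelvin, instead of special-casing the 4965 hardware revision. A small sketch of the resulting pattern; the helper is illustrative, while temperature_kelvin and KELVIN_TO_CELSIUS are taken from the hunks above:

	/* Current temperature in degrees Celsius (sketch) */
	static s32 iwl_temp_celsius(struct iwl_priv *priv)
	{
		s32 temp = priv->temperature;

		/* devices that report Kelvin are flagged in their config */
		if (priv->cfg->temperature_kelvin)
			temp = KELVIN_TO_CELSIUS(temp);
		return temp;
	}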
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index d3b2fb389e58..267eb8935902 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c | |||
@@ -163,197 +163,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q | |||
163 | spin_unlock_irqrestore(&q->lock, flags); | 163 | spin_unlock_irqrestore(&q->lock, flags); |
164 | } | 164 | } |
165 | EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); | 165 | EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); |
166 | /** | ||
167 | * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | ||
168 | */ | ||
169 | static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, | ||
170 | dma_addr_t dma_addr) | ||
171 | { | ||
172 | return cpu_to_le32((u32)(dma_addr >> 8)); | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * iwl_rx_queue_restock - refill RX queue from pre-allocated pool | ||
177 | * | ||
178 | * If there are slots in the RX queue that need to be restocked, | ||
179 | * and we have free pre-allocated buffers, fill the ranks as much | ||
180 | * as we can, pulling from rx_free. | ||
181 | * | ||
182 | * This moves the 'write' index forward to catch up with 'processed', and | ||
183 | * also updates the memory address in the firmware to reference the new | ||
184 | * target buffer. | ||
185 | */ | ||
186 | void iwl_rx_queue_restock(struct iwl_priv *priv) | ||
187 | { | ||
188 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
189 | struct list_head *element; | ||
190 | struct iwl_rx_mem_buffer *rxb; | ||
191 | unsigned long flags; | ||
192 | int write; | ||
193 | |||
194 | spin_lock_irqsave(&rxq->lock, flags); | ||
195 | write = rxq->write & ~0x7; | ||
196 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
197 | /* Get next free Rx buffer, remove from free list */ | ||
198 | element = rxq->rx_free.next; | ||
199 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
200 | list_del(element); | ||
201 | |||
202 | /* Point to Rx buffer via next RBD in circular buffer */ | ||
203 | rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma); | ||
204 | rxq->queue[rxq->write] = rxb; | ||
205 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
206 | rxq->free_count--; | ||
207 | } | ||
208 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
209 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
210 | * refill it */ | ||
211 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
212 | queue_work(priv->workqueue, &priv->rx_replenish); | ||
213 | |||
214 | |||
215 | /* If we've added more space for the firmware to place data, tell it. | ||
216 | * Increment device's write pointer in multiples of 8. */ | ||
217 | if (rxq->write_actual != (rxq->write & ~0x7)) { | ||
218 | spin_lock_irqsave(&rxq->lock, flags); | ||
219 | rxq->need_update = 1; | ||
220 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
221 | iwl_rx_queue_update_write_ptr(priv, rxq); | ||
222 | } | ||
223 | } | ||
224 | EXPORT_SYMBOL(iwl_rx_queue_restock); | ||
225 | |||
226 | |||
227 | /** | ||
228 | * iwl_rx_replenish - Move all used packet from rx_used to rx_free | ||
229 | * | ||
230 | * When moving to rx_free an SKB is allocated for the slot. | ||
231 | * | ||
232 | * Also restock the Rx queue via iwl_rx_queue_restock. | ||
233 | * This is called as a scheduled work item (except for during initialization) | ||
234 | */ | ||
235 | void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority) | ||
236 | { | ||
237 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
238 | struct list_head *element; | ||
239 | struct iwl_rx_mem_buffer *rxb; | ||
240 | struct page *page; | ||
241 | unsigned long flags; | ||
242 | gfp_t gfp_mask = priority; | ||
243 | |||
244 | while (1) { | ||
245 | spin_lock_irqsave(&rxq->lock, flags); | ||
246 | if (list_empty(&rxq->rx_used)) { | ||
247 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
248 | return; | ||
249 | } | ||
250 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
251 | |||
252 | if (rxq->free_count > RX_LOW_WATERMARK) | ||
253 | gfp_mask |= __GFP_NOWARN; | ||
254 | |||
255 | if (priv->hw_params.rx_page_order > 0) | ||
256 | gfp_mask |= __GFP_COMP; | ||
257 | |||
258 | /* Alloc a new receive buffer */ | ||
259 | page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); | ||
260 | if (!page) { | ||
261 | if (net_ratelimit()) | ||
262 | IWL_DEBUG_INFO(priv, "alloc_pages failed, " | ||
263 | "order: %d\n", | ||
264 | priv->hw_params.rx_page_order); | ||
265 | |||
266 | if ((rxq->free_count <= RX_LOW_WATERMARK) && | ||
267 | net_ratelimit()) | ||
268 | IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n", | ||
269 | priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", | ||
270 | rxq->free_count); | ||
271 | /* We don't reschedule replenish work here -- we will | ||
272 | * call the restock method and if it still needs | ||
273 | * more buffers it will schedule replenish */ | ||
274 | return; | ||
275 | } | ||
276 | |||
277 | spin_lock_irqsave(&rxq->lock, flags); | ||
278 | |||
279 | if (list_empty(&rxq->rx_used)) { | ||
280 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
281 | __free_pages(page, priv->hw_params.rx_page_order); | ||
282 | return; | ||
283 | } | ||
284 | element = rxq->rx_used.next; | ||
285 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
286 | list_del(element); | ||
287 | |||
288 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
289 | |||
290 | rxb->page = page; | ||
291 | /* Get physical address of the RB */ | ||
292 | rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, | ||
293 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
294 | PCI_DMA_FROMDEVICE); | ||
295 | /* dma address must be no more than 36 bits */ | ||
296 | BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); | ||
297 | /* and also 256 byte aligned! */ | ||
298 | BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); | ||
299 | |||
300 | spin_lock_irqsave(&rxq->lock, flags); | ||
301 | |||
302 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
303 | rxq->free_count++; | ||
304 | priv->alloc_rxb_page++; | ||
305 | |||
306 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
307 | } | ||
308 | } | ||
309 | |||
310 | void iwl_rx_replenish(struct iwl_priv *priv) | ||
311 | { | ||
312 | unsigned long flags; | ||
313 | |||
314 | iwl_rx_allocate(priv, GFP_KERNEL); | ||
315 | |||
316 | spin_lock_irqsave(&priv->lock, flags); | ||
317 | iwl_rx_queue_restock(priv); | ||
318 | spin_unlock_irqrestore(&priv->lock, flags); | ||
319 | } | ||
320 | EXPORT_SYMBOL(iwl_rx_replenish); | ||
321 | |||
322 | void iwl_rx_replenish_now(struct iwl_priv *priv) | ||
323 | { | ||
324 | iwl_rx_allocate(priv, GFP_ATOMIC); | ||
325 | |||
326 | iwl_rx_queue_restock(priv); | ||
327 | } | ||
328 | EXPORT_SYMBOL(iwl_rx_replenish_now); | ||
329 | |||
330 | |||
331 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | ||
332 | * If an SKB has been detached, the POOL needs to have its SKB set to NULL | ||
333 | * This free routine walks the list of POOL entries and if SKB is set to | ||
334 | * non NULL it is unmapped and freed | ||
335 | */ | ||
336 | void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
337 | { | ||
338 | int i; | ||
339 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | ||
340 | if (rxq->pool[i].page != NULL) { | ||
341 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
342 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
343 | PCI_DMA_FROMDEVICE); | ||
344 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
345 | rxq->pool[i].page = NULL; | ||
346 | } | ||
347 | } | ||
348 | |||
349 | dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, | ||
350 | rxq->dma_addr); | ||
351 | dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), | ||
352 | rxq->rb_stts, rxq->rb_stts_dma); | ||
353 | rxq->bd = NULL; | ||
354 | rxq->rb_stts = NULL; | ||
355 | } | ||
356 | EXPORT_SYMBOL(iwl_rx_queue_free); | ||
357 | 166 | ||
358 | int iwl_rx_queue_alloc(struct iwl_priv *priv) | 167 | int iwl_rx_queue_alloc(struct iwl_priv *priv) |
359 | { | 168 | { |
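The block removed above carried the generic RX replenish path. Two invariants from it are worth keeping in mind when reading whatever replaces it: each receive-buffer descriptor stores the page DMA address shifted right by 8 (so buffers must be 256-byte aligned and fit in 36 bits), and the device write pointer only advances in multiples of 8 RBDs. One restock iteration, condensed from the removed code:

	/* Take a buffer from rx_free and publish it in the RBD ring (sketch) */
	rxb = list_entry(rxq->rx_free.next, struct iwl_rx_mem_buffer, list);
	list_del(&rxb->list);
	rxq->bd[rxq->write] = cpu_to_le32((u32)(rxb->page_dma >> 8));
	rxq->queue[rxq->write] = rxb;
	rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
	rxq->free_count--;
	/* the device write pointer is then bumped only when rxq->write & ~0x7
	 * has moved past the last value handed to the hardware */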
@@ -396,98 +205,6 @@ err_bd: | |||
396 | } | 205 | } |
397 | EXPORT_SYMBOL(iwl_rx_queue_alloc); | 206 | EXPORT_SYMBOL(iwl_rx_queue_alloc); |
398 | 207 | ||
399 | void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
400 | { | ||
401 | unsigned long flags; | ||
402 | int i; | ||
403 | spin_lock_irqsave(&rxq->lock, flags); | ||
404 | INIT_LIST_HEAD(&rxq->rx_free); | ||
405 | INIT_LIST_HEAD(&rxq->rx_used); | ||
406 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
407 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
408 | /* In the reset function, these buffers may have been allocated | ||
409 | * to an SKB, so we need to unmap and free potential storage */ | ||
410 | if (rxq->pool[i].page != NULL) { | ||
411 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
412 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
413 | PCI_DMA_FROMDEVICE); | ||
414 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
415 | rxq->pool[i].page = NULL; | ||
416 | } | ||
417 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
418 | } | ||
419 | |||
420 | /* Set us so that we have processed and used all buffers, but have | ||
421 | * not restocked the Rx queue with fresh buffers */ | ||
422 | rxq->read = rxq->write = 0; | ||
423 | rxq->write_actual = 0; | ||
424 | rxq->free_count = 0; | ||
425 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
426 | } | ||
427 | |||
428 | int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
429 | { | ||
430 | u32 rb_size; | ||
431 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | ||
432 | u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ | ||
433 | |||
434 | if (!priv->cfg->use_isr_legacy) | ||
435 | rb_timeout = RX_RB_TIMEOUT; | ||
436 | |||
437 | if (priv->cfg->mod_params->amsdu_size_8K) | ||
438 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; | ||
439 | else | ||
440 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | ||
441 | |||
442 | /* Stop Rx DMA */ | ||
443 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
444 | |||
445 | /* Reset driver's Rx queue write index */ | ||
446 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | ||
447 | |||
448 | /* Tell device where to find RBD circular buffer in DRAM */ | ||
449 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | ||
450 | (u32)(rxq->dma_addr >> 8)); | ||
451 | |||
452 | /* Tell device where in DRAM to update its Rx status */ | ||
453 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, | ||
454 | rxq->rb_stts_dma >> 4); | ||
455 | |||
456 | /* Enable Rx DMA | ||
457 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | ||
458 | * the credit mechanism in 5000 HW RX FIFO | ||
459 | * Direct rx interrupts to hosts | ||
460 | * Rx buffer size 4 or 8k | ||
461 | * RB timeout 0x10 | ||
462 | * 256 RBDs | ||
463 | */ | ||
464 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, | ||
465 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | ||
466 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | ||
467 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | ||
468 | FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | | ||
469 | rb_size| | ||
470 | (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| | ||
471 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | ||
472 | |||
473 | /* Set interrupt coalescing timer to default (2048 usecs) */ | ||
474 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | int iwl_rxq_stop(struct iwl_priv *priv) | ||
480 | { | ||
481 | |||
482 | /* stop Rx DMA */ | ||
483 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
484 | iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, | ||
485 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | ||
486 | |||
487 | return 0; | ||
488 | } | ||
489 | EXPORT_SYMBOL(iwl_rxq_stop); | ||
490 | |||
491 | void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, | 208 | void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, |
492 | struct iwl_rx_mem_buffer *rxb) | 209 | struct iwl_rx_mem_buffer *rxb) |
493 | 210 | ||
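The removed iwl_rx_init() is mostly one register write that tells the flow handler where the RBD ring and status block live and how to run RX DMA. The bit layout it programmed into FH_MEM_RCSR_CHNL0_CONFIG_REG, taken verbatim from the removed code and annotated:

	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |	/* enable DMA */
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |	/* 5000 HW FIFO quirk */
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |	/* 4K or 8K receive buffers */
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));	/* 256 RBDs */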
@@ -543,6 +260,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv) | |||
543 | le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; | 260 | le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; |
544 | int bcn_silence_c = | 261 | int bcn_silence_c = |
545 | le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; | 262 | le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; |
263 | int last_rx_noise; | ||
546 | 264 | ||
547 | if (bcn_silence_a) { | 265 | if (bcn_silence_a) { |
548 | total_silence += bcn_silence_a; | 266 | total_silence += bcn_silence_a; |
@@ -559,13 +277,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv) | |||
559 | 277 | ||
560 | /* Average among active antennas */ | 278 | /* Average among active antennas */ |
561 | if (num_active_rx) | 279 | if (num_active_rx) |
562 | priv->last_rx_noise = (total_silence / num_active_rx) - 107; | 280 | last_rx_noise = (total_silence / num_active_rx) - 107; |
563 | else | 281 | else |
564 | priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | 282 | last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; |
565 | 283 | ||
566 | IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", | 284 | IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", |
567 | bcn_silence_a, bcn_silence_b, bcn_silence_c, | 285 | bcn_silence_a, bcn_silence_b, bcn_silence_c, |
568 | priv->last_rx_noise); | 286 | last_rx_noise); |
569 | } | 287 | } |
570 | 288 | ||
571 | #ifdef CONFIG_IWLWIFI_DEBUG | 289 | #ifdef CONFIG_IWLWIFI_DEBUG |
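With last_rx_noise gone from struct iwl_priv, the averaged beacon noise now only lives inside iwl_rx_calc_noise() for the debug print. The calculation itself is unchanged: in-band silence readings from the active receive chains are averaged and offset to dBm. A standalone sketch of that step, with an illustrative function name:

	/* Average beacon silence over active RX chains, in dBm (sketch) */
	static int iwl_avg_beacon_noise(int silence_a, int silence_b, int silence_c)
	{
		int total = 0, chains = 0;

		if (silence_a) {
			total += silence_a;
			chains++;
		}
		if (silence_b) {
			total += silence_b;
			chains++;
		}
		if (silence_c) {
			total += silence_c;
			chains++;
		}
		if (!chains)
			return IWL_NOISE_MEAS_NOT_AVAILABLE;
		return (total / chains) - 107;
	}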
@@ -617,63 +335,6 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, | |||
617 | 335 | ||
618 | #define REG_RECALIB_PERIOD (60) | 336 | #define REG_RECALIB_PERIOD (60) |
619 | 337 | ||
620 | /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ | ||
621 | #define ACK_CNT_RATIO (50) | ||
622 | #define BA_TIMEOUT_CNT (5) | ||
623 | #define BA_TIMEOUT_MAX (16) | ||
624 | |||
625 | #if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE) | ||
626 | /** | ||
627 | * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. | ||
628 | * | ||
629 | * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding | ||
630 | * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal | ||
631 | * operation state. | ||
632 | */ | ||
633 | bool iwl_good_ack_health(struct iwl_priv *priv, | ||
634 | struct iwl_rx_packet *pkt) | ||
635 | { | ||
636 | bool rc = true; | ||
637 | int actual_ack_cnt_delta, expected_ack_cnt_delta; | ||
638 | int ba_timeout_delta; | ||
639 | |||
640 | actual_ack_cnt_delta = | ||
641 | le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - | ||
642 | le32_to_cpu(priv->statistics.tx.actual_ack_cnt); | ||
643 | expected_ack_cnt_delta = | ||
644 | le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - | ||
645 | le32_to_cpu(priv->statistics.tx.expected_ack_cnt); | ||
646 | ba_timeout_delta = | ||
647 | le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - | ||
648 | le32_to_cpu(priv->statistics.tx.agg.ba_timeout); | ||
649 | if ((priv->_agn.agg_tids_count > 0) && | ||
650 | (expected_ack_cnt_delta > 0) && | ||
651 | (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) | ||
652 | < ACK_CNT_RATIO) && | ||
653 | (ba_timeout_delta > BA_TIMEOUT_CNT)) { | ||
654 | IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," | ||
655 | " expected_ack_cnt = %d\n", | ||
656 | actual_ack_cnt_delta, expected_ack_cnt_delta); | ||
657 | |||
658 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
659 | IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", | ||
660 | priv->delta_statistics.tx.rx_detected_cnt); | ||
661 | IWL_DEBUG_RADIO(priv, | ||
662 | "ack_or_ba_timeout_collision delta = %d\n", | ||
663 | priv->delta_statistics.tx. | ||
664 | ack_or_ba_timeout_collision); | ||
665 | #endif | ||
666 | IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", | ||
667 | ba_timeout_delta); | ||
668 | if (!actual_ack_cnt_delta && | ||
669 | (ba_timeout_delta >= BA_TIMEOUT_MAX)) | ||
670 | rc = false; | ||
671 | } | ||
672 | return rc; | ||
673 | } | ||
674 | EXPORT_SYMBOL(iwl_good_ack_health); | ||
675 | #endif | ||
676 | |||
677 | /** | 338 | /** |
678 | * iwl_good_plcp_health - checks for plcp error. | 339 | * iwl_good_plcp_health - checks for plcp error. |
679 | * | 340 | * |
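iwl_good_ack_health(), removed above, encodes a simple watchdog: compare the incoming statistics notification with the previous one and declare the radio unhealthy (so the caller can reload firmware) when aggregation is active, fewer than ACK_CNT_RATIO percent of the expected ACKs arrived, and aggregated block-ack timeouts keep climbing. The decision condenses to the check below, copied from the removed function with the constants it used:

	/* ACK_CNT_RATIO = 50, BA_TIMEOUT_CNT = 5, BA_TIMEOUT_MAX = 16 */
	if (priv->_agn.agg_tids_count > 0 &&
	    expected_ack_cnt_delta > 0 &&
	    (actual_ack_cnt_delta * 100) / expected_ack_cnt_delta < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT) {
		/* unhealthy only if ACKs stopped entirely and BA timeouts piled up */
		if (!actual_ack_cnt_delta && ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}
	return true;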
@@ -830,139 +491,6 @@ void iwl_reply_statistics(struct iwl_priv *priv, | |||
830 | } | 491 | } |
831 | EXPORT_SYMBOL(iwl_reply_statistics); | 492 | EXPORT_SYMBOL(iwl_reply_statistics); |
832 | 493 | ||
833 | /* Calc max signal level (dBm) among 3 possible receivers */ | ||
834 | static inline int iwl_calc_rssi(struct iwl_priv *priv, | ||
835 | struct iwl_rx_phy_res *rx_resp) | ||
836 | { | ||
837 | return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); | ||
838 | } | ||
839 | |||
840 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
841 | /** | ||
842 | * iwl_dbg_report_frame - dump frame to syslog during debug sessions | ||
843 | * | ||
844 | * You may hack this function to show different aspects of received frames, | ||
845 | * including selective frame dumps. | ||
846 | * group100 parameter selects whether to show 1 out of 100 good data frames. | ||
847 | * All beacon and probe response frames are printed. | ||
848 | */ | ||
849 | static void iwl_dbg_report_frame(struct iwl_priv *priv, | ||
850 | struct iwl_rx_phy_res *phy_res, u16 length, | ||
851 | struct ieee80211_hdr *header, int group100) | ||
852 | { | ||
853 | u32 to_us; | ||
854 | u32 print_summary = 0; | ||
855 | u32 print_dump = 0; /* set to 1 to dump all frames' contents */ | ||
856 | u32 hundred = 0; | ||
857 | u32 dataframe = 0; | ||
858 | __le16 fc; | ||
859 | u16 seq_ctl; | ||
860 | u16 channel; | ||
861 | u16 phy_flags; | ||
862 | u32 rate_n_flags; | ||
863 | u32 tsf_low; | ||
864 | int rssi; | ||
865 | |||
866 | if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX))) | ||
867 | return; | ||
868 | |||
869 | /* MAC header */ | ||
870 | fc = header->frame_control; | ||
871 | seq_ctl = le16_to_cpu(header->seq_ctrl); | ||
872 | |||
873 | /* metadata */ | ||
874 | channel = le16_to_cpu(phy_res->channel); | ||
875 | phy_flags = le16_to_cpu(phy_res->phy_flags); | ||
876 | rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); | ||
877 | |||
878 | /* signal statistics */ | ||
879 | rssi = iwl_calc_rssi(priv, phy_res); | ||
880 | tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff; | ||
881 | |||
882 | to_us = !compare_ether_addr(header->addr1, priv->mac_addr); | ||
883 | |||
884 | /* if data frame is to us and all is good, | ||
885 | * (optionally) print summary for only 1 out of every 100 */ | ||
886 | if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) == | ||
887 | cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { | ||
888 | dataframe = 1; | ||
889 | if (!group100) | ||
890 | print_summary = 1; /* print each frame */ | ||
891 | else if (priv->framecnt_to_us < 100) { | ||
892 | priv->framecnt_to_us++; | ||
893 | print_summary = 0; | ||
894 | } else { | ||
895 | priv->framecnt_to_us = 0; | ||
896 | print_summary = 1; | ||
897 | hundred = 1; | ||
898 | } | ||
899 | } else { | ||
900 | /* print summary for all other frames */ | ||
901 | print_summary = 1; | ||
902 | } | ||
903 | |||
904 | if (print_summary) { | ||
905 | char *title; | ||
906 | int rate_idx; | ||
907 | u32 bitrate; | ||
908 | |||
909 | if (hundred) | ||
910 | title = "100Frames"; | ||
911 | else if (ieee80211_has_retry(fc)) | ||
912 | title = "Retry"; | ||
913 | else if (ieee80211_is_assoc_resp(fc)) | ||
914 | title = "AscRsp"; | ||
915 | else if (ieee80211_is_reassoc_resp(fc)) | ||
916 | title = "RasRsp"; | ||
917 | else if (ieee80211_is_probe_resp(fc)) { | ||
918 | title = "PrbRsp"; | ||
919 | print_dump = 1; /* dump frame contents */ | ||
920 | } else if (ieee80211_is_beacon(fc)) { | ||
921 | title = "Beacon"; | ||
922 | print_dump = 1; /* dump frame contents */ | ||
923 | } else if (ieee80211_is_atim(fc)) | ||
924 | title = "ATIM"; | ||
925 | else if (ieee80211_is_auth(fc)) | ||
926 | title = "Auth"; | ||
927 | else if (ieee80211_is_deauth(fc)) | ||
928 | title = "DeAuth"; | ||
929 | else if (ieee80211_is_disassoc(fc)) | ||
930 | title = "DisAssoc"; | ||
931 | else | ||
932 | title = "Frame"; | ||
933 | |||
934 | rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); | ||
935 | if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) { | ||
936 | bitrate = 0; | ||
937 | WARN_ON_ONCE(1); | ||
938 | } else { | ||
939 | bitrate = iwl_rates[rate_idx].ieee / 2; | ||
940 | } | ||
941 | |||
942 | /* print frame summary. | ||
943 | * MAC addresses show just the last byte (for brevity), | ||
944 | * but you can hack it to show more, if you'd like to. */ | ||
945 | if (dataframe) | ||
946 | IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " | ||
947 | "len=%u, rssi=%d, chnl=%d, rate=%u, \n", | ||
948 | title, le16_to_cpu(fc), header->addr1[5], | ||
949 | length, rssi, channel, bitrate); | ||
950 | else { | ||
951 | /* src/dst addresses assume managed mode */ | ||
952 | IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, " | ||
953 | "len=%u, rssi=%d, tim=%lu usec, " | ||
954 | "phy=0x%02x, chnl=%d\n", | ||
955 | title, le16_to_cpu(fc), header->addr1[5], | ||
956 | header->addr3[5], length, rssi, | ||
957 | tsf_low - priv->scan_start_tsf, | ||
958 | phy_flags, channel); | ||
959 | } | ||
960 | } | ||
961 | if (print_dump) | ||
962 | iwl_print_hex_dump(priv, IWL_DL_RX, header, length); | ||
963 | } | ||
964 | #endif | ||
965 | |||
966 | /* | 494 | /* |
967 | * returns non-zero if packet should be dropped | 495 | * returns non-zero if packet should be dropped |
968 | */ | 496 | */ |
@@ -1010,281 +538,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv, | |||
1010 | return 0; | 538 | return 0; |
1011 | } | 539 | } |
1012 | EXPORT_SYMBOL(iwl_set_decrypted_flag); | 540 | EXPORT_SYMBOL(iwl_set_decrypted_flag); |
1013 | |||
1014 | static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) | ||
1015 | { | ||
1016 | u32 decrypt_out = 0; | ||
1017 | |||
1018 | if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == | ||
1019 | RX_RES_STATUS_STATION_FOUND) | ||
1020 | decrypt_out |= (RX_RES_STATUS_STATION_FOUND | | ||
1021 | RX_RES_STATUS_NO_STATION_INFO_MISMATCH); | ||
1022 | |||
1023 | decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); | ||
1024 | |||
1025 | /* packet was not encrypted */ | ||
1026 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | ||
1027 | RX_RES_STATUS_SEC_TYPE_NONE) | ||
1028 | return decrypt_out; | ||
1029 | |||
1030 | /* packet was encrypted with unknown alg */ | ||
1031 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | ||
1032 | RX_RES_STATUS_SEC_TYPE_ERR) | ||
1033 | return decrypt_out; | ||
1034 | |||
1035 | /* decryption was not done in HW */ | ||
1036 | if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != | ||
1037 | RX_MPDU_RES_STATUS_DEC_DONE_MSK) | ||
1038 | return decrypt_out; | ||
1039 | |||
1040 | switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { | ||
1041 | |||
1042 | case RX_RES_STATUS_SEC_TYPE_CCMP: | ||
1043 | /* alg is CCM: check MIC only */ | ||
1044 | if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) | ||
1045 | /* Bad MIC */ | ||
1046 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | ||
1047 | else | ||
1048 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | ||
1049 | |||
1050 | break; | ||
1051 | |||
1052 | case RX_RES_STATUS_SEC_TYPE_TKIP: | ||
1053 | if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { | ||
1054 | /* Bad TTAK */ | ||
1055 | decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; | ||
1056 | break; | ||
1057 | } | ||
1058 | /* fall through if TTAK OK */ | ||
1059 | default: | ||
1060 | if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) | ||
1061 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | ||
1062 | else | ||
1063 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | ||
1064 | break; | ||
1065 | }; | ||
1066 | |||
1067 | IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", | ||
1068 | decrypt_in, decrypt_out); | ||
1069 | |||
1070 | return decrypt_out; | ||
1071 | } | ||
1072 | |||
1073 | static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv, | ||
1074 | struct ieee80211_hdr *hdr, | ||
1075 | u16 len, | ||
1076 | u32 ampdu_status, | ||
1077 | struct iwl_rx_mem_buffer *rxb, | ||
1078 | struct ieee80211_rx_status *stats) | ||
1079 | { | ||
1080 | struct sk_buff *skb; | ||
1081 | int ret = 0; | ||
1082 | __le16 fc = hdr->frame_control; | ||
1083 | |||
1084 | /* We only process data packets if the interface is open */ | ||
1085 | if (unlikely(!priv->is_open)) { | ||
1086 | IWL_DEBUG_DROP_LIMIT(priv, | ||
1087 | "Dropping packet while interface is not open.\n"); | ||
1088 | return; | ||
1089 | } | ||
1090 | |||
1091 | /* In case of HW accelerated crypto and bad decryption, drop */ | ||
1092 | if (!priv->cfg->mod_params->sw_crypto && | ||
1093 | iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) | ||
1094 | return; | ||
1095 | |||
1096 | skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC); | ||
1097 | if (!skb) { | ||
1098 | IWL_ERR(priv, "alloc_skb failed\n"); | ||
1099 | return; | ||
1100 | } | ||
1101 | |||
1102 | skb_reserve(skb, IWL_LINK_HDR_MAX); | ||
1103 | skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); | ||
1104 | |||
1105 | /* mac80211 currently doesn't support paged SKB. Convert it to | ||
1106 | * linear SKB for management frame and data frame requires | ||
1107 | * software decryption or software defragementation. */ | ||
1108 | if (ieee80211_is_mgmt(fc) || | ||
1109 | ieee80211_has_protected(fc) || | ||
1110 | ieee80211_has_morefrags(fc) || | ||
1111 | le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG || | ||
1112 | (ieee80211_is_data_qos(fc) && | ||
1113 | *ieee80211_get_qos_ctl(hdr) & | ||
1114 | IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)) | ||
1115 | ret = skb_linearize(skb); | ||
1116 | else | ||
1117 | ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? | ||
1118 | 0 : -ENOMEM; | ||
1119 | |||
1120 | if (ret) { | ||
1121 | kfree_skb(skb); | ||
1122 | goto out; | ||
1123 | } | ||
1124 | |||
1125 | /* | ||
1126 | * XXX: We cannot touch the page and its virtual memory (hdr) after | ||
1127 | * here. It might have already been freed by the above skb change. | ||
1128 | */ | ||
1129 | |||
1130 | iwl_update_stats(priv, false, fc, len); | ||
1131 | memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); | ||
1132 | |||
1133 | ieee80211_rx(priv->hw, skb); | ||
1134 | out: | ||
1135 | priv->alloc_rxb_page--; | ||
1136 | rxb->page = NULL; | ||
1137 | } | ||
1138 | |||
1139 | /* Called for REPLY_RX (legacy ABG frames), or | ||
1140 | * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ | ||
1141 | void iwl_rx_reply_rx(struct iwl_priv *priv, | ||
1142 | struct iwl_rx_mem_buffer *rxb) | ||
1143 | { | ||
1144 | struct ieee80211_hdr *header; | ||
1145 | struct ieee80211_rx_status rx_status; | ||
1146 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1147 | struct iwl_rx_phy_res *phy_res; | ||
1148 | __le32 rx_pkt_status; | ||
1149 | struct iwl4965_rx_mpdu_res_start *amsdu; | ||
1150 | u32 len; | ||
1151 | u32 ampdu_status; | ||
1152 | u32 rate_n_flags; | ||
1153 | |||
1154 | /** | ||
1155 | * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. | ||
1156 | * REPLY_RX: physical layer info is in this buffer | ||
1157 | * REPLY_RX_MPDU_CMD: physical layer info was sent in separate | ||
1158 | * command and cached in priv->last_phy_res | ||
1159 | * | ||
1160 | * Here we set up local variables depending on which command is | ||
1161 | * received. | ||
1162 | */ | ||
1163 | if (pkt->hdr.cmd == REPLY_RX) { | ||
1164 | phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; | ||
1165 | header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) | ||
1166 | + phy_res->cfg_phy_cnt); | ||
1167 | |||
1168 | len = le16_to_cpu(phy_res->byte_count); | ||
1169 | rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + | ||
1170 | phy_res->cfg_phy_cnt + len); | ||
1171 | ampdu_status = le32_to_cpu(rx_pkt_status); | ||
1172 | } else { | ||
1173 | if (!priv->last_phy_res[0]) { | ||
1174 | IWL_ERR(priv, "MPDU frame without cached PHY data\n"); | ||
1175 | return; | ||
1176 | } | ||
1177 | phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1]; | ||
1178 | amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; | ||
1179 | header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); | ||
1180 | len = le16_to_cpu(amsdu->byte_count); | ||
1181 | rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); | ||
1182 | ampdu_status = iwl_translate_rx_status(priv, | ||
1183 | le32_to_cpu(rx_pkt_status)); | ||
1184 | } | ||
1185 | |||
1186 | if ((unlikely(phy_res->cfg_phy_cnt > 20))) { | ||
1187 | IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", | ||
1188 | phy_res->cfg_phy_cnt); | ||
1189 | return; | ||
1190 | } | ||
1191 | |||
1192 | if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || | ||
1193 | !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { | ||
1194 | IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", | ||
1195 | le32_to_cpu(rx_pkt_status)); | ||
1196 | return; | ||
1197 | } | ||
1198 | |||
1199 | /* This will be used in several places later */ | ||
1200 | rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); | ||
1201 | |||
1202 | /* rx_status carries information about the packet to mac80211 */ | ||
1203 | rx_status.mactime = le64_to_cpu(phy_res->timestamp); | ||
1204 | rx_status.freq = | ||
1205 | ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel)); | ||
1206 | rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? | ||
1207 | IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; | ||
1208 | rx_status.rate_idx = | ||
1209 | iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); | ||
1210 | rx_status.flag = 0; | ||
1211 | |||
1212 | /* TSF isn't reliable. In order to allow smooth user experience, | ||
1213 | * this W/A doesn't propagate it to the mac80211 */ | ||
1214 | /*rx_status.flag |= RX_FLAG_TSFT;*/ | ||
1215 | |||
1216 | priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); | ||
1217 | |||
1218 | /* Find max signal strength (dBm) among 3 antenna/receiver chains */ | ||
1219 | rx_status.signal = iwl_calc_rssi(priv, phy_res); | ||
1220 | |||
1221 | /* Meaningful noise values are available only from beacon statistics, | ||
1222 | * which are gathered only when associated, and indicate noise | ||
1223 | * only for the associated network channel ... | ||
1224 | * Ignore these noise values while scanning (other channels) */ | ||
1225 | if (iwl_is_associated(priv) && | ||
1226 | !test_bit(STATUS_SCANNING, &priv->status)) { | ||
1227 | rx_status.noise = priv->last_rx_noise; | ||
1228 | } else { | ||
1229 | rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | ||
1230 | } | ||
1231 | |||
1232 | /* Reset beacon noise level if not associated. */ | ||
1233 | if (!iwl_is_associated(priv)) | ||
1234 | priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | ||
1235 | |||
1236 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1237 | /* Set "1" to report good data frames in groups of 100 */ | ||
1238 | if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX)) | ||
1239 | iwl_dbg_report_frame(priv, phy_res, len, header, 1); | ||
1240 | #endif | ||
1241 | iwl_dbg_log_rx_data_frame(priv, len, header); | ||
1242 | IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n", | ||
1243 | rx_status.signal, rx_status.noise, | ||
1244 | (unsigned long long)rx_status.mactime); | ||
1245 | |||
1246 | /* | ||
1247 | * "antenna number" | ||
1248 | * | ||
1249 | * It seems that the antenna field in the phy flags value | ||
1250 | * is actually a bit field. This is undefined by radiotap, | ||
1251 | * it wants an actual antenna number but I always get "7" | ||
1252 | * for most legacy frames I receive indicating that the | ||
1253 | * same frame was received on all three RX chains. | ||
1254 | * | ||
1255 | * I think this field should be removed in favor of a | ||
1256 | * new 802.11n radiotap field "RX chains" that is defined | ||
1257 | * as a bitmask. | ||
1258 | */ | ||
1259 | rx_status.antenna = | ||
1260 | (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) | ||
1261 | >> RX_RES_PHY_FLAGS_ANTENNA_POS; | ||
1262 | |||
1263 | /* set the preamble flag if appropriate */ | ||
1264 | if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) | ||
1265 | rx_status.flag |= RX_FLAG_SHORTPRE; | ||
1266 | |||
1267 | /* Set up the HT phy flags */ | ||
1268 | if (rate_n_flags & RATE_MCS_HT_MSK) | ||
1269 | rx_status.flag |= RX_FLAG_HT; | ||
1270 | if (rate_n_flags & RATE_MCS_HT40_MSK) | ||
1271 | rx_status.flag |= RX_FLAG_40MHZ; | ||
1272 | if (rate_n_flags & RATE_MCS_SGI_MSK) | ||
1273 | rx_status.flag |= RX_FLAG_SHORT_GI; | ||
1274 | |||
1275 | iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status, | ||
1276 | rxb, &rx_status); | ||
1277 | } | ||
1278 | EXPORT_SYMBOL(iwl_rx_reply_rx); | ||
1279 | |||
1280 | /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). | ||
1281 | * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ | ||
1282 | void iwl_rx_reply_rx_phy(struct iwl_priv *priv, | ||
1283 | struct iwl_rx_mem_buffer *rxb) | ||
1284 | { | ||
1285 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1286 | priv->last_phy_res[0] = 1; | ||
1287 | memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), | ||
1288 | sizeof(struct iwl_rx_phy_res)); | ||
1289 | } | ||
1290 | EXPORT_SYMBOL(iwl_rx_reply_rx_phy); | ||
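The receive path removed here services two firmware notifications: REPLY_RX carries the PHY information inline, while REPLY_RX_MPDU_CMD relies on the PHY result cached earlier by iwl_rx_reply_rx_phy(). With the typed cache introduced in the iwl-dev.h hunk, the branch at the top of the handler can be written roughly as below; this is a sketch under that assumption, not the literal replacement code:

	if (pkt->hdr.cmd == REPLY_RX) {
		/* PHY data travels in this packet */
		phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
	} else {
		/* REPLY_RX_MPDU_CMD: use the copy cached by the PHY handler */
		if (!priv->_agn.last_phy_res_valid) {
			IWL_ERR(priv, "MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &priv->_agn.last_phy_res;
	}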
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index e8e4b5493e89..ae981932ce61 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
@@ -454,7 +454,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv, | |||
454 | added++; | 454 | added++; |
455 | } | 455 | } |
456 | 456 | ||
457 | IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added); | 457 | IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); |
458 | return added; | 458 | return added; |
459 | } | 459 | } |
460 | 460 | ||
@@ -814,10 +814,11 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
814 | */ | 814 | */ |
815 | scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; | 815 | scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; |
816 | 816 | ||
817 | /* Force use of chains B and C (0x6) for scan Rx for 4965 | 817 | /* Force use of chains B and C (0x6) for scan Rx |
818 | * Avoid A (0x1) because of its off-channel reception on A-band. | 818 | * Avoid A (0x1) for the device has off-channel reception |
819 | * on A-band. | ||
819 | */ | 820 | */ |
820 | if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) | 821 | if (priv->cfg->off_channel_workaround) |
821 | rx_ant = ANT_BC; | 822 | rx_ant = ANT_BC; |
822 | } else { | 823 | } else { |
823 | IWL_WARN(priv, "Invalid scan band count\n"); | 824 | IWL_WARN(priv, "Invalid scan band count\n"); |
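The scan hunk follows the same pattern as the thermal-throttle change earlier: a CSR_HW_REV_TYPE_4965 test becomes a capability flag in the device configuration, so antenna A, which suffers off-channel reception on the A band, is skipped for scan RX only on devices whose config requests it. The flags are presumably plain booleans in struct iwl_cfg; that layout is an assumption here, only the field names appear in the hunks:

	struct iwl_cfg {
		/* ... existing fields ... */
		bool temperature_kelvin;	/* temperature reported in Kelvin */
		bool off_channel_workaround;	/* avoid antenna A for A-band scan RX */
	};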
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index d401b6f226f9..d86ecd2f9ec2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c | |||
@@ -71,7 +71,7 @@ u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr) | |||
71 | (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) || | 71 | (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) || |
72 | ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) && | 72 | ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) && |
73 | (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) { | 73 | (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) { |
74 | IWL_ERR(priv, "Requested station info for sta %d before ready. \n", | 74 | IWL_ERR(priv, "Requested station info for sta %d before ready.\n", |
75 | ret); | 75 | ret); |
76 | ret = IWL_INVALID_STATION; | 76 | ret = IWL_INVALID_STATION; |
77 | } | 77 | } |
@@ -143,7 +143,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv, | |||
143 | sta_id); | 143 | sta_id); |
144 | break; | 144 | break; |
145 | case ADD_STA_MODIFY_NON_EXIST_STA: | 145 | case ADD_STA_MODIFY_NON_EXIST_STA: |
146 | IWL_ERR(priv, "Attempting to modify non-existing station %d \n", | 146 | IWL_ERR(priv, "Attempting to modify non-existing station %d\n", |
147 | sta_id); | 147 | sta_id); |
148 | break; | 148 | break; |
149 | default: | 149 | default: |
@@ -194,7 +194,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, | |||
194 | .flags = flags, | 194 | .flags = flags, |
195 | .data = data, | 195 | .data = data, |
196 | }; | 196 | }; |
197 | u8 sta_id = sta->sta.sta_id; | 197 | u8 sta_id __maybe_unused = sta->sta.sta_id; |
198 | 198 | ||
199 | IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", | 199 | IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", |
200 | sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); | 200 | sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); |
@@ -425,6 +425,7 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap) | |||
425 | .reserved1 = 0, | 425 | .reserved1 = 0, |
426 | }; | 426 | }; |
427 | u32 rate_flags; | 427 | u32 rate_flags; |
428 | int ret = 0; | ||
428 | 429 | ||
429 | /* Set up the rate scaling to start at selected rate, fall back | 430 | /* Set up the rate scaling to start at selected rate, fall back |
430 | * all the way down to 1M in IEEE order, and then spin on 1M */ | 431 | * all the way down to 1M in IEEE order, and then spin on 1M */ |
@@ -458,8 +459,10 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap) | |||
458 | /* Update the rate scaling for control frame Tx to AP */ | 459 | /* Update the rate scaling for control frame Tx to AP */ |
459 | link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id; | 460 | link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id; |
460 | 461 | ||
461 | iwl_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, | 462 | ret = iwl_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, |
462 | sizeof(link_cmd), &link_cmd); | 463 | sizeof(link_cmd), &link_cmd); |
464 | if (ret) | ||
465 | IWL_ERR(priv, "REPLY_TX_LINK_QUALITY_CMD failed (%d)\n", ret); | ||
463 | } | 466 | } |
464 | 467 | ||
465 | /* | 468 | /* |
@@ -571,7 +574,7 @@ static int iwl_remove_station(struct iwl_priv *priv, struct ieee80211_sta *sta) | |||
571 | 574 | ||
572 | if (!iwl_is_ready(priv)) { | 575 | if (!iwl_is_ready(priv)) { |
573 | IWL_DEBUG_INFO(priv, | 576 | IWL_DEBUG_INFO(priv, |
574 | "Unable to remove station %pM, device not ready. \n", | 577 | "Unable to remove station %pM, device not ready.\n", |
575 | sta->addr); | 578 | sta->addr); |
576 | /* | 579 | /* |
577 | * It is typical for stations to be removed when we are | 580 | * It is typical for stations to be removed when we are |
@@ -668,7 +671,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv, bool force) | |||
668 | } else { | 671 | } else { |
669 | for (i = 0; i < priv->hw_params.max_stations; i++) { | 672 | for (i = 0; i < priv->hw_params.max_stations; i++) { |
670 | if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { | 673 | if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { |
671 | IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d \n", i); | 674 | IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i); |
672 | priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; | 675 | priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; |
673 | cleared = true; | 676 | cleared = true; |
674 | } | 677 | } |
@@ -759,7 +762,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv) | |||
759 | } | 762 | } |
760 | EXPORT_SYMBOL(iwl_get_free_ucode_key_index); | 763 | EXPORT_SYMBOL(iwl_get_free_ucode_key_index); |
761 | 764 | ||
762 | int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) | 765 | static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) |
763 | { | 766 | { |
764 | int i, not_empty = 0; | 767 | int i, not_empty = 0; |
765 | u8 buff[sizeof(struct iwl_wep_cmd) + | 768 | u8 buff[sizeof(struct iwl_wep_cmd) + |
@@ -803,7 +806,14 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) | |||
803 | else | 806 | else |
804 | return 0; | 807 | return 0; |
805 | } | 808 | } |
806 | EXPORT_SYMBOL(iwl_send_static_wepkey_cmd); | 809 | |
810 | int iwl_restore_default_wep_keys(struct iwl_priv *priv) | ||
811 | { | ||
812 | WARN_ON(!mutex_is_locked(&priv->mutex)); | ||
813 | |||
814 | return iwl_send_static_wepkey_cmd(priv, 0); | ||
815 | } | ||
816 | EXPORT_SYMBOL(iwl_restore_default_wep_keys); | ||
807 | 817 | ||
808 | int iwl_remove_default_wep_key(struct iwl_priv *priv, | 818 | int iwl_remove_default_wep_key(struct iwl_priv *priv, |
809 | struct ieee80211_key_conf *keyconf) | 819 | struct ieee80211_key_conf *keyconf) |
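With iwl_send_static_wepkey_cmd() now static, external callers go through the new iwl_restore_default_wep_keys() wrapper, which insists on priv->mutex being held. A hedged caller sketch; the function name and call site are hypothetical:

	static void example_restore_keys(struct iwl_priv *priv)
	{
		int ret;

		mutex_lock(&priv->mutex);	/* satisfies the WARN_ON in the wrapper */
		ret = iwl_restore_default_wep_keys(priv);
		if (ret)
			IWL_ERR(priv, "Restoring default WEP keys failed (%d)\n", ret);
		mutex_unlock(&priv->mutex);
	}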
@@ -815,11 +825,6 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, | |||
815 | IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", | 825 | IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", |
816 | keyconf->keyidx); | 826 | keyconf->keyidx); |
817 | 827 | ||
818 | if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) | ||
819 | IWL_ERR(priv, "index %d not used in uCode key table.\n", | ||
820 | keyconf->keyidx); | ||
821 | |||
822 | priv->default_wep_key--; | ||
823 | memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); | 828 | memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); |
824 | if (iwl_is_rfkill(priv)) { | 829 | if (iwl_is_rfkill(priv)) { |
825 | IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); | 830 | IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); |
@@ -851,12 +856,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv, | |||
851 | keyconf->hw_key_idx = HW_KEY_DEFAULT; | 856 | keyconf->hw_key_idx = HW_KEY_DEFAULT; |
852 | priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP; | 857 | priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP; |
853 | 858 | ||
854 | priv->default_wep_key++; | ||
855 | |||
856 | if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table)) | ||
857 | IWL_ERR(priv, "index %d already used in uCode key table.\n", | ||
858 | keyconf->keyidx); | ||
859 | |||
860 | priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; | 859 | priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; |
861 | memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key, | 860 | memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key, |
862 | keyconf->keylen); | 861 | keyconf->keylen); |
@@ -1105,7 +1104,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, | |||
1105 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | 1104 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; |
1106 | 1105 | ||
1107 | if (iwl_is_rfkill(priv)) { | 1106 | if (iwl_is_rfkill(priv)) { |
1108 | IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n"); | 1107 | IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n"); |
1109 | spin_unlock_irqrestore(&priv->sta_lock, flags); | 1108 | spin_unlock_irqrestore(&priv->sta_lock, flags); |
1110 | return 0; | 1109 | return 0; |
1111 | } | 1110 | } |
@@ -1191,13 +1190,9 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, | |||
1191 | .data = lq, | 1190 | .data = lq, |
1192 | }; | 1191 | }; |
1193 | 1192 | ||
1194 | if ((lq->sta_id == 0xFF) && | 1193 | if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) |
1195 | (priv->iw_mode == NL80211_IFTYPE_ADHOC)) | ||
1196 | return -EINVAL; | 1194 | return -EINVAL; |
1197 | 1195 | ||
1198 | if (lq->sta_id == 0xFF) | ||
1199 | lq->sta_id = IWL_AP_ID; | ||
1200 | |||
1201 | iwl_dump_lq_cmd(priv, lq); | 1196 | iwl_dump_lq_cmd(priv, lq); |
1202 | BUG_ON(init && (cmd.flags & CMD_ASYNC)); | 1197 | BUG_ON(init && (cmd.flags & CMD_ASYNC)); |
1203 | 1198 | ||
@@ -1207,7 +1202,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, | |||
1207 | return ret; | 1202 | return ret; |
1208 | 1203 | ||
1209 | if (init) { | 1204 | if (init) { |
1210 | IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d \n", | 1205 | IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n", |
1211 | lq->sta_id); | 1206 | lq->sta_id); |
1212 | spin_lock_irqsave(&priv->sta_lock, flags_spin); | 1207 | spin_lock_irqsave(&priv->sta_lock, flags_spin); |
1213 | priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; | 1208 | priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; |
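iwl_send_lq_cmd() no longer remaps a 0xFF station id to IWL_AP_ID; it now warns and bails out on IWL_INVALID_STATION, so callers are expected to resolve the station first. A sketch of the resulting calling convention; the surrounding function, the command struct name, and the exact parameter list are assumptions:

	static void example_send_lq(struct iwl_priv *priv, const u8 *addr,
				    struct iwl_link_quality_cmd *lq)
	{
		u8 sta_id = iwl_find_station(priv, addr);

		if (sta_id == IWL_INVALID_STATION)
			return;			/* unknown station: nothing to send */

		lq->sta_id = sta_id;
		iwl_send_lq_cmd(priv, lq, CMD_SYNC, false);
	}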
@@ -1395,6 +1390,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) | |||
1395 | 1390 | ||
1396 | iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); | 1391 | iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); |
1397 | } | 1392 | } |
1393 | EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count); | ||
1398 | 1394 | ||
1399 | int iwl_mac_sta_remove(struct ieee80211_hw *hw, | 1395 | int iwl_mac_sta_remove(struct ieee80211_hw *hw, |
1400 | struct ieee80211_vif *vif, | 1396 | struct ieee80211_vif *vif, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h index 87a34997a758..42cd2f4a01cd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.h +++ b/drivers/net/wireless/iwlwifi/iwl-sta.h | |||
@@ -44,11 +44,11 @@ | |||
44 | */ | 44 | */ |
45 | u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid); | 45 | u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid); |
46 | 46 | ||
47 | int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty); | ||
48 | int iwl_remove_default_wep_key(struct iwl_priv *priv, | 47 | int iwl_remove_default_wep_key(struct iwl_priv *priv, |
49 | struct ieee80211_key_conf *key); | 48 | struct ieee80211_key_conf *key); |
50 | int iwl_set_default_wep_key(struct iwl_priv *priv, | 49 | int iwl_set_default_wep_key(struct iwl_priv *priv, |
51 | struct ieee80211_key_conf *key); | 50 | struct ieee80211_key_conf *key); |
51 | int iwl_restore_default_wep_keys(struct iwl_priv *priv); | ||
52 | int iwl_set_dynamic_key(struct iwl_priv *priv, | 52 | int iwl_set_dynamic_key(struct iwl_priv *priv, |
53 | struct ieee80211_key_conf *key, u8 sta_id); | 53 | struct ieee80211_key_conf *key, u8 sta_id); |
54 | int iwl_remove_dynamic_key(struct iwl_priv *priv, | 54 | int iwl_remove_dynamic_key(struct iwl_priv *priv, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index b798fbabc3b6..1ece2ea09773 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -38,84 +38,6 @@ | |||
38 | #include "iwl-io.h" | 38 | #include "iwl-io.h" |
39 | #include "iwl-helpers.h" | 39 | #include "iwl-helpers.h" |
40 | 40 | ||
41 | /* | ||
42 | * mac80211 queues, ACs, hardware queues, FIFOs. | ||
43 | * | ||
44 | * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues | ||
45 | * | ||
46 | * Mac80211 uses the following numbers, which we get as from it | ||
47 | * by way of skb_get_queue_mapping(skb): | ||
48 | * | ||
49 | * VO 0 | ||
50 | * VI 1 | ||
51 | * BE 2 | ||
52 | * BK 3 | ||
53 | * | ||
54 | * | ||
55 | * Regular (not A-MPDU) frames are put into hardware queues corresponding | ||
56 | * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their | ||
57 | * own queue per aggregation session (RA/TID combination), such queues are | ||
58 | * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In | ||
59 | * order to map frames to the right queue, we also need an AC->hw queue | ||
60 | * mapping. This is implemented here. | ||
61 | * | ||
62 | * Due to the way hw queues are set up (by the hw specific modules like | ||
63 | * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity | ||
64 | * mapping. | ||
65 | */ | ||
66 | |||
67 | static const u8 tid_to_ac[] = { | ||
68 | /* this matches the mac80211 numbers */ | ||
69 | 2, 3, 3, 2, 1, 1, 0, 0 | ||
70 | }; | ||
71 | |||
72 | static const u8 ac_to_fifo[] = { | ||
73 | IWL_TX_FIFO_VO, | ||
74 | IWL_TX_FIFO_VI, | ||
75 | IWL_TX_FIFO_BE, | ||
76 | IWL_TX_FIFO_BK, | ||
77 | }; | ||
78 | |||
79 | static inline int get_fifo_from_ac(u8 ac) | ||
80 | { | ||
81 | return ac_to_fifo[ac]; | ||
82 | } | ||
83 | |||
84 | static inline int get_queue_from_ac(u16 ac) | ||
85 | { | ||
86 | return ac; | ||
87 | } | ||
88 | |||
89 | static inline int get_fifo_from_tid(u16 tid) | ||
90 | { | ||
91 | if (likely(tid < ARRAY_SIZE(tid_to_ac))) | ||
92 | return get_fifo_from_ac(tid_to_ac[tid]); | ||
93 | |||
94 | /* no support for TIDs 8-15 yet */ | ||
95 | return -EINVAL; | ||
96 | } | ||
97 | |||
98 | static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv, | ||
99 | struct iwl_dma_ptr *ptr, size_t size) | ||
100 | { | ||
101 | ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, | ||
102 | GFP_KERNEL); | ||
103 | if (!ptr->addr) | ||
104 | return -ENOMEM; | ||
105 | ptr->size = size; | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | static inline void iwl_free_dma_ptr(struct iwl_priv *priv, | ||
110 | struct iwl_dma_ptr *ptr) | ||
111 | { | ||
112 | if (unlikely(!ptr->addr)) | ||
113 | return; | ||
114 | |||
115 | dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); | ||
116 | memset(ptr, 0, sizeof(*ptr)); | ||
117 | } | ||
118 | |||
119 | /** | 41 | /** |
120 | * iwl_txq_update_write_ptr - Send new write index to hardware | 42 | * iwl_txq_update_write_ptr - Send new write index to hardware |
121 | */ | 43 | */ |
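The block removed above carried the TID/AC/FIFO bookkeeping out of iwl-tx.c. Restated as a compact sketch of the mapping those removed tables implemented (illustration only; the helper name is invented):

	static const u8 tid_to_ac[] = { 2, 3, 3, 2, 1, 1, 0, 0 };	/* mac80211 AC numbers */
	static const u8 ac_to_fifo[] = {
		IWL_TX_FIFO_VO, IWL_TX_FIFO_VI, IWL_TX_FIFO_BE, IWL_TX_FIFO_BK,
	};

	static int example_fifo_for_tid(u16 tid)
	{
		if (tid >= ARRAY_SIZE(tid_to_ac))
			return -EINVAL;		/* TIDs 8-15 not supported */
		return ac_to_fifo[tid_to_ac[tid]];
	}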
@@ -493,598 +415,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, | |||
493 | } | 415 | } |
494 | EXPORT_SYMBOL(iwl_tx_queue_reset); | 416 | EXPORT_SYMBOL(iwl_tx_queue_reset); |
495 | 417 | ||
496 | /** | ||
497 | * iwl_hw_txq_ctx_free - Free TXQ Context | ||
498 | * | ||
499 | * Destroy all TX DMA queues and structures | ||
500 | */ | ||
501 | void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | ||
502 | { | ||
503 | int txq_id; | ||
504 | |||
505 | /* Tx queues */ | ||
506 | if (priv->txq) { | ||
507 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | ||
508 | if (txq_id == IWL_CMD_QUEUE_NUM) | ||
509 | iwl_cmd_queue_free(priv); | ||
510 | else | ||
511 | iwl_tx_queue_free(priv, txq_id); | ||
512 | } | ||
513 | iwl_free_dma_ptr(priv, &priv->kw); | ||
514 | |||
515 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); | ||
516 | |||
517 | /* free tx queue structure */ | ||
518 | iwl_free_txq_mem(priv); | ||
519 | } | ||
520 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); | ||
521 | |||
522 | /** | ||
523 | * iwl_txq_ctx_alloc - allocate TX queue context | ||
524 | * Allocate all Tx DMA structures and initialize them | ||
525 | * | ||
526 | * @param priv | ||
527 | * @return error code | ||
528 | */ | ||
529 | int iwl_txq_ctx_alloc(struct iwl_priv *priv) | ||
530 | { | ||
531 | int ret; | ||
532 | int txq_id, slots_num; | ||
533 | unsigned long flags; | ||
534 | |||
535 | /* Free all tx/cmd queues and keep-warm buffer */ | ||
536 | iwl_hw_txq_ctx_free(priv); | ||
537 | |||
538 | ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls, | ||
539 | priv->hw_params.scd_bc_tbls_size); | ||
540 | if (ret) { | ||
541 | IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); | ||
542 | goto error_bc_tbls; | ||
543 | } | ||
544 | /* Alloc keep-warm buffer */ | ||
545 | ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); | ||
546 | if (ret) { | ||
547 | IWL_ERR(priv, "Keep Warm allocation failed\n"); | ||
548 | goto error_kw; | ||
549 | } | ||
550 | |||
551 | /* allocate tx queue structure */ | ||
552 | ret = iwl_alloc_txq_mem(priv); | ||
553 | if (ret) | ||
554 | goto error; | ||
555 | |||
556 | spin_lock_irqsave(&priv->lock, flags); | ||
557 | |||
558 | /* Turn off all Tx DMA fifos */ | ||
559 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
560 | |||
561 | /* Tell NIC where to find the "keep warm" buffer */ | ||
562 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | ||
563 | |||
564 | spin_unlock_irqrestore(&priv->lock, flags); | ||
565 | |||
566 | /* Alloc and init all Tx queues, including the command queue (#4) */ | ||
567 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
568 | slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? | ||
569 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
570 | ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, | ||
571 | txq_id); | ||
572 | if (ret) { | ||
573 | IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); | ||
574 | goto error; | ||
575 | } | ||
576 | } | ||
577 | |||
578 | return ret; | ||
579 | |||
580 | error: | ||
581 | iwl_hw_txq_ctx_free(priv); | ||
582 | iwl_free_dma_ptr(priv, &priv->kw); | ||
583 | error_kw: | ||
584 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); | ||
585 | error_bc_tbls: | ||
586 | return ret; | ||
587 | } | ||
588 | |||
589 | void iwl_txq_ctx_reset(struct iwl_priv *priv) | ||
590 | { | ||
591 | int txq_id, slots_num; | ||
592 | unsigned long flags; | ||
593 | |||
594 | spin_lock_irqsave(&priv->lock, flags); | ||
595 | |||
596 | /* Turn off all Tx DMA fifos */ | ||
597 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
598 | |||
599 | /* Tell NIC where to find the "keep warm" buffer */ | ||
600 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | ||
601 | |||
602 | spin_unlock_irqrestore(&priv->lock, flags); | ||
603 | |||
604 | /* Alloc and init all Tx queues, including the command queue (#4) */ | ||
605 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
606 | slots_num = txq_id == IWL_CMD_QUEUE_NUM ? | ||
607 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
608 | iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); | ||
609 | } | ||
610 | } | ||
611 | |||
612 | /** | ||
613 | * iwl_txq_ctx_stop - Stop all Tx DMA channels | ||
614 | */ | ||
615 | void iwl_txq_ctx_stop(struct iwl_priv *priv) | ||
616 | { | ||
617 | int ch; | ||
618 | unsigned long flags; | ||
619 | |||
620 | /* Turn off all Tx DMA fifos */ | ||
621 | spin_lock_irqsave(&priv->lock, flags); | ||
622 | |||
623 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
624 | |||
625 | /* Stop each Tx DMA channel, and wait for it to be idle */ | ||
626 | for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { | ||
627 | iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | ||
628 | iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, | ||
629 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), | ||
630 | 1000); | ||
631 | } | ||
632 | spin_unlock_irqrestore(&priv->lock, flags); | ||
633 | } | ||
634 | EXPORT_SYMBOL(iwl_txq_ctx_stop); | ||
635 | |||
636 | /* | ||
637 | * handle build REPLY_TX command notification. | ||
638 | */ | ||
639 | static void iwl_tx_cmd_build_basic(struct iwl_priv *priv, | ||
640 | struct iwl_tx_cmd *tx_cmd, | ||
641 | struct ieee80211_tx_info *info, | ||
642 | struct ieee80211_hdr *hdr, | ||
643 | u8 std_id) | ||
644 | { | ||
645 | __le16 fc = hdr->frame_control; | ||
646 | __le32 tx_flags = tx_cmd->tx_flags; | ||
647 | |||
648 | tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
649 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { | ||
650 | tx_flags |= TX_CMD_FLG_ACK_MSK; | ||
651 | if (ieee80211_is_mgmt(fc)) | ||
652 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
653 | if (ieee80211_is_probe_resp(fc) && | ||
654 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) | ||
655 | tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
656 | } else { | ||
657 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | ||
658 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
659 | } | ||
660 | |||
661 | if (ieee80211_is_back_req(fc)) | ||
662 | tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; | ||
663 | |||
664 | |||
665 | tx_cmd->sta_id = std_id; | ||
666 | if (ieee80211_has_morefrags(fc)) | ||
667 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; | ||
668 | |||
669 | if (ieee80211_is_data_qos(fc)) { | ||
670 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
671 | tx_cmd->tid_tspec = qc[0] & 0xf; | ||
672 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | ||
673 | } else { | ||
674 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
675 | } | ||
676 | |||
677 | priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); | ||
678 | |||
679 | if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) | ||
680 | tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; | ||
681 | |||
682 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | ||
683 | if (ieee80211_is_mgmt(fc)) { | ||
684 | if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) | ||
685 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); | ||
686 | else | ||
687 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); | ||
688 | } else { | ||
689 | tx_cmd->timeout.pm_frame_timeout = 0; | ||
690 | } | ||
691 | |||
692 | tx_cmd->driver_txop = 0; | ||
693 | tx_cmd->tx_flags = tx_flags; | ||
694 | tx_cmd->next_frame_len = 0; | ||
695 | } | ||
696 | |||
697 | #define RTS_DFAULT_RETRY_LIMIT 60 | ||
698 | |||
699 | static void iwl_tx_cmd_build_rate(struct iwl_priv *priv, | ||
700 | struct iwl_tx_cmd *tx_cmd, | ||
701 | struct ieee80211_tx_info *info, | ||
702 | __le16 fc) | ||
703 | { | ||
704 | u32 rate_flags; | ||
705 | int rate_idx; | ||
706 | u8 rts_retry_limit; | ||
707 | u8 data_retry_limit; | ||
708 | u8 rate_plcp; | ||
709 | |||
710 | /* Set retry limit on DATA packets and Probe Responses*/ | ||
711 | if (ieee80211_is_probe_resp(fc)) | ||
712 | data_retry_limit = 3; | ||
713 | else | ||
714 | data_retry_limit = IWL_DEFAULT_TX_RETRY; | ||
715 | tx_cmd->data_retry_limit = data_retry_limit; | ||
716 | |||
717 | /* Set retry limit on RTS packets */ | ||
718 | rts_retry_limit = RTS_DFAULT_RETRY_LIMIT; | ||
719 | if (data_retry_limit < rts_retry_limit) | ||
720 | rts_retry_limit = data_retry_limit; | ||
721 | tx_cmd->rts_retry_limit = rts_retry_limit; | ||
722 | |||
723 | /* DATA packets will use the uCode station table for rate/antenna | ||
724 | * selection */ | ||
725 | if (ieee80211_is_data(fc)) { | ||
726 | tx_cmd->initial_rate_index = 0; | ||
727 | tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; | ||
728 | return; | ||
729 | } | ||
730 | |||
731 | /** | ||
732 | * If the current TX rate stored in mac80211 has the MCS bit set, it's | ||
733 | * not really a TX rate. Thus, we use the lowest supported rate for | ||
734 | * this band. Also use the lowest supported rate if the stored rate | ||
735 | * index is invalid. | ||
736 | */ | ||
737 | rate_idx = info->control.rates[0].idx; | ||
738 | if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || | ||
739 | (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) | ||
740 | rate_idx = rate_lowest_index(&priv->bands[info->band], | ||
741 | info->control.sta); | ||
742 | /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ | ||
743 | if (info->band == IEEE80211_BAND_5GHZ) | ||
744 | rate_idx += IWL_FIRST_OFDM_RATE; | ||
745 | /* Get PLCP rate for tx_cmd->rate_n_flags */ | ||
746 | rate_plcp = iwl_rates[rate_idx].plcp; | ||
747 | /* Zero out flags for this packet */ | ||
748 | rate_flags = 0; | ||
749 | |||
750 | /* Set CCK flag as needed */ | ||
751 | if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) | ||
752 | rate_flags |= RATE_MCS_CCK_MSK; | ||
753 | |||
754 | /* Set up RTS and CTS flags for certain packets */ | ||
755 | switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { | ||
756 | case cpu_to_le16(IEEE80211_STYPE_AUTH): | ||
757 | case cpu_to_le16(IEEE80211_STYPE_DEAUTH): | ||
758 | case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): | ||
759 | case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): | ||
760 | if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) { | ||
761 | tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK; | ||
762 | tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK; | ||
763 | } | ||
764 | break; | ||
765 | default: | ||
766 | break; | ||
767 | } | ||
768 | |||
769 | /* Set up antennas */ | ||
770 | priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); | ||
771 | rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); | ||
772 | |||
773 | /* Set the rate in the TX cmd */ | ||
774 | tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); | ||
775 | } | ||
776 | |||
777 | static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv, | ||
778 | struct ieee80211_tx_info *info, | ||
779 | struct iwl_tx_cmd *tx_cmd, | ||
780 | struct sk_buff *skb_frag, | ||
781 | int sta_id) | ||
782 | { | ||
783 | struct ieee80211_key_conf *keyconf = info->control.hw_key; | ||
784 | |||
785 | switch (keyconf->alg) { | ||
786 | case ALG_CCMP: | ||
787 | tx_cmd->sec_ctl = TX_CMD_SEC_CCM; | ||
788 | memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); | ||
789 | if (info->flags & IEEE80211_TX_CTL_AMPDU) | ||
790 | tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; | ||
791 | IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); | ||
792 | break; | ||
793 | |||
794 | case ALG_TKIP: | ||
795 | tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; | ||
796 | ieee80211_get_tkip_key(keyconf, skb_frag, | ||
797 | IEEE80211_TKIP_P2_KEY, tx_cmd->key); | ||
798 | IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); | ||
799 | break; | ||
800 | |||
801 | case ALG_WEP: | ||
802 | tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | | ||
803 | (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); | ||
804 | |||
805 | if (keyconf->keylen == WEP_KEY_LEN_128) | ||
806 | tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; | ||
807 | |||
808 | memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); | ||
809 | |||
810 | IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " | ||
811 | "with key %d\n", keyconf->keyidx); | ||
812 | break; | ||
813 | |||
814 | default: | ||
815 | IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg); | ||
816 | break; | ||
817 | } | ||
818 | } | ||
819 | |||
820 | /* | ||
821 | * start REPLY_TX command process | ||
822 | */ | ||
823 | int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | ||
824 | { | ||
825 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
826 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
827 | struct ieee80211_sta *sta = info->control.sta; | ||
828 | struct iwl_station_priv *sta_priv = NULL; | ||
829 | struct iwl_tx_queue *txq; | ||
830 | struct iwl_queue *q; | ||
831 | struct iwl_device_cmd *out_cmd; | ||
832 | struct iwl_cmd_meta *out_meta; | ||
833 | struct iwl_tx_cmd *tx_cmd; | ||
834 | int swq_id, txq_id; | ||
835 | dma_addr_t phys_addr; | ||
836 | dma_addr_t txcmd_phys; | ||
837 | dma_addr_t scratch_phys; | ||
838 | u16 len, len_org, firstlen, secondlen; | ||
839 | u16 seq_number = 0; | ||
840 | __le16 fc; | ||
841 | u8 hdr_len; | ||
842 | u8 sta_id; | ||
843 | u8 wait_write_ptr = 0; | ||
844 | u8 tid = 0; | ||
845 | u8 *qc = NULL; | ||
846 | unsigned long flags; | ||
847 | |||
848 | spin_lock_irqsave(&priv->lock, flags); | ||
849 | if (iwl_is_rfkill(priv)) { | ||
850 | IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); | ||
851 | goto drop_unlock; | ||
852 | } | ||
853 | |||
854 | fc = hdr->frame_control; | ||
855 | |||
856 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
857 | if (ieee80211_is_auth(fc)) | ||
858 | IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); | ||
859 | else if (ieee80211_is_assoc_req(fc)) | ||
860 | IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); | ||
861 | else if (ieee80211_is_reassoc_req(fc)) | ||
862 | IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); | ||
863 | #endif | ||
864 | |||
865 | hdr_len = ieee80211_hdrlen(fc); | ||
866 | |||
867 | /* Find (or create) index into station table for destination station */ | ||
868 | if (info->flags & IEEE80211_TX_CTL_INJECTED) | ||
869 | sta_id = priv->hw_params.bcast_sta_id; | ||
870 | else | ||
871 | sta_id = iwl_get_sta_id(priv, hdr); | ||
872 | if (sta_id == IWL_INVALID_STATION) { | ||
873 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", | ||
874 | hdr->addr1); | ||
875 | goto drop_unlock; | ||
876 | } | ||
877 | |||
878 | IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); | ||
879 | |||
880 | if (sta) | ||
881 | sta_priv = (void *)sta->drv_priv; | ||
882 | |||
883 | if (sta_priv && sta_id != priv->hw_params.bcast_sta_id && | ||
884 | sta_priv->asleep) { | ||
885 | WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)); | ||
886 | /* | ||
887 | * This sends an asynchronous command to the device, | ||
888 | * but we can rely on it being processed before the | ||
889 | * next frame is processed -- and the next frame to | ||
890 | * this station is the one that will consume this | ||
891 | * counter. | ||
892 | * For now set the counter to just 1 since we do not | ||
893 | * support uAPSD yet. | ||
894 | */ | ||
895 | iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); | ||
896 | } | ||
897 | |||
898 | txq_id = get_queue_from_ac(skb_get_queue_mapping(skb)); | ||
899 | if (ieee80211_is_data_qos(fc)) { | ||
900 | qc = ieee80211_get_qos_ctl(hdr); | ||
901 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; | ||
902 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
903 | goto drop_unlock; | ||
904 | seq_number = priv->stations[sta_id].tid[tid].seq_number; | ||
905 | seq_number &= IEEE80211_SCTL_SEQ; | ||
906 | hdr->seq_ctrl = hdr->seq_ctrl & | ||
907 | cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
908 | hdr->seq_ctrl |= cpu_to_le16(seq_number); | ||
909 | seq_number += 0x10; | ||
910 | /* aggregation is on for this <sta,tid> */ | ||
911 | if (info->flags & IEEE80211_TX_CTL_AMPDU && | ||
912 | priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { | ||
913 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; | ||
914 | } | ||
915 | } | ||
916 | |||
917 | txq = &priv->txq[txq_id]; | ||
918 | swq_id = txq->swq_id; | ||
919 | q = &txq->q; | ||
920 | |||
921 | if (unlikely(iwl_queue_space(q) < q->high_mark)) | ||
922 | goto drop_unlock; | ||
923 | |||
924 | if (ieee80211_is_data_qos(fc)) | ||
925 | priv->stations[sta_id].tid[tid].tfds_in_queue++; | ||
926 | |||
927 | /* Set up driver data for this TFD */ | ||
928 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | ||
929 | txq->txb[q->write_ptr].skb[0] = skb; | ||
930 | |||
931 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | ||
932 | out_cmd = txq->cmd[q->write_ptr]; | ||
933 | out_meta = &txq->meta[q->write_ptr]; | ||
934 | tx_cmd = &out_cmd->cmd.tx; | ||
935 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | ||
936 | memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); | ||
937 | |||
938 | /* | ||
939 | * Set up the Tx-command (not MAC!) header. | ||
940 | * Store the chosen Tx queue and TFD index within the sequence field; | ||
941 | * after Tx, uCode's Tx response will return this value so driver can | ||
942 | * locate the frame within the tx queue and do post-tx processing. | ||
943 | */ | ||
944 | out_cmd->hdr.cmd = REPLY_TX; | ||
945 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
946 | INDEX_TO_SEQ(q->write_ptr))); | ||
947 | |||
948 | /* Copy MAC header from skb into command buffer */ | ||
949 | memcpy(tx_cmd->hdr, hdr, hdr_len); | ||
950 | |||
951 | |||
952 | /* Total # bytes to be transmitted */ | ||
953 | len = (u16)skb->len; | ||
954 | tx_cmd->len = cpu_to_le16(len); | ||
955 | |||
956 | if (info->control.hw_key) | ||
957 | iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); | ||
958 | |||
959 | /* TODO need this for burst mode later on */ | ||
960 | iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); | ||
961 | iwl_dbg_log_tx_data_frame(priv, len, hdr); | ||
962 | |||
963 | iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc); | ||
964 | |||
965 | iwl_update_stats(priv, true, fc, len); | ||
966 | /* | ||
967 | * Use the first empty entry in this queue's command buffer array | ||
968 | * to contain the Tx command and MAC header concatenated together | ||
969 | * (payload data will be in another buffer). | ||
970 | * Size of this varies, due to varying MAC header length. | ||
971 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
972 | * of the MAC header (device reads on dword boundaries). | ||
973 | * We'll tell device about this padding later. | ||
974 | */ | ||
975 | len = sizeof(struct iwl_tx_cmd) + | ||
976 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
977 | |||
978 | len_org = len; | ||
979 | firstlen = len = (len + 3) & ~3; | ||
980 | |||
981 | if (len_org != len) | ||
982 | len_org = 1; | ||
983 | else | ||
984 | len_org = 0; | ||
985 | |||
986 | /* Tell NIC about any 2-byte padding after MAC header */ | ||
987 | if (len_org) | ||
988 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
989 | |||
990 | /* Physical address of this Tx command's header (not MAC header!), | ||
991 | * within command buffer array. */ | ||
992 | txcmd_phys = pci_map_single(priv->pci_dev, | ||
993 | &out_cmd->hdr, len, | ||
994 | PCI_DMA_BIDIRECTIONAL); | ||
995 | pci_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
996 | pci_unmap_len_set(out_meta, len, len); | ||
997 | /* Add buffer containing Tx command and MAC(!) header to TFD's | ||
998 | * first entry */ | ||
999 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
1000 | txcmd_phys, len, 1, 0); | ||
1001 | |||
1002 | if (!ieee80211_has_morefrags(hdr->frame_control)) { | ||
1003 | txq->need_update = 1; | ||
1004 | if (qc) | ||
1005 | priv->stations[sta_id].tid[tid].seq_number = seq_number; | ||
1006 | } else { | ||
1007 | wait_write_ptr = 1; | ||
1008 | txq->need_update = 0; | ||
1009 | } | ||
1010 | |||
1011 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | ||
1012 | * if any (802.11 null frames have no payload). */ | ||
1013 | secondlen = len = skb->len - hdr_len; | ||
1014 | if (len) { | ||
1015 | phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, | ||
1016 | len, PCI_DMA_TODEVICE); | ||
1017 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
1018 | phys_addr, len, | ||
1019 | 0, 0); | ||
1020 | } | ||
1021 | |||
1022 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + | ||
1023 | offsetof(struct iwl_tx_cmd, scratch); | ||
1024 | |||
1025 | len = sizeof(struct iwl_tx_cmd) + | ||
1026 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
1027 | /* take back ownership of DMA buffer to enable update */ | ||
1028 | pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, | ||
1029 | len, PCI_DMA_BIDIRECTIONAL); | ||
1030 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
1031 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
1032 | |||
1033 | IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", | ||
1034 | le16_to_cpu(out_cmd->hdr.sequence)); | ||
1035 | IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags)); | ||
1036 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); | ||
1037 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); | ||
1038 | |||
1039 | /* Set up entry for this TFD in Tx byte-count array */ | ||
1040 | if (info->flags & IEEE80211_TX_CTL_AMPDU) | ||
1041 | priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, | ||
1042 | le16_to_cpu(tx_cmd->len)); | ||
1043 | |||
1044 | pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, | ||
1045 | len, PCI_DMA_BIDIRECTIONAL); | ||
1046 | |||
1047 | trace_iwlwifi_dev_tx(priv, | ||
1048 | &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], | ||
1049 | sizeof(struct iwl_tfd), | ||
1050 | &out_cmd->hdr, firstlen, | ||
1051 | skb->data + hdr_len, secondlen); | ||
1052 | |||
1053 | /* Tell device the write index *just past* this latest filled TFD */ | ||
1054 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
1055 | iwl_txq_update_write_ptr(priv, txq); | ||
1056 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1057 | |||
1058 | /* | ||
1059 | * At this point the frame is "transmitted" successfully | ||
1060 | * and we will get a TX status notification eventually, | ||
1061 | * regardless of the value of ret. "ret" only indicates | ||
1062 | * whether or not we should update the write pointer. | ||
1063 | */ | ||
1064 | |||
1065 | /* avoid atomic ops if it isn't an associated client */ | ||
1066 | if (sta_priv && sta_priv->client) | ||
1067 | atomic_inc(&sta_priv->pending_frames); | ||
1068 | |||
1069 | if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { | ||
1070 | if (wait_write_ptr) { | ||
1071 | spin_lock_irqsave(&priv->lock, flags); | ||
1072 | txq->need_update = 1; | ||
1073 | iwl_txq_update_write_ptr(priv, txq); | ||
1074 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1075 | } else { | ||
1076 | iwl_stop_queue(priv, txq->swq_id); | ||
1077 | } | ||
1078 | } | ||
1079 | |||
1080 | return 0; | ||
1081 | |||
1082 | drop_unlock: | ||
1083 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1084 | return -1; | ||
1085 | } | ||
1086 | EXPORT_SYMBOL(iwl_tx_skb); | ||
1087 | |||
1088 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | 418 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ |
1089 | 419 | ||
1090 | /** | 420 | /** |
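One detail worth keeping from the removed iwl_tx_skb(): the Tx command plus MAC header is rounded up to a dword boundary because the device reads that buffer on dword boundaries, and any resulting 2-byte pad after the MAC header is flagged to the NIC. A minimal restatement of that step, with variable names following the removed code:

	u16 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + hdr_len;
	u16 firstlen = (len + 3) & ~3;		/* round up to the next dword */

	if (firstlen != len)			/* 2-byte pad after the MAC header */
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;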
@@ -1218,61 +548,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
1218 | return idx; | 548 | return idx; |
1219 | } | 549 | } |
1220 | 550 | ||
1221 | static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb) | ||
1222 | { | ||
1223 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
1224 | struct ieee80211_sta *sta; | ||
1225 | struct iwl_station_priv *sta_priv; | ||
1226 | |||
1227 | sta = ieee80211_find_sta(priv->vif, hdr->addr1); | ||
1228 | if (sta) { | ||
1229 | sta_priv = (void *)sta->drv_priv; | ||
1230 | /* avoid atomic ops if this isn't a client */ | ||
1231 | if (sta_priv->client && | ||
1232 | atomic_dec_return(&sta_priv->pending_frames) == 0) | ||
1233 | ieee80211_sta_block_awake(priv->hw, sta, false); | ||
1234 | } | ||
1235 | |||
1236 | ieee80211_tx_status_irqsafe(priv->hw, skb); | ||
1237 | } | ||
1238 | |||
1239 | int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) | ||
1240 | { | ||
1241 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
1242 | struct iwl_queue *q = &txq->q; | ||
1243 | struct iwl_tx_info *tx_info; | ||
1244 | int nfreed = 0; | ||
1245 | struct ieee80211_hdr *hdr; | ||
1246 | |||
1247 | if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { | ||
1248 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " | ||
1249 | "is out of range [0-%d] %d %d.\n", txq_id, | ||
1250 | index, q->n_bd, q->write_ptr, q->read_ptr); | ||
1251 | return 0; | ||
1252 | } | ||
1253 | |||
1254 | for (index = iwl_queue_inc_wrap(index, q->n_bd); | ||
1255 | q->read_ptr != index; | ||
1256 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
1257 | |||
1258 | tx_info = &txq->txb[txq->q.read_ptr]; | ||
1259 | iwl_tx_status(priv, tx_info->skb[0]); | ||
1260 | |||
1261 | hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; | ||
1262 | if (hdr && ieee80211_is_data_qos(hdr->frame_control)) | ||
1263 | nfreed++; | ||
1264 | tx_info->skb[0] = NULL; | ||
1265 | |||
1266 | if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) | ||
1267 | priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); | ||
1268 | |||
1269 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); | ||
1270 | } | ||
1271 | return nfreed; | ||
1272 | } | ||
1273 | EXPORT_SYMBOL(iwl_tx_queue_reclaim); | ||
1274 | |||
1275 | |||
1276 | /** | 551 | /** |
1277 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd | 552 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd |
1278 | * | 553 | * |
@@ -1366,7 +641,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
1366 | 641 | ||
1367 | if (!(meta->flags & CMD_ASYNC)) { | 642 | if (!(meta->flags & CMD_ASYNC)) { |
1368 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | 643 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); |
1369 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n", | 644 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", |
1370 | get_cmd_string(cmd->hdr.cmd)); | 645 | get_cmd_string(cmd->hdr.cmd)); |
1371 | wake_up_interruptible(&priv->wait_command_queue); | 646 | wake_up_interruptible(&priv->wait_command_queue); |
1372 | } | 647 | } |
@@ -1374,353 +649,37 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
1374 | } | 649 | } |
1375 | EXPORT_SYMBOL(iwl_tx_cmd_complete); | 650 | EXPORT_SYMBOL(iwl_tx_cmd_complete); |
1376 | 651 | ||
1377 | /* | ||
1378 | * Find first available (lowest unused) Tx Queue, mark it "active". | ||
1379 | * Called only when finding queue for aggregation. | ||
1380 | * Should never return anything < 7, because they should already | ||
1381 | * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) | ||
1382 | */ | ||
1383 | static int iwl_txq_ctx_activate_free(struct iwl_priv *priv) | ||
1384 | { | ||
1385 | int txq_id; | ||
1386 | |||
1387 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | ||
1388 | if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) | ||
1389 | return txq_id; | ||
1390 | return -1; | ||
1391 | } | ||
1392 | |||
1393 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) | ||
1394 | { | ||
1395 | int sta_id; | ||
1396 | int tx_fifo; | ||
1397 | int txq_id; | ||
1398 | int ret; | ||
1399 | unsigned long flags; | ||
1400 | struct iwl_tid_data *tid_data; | ||
1401 | |||
1402 | tx_fifo = get_fifo_from_tid(tid); | ||
1403 | if (unlikely(tx_fifo < 0)) | ||
1404 | return tx_fifo; | ||
1405 | |||
1406 | IWL_WARN(priv, "%s on ra = %pM tid = %d\n", | ||
1407 | __func__, ra, tid); | ||
1408 | |||
1409 | sta_id = iwl_find_station(priv, ra); | ||
1410 | if (sta_id == IWL_INVALID_STATION) { | ||
1411 | IWL_ERR(priv, "Start AGG on invalid station\n"); | ||
1412 | return -ENXIO; | ||
1413 | } | ||
1414 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
1415 | return -EINVAL; | ||
1416 | |||
1417 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { | ||
1418 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); | ||
1419 | return -ENXIO; | ||
1420 | } | ||
1421 | |||
1422 | txq_id = iwl_txq_ctx_activate_free(priv); | ||
1423 | if (txq_id == -1) { | ||
1424 | IWL_ERR(priv, "No free aggregation queue available\n"); | ||
1425 | return -ENXIO; | ||
1426 | } | ||
1427 | |||
1428 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
1429 | tid_data = &priv->stations[sta_id].tid[tid]; | ||
1430 | *ssn = SEQ_TO_SN(tid_data->seq_number); | ||
1431 | tid_data->agg.txq_id = txq_id; | ||
1432 | priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id); | ||
1433 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
1434 | |||
1435 | ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, | ||
1436 | sta_id, tid, *ssn); | ||
1437 | if (ret) | ||
1438 | return ret; | ||
1439 | |||
1440 | if (tid_data->tfds_in_queue == 0) { | ||
1441 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); | ||
1442 | tid_data->agg.state = IWL_AGG_ON; | ||
1443 | ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid); | ||
1444 | } else { | ||
1445 | IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n", | ||
1446 | tid_data->tfds_in_queue); | ||
1447 | tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; | ||
1448 | } | ||
1449 | return ret; | ||
1450 | } | ||
1451 | EXPORT_SYMBOL(iwl_tx_agg_start); | ||
1452 | |||
1453 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid) | ||
1454 | { | ||
1455 | int tx_fifo_id, txq_id, sta_id, ssn = -1; | ||
1456 | struct iwl_tid_data *tid_data; | ||
1457 | int write_ptr, read_ptr; | ||
1458 | unsigned long flags; | ||
1459 | |||
1460 | if (!ra) { | ||
1461 | IWL_ERR(priv, "ra = NULL\n"); | ||
1462 | return -EINVAL; | ||
1463 | } | ||
1464 | |||
1465 | tx_fifo_id = get_fifo_from_tid(tid); | ||
1466 | if (unlikely(tx_fifo_id < 0)) | ||
1467 | return tx_fifo_id; | ||
1468 | |||
1469 | sta_id = iwl_find_station(priv, ra); | ||
1470 | |||
1471 | if (sta_id == IWL_INVALID_STATION) { | ||
1472 | IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); | ||
1473 | return -ENXIO; | ||
1474 | } | ||
1475 | |||
1476 | if (priv->stations[sta_id].tid[tid].agg.state == | ||
1477 | IWL_EMPTYING_HW_QUEUE_ADDBA) { | ||
1478 | IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); | ||
1479 | ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid); | ||
1480 | priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; | ||
1481 | return 0; | ||
1482 | } | ||
1483 | |||
1484 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) | ||
1485 | IWL_WARN(priv, "Stopping AGG while state not ON or starting\n"); | ||
1486 | |||
1487 | tid_data = &priv->stations[sta_id].tid[tid]; | ||
1488 | ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; | ||
1489 | txq_id = tid_data->agg.txq_id; | ||
1490 | write_ptr = priv->txq[txq_id].q.write_ptr; | ||
1491 | read_ptr = priv->txq[txq_id].q.read_ptr; | ||
1492 | |||
1493 | /* The queue is not empty */ | ||
1494 | if (write_ptr != read_ptr) { | ||
1495 | IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); | ||
1496 | priv->stations[sta_id].tid[tid].agg.state = | ||
1497 | IWL_EMPTYING_HW_QUEUE_DELBA; | ||
1498 | return 0; | ||
1499 | } | ||
1500 | |||
1501 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); | ||
1502 | priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; | ||
1503 | |||
1504 | spin_lock_irqsave(&priv->lock, flags); | ||
1505 | /* | ||
1506 | * the only reason this call can fail is queue number out of range, | ||
1507 | * which can happen if uCode is reloaded and all the station | ||
1508 | * information is lost. If it is outside the range, there is no need | ||
1509 | * to deactivate the uCode queue; just return "success" to allow | ||
1510 | * mac80211 to clean up its own data. | ||
1511 | */ | ||
1512 | priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn, | ||
1513 | tx_fifo_id); | ||
1514 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1515 | |||
1516 | ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid); | ||
1517 | |||
1518 | return 0; | ||
1519 | } | ||
1520 | EXPORT_SYMBOL(iwl_tx_agg_stop); | ||
1521 | |||
1522 | int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id) | ||
1523 | { | ||
1524 | struct iwl_queue *q = &priv->txq[txq_id].q; | ||
1525 | u8 *addr = priv->stations[sta_id].sta.sta.addr; | ||
1526 | struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; | ||
1527 | |||
1528 | switch (priv->stations[sta_id].tid[tid].agg.state) { | ||
1529 | case IWL_EMPTYING_HW_QUEUE_DELBA: | ||
1530 | /* We are reclaiming the last packet of the */ | ||
1531 | /* aggregated HW queue */ | ||
1532 | if ((txq_id == tid_data->agg.txq_id) && | ||
1533 | (q->read_ptr == q->write_ptr)) { | ||
1534 | u16 ssn = SEQ_TO_SN(tid_data->seq_number); | ||
1535 | int tx_fifo = get_fifo_from_tid(tid); | ||
1536 | IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); | ||
1537 | priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, | ||
1538 | ssn, tx_fifo); | ||
1539 | tid_data->agg.state = IWL_AGG_OFF; | ||
1540 | ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid); | ||
1541 | } | ||
1542 | break; | ||
1543 | case IWL_EMPTYING_HW_QUEUE_ADDBA: | ||
1544 | /* We are reclaiming the last packet of the queue */ | ||
1545 | if (tid_data->tfds_in_queue == 0) { | ||
1546 | IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n"); | ||
1547 | tid_data->agg.state = IWL_AGG_ON; | ||
1548 | ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid); | ||
1549 | } | ||
1550 | break; | ||
1551 | } | ||
1552 | return 0; | ||
1553 | } | ||
1554 | EXPORT_SYMBOL(iwl_txq_check_empty); | ||
1555 | |||
1556 | /** | ||
1557 | * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack | ||
1558 | * | ||
1559 | * Go through block-ack's bitmap of ACK'd frames, update driver's record of | ||
1560 | * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. | ||
1561 | */ | ||
1562 | static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv, | ||
1563 | struct iwl_ht_agg *agg, | ||
1564 | struct iwl_compressed_ba_resp *ba_resp) | ||
1565 | |||
1566 | { | ||
1567 | int i, sh, ack; | ||
1568 | u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); | ||
1569 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | ||
1570 | u64 bitmap; | ||
1571 | int successes = 0; | ||
1572 | struct ieee80211_tx_info *info; | ||
1573 | |||
1574 | if (unlikely(!agg->wait_for_ba)) { | ||
1575 | IWL_ERR(priv, "Received BA when not expected\n"); | ||
1576 | return -EINVAL; | ||
1577 | } | ||
1578 | |||
1579 | /* Mark that the expected block-ack response arrived */ | ||
1580 | agg->wait_for_ba = 0; | ||
1581 | IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); | ||
1582 | |||
1583 | /* Calculate shift to align block-ack bits with our Tx window bits */ | ||
1584 | sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); | ||
1585 | if (sh < 0) /* tbw something is wrong with indices */ | ||
1586 | sh += 0x100; | ||
1587 | |||
1588 | /* don't use 64-bit values for now */ | ||
1589 | bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; | ||
1590 | |||
1591 | if (agg->frame_count > (64 - sh)) { | ||
1592 | IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); | ||
1593 | return -1; | ||
1594 | } | ||
1595 | |||
1596 | /* check for success or failure according to the | ||
1597 | * transmitted bitmap and block-ack bitmap */ | ||
1598 | bitmap &= agg->bitmap; | ||
1599 | |||
1600 | /* For each frame attempted in aggregation, | ||
1601 | * update driver's record of tx frame's status. */ | ||
1602 | for (i = 0; i < agg->frame_count ; i++) { | ||
1603 | ack = bitmap & (1ULL << i); | ||
1604 | successes += !!ack; | ||
1605 | IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", | ||
1606 | ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, | ||
1607 | agg->start_idx + i); | ||
1608 | } | ||
1609 | |||
1610 | info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); | ||
1611 | memset(&info->status, 0, sizeof(info->status)); | ||
1612 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
1613 | info->flags |= IEEE80211_TX_STAT_AMPDU; | ||
1614 | info->status.ampdu_ack_map = successes; | ||
1615 | info->status.ampdu_ack_len = agg->frame_count; | ||
1616 | iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info); | ||
1617 | |||
1618 | IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap); | ||
1619 | |||
1620 | return 0; | ||
1621 | } | ||
1622 | |||
1623 | /** | ||
1624 | * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA | ||
1625 | * | ||
1626 | * Handles block-acknowledge notification from device, which reports success | ||
1627 | * of frames sent via aggregation. | ||
1628 | */ | ||
1629 | void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | ||
1630 | struct iwl_rx_mem_buffer *rxb) | ||
1631 | { | ||
1632 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1633 | struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; | ||
1634 | struct iwl_tx_queue *txq = NULL; | ||
1635 | struct iwl_ht_agg *agg; | ||
1636 | int index; | ||
1637 | int sta_id; | ||
1638 | int tid; | ||
1639 | |||
1640 | /* "flow" corresponds to Tx queue */ | ||
1641 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | ||
1642 | |||
1643 | /* "ssn" is start of block-ack Tx window, corresponds to index | ||
1644 | * (in Tx queue's circular buffer) of first TFD/frame in window */ | ||
1645 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | ||
1646 | |||
1647 | if (scd_flow >= priv->hw_params.max_txq_num) { | ||
1648 | IWL_ERR(priv, | ||
1649 | "BUG_ON scd_flow is bigger than number of queues\n"); | ||
1650 | return; | ||
1651 | } | ||
1652 | |||
1653 | txq = &priv->txq[scd_flow]; | ||
1654 | sta_id = ba_resp->sta_id; | ||
1655 | tid = ba_resp->tid; | ||
1656 | agg = &priv->stations[sta_id].tid[tid].agg; | ||
1657 | |||
1658 | /* Find index just before block-ack window */ | ||
1659 | index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); | ||
1660 | |||
1661 | /* TODO: Need to get this copy more safely - now good for debug */ | ||
1662 | |||
1663 | IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " | ||
1664 | "sta_id = %d\n", | ||
1665 | agg->wait_for_ba, | ||
1666 | (u8 *) &ba_resp->sta_addr_lo32, | ||
1667 | ba_resp->sta_id); | ||
1668 | IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " | ||
1669 | "%d, scd_ssn = %d\n", | ||
1670 | ba_resp->tid, | ||
1671 | ba_resp->seq_ctl, | ||
1672 | (unsigned long long)le64_to_cpu(ba_resp->bitmap), | ||
1673 | ba_resp->scd_flow, | ||
1674 | ba_resp->scd_ssn); | ||
1675 | IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n", | ||
1676 | agg->start_idx, | ||
1677 | (unsigned long long)agg->bitmap); | ||
1678 | |||
1679 | /* Update driver's record of ACK vs. not for each frame in window */ | ||
1680 | iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp); | ||
1681 | |||
1682 | /* Release all TFDs before the SSN, i.e. all TFDs in front of | ||
1683 | * block-ack window (we assume that they've been successfully | ||
1684 | * transmitted ... if not, it's too late anyway). */ | ||
1685 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | ||
1686 | /* calculate mac80211 ampdu sw queue to wake */ | ||
1687 | int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); | ||
1688 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | ||
1689 | |||
1690 | if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && | ||
1691 | priv->mac80211_registered && | ||
1692 | (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) | ||
1693 | iwl_wake_queue(priv, txq->swq_id); | ||
1694 | |||
1695 | iwl_txq_check_empty(priv, sta_id, tid, scd_flow); | ||
1696 | } | ||
1697 | } | ||
1698 | EXPORT_SYMBOL(iwl_rx_reply_compressed_ba); | ||
1699 | |||
1700 | #ifdef CONFIG_IWLWIFI_DEBUG | 652 | #ifdef CONFIG_IWLWIFI_DEBUG |
1701 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x | 653 | #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x |
654 | #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x | ||
1702 | 655 | ||
1703 | const char *iwl_get_tx_fail_reason(u32 status) | 656 | const char *iwl_get_tx_fail_reason(u32 status) |
1704 | { | 657 | { |
1705 | switch (status & TX_STATUS_MSK) { | 658 | switch (status & TX_STATUS_MSK) { |
1706 | case TX_STATUS_SUCCESS: | 659 | case TX_STATUS_SUCCESS: |
1707 | return "SUCCESS"; | 660 | return "SUCCESS"; |
1708 | TX_STATUS_ENTRY(SHORT_LIMIT); | 661 | TX_STATUS_POSTPONE(DELAY); |
1709 | TX_STATUS_ENTRY(LONG_LIMIT); | 662 | TX_STATUS_POSTPONE(FEW_BYTES); |
1710 | TX_STATUS_ENTRY(FIFO_UNDERRUN); | 663 | TX_STATUS_POSTPONE(BT_PRIO); |
1711 | TX_STATUS_ENTRY(MGMNT_ABORT); | 664 | TX_STATUS_POSTPONE(QUIET_PERIOD); |
1712 | TX_STATUS_ENTRY(NEXT_FRAG); | 665 | TX_STATUS_POSTPONE(CALC_TTAK); |
1713 | TX_STATUS_ENTRY(LIFE_EXPIRE); | 666 | TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); |
1714 | TX_STATUS_ENTRY(DEST_PS); | 667 | TX_STATUS_FAIL(SHORT_LIMIT); |
1715 | TX_STATUS_ENTRY(ABORTED); | 668 | TX_STATUS_FAIL(LONG_LIMIT); |
1716 | TX_STATUS_ENTRY(BT_RETRY); | 669 | TX_STATUS_FAIL(FIFO_UNDERRUN); |
1717 | TX_STATUS_ENTRY(STA_INVALID); | 670 | TX_STATUS_FAIL(DRAIN_FLOW); |
1718 | TX_STATUS_ENTRY(FRAG_DROPPED); | 671 | TX_STATUS_FAIL(RFKILL_FLUSH); |
1719 | TX_STATUS_ENTRY(TID_DISABLE); | 672 | TX_STATUS_FAIL(LIFE_EXPIRE); |
1720 | TX_STATUS_ENTRY(FRAME_FLUSHED); | 673 | TX_STATUS_FAIL(DEST_PS); |
1721 | TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); | 674 | TX_STATUS_FAIL(HOST_ABORTED); |
1722 | TX_STATUS_ENTRY(TX_LOCKED); | 675 | TX_STATUS_FAIL(BT_RETRY); |
1723 | TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); | 676 | TX_STATUS_FAIL(STA_INVALID); |
677 | TX_STATUS_FAIL(FRAG_DROPPED); | ||
678 | TX_STATUS_FAIL(TID_DISABLE); | ||
679 | TX_STATUS_FAIL(FIFO_FLUSHED); | ||
680 | TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); | ||
681 | TX_STATUS_FAIL(FW_DROP); | ||
682 | TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP); | ||
1724 | } | 683 | } |
1725 | 684 | ||
1726 | return "UNKNOWN"; | 685 | return "UNKNOWN"; |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index e0c05feb296c..9f362024a29c 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -598,9 +598,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
598 | txq->need_update = 0; | 598 | txq->need_update = 0; |
599 | } | 599 | } |
600 | 600 | ||
601 | IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", | 601 | IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", |
602 | le16_to_cpu(out_cmd->hdr.sequence)); | 602 | le16_to_cpu(out_cmd->hdr.sequence)); |
603 | IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags)); | 603 | IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); |
604 | iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); | 604 | iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); |
605 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, | 605 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, |
606 | ieee80211_hdrlen(fc)); | 606 | ieee80211_hdrlen(fc)); |
@@ -1604,9 +1604,6 @@ static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity, | |||
1604 | return pos; | 1604 | return pos; |
1605 | } | 1605 | } |
1606 | 1606 | ||
1607 | /* For sanity check only. Actual size is determined by uCode, typ. 512 */ | ||
1608 | #define IWL3945_MAX_EVENT_LOG_SIZE (512) | ||
1609 | |||
1610 | #define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) | 1607 | #define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) |
1611 | 1608 | ||
1612 | int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, | 1609 | int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, |
@@ -1633,16 +1630,16 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, | |||
1633 | num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); | 1630 | num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); |
1634 | next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); | 1631 | next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); |
1635 | 1632 | ||
1636 | if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) { | 1633 | if (capacity > priv->cfg->max_event_log_size) { |
1637 | IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", | 1634 | IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", |
1638 | capacity, IWL3945_MAX_EVENT_LOG_SIZE); | 1635 | capacity, priv->cfg->max_event_log_size); |
1639 | capacity = IWL3945_MAX_EVENT_LOG_SIZE; | 1636 | capacity = priv->cfg->max_event_log_size; |
1640 | } | 1637 | } |
1641 | 1638 | ||
1642 | if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) { | 1639 | if (next_entry > priv->cfg->max_event_log_size) { |
1643 | IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", | 1640 | IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", |
1644 | next_entry, IWL3945_MAX_EVENT_LOG_SIZE); | 1641 | next_entry, priv->cfg->max_event_log_size); |
1645 | next_entry = IWL3945_MAX_EVENT_LOG_SIZE; | 1642 | next_entry = priv->cfg->max_event_log_size; |
1646 | } | 1643 | } |
1647 | 1644 | ||
1648 | size = num_wraps ? capacity : next_entry; | 1645 | size = num_wraps ? capacity : next_entry; |
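This hunk drops the hard-coded IWL3945_MAX_EVENT_LOG_SIZE in favour of a per-device limit read from priv->cfg->max_event_log_size, then clamps both the reported capacity and the write index before sizing the dump. A hedged sketch of that clamp-then-size logic follows; apart from max_event_log_size, the struct and function names are assumptions made to keep the example self-contained.

/* Hedged sketch of the capacity/write-index clamping performed above.
 * Only max_event_log_size mirrors the driver; the other types and names
 * are assumed for the sake of a standalone example. */
#include <stdio.h>

struct cfg { unsigned int max_event_log_size; };

static unsigned int clamp_log_size(const struct cfg *cfg,
				   unsigned int capacity,
				   unsigned int next_entry,
				   unsigned int num_wraps)
{
	if (capacity > cfg->max_event_log_size) {
		fprintf(stderr, "Log capacity %u is bogus, limit to %u entries\n",
			capacity, cfg->max_event_log_size);
		capacity = cfg->max_event_log_size;
	}
	if (next_entry > cfg->max_event_log_size) {
		fprintf(stderr, "Log write index %u is bogus, limit to %u\n",
			next_entry, cfg->max_event_log_size);
		next_entry = cfg->max_event_log_size;
	}
	/* If the log has wrapped, every slot holds a valid entry;
	 * otherwise only the entries written so far do. */
	return num_wraps ? capacity : next_entry;
}

int main(void)
{
	struct cfg c = { .max_event_log_size = 512 };
	printf("%u\n", clamp_log_size(&c, 1024, 40, 0)); /* prints 40 */
	return 0;
}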
@@ -1938,7 +1935,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, | |||
1938 | added++; | 1935 | added++; |
1939 | } | 1936 | } |
1940 | 1937 | ||
1941 | IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added); | 1938 | IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); |
1942 | return added; | 1939 | return added; |
1943 | } | 1940 | } |
1944 | 1941 | ||
@@ -3141,8 +3138,6 @@ void iwl3945_post_associate(struct iwl_priv *priv) | |||
3141 | break; | 3138 | break; |
3142 | } | 3139 | } |
3143 | 3140 | ||
3144 | iwl_activate_qos(priv, 0); | ||
3145 | |||
3146 | /* we have just associated, don't start scan too early */ | 3141 | /* we have just associated, don't start scan too early */ |
3147 | priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; | 3142 | priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; |
3148 | } | 3143 | } |
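While this hunk removes the iwl_activate_qos() call, it keeps the scan back-off: right after association the driver records jiffies + IWL_DELAY_NEXT_SCAN so that an early scan request can be deferred. A userspace-flavoured sketch of that deadline pattern follows; the tick type, helper names, and values are assumptions chosen only to keep the example self-contained.

/* Hedged sketch: wrap-safe "not before deadline" check in the spirit of the
 * kernel's time_after_eq(). All names and values here are assumed for
 * illustration; the driver itself uses jiffies and IWL_DELAY_NEXT_SCAN. */
#include <stdio.h>

typedef unsigned long tick_t;

/* A signed difference keeps the comparison correct across counter wrap. */
static int deadline_reached(tick_t now, tick_t deadline)
{
	return (long)(now - deadline) >= 0;
}

int main(void)
{
	tick_t now = 1000, delay = 500;
	tick_t next_scan = now + delay;   /* set right after association */

	printf("%d\n", deadline_reached(now, next_scan));         /* 0: too early */
	printf("%d\n", deadline_reached(now + delay, next_scan)); /* 1: scan allowed */
	return 0;
}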
@@ -3404,7 +3399,7 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, | |||
3404 | } | 3399 | } |
3405 | 3400 | ||
3406 | /* Initialize rate scaling */ | 3401 | /* Initialize rate scaling */ |
3407 | IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM \n", | 3402 | IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", |
3408 | sta->addr); | 3403 | sta->addr); |
3409 | iwl3945_rs_rate_init(priv, sta, sta_id); | 3404 | iwl3945_rs_rate_init(priv, sta, sta_id); |
3410 | 3405 | ||
@@ -3890,11 +3885,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv) | |||
3890 | priv->iw_mode = NL80211_IFTYPE_STATION; | 3885 | priv->iw_mode = NL80211_IFTYPE_STATION; |
3891 | priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; | 3886 | priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; |
3892 | 3887 | ||
3893 | iwl_reset_qos(priv); | ||
3894 | |||
3895 | priv->qos_data.qos_active = 0; | ||
3896 | priv->qos_data.qos_cap.val = 0; | ||
3897 | |||
3898 | priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; | 3888 | priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; |
3899 | 3889 | ||
3900 | if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { | 3890 | if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { |