Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-4965.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c  4004
1 files changed, 742 insertions, 3262 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index de330ae0ca95..9afecb813716 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -39,81 +39,33 @@
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40 40
41#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
42#include "iwl-4965.h" 42#include "iwl-dev.h"
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-calib.h"
47#include "iwl-sta.h"
48
49static int iwl4965_send_tx_power(struct iwl_priv *priv);
50static int iwl4965_hw_get_temperature(const struct iwl_priv *priv);
51
52/* Change firmware file name, using "-" and incrementing number,
53 * *only* when uCode interface or architecture changes so that it
54 * is not compatible with earlier drivers.
55 * This number will also appear in << 8 position of 1st dword of uCode file */
56#define IWL4965_UCODE_API "-2"
57
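/*
 * Editorial sketch, not part of the patch: the API suffix above is normally
 * pasted into the requested firmware file name.  The base name and ".ucode"
 * extension shown here are assumptions for illustration only.
 */
#define IWL4965_FW_PRE	"iwlwifi-4965"
#define IWL4965_FW_NAME	IWL4965_FW_PRE IWL4965_UCODE_API ".ucode"
/* expands to "iwlwifi-4965-2.ucode" while IWL4965_UCODE_API is "-2" */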
46 58
47/* module parameters */ 59/* module parameters */
48static struct iwl_mod_params iwl4965_mod_params = { 60static struct iwl_mod_params iwl4965_mod_params = {
49 .num_of_queues = IWL4965_MAX_NUM_QUEUES, 61 .num_of_queues = IWL49_NUM_QUEUES,
62 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
50 .enable_qos = 1, 63 .enable_qos = 1,
51 .amsdu_size_8K = 1, 64 .amsdu_size_8K = 1,
65 .restart_fw = 1,
52 /* the rest are 0 by default */ 66 /* the rest are 0 by default */
53}; 67};
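/*
 * Editorial sketch: fields of iwl4965_mod_params are usually exposed as
 * module parameters elsewhere in the file.  The parameter name below is an
 * illustrative assumption, not text from this hunk.
 */
module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");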
54 68
55static void iwl4965_hw_card_show_info(struct iwl_priv *priv);
56
57#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
58 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
59 IWL_RATE_SISO_##s##M_PLCP, \
60 IWL_RATE_MIMO_##s##M_PLCP, \
61 IWL_RATE_##r##M_IEEE, \
62 IWL_RATE_##ip##M_INDEX, \
63 IWL_RATE_##in##M_INDEX, \
64 IWL_RATE_##rp##M_INDEX, \
65 IWL_RATE_##rn##M_INDEX, \
66 IWL_RATE_##pp##M_INDEX, \
67 IWL_RATE_##np##M_INDEX }
68
69/*
70 * Parameter order:
71 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
72 *
73 * If there isn't a valid next or previous rate then INV is used which
74 * maps to IWL_RATE_INVALID
75 *
76 */
77const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
78 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
79 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
80 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
81 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
82 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
83 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
84 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
85 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
86 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
87 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
88 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
89 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
90 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
91};
92
93#ifdef CONFIG_IWL4965_HT
94
95static const u16 default_tid_to_tx_fifo[] = {
96 IWL_TX_FIFO_AC1,
97 IWL_TX_FIFO_AC0,
98 IWL_TX_FIFO_AC0,
99 IWL_TX_FIFO_AC1,
100 IWL_TX_FIFO_AC2,
101 IWL_TX_FIFO_AC2,
102 IWL_TX_FIFO_AC3,
103 IWL_TX_FIFO_AC3,
104 IWL_TX_FIFO_NONE,
105 IWL_TX_FIFO_NONE,
106 IWL_TX_FIFO_NONE,
107 IWL_TX_FIFO_NONE,
108 IWL_TX_FIFO_NONE,
109 IWL_TX_FIFO_NONE,
110 IWL_TX_FIFO_NONE,
111 IWL_TX_FIFO_NONE,
112 IWL_TX_FIFO_AC3
113};
114
115#endif /*CONFIG_IWL4965_HT */
116
117/* check contents of special bootstrap uCode SRAM */ 69/* check contents of special bootstrap uCode SRAM */
118static int iwl4965_verify_bsm(struct iwl_priv *priv) 70static int iwl4965_verify_bsm(struct iwl_priv *priv)
119{ 71{
@@ -192,15 +144,18 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
192 144
193 IWL_DEBUG_INFO("Begin load bsm\n"); 145 IWL_DEBUG_INFO("Begin load bsm\n");
194 146
147 priv->ucode_type = UCODE_RT;
148
195 /* make sure bootstrap program is no larger than BSM's SRAM size */ 149 /* make sure bootstrap program is no larger than BSM's SRAM size */
196 if (len > IWL_MAX_BSM_SIZE) 150 if (len > IWL_MAX_BSM_SIZE)
197 return -EINVAL; 151 return -EINVAL;
198 152
199 /* Tell bootstrap uCode where to find the "Initialize" uCode 153 /* Tell bootstrap uCode where to find the "Initialize" uCode
200 * in host DRAM ... host DRAM physical address bits 35:4 for 4965. 154 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
201 * NOTE: iwl4965_initialize_alive_start() will replace these values, 155 * NOTE: iwl_init_alive_start() will replace these values,
202 * after the "initialize" uCode has run, to point to 156 * after the "initialize" uCode has run, to point to
203 * runtime/protocol instructions and backup data cache. */ 157 * runtime/protocol instructions and backup data cache.
158 */
204 pinst = priv->ucode_init.p_addr >> 4; 159 pinst = priv->ucode_init.p_addr >> 4;
205 pdata = priv->ucode_init_data.p_addr >> 4; 160 pdata = priv->ucode_init_data.p_addr >> 4;
206 inst_len = priv->ucode_init.len; 161 inst_len = priv->ucode_init.len;
@@ -259,271 +214,134 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
259 return 0; 214 return 0;
260} 215}
261 216
262static int iwl4965_init_drv(struct iwl_priv *priv) 217/**
218 * iwl4965_set_ucode_ptrs - Set uCode address location
219 *
220 * Tell initialization uCode where to find runtime uCode.
221 *
222 * BSM registers initially contain pointers to initialization uCode.
223 * We need to replace them to load runtime uCode inst and data,
224 * and to save runtime data when powering down.
225 */
226static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
263{ 227{
264 int ret; 228 dma_addr_t pinst;
265 int i; 229 dma_addr_t pdata;
266 230 unsigned long flags;
267 priv->antenna = (enum iwl4965_antenna)priv->cfg->mod_params->antenna; 231 int ret = 0;
268 priv->retry_rate = 1;
269 priv->ibss_beacon = NULL;
270
271 spin_lock_init(&priv->lock);
272 spin_lock_init(&priv->power_data.lock);
273 spin_lock_init(&priv->sta_lock);
274 spin_lock_init(&priv->hcmd_lock);
275 spin_lock_init(&priv->lq_mngr.lock);
276
277 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
278 sizeof(struct iwl4965_shared),
279 &priv->shared_phys);
280
281 if (!priv->shared_virt) {
282 ret = -ENOMEM;
283 goto err;
284 }
285
286 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
287
288
289 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
290 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
291
292 INIT_LIST_HEAD(&priv->free_frames);
293
294 mutex_init(&priv->mutex);
295
296 /* Clear the driver's (not device's) station table */
297 iwlcore_clear_stations_table(priv);
298
299 priv->data_retry_limit = -1;
300 priv->ieee_channels = NULL;
301 priv->ieee_rates = NULL;
302 priv->band = IEEE80211_BAND_2GHZ;
303
304 priv->iw_mode = IEEE80211_IF_TYPE_STA;
305
306 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
307 priv->valid_antenna = 0x7; /* assume all 3 connected */
308 priv->ps_mode = IWL_MIMO_PS_NONE;
309
310 /* Choose which receivers/antennas to use */
311 iwl4965_set_rxon_chain(priv);
312
313 iwlcore_reset_qos(priv);
314
315 priv->qos_data.qos_active = 0;
316 priv->qos_data.qos_cap.val = 0;
317
318 iwlcore_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
319 232
320 priv->rates_mask = IWL_RATES_MASK; 233 /* bits 35:4 for 4965 */
321 /* If power management is turned on, default to AC mode */ 234 pinst = priv->ucode_code.p_addr >> 4;
322 priv->power_mode = IWL_POWER_AC; 235 pdata = priv->ucode_data_backup.p_addr >> 4;
323 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
324 236
325 ret = iwl_init_channel_map(priv); 237 spin_lock_irqsave(&priv->lock, flags);
238 ret = iwl_grab_nic_access(priv);
326 if (ret) { 239 if (ret) {
327 IWL_ERROR("initializing regulatory failed: %d\n", ret); 240 spin_unlock_irqrestore(&priv->lock, flags);
328 goto err; 241 return ret;
329 } 242 }
330 243
331 ret = iwl4965_init_geos(priv); 244 /* Tell bootstrap uCode where to find image to load */
332 if (ret) { 245 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
333 IWL_ERROR("initializing geos failed: %d\n", ret); 246 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
334 goto err_free_channel_map; 247 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
335 } 248 priv->ucode_data.len);
336 249
337 ret = ieee80211_register_hw(priv->hw); 250 /* Inst bytecount must be last to set up, bit 31 signals uCode
338 if (ret) { 251 * that all new ptr/size info is in place */
339 IWL_ERROR("Failed to register network device (error %d)\n", 252 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
340 ret); 253 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
341 goto err_free_geos; 254 iwl_release_nic_access(priv);
342 }
343 255
344 priv->hw->conf.beacon_int = 100; 256 spin_unlock_irqrestore(&priv->lock, flags);
345 priv->mac80211_registered = 1;
346 257
347 return 0; 258 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
348 259
349err_free_geos:
350 iwl4965_free_geos(priv);
351err_free_channel_map:
352 iwl_free_channel_map(priv);
353err:
354 return ret; 260 return ret;
355} 261}
356 262
357static int is_fat_channel(__le32 rxon_flags) 263/**
358{ 264 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
359 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) || 265 *
360 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK); 266 * Called after REPLY_ALIVE notification received from "initialize" uCode.
361} 267 *
362 268 * The 4965 "initialize" ALIVE reply contains calibration data for:
363static u8 is_single_stream(struct iwl_priv *priv) 269 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
364{ 270 * (3945 does not contain this data).
365#ifdef CONFIG_IWL4965_HT 271 *
366 if (!priv->current_ht_config.is_ht || 272 * Tell "initialize" uCode to go ahead and load the runtime uCode.
367 (priv->current_ht_config.supp_mcs_set[1] == 0) || 273*/
368 (priv->ps_mode == IWL_MIMO_PS_STATIC)) 274static void iwl4965_init_alive_start(struct iwl_priv *priv)
369 return 1; 275{
370#else 276 /* Check alive response for "valid" sign from uCode */
371 return 1; 277 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
372#endif /*CONFIG_IWL4965_HT */ 278 /* We had an error bringing up the hardware, so take it
373 return 0; 279 * all the way back down so we can try again */
374} 280 IWL_DEBUG_INFO("Initialize Alive failed.\n");
375 281 goto restart;
376int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags) 282 }
377{ 283
378 int idx = 0; 284 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
379 285 * This is a paranoid check, because we would not have gotten the
380 /* 4965 HT rate format */ 286 * "initialize" alive if code weren't properly loaded. */
381 if (rate_n_flags & RATE_MCS_HT_MSK) { 287 if (iwl_verify_ucode(priv)) {
382 idx = (rate_n_flags & 0xff); 288 /* Runtime instruction load was bad;
383 289 * take it all the way back down so we can try again */
384 if (idx >= IWL_RATE_MIMO_6M_PLCP) 290 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
385 idx = idx - IWL_RATE_MIMO_6M_PLCP; 291 goto restart;
386 292 }
387 idx += IWL_FIRST_OFDM_RATE; 293
388 /* skip 9M not supported in ht*/ 294 /* Calculate temperature */
389 if (idx >= IWL_RATE_9M_INDEX) 295 priv->temperature = iwl4965_hw_get_temperature(priv);
390 idx += 1; 296
391 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) 297 /* Send pointers to protocol/runtime uCode image ... init code will
392 return idx; 298 * load and launch runtime uCode, which will send us another "Alive"
393 299 * notification. */
394 /* 4965 legacy rate format, search for match in table */ 300 IWL_DEBUG_INFO("Initialization Alive received.\n");
395 } else { 301 if (iwl4965_set_ucode_ptrs(priv)) {
396 for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++) 302 /* Runtime instruction load won't happen;
397 if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF)) 303 * take it all the way back down so we can try again */
398 return idx; 304 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
305 goto restart;
399 } 306 }
307 return;
400 308
401 return -1; 309restart:
310 queue_work(priv->workqueue, &priv->restart);
402} 311}
403 312
404/** 313static int is_fat_channel(__le32 rxon_flags)
405 * translate ucode response to mac80211 tx status control values
406 */
407void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
408 struct ieee80211_tx_control *control)
409{ 314{
410 int rate_index; 315 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
411 316 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
412 control->antenna_sel_tx =
413 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
414 if (rate_n_flags & RATE_MCS_HT_MSK)
415 control->flags |= IEEE80211_TXCTL_OFDM_HT;
416 if (rate_n_flags & RATE_MCS_GF_MSK)
417 control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
418 if (rate_n_flags & RATE_MCS_FAT_MSK)
419 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
420 if (rate_n_flags & RATE_MCS_DUP_MSK)
421 control->flags |= IEEE80211_TXCTL_DUP_DATA;
422 if (rate_n_flags & RATE_MCS_SGI_MSK)
423 control->flags |= IEEE80211_TXCTL_SHORT_GI;
424 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
425 * IEEE80211_BAND_2GHZ band as it contains all the rates */
426 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
427 if (rate_index == -1)
428 control->tx_rate = NULL;
429 else
430 control->tx_rate =
431 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
432} 317}
433 318
434/* 319/*
435 * Determine how many receiver/antenna chains to use. 320 * EEPROM handlers
436 * More provides better reception via diversity. Fewer saves power.
437 * MIMO (dual stream) requires at least 2, but works better with 3.
438 * This does not determine *which* chains to use, just how many.
439 */ 321 */
440static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv,
441 u8 *idle_state, u8 *rx_state)
442{
443 u8 is_single = is_single_stream(priv);
444 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
445
446 /* # of Rx chains to use when expecting MIMO. */
447 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
448 *rx_state = 2;
449 else
450 *rx_state = 3;
451
452 /* # Rx chains when idling and maybe trying to save power */
453 switch (priv->ps_mode) {
454 case IWL_MIMO_PS_STATIC:
455 case IWL_MIMO_PS_DYNAMIC:
456 *idle_state = (is_cam) ? 2 : 1;
457 break;
458 case IWL_MIMO_PS_NONE:
459 *idle_state = (is_cam) ? *rx_state : 1;
460 break;
461 default:
462 *idle_state = 1;
463 break;
464 }
465 322
466 return 0; 323static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
467}
468
469int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
470{ 324{
471 int rc; 325 u16 eeprom_ver;
472 unsigned long flags; 326 u16 calib_ver;
473 327
474 spin_lock_irqsave(&priv->lock, flags); 328 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
475 rc = iwl_grab_nic_access(priv);
476 if (rc) {
477 spin_unlock_irqrestore(&priv->lock, flags);
478 return rc;
479 }
480 329
481 /* stop Rx DMA */ 330 calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
482 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
483 rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
484 (1 << 24), 1000);
485 if (rc < 0)
486 IWL_ERROR("Can't stop Rx DMA.\n");
487 331
488 iwl_release_nic_access(priv); 332 if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
489 spin_unlock_irqrestore(&priv->lock, flags); 333 calib_ver < EEPROM_4965_TX_POWER_VERSION)
334 goto err;
490 335
491 return 0; 336 return 0;
492} 337err:
493 338 IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
494u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *addr) 339 eeprom_ver, EEPROM_4965_EEPROM_VERSION,
495{ 340 calib_ver, EEPROM_4965_TX_POWER_VERSION);
496 int i; 341 return -EINVAL;
497 int start = 0;
498 int ret = IWL_INVALID_STATION;
499 unsigned long flags;
500 DECLARE_MAC_BUF(mac);
501
502 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
503 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
504 start = IWL_STA_ID;
505
506 if (is_broadcast_ether_addr(addr))
507 return priv->hw_params.bcast_sta_id;
508
509 spin_lock_irqsave(&priv->sta_lock, flags);
510 for (i = start; i < priv->hw_params.max_stations; i++)
511 if ((priv->stations[i].used) &&
512 (!compare_ether_addr
513 (priv->stations[i].sta.sta.addr, addr))) {
514 ret = i;
515 goto out;
516 }
517
518 IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
519 print_mac(mac, addr), priv->num_stations);
520 342
521 out:
522 spin_unlock_irqrestore(&priv->sta_lock, flags);
523 return ret;
524} 343}
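/*
 * Worked example (numbers are hypothetical): if the EEPROM reports
 * eeprom_ver = 0x002a while EEPROM_4965_EEPROM_VERSION requires 0x0036,
 * the check above fails and the function returns -EINVAL, so boards with
 * too-old EEPROM/calibration data are rejected before any uCode is loaded.
 */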
525 344int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
526static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
527{ 345{
528 int ret; 346 int ret;
529 unsigned long flags; 347 unsigned long flags;
@@ -535,340 +353,130 @@ static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
535 return ret; 353 return ret;
536 } 354 }
537 355
538 if (!pwr_max) { 356 if (src == IWL_PWR_SRC_VAUX) {
539 u32 val; 357 u32 val;
540
541 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE, 358 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
542 &val); 359 &val);
543 360
544 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) 361 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
545 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 362 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
546 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 363 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
547 ~APMG_PS_CTRL_MSK_PWR_SRC); 364 ~APMG_PS_CTRL_MSK_PWR_SRC);
548 } else 365 }
366 } else {
549 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 367 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
550 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 368 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
551 ~APMG_PS_CTRL_MSK_PWR_SRC); 369 ~APMG_PS_CTRL_MSK_PWR_SRC);
552
553 iwl_release_nic_access(priv);
554 spin_unlock_irqrestore(&priv->lock, flags);
555
556 return ret;
557}
558
559static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
560{
561 int ret;
562 unsigned long flags;
563 unsigned int rb_size;
564
565 spin_lock_irqsave(&priv->lock, flags);
566 ret = iwl_grab_nic_access(priv);
567 if (ret) {
568 spin_unlock_irqrestore(&priv->lock, flags);
569 return ret;
570 } 370 }
571 371
572 if (priv->cfg->mod_params->amsdu_size_8K)
573 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
574 else
575 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
576
577 /* Stop Rx DMA */
578 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
579
580 /* Reset driver's Rx queue write index */
581 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
582
583 /* Tell device where to find RBD circular buffer in DRAM */
584 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
585 rxq->dma_addr >> 8);
586
587 /* Tell device where in DRAM to update its Rx status */
588 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
589 (priv->shared_phys +
590 offsetof(struct iwl4965_shared, rb_closed)) >> 4);
591
592 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
593 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
594 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
595 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
596 rb_size |
597 /* 0x10 << 4 | */
598 (RX_QUEUE_SIZE_LOG <<
599 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
600
601 /*
602 * iwl_write32(priv,CSR_INT_COAL_REG,0);
603 */
604
605 iwl_release_nic_access(priv);
606 spin_unlock_irqrestore(&priv->lock, flags);
607
608 return 0;
609}
610
611/* Tell 4965 where to find the "keep warm" buffer */
612static int iwl4965_kw_init(struct iwl_priv *priv)
613{
614 unsigned long flags;
615 int rc;
616
617 spin_lock_irqsave(&priv->lock, flags);
618 rc = iwl_grab_nic_access(priv);
619 if (rc)
620 goto out;
621
622 iwl_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
623 priv->kw.dma_addr >> 4);
624 iwl_release_nic_access(priv); 372 iwl_release_nic_access(priv);
625out:
626 spin_unlock_irqrestore(&priv->lock, flags); 373 spin_unlock_irqrestore(&priv->lock, flags);
627 return rc;
628}
629
630static int iwl4965_kw_alloc(struct iwl_priv *priv)
631{
632 struct pci_dev *dev = priv->pci_dev;
633 struct iwl4965_kw *kw = &priv->kw;
634
635 kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
636 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
637 if (!kw->v_addr)
638 return -ENOMEM;
639 374
640 return 0; 375 return ret;
641} 376}
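/*
 * Usage sketch (assumed caller, not shown in this hunk):
 *
 *	ret = iwl4965_set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
 *	if (ret)
 *		IWL_ERROR("Failed to set power source\n");
 *
 * VAUX is only selected when PME from D3cold is supported, as checked above.
 */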
642 377
643/** 378/*
644 * iwl4965_kw_free - Free the "keep warm" buffer 379 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask
380 * must be called under priv->lock and mac access
645 */ 381 */
646static void iwl4965_kw_free(struct iwl_priv *priv) 382static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
647{ 383{
648 struct pci_dev *dev = priv->pci_dev; 384 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
649 struct iwl4965_kw *kw = &priv->kw;
650
651 if (kw->v_addr) {
652 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
653 memset(kw, 0, sizeof(*kw));
654 }
655} 385}
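/*
 * Call-pattern sketch: as the comment above notes, the scheduler mask write
 * must be made while holding priv->lock and NIC access.  The wrapper name
 * below is hypothetical; the locking pattern matches the rest of this file.
 */
static void iwl4965_txq_sched_disable(struct iwl_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (!iwl_grab_nic_access(priv)) {
		iwl4965_txq_set_sched(priv, 0);	/* turn off all Tx FIFOs */
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}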
656 386
657/** 387static int iwl4965_apm_init(struct iwl_priv *priv)
658 * iwl4965_txq_ctx_reset - Reset TX queue context
659 * Destroys all DMA structures and initialise them again
660 *
661 * @param priv
662 * @return error code
663 */
664static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
665{ 388{
666 int rc = 0; 389 int ret = 0;
667 int txq_id, slots_num;
668 unsigned long flags;
669
670 iwl4965_kw_free(priv);
671
672 /* Free all tx/cmd queues and keep-warm buffer */
673 iwl4965_hw_txq_ctx_free(priv);
674
675 /* Alloc keep-warm buffer */
676 rc = iwl4965_kw_alloc(priv);
677 if (rc) {
678 IWL_ERROR("Keep Warm allocation failed");
679 goto error_kw;
680 }
681
682 spin_lock_irqsave(&priv->lock, flags);
683
684 rc = iwl_grab_nic_access(priv);
685 if (unlikely(rc)) {
686 IWL_ERROR("TX reset failed");
687 spin_unlock_irqrestore(&priv->lock, flags);
688 goto error_reset;
689 }
690
691 /* Turn off all Tx DMA channels */
692 iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
693 iwl_release_nic_access(priv);
694 spin_unlock_irqrestore(&priv->lock, flags);
695
696 /* Tell 4965 where to find the keep-warm buffer */
697 rc = iwl4965_kw_init(priv);
698 if (rc) {
699 IWL_ERROR("kw_init failed\n");
700 goto error_reset;
701 }
702
703 /* Alloc and init all (default 16) Tx queues,
704 * including the command queue (#4) */
705 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
706 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
707 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
708 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
709 txq_id);
710 if (rc) {
711 IWL_ERROR("Tx %d queue init failed\n", txq_id);
712 goto error;
713 }
714 }
715
716 return rc;
717
718 error:
719 iwl4965_hw_txq_ctx_free(priv);
720 error_reset:
721 iwl4965_kw_free(priv);
722 error_kw:
723 return rc;
724}
725
726int iwl4965_hw_nic_init(struct iwl_priv *priv)
727{
728 int rc;
729 unsigned long flags;
730 struct iwl4965_rx_queue *rxq = &priv->rxq;
731 u8 rev_id;
732 u32 val;
733 u8 val_link;
734
735 iwl4965_power_init_handle(priv);
736 390
737 /* nic_init */ 391 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
738 spin_lock_irqsave(&priv->lock, flags); 392 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
739 393
 394 /* disable L0s without affecting L1; don't wait for ICH L0s bug W/A */
740 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 395 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
741 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 396 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
742 397
398 /* set "initialization complete" bit to move adapter
399 * D0U* --> D0A* state */
743 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 400 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
744 rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
745 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
746 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
747 if (rc < 0) {
748 spin_unlock_irqrestore(&priv->lock, flags);
749 IWL_DEBUG_INFO("Failed to init the card\n");
750 return rc;
751 }
752 401
753 rc = iwl_grab_nic_access(priv); 402 /* wait for clock stabilization */
754 if (rc) { 403 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
755 spin_unlock_irqrestore(&priv->lock, flags); 404 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
756 return rc; 405 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
406 if (ret < 0) {
407 IWL_DEBUG_INFO("Failed to init the card\n");
408 goto out;
757 } 409 }
758 410
759 iwl_read_prph(priv, APMG_CLK_CTRL_REG); 411 ret = iwl_grab_nic_access(priv);
412 if (ret)
413 goto out;
760 414
761 iwl_write_prph(priv, APMG_CLK_CTRL_REG, 415 /* enable DMA */
762 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); 416 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
763 iwl_read_prph(priv, APMG_CLK_CTRL_REG); 417 APMG_CLK_VAL_BSM_CLK_RQT);
764 418
765 udelay(20); 419 udelay(20);
766 420
421 /* disable L1-Active */
767 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 422 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
768 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 423 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
769 424
770 iwl_release_nic_access(priv); 425 iwl_release_nic_access(priv);
771 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32); 426out:
772 spin_unlock_irqrestore(&priv->lock, flags); 427 return ret;
428}
773 429
774 /* Determine HW type */
775 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
776 if (rc)
777 return rc;
778 430
779 IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id); 431static void iwl4965_nic_config(struct iwl_priv *priv)
432{
433 unsigned long flags;
434 u32 val;
435 u16 radio_cfg;
436 u8 val_link;
780 437
781 iwl4965_nic_set_pwr_src(priv, 1);
782 spin_lock_irqsave(&priv->lock, flags); 438 spin_lock_irqsave(&priv->lock, flags);
783 439
784 if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) { 440 if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
785 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val); 441 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
786 /* Enable No Snoop field */ 442 /* Enable No Snoop field */
787 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8, 443 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
788 val & ~(1 << 11)); 444 val & ~(1 << 11));
789 } 445 }
790 446
791 spin_unlock_irqrestore(&priv->lock, flags);
792
793 if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
794 IWL_ERROR("Older EEPROM detected! Aborting.\n");
795 return -EINVAL;
796 }
797
798 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); 447 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
799 448
800 /* disable L1 entry -- workaround for pre-B1 */ 449 /* L1 is enabled by BIOS */
801 pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02); 450 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
451 /* diable L0S disabled L1A enabled */
452 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
453 else
454 /* L0S enabled L1A disabled */
455 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
802 456
803 spin_lock_irqsave(&priv->lock, flags); 457 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
804 458
805 /* set CSR_HW_CONFIG_REG for uCode use */ 459 /* write radio config values to register */
460 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
461 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
462 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
463 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
464 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
806 465
466 /* set CSR_HW_CONFIG_REG for uCode use */
807 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 467 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
808 CSR49_HW_IF_CONFIG_REG_BIT_4965_R | 468 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
809 CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI | 469 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
810 CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);
811 470
812 rc = iwl_grab_nic_access(priv); 471 priv->calib_info = (struct iwl_eeprom_calib_info *)
813 if (rc < 0) { 472 iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
814 spin_unlock_irqrestore(&priv->lock, flags);
815 IWL_DEBUG_INFO("Failed to init the card\n");
816 return rc;
817 }
818 473
819 iwl_read_prph(priv, APMG_PS_CTRL_REG);
820 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
821 udelay(5);
822 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
823
824 iwl_release_nic_access(priv);
825 spin_unlock_irqrestore(&priv->lock, flags); 474 spin_unlock_irqrestore(&priv->lock, flags);
826
827 iwl4965_hw_card_show_info(priv);
828
829 /* end nic_init */
830
831 /* Allocate the RX queue, or reset if it is already allocated */
832 if (!rxq->bd) {
833 rc = iwl4965_rx_queue_alloc(priv);
834 if (rc) {
835 IWL_ERROR("Unable to initialize Rx queue\n");
836 return -ENOMEM;
837 }
838 } else
839 iwl4965_rx_queue_reset(priv, rxq);
840
841 iwl4965_rx_replenish(priv);
842
843 iwl4965_rx_init(priv, rxq);
844
845 spin_lock_irqsave(&priv->lock, flags);
846
847 rxq->need_update = 1;
848 iwl4965_rx_queue_update_write_ptr(priv, rxq);
849
850 spin_unlock_irqrestore(&priv->lock, flags);
851
852 /* Allocate and init all Tx and Command queues */
853 rc = iwl4965_txq_ctx_reset(priv);
854 if (rc)
855 return rc;
856
857 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
858 IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");
859
860 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
861 IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
862
863 set_bit(STATUS_INIT, &priv->status);
864
865 return 0;
866} 475}
867 476
868int iwl4965_hw_nic_stop_master(struct iwl_priv *priv) 477static int iwl4965_apm_stop_master(struct iwl_priv *priv)
869{ 478{
870 int rc = 0; 479 int ret = 0;
871 u32 reg_val;
872 unsigned long flags; 480 unsigned long flags;
873 481
874 spin_lock_irqsave(&priv->lock, flags); 482 spin_lock_irqsave(&priv->lock, flags);
@@ -876,64 +484,24 @@ int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
876 /* set stop master bit */ 484 /* set stop master bit */
877 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 485 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
878 486
879 reg_val = iwl_read32(priv, CSR_GP_CNTRL); 487 ret = iwl_poll_bit(priv, CSR_RESET,
880
881 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
882 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
883 IWL_DEBUG_INFO("Card in power save, master is already "
884 "stopped\n");
885 else {
886 rc = iwl_poll_bit(priv, CSR_RESET,
887 CSR_RESET_REG_FLAG_MASTER_DISABLED, 488 CSR_RESET_REG_FLAG_MASTER_DISABLED,
888 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 489 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
889 if (rc < 0) { 490 if (ret < 0)
890 spin_unlock_irqrestore(&priv->lock, flags); 491 goto out;
891 return rc;
892 }
893 }
894 492
493out:
895 spin_unlock_irqrestore(&priv->lock, flags); 494 spin_unlock_irqrestore(&priv->lock, flags);
896 IWL_DEBUG_INFO("stop master\n"); 495 IWL_DEBUG_INFO("stop master\n");
897 496
898 return rc; 497 return ret;
899}
900
901/**
902 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
903 */
904void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
905{
906
907 int txq_id;
908 unsigned long flags;
909
910 /* Stop each Tx DMA channel, and wait for it to be idle */
911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
912 spin_lock_irqsave(&priv->lock, flags);
913 if (iwl_grab_nic_access(priv)) {
914 spin_unlock_irqrestore(&priv->lock, flags);
915 continue;
916 }
917
918 iwl_write_direct32(priv,
919 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
920 iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
921 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
922 (txq_id), 200);
923 iwl_release_nic_access(priv);
924 spin_unlock_irqrestore(&priv->lock, flags);
925 }
926
927 /* Deallocate memory for all Tx queues */
928 iwl4965_hw_txq_ctx_free(priv);
929} 498}
930 499
931int iwl4965_hw_nic_reset(struct iwl_priv *priv) 500static void iwl4965_apm_stop(struct iwl_priv *priv)
932{ 501{
933 int rc = 0;
934 unsigned long flags; 502 unsigned long flags;
935 503
936 iwl4965_hw_nic_stop_master(priv); 504 iwl4965_apm_stop_master(priv);
937 505
938 spin_lock_irqsave(&priv->lock, flags); 506 spin_lock_irqsave(&priv->lock, flags);
939 507
@@ -942,508 +510,66 @@ int iwl4965_hw_nic_reset(struct iwl_priv *priv)
942 udelay(10); 510 udelay(10);
943 511
944 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 512 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
945 rc = iwl_poll_bit(priv, CSR_RESET,
946 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
947 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
948
949 udelay(10);
950
951 rc = iwl_grab_nic_access(priv);
952 if (!rc) {
953 iwl_write_prph(priv, APMG_CLK_EN_REG,
954 APMG_CLK_VAL_DMA_CLK_RQT |
955 APMG_CLK_VAL_BSM_CLK_RQT);
956
957 udelay(10);
958
959 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
960 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
961
962 iwl_release_nic_access(priv);
963 }
964
965 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
966 wake_up_interruptible(&priv->wait_command_queue);
967
968 spin_unlock_irqrestore(&priv->lock, flags); 513 spin_unlock_irqrestore(&priv->lock, flags);
969
970 return rc;
971
972} 514}
973 515
974#define REG_RECALIB_PERIOD (60) 516static int iwl4965_apm_reset(struct iwl_priv *priv)
975
976/**
977 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
978 *
979 * This callback is provided in order to send a statistics request.
980 *
981 * This timer function is continually reset to execute within
982 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
983 * was received. We need to ensure we receive the statistics in order
984 * to update the temperature used for calibrating the TXPOWER.
985 */
986static void iwl4965_bg_statistics_periodic(unsigned long data)
987{ 517{
988 struct iwl_priv *priv = (struct iwl_priv *)data;
989
990 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
991 return;
992
993 iwl_send_statistics_request(priv, CMD_ASYNC);
994}
995
996#define CT_LIMIT_CONST 259
997#define TM_CT_KILL_THRESHOLD 110
998
999void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1000{
1001 struct iwl4965_ct_kill_config cmd;
1002 u32 R1, R2, R3;
1003 u32 temp_th;
1004 u32 crit_temperature;
1005 unsigned long flags;
1006 int ret = 0; 518 int ret = 0;
519 unsigned long flags;
1007 520
1008 spin_lock_irqsave(&priv->lock, flags); 521 iwl4965_apm_stop_master(priv);
1009 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1010 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1011 spin_unlock_irqrestore(&priv->lock, flags);
1012
1013 if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
1014 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1015 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1016 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1017 } else {
1018 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1019 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1020 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1021 }
1022
1023 temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
1024
1025 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
1026 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
1027 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1028 sizeof(cmd), &cmd);
1029 if (ret)
1030 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
1031 else
1032 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
1033}
1034
1035#ifdef CONFIG_IWL4965_SENSITIVITY
1036
1037/* "false alarms" are signals that our DSP tries to lock onto,
1038 * but then determines that they are either noise, or transmissions
1039 * from a distant wireless network (also "noise", really) that get
1040 * "stepped on" by stronger transmissions within our own network.
1041 * This algorithm attempts to set a sensitivity level that is high
1042 * enough to receive all of our own network traffic, but not so
1043 * high that our DSP gets too busy trying to lock onto non-network
1044 * activity/noise. */
1045static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
1046 u32 norm_fa,
1047 u32 rx_enable_time,
1048 struct statistics_general_data *rx_info)
1049{
1050 u32 max_nrg_cck = 0;
1051 int i = 0;
1052 u8 max_silence_rssi = 0;
1053 u32 silence_ref = 0;
1054 u8 silence_rssi_a = 0;
1055 u8 silence_rssi_b = 0;
1056 u8 silence_rssi_c = 0;
1057 u32 val;
1058
1059 /* "false_alarms" values below are cross-multiplications to assess the
1060 * numbers of false alarms within the measured period of actual Rx
1061 * (Rx is off when we're txing), vs the min/max expected false alarms
1062 * (some should be expected if rx is sensitive enough) in a
1063 * hypothetical listening period of 200 time units (TU), 204.8 msec:
1064 *
1065 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
1066 *
1067 * */
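	/*
	 * Worked example (editorial; constants are assumptions): with
	 * norm_fa = 10 false alarms measured over rx_enable_time = 100000
	 * usec, the scaled value is 10 * 200 * 1024 = 2048000.  Assuming
	 * MIN_FA_CCK = 5 and MAX_FA_CCK = 50, the bounds come out to
	 * 500000 and 5000000, so this beacon falls in the "healthy" range
	 * and no sensitivity change is made.
	 */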
1068 u32 false_alarms = norm_fa * 200 * 1024;
1069 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
1070 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
1071 struct iwl4965_sensitivity_data *data = NULL;
1072
1073 data = &(priv->sensitivity_data);
1074
1075 data->nrg_auto_corr_silence_diff = 0;
1076
1077 /* Find max silence rssi among all 3 receivers.
1078 * This is background noise, which may include transmissions from other
1079 * networks, measured during silence before our network's beacon */
1080 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
1081 ALL_BAND_FILTER) >> 8);
1082 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
1083 ALL_BAND_FILTER) >> 8);
1084 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
1085 ALL_BAND_FILTER) >> 8);
1086
1087 val = max(silence_rssi_b, silence_rssi_c);
1088 max_silence_rssi = max(silence_rssi_a, (u8) val);
1089
1090 /* Store silence rssi in 20-beacon history table */
1091 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
1092 data->nrg_silence_idx++;
1093 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
1094 data->nrg_silence_idx = 0;
1095
1096 /* Find max silence rssi across 20 beacon history */
1097 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
1098 val = data->nrg_silence_rssi[i];
1099 silence_ref = max(silence_ref, val);
1100 }
1101 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
1102 silence_rssi_a, silence_rssi_b, silence_rssi_c,
1103 silence_ref);
1104
1105 /* Find max rx energy (min value!) among all 3 receivers,
1106 * measured during beacon frame.
1107 * Save it in 10-beacon history table. */
1108 i = data->nrg_energy_idx;
1109 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
1110 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
1111
1112 data->nrg_energy_idx++;
1113 if (data->nrg_energy_idx >= 10)
1114 data->nrg_energy_idx = 0;
1115
1116 /* Find min rx energy (max value) across 10 beacon history.
1117 * This is the minimum signal level that we want to receive well.
1118 * Add backoff (margin so we don't miss slightly lower energy frames).
1119 * This establishes an upper bound (min value) for energy threshold. */
1120 max_nrg_cck = data->nrg_value[0];
1121 for (i = 1; i < 10; i++)
1122 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
1123 max_nrg_cck += 6;
1124
1125 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
1126 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
1127 rx_info->beacon_energy_c, max_nrg_cck - 6);
1128
1129 /* Count number of consecutive beacons with fewer-than-desired
1130 * false alarms. */
1131 if (false_alarms < min_false_alarms)
1132 data->num_in_cck_no_fa++;
1133 else
1134 data->num_in_cck_no_fa = 0;
1135 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
1136 data->num_in_cck_no_fa);
1137
1138 /* If we got too many false alarms this time, reduce sensitivity */
1139 if (false_alarms > max_false_alarms) {
1140 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
1141 false_alarms, max_false_alarms);
1142 IWL_DEBUG_CALIB("... reducing sensitivity\n");
1143 data->nrg_curr_state = IWL_FA_TOO_MANY;
1144
1145 if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
1146 /* Store for "fewer than desired" on later beacon */
1147 data->nrg_silence_ref = silence_ref;
1148
1149 /* increase energy threshold (reduce nrg value)
1150 * to decrease sensitivity */
1151 if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
1152 data->nrg_th_cck = data->nrg_th_cck
1153 - NRG_STEP_CCK;
1154 }
1155
1156 /* increase auto_corr values to decrease sensitivity */
1157 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
1158 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
1159 else {
1160 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
1161 data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
1162 }
1163 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
1164 data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);
1165
1166 /* Else if we got fewer than desired, increase sensitivity */
1167 } else if (false_alarms < min_false_alarms) {
1168 data->nrg_curr_state = IWL_FA_TOO_FEW;
1169
1170 /* Compare silence level with silence level for most recent
1171 * healthy number or too many false alarms */
1172 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
1173 (s32)silence_ref;
1174
1175 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
1176 false_alarms, min_false_alarms,
1177 data->nrg_auto_corr_silence_diff);
1178
1179 /* Increase value to increase sensitivity, but only if:
1180 * 1a) previous beacon did *not* have *too many* false alarms
1181 * 1b) AND there's a significant difference in Rx levels
1182 * from a previous beacon with too many, or healthy # FAs
1183 * OR 2) We've seen a lot of beacons (100) with too few
1184 * false alarms */
1185 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
1186 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
1187 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
1188
1189 IWL_DEBUG_CALIB("... increasing sensitivity\n");
1190 /* Increase nrg value to increase sensitivity */
1191 val = data->nrg_th_cck + NRG_STEP_CCK;
1192 data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);
1193
1194 /* Decrease auto_corr values to increase sensitivity */
1195 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
1196 data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);
1197
1198 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
1199 data->auto_corr_cck_mrc =
1200 max((u32)AUTO_CORR_MIN_CCK_MRC, val);
1201
1202 } else
1203 IWL_DEBUG_CALIB("... but not changing sensitivity\n");
1204
1205 /* Else we got a healthy number of false alarms, keep status quo */
1206 } else {
1207 IWL_DEBUG_CALIB(" FA in safe zone\n");
1208 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
1209
1210 /* Store for use in "fewer than desired" with later beacon */
1211 data->nrg_silence_ref = silence_ref;
1212
1213 /* If previous beacon had too many false alarms,
1214 * give it some extra margin by reducing sensitivity again
1215 * (but don't go below measured energy of desired Rx) */
1216 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
1217 IWL_DEBUG_CALIB("... increasing margin\n");
1218 data->nrg_th_cck -= NRG_MARGIN;
1219 }
1220 }
1221
1222 /* Make sure the energy threshold does not go above the measured
1223 * energy of the desired Rx signals (reduced by backoff margin),
1224 * or else we might start missing Rx frames.
1225 * Lower value is higher energy, so we use max()!
1226 */
1227 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
1228 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
1229
1230 data->nrg_prev_state = data->nrg_curr_state;
1231
1232 return 0;
1233}
1234
1235
1236static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
1237 u32 norm_fa,
1238 u32 rx_enable_time)
1239{
1240 u32 val;
1241 u32 false_alarms = norm_fa * 200 * 1024;
1242 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
1243 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
1244 struct iwl4965_sensitivity_data *data = NULL;
1245
1246 data = &(priv->sensitivity_data);
1247
1248 /* If we got too many false alarms this time, reduce sensitivity */
1249 if (false_alarms > max_false_alarms) {
1250
1251 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
1252 false_alarms, max_false_alarms);
1253
1254 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
1255 data->auto_corr_ofdm =
1256 min((u32)AUTO_CORR_MAX_OFDM, val);
1257
1258 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
1259 data->auto_corr_ofdm_mrc =
1260 min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
1261
1262 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
1263 data->auto_corr_ofdm_x1 =
1264 min((u32)AUTO_CORR_MAX_OFDM_X1, val);
1265
1266 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
1267 data->auto_corr_ofdm_mrc_x1 =
1268 min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
1269 }
1270
1271 /* Else if we got fewer than desired, increase sensitivity */
1272 else if (false_alarms < min_false_alarms) {
1273
1274 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
1275 false_alarms, min_false_alarms);
1276
1277 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
1278 data->auto_corr_ofdm =
1279 max((u32)AUTO_CORR_MIN_OFDM, val);
1280
1281 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
1282 data->auto_corr_ofdm_mrc =
1283 max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
1284
1285 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
1286 data->auto_corr_ofdm_x1 =
1287 max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1288
1289 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1290 data->auto_corr_ofdm_mrc_x1 =
1291 max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1292 }
1293 522
1294 else 523 spin_lock_irqsave(&priv->lock, flags);
1295 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1296 min_false_alarms, false_alarms, max_false_alarms);
1297 524
1298 return 0; 525 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1299}
1300 526
1301static int iwl4965_sensitivity_callback(struct iwl_priv *priv, 527 udelay(10);
1302 struct iwl_cmd *cmd, struct sk_buff *skb)
1303{
1304 /* We didn't cache the SKB; let the caller free it */
1305 return 1;
1306}
1307 528
1308/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 529 /* FIXME: put here L1A -L0S w/a */
1309static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags)
1310{
1311 struct iwl4965_sensitivity_cmd cmd ;
1312 struct iwl4965_sensitivity_data *data = NULL;
1313 struct iwl_host_cmd cmd_out = {
1314 .id = SENSITIVITY_CMD,
1315 .len = sizeof(struct iwl4965_sensitivity_cmd),
1316 .meta.flags = flags,
1317 .data = &cmd,
1318 };
1319 int ret;
1320 530
1321 data = &(priv->sensitivity_data); 531 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1322
1323 memset(&cmd, 0, sizeof(cmd));
1324
1325 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1326 cpu_to_le16((u16)data->auto_corr_ofdm);
1327 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1328 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1329 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1330 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1331 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1332 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1333
1334 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1335 cpu_to_le16((u16)data->auto_corr_cck);
1336 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1337 cpu_to_le16((u16)data->auto_corr_cck_mrc);
1338
1339 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1340 cpu_to_le16((u16)data->nrg_th_cck);
1341 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1342 cpu_to_le16((u16)data->nrg_th_ofdm);
1343
1344 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1345 __constant_cpu_to_le16(190);
1346 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1347 __constant_cpu_to_le16(390);
1348 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1349 __constant_cpu_to_le16(62);
1350
1351 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1352 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1353 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1354 data->nrg_th_ofdm);
1355
1356 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1357 data->auto_corr_cck, data->auto_corr_cck_mrc,
1358 data->nrg_th_cck);
1359
1360 /* Update uCode's "work" table, and copy it to DSP */
1361 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1362
1363 if (flags & CMD_ASYNC)
1364 cmd_out.meta.u.callback = iwl4965_sensitivity_callback;
1365
1366 /* Don't send command to uCode if nothing has changed */
1367 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1368 sizeof(u16)*HD_TABLE_SIZE)) {
1369 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1370 return 0;
1371 }
1372 532
1373 /* Copy table for comparison next time */ 533 ret = iwl_poll_bit(priv, CSR_RESET,
1374 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 534 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1375 sizeof(u16)*HD_TABLE_SIZE); 535 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
1376 536
1377 ret = iwl_send_cmd(priv, &cmd_out);
1378 if (ret) 537 if (ret)
1379 IWL_ERROR("SENSITIVITY_CMD failed\n"); 538 goto out;
1380
1381 return ret;
1382}
1383
1384void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force)
1385{
1386 struct iwl4965_sensitivity_data *data = NULL;
1387 int i;
1388 int ret = 0;
1389
1390 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1391
1392 if (force)
1393 memset(&(priv->sensitivity_tbl[0]), 0,
1394 sizeof(u16)*HD_TABLE_SIZE);
1395
1396 /* Clear driver's sensitivity algo data */
1397 data = &(priv->sensitivity_data);
1398 memset(data, 0, sizeof(struct iwl4965_sensitivity_data));
1399 539
1400 data->num_in_cck_no_fa = 0; 540 udelay(10);
1401 data->nrg_curr_state = IWL_FA_TOO_MANY;
1402 data->nrg_prev_state = IWL_FA_TOO_MANY;
1403 data->nrg_silence_ref = 0;
1404 data->nrg_silence_idx = 0;
1405 data->nrg_energy_idx = 0;
1406 541
1407 for (i = 0; i < 10; i++) 542 ret = iwl_grab_nic_access(priv);
1408 data->nrg_value[i] = 0; 543 if (ret)
544 goto out;
545 /* Enable DMA and BSM Clock */
546 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
547 APMG_CLK_VAL_BSM_CLK_RQT);
1409 548
1410 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) 549 udelay(10);
1411 data->nrg_silence_rssi[i] = 0;
1412 550
1413 data->auto_corr_ofdm = 90; 551 /* disable L1A */
1414 data->auto_corr_ofdm_mrc = 170; 552 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1415 data->auto_corr_ofdm_x1 = 105; 553 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1416 data->auto_corr_ofdm_mrc_x1 = 220;
1417 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1418 data->auto_corr_cck_mrc = 200;
1419 data->nrg_th_cck = 100;
1420 data->nrg_th_ofdm = 100;
1421 554
1422 data->last_bad_plcp_cnt_ofdm = 0; 555 iwl_release_nic_access(priv);
1423 data->last_fa_cnt_ofdm = 0;
1424 data->last_bad_plcp_cnt_cck = 0;
1425 data->last_fa_cnt_cck = 0;
1426 556
1427 /* Clear prior Sensitivity command data to force send to uCode */ 557 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1428 if (force) 558 wake_up_interruptible(&priv->wait_command_queue);
1429 memset(&(priv->sensitivity_tbl[0]), 0,
1430 sizeof(u16)*HD_TABLE_SIZE);
1431 559
1432 ret |= iwl4965_sensitivity_write(priv, flags); 560out:
1433 IWL_DEBUG_CALIB("<<return 0x%X\n", ret); 561 spin_unlock_irqrestore(&priv->lock, flags);
1434 562
1435 return; 563 return ret;
1436} 564}
1437 565
1438
1439/* Reset differential Rx gains in NIC to prepare for chain noise calibration. 566/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1440 * Called after every association, but this runs only once! 567 * Called after every association, but this runs only once!
1441 * ... once chain noise is calibrated the first time, it's good forever. */ 568 * ... once chain noise is calibrated the first time, it's good forever. */
1442void iwl4965_chain_noise_reset(struct iwl_priv *priv) 569static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1443{ 570{
1444 struct iwl4965_chain_noise_data *data = NULL; 571 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
1445 572
1446 data = &(priv->chain_noise_data);
1447 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { 573 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
1448 struct iwl4965_calibration_cmd cmd; 574 struct iwl4965_calibration_cmd cmd;
1449 575
@@ -1452,388 +578,89 @@ void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1452 cmd.diff_gain_a = 0; 578 cmd.diff_gain_a = 0;
1453 cmd.diff_gain_b = 0; 579 cmd.diff_gain_b = 0;
1454 cmd.diff_gain_c = 0; 580 cmd.diff_gain_c = 0;
1455 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, 581 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1456 sizeof(cmd), &cmd, NULL); 582 sizeof(cmd), &cmd))
1457 msleep(4); 583 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
1458 data->state = IWL_CHAIN_NOISE_ACCUMULATE; 584 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1459 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); 585 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1460 } 586 }
1461 return;
1462} 587}
1463 588
1464/* 589static void iwl4965_gain_computation(struct iwl_priv *priv,
1465 * Accumulate 20 beacons of signal and noise statistics for each of 590 u32 *average_noise,
1466 * 3 receivers/antennas/rx-chains, then figure out: 591 u16 min_average_noise_antenna_i,
1467 * 1) Which antennas are connected. 592 u32 min_average_noise)
1468 * 2) Differential rx gain settings to balance the 3 receivers.
1469 */
1470static void iwl4965_noise_calibration(struct iwl_priv *priv,
1471 struct iwl4965_notif_statistics *stat_resp)
1472{ 593{
1473 struct iwl4965_chain_noise_data *data = NULL; 594 int i, ret;
1474 int ret = 0; 595 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1475
1476 u32 chain_noise_a;
1477 u32 chain_noise_b;
1478 u32 chain_noise_c;
1479 u32 chain_sig_a;
1480 u32 chain_sig_b;
1481 u32 chain_sig_c;
1482 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1483 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1484 u32 max_average_sig;
1485 u16 max_average_sig_antenna_i;
1486 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1487 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1488 u16 i = 0;
1489 u16 chan_num = INITIALIZATION_VALUE;
1490 u32 band = INITIALIZATION_VALUE;
1491 u32 active_chains = 0;
1492 unsigned long flags;
1493 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1494
1495 data = &(priv->chain_noise_data);
1496
1497 /* Accumulate just the first 20 beacons after the first association,
1498 * then we're done forever. */
1499 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1500 if (data->state == IWL_CHAIN_NOISE_ALIVE)
1501 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1502 return;
1503 }
1504
1505 spin_lock_irqsave(&priv->lock, flags);
1506 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1507 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1508 spin_unlock_irqrestore(&priv->lock, flags);
1509 return;
1510 }
1511
1512 band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1513 chan_num = le16_to_cpu(priv->staging_rxon.channel);
1514
1515 /* Make sure we accumulate data for just the associated channel
1516 * (even if scanning). */
1517 if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1518 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1519 (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1520 IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1521 chan_num, band);
1522 spin_unlock_irqrestore(&priv->lock, flags);
1523 return;
1524 }
1525
1526 /* Accumulate beacon statistics values across 20 beacons */
1527 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1528 IN_BAND_FILTER;
1529 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1530 IN_BAND_FILTER;
1531 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1532 IN_BAND_FILTER;
1533
1534 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1535 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1536 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1537
1538 spin_unlock_irqrestore(&priv->lock, flags);
1539
1540 data->beacon_count++;
1541
1542 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1543 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1544 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1545
1546 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1547 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1548 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1549
1550 IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1551 data->beacon_count);
1552 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1553 chain_sig_a, chain_sig_b, chain_sig_c);
1554 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1555 chain_noise_a, chain_noise_b, chain_noise_c);
1556
1557 /* If this is the 20th beacon, determine:
1558 * 1) Disconnected antennas (using signal strengths)
1559 * 2) Differential gain (using silence noise) to balance receivers */
1560 if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1561
1562 /* Analyze signal for disconnected antenna */
1563 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1564 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1565 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1566
1567 if (average_sig[0] >= average_sig[1]) {
1568 max_average_sig = average_sig[0];
1569 max_average_sig_antenna_i = 0;
1570 active_chains = (1 << max_average_sig_antenna_i);
1571 } else {
1572 max_average_sig = average_sig[1];
1573 max_average_sig_antenna_i = 1;
1574 active_chains = (1 << max_average_sig_antenna_i);
1575 }
1576
1577 if (average_sig[2] >= max_average_sig) {
1578 max_average_sig = average_sig[2];
1579 max_average_sig_antenna_i = 2;
1580 active_chains = (1 << max_average_sig_antenna_i);
1581 }
1582
1583 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1584 average_sig[0], average_sig[1], average_sig[2]);
1585 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1586 max_average_sig, max_average_sig_antenna_i);
1587
1588 /* Compare signal strengths for all 3 receivers. */
1589 for (i = 0; i < NUM_RX_CHAINS; i++) {
1590 if (i != max_average_sig_antenna_i) {
1591 s32 rssi_delta = (max_average_sig -
1592 average_sig[i]);
1593
1594 /* If signal is very weak, compared with
1595 * strongest, mark it as disconnected. */
1596 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1597 data->disconn_array[i] = 1;
1598 else
1599 active_chains |= (1 << i);
1600 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1601 "disconn_array[i] = %d\n",
1602 i, rssi_delta, data->disconn_array[i]);
1603 }
1604 }
1605
1606 /*If both chains A & B are disconnected -
1607 * connect B and leave A as is */
1608 if (data->disconn_array[CHAIN_A] &&
1609 data->disconn_array[CHAIN_B]) {
1610 data->disconn_array[CHAIN_B] = 0;
1611 active_chains |= (1 << CHAIN_B);
1612 IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1613 "W/A - declare B as connected\n");
1614 }
1615 596
1616 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n", 597 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1617 active_chains);
1618 598
1619 /* Save for use within RXON, TX, SCAN commands, etc. */ 599 for (i = 0; i < NUM_RX_CHAINS; i++) {
1620 priv->valid_antenna = active_chains; 600 s32 delta_g = 0;
1621 601
1622 /* Analyze noise for rx balance */ 602 if (!(data->disconn_array[i]) &&
1623 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS); 603 (data->delta_gain_code[i] ==
1624 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1625 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1626
1627 for (i = 0; i < NUM_RX_CHAINS; i++) {
1628 if (!(data->disconn_array[i]) &&
1629 (average_noise[i] <= min_average_noise)) {
1630 /* This means that chain i is active and has
1631 * lower noise values so far: */
1632 min_average_noise = average_noise[i];
1633 min_average_noise_antenna_i = i;
1634 }
1635 }
1636
1637 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1638
1639 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
1640 average_noise[0], average_noise[1],
1641 average_noise[2]);
1642
1643 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
1644 min_average_noise, min_average_noise_antenna_i);
1645
1646 for (i = 0; i < NUM_RX_CHAINS; i++) {
1647 s32 delta_g = 0;
1648
1649 if (!(data->disconn_array[i]) &&
1650 (data->delta_gain_code[i] ==
1651 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { 604 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1652 delta_g = average_noise[i] - min_average_noise; 605 delta_g = average_noise[i] - min_average_noise;
1653 data->delta_gain_code[i] = (u8)((delta_g * 606 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
1654 10) / 15); 607 data->delta_gain_code[i] =
1655 if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE < 608 min(data->delta_gain_code[i],
1656 data->delta_gain_code[i]) 609 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
1657 data->delta_gain_code[i] = 610
1658 CHAIN_NOISE_MAX_DELTA_GAIN_CODE; 611 data->delta_gain_code[i] =
1659 612 (data->delta_gain_code[i] | (1 << 2));
1660 data->delta_gain_code[i] = 613 } else {
1661 (data->delta_gain_code[i] | (1 << 2)); 614 data->delta_gain_code[i] = 0;
1662 } else
1663 data->delta_gain_code[i] = 0;
1664 }
1665 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1666 data->delta_gain_code[0],
1667 data->delta_gain_code[1],
1668 data->delta_gain_code[2]);
1669
1670 /* Differential gain gets sent to uCode only once */
1671 if (!data->radio_write) {
1672 struct iwl4965_calibration_cmd cmd;
1673 data->radio_write = 1;
1674
1675 memset(&cmd, 0, sizeof(cmd));
1676 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1677 cmd.diff_gain_a = data->delta_gain_code[0];
1678 cmd.diff_gain_b = data->delta_gain_code[1];
1679 cmd.diff_gain_c = data->delta_gain_code[2];
1680 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1681 sizeof(cmd), &cmd);
1682 if (ret)
1683 IWL_DEBUG_CALIB("fail sending cmd "
1684 "REPLY_PHY_CALIBRATION_CMD \n");
1685
1686 /* TODO we might want recalculate
1687 * rx_chain in rxon cmd */
1688
1689 /* Mark so we run this algo only once! */
1690 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1691 } 615 }
1692 data->chain_noise_a = 0;
1693 data->chain_noise_b = 0;
1694 data->chain_noise_c = 0;
1695 data->chain_signal_a = 0;
1696 data->chain_signal_b = 0;
1697 data->chain_signal_c = 0;
1698 data->beacon_count = 0;
1699 }
1700 return;
1701}
1702
1703static void iwl4965_sensitivity_calibration(struct iwl_priv *priv,
1704 struct iwl4965_notif_statistics *resp)
1705{
1706 u32 rx_enable_time;
1707 u32 fa_cck;
1708 u32 fa_ofdm;
1709 u32 bad_plcp_cck;
1710 u32 bad_plcp_ofdm;
1711 u32 norm_fa_ofdm;
1712 u32 norm_fa_cck;
1713 struct iwl4965_sensitivity_data *data = NULL;
1714 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
1715 struct statistics_rx *statistics = &(resp->rx);
1716 unsigned long flags;
1717 struct statistics_general_data statis;
1718 int ret;
1719
1720 data = &(priv->sensitivity_data);
1721
1722 if (!iwl_is_associated(priv)) {
1723 IWL_DEBUG_CALIB("<< - not associated\n");
1724 return;
1725 } 616 }
617 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
618 data->delta_gain_code[0],
619 data->delta_gain_code[1],
620 data->delta_gain_code[2]);
1726 621
1727 spin_lock_irqsave(&priv->lock, flags); 622 /* Differential gain gets sent to uCode only once */
1728 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 623 if (!data->radio_write) {
1729 IWL_DEBUG_CALIB("<< invalid data.\n"); 624 struct iwl4965_calibration_cmd cmd;
1730 spin_unlock_irqrestore(&priv->lock, flags); 625 data->radio_write = 1;
1731 return;
1732 }
1733
1734 /* Extract Statistics: */
1735 rx_enable_time = le32_to_cpu(rx_info->channel_load);
1736 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
1737 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
1738 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
1739 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
1740
1741 statis.beacon_silence_rssi_a =
1742 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
1743 statis.beacon_silence_rssi_b =
1744 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
1745 statis.beacon_silence_rssi_c =
1746 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
1747 statis.beacon_energy_a =
1748 le32_to_cpu(statistics->general.beacon_energy_a);
1749 statis.beacon_energy_b =
1750 le32_to_cpu(statistics->general.beacon_energy_b);
1751 statis.beacon_energy_c =
1752 le32_to_cpu(statistics->general.beacon_energy_c);
1753
1754 spin_unlock_irqrestore(&priv->lock, flags);
1755
1756 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
1757
1758 if (!rx_enable_time) {
1759 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
1760 return;
1761 }
1762
1763 /* These statistics increase monotonically, and do not reset
1764 * at each beacon. Calculate difference from last value, or just
1765 * use the new statistics value if it has reset or wrapped around. */
1766 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
1767 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
1768 else {
1769 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
1770 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
1771 }
1772 626
1773 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) 627 memset(&cmd, 0, sizeof(cmd));
1774 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; 628 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1775 else { 629 cmd.diff_gain_a = data->delta_gain_code[0];
1776 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; 630 cmd.diff_gain_b = data->delta_gain_code[1];
1777 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; 631 cmd.diff_gain_c = data->delta_gain_code[2];
1778 } 632 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
633 sizeof(cmd), &cmd);
634 if (ret)
635 IWL_DEBUG_CALIB("fail sending cmd "
636 "REPLY_PHY_CALIBRATION_CMD \n");
1779 637
1780 if (data->last_fa_cnt_ofdm > fa_ofdm) 638 /* TODO we might want recalculate
1781 data->last_fa_cnt_ofdm = fa_ofdm; 639 * rx_chain in rxon cmd */
1782 else {
1783 fa_ofdm -= data->last_fa_cnt_ofdm;
1784 data->last_fa_cnt_ofdm += fa_ofdm;
1785 }
1786 640
1787 if (data->last_fa_cnt_cck > fa_cck) 641 /* Mark so we run this algo only once! */
1788 data->last_fa_cnt_cck = fa_cck; 642 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1789 else {
1790 fa_cck -= data->last_fa_cnt_cck;
1791 data->last_fa_cnt_cck += fa_cck;
1792 } 643 }
1793 644 data->chain_noise_a = 0;
1794 /* Total aborted signal locks */ 645 data->chain_noise_b = 0;
1795 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; 646 data->chain_noise_c = 0;
1796 norm_fa_cck = fa_cck + bad_plcp_cck; 647 data->chain_signal_a = 0;
1797 648 data->chain_signal_b = 0;
1798 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, 649 data->chain_signal_c = 0;
1799 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); 650 data->beacon_count = 0;
1800
1801 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1802 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1803 ret = iwl4965_sensitivity_write(priv, CMD_ASYNC);
1804
1805 return;
1806} 651}
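
The gain-balancing step above boils down to integer arithmetic once the 20th beacon arrives: the quietest connected chain becomes the reference and keeps delta gain code 0, and every other connected chain gets a code proportional to how much noisier its 20-beacon average is, scaled by 10/15, clamped to CHAIN_NOISE_MAX_DELTA_GAIN_CODE and OR'ed with bit 2 before being sent in REPLY_PHY_CALIBRATION_CMD. A standalone sketch of just that arithmetic, with made-up noise averages and a stand-in value for the clamp:

/* Sketch of the per-chain delta gain computation above.  The averages and
 * MAX_DELTA_GAIN_CODE are invented; only the scale/clamp/bit-2 sequence
 * mirrors the driver. */
#include <stdio.h>

#define NUM_RX_CHAINS        3
#define MAX_DELTA_GAIN_CODE 15   /* stand-in for CHAIN_NOISE_MAX_DELTA_GAIN_CODE */

int main(void)
{
        unsigned int average_noise[NUM_RX_CHAINS] = { 1000, 1009, 1015 };
        unsigned int min_average_noise = 1000;     /* chain 0 is quietest */
        unsigned char delta_gain_code[NUM_RX_CHAINS];
        int i;

        delta_gain_code[0] = 0;          /* reference chain keeps code 0 */

        for (i = 1; i < NUM_RX_CHAINS; i++) {
                int delta_g = average_noise[i] - min_average_noise;
                int code = (delta_g * 10) / 15;   /* scale into gain-code units */

                if (code > MAX_DELTA_GAIN_CODE)
                        code = MAX_DELTA_GAIN_CODE;
                delta_gain_code[i] = (unsigned char)(code | (1 << 2));
        }

        for (i = 0; i < NUM_RX_CHAINS; i++)
                printf("chain %d: delta gain code %u\n", i, delta_gain_code[i]);
        return 0;                        /* prints codes 0, 6 and 14 */
}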
1807 652
1808static void iwl4965_bg_sensitivity_work(struct work_struct *work) 653static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
654 __le32 *tx_flags)
1809{ 655{
1810 struct iwl_priv *priv = container_of(work, struct iwl_priv, 656 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
1811 sensitivity_work); 657 *tx_flags |= TX_CMD_FLG_RTS_MSK;
1812 658 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
1813 mutex_lock(&priv->mutex); 659 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
1814 660 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
1815 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 661 *tx_flags |= TX_CMD_FLG_CTS_MSK;
1816 test_bit(STATUS_SCANNING, &priv->status)) {
1817 mutex_unlock(&priv->mutex);
1818 return;
1819 }
1820
1821 if (priv->start_calib) {
1822 iwl4965_noise_calibration(priv, &priv->statistics);
1823
1824 if (priv->sensitivity_data.state ==
1825 IWL_SENS_CALIB_NEED_REINIT) {
1826 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1827 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1828 } else
1829 iwl4965_sensitivity_calibration(priv,
1830 &priv->statistics);
1831 } 662 }
1832
1833 mutex_unlock(&priv->mutex);
1834 return;
1835} 663}
1836#endif /*CONFIG_IWL4965_SENSITIVITY*/
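
The sensitivity path above keeps its false-alarm and bad-PLCP statistics as running totals because, as the comment in iwl4965_sensitivity_calibration() notes, the uCode counters increase monotonically and only go backwards when they reset or wrap. A small sketch of that delta-tracking pattern on its own (the helper name and the values fed to it are illustrative, not the driver's):

/* Illustrative sketch of the monotonic-counter bookkeeping used for the
 * false-alarm / bad-PLCP statistics above. */
#include <stdio.h>

/* Return the increment since the previous reading and update *last.
 * If the counter went backwards (firmware restarted or wrapped), the new
 * absolute value is taken as the increment, matching the driver logic. */
static unsigned int counter_delta(unsigned int *last, unsigned int now)
{
        unsigned int delta;

        if (*last > now) {
                delta = now;            /* reset detected */
                *last = now;
        } else {
                delta = now - *last;
                *last += delta;         /* *last now equals 'now' */
        }
        return delta;
}

int main(void)
{
        unsigned int last_fa_cnt = 0;

        printf("%u\n", counter_delta(&last_fa_cnt, 100));  /* 100 */
        printf("%u\n", counter_delta(&last_fa_cnt, 140));  /*  40 */
        printf("%u\n", counter_delta(&last_fa_cnt, 25));   /*  25, reset */
        return 0;
}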
1837 664
1838static void iwl4965_bg_txpower_work(struct work_struct *work) 665static void iwl4965_bg_txpower_work(struct work_struct *work)
1839{ 666{
@@ -1853,7 +680,7 @@ static void iwl4965_bg_txpower_work(struct work_struct *work)
1853 /* Regardless of whether we are associated, we must reconfigure the 680 /* Regardless of whether we are associated, we must reconfigure the
1854 * TX power since frames can be sent on non-radar channels while 681 * TX power since frames can be sent on non-radar channels while
1855 * not associated */ 682 * not associated */
1856 iwl4965_hw_reg_send_txpower(priv); 683 iwl4965_send_tx_power(priv);
1857 684
1858 /* Update last_temperature to keep is_calib_needed from running 685 /* Update last_temperature to keep is_calib_needed from running
1859 * when it isn't needed... */ 686 * when it isn't needed... */
@@ -1880,7 +707,7 @@ static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
1880 * NOTE: Acquire priv->lock before calling this function ! 707 * NOTE: Acquire priv->lock before calling this function !
1881 */ 708 */
1882static void iwl4965_tx_queue_set_status(struct iwl_priv *priv, 709static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1883 struct iwl4965_tx_queue *txq, 710 struct iwl_tx_queue *txq,
1884 int tx_fifo_id, int scd_retry) 711 int tx_fifo_id, int scd_retry)
1885{ 712{
1886 int txq_id = txq->q.id; 713 int txq_id = txq->q.id;
@@ -1890,11 +717,11 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1890 717
1891 /* Set up and activate */ 718 /* Set up and activate */
1892 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), 719 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
1893 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 720 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1894 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | 721 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
1895 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) | 722 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
1896 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) | 723 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1897 SCD_QUEUE_STTS_REG_MSK); 724 IWL49_SCD_QUEUE_STTS_REG_MSK);
1898 725
1899 txq->sched_retry = scd_retry; 726 txq->sched_retry = scd_retry;
1900 727
@@ -1908,22 +735,12 @@ static const u16 default_queue_to_tx_fifo[] = {
1908 IWL_TX_FIFO_AC2, 735 IWL_TX_FIFO_AC2,
1909 IWL_TX_FIFO_AC1, 736 IWL_TX_FIFO_AC1,
1910 IWL_TX_FIFO_AC0, 737 IWL_TX_FIFO_AC0,
1911 IWL_CMD_FIFO_NUM, 738 IWL49_CMD_FIFO_NUM,
1912 IWL_TX_FIFO_HCCA_1, 739 IWL_TX_FIFO_HCCA_1,
1913 IWL_TX_FIFO_HCCA_2 740 IWL_TX_FIFO_HCCA_2
1914}; 741};
1915 742
1916static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 743static int iwl4965_alive_notify(struct iwl_priv *priv)
1917{
1918 set_bit(txq_id, &priv->txq_ctx_active_msk);
1919}
1920
1921static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1922{
1923 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1924}
1925
1926int iwl4965_alive_notify(struct iwl_priv *priv)
1927{ 744{
1928 u32 a; 745 u32 a;
1929 int i = 0; 746 int i = 0;
@@ -1932,15 +749,6 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1932 749
1933 spin_lock_irqsave(&priv->lock, flags); 750 spin_lock_irqsave(&priv->lock, flags);
1934 751
1935#ifdef CONFIG_IWL4965_SENSITIVITY
1936 memset(&(priv->sensitivity_data), 0,
1937 sizeof(struct iwl4965_sensitivity_data));
1938 memset(&(priv->chain_noise_data), 0,
1939 sizeof(struct iwl4965_chain_noise_data));
1940 for (i = 0; i < NUM_RX_CHAINS; i++)
1941 priv->chain_noise_data.delta_gain_code[i] =
1942 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1943#endif /* CONFIG_IWL4965_SENSITIVITY*/
1944 ret = iwl_grab_nic_access(priv); 752 ret = iwl_grab_nic_access(priv);
1945 if (ret) { 753 if (ret) {
1946 spin_unlock_irqrestore(&priv->lock, flags); 754 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1949,10 +757,10 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1949 757
1950 /* Clear 4965's internal Tx Scheduler data base */ 758 /* Clear 4965's internal Tx Scheduler data base */
1951 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR); 759 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
1952 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET; 760 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
1953 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4) 761 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1954 iwl_write_targ_mem(priv, a, 0); 762 iwl_write_targ_mem(priv, a, 0);
1955 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4) 763 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
1956 iwl_write_targ_mem(priv, a, 0); 764 iwl_write_targ_mem(priv, a, 0);
1957 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4) 765 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
1958 iwl_write_targ_mem(priv, a, 0); 766 iwl_write_targ_mem(priv, a, 0);
@@ -1974,160 +782,109 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1974 782
1975 /* Max Tx Window size for Scheduler-ACK mode */ 783 /* Max Tx Window size for Scheduler-ACK mode */
1976 iwl_write_targ_mem(priv, priv->scd_base_addr + 784 iwl_write_targ_mem(priv, priv->scd_base_addr +
1977 SCD_CONTEXT_QUEUE_OFFSET(i), 785 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
1978 (SCD_WIN_SIZE << 786 (SCD_WIN_SIZE <<
1979 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 787 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1980 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 788 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1981 789
1982 /* Frame limit */ 790 /* Frame limit */
1983 iwl_write_targ_mem(priv, priv->scd_base_addr + 791 iwl_write_targ_mem(priv, priv->scd_base_addr +
1984 SCD_CONTEXT_QUEUE_OFFSET(i) + 792 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
1985 sizeof(u32), 793 sizeof(u32),
1986 (SCD_FRAME_LIMIT << 794 (SCD_FRAME_LIMIT <<
1987 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 795 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1988 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 796 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1989 797
1990 } 798 }
1991 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, 799 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
1992 (1 << priv->hw_params.max_txq_num) - 1); 800 (1 << priv->hw_params.max_txq_num) - 1);
1993 801
1994 /* Activate all Tx DMA/FIFO channels */ 802 /* Activate all Tx DMA/FIFO channels */
1995 iwl_write_prph(priv, IWL49_SCD_TXFACT, 803 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
1996 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1997 804
1998 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 805 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1999 806
2000 /* Map each Tx/cmd queue to its corresponding fifo */ 807 /* Map each Tx/cmd queue to its corresponding fifo */
2001 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 808 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
2002 int ac = default_queue_to_tx_fifo[i]; 809 int ac = default_queue_to_tx_fifo[i];
2003 iwl4965_txq_ctx_activate(priv, i); 810 iwl_txq_ctx_activate(priv, i);
2004 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 811 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
2005 } 812 }
2006 813
2007 iwl_release_nic_access(priv); 814 iwl_release_nic_access(priv);
2008 spin_unlock_irqrestore(&priv->lock, flags); 815 spin_unlock_irqrestore(&priv->lock, flags);
2009 816
2010 /* Ask for statistics now, the uCode will send statistics notification
2011 * periodically after association */
2012 iwl_send_statistics_request(priv, CMD_ASYNC);
2013 return ret; 817 return ret;
2014} 818}
2015 819
820static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
821 .min_nrg_cck = 97,
822 .max_nrg_cck = 0,
823
824 .auto_corr_min_ofdm = 85,
825 .auto_corr_min_ofdm_mrc = 170,
826 .auto_corr_min_ofdm_x1 = 105,
827 .auto_corr_min_ofdm_mrc_x1 = 220,
828
829 .auto_corr_max_ofdm = 120,
830 .auto_corr_max_ofdm_mrc = 210,
831 .auto_corr_max_ofdm_x1 = 140,
832 .auto_corr_max_ofdm_mrc_x1 = 270,
833
834 .auto_corr_min_cck = 125,
835 .auto_corr_max_cck = 200,
836 .auto_corr_min_cck_mrc = 200,
837 .auto_corr_max_cck_mrc = 400,
838
839 .nrg_th_cck = 100,
840 .nrg_th_ofdm = 100,
841};
842
2016/** 843/**
2017 * iwl4965_hw_set_hw_params 844 * iwl4965_hw_set_hw_params
2018 * 845 *
2019 * Called when initializing driver 846 * Called when initializing driver
2020 */ 847 */
2021int iwl4965_hw_set_hw_params(struct iwl_priv *priv) 848static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
2022{ 849{
2023 850
2024 if ((priv->cfg->mod_params->num_of_queues > IWL4965_MAX_NUM_QUEUES) || 851 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
2025 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 852 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
2026 IWL_ERROR("invalid queues_num, should be between %d and %d\n", 853 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
2027 IWL_MIN_NUM_QUEUES, IWL4965_MAX_NUM_QUEUES); 854 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
2028 return -EINVAL; 855 return -EINVAL;
2029 } 856 }
2030 857
2031 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 858 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
2032 priv->hw_params.tx_cmd_len = sizeof(struct iwl4965_tx_cmd); 859 priv->hw_params.first_ampdu_q = IWL49_FIRST_AMPDU_QUEUE;
2033 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2034 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2035 if (priv->cfg->mod_params->amsdu_size_8K)
2036 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
2037 else
2038 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
2039 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
2040 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 860 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
2041 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 861 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
862 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
863 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
864 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
865 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);
2042 866
2043 priv->hw_params.tx_chains_num = 2; 867 priv->hw_params.tx_chains_num = 2;
2044 priv->hw_params.rx_chains_num = 2; 868 priv->hw_params.rx_chains_num = 2;
2045 priv->hw_params.valid_tx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); 869 priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
2046 priv->hw_params.valid_rx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); 870 priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
2047 871 priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
2048 return 0;
2049}
2050
2051/**
2052 * iwl4965_hw_txq_ctx_free - Free TXQ Context
2053 *
2054 * Destroy all TX DMA queues and structures
2055 */
2056void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
2057{
2058 int txq_id;
2059
2060 /* Tx queues */
2061 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
2062 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
2063
2064 /* Keep-warm buffer */
2065 iwl4965_kw_free(priv);
2066}
2067
2068/**
2069 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
2070 *
2071 * Does NOT advance any TFD circular buffer read/write indexes
2072 * Does NOT free the TFD itself (which is within circular buffer)
2073 */
2074int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
2075{
2076 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
2077 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
2078 struct pci_dev *dev = priv->pci_dev;
2079 int i;
2080 int counter = 0;
2081 int index, is_odd;
2082 872
2083 /* Host command buffers stay mapped in memory, nothing to clean */ 873 priv->hw_params.sens = &iwl4965_sensitivity;
2084 if (txq->q.id == IWL_CMD_QUEUE_NUM)
2085 return 0;
2086
2087 /* Sanity check on number of chunks */
2088 counter = IWL_GET_BITS(*bd, num_tbs);
2089 if (counter > MAX_NUM_OF_TBS) {
2090 IWL_ERROR("Too many chunks: %i\n", counter);
2091 /* @todo issue fatal error, it is quite serious situation */
2092 return 0;
2093 }
2094 874
2095 /* Unmap chunks, if any.
2096 * TFD info for odd chunks is different format than for even chunks. */
2097 for (i = 0; i < counter; i++) {
2098 index = i / 2;
2099 is_odd = i & 0x1;
2100
2101 if (is_odd)
2102 pci_unmap_single(
2103 dev,
2104 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
2105 (IWL_GET_BITS(bd->pa[index],
2106 tb2_addr_hi20) << 16),
2107 IWL_GET_BITS(bd->pa[index], tb2_len),
2108 PCI_DMA_TODEVICE);
2109
2110 else if (i > 0)
2111 pci_unmap_single(dev,
2112 le32_to_cpu(bd->pa[index].tb1_addr),
2113 IWL_GET_BITS(bd->pa[index], tb1_len),
2114 PCI_DMA_TODEVICE);
2115
2116 /* Free SKB, if any, for this chunk */
2117 if (txq->txb[txq->q.read_ptr].skb[i]) {
2118 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
2119
2120 dev_kfree_skb(skb);
2121 txq->txb[txq->q.read_ptr].skb[i] = NULL;
2122 }
2123 }
2124 return 0; 875 return 0;
2125} 876}
2126 877
2127int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) 878/* set card power command */
879static int iwl4965_set_power(struct iwl_priv *priv,
880 void *cmd)
2128{ 881{
2129 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n"); 882 int ret = 0;
2130 return -EINVAL; 883
884 ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
885 sizeof(struct iwl4965_powertable_cmd),
886 cmd, NULL);
887 return ret;
2131} 888}
2132 889
2133static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) 890static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
@@ -2179,20 +936,6 @@ static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
2179 return comp; 936 return comp;
2180} 937}
2181 938
2182static const struct iwl_channel_info *
2183iwl4965_get_channel_txpower_info(struct iwl_priv *priv,
2184 enum ieee80211_band band, u16 channel)
2185{
2186 const struct iwl_channel_info *ch_info;
2187
2188 ch_info = iwl_get_channel_info(priv, band, channel);
2189
2190 if (!is_channel_valid(ch_info))
2191 return NULL;
2192
2193 return ch_info;
2194}
2195
2196static s32 iwl4965_get_tx_atten_grp(u16 channel) 939static s32 iwl4965_get_tx_atten_grp(u16 channel)
2197{ 940{
2198 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && 941 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
@@ -2224,11 +967,11 @@ static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
2224 s32 b = -1; 967 s32 b = -1;
2225 968
2226 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { 969 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
2227 if (priv->eeprom.calib_info.band_info[b].ch_from == 0) 970 if (priv->calib_info->band_info[b].ch_from == 0)
2228 continue; 971 continue;
2229 972
2230 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from) 973 if ((channel >= priv->calib_info->band_info[b].ch_from)
2231 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to)) 974 && (channel <= priv->calib_info->band_info[b].ch_to))
2232 break; 975 break;
2233 } 976 }
2234 977
@@ -2256,14 +999,14 @@ static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
2256 * in channel number. 999 * in channel number.
2257 */ 1000 */
2258static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, 1001static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2259 struct iwl4965_eeprom_calib_ch_info *chan_info) 1002 struct iwl_eeprom_calib_ch_info *chan_info)
2260{ 1003{
2261 s32 s = -1; 1004 s32 s = -1;
2262 u32 c; 1005 u32 c;
2263 u32 m; 1006 u32 m;
2264 const struct iwl4965_eeprom_calib_measure *m1; 1007 const struct iwl_eeprom_calib_measure *m1;
2265 const struct iwl4965_eeprom_calib_measure *m2; 1008 const struct iwl_eeprom_calib_measure *m2;
2266 struct iwl4965_eeprom_calib_measure *omeas; 1009 struct iwl_eeprom_calib_measure *omeas;
2267 u32 ch_i1; 1010 u32 ch_i1;
2268 u32 ch_i2; 1011 u32 ch_i2;
2269 1012
@@ -2273,8 +1016,8 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2273 return -1; 1016 return -1;
2274 } 1017 }
2275 1018
2276 ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num; 1019 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
2277 ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num; 1020 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
2278 chan_info->ch_num = (u8) channel; 1021 chan_info->ch_num = (u8) channel;
2279 1022
2280 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", 1023 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
@@ -2282,9 +1025,9 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2282 1025
2283 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { 1026 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
2284 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { 1027 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
2285 m1 = &(priv->eeprom.calib_info.band_info[s].ch1. 1028 m1 = &(priv->calib_info->band_info[s].ch1.
2286 measurements[c][m]); 1029 measurements[c][m]);
2287 m2 = &(priv->eeprom.calib_info.band_info[s].ch2. 1030 m2 = &(priv->calib_info->band_info[s].ch2.
2288 measurements[c][m]); 1031 measurements[c][m]);
2289 omeas = &(chan_info->measurements[c][m]); 1032 omeas = &(chan_info->measurements[c][m]);
2290 1033
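
Both hunks above feed iwl4965_interpolate_chan(), which builds calibration data for an arbitrary channel by interpolating, in channel number, between the sub-band's two factory-measured channels (ch1 and ch2). The helper named in the hunk header, iwl4965_interpolate_value(x, x1, y1, x2, y2), is ordinary linear interpolation with rounded integer division; a minimal sketch of the idea, with invented example numbers and rounding that may differ in detail from the driver's iwl4965_math_div_round():

/* Linear interpolation in channel number, as used to derive per-channel
 * txpower calibration between two factory-measured channels.  Not the
 * driver's exact body. */
#include <stdio.h>

static int interpolate_value(int x, int x1, int y1, int x2, int y2)
{
        int num, den;

        if (x2 == x1)
                return y1;

        num = (x - x1) * (y2 - y1);
        den = x2 - x1;
        return y1 + (num + den / 2) / den;   /* round to nearest */
}

int main(void)
{
        /* e.g. a gain value measured as 40 on channel 36 and 52 on
         * channel 64: estimate it for channel 44 */
        printf("ch 44 ~ %d\n", interpolate_value(44, 36, 40, 64, 52));
        return 0;
}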
@@ -2603,8 +1346,8 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2603 int i; 1346 int i;
2604 int c; 1347 int c;
2605 const struct iwl_channel_info *ch_info = NULL; 1348 const struct iwl_channel_info *ch_info = NULL;
2606 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info; 1349 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
2607 const struct iwl4965_eeprom_calib_measure *measurement; 1350 const struct iwl_eeprom_calib_measure *measurement;
2608 s16 voltage; 1351 s16 voltage;
2609 s32 init_voltage; 1352 s32 init_voltage;
2610 s32 voltage_compensation; 1353 s32 voltage_compensation;
@@ -2616,30 +1359,17 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2616 s32 factory_actual_pwr[2]; 1359 s32 factory_actual_pwr[2];
2617 s32 power_index; 1360 s32 power_index;
2618 1361
2619 /* Sanity check requested level (dBm) */
2620 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
2621 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
2622 priv->user_txpower_limit);
2623 return -EINVAL;
2624 }
2625 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
2626 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
2627 priv->user_txpower_limit);
2628 return -EINVAL;
2629 }
2630
2631 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units 1362 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
2632 * are used for indexing into txpower table) */ 1363 * are used for indexing into txpower table) */
2633 user_target_power = 2 * priv->user_txpower_limit; 1364 user_target_power = 2 * priv->tx_power_user_lmt;
2634 1365
2635 /* Get current (RXON) channel, band, width */ 1366 /* Get current (RXON) channel, band, width */
2636 ch_info =
2637 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
2638
2639 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band, 1367 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2640 is_fat); 1368 is_fat);
2641 1369
2642 if (!ch_info) 1370 ch_info = iwl_get_channel_info(priv, priv->band, channel);
1371
1372 if (!is_channel_valid(ch_info))
2643 return -EINVAL; 1373 return -EINVAL;
2644 1374
2645 /* get txatten group, used to select 1) thermal txpower adjustment 1375 /* get txatten group, used to select 1) thermal txpower adjustment
@@ -2661,9 +1391,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2661 /* hardware txpower limits ... 1391 /* hardware txpower limits ...
2662 * saturation (clipping distortion) txpowers are in half-dBm */ 1392 * saturation (clipping distortion) txpowers are in half-dBm */
2663 if (band) 1393 if (band)
2664 saturation_power = priv->eeprom.calib_info.saturation_power24; 1394 saturation_power = priv->calib_info->saturation_power24;
2665 else 1395 else
2666 saturation_power = priv->eeprom.calib_info.saturation_power52; 1396 saturation_power = priv->calib_info->saturation_power52;
2667 1397
2668 if (saturation_power < IWL_TX_POWER_SATURATION_MIN || 1398 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2669 saturation_power > IWL_TX_POWER_SATURATION_MAX) { 1399 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
@@ -2693,7 +1423,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2693 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); 1423 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2694 1424
2695 /* calculate tx gain adjustment based on power supply voltage */ 1425 /* calculate tx gain adjustment based on power supply voltage */
2696 voltage = priv->eeprom.calib_info.voltage; 1426 voltage = priv->calib_info->voltage;
2697 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); 1427 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2698 voltage_compensation = 1428 voltage_compensation =
2699 iwl4965_get_voltage_compensation(voltage, init_voltage); 1429 iwl4965_get_voltage_compensation(voltage, init_voltage);
@@ -2840,12 +1570,12 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2840} 1570}
2841 1571
2842/** 1572/**
2843 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit 1573 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
2844 * 1574 *
2845 * Uses the active RXON for channel, band, and characteristics (fat, high) 1575 * Uses the active RXON for channel, band, and characteristics (fat, high)
2846 * The power limit is taken from priv->user_txpower_limit. 1576 * The power limit is taken from priv->tx_power_user_lmt.
2847 */ 1577 */
2848int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv) 1578static int iwl4965_send_tx_power(struct iwl_priv *priv)
2849{ 1579{
2850 struct iwl4965_txpowertable_cmd cmd = { 0 }; 1580 struct iwl4965_txpowertable_cmd cmd = { 0 };
2851 int ret; 1581 int ret;
@@ -2888,8 +1618,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
2888{ 1618{
2889 int ret = 0; 1619 int ret = 0;
2890 struct iwl4965_rxon_assoc_cmd rxon_assoc; 1620 struct iwl4965_rxon_assoc_cmd rxon_assoc;
2891 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon; 1621 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
2892 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon; 1622 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
2893 1623
2894 if ((rxon1->flags == rxon2->flags) && 1624 if ((rxon1->flags == rxon2->flags) &&
2895 (rxon1->filter_flags == rxon2->filter_flags) && 1625 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -2965,89 +1695,14 @@ int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2965 return rc; 1695 return rc;
2966} 1696}
2967 1697
2968#define RTS_HCCA_RETRY_LIMIT 3 1698static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
2969#define RTS_DFAULT_RETRY_LIMIT 60
2970
2971void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
2972 struct iwl_cmd *cmd,
2973 struct ieee80211_tx_control *ctrl,
2974 struct ieee80211_hdr *hdr, int sta_id,
2975 int is_hcca)
2976{
2977 struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
2978 u8 rts_retry_limit = 0;
2979 u8 data_retry_limit = 0;
2980 u16 fc = le16_to_cpu(hdr->frame_control);
2981 u8 rate_plcp;
2982 u16 rate_flags = 0;
2983 int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
2984
2985 rate_plcp = iwl4965_rates[rate_idx].plcp;
2986
2987 rts_retry_limit = (is_hcca) ?
2988 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2989
2990 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2991 rate_flags |= RATE_MCS_CCK_MSK;
2992
2993
2994 if (ieee80211_is_probe_response(fc)) {
2995 data_retry_limit = 3;
2996 if (data_retry_limit < rts_retry_limit)
2997 rts_retry_limit = data_retry_limit;
2998 } else
2999 data_retry_limit = IWL_DEFAULT_TX_RETRY;
3000
3001 if (priv->data_retry_limit != -1)
3002 data_retry_limit = priv->data_retry_limit;
3003
3004
3005 if (ieee80211_is_data(fc)) {
3006 tx->initial_rate_index = 0;
3007 tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
3008 } else {
3009 switch (fc & IEEE80211_FCTL_STYPE) {
3010 case IEEE80211_STYPE_AUTH:
3011 case IEEE80211_STYPE_DEAUTH:
3012 case IEEE80211_STYPE_ASSOC_REQ:
3013 case IEEE80211_STYPE_REASSOC_REQ:
3014 if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
3015 tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
3016 tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
3017 }
3018 break;
3019 default:
3020 break;
3021 }
3022
3023 /* Alternate between antenna A and B for successive frames */
3024 if (priv->use_ant_b_for_management_frame) {
3025 priv->use_ant_b_for_management_frame = 0;
3026 rate_flags |= RATE_MCS_ANT_B_MSK;
3027 } else {
3028 priv->use_ant_b_for_management_frame = 1;
3029 rate_flags |= RATE_MCS_ANT_A_MSK;
3030 }
3031 }
3032
3033 tx->rts_retry_limit = rts_retry_limit;
3034 tx->data_retry_limit = data_retry_limit;
3035 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
3036}
3037
3038int iwl4965_hw_get_rx_read(struct iwl_priv *priv)
3039{ 1699{
3040 struct iwl4965_shared *s = priv->shared_virt; 1700 struct iwl4965_shared *s = priv->shared_virt;
3041 return le32_to_cpu(s->rb_closed) & 0xFFF; 1701 return le32_to_cpu(s->rb_closed) & 0xFFF;
3042} 1702}
3043 1703
3044int iwl4965_hw_get_temperature(struct iwl_priv *priv)
3045{
3046 return priv->temperature;
3047}
3048
3049unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 1704unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3050 struct iwl4965_frame *frame, u8 rate) 1705 struct iwl_frame *frame, u8 rate)
3051{ 1706{
3052 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd; 1707 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
3053 unsigned int frame_size; 1708 unsigned int frame_size;
@@ -3060,7 +1715,7 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3060 1715
3061 frame_size = iwl4965_fill_beacon_frame(priv, 1716 frame_size = iwl4965_fill_beacon_frame(priv,
3062 tx_beacon_cmd->frame, 1717 tx_beacon_cmd->frame,
3063 iwl4965_broadcast_addr, 1718 iwl_bcast_addr,
3064 sizeof(frame->u) - sizeof(*tx_beacon_cmd)); 1719 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
3065 1720
3066 BUG_ON(frame_size > MAX_MPDU_SIZE); 1721 BUG_ON(frame_size > MAX_MPDU_SIZE);
@@ -3068,105 +1723,45 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3068 1723
3069 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP)) 1724 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
3070 tx_beacon_cmd->tx.rate_n_flags = 1725 tx_beacon_cmd->tx.rate_n_flags =
3071 iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK); 1726 iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
3072 else 1727 else
3073 tx_beacon_cmd->tx.rate_n_flags = 1728 tx_beacon_cmd->tx.rate_n_flags =
3074 iwl4965_hw_set_rate_n_flags(rate, 0); 1729 iwl_hw_set_rate_n_flags(rate, 0);
3075 1730
3076 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | 1731 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
3077 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK); 1732 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
3078 return (sizeof(*tx_beacon_cmd) + frame_size); 1733 return (sizeof(*tx_beacon_cmd) + frame_size);
3079} 1734}
3080 1735
3081/* 1736static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
3082 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
3083 * given Tx queue, and enable the DMA channel used for that queue.
3084 *
3085 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
3086 * channels supported in hardware.
3087 */
3088int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
3089{
3090 int rc;
3091 unsigned long flags;
3092 int txq_id = txq->q.id;
3093
3094 spin_lock_irqsave(&priv->lock, flags);
3095 rc = iwl_grab_nic_access(priv);
3096 if (rc) {
3097 spin_unlock_irqrestore(&priv->lock, flags);
3098 return rc;
3099 }
3100
3101 /* Circular buffer (TFD queue in DRAM) physical base address */
3102 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
3103 txq->q.dma_addr >> 8);
3104
3105 /* Enable DMA channel, using same id as for TFD queue */
3106 iwl_write_direct32(
3107 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
3108 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3109 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
3110 iwl_release_nic_access(priv);
3111 spin_unlock_irqrestore(&priv->lock, flags);
3112
3113 return 0;
3114}
3115
3116int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
3117 dma_addr_t addr, u16 len)
3118{ 1737{
3119 int index, is_odd; 1738 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
3120 struct iwl4965_tfd_frame *tfd = ptr; 1739 sizeof(struct iwl4965_shared),
3121 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); 1740 &priv->shared_phys);
3122 1741 if (!priv->shared_virt)
3123 /* Each TFD can point to a maximum 20 Tx buffers */ 1742 return -ENOMEM;
3124 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
3125 IWL_ERROR("Error can not send more than %d chunks\n",
3126 MAX_NUM_OF_TBS);
3127 return -EINVAL;
3128 }
3129
3130 index = num_tbs / 2;
3131 is_odd = num_tbs & 0x1;
3132 1743
3133 if (!is_odd) { 1744 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
3134 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
3135 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
3136 iwl_get_dma_hi_address(addr));
3137 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
3138 } else {
3139 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
3140 (u32) (addr & 0xffff));
3141 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
3142 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
3143 }
3144 1745
3145 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1); 1746 priv->rb_closed_offset = offsetof(struct iwl4965_shared, rb_closed);
3146 1747
3147 return 0; 1748 return 0;
3148} 1749}
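
iwl4965_hw_txq_attach_buf_to_tfd() above packs two Tx buffers into every pa[] slot of a TFD (up to 20 buffers per TFD): an even-numbered buffer fills the tb1 fields, a full 32-bit address word plus separate high address bits and a length, while an odd-numbered buffer fills the tb2 fields, whose address is split into a low 16-bit part and a high 20-bit part. A simplified standalone sketch of that even/odd packing, using plain integers instead of the driver's packed bitfields and IWL_SET_BITS macros:

/* Simplified model of the 4965 TFD buffer packing shown above.  Field
 * widths are widened to plain integers for clarity. */
#include <stdio.h>
#include <stdint.h>

struct tfd_pa_sketch {
        uint32_t tb1_addr;        /* even buffer: low 32 address bits */
        uint32_t tb1_addr_hi;     /* even buffer: remaining high bits */
        uint32_t tb1_len;
        uint32_t tb2_addr_lo16;   /* odd buffer: address bits [15:0]  */
        uint32_t tb2_addr_hi20;   /* odd buffer: address bits [35:16] */
        uint32_t tb2_len;
};

static void attach_buf(struct tfd_pa_sketch *pa, int num_tbs,
                       uint64_t addr, uint16_t len)
{
        int index = num_tbs / 2;      /* two buffers share one pa[] slot */
        int is_odd = num_tbs & 0x1;

        if (!is_odd) {
                pa[index].tb1_addr = (uint32_t)addr;
                pa[index].tb1_addr_hi = (uint32_t)(addr >> 32);
                pa[index].tb1_len = len;
        } else {
                pa[index].tb2_addr_lo16 = (uint32_t)(addr & 0xffff);
                pa[index].tb2_addr_hi20 = (uint32_t)(addr >> 16);
                pa[index].tb2_len = len;
        }
}

int main(void)
{
        struct tfd_pa_sketch pa[10] = { { 0 } };

        attach_buf(pa, 0, 0x123456789ULL, 64);   /* even -> tb1 fields */
        attach_buf(pa, 1, 0xabcdef012ULL, 128);  /* odd  -> tb2 fields */
        printf("pa[0].tb2 = %05x:%04x len %u\n",
               pa[0].tb2_addr_hi20, pa[0].tb2_addr_lo16, pa[0].tb2_len);
        return 0;
}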
3149 1750
3150static void iwl4965_hw_card_show_info(struct iwl_priv *priv) 1751static void iwl4965_free_shared_mem(struct iwl_priv *priv)
3151{ 1752{
3152 u16 hw_version = priv->eeprom.board_revision_4965; 1753 if (priv->shared_virt)
3153 1754 pci_free_consistent(priv->pci_dev,
3154 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n", 1755 sizeof(struct iwl4965_shared),
3155 ((hw_version >> 8) & 0x0F), 1756 priv->shared_virt,
3156 ((hw_version >> 8) >> 4), (hw_version & 0x00FF)); 1757 priv->shared_phys);
3157
3158 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
3159 priv->eeprom.board_pba_number_4965);
3160} 1758}
3161 1759
3162#define IWL_TX_CRC_SIZE 4
3163#define IWL_TX_DELIMITER_SIZE 4
3164
3165/** 1760/**
3166 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 1761 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
3167 */ 1762 */
3168static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv, 1763static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
3169 struct iwl4965_tx_queue *txq, 1764 struct iwl_tx_queue *txq,
3170 u16 byte_cnt) 1765 u16 byte_cnt)
3171{ 1766{
3172 int len; 1767 int len;
@@ -3180,50 +1775,13 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
3180 tfd_offset[txq->q.write_ptr], byte_cnt, len); 1775 tfd_offset[txq->q.write_ptr], byte_cnt, len);
3181 1776
3182 /* If within first 64 entries, duplicate at end */ 1777 /* If within first 64 entries, duplicate at end */
3183 if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE) 1778 if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
3184 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 1779 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
3185 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr], 1780 tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
3186 byte_cnt, len); 1781 byte_cnt, len);
3187} 1782}
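
The update above writes the frame's byte count at the queue's write pointer and, when the write pointer is inside the first 64 entries, writes the same value a second time at queue-size plus write-pointer; the shadow copy presumably lets the scheduler read a full window of byte counts without worrying about the ring wrapping. A tiny sketch of that indexing with stand-in sizes for IWL49_QUEUE_SIZE and IWL49_MAX_WIN_SIZE:

/* Sketch of the 'duplicate at end' byte-count table update above.
 * QUEUE_SIZE and MAX_WIN_SIZE are stand-ins for the driver constants. */
#include <stdio.h>

#define QUEUE_SIZE   256
#define MAX_WIN_SIZE  64

static unsigned short byte_cnt_tbl[QUEUE_SIZE + MAX_WIN_SIZE];

static void update_byte_cnt(int write_ptr, unsigned short byte_cnt)
{
        byte_cnt_tbl[write_ptr] = byte_cnt;

        /* If within first 64 entries, duplicate at end */
        if (write_ptr < MAX_WIN_SIZE)
                byte_cnt_tbl[QUEUE_SIZE + write_ptr] = byte_cnt;
}

int main(void)
{
        update_byte_cnt(3, 1500);
        printf("%u %u\n", byte_cnt_tbl[3], byte_cnt_tbl[QUEUE_SIZE + 3]);
        return 0;
}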
3188 1783
3189/** 1784/**
3190 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
3191 *
3192 * Selects how many and which Rx receivers/antennas/chains to use.
3193 * This should not be used for scan command ... it puts data in wrong place.
3194 */
3195void iwl4965_set_rxon_chain(struct iwl_priv *priv)
3196{
3197 u8 is_single = is_single_stream(priv);
3198 u8 idle_state, rx_state;
3199
3200 priv->staging_rxon.rx_chain = 0;
3201 rx_state = idle_state = 3;
3202
3203 /* Tell uCode which antennas are actually connected.
3204 * Before first association, we assume all antennas are connected.
3205 * Just after first association, iwl4965_noise_calibration()
3206 * checks which antennas actually *are* connected. */
3207 priv->staging_rxon.rx_chain |=
3208 cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
3209
3210 /* How many receivers should we use? */
3211 iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
3212 priv->staging_rxon.rx_chain |=
3213 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
3214 priv->staging_rxon.rx_chain |=
3215 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
3216
3217 if (!is_single && (rx_state >= 2) &&
3218 !test_bit(STATUS_POWER_PMI, &priv->status))
3219 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
3220 else
3221 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
3222
3223 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
3224}
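
iwl4965_set_rxon_chain() above is plain bitfield composition: the set of antennas known to be connected, the receiver count to use while receiving, and the count to keep listening while idle each land in their own field of staging_rxon.rx_chain, plus a force-MIMO bit when two or more receivers are in use and power management is off. A sketch of that composition with made-up field offsets (the real RXON_RX_CHAIN_*_POS values live in the driver headers) and a simplified force condition:

/* Sketch of composing an RXON rx_chain word as in iwl4965_set_rxon_chain().
 * The *_POS offsets and example values are illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define RX_CHAIN_VALID_POS      1          /* hypothetical */
#define RX_CHAIN_CNT_POS       10          /* hypothetical */
#define RX_CHAIN_MIMO_CNT_POS  12          /* hypothetical */
#define RX_CHAIN_MIMO_FORCE    (1 << 14)   /* hypothetical */

int main(void)
{
        uint16_t rx_chain = 0;
        unsigned int valid_antenna = 0x3;   /* chains A and B connected */
        unsigned int rx_cnt = 2;            /* receivers while receiving */
        unsigned int idle_cnt = 2;          /* receivers while idle */

        rx_chain |= valid_antenna << RX_CHAIN_VALID_POS;
        rx_chain |= rx_cnt << RX_CHAIN_MIMO_CNT_POS;
        rx_chain |= idle_cnt << RX_CHAIN_CNT_POS;

        /* simplified: the driver also checks single-stream and PMI state */
        if (rx_cnt >= 2)
                rx_chain |= RX_CHAIN_MIMO_FORCE;

        printf("rx_chain = 0x%04x\n", rx_chain);
        return 0;
}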
3225
3226/**
3227 * sign_extend - Sign extend a value using specified bit as sign-bit 1785 * sign_extend - Sign extend a value using specified bit as sign-bit
3228 * 1786 *
3229 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1 1787 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
@@ -3240,12 +1798,12 @@ static s32 sign_extend(u32 oper, int index)
3240} 1798}
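
sign_extend(), whose body is skipped by the hunk above, treats the given bit index as the sign bit of a narrower two's-complement value. One common way to write it, consistent with the sign_extend(9, 3) == -7 example in the comment (the driver's actual body may differ):

/* Possible implementation of sign_extend(oper, index): treat bit 'index'
 * of 'oper' as the sign bit and extend it upward. */
#include <stdio.h>
#include <stdint.h>

static int32_t sign_extend(uint32_t oper, int index)
{
        int shift = 31 - index;

        /* shift the chosen bit into the machine sign bit, then shift back
         * arithmetically (implementation-defined but universal in practice) */
        return (int32_t)(oper << shift) >> shift;
}

int main(void)
{
        printf("%d\n", sign_extend(9, 3));   /* prints -7 */
        return 0;
}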
3241 1799
3242/** 1800/**
3243 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin) 1801 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
3244 * @statistics: Provides the temperature reading from the uCode 1802 * @statistics: Provides the temperature reading from the uCode
3245 * 1803 *
3246 * A return of <0 indicates bogus data in the statistics 1804 * A return of <0 indicates bogus data in the statistics
3247 */ 1805 */
3248int iwl4965_get_temperature(const struct iwl_priv *priv) 1806static int iwl4965_hw_get_temperature(const struct iwl_priv *priv)
3249{ 1807{
3250 s32 temperature; 1808 s32 temperature;
3251 s32 vt; 1809 s32 vt;
@@ -3280,8 +1838,7 @@ int iwl4965_get_temperature(const struct iwl_priv *priv)
3280 vt = sign_extend( 1838 vt = sign_extend(
3281 le32_to_cpu(priv->statistics.general.temperature), 23); 1839 le32_to_cpu(priv->statistics.general.temperature), 23);
3282 1840
3283 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", 1841 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
3284 R1, R2, R3, vt);
3285 1842
3286 if (R3 == R1) { 1843 if (R3 == R1) {
3287 IWL_ERROR("Calibration conflict R1 == R3\n"); 1844 IWL_ERROR("Calibration conflict R1 == R3\n");
@@ -3292,11 +1849,10 @@ int iwl4965_get_temperature(const struct iwl_priv *priv)
3292 * Add offset to center the adjustment around 0 degrees Centigrade. */ 1849 * Add offset to center the adjustment around 0 degrees Centigrade. */
3293 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); 1850 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
3294 temperature /= (R3 - R1); 1851 temperature /= (R3 - R1);
3295 temperature = (temperature * 97) / 100 + 1852 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
3296 TEMPERATURE_CALIB_KELVIN_OFFSET;
3297 1853
3298 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature, 1854 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n",
3299 KELVIN_TO_CELSIUS(temperature)); 1855 temperature, KELVIN_TO_CELSIUS(temperature));
3300 1856
3301 return temperature; 1857 return temperature;
3302} 1858}
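
The temperature path above is a linear fit on raw sensor words followed by a fixed correction: scale (vt - R2) by the calibration constant, divide by (R3 - R1), then take 97% of the result and add the Kelvin offset. A worked sketch with made-up calibration words and stand-ins for TEMPERATURE_CALIB_A_VAL and TEMPERATURE_CALIB_KELVIN_OFFSET:

/* Worked sketch of the 4965 temperature computation above.  All constants
 * and raw readings are invented; only the sequence of operations mirrors
 * the driver. */
#include <stdio.h>

#define CALIB_A_VAL          259   /* stand-in for TEMPERATURE_CALIB_A_VAL */
#define CALIB_KELVIN_OFFSET    8   /* stand-in for TEMPERATURE_CALIB_KELVIN_OFFSET */
#define KELVIN_TO_CELSIUS(t) ((t) - 273)

int main(void)
{
        int R1 = 87, R2 = 200, R3 = 342;   /* factory calibration words */
        int vt = 500;                       /* current raw reading */
        int temperature;

        temperature = CALIB_A_VAL * (vt - R2);
        temperature /= (R3 - R1);
        temperature = (temperature * 97) / 100 + CALIB_KELVIN_OFFSET;

        printf("%dK (%dC)\n", temperature, KELVIN_TO_CELSIUS(temperature));
        return 0;                           /* 302K (29C) for these inputs */
}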
@@ -3343,89 +1899,11 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
3343 return 1; 1899 return 1;
3344} 1900}
3345 1901
3346/* Calculate noise level, based on measurements during network silence just 1902static void iwl4965_temperature_calib(struct iwl_priv *priv)
3347 * before arriving beacon. This measurement can be done only if we know
3348 * exactly when to expect beacons, therefore only when we're associated. */
3349static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
3350{
3351 struct statistics_rx_non_phy *rx_info
3352 = &(priv->statistics.rx.general);
3353 int num_active_rx = 0;
3354 int total_silence = 0;
3355 int bcn_silence_a =
3356 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
3357 int bcn_silence_b =
3358 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
3359 int bcn_silence_c =
3360 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
3361
3362 if (bcn_silence_a) {
3363 total_silence += bcn_silence_a;
3364 num_active_rx++;
3365 }
3366 if (bcn_silence_b) {
3367 total_silence += bcn_silence_b;
3368 num_active_rx++;
3369 }
3370 if (bcn_silence_c) {
3371 total_silence += bcn_silence_c;
3372 num_active_rx++;
3373 }
3374
3375 /* Average among active antennas */
3376 if (num_active_rx)
3377 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
3378 else
3379 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3380
3381 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
3382 bcn_silence_a, bcn_silence_b, bcn_silence_c,
3383 priv->last_rx_noise);
3384}
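
iwl4965_rx_calc_noise() above averages only the chains that reported a non-zero beacon-silence value and shifts the average by -107 to express it in dBm; if no chain is active it falls back to IWL_NOISE_MEAS_NOT_AVAILABLE. A worked sketch with invented silence readings:

/* Sketch of the rx noise estimate above: average the non-zero per-chain
 * beacon-silence readings and offset by -107 dBm. */
#include <stdio.h>

int main(void)
{
        int silence[3] = { 35, 41, 0 };   /* chain C reported nothing */
        int total = 0, active = 0, i;

        for (i = 0; i < 3; i++) {
                if (silence[i]) {
                        total += silence[i];
                        active++;
                }
        }

        if (active)
                printf("noise %d dBm\n", (total / active) - 107);  /* -69 dBm */
        else
                printf("noise measurement not available\n");
        return 0;
}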
3385
3386void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
3387{ 1903{
3388 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3389 int change;
3390 s32 temp; 1904 s32 temp;
3391 1905
3392 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n", 1906 temp = iwl4965_hw_get_temperature(priv);
3393 (int)sizeof(priv->statistics), pkt->len);
3394
3395 change = ((priv->statistics.general.temperature !=
3396 pkt->u.stats.general.temperature) ||
3397 ((priv->statistics.flag &
3398 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
3399 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
3400
3401 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
3402
3403 set_bit(STATUS_STATISTICS, &priv->status);
3404
3405 /* Reschedule the statistics timer to occur in
3406 * REG_RECALIB_PERIOD seconds to ensure we get a
3407 * thermal update even if the uCode doesn't give
3408 * us one */
3409 mod_timer(&priv->statistics_periodic, jiffies +
3410 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
3411
3412 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3413 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
3414 iwl4965_rx_calc_noise(priv);
3415#ifdef CONFIG_IWL4965_SENSITIVITY
3416 queue_work(priv->workqueue, &priv->sensitivity_work);
3417#endif
3418 }
3419
3420 iwl_leds_background(priv);
3421
3422 /* If the hardware hasn't reported a change in
3423 * temperature then don't bother computing a
3424 * calibrated temperature value */
3425 if (!change)
3426 return;
3427
3428 temp = iwl4965_get_temperature(priv);
3429 if (temp < 0) 1907 if (temp < 0)
3430 return; 1908 return;
3431 1909
@@ -3444,810 +1922,12 @@ void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffe
3444 priv->temperature = temp; 1922 priv->temperature = temp;
3445 set_bit(STATUS_TEMPERATURE, &priv->status); 1923 set_bit(STATUS_TEMPERATURE, &priv->status);
3446 1924
3447 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && 1925 if (!priv->disable_tx_power_cal &&
3448 iwl4965_is_temp_calib_needed(priv)) 1926 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1927 iwl4965_is_temp_calib_needed(priv))
3449 queue_work(priv->workqueue, &priv->txpower_work); 1928 queue_work(priv->workqueue, &priv->txpower_work);
3450} 1929}
3451 1930
3452static void iwl4965_add_radiotap(struct iwl_priv *priv,
3453 struct sk_buff *skb,
3454 struct iwl4965_rx_phy_res *rx_start,
3455 struct ieee80211_rx_status *stats,
3456 u32 ampdu_status)
3457{
3458 s8 signal = stats->ssi;
3459 s8 noise = 0;
3460 int rate = stats->rate_idx;
3461 u64 tsf = stats->mactime;
3462 __le16 antenna;
3463 __le16 phy_flags_hw = rx_start->phy_flags;
3464 struct iwl4965_rt_rx_hdr {
3465 struct ieee80211_radiotap_header rt_hdr;
3466 __le64 rt_tsf; /* TSF */
3467 u8 rt_flags; /* radiotap packet flags */
3468 u8 rt_rate; /* rate in 500kb/s */
3469 __le16 rt_channelMHz; /* channel in MHz */
3470 __le16 rt_chbitmask; /* channel bitfield */
3471 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
3472 s8 rt_dbmnoise;
3473 u8 rt_antenna; /* antenna number */
3474 } __attribute__ ((packed)) *iwl4965_rt;
3475
3476 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
3477 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
3478 if (net_ratelimit())
3479 printk(KERN_ERR "not enough headroom [%d] for "
3480 "radiotap head [%zd]\n",
3481 skb_headroom(skb), sizeof(*iwl4965_rt));
3482 return;
3483 }
3484
3485 /* put radiotap header in front of 802.11 header and data */
3486 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
3487
3488 /* initialise radiotap header */
3489 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3490 iwl4965_rt->rt_hdr.it_pad = 0;
3491
3492 /* total header + data */
3493 put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
3494 &iwl4965_rt->rt_hdr.it_len);
3495
3496 /* Indicate all the fields we add to the radiotap header */
3497 put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3498 (1 << IEEE80211_RADIOTAP_FLAGS) |
3499 (1 << IEEE80211_RADIOTAP_RATE) |
3500 (1 << IEEE80211_RADIOTAP_CHANNEL) |
3501 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3502 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3503 (1 << IEEE80211_RADIOTAP_ANTENNA)),
3504 &iwl4965_rt->rt_hdr.it_present);
3505
3506 /* Zero the flags, we'll add to them as we go */
3507 iwl4965_rt->rt_flags = 0;
3508
3509 put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);
3510
3511 iwl4965_rt->rt_dbmsignal = signal;
3512 iwl4965_rt->rt_dbmnoise = noise;
3513
3514 /* Convert the channel frequency and set the flags */
3515 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
3516 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
3517 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3518 IEEE80211_CHAN_5GHZ),
3519 &iwl4965_rt->rt_chbitmask);
3520 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
3521 put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
3522 IEEE80211_CHAN_2GHZ),
3523 &iwl4965_rt->rt_chbitmask);
3524 else /* 802.11g */
3525 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3526 IEEE80211_CHAN_2GHZ),
3527 &iwl4965_rt->rt_chbitmask);
3528
3529 if (rate == -1)
3530 iwl4965_rt->rt_rate = 0;
3531 else {
3532 if (stats->band == IEEE80211_BAND_5GHZ)
3533 rate += IWL_FIRST_OFDM_RATE;
3534
3535 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
3536 }
3537
3538 /*
3539 * "antenna number"
3540 *
3541 * It seems that the antenna field in the phy flags value
3542 * is actually a bitfield. This is undefined by radiotap,
3543 * it wants an actual antenna number but I always get "7"
3544 * for most legacy frames I receive indicating that the
3545 * same frame was received on all three RX chains.
3546 *
3547 * I think this field should be removed in favour of a
3548 * new 802.11n radiotap field "RX chains" that is defined
3549 * as a bitmask.
3550 */
3551 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
3552 iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
3553
3554 /* set the preamble flag if appropriate */
3555 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
3556 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3557
3558 stats->flag |= RX_FLAG_RADIOTAP;
3559}
3560
3561static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
3562{
3563 /* 0 - mgmt, 1 - cnt, 2 - data */
3564 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
3565 priv->rx_stats[idx].cnt++;
3566 priv->rx_stats[idx].bytes += len;
3567}
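
The index trick above leans on the 802.11 frame-control layout: the two frame-type bits sit at bits 2-3, so masking with IEEE80211_FCTL_FTYPE (0x000c) and shifting right by 2 yields 0 for management, 1 for control ('cnt' in the comment) and 2 for data frames, which is exactly the rx_stats[] slot used. A tiny check of that mapping:

/* Quick check of the (fc & IEEE80211_FCTL_FTYPE) >> 2 mapping used above. */
#include <stdio.h>

#define FCTL_FTYPE  0x000c
#define FTYPE_MGMT  0x0000
#define FTYPE_CTL   0x0004
#define FTYPE_DATA  0x0008

int main(void)
{
        unsigned short fc[] = { FTYPE_MGMT, FTYPE_CTL, FTYPE_DATA };
        int i;

        for (i = 0; i < 3; i++)
                printf("fc 0x%04x -> rx_stats idx %d\n",
                       fc[i], (fc[i] & FCTL_FTYPE) >> 2);
        return 0;
}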
3568
3569static u32 iwl4965_translate_rx_status(u32 decrypt_in)
3570{
3571 u32 decrypt_out = 0;
3572
3573 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
3574 RX_RES_STATUS_STATION_FOUND)
3575 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
3576 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
3577
3578 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
3579
3580 /* packet was not encrypted */
3581 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3582 RX_RES_STATUS_SEC_TYPE_NONE)
3583 return decrypt_out;
3584
3585 /* packet was encrypted with unknown alg */
3586 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3587 RX_RES_STATUS_SEC_TYPE_ERR)
3588 return decrypt_out;
3589
3590 /* decryption was not done in HW */
3591 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
3592 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
3593 return decrypt_out;
3594
3595 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
3596
3597 case RX_RES_STATUS_SEC_TYPE_CCMP:
3598 /* alg is CCM: check MIC only */
3599 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
3600 /* Bad MIC */
3601 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3602 else
3603 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3604
3605 break;
3606
3607 case RX_RES_STATUS_SEC_TYPE_TKIP:
3608 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
3609 /* Bad TTAK */
3610 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
3611 break;
3612 }
3613 /* fall through if TTAK OK */
3614 default:
3615 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
3616 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3617 else
3618 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3619 break;
3620 };
3621
3622 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
3623 decrypt_in, decrypt_out);
3624
3625 return decrypt_out;
3626}
3627
3628static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3629 int include_phy,
3630 struct iwl4965_rx_mem_buffer *rxb,
3631 struct ieee80211_rx_status *stats)
3632{
3633 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
3634 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3635 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3636 struct ieee80211_hdr *hdr;
3637 u16 len;
3638 __le32 *rx_end;
3639 unsigned int skblen;
3640 u32 ampdu_status;
3641 u32 ampdu_status_legacy;
3642
3643 if (!include_phy && priv->last_phy_res[0])
3644 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3645
3646 if (!rx_start) {
3647		IWL_ERROR("MPDU frame without PHY data\n");
3648 return;
3649 }
3650 if (include_phy) {
3651 hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
3652 rx_start->cfg_phy_cnt);
3653
3654 len = le16_to_cpu(rx_start->byte_count);
3655
3656 rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
3657 sizeof(struct iwl4965_rx_phy_res) +
3658 rx_start->cfg_phy_cnt + len);
3659
3660 } else {
3661 struct iwl4965_rx_mpdu_res_start *amsdu =
3662 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3663
3664 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
3665 sizeof(struct iwl4965_rx_mpdu_res_start));
3666 len = le16_to_cpu(amsdu->byte_count);
3667 rx_start->byte_count = amsdu->byte_count;
3668 rx_end = (__le32 *) (((u8 *) hdr) + len);
3669 }
3670 if (len > priv->hw_params.max_pkt_size || len < 16) {
3671 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
3672 return;
3673 }
3674
3675 ampdu_status = le32_to_cpu(*rx_end);
3676 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
3677
3678 if (!include_phy) {
3679 /* New status scheme, need to translate */
3680 ampdu_status_legacy = ampdu_status;
3681 ampdu_status = iwl4965_translate_rx_status(ampdu_status);
3682 }
3683
3684 /* start from MAC */
3685 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
3686 skb_put(rxb->skb, len); /* end where data ends */
3687
3688 /* We only process data packets if the interface is open */
3689 if (unlikely(!priv->is_open)) {
3690 IWL_DEBUG_DROP_LIMIT
3691 ("Dropping packet while interface is not open.\n");
3692 return;
3693 }
3694
3695 stats->flag = 0;
3696 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3697
3698 if (!priv->cfg->mod_params->sw_crypto)
3699 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);
3700
3701 if (priv->add_radiotap)
3702 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
3703
3704 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
3705 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3706 priv->alloc_rxb_skb--;
3707 rxb->skb = NULL;
3708}
3709
3710/* Calc max signal level (dBm) among 3 possible receivers */
3711static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3712{
3713 /* data from PHY/DSP regarding signal strength, etc.,
3714 * contents are always there, not configurable by host. */
3715 struct iwl4965_rx_non_cfg_phy *ncphy =
3716 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
3717 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
3718 >> IWL_AGC_DB_POS;
3719
3720 u32 valid_antennae =
3721 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
3722 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
3723 u8 max_rssi = 0;
3724 u32 i;
3725
3726 /* Find max rssi among 3 possible receivers.
3727 * These values are measured by the digital signal processor (DSP).
3728 * They should stay fairly constant even as the signal strength varies,
3729 * if the radio's automatic gain control (AGC) is working right.
3730 * AGC value (see below) will provide the "interesting" info. */
3731 for (i = 0; i < 3; i++)
3732 if (valid_antennae & (1 << i))
3733 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
3734
3735 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
3736 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
3737 max_rssi, agc);
3738
3739 /* dBm = max_rssi dB - agc dB - constant.
3740 * Higher AGC (higher radio gain) means lower signal. */
3741 return (max_rssi - agc - IWL_RSSI_OFFSET);
3742}
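
The conversion above boils down to one line of arithmetic: dBm = strongest chain RSSI - AGC gain - fixed offset. A stand-alone sketch of that calculation follows; the offset is an assumed placeholder for IWL_RSSI_OFFSET, not a value taken from the driver headers.

#include <stdio.h>

/* Assumed constant for illustration; the real value is defined in the driver headers. */
#define DEMO_RSSI_OFFSET 44

/* Mirror of the arithmetic in iwl4965_calc_rssi(): pick the strongest chain,
 * subtract the AGC gain and a fixed calibration offset to get dBm. */
static int calc_rssi_dbm(int rssi_a, int rssi_b, int rssi_c, int agc)
{
	int max_rssi = rssi_a;

	if (rssi_b > max_rssi)
		max_rssi = rssi_b;
	if (rssi_c > max_rssi)
		max_rssi = rssi_c;

	return max_rssi - agc - DEMO_RSSI_OFFSET;
}

int main(void)
{
	/* e.g. chains report 62/58/60 with 70 dB of AGC gain -> 62 - 70 - 44 = -52 dBm */
	printf("signal = %d dBm\n", calc_rssi_dbm(62, 58, 60, 70));
	return 0;
}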
3743
3744#ifdef CONFIG_IWL4965_HT
3745
3746void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
3747 struct ieee80211_ht_info *ht_info,
3748 enum ieee80211_band band)
3749{
3750 ht_info->cap = 0;
3751 memset(ht_info->supp_mcs_set, 0, 16);
3752
3753 ht_info->ht_supported = 1;
3754
3755 if (band == IEEE80211_BAND_5GHZ) {
3756 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3757 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3758 ht_info->supp_mcs_set[4] = 0x01;
3759 }
3760 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
3761 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3762 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3763 (IWL_MIMO_PS_NONE << 2));
3764
3765 if (priv->cfg->mod_params->amsdu_size_8K)
3766 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
3767
3768 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3769 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3770
3771 ht_info->supp_mcs_set[0] = 0xFF;
3772 ht_info->supp_mcs_set[1] = 0xFF;
3773}
3774#endif /* CONFIG_IWL4965_HT */
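
supp_mcs_set is a bitmap of supported MCS indices: byte 0 covers MCS 0-7 (one spatial stream), byte 1 covers MCS 8-15 (two streams), and bit 0 of byte 4 is MCS 32, the 40 MHz duplicate format, which is why it is only set on the 5 GHz band where 40 MHz operation is enabled above. A small decoder sketch, assuming that reading of the 802.11n MCS-set layout:

#include <stdio.h>
#include <stdint.h>

/* Decode an 802.11n supported-MCS bitmap the way the capability above fills
 * it in: byte 0 = MCS 0-7, byte 1 = MCS 8-15, bit 0 of byte 4 = MCS 32. */
static void print_supported_mcs(const uint8_t set[16])
{
	int mcs;

	for (mcs = 0; mcs < 77; mcs++)
		if (set[mcs / 8] & (1 << (mcs % 8)))
			printf("MCS %d supported\n", mcs);
}

int main(void)
{
	uint8_t supp_mcs_set[16] = { 0 };

	supp_mcs_set[0] = 0xFF;	/* MCS 0-7, one spatial stream */
	supp_mcs_set[1] = 0xFF;	/* MCS 8-15, two spatial streams */
	supp_mcs_set[4] = 0x01;	/* MCS 32, 40 MHz duplicate format (5 GHz case above) */

	print_supported_mcs(supp_mcs_set);
	return 0;
}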
3775
3776static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
3777{
3778 unsigned long flags;
3779
3780 spin_lock_irqsave(&priv->sta_lock, flags);
3781 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
3782 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3783 priv->stations[sta_id].sta.sta.modify_mask = 0;
3784 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3785 spin_unlock_irqrestore(&priv->sta_lock, flags);
3786
3787 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3788}
3789
3790static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
3791{
3792 /* FIXME: need locking over ps_status ??? */
3793 u8 sta_id = iwl4965_hw_find_station(priv, addr);
3794
3795 if (sta_id != IWL_INVALID_STATION) {
3796 u8 sta_awake = priv->stations[sta_id].
3797 ps_status == STA_PS_STATUS_WAKE;
3798
3799 if (sta_awake && ps_bit)
3800 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
3801 else if (!sta_awake && !ps_bit) {
3802 iwl4965_sta_modify_ps_wake(priv, sta_id);
3803 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
3804 }
3805 }
3806}
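
The bookkeeping above is a two-state machine keyed on the PM bit: an awake station that sets PM is marked asleep, and a sleeping station that clears PM is woken and has Tx re-enabled via the station table. A minimal stand-alone sketch of just that transition logic:

#include <stdio.h>

enum sta_ps { STA_AWAKE, STA_SLEEP };

/* Mirror of the transition logic in iwl4965_update_ps_mode(): the PM bit in
 * the frame control tells us whether the peer is entering power save. */
static enum sta_ps update_ps(enum sta_ps cur, int pm_bit)
{
	if (cur == STA_AWAKE && pm_bit)
		return STA_SLEEP;
	if (cur == STA_SLEEP && !pm_bit)
		return STA_AWAKE;	/* the driver also re-enables Tx here */
	return cur;
}

int main(void)
{
	enum sta_ps st = STA_AWAKE;

	st = update_ps(st, 1);	/* frame with PM set   -> station sleeps */
	printf("after PM=1: %s\n", st == STA_SLEEP ? "sleep" : "awake");

	st = update_ps(st, 0);	/* frame with PM clear -> station wakes */
	printf("after PM=0: %s\n", st == STA_SLEEP ? "sleep" : "awake");
	return 0;
}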
3807#ifdef CONFIG_IWLWIFI_DEBUG
3808
3809/**
3810 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
3811 *
3812 * You may hack this function to show different aspects of received frames,
3813 * including selective frame dumps.
3814 * group100 parameter selects whether to show 1 out of 100 good frames.
3815 *
3816 * TODO: This was originally written for 3945, need to audit for
3817 * proper operation with 4965.
3818 */
3819static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3820 struct iwl4965_rx_packet *pkt,
3821 struct ieee80211_hdr *header, int group100)
3822{
3823 u32 to_us;
3824 u32 print_summary = 0;
3825 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
3826 u32 hundred = 0;
3827 u32 dataframe = 0;
3828 u16 fc;
3829 u16 seq_ctl;
3830 u16 channel;
3831 u16 phy_flags;
3832 int rate_sym;
3833 u16 length;
3834 u16 status;
3835 u16 bcn_tmr;
3836 u32 tsf_low;
3837 u64 tsf;
3838 u8 rssi;
3839 u8 agc;
3840 u16 sig_avg;
3841 u16 noise_diff;
3842 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
3843 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
3844 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
3845 u8 *data = IWL_RX_DATA(pkt);
3846
3847 if (likely(!(iwl_debug_level & IWL_DL_RX)))
3848 return;
3849
3850 /* MAC header */
3851 fc = le16_to_cpu(header->frame_control);
3852 seq_ctl = le16_to_cpu(header->seq_ctrl);
3853
3854 /* metadata */
3855 channel = le16_to_cpu(rx_hdr->channel);
3856 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
3857 rate_sym = rx_hdr->rate;
3858 length = le16_to_cpu(rx_hdr->len);
3859
3860 /* end-of-frame status and timestamp */
3861 status = le32_to_cpu(rx_end->status);
3862 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
3863 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
3864 tsf = le64_to_cpu(rx_end->timestamp);
3865
3866 /* signal statistics */
3867 rssi = rx_stats->rssi;
3868 agc = rx_stats->agc;
3869 sig_avg = le16_to_cpu(rx_stats->sig_avg);
3870 noise_diff = le16_to_cpu(rx_stats->noise_diff);
3871
3872 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
3873
3874 /* if data frame is to us and all is good,
3875 * (optionally) print summary for only 1 out of every 100 */
3876 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
3877 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
3878 dataframe = 1;
3879 if (!group100)
3880 print_summary = 1; /* print each frame */
3881 else if (priv->framecnt_to_us < 100) {
3882 priv->framecnt_to_us++;
3883 print_summary = 0;
3884 } else {
3885 priv->framecnt_to_us = 0;
3886 print_summary = 1;
3887 hundred = 1;
3888 }
3889 } else {
3890 /* print summary for all other frames */
3891 print_summary = 1;
3892 }
3893
3894 if (print_summary) {
3895 char *title;
3896 int rate_idx;
3897 u32 bitrate;
3898
3899 if (hundred)
3900 title = "100Frames";
3901 else if (fc & IEEE80211_FCTL_RETRY)
3902 title = "Retry";
3903 else if (ieee80211_is_assoc_response(fc))
3904 title = "AscRsp";
3905 else if (ieee80211_is_reassoc_response(fc))
3906 title = "RasRsp";
3907 else if (ieee80211_is_probe_response(fc)) {
3908 title = "PrbRsp";
3909 print_dump = 1; /* dump frame contents */
3910 } else if (ieee80211_is_beacon(fc)) {
3911 title = "Beacon";
3912 print_dump = 1; /* dump frame contents */
3913 } else if (ieee80211_is_atim(fc))
3914 title = "ATIM";
3915 else if (ieee80211_is_auth(fc))
3916 title = "Auth";
3917 else if (ieee80211_is_deauth(fc))
3918 title = "DeAuth";
3919 else if (ieee80211_is_disassoc(fc))
3920 title = "DisAssoc";
3921 else
3922 title = "Frame";
3923
3924 rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
3925 if (unlikely(rate_idx == -1))
3926 bitrate = 0;
3927 else
3928 bitrate = iwl4965_rates[rate_idx].ieee / 2;
3929
3930 /* print frame summary.
3931 * MAC addresses show just the last byte (for brevity),
3932 * but you can hack it to show more, if you'd like to. */
3933 if (dataframe)
3934 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
3935			"len=%u, rssi=%d, chnl=%d, rate=%u\n",
3936 title, fc, header->addr1[5],
3937 length, rssi, channel, bitrate);
3938 else {
3939 /* src/dst addresses assume managed mode */
3940 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
3941 "src=0x%02x, rssi=%u, tim=%lu usec, "
3942 "phy=0x%02x, chnl=%d\n",
3943 title, fc, header->addr1[5],
3944 header->addr3[5], rssi,
3945 tsf_low - priv->scan_start_tsf,
3946 phy_flags, channel);
3947 }
3948 }
3949 if (print_dump)
3950 iwl_print_hex_dump(IWL_DL_RX, data, length);
3951}
3952#else
3953static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3954 struct iwl4965_rx_packet *pkt,
3955 struct ieee80211_hdr *header,
3956 int group100)
3957{
3958}
3959#endif
3960
3961
3962
3963/* Called for REPLY_RX (legacy ABG frames), or
3964 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
3965static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
3966 struct iwl4965_rx_mem_buffer *rxb)
3967{
3968 struct ieee80211_hdr *header;
3969 struct ieee80211_rx_status rx_status;
3970 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3971 /* Use phy data (Rx signal strength, etc.) contained within
3972 * this rx packet for legacy frames,
3973 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
3974 int include_phy = (pkt->hdr.cmd == REPLY_RX);
3975 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3976 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3977 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3978 __le32 *rx_end;
3979 unsigned int len = 0;
3980 u16 fc;
3981 u8 network_packet;
3982
3983 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
3984 rx_status.freq =
3985 ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel));
3986 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3987 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
3988 rx_status.rate_idx =
3989 iwl4965_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags));
3990 if (rx_status.band == IEEE80211_BAND_5GHZ)
3991 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3992
3993 rx_status.antenna = 0;
3994 rx_status.flag = 0;
3995
3996 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
3997		IWL_DEBUG_DROP("dsp size out of range [0,20]: %d\n",
3998 rx_start->cfg_phy_cnt);
3999 return;
4000 }
4001
4002 if (!include_phy) {
4003 if (priv->last_phy_res[0])
4004 rx_start = (struct iwl4965_rx_phy_res *)
4005 &priv->last_phy_res[1];
4006 else
4007 rx_start = NULL;
4008 }
4009
4010 if (!rx_start) {
4011		IWL_ERROR("MPDU frame without PHY data\n");
4012 return;
4013 }
4014
4015 if (include_phy) {
4016 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
4017 + rx_start->cfg_phy_cnt);
4018
4019 len = le16_to_cpu(rx_start->byte_count);
4020 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
4021 sizeof(struct iwl4965_rx_phy_res) + len);
4022 } else {
4023 struct iwl4965_rx_mpdu_res_start *amsdu =
4024 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
4025
4026 header = (void *)(pkt->u.raw +
4027 sizeof(struct iwl4965_rx_mpdu_res_start));
4028 len = le16_to_cpu(amsdu->byte_count);
4029 rx_end = (__le32 *) (pkt->u.raw +
4030 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
4031 }
4032
4033 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
4034 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
4035 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
4036 le32_to_cpu(*rx_end));
4037 return;
4038 }
4039
4040 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
4041
4042 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
4043 rx_status.ssi = iwl4965_calc_rssi(rx_start);
4044
4045 /* Meaningful noise values are available only from beacon statistics,
4046 * which are gathered only when associated, and indicate noise
4047 * only for the associated network channel ...
4048 * Ignore these noise values while scanning (other channels) */
4049 if (iwl_is_associated(priv) &&
4050 !test_bit(STATUS_SCANNING, &priv->status)) {
4051 rx_status.noise = priv->last_rx_noise;
4052 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
4053 rx_status.noise);
4054 } else {
4055 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4056 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
4057 }
4058
4059 /* Reset beacon noise level if not associated. */
4060 if (!iwl_is_associated(priv))
4061 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4062
4063 /* Set "1" to report good data frames in groups of 100 */
4064	/* FIXME: need to optimize the call: */
4065 iwl4965_dbg_report_frame(priv, pkt, header, 1);
4066
4067 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
4068 rx_status.ssi, rx_status.noise, rx_status.signal,
4069 (unsigned long long)rx_status.mactime);
4070
4071 network_packet = iwl4965_is_network_packet(priv, header);
4072 if (network_packet) {
4073 priv->last_rx_rssi = rx_status.ssi;
4074 priv->last_beacon_time = priv->ucode_beacon_time;
4075 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
4076 }
4077
4078 fc = le16_to_cpu(header->frame_control);
4079 switch (fc & IEEE80211_FCTL_FTYPE) {
4080 case IEEE80211_FTYPE_MGMT:
4081 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
4082 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
4083 header->addr2);
4084 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
4085 break;
4086
4087 case IEEE80211_FTYPE_CTL:
4088#ifdef CONFIG_IWL4965_HT
4089 switch (fc & IEEE80211_FCTL_STYPE) {
4090 case IEEE80211_STYPE_BACK_REQ:
4091 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
4092 iwl4965_handle_data_packet(priv, 0, include_phy,
4093 rxb, &rx_status);
4094 break;
4095 default:
4096 break;
4097 }
4098#endif
4099 break;
4100
4101 case IEEE80211_FTYPE_DATA: {
4102 DECLARE_MAC_BUF(mac1);
4103 DECLARE_MAC_BUF(mac2);
4104 DECLARE_MAC_BUF(mac3);
4105
4106 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
4107 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
4108 header->addr2);
4109
4110 if (unlikely(!network_packet))
4111 IWL_DEBUG_DROP("Dropping (non network): "
4112 "%s, %s, %s\n",
4113 print_mac(mac1, header->addr1),
4114 print_mac(mac2, header->addr2),
4115 print_mac(mac3, header->addr3));
4116 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
4117 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
4118 print_mac(mac1, header->addr1),
4119 print_mac(mac2, header->addr2),
4120 print_mac(mac3, header->addr3));
4121 else
4122 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
4123 &rx_status);
4124 break;
4125 }
4126 default:
4127 break;
4128
4129 }
4130}
4131
4132/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4133 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
4134static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
4135 struct iwl4965_rx_mem_buffer *rxb)
4136{
4137 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4138 priv->last_phy_res[0] = 1;
4139 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4140 sizeof(struct iwl4965_rx_phy_res));
4141}
4142static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
4143 struct iwl4965_rx_mem_buffer *rxb)
4144
4145{
4146#ifdef CONFIG_IWL4965_SENSITIVITY
4147 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4148 struct iwl4965_missed_beacon_notif *missed_beacon;
4149
4150 missed_beacon = &pkt->u.missed_beacon;
4151 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
4152 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
4153 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
4154 le32_to_cpu(missed_beacon->total_missed_becons),
4155 le32_to_cpu(missed_beacon->num_recvd_beacons),
4156 le32_to_cpu(missed_beacon->num_expected_beacons));
4157 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
4158 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
4159 queue_work(priv->workqueue, &priv->sensitivity_work);
4160 }
4161#endif /*CONFIG_IWL4965_SENSITIVITY*/
4162}
4163#ifdef CONFIG_IWL4965_HT
4164
4165/**
4166 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4167 */
4168static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
4169 int sta_id, int tid)
4170{
4171 unsigned long flags;
4172
4173 /* Remove "disable" flag, to enable Tx for this TID */
4174 spin_lock_irqsave(&priv->sta_lock, flags);
4175 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4176 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4177 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4178 spin_unlock_irqrestore(&priv->sta_lock, flags);
4179
4180 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4181}
4182
4183/**
4184 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4185 *
4186 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4187 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4188 */
4189static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4190 struct iwl4965_ht_agg *agg,
4191 struct iwl4965_compressed_ba_resp*
4192 ba_resp)
4193
4194{
4195 int i, sh, ack;
4196 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
4197 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4198 u64 bitmap;
4199 int successes = 0;
4200 struct ieee80211_tx_status *tx_status;
4201
4202 if (unlikely(!agg->wait_for_ba)) {
4203 IWL_ERROR("Received BA when not expected\n");
4204 return -EINVAL;
4205 }
4206
4207 /* Mark that the expected block-ack response arrived */
4208 agg->wait_for_ba = 0;
4209 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
4210
4211 /* Calculate shift to align block-ack bits with our Tx window bits */
4212 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
4213	if (sh < 0) /* btw: something is wrong with indices */
4214 sh += 0x100;
4215
4216 /* don't use 64-bit values for now */
4217 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
4218
4219 if (agg->frame_count > (64 - sh)) {
4220		IWL_DEBUG_TX_REPLY("more frames than bitmap size\n");
4221 return -1;
4222 }
4223
4224 /* check for success or failure according to the
4225 * transmitted bitmap and block-ack bitmap */
4226 bitmap &= agg->bitmap;
4227
4228 /* For each frame attempted in aggregation,
4229 * update driver's record of tx frame's status. */
4230 for (i = 0; i < agg->frame_count ; i++) {
4231 ack = bitmap & (1 << i);
4232 successes += !!ack;
4233 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
4234 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
4235 agg->start_idx + i);
4236 }
4237
4238 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
4239 tx_status->flags = IEEE80211_TX_STATUS_ACK;
4240 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
4241 tx_status->ampdu_ack_map = successes;
4242 tx_status->ampdu_ack_len = agg->frame_count;
4243 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
4244 &tx_status->control);
4245
4246 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
4247
4248 return 0;
4249}
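
The core of the BA accounting above is bitmap alignment: compute the shift between the driver's Tx window start and the BA starting sequence (wrapping in the 256-entry sequence space), shift the 64-bit BA bitmap into window coordinates, mask it with the frames actually sent, then count the set bits. A simplified stand-alone sketch with made-up values (it skips the SEQ_TO_INDEX step and other driver details):

#include <stdio.h>
#include <stdint.h>

/* Stand-alone sketch of the block-ack accounting above: align the BA bitmap
 * with the driver's Tx window and count which attempted frames were ACKed. */
static int count_ba_successes(uint64_t ba_bitmap, uint64_t sent_bitmap,
			      int start_idx, int ba_seq, int frame_count)
{
	int sh = start_idx - ba_seq;
	int i, successes = 0;

	if (sh < 0)			/* sequence space wrapped */
		sh += 0x100;

	ba_bitmap >>= sh;		/* align BA bits with the Tx window */
	ba_bitmap &= sent_bitmap;	/* only frames we actually sent count */

	for (i = 0; i < frame_count; i++)
		if (ba_bitmap & (1ULL << i))
			successes++;

	return successes;
}

int main(void)
{
	/* made-up example: 5 frames sent from index 10, BA window starts at 8,
	 * and the receiver ACKed all but the third frame */
	uint64_t sent = 0x1f;
	uint64_t ba   = 0x7b << 2;	/* BA bitmap sits sh = 10 - 8 = 2 bits lower */

	printf("%d of 5 frames ACKed\n",
	       count_ba_successes(ba, sent, 10, 8, 5));
	return 0;
}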
4250
4251/** 1931/**
4252 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration 1932 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4253 */ 1933 */
@@ -4258,22 +1938,24 @@ static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
4258 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 1938 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4259 iwl_write_prph(priv, 1939 iwl_write_prph(priv,
4260 IWL49_SCD_QUEUE_STATUS_BITS(txq_id), 1940 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
4261 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| 1941 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4262 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 1942 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4263} 1943}
4264 1944
4265/** 1945/**
4266 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID 1946 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
4267 * priv->lock must be held by the caller 1947 * priv->lock must be held by the caller
4268 */ 1948 */
4269static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id, 1949static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
4270 u16 ssn_idx, u8 tx_fifo) 1950 u16 ssn_idx, u8 tx_fifo)
4271{ 1951{
4272 int ret = 0; 1952 int ret = 0;
4273 1953
4274 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) { 1954 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
4275 IWL_WARNING("queue number too small: %d, must be > %d\n", 1955 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
4276 txq_id, IWL_BACK_QUEUE_FIRST_ID); 1956 IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
1957 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1958 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
4277 return -EINVAL; 1959 return -EINVAL;
4278 } 1960 }
4279 1961
@@ -4291,7 +1973,7 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4291 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); 1973 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4292 1974
4293 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); 1975 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4294 iwl4965_txq_ctx_deactivate(priv, txq_id); 1976 iwl_txq_ctx_deactivate(priv, txq_id);
4295 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); 1977 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4296 1978
4297 iwl_release_nic_access(priv); 1979 iwl_release_nic_access(priv);
@@ -4299,121 +1981,6 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4299 return 0; 1981 return 0;
4300} 1982}
4301 1983
4302int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
4303 u8 tid, int txq_id)
4304{
4305 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4306 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4307 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4308
4309 switch (priv->stations[sta_id].tid[tid].agg.state) {
4310 case IWL_EMPTYING_HW_QUEUE_DELBA:
4311 /* We are reclaiming the last packet of the */
4312 /* aggregated HW queue */
4313 if (txq_id == tid_data->agg.txq_id &&
4314 q->read_ptr == q->write_ptr) {
4315 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4316 int tx_fifo = default_tid_to_tx_fifo[tid];
4317 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4318 iwl4965_tx_queue_agg_disable(priv, txq_id,
4319 ssn, tx_fifo);
4320 tid_data->agg.state = IWL_AGG_OFF;
4321 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4322 }
4323 break;
4324 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4325 /* We are reclaiming the last packet of the queue */
4326 if (tid_data->tfds_in_queue == 0) {
4327 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4328 tid_data->agg.state = IWL_AGG_ON;
4329 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4330 }
4331 break;
4332 }
4333 return 0;
4334}
4335
4336/**
4337 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
4338 * @index -- current index
4339 * @n_bd -- total number of entries in queue (s/b power of 2)
4340 */
4341static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4342{
4343 return (index == 0) ? n_bd - 1 : index - 1;
4344}
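
queue_dec_wrap is the usual circular-buffer step backwards; a tiny sketch showing the wrap at index 0:

#include <stdio.h>

/* Same wrap-back decrement as iwl4965_queue_dec_wrap(): stepping backwards
 * through a circular buffer of n_bd entries (n_bd is a power of 2). */
static int queue_dec_wrap(int index, int n_bd)
{
	return (index == 0) ? n_bd - 1 : index - 1;
}

int main(void)
{
	int n_bd = 256, idx = 2, i;

	/* walking back from index 2 wraps 2 -> 1 -> 0 -> 255 -> 254 */
	for (i = 0; i < 5; i++) {
		printf("%d ", idx);
		idx = queue_dec_wrap(idx, n_bd);
	}
	printf("\n");
	return 0;
}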
4345
4346/**
4347 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
4348 *
4349 * Handles block-acknowledge notification from device, which reports success
4350 * of frames sent via aggregation.
4351 */
4352static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4353 struct iwl4965_rx_mem_buffer *rxb)
4354{
4355 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4356 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
4357 int index;
4358 struct iwl4965_tx_queue *txq = NULL;
4359 struct iwl4965_ht_agg *agg;
4360 DECLARE_MAC_BUF(mac);
4361
4362 /* "flow" corresponds to Tx queue */
4363 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4364
4365 /* "ssn" is start of block-ack Tx window, corresponds to index
4366 * (in Tx queue's circular buffer) of first TFD/frame in window */
4367 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4368
4369 if (scd_flow >= priv->hw_params.max_txq_num) {
4370		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
4371 return;
4372 }
4373
4374 txq = &priv->txq[scd_flow];
4375 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4376
4377 /* Find index just before block-ack window */
4378 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4379
4380 /* TODO: Need to get this copy more safely - now good for debug */
4381
4382 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
4383 "sta_id = %d\n",
4384 agg->wait_for_ba,
4385 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
4386 ba_resp->sta_id);
4387 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
4388 "%d, scd_ssn = %d\n",
4389 ba_resp->tid,
4390 ba_resp->seq_ctl,
4391 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
4392 ba_resp->scd_flow,
4393 ba_resp->scd_ssn);
4394 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
4395 agg->start_idx,
4396 (unsigned long long)agg->bitmap);
4397
4398 /* Update driver's record of ACK vs. not for each frame in window */
4399 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
4400
4401 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4402 * block-ack window (we assume that they've been successfully
4403 * transmitted ... if not, it's too late anyway). */
4404 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4405 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
4406 priv->stations[ba_resp->sta_id].
4407 tid[ba_resp->tid].tfds_in_queue -= freed;
4408 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
4409 priv->mac80211_registered &&
4410 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4411 ieee80211_wake_queue(priv->hw, scd_flow);
4412 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
4413 ba_resp->tid, scd_flow);
4414 }
4415}
4416
4417/** 1984/**
4418 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue 1985 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
4419 */ 1986 */
@@ -4424,10 +1991,10 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4424 u32 tbl_dw; 1991 u32 tbl_dw;
4425 u16 scd_q2ratid; 1992 u16 scd_q2ratid;
4426 1993
4427 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1994 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4428 1995
4429 tbl_dw_addr = priv->scd_base_addr + 1996 tbl_dw_addr = priv->scd_base_addr +
4430 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); 1997 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4431 1998
4432 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); 1999 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
4433 2000
@@ -4445,31 +2012,34 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4445/** 2012/**
4446 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue 2013 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4447 * 2014 *
4448 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID, 2015 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
4449 * i.e. it must be one of the higher queues used for aggregation 2016 * i.e. it must be one of the higher queues used for aggregation
4450 */ 2017 */
4451static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id, 2018static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
4452 int tx_fifo, int sta_id, int tid, 2019 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
4453 u16 ssn_idx)
4454{ 2020{
4455 unsigned long flags; 2021 unsigned long flags;
4456 int rc; 2022 int ret;
4457 u16 ra_tid; 2023 u16 ra_tid;
4458 2024
4459 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) 2025 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
4460 IWL_WARNING("queue number too small: %d, must be > %d\n", 2026 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
4461 txq_id, IWL_BACK_QUEUE_FIRST_ID); 2027 IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
2028 txq_id, IWL49_FIRST_AMPDU_QUEUE,
2029 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
2030 return -EINVAL;
2031 }
4462 2032
4463 ra_tid = BUILD_RAxTID(sta_id, tid); 2033 ra_tid = BUILD_RAxTID(sta_id, tid);
4464 2034
4465 /* Modify device's station table to Tx this TID */ 2035 /* Modify device's station table to Tx this TID */
4466 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid); 2036 iwl_sta_modify_enable_tid_tx(priv, sta_id, tid);
4467 2037
4468 spin_lock_irqsave(&priv->lock, flags); 2038 spin_lock_irqsave(&priv->lock, flags);
4469 rc = iwl_grab_nic_access(priv); 2039 ret = iwl_grab_nic_access(priv);
4470 if (rc) { 2040 if (ret) {
4471 spin_unlock_irqrestore(&priv->lock, flags); 2041 spin_unlock_irqrestore(&priv->lock, flags);
4472 return rc; 2042 return ret;
4473 } 2043 }
4474 2044
4475 /* Stop this Tx queue before configuring it */ 2045 /* Stop this Tx queue before configuring it */
@@ -4489,14 +2059,14 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4489 2059
4490 /* Set up Tx window size and frame limit for this queue */ 2060 /* Set up Tx window size and frame limit for this queue */
4491 iwl_write_targ_mem(priv, 2061 iwl_write_targ_mem(priv,
4492 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 2062 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4493 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 2063 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4494 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 2064 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4495 2065
4496 iwl_write_targ_mem(priv, priv->scd_base_addr + 2066 iwl_write_targ_mem(priv, priv->scd_base_addr +
4497 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 2067 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4498 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) 2068 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4499 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 2069 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4500 2070
4501 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); 2071 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4502 2072
@@ -4509,444 +2079,313 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4509 return 0; 2079 return 0;
4510} 2080}
4511 2081
4512#endif /* CONFIG_IWL4965_HT */ 2082int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4513 2083 enum ieee80211_ampdu_mlme_action action,
4514/** 2084 const u8 *addr, u16 tid, u16 *ssn)
4515 * iwl4965_add_station - Initialize a station's hardware rate table
4516 *
4517 * The uCode's station table contains a table of fallback rates
4518 * for automatic fallback during transmission.
4519 *
4520 * NOTE: This sets up a default set of values. These will be replaced later
4521 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4522 * rc80211_simple.
4523 *
4524 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4525 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4526 * which requires station table entry to exist).
4527 */
4528void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
4529{
4530 int i, r;
4531 struct iwl_link_quality_cmd link_cmd = {
4532 .reserved1 = 0,
4533 };
4534 u16 rate_flags;
4535
4536 /* Set up the rate scaling to start at selected rate, fall back
4537 * all the way down to 1M in IEEE order, and then spin on 1M */
4538 if (is_ap)
4539 r = IWL_RATE_54M_INDEX;
4540 else if (priv->band == IEEE80211_BAND_5GHZ)
4541 r = IWL_RATE_6M_INDEX;
4542 else
4543 r = IWL_RATE_1M_INDEX;
4544
4545 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4546 rate_flags = 0;
4547 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4548 rate_flags |= RATE_MCS_CCK_MSK;
4549
4550 /* Use Tx antenna B only */
4551 rate_flags |= RATE_MCS_ANT_B_MSK;
4552 rate_flags &= ~RATE_MCS_ANT_A_MSK;
4553
4554 link_cmd.rs_table[i].rate_n_flags =
4555 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4556 r = iwl4965_get_prev_ieee_rate(r);
4557 }
4558
4559 link_cmd.general_params.single_stream_ant_msk = 2;
4560 link_cmd.general_params.dual_stream_ant_msk = 3;
4561 link_cmd.agg_params.agg_dis_start_th = 3;
4562 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4563
4564 /* Update the rate scaling for control frame Tx to AP */
4565 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
4566
4567 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
4568 sizeof(link_cmd), &link_cmd, NULL);
4569}
4570
4571#ifdef CONFIG_IWL4965_HT
4572
4573static u8 iwl4965_is_channel_extension(struct iwl_priv *priv,
4574 enum ieee80211_band band,
4575 u16 channel, u8 extension_chan_offset)
4576{
4577 const struct iwl_channel_info *ch_info;
4578
4579 ch_info = iwl_get_channel_info(priv, band, channel);
4580 if (!is_channel_valid(ch_info))
4581 return 0;
4582
4583 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
4584 return 0;
4585
4586 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4587 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4588 return 1;
4589
4590 return 0;
4591}
4592
4593static u8 iwl4965_is_fat_tx_allowed(struct iwl_priv *priv,
4594 struct ieee80211_ht_info *sta_ht_inf)
4595{
4596 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
4597
4598 if ((!iwl_ht_conf->is_ht) ||
4599 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
4600 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
4601 return 0;
4602
4603 if (sta_ht_inf) {
4604 if ((!sta_ht_inf->ht_supported) ||
4605 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
4606 return 0;
4607 }
4608
4609 return (iwl4965_is_channel_extension(priv, priv->band,
4610 iwl_ht_conf->control_channel,
4611 iwl_ht_conf->extension_chan_offset));
4612}
4613
4614void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
4615{ 2085{
4616 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon; 2086 struct iwl_priv *priv = hw->priv;
4617 u32 val; 2087 DECLARE_MAC_BUF(mac);
4618 2088
4619 if (!ht_info->is_ht) 2089 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
4620 return; 2090 print_mac(mac, addr), tid);
4621 2091
4622 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */ 2092 if (!(priv->cfg->sku & IWL_SKU_N))
4623 if (iwl4965_is_fat_tx_allowed(priv, NULL)) 2093 return -EACCES;
4624 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4625 else
4626 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4627 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4628
4629 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4630 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
4631 le16_to_cpu(rxon->channel),
4632 ht_info->control_channel);
4633 rxon->channel = cpu_to_le16(ht_info->control_channel);
4634 return;
4635 }
4636 2094
4637 /* Note: control channel is opposite of extension channel */ 2095 switch (action) {
4638 switch (ht_info->extension_chan_offset) { 2096 case IEEE80211_AMPDU_RX_START:
4639 case IWL_EXT_CHANNEL_OFFSET_ABOVE: 2097 IWL_DEBUG_HT("start Rx\n");
4640 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 2098 return iwl_rx_agg_start(priv, addr, tid, *ssn);
4641 break; 2099 case IEEE80211_AMPDU_RX_STOP:
4642 case IWL_EXT_CHANNEL_OFFSET_BELOW: 2100 IWL_DEBUG_HT("stop Rx\n");
4643 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 2101 return iwl_rx_agg_stop(priv, addr, tid);
4644 break; 2102 case IEEE80211_AMPDU_TX_START:
4645 case IWL_EXT_CHANNEL_OFFSET_NONE: 2103 IWL_DEBUG_HT("start Tx\n");
2104 return iwl_tx_agg_start(priv, addr, tid, ssn);
2105 case IEEE80211_AMPDU_TX_STOP:
2106 IWL_DEBUG_HT("stop Tx\n");
2107 return iwl_tx_agg_stop(priv, addr, tid);
4646 default: 2108 default:
4647 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; 2109 IWL_DEBUG_HT("unknown\n");
2110 return -EINVAL;
4648 break; 2111 break;
4649 } 2112 }
4650 2113 return 0;
4651 val = ht_info->ht_protection;
4652
4653 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4654
4655 iwl4965_set_rxon_chain(priv);
4656
4657 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4658 "rxon flags 0x%X operation mode :0x%X "
4659 "extension channel offset 0x%x "
4660 "control chan %d\n",
4661 ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1],
4662 le32_to_cpu(rxon->flags), ht_info->ht_protection,
4663 ht_info->extension_chan_offset,
4664 ht_info->control_channel);
4665 return;
4666} 2114}
4667 2115
4668void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index, 2116static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
4669 struct ieee80211_ht_info *sta_ht_inf)
4670{ 2117{
4671 __le32 sta_flags; 2118 switch (cmd_id) {
4672 u8 mimo_ps_mode; 2119 case REPLY_RXON:
4673 2120 return (u16) sizeof(struct iwl4965_rxon_cmd);
4674 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
4675 goto done;
4676
4677 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
4678
4679 sta_flags = priv->stations[index].sta.station_flags;
4680
4681 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
4682
4683 switch (mimo_ps_mode) {
4684 case WLAN_HT_CAP_MIMO_PS_STATIC:
4685 sta_flags |= STA_FLG_MIMO_DIS_MSK;
4686 break;
4687 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
4688 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
4689 break;
4690 case WLAN_HT_CAP_MIMO_PS_DISABLED:
4691 break;
4692 default: 2121 default:
4693 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode); 2122 return len;
4694 break;
4695 } 2123 }
4696
4697 sta_flags |= cpu_to_le32(
4698 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
4699
4700 sta_flags |= cpu_to_le32(
4701 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
4702
4703 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
4704 sta_flags |= STA_FLG_FAT_EN_MSK;
4705 else
4706 sta_flags &= ~STA_FLG_FAT_EN_MSK;
4707
4708 priv->stations[index].sta.station_flags = sta_flags;
4709 done:
4710 return;
4711} 2124}
4712 2125
4713static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv, 2126static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
4714 int sta_id, int tid, u16 ssn)
4715{ 2127{
4716 unsigned long flags; 2128 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
4717 2129 addsta->mode = cmd->mode;
4718 spin_lock_irqsave(&priv->sta_lock, flags); 2130 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
4719 priv->stations[sta_id].sta.station_flags_msk = 0; 2131 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
4720 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; 2132 addsta->station_flags = cmd->station_flags;
4721 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; 2133 addsta->station_flags_msk = cmd->station_flags_msk;
4722 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); 2134 addsta->tid_disable_tx = cmd->tid_disable_tx;
4723 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 2135 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
4724 spin_unlock_irqrestore(&priv->sta_lock, flags); 2136 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2137 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2138 addsta->reserved1 = __constant_cpu_to_le16(0);
2139 addsta->reserved2 = __constant_cpu_to_le32(0);
4725 2140
4726 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 2141 return (u16)sizeof(struct iwl4965_addsta_cmd);
4727} 2142}
4728 2143
4729static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv, 2144static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
4730 int sta_id, int tid)
4731{ 2145{
4732 unsigned long flags; 2146 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
4733
4734 spin_lock_irqsave(&priv->sta_lock, flags);
4735 priv->stations[sta_id].sta.station_flags_msk = 0;
4736 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
4737 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
4738 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4739 spin_unlock_irqrestore(&priv->sta_lock, flags);
4740
4741 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4742} 2147}
4743 2148
4744/* 2149/**
4745 * Find first available (lowest unused) Tx Queue, mark it "active". 2150 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
4746 * Called only when finding queue for aggregation.
4747 * Should never return anything < 7, because they should already
4748 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4749 */ 2151 */
4750static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv) 2152static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
4751{ 2153 struct iwl_ht_agg *agg,
4752 int txq_id; 2154 struct iwl4965_tx_resp *tx_resp,
4753 2155 int txq_id, u16 start_idx)
4754 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
4755 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4756 return txq_id;
4757 return -1;
4758}
4759
4760static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
4761 u16 tid, u16 *start_seq_num)
4762{ 2156{
4763 struct iwl_priv *priv = hw->priv; 2157 u16 status;
4764 int sta_id; 2158 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
4765 int tx_fifo; 2159 struct ieee80211_tx_info *info = NULL;
4766 int txq_id; 2160 struct ieee80211_hdr *hdr = NULL;
4767 int ssn = -1; 2161 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
4768 int ret = 0; 2162 int i, sh, idx;
4769 unsigned long flags; 2163 u16 seq;
4770 struct iwl4965_tid_data *tid_data; 2164 if (agg->wait_for_ba)
4771 DECLARE_MAC_BUF(mac); 2165 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
4772 2166
4773 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 2167 agg->frame_count = tx_resp->frame_count;
4774 tx_fifo = default_tid_to_tx_fifo[tid]; 2168 agg->start_idx = start_idx;
4775 else 2169 agg->rate_n_flags = rate_n_flags;
4776 return -EINVAL; 2170 agg->bitmap = 0;
4777 2171
4778 IWL_WARNING("%s on da = %s tid = %d\n", 2172 /* # frames attempted by Tx command */
4779 __func__, print_mac(mac, da), tid); 2173 if (agg->frame_count == 1) {
4780 2174 /* Only one frame was attempted; no block-ack will arrive */
4781 sta_id = iwl4965_hw_find_station(priv, da); 2175 status = le16_to_cpu(frame_status[0].status);
4782 if (sta_id == IWL_INVALID_STATION) 2176 idx = start_idx;
4783 return -ENXIO; 2177
4784 2178 /* FIXME: code repetition */
4785 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 2179 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
4786 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n"); 2180 agg->frame_count, agg->start_idx, idx);
4787 return -ENXIO; 2181
4788 } 2182 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
4789 2183 info->status.retry_count = tx_resp->failure_frame;
4790 txq_id = iwl4965_txq_ctx_activate_free(priv); 2184 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
4791 if (txq_id == -1) 2185 info->flags |= iwl_is_tx_success(status)?
4792 return -ENXIO; 2186 IEEE80211_TX_STAT_ACK : 0;
2187 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
2188 /* FIXME: code repetition end */
2189
2190 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
2191 status & 0xff, tx_resp->failure_frame);
2192 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
2193
2194 agg->wait_for_ba = 0;
2195 } else {
2196 /* Two or more frames were attempted; expect block-ack */
2197 u64 bitmap = 0;
2198 int start = agg->start_idx;
2199
2200 /* Construct bit-map of pending frames within Tx window */
2201 for (i = 0; i < agg->frame_count; i++) {
2202 u16 sc;
2203 status = le16_to_cpu(frame_status[i].status);
2204 seq = le16_to_cpu(frame_status[i].sequence);
2205 idx = SEQ_TO_INDEX(seq);
2206 txq_id = SEQ_TO_QUEUE(seq);
2207
2208 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
2209 AGG_TX_STATE_ABORT_MSK))
2210 continue;
2211
2212 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
2213 agg->frame_count, txq_id, idx);
2214
2215 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
2216
2217 sc = le16_to_cpu(hdr->seq_ctrl);
2218 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
2219 IWL_ERROR("BUG_ON idx doesn't match seq control"
2220 " idx=%d, seq_idx=%d, seq=%d\n",
2221 idx, SEQ_TO_SN(sc),
2222 hdr->seq_ctrl);
2223 return -1;
2224 }
4793 2225
4794 spin_lock_irqsave(&priv->sta_lock, flags); 2226 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
4795 tid_data = &priv->stations[sta_id].tid[tid]; 2227 i, idx, SEQ_TO_SN(sc));
4796 ssn = SEQ_TO_SN(tid_data->seq_number); 2228
4797 tid_data->agg.txq_id = txq_id; 2229 sh = idx - start;
4798 spin_unlock_irqrestore(&priv->sta_lock, flags); 2230 if (sh > 64) {
2231 sh = (start - idx) + 0xff;
2232 bitmap = bitmap << sh;
2233 sh = 0;
2234 start = idx;
2235 } else if (sh < -64)
2236 sh = 0xff - (start - idx);
2237 else if (sh < 0) {
2238 sh = start - idx;
2239 start = idx;
2240 bitmap = bitmap << sh;
2241 sh = 0;
2242 }
2243 bitmap |= (1 << sh);
2244 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
2245 start, (u32)(bitmap & 0xFFFFFFFF));
2246 }
4799 2247
4800 *start_seq_num = ssn; 2248 agg->bitmap = bitmap;
4801 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo, 2249 agg->start_idx = start;
4802 sta_id, tid, ssn); 2250 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
4803 if (ret) 2251 agg->frame_count, agg->start_idx,
4804 return ret; 2252 (unsigned long long)agg->bitmap);
4805 2253
4806 ret = 0; 2254 if (bitmap)
4807 if (tid_data->tfds_in_queue == 0) { 2255 agg->wait_for_ba = 1;
4808 printk(KERN_ERR "HW queue is empty\n");
4809 tid_data->agg.state = IWL_AGG_ON;
4810 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4811 } else {
4812 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4813 tid_data->tfds_in_queue);
4814 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4815 } 2256 }
4816 return ret; 2257 return 0;
4817} 2258}
4818 2259
4819static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da, 2260/**
4820 u16 tid) 2261 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
4821{ 2262 */
4822 2263static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
4823 struct iwl_priv *priv = hw->priv; 2264 struct iwl_rx_mem_buffer *rxb)
4824 int tx_fifo_id, txq_id, sta_id, ssn = -1; 2265{
4825 struct iwl4965_tid_data *tid_data; 2266 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
4826 int ret, write_ptr, read_ptr; 2267 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4827 unsigned long flags; 2268 int txq_id = SEQ_TO_QUEUE(sequence);
4828 DECLARE_MAC_BUF(mac); 2269 int index = SEQ_TO_INDEX(sequence);
2270 struct iwl_tx_queue *txq = &priv->txq[txq_id];
2271 struct ieee80211_tx_info *info;
2272 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
2273 u32 status = le32_to_cpu(tx_resp->u.status);
2274 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
2275 __le16 fc;
2276 struct ieee80211_hdr *hdr;
2277 u8 *qc = NULL;
4829 2278
4830 if (!da) { 2279 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
4831 IWL_ERROR("da = NULL\n"); 2280 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
4832 return -EINVAL; 2281 "is out of range [0-%d] %d %d\n", txq_id,
2282 index, txq->q.n_bd, txq->q.write_ptr,
2283 txq->q.read_ptr);
2284 return;
4833 } 2285 }
4834 2286
4835 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 2287 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
4836 tx_fifo_id = default_tid_to_tx_fifo[tid]; 2288 memset(&info->status, 0, sizeof(info->status));
4837 else
4838 return -EINVAL;
4839
4840 sta_id = iwl4965_hw_find_station(priv, da);
4841
4842 if (sta_id == IWL_INVALID_STATION)
4843 return -ENXIO;
4844
4845 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4846 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4847 2289
4848 tid_data = &priv->stations[sta_id].tid[tid]; 2290 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
4849 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 2291 fc = hdr->frame_control;
4850 txq_id = tid_data->agg.txq_id; 2292 if (ieee80211_is_data_qos(fc)) {
4851 write_ptr = priv->txq[txq_id].q.write_ptr; 2293 qc = ieee80211_get_qos_ctl(hdr);
4852 read_ptr = priv->txq[txq_id].q.read_ptr; 2294 tid = qc[0] & 0xf;
2295 }
4853 2296
4854 /* The queue is not empty */ 2297 sta_id = iwl_get_ra_sta_id(priv, hdr);
4855 if (write_ptr != read_ptr) { 2298 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
4856 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n"); 2299 IWL_ERROR("Station not known\n");
4857 priv->stations[sta_id].tid[tid].agg.state = 2300 return;
4858 IWL_EMPTYING_HW_QUEUE_DELBA;
4859 return 0;
4860 } 2301 }
4861 2302
4862	IWL_DEBUG_HT("HW queue empty\n"); 2303	if (txq->sched_retry) {
4863 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; 2304 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2305 struct iwl_ht_agg *agg = NULL;
4864 2306
4865 spin_lock_irqsave(&priv->lock, flags); 2307 if (!qc)
4866 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id); 2308 return;
4867 spin_unlock_irqrestore(&priv->lock, flags);
4868 2309
4869 if (ret) 2310 agg = &priv->stations[sta_id].tid[tid].agg;
4870 return ret;
4871 2311
4872 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid); 2312 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
4873 2313
4874 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n", 2314 /* check if BAR is needed */
4875 print_mac(mac, da), tid); 2315 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
2316 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
4876 2317
4877 return 0; 2318 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
4878} 2319 int freed, ampdu_q;
2320 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2321 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
2322 "%d index %d\n", scd_ssn , index);
2323 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2324 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
4879 2325
4880int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 2326 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
4881 enum ieee80211_ampdu_mlme_action action, 2327 txq_id >= 0 && priv->mac80211_registered &&
4882 const u8 *addr, u16 tid, u16 *ssn) 2328 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) {
4883{ 2329 /* calculate mac80211 ampdu sw queue to wake */
4884 struct iwl_priv *priv = hw->priv; 2330 ampdu_q = txq_id - IWL49_FIRST_AMPDU_QUEUE +
4885 int sta_id; 2331 priv->hw->queues;
4886 DECLARE_MAC_BUF(mac); 2332 if (agg->state == IWL_AGG_OFF)
4887 2333 ieee80211_wake_queue(priv->hw, txq_id);
4888 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ", 2334 else
4889 print_mac(mac, addr), tid); 2335 ieee80211_wake_queue(priv->hw, ampdu_q);
4890 sta_id = iwl4965_hw_find_station(priv, addr); 2336 }
4891 switch (action) { 2337 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
4892 case IEEE80211_AMPDU_RX_START: 2338 }
4893 IWL_DEBUG_HT("start Rx\n"); 2339 } else {
4894 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn); 2340 info->status.retry_count = tx_resp->failure_frame;
4895 break; 2341 info->flags |=
4896 case IEEE80211_AMPDU_RX_STOP: 2342 iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0;
4897 IWL_DEBUG_HT("stop Rx\n"); 2343 iwl_hwrate_to_tx_control(priv,
4898 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid); 2344 le32_to_cpu(tx_resp->rate_n_flags),
4899 break; 2345 info);
4900 case IEEE80211_AMPDU_TX_START: 2346
4901 IWL_DEBUG_HT("start Tx\n"); 2347 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags "
4902 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn); 2348 "0x%x retries %d\n", txq_id,
4903 case IEEE80211_AMPDU_TX_STOP: 2349 iwl_get_tx_fail_reason(status),
4904 IWL_DEBUG_HT("stop Tx\n"); 2350 status, le32_to_cpu(tx_resp->rate_n_flags),
4905 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid); 2351 tx_resp->failure_frame);
4906 default: 2352
4907 IWL_DEBUG_HT("unknown\n"); 2353 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
4908 return -EINVAL; 2354
4909 break; 2355 if (index != -1) {
2356 int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2357 if (tid != MAX_TID_COUNT)
2358 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2359 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
2360 (txq_id >= 0) && priv->mac80211_registered)
2361 ieee80211_wake_queue(priv->hw, txq_id);
2362 if (tid != MAX_TID_COUNT)
2363 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2364 }
4910 } 2365 }
4911 return 0; 2366
2367 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
2368 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
4912} 2369}
4913 2370
4914#endif /* CONFIG_IWL4965_HT */
4915 2371
4916/* Set up 4965-specific Rx frame reply handlers */ 2372/* Set up 4965-specific Rx frame reply handlers */
4917void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv) 2373static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
4918{ 2374{
4919 /* Legacy Rx frames */ 2375 /* Legacy Rx frames */
4920 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx; 2376 priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx;
4921 2377 /* Tx response */
4922 /* High-throughput (HT) Rx frames */ 2378 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
4923 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4924 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
4925
4926 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4927 iwl4965_rx_missed_beacon_notif;
4928
4929#ifdef CONFIG_IWL4965_HT
4930 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
4931#endif /* CONFIG_IWL4965_HT */
4932} 2379}
4933 2380
4934void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv) 2381static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
4935{ 2382{
4936 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); 2383 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4937#ifdef CONFIG_IWL4965_SENSITIVITY
4938 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4939#endif
4940 init_timer(&priv->statistics_periodic);
4941 priv->statistics_periodic.data = (unsigned long)priv;
4942 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
4943} 2384}
4944 2385
4945void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv) 2386static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
4946{ 2387{
4947 del_timer_sync(&priv->statistics_periodic); 2388 cancel_work_sync(&priv->txpower_work);
4948
4949 cancel_delayed_work(&priv->init_alive_start);
4950} 2389}
4951 2390
4952 2391
@@ -4955,23 +2394,56 @@ static struct iwl_hcmd_ops iwl4965_hcmd = {
4955}; 2394};
4956 2395
4957static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 2396static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
4958 .enqueue_hcmd = iwl4965_enqueue_hcmd, 2397 .get_hcmd_size = iwl4965_get_hcmd_size,
2398 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2399 .chain_noise_reset = iwl4965_chain_noise_reset,
2400 .gain_computation = iwl4965_gain_computation,
2401 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
4959}; 2402};
4960 2403
4961static struct iwl_lib_ops iwl4965_lib = { 2404static struct iwl_lib_ops iwl4965_lib = {
4962 .init_drv = iwl4965_init_drv,
4963 .set_hw_params = iwl4965_hw_set_hw_params, 2405 .set_hw_params = iwl4965_hw_set_hw_params,
2406 .alloc_shared_mem = iwl4965_alloc_shared_mem,
2407 .free_shared_mem = iwl4965_free_shared_mem,
2408 .shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
4964 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, 2409 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
4965 .hw_nic_init = iwl4965_hw_nic_init, 2410 .txq_set_sched = iwl4965_txq_set_sched,
2411 .txq_agg_enable = iwl4965_txq_agg_enable,
2412 .txq_agg_disable = iwl4965_txq_agg_disable,
2413 .rx_handler_setup = iwl4965_rx_handler_setup,
2414 .setup_deferred_work = iwl4965_setup_deferred_work,
2415 .cancel_deferred_work = iwl4965_cancel_deferred_work,
4966 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, 2416 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
4967 .alive_notify = iwl4965_alive_notify, 2417 .alive_notify = iwl4965_alive_notify,
2418 .init_alive_start = iwl4965_init_alive_start,
4968 .load_ucode = iwl4965_load_bsm, 2419 .load_ucode = iwl4965_load_bsm,
2420 .apm_ops = {
2421 .init = iwl4965_apm_init,
2422 .reset = iwl4965_apm_reset,
2423 .stop = iwl4965_apm_stop,
2424 .config = iwl4965_nic_config,
2425 .set_pwr_src = iwl4965_set_pwr_src,
2426 },
4969 .eeprom_ops = { 2427 .eeprom_ops = {
2428 .regulatory_bands = {
2429 EEPROM_REGULATORY_BAND_1_CHANNELS,
2430 EEPROM_REGULATORY_BAND_2_CHANNELS,
2431 EEPROM_REGULATORY_BAND_3_CHANNELS,
2432 EEPROM_REGULATORY_BAND_4_CHANNELS,
2433 EEPROM_REGULATORY_BAND_5_CHANNELS,
2434 EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
2435 EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
2436 },
4970 .verify_signature = iwlcore_eeprom_verify_signature, 2437 .verify_signature = iwlcore_eeprom_verify_signature,
4971 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 2438 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
4972 .release_semaphore = iwlcore_eeprom_release_semaphore, 2439 .release_semaphore = iwlcore_eeprom_release_semaphore,
2440 .check_version = iwl4965_eeprom_check_version,
2441 .query_addr = iwlcore_eeprom_query_addr,
4973 }, 2442 },
4974 .radio_kill_sw = iwl4965_radio_kill_sw, 2443 .set_power = iwl4965_set_power,
2444 .send_tx_power = iwl4965_send_tx_power,
2445 .update_chain_flags = iwl4965_update_chain_flags,
2446 .temperature = iwl4965_temperature_calib,
4975}; 2447};
4976 2448
4977static struct iwl_ops iwl4965_ops = { 2449static struct iwl_ops iwl4965_ops = {
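This hunk grows iwl4965_hcmd_utils and iwl4965_lib into fuller ops tables: the 4965 file supplies function pointers (including the nested apm_ops and eeprom_ops groups) and the generic iwlcore code calls through them rather than calling 4965 functions directly. The sketch below shows the bare pattern under hypothetical names (demo_lib_ops, demo_core_probe); it is not the actual iwl_lib_ops definition.

/*
 * Sketch of the ops-table pattern: the device file fills in a struct
 * of function pointers, the shared core only calls through it.
 * Names are hypothetical and mirror the shape, not the contents,
 * of iwl_lib_ops.
 */
#include <stdio.h>

struct demo_lib_ops {
        int  (*set_hw_params)(void);
        void (*rx_handler_setup)(void);
        struct {
                int  (*init)(void);
                void (*stop)(void);
        } apm_ops;                      /* nested group, like apm_ops above */
};

/* --- device-specific implementations (the "iwl4965_*" equivalents) --- */
static int  demo4965_set_hw_params(void) { puts("4965: hw params"); return 0; }
static void demo4965_rx_setup(void)      { puts("4965: rx handlers"); }
static int  demo4965_apm_init(void)      { puts("4965: apm init"); return 0; }
static void demo4965_apm_stop(void)      { puts("4965: apm stop"); }

static const struct demo_lib_ops demo4965_lib = {
        .set_hw_params    = demo4965_set_hw_params,
        .rx_handler_setup = demo4965_rx_setup,
        .apm_ops = {
                .init = demo4965_apm_init,
                .stop = demo4965_apm_stop,
        },
};

/* --- generic core: knows nothing about the 4965 beyond the ops --- */
static int demo_core_probe(const struct demo_lib_ops *ops)
{
        if (ops->apm_ops.init())
                return -1;
        if (ops->set_hw_params())
                return -1;
        ops->rx_handler_setup();
        return 0;
}

int main(void)
{
        if (demo_core_probe(&demo4965_lib))
                puts("probe failed");
        demo4965_lib.apm_ops.stop();
        return 0;
}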
@@ -4984,10 +2456,14 @@ struct iwl_cfg iwl4965_agn_cfg = {
4984 .name = "4965AGN", 2456 .name = "4965AGN",
4985 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode", 2457 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
4986 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 2458 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2459 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
4987 .ops = &iwl4965_ops, 2460 .ops = &iwl4965_ops,
4988 .mod_params = &iwl4965_mod_params, 2461 .mod_params = &iwl4965_mod_params,
4989}; 2462};
4990 2463
2464/* Module firmware */
2465MODULE_FIRMWARE("iwlwifi-4965" IWL4965_UCODE_API ".ucode");
2466
4991module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444); 2467module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
4992MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 2468MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4993module_param_named(disable, iwl4965_mod_params.disable, int, 0444); 2469module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
@@ -5002,10 +2478,14 @@ MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
5002 2478
5003module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444); 2479module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
5004MODULE_PARM_DESC(queues_num, "number of hw queues."); 2480MODULE_PARM_DESC(queues_num, "number of hw queues.");
5005
5006/* QoS */ 2481/* QoS */
5007module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444); 2482module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
5008MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); 2483MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
2484/* 11n */
2485module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444);
2486MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
5009module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444); 2487module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
5010MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 2488MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
5011 2489
2490module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444);
2491MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
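The closing hunks register the firmware image name (built by pasting the IWL4965_UCODE_API suffix between adjacent string literals) and expose fields of iwl4965_mod_params as module parameters. The userspace sketch below imitates both ideas under hypothetical names (DEMO_UCODE_API, demo_mod_params, a crude demo_set_param() standing in for module_param_named()); it is an illustration of the pattern, not kernel code, and the default values are made up.

/*
 * Sketch: a struct of defaults whose fields can be overridden by
 * "name=value" options, plus a firmware name assembled by string-literal
 * concatenation, as in .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode".
 */
#include <stdio.h>

#define DEMO_UCODE_API "-2"     /* bumped only on incompatible uCode changes */

struct demo_mod_params {
        int num_of_queues;
        int enable_qos;
        int amsdu_size_8K;
        int restart_fw;
};

/* defaults, analogous to the static iwl4965_mod_params initializer */
static struct demo_mod_params params = {
        .num_of_queues = 16,    /* illustrative default */
        .enable_qos    = 1,
        .amsdu_size_8K = 1,
        .restart_fw    = 1,
};

/* crude stand-in for module_param_named(): map option names to fields */
static void demo_set_param(const char *opt)
{
        int val;

        if (sscanf(opt, "queues_num=%d", &val) == 1)
                params.num_of_queues = val;
        else if (sscanf(opt, "qos_enable=%d", &val) == 1)
                params.enable_qos = val;
        else if (sscanf(opt, "amsdu_size_8K=%d", &val) == 1)
                params.amsdu_size_8K = val;
        else if (sscanf(opt, "fw_restart4965=%d", &val) == 1)
                params.restart_fw = val;
        else
                fprintf(stderr, "unknown option: %s\n", opt);
}

int main(int argc, char **argv)
{
        /* adjacent string literals concatenate, exactly as in .fw_name */
        const char *fw_name = "iwlwifi-4965" DEMO_UCODE_API ".ucode";
        int i;

        for (i = 1; i < argc; i++)
                demo_set_param(argv[i]);

        printf("firmware: %s\n", fw_name);
        printf("queues=%d qos=%d amsdu_8K=%d fw_restart=%d\n",
               params.num_of_queues, params.enable_qos,
               params.amsdu_size_8K, params.restart_fw);
        return 0;
}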