path: root/drivers/net/wireless/iwlwifi/iwl3945-base.c
author     Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /drivers/net/wireless/iwlwifi/iwl3945-base.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master   (archived-private-master)

Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl3945-base.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c  |  653
 1 file changed, 362 insertions(+), 291 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d00a80334095..b74a56c48d26 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -31,6 +31,7 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/slab.h>
34#include <linux/dma-mapping.h> 35#include <linux/dma-mapping.h>
35#include <linux/delay.h> 36#include <linux/delay.h>
36#include <linux/sched.h> 37#include <linux/sched.h>
@@ -42,7 +43,6 @@
42#include <linux/if_arp.h> 43#include <linux/if_arp.h>
43 44
44#include <net/ieee80211_radiotap.h> 45#include <net/ieee80211_radiotap.h>
45#include <net/lib80211.h>
46#include <net/mac80211.h> 46#include <net/mac80211.h>
47 47
48#include <asm/div64.h> 48#include <asm/div64.h>
@@ -54,9 +54,10 @@
54#include "iwl-commands.h" 54#include "iwl-commands.h"
55#include "iwl-sta.h" 55#include "iwl-sta.h"
56#include "iwl-3945.h" 56#include "iwl-3945.h"
57#include "iwl-helpers.h"
58#include "iwl-core.h" 57#include "iwl-core.h"
58#include "iwl-helpers.h"
59#include "iwl-dev.h" 59#include "iwl-dev.h"
60#include "iwl-spectrum.h"
60 61
61/* 62/*
62 * module name, copyright, version, etc. 63 * module name, copyright, version, etc.
@@ -71,17 +72,14 @@
71#define VD 72#define VD
72#endif 73#endif
73 74
74#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 75/*
75#define VS "s" 76 * add "s" to indicate spectrum measurement included.
76#else 77 * we add it here to be consistent with previous releases in which
77#define VS 78 * this was configurable.
78#endif 79 */
79 80#define DRV_VERSION IWLWIFI_VERSION VD "s"
80#define IWL39_VERSION "1.2.26k" VD VS 81#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
81#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
82#define DRV_AUTHOR "<ilw@linux.intel.com>" 82#define DRV_AUTHOR "<ilw@linux.intel.com>"
83#define DRV_VERSION IWL39_VERSION
84
85 83
86MODULE_DESCRIPTION(DRV_DESCRIPTION); 84MODULE_DESCRIPTION(DRV_DESCRIPTION);
87MODULE_VERSION(DRV_VERSION); 85MODULE_VERSION(DRV_VERSION);
@@ -90,7 +88,6 @@ MODULE_LICENSE("GPL");
90 88
91 /* module parameters */ 89 /* module parameters */
92struct iwl_mod_params iwl3945_mod_params = { 90struct iwl_mod_params iwl3945_mod_params = {
93 .num_of_queues = IWL39_NUM_QUEUES, /* Not used */
94 .sw_crypto = 1, 91 .sw_crypto = 1,
95 .restart_fw = 1, 92 .restart_fw = 1,
96 /* the rest are 0 by default */ 93 /* the rest are 0 by default */
@@ -356,10 +353,10 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
356static void iwl3945_unset_hw_params(struct iwl_priv *priv) 353static void iwl3945_unset_hw_params(struct iwl_priv *priv)
357{ 354{
358 if (priv->shared_virt) 355 if (priv->shared_virt)
359 pci_free_consistent(priv->pci_dev, 356 dma_free_coherent(&priv->pci_dev->dev,
360 sizeof(struct iwl3945_shared), 357 sizeof(struct iwl3945_shared),
361 priv->shared_virt, 358 priv->shared_virt,
362 priv->shared_phys); 359 priv->shared_phys);
363} 360}
364 361
365static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, 362static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
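The hunk above swaps the legacy pci_free_consistent() wrapper for the generic DMA API's dma_free_coherent(), which takes the underlying struct device rather than the pci_dev. A minimal sketch of the same conversion, assuming a hypothetical my_dev container (the names below are illustrative, not this driver's real types):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustrative only: my_dev and my_dev_free_shared() are hypothetical
 * stand-ins for struct iwl_priv and iwl3945_unset_hw_params(). */
struct my_dev {
	struct pci_dev *pci_dev;
	void *shared_virt;
	dma_addr_t shared_phys;
	size_t shared_len;
};

static void my_dev_free_shared(struct my_dev *d)
{
	if (!d->shared_virt)
		return;

	/* Old style: pci_free_consistent(d->pci_dev, d->shared_len,
	 *                                d->shared_virt, d->shared_phys);
	 * New style: same semantics, routed through the generic DMA API. */
	dma_free_coherent(&d->pci_dev->dev, d->shared_len,
			  d->shared_virt, d->shared_phys);
	d->shared_virt = NULL;
}
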
@@ -368,13 +365,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
368 struct sk_buff *skb_frag, 365 struct sk_buff *skb_frag,
369 int sta_id) 366 int sta_id)
370{ 367{
371 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 368 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
372 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; 369 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
373 370
374 switch (keyinfo->alg) { 371 switch (keyinfo->alg) {
375 case ALG_CCMP: 372 case ALG_CCMP:
376 tx->sec_ctl = TX_CMD_SEC_CCM; 373 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
377 memcpy(tx->key, keyinfo->key, keyinfo->keylen); 374 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
378 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); 375 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
379 break; 376 break;
380 377
@@ -382,13 +379,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
382 break; 379 break;
383 380
384 case ALG_WEP: 381 case ALG_WEP:
385 tx->sec_ctl = TX_CMD_SEC_WEP | 382 tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
386 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 383 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
387 384
388 if (keyinfo->keylen == 13) 385 if (keyinfo->keylen == 13)
389 tx->sec_ctl |= TX_CMD_SEC_KEY128; 386 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
390 387
391 memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen); 388 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
392 389
393 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " 390 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
394 "with key %d\n", info->control.hw_key->hw_key_idx); 391 "with key %d\n", info->control.hw_key->hw_key_idx);
@@ -408,12 +405,11 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
408 struct ieee80211_tx_info *info, 405 struct ieee80211_tx_info *info,
409 struct ieee80211_hdr *hdr, u8 std_id) 406 struct ieee80211_hdr *hdr, u8 std_id)
410{ 407{
411 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 408 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
412 __le32 tx_flags = tx->tx_flags; 409 __le32 tx_flags = tx_cmd->tx_flags;
413 __le16 fc = hdr->frame_control; 410 __le16 fc = hdr->frame_control;
414 u8 rc_flags = info->control.rates[0].flags;
415 411
416 tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 412 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
417 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 413 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
418 tx_flags |= TX_CMD_FLG_ACK_MSK; 414 tx_flags |= TX_CMD_FLG_ACK_MSK;
419 if (ieee80211_is_mgmt(fc)) 415 if (ieee80211_is_mgmt(fc))
@@ -426,25 +422,19 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
426 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 422 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
427 } 423 }
428 424
429 tx->sta_id = std_id; 425 tx_cmd->sta_id = std_id;
430 if (ieee80211_has_morefrags(fc)) 426 if (ieee80211_has_morefrags(fc))
431 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 427 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
432 428
433 if (ieee80211_is_data_qos(fc)) { 429 if (ieee80211_is_data_qos(fc)) {
434 u8 *qc = ieee80211_get_qos_ctl(hdr); 430 u8 *qc = ieee80211_get_qos_ctl(hdr);
435 tx->tid_tspec = qc[0] & 0xf; 431 tx_cmd->tid_tspec = qc[0] & 0xf;
436 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 432 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
437 } else { 433 } else {
438 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 434 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
439 } 435 }
440 436
441 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 437 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
442 tx_flags |= TX_CMD_FLG_RTS_MSK;
443 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
444 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
445 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
446 tx_flags |= TX_CMD_FLG_CTS_MSK;
447 }
448 438
449 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) 439 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
450 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; 440 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
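The open-coded RTS/CTS selection removed above is replaced by a call through priv->cfg->ops->utils->rts_tx_cmd_flag(), so 3945 and the newer iwlwifi drivers can share one helper. A sketch of what such a helper plausibly looks like, reconstructed from the removed branch (the function name and its exact home are assumptions, not copied from iwl-core):

/* Sketch of a shared rts_tx_cmd_flag()-style helper; reconstructed from
 * the removed open-coded branch above, so treat it as illustrative. */
static void example_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
				    __le32 *tx_flags)
{
	u8 rc_flags = info->control.rates[0].flags;

	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;	/* protect with RTS/CTS */
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
	} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;	/* CTS-to-self */
	}
}
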
@@ -452,19 +442,16 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
452 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 442 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
453 if (ieee80211_is_mgmt(fc)) { 443 if (ieee80211_is_mgmt(fc)) {
454 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 444 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
455 tx->timeout.pm_frame_timeout = cpu_to_le16(3); 445 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
456 else 446 else
457 tx->timeout.pm_frame_timeout = cpu_to_le16(2); 447 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
458 } else { 448 } else {
459 tx->timeout.pm_frame_timeout = 0; 449 tx_cmd->timeout.pm_frame_timeout = 0;
460#ifdef CONFIG_IWLWIFI_LEDS
461 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
462#endif
463 } 450 }
464 451
465 tx->driver_txop = 0; 452 tx_cmd->driver_txop = 0;
466 tx->tx_flags = tx_flags; 453 tx_cmd->tx_flags = tx_flags;
467 tx->next_frame_len = 0; 454 tx_cmd->next_frame_len = 0;
468} 455}
469 456
470/* 457/*
@@ -474,7 +461,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
474{ 461{
475 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 462 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
476 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 463 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
477 struct iwl3945_tx_cmd *tx; 464 struct iwl3945_tx_cmd *tx_cmd;
478 struct iwl_tx_queue *txq = NULL; 465 struct iwl_tx_queue *txq = NULL;
479 struct iwl_queue *q = NULL; 466 struct iwl_queue *q = NULL;
480 struct iwl_device_cmd *out_cmd; 467 struct iwl_device_cmd *out_cmd;
@@ -492,7 +479,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
492 u8 wait_write_ptr = 0; 479 u8 wait_write_ptr = 0;
493 u8 *qc = NULL; 480 u8 *qc = NULL;
494 unsigned long flags; 481 unsigned long flags;
495 int rc;
496 482
497 spin_lock_irqsave(&priv->lock, flags); 483 spin_lock_irqsave(&priv->lock, flags);
498 if (iwl_is_rfkill(priv)) { 484 if (iwl_is_rfkill(priv)) {
@@ -562,6 +548,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
562 txq = &priv->txq[txq_id]; 548 txq = &priv->txq[txq_id];
563 q = &txq->q; 549 q = &txq->q;
564 550
551 if ((iwl_queue_space(q) < q->high_mark))
552 goto drop;
553
565 spin_lock_irqsave(&priv->lock, flags); 554 spin_lock_irqsave(&priv->lock, flags);
566 555
567 idx = get_cmd_index(q, q->write_ptr, 0); 556 idx = get_cmd_index(q, q->write_ptr, 0);
@@ -573,9 +562,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
573 /* Init first empty entry in queue's array of Tx/cmd buffers */ 562 /* Init first empty entry in queue's array of Tx/cmd buffers */
574 out_cmd = txq->cmd[idx]; 563 out_cmd = txq->cmd[idx];
575 out_meta = &txq->meta[idx]; 564 out_meta = &txq->meta[idx];
576 tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; 565 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
577 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 566 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
578 memset(tx, 0, sizeof(*tx)); 567 memset(tx_cmd, 0, sizeof(*tx_cmd));
579 568
580 /* 569 /*
581 * Set up the Tx-command (not MAC!) header. 570 * Set up the Tx-command (not MAC!) header.
@@ -588,7 +577,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
588 INDEX_TO_SEQ(q->write_ptr))); 577 INDEX_TO_SEQ(q->write_ptr)));
589 578
590 /* Copy MAC header from skb into command buffer */ 579 /* Copy MAC header from skb into command buffer */
591 memcpy(tx->hdr, hdr, hdr_len); 580 memcpy(tx_cmd->hdr, hdr, hdr_len);
592 581
593 582
594 if (info->control.hw_key) 583 if (info->control.hw_key)
@@ -602,12 +591,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
602 591
603 /* Total # bytes to be transmitted */ 592 /* Total # bytes to be transmitted */
604 len = (u16)skb->len; 593 len = (u16)skb->len;
605 tx->len = cpu_to_le16(len); 594 tx_cmd->len = cpu_to_le16(len);
606 595
607 iwl_dbg_log_tx_data_frame(priv, len, hdr); 596 iwl_dbg_log_tx_data_frame(priv, len, hdr);
608 iwl_update_stats(priv, true, fc, len); 597 iwl_update_stats(priv, true, fc, len);
609 tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 598 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
610 tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 599 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
611 600
612 if (!ieee80211_has_morefrags(hdr->frame_control)) { 601 if (!ieee80211_has_morefrags(hdr->frame_control)) {
613 txq->need_update = 1; 602 txq->need_update = 1;
@@ -620,9 +609,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
620 609
621 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", 610 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
622 le16_to_cpu(out_cmd->hdr.sequence)); 611 le16_to_cpu(out_cmd->hdr.sequence));
623 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags)); 612 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
624 iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx)); 613 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
625 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr, 614 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
626 ieee80211_hdrlen(fc)); 615 ieee80211_hdrlen(fc));
627 616
628 /* 617 /*
@@ -674,12 +663,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
674 663
675 /* Tell device the write index *just past* this latest filled TFD */ 664 /* Tell device the write index *just past* this latest filled TFD */
676 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 665 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
677 rc = iwl_txq_update_write_ptr(priv, txq); 666 iwl_txq_update_write_ptr(priv, txq);
678 spin_unlock_irqrestore(&priv->lock, flags); 667 spin_unlock_irqrestore(&priv->lock, flags);
679 668
680 if (rc)
681 return rc;
682
683 if ((iwl_queue_space(q) < q->high_mark) 669 if ((iwl_queue_space(q) < q->high_mark)
684 && priv->mac80211_registered) { 670 && priv->mac80211_registered) {
685 if (wait_write_ptr) { 671 if (wait_write_ptr) {
@@ -700,10 +686,6 @@ drop:
700 return -1; 686 return -1;
701} 687}
702 688
703#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
704
705#include "iwl-spectrum.h"
706
707#define BEACON_TIME_MASK_LOW 0x00FFFFFF 689#define BEACON_TIME_MASK_LOW 0x00FFFFFF
708#define BEACON_TIME_MASK_HIGH 0xFF000000 690#define BEACON_TIME_MASK_HIGH 0xFF000000
709#define TIME_UNIT 1024 691#define TIME_UNIT 1024
@@ -758,7 +740,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
758 u8 type) 740 u8 type)
759{ 741{
760 struct iwl_spectrum_cmd spectrum; 742 struct iwl_spectrum_cmd spectrum;
761 struct iwl_rx_packet *res; 743 struct iwl_rx_packet *pkt;
762 struct iwl_host_cmd cmd = { 744 struct iwl_host_cmd cmd = {
763 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 745 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
764 .data = (void *)&spectrum, 746 .data = (void *)&spectrum,
@@ -803,18 +785,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
803 if (rc) 785 if (rc)
804 return rc; 786 return rc;
805 787
806 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 788 pkt = (struct iwl_rx_packet *)cmd.reply_page;
807 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 789 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
808 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); 790 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
809 rc = -EIO; 791 rc = -EIO;
810 } 792 }
811 793
812 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); 794 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
813 switch (spectrum_resp_status) { 795 switch (spectrum_resp_status) {
814 case 0: /* Command will be handled */ 796 case 0: /* Command will be handled */
815 if (res->u.spectrum.id != 0xff) { 797 if (pkt->u.spectrum.id != 0xff) {
816 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", 798 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
817 res->u.spectrum.id); 799 pkt->u.spectrum.id);
818 priv->measurement_status &= ~MEASUREMENT_READY; 800 priv->measurement_status &= ~MEASUREMENT_READY;
819 } 801 }
820 priv->measurement_status |= MEASUREMENT_ACTIVE; 802 priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -826,16 +808,15 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
826 break; 808 break;
827 } 809 }
828 810
829 dev_kfree_skb_any(cmd.reply_skb); 811 iwl_free_pages(priv, cmd.reply_page);
830 812
831 return rc; 813 return rc;
832} 814}
833#endif
834 815
835static void iwl3945_rx_reply_alive(struct iwl_priv *priv, 816static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
836 struct iwl_rx_mem_buffer *rxb) 817 struct iwl_rx_mem_buffer *rxb)
837{ 818{
838 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 819 struct iwl_rx_packet *pkt = rxb_addr(rxb);
839 struct iwl_alive_resp *palive; 820 struct iwl_alive_resp *palive;
840 struct delayed_work *pwork; 821 struct delayed_work *pwork;
841 822
@@ -872,7 +853,7 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
872 struct iwl_rx_mem_buffer *rxb) 853 struct iwl_rx_mem_buffer *rxb)
873{ 854{
874#ifdef CONFIG_IWLWIFI_DEBUG 855#ifdef CONFIG_IWLWIFI_DEBUG
875 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 856 struct iwl_rx_packet *pkt = rxb_addr(rxb);
876#endif 857#endif
877 858
878 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 859 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
@@ -908,7 +889,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
908 struct iwl_rx_mem_buffer *rxb) 889 struct iwl_rx_mem_buffer *rxb)
909{ 890{
910#ifdef CONFIG_IWLWIFI_DEBUG 891#ifdef CONFIG_IWLWIFI_DEBUG
911 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 892 struct iwl_rx_packet *pkt = rxb_addr(rxb);
912 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 893 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
913 u8 rate = beacon->beacon_notify_hdr.rate; 894 u8 rate = beacon->beacon_notify_hdr.rate;
914 895
@@ -931,7 +912,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
931static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, 912static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
932 struct iwl_rx_mem_buffer *rxb) 913 struct iwl_rx_mem_buffer *rxb)
933{ 914{
934 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 915 struct iwl_rx_packet *pkt = rxb_addr(rxb);
935 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 916 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
936 unsigned long status = priv->status; 917 unsigned long status = priv->status;
937 918
@@ -973,6 +954,8 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
973 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; 954 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
974 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; 955 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
975 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 956 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
957 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
958 iwl_rx_spectrum_measure_notif;
976 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 959 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
977 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 960 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
978 iwl_rx_pm_debug_statistics_notif; 961 iwl_rx_pm_debug_statistics_notif;
@@ -986,7 +969,6 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
986 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics; 969 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
987 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; 970 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
988 971
989 iwl_setup_spectrum_handlers(priv);
990 iwl_setup_rx_scan_handlers(priv); 972 iwl_setup_rx_scan_handlers(priv);
991 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; 973 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
992 974
@@ -1078,13 +1060,13 @@ static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
1078 * also updates the memory address in the firmware to reference the new 1060 * also updates the memory address in the firmware to reference the new
1079 * target buffer. 1061 * target buffer.
1080 */ 1062 */
1081static int iwl3945_rx_queue_restock(struct iwl_priv *priv) 1063static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
1082{ 1064{
1083 struct iwl_rx_queue *rxq = &priv->rxq; 1065 struct iwl_rx_queue *rxq = &priv->rxq;
1084 struct list_head *element; 1066 struct list_head *element;
1085 struct iwl_rx_mem_buffer *rxb; 1067 struct iwl_rx_mem_buffer *rxb;
1086 unsigned long flags; 1068 unsigned long flags;
1087 int write, rc; 1069 int write;
1088 1070
1089 spin_lock_irqsave(&rxq->lock, flags); 1071 spin_lock_irqsave(&rxq->lock, flags);
1090 write = rxq->write & ~0x7; 1072 write = rxq->write & ~0x7;
@@ -1095,7 +1077,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
1095 list_del(element); 1077 list_del(element);
1096 1078
1097 /* Point to Rx buffer via next RBD in circular buffer */ 1079 /* Point to Rx buffer via next RBD in circular buffer */
1098 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr); 1080 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
1099 rxq->queue[rxq->write] = rxb; 1081 rxq->queue[rxq->write] = rxb;
1100 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 1082 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
1101 rxq->free_count--; 1083 rxq->free_count--;
@@ -1114,12 +1096,8 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
1114 spin_lock_irqsave(&rxq->lock, flags); 1096 spin_lock_irqsave(&rxq->lock, flags);
1115 rxq->need_update = 1; 1097 rxq->need_update = 1;
1116 spin_unlock_irqrestore(&rxq->lock, flags); 1098 spin_unlock_irqrestore(&rxq->lock, flags);
1117 rc = iwl_rx_queue_update_write_ptr(priv, rxq); 1099 iwl_rx_queue_update_write_ptr(priv, rxq);
1118 if (rc)
1119 return rc;
1120 } 1100 }
1121
1122 return 0;
1123} 1101}
1124 1102
1125/** 1103/**
@@ -1135,8 +1113,9 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1135 struct iwl_rx_queue *rxq = &priv->rxq; 1113 struct iwl_rx_queue *rxq = &priv->rxq;
1136 struct list_head *element; 1114 struct list_head *element;
1137 struct iwl_rx_mem_buffer *rxb; 1115 struct iwl_rx_mem_buffer *rxb;
1138 struct sk_buff *skb; 1116 struct page *page;
1139 unsigned long flags; 1117 unsigned long flags;
1118 gfp_t gfp_mask = priority;
1140 1119
1141 while (1) { 1120 while (1) {
1142 spin_lock_irqsave(&rxq->lock, flags); 1121 spin_lock_irqsave(&rxq->lock, flags);
@@ -1148,10 +1127,14 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1148 spin_unlock_irqrestore(&rxq->lock, flags); 1127 spin_unlock_irqrestore(&rxq->lock, flags);
1149 1128
1150 if (rxq->free_count > RX_LOW_WATERMARK) 1129 if (rxq->free_count > RX_LOW_WATERMARK)
1151 priority |= __GFP_NOWARN; 1130 gfp_mask |= __GFP_NOWARN;
1131
1132 if (priv->hw_params.rx_page_order > 0)
1133 gfp_mask |= __GFP_COMP;
1134
1152 /* Alloc a new receive buffer */ 1135 /* Alloc a new receive buffer */
1153 skb = alloc_skb(priv->hw_params.rx_buf_size, priority); 1136 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1154 if (!skb) { 1137 if (!page) {
1155 if (net_ratelimit()) 1138 if (net_ratelimit())
1156 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); 1139 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1157 if ((rxq->free_count <= RX_LOW_WATERMARK) && 1140 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
@@ -1168,7 +1151,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1168 spin_lock_irqsave(&rxq->lock, flags); 1151 spin_lock_irqsave(&rxq->lock, flags);
1169 if (list_empty(&rxq->rx_used)) { 1152 if (list_empty(&rxq->rx_used)) {
1170 spin_unlock_irqrestore(&rxq->lock, flags); 1153 spin_unlock_irqrestore(&rxq->lock, flags);
1171 dev_kfree_skb_any(skb); 1154 __free_pages(page, priv->hw_params.rx_page_order);
1172 return; 1155 return;
1173 } 1156 }
1174 element = rxq->rx_used.next; 1157 element = rxq->rx_used.next;
@@ -1176,26 +1159,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1176 list_del(element); 1159 list_del(element);
1177 spin_unlock_irqrestore(&rxq->lock, flags); 1160 spin_unlock_irqrestore(&rxq->lock, flags);
1178 1161
1179 rxb->skb = skb; 1162 rxb->page = page;
1180
1181 /* If radiotap head is required, reserve some headroom here.
1182 * The physical head count is a variable rx_stats->phy_count.
1183 * We reserve 4 bytes here. Plus these extra bytes, the
1184 * headroom of the physical head should be enough for the
1185 * radiotap head that iwl3945 supported. See iwl3945_rt.
1186 */
1187 skb_reserve(rxb->skb, 4);
1188
1189 /* Get physical address of RB/SKB */ 1163 /* Get physical address of RB/SKB */
1190 rxb->real_dma_addr = pci_map_single(priv->pci_dev, 1164 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1191 rxb->skb->data, 1165 PAGE_SIZE << priv->hw_params.rx_page_order,
1192 priv->hw_params.rx_buf_size, 1166 PCI_DMA_FROMDEVICE);
1193 PCI_DMA_FROMDEVICE);
1194 1167
1195 spin_lock_irqsave(&rxq->lock, flags); 1168 spin_lock_irqsave(&rxq->lock, flags);
1169
1196 list_add_tail(&rxb->list, &rxq->rx_free); 1170 list_add_tail(&rxb->list, &rxq->rx_free);
1197 priv->alloc_rxb_skb++;
1198 rxq->free_count++; 1171 rxq->free_count++;
1172 priv->alloc_rxb_page++;
1173
1199 spin_unlock_irqrestore(&rxq->lock, flags); 1174 spin_unlock_irqrestore(&rxq->lock, flags);
1200 } 1175 }
1201} 1176}
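The RX allocation path above moves from per-buffer alloc_skb() to page-backed buffers: alloc_pages() with order priv->hw_params.rx_page_order, then pci_map_page() for device-to-CPU DMA. A minimal sketch of that allocation pattern, assuming a hypothetical rx_buf container (not the driver's real struct iwl_rx_mem_buffer):

#include <linux/gfp.h>
#include <linux/pci.h>

/* Hypothetical container, standing in for struct iwl_rx_mem_buffer. */
struct rx_buf {
	struct page *page;
	dma_addr_t page_dma;
};

/* Allocate one page-backed RX buffer of 2^order pages and map it for
 * device->CPU DMA.  Returns 0 on success, -ENOMEM on failure. */
static int rx_buf_alloc(struct pci_dev *pdev, struct rx_buf *rxb,
			unsigned int order, gfp_t gfp)
{
	if (order > 0)
		gfp |= __GFP_COMP;	/* multi-page buffers need a compound page */

	rxb->page = alloc_pages(gfp, order);
	if (!rxb->page)
		return -ENOMEM;

	rxb->page_dma = pci_map_page(pdev, rxb->page, 0,
				     PAGE_SIZE << order, PCI_DMA_FROMDEVICE);
	return 0;
}
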
@@ -1211,14 +1186,12 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1211 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 1186 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1212 /* In the reset function, these buffers may have been allocated 1187 /* In the reset function, these buffers may have been allocated
1213 * to an SKB, so we need to unmap and free potential storage */ 1188 * to an SKB, so we need to unmap and free potential storage */
1214 if (rxq->pool[i].skb != NULL) { 1189 if (rxq->pool[i].page != NULL) {
1215 pci_unmap_single(priv->pci_dev, 1190 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1216 rxq->pool[i].real_dma_addr, 1191 PAGE_SIZE << priv->hw_params.rx_page_order,
1217 priv->hw_params.rx_buf_size, 1192 PCI_DMA_FROMDEVICE);
1218 PCI_DMA_FROMDEVICE); 1193 __iwl_free_pages(priv, rxq->pool[i].page);
1219 priv->alloc_rxb_skb--; 1194 rxq->pool[i].page = NULL;
1220 dev_kfree_skb(rxq->pool[i].skb);
1221 rxq->pool[i].skb = NULL;
1222 } 1195 }
1223 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1196 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1224 } 1197 }
@@ -1226,8 +1199,8 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1226 /* Set us so that we have processed and used all buffers, but have 1199 /* Set us so that we have processed and used all buffers, but have
1227 * not restocked the Rx queue with fresh buffers */ 1200 * not restocked the Rx queue with fresh buffers */
1228 rxq->read = rxq->write = 0; 1201 rxq->read = rxq->write = 0;
1229 rxq->free_count = 0;
1230 rxq->write_actual = 0; 1202 rxq->write_actual = 0;
1203 rxq->free_count = 0;
1231 spin_unlock_irqrestore(&rxq->lock, flags); 1204 spin_unlock_irqrestore(&rxq->lock, flags);
1232} 1205}
1233 1206
@@ -1260,19 +1233,19 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1260{ 1233{
1261 int i; 1234 int i;
1262 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 1235 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1263 if (rxq->pool[i].skb != NULL) { 1236 if (rxq->pool[i].page != NULL) {
1264 pci_unmap_single(priv->pci_dev, 1237 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1265 rxq->pool[i].real_dma_addr, 1238 PAGE_SIZE << priv->hw_params.rx_page_order,
1266 priv->hw_params.rx_buf_size, 1239 PCI_DMA_FROMDEVICE);
1267 PCI_DMA_FROMDEVICE); 1240 __iwl_free_pages(priv, rxq->pool[i].page);
1268 dev_kfree_skb(rxq->pool[i].skb); 1241 rxq->pool[i].page = NULL;
1269 } 1242 }
1270 } 1243 }
1271 1244
1272 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, 1245 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1273 rxq->dma_addr); 1246 rxq->dma_addr);
1274 pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status), 1247 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1275 rxq->rb_stts, rxq->rb_stts_dma); 1248 rxq->rb_stts, rxq->rb_stts_dma);
1276 rxq->bd = NULL; 1249 rxq->bd = NULL;
1277 rxq->rb_stts = NULL; 1250 rxq->rb_stts = NULL;
1278} 1251}
@@ -1315,47 +1288,6 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
1315 return (int)ratio2dB[sig_ratio]; 1288 return (int)ratio2dB[sig_ratio];
1316} 1289}
1317 1290
1318#define PERFECT_RSSI (-20) /* dBm */
1319#define WORST_RSSI (-95) /* dBm */
1320#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
1321
1322/* Calculate an indication of rx signal quality (a percentage, not dBm!).
1323 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
1324 * about formulas used below. */
1325int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
1326{
1327 int sig_qual;
1328 int degradation = PERFECT_RSSI - rssi_dbm;
1329
1330 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
1331 * as indicator; formula is (signal dbm - noise dbm).
1332 * SNR at or above 40 is a great signal (100%).
1333 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
1334 * Weakest usable signal is usually 10 - 15 dB SNR. */
1335 if (noise_dbm) {
1336 if (rssi_dbm - noise_dbm >= 40)
1337 return 100;
1338 else if (rssi_dbm < noise_dbm)
1339 return 0;
1340 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
1341
1342 /* Else use just the signal level.
1343 * This formula is a least squares fit of data points collected and
1344 * compared with a reference system that had a percentage (%) display
1345 * for signal quality. */
1346 } else
1347 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
1348 (15 * RSSI_RANGE + 62 * degradation)) /
1349 (RSSI_RANGE * RSSI_RANGE);
1350
1351 if (sig_qual > 100)
1352 sig_qual = 100;
1353 else if (sig_qual < 1)
1354 sig_qual = 0;
1355
1356 return sig_qual;
1357}
1358
1359/** 1291/**
1360 * iwl3945_rx_handle - Main entry function for receiving responses from uCode 1292 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
1361 * 1293 *
@@ -1381,7 +1313,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1381 i = rxq->read; 1313 i = rxq->read;
1382 1314
1383 /* calculate total frames need to be restock after handling RX */ 1315 /* calculate total frames need to be restock after handling RX */
1384 total_empty = r - priv->rxq.write_actual; 1316 total_empty = r - rxq->write_actual;
1385 if (total_empty < 0) 1317 if (total_empty < 0)
1386 total_empty += RX_QUEUE_SIZE; 1318 total_empty += RX_QUEUE_SIZE;
1387 1319
@@ -1401,10 +1333,13 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1401 1333
1402 rxq->queue[i] = NULL; 1334 rxq->queue[i] = NULL;
1403 1335
1404 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, 1336 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1405 priv->hw_params.rx_buf_size, 1337 PAGE_SIZE << priv->hw_params.rx_page_order,
1406 PCI_DMA_FROMDEVICE); 1338 PCI_DMA_FROMDEVICE);
1407 pkt = (struct iwl_rx_packet *)rxb->skb->data; 1339 pkt = rxb_addr(rxb);
1340
1341 trace_iwlwifi_dev_rx(priv, pkt,
1342 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
1408 1343
1409 /* Reclaim a command buffer only if this packet is a response 1344 /* Reclaim a command buffer only if this packet is a response
1410 * to a (driver-originated) command. 1345 * to a (driver-originated) command.
@@ -1422,44 +1357,55 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1422 if (priv->rx_handlers[pkt->hdr.cmd]) { 1357 if (priv->rx_handlers[pkt->hdr.cmd]) {
1423 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, 1358 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1424 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1359 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1425 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1426 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1360 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1361 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1427 } else { 1362 } else {
1428 /* No handling needed */ 1363 /* No handling needed */
1429 IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n", 1364 IWL_DEBUG_RX(priv,
1365 "r %d i %d No handler needed for %s, 0x%02x\n",
1430 r, i, get_cmd_string(pkt->hdr.cmd), 1366 r, i, get_cmd_string(pkt->hdr.cmd),
1431 pkt->hdr.cmd); 1367 pkt->hdr.cmd);
1432 } 1368 }
1433 1369
1370 /*
1371 * XXX: After here, we should always check rxb->page
1372 * against NULL before touching it or its virtual
1373 * memory (pkt). Because some rx_handler might have
1374 * already taken or freed the pages.
1375 */
1376
1434 if (reclaim) { 1377 if (reclaim) {
1435 /* Invoke any callbacks, transfer the skb to caller, and 1378 /* Invoke any callbacks, transfer the buffer to caller,
1436 * fire off the (possibly) blocking iwl_send_cmd() 1379 * and fire off the (possibly) blocking iwl_send_cmd()
1437 * as we reclaim the driver command queue */ 1380 * as we reclaim the driver command queue */
1438 if (rxb && rxb->skb) 1381 if (rxb->page)
1439 iwl_tx_cmd_complete(priv, rxb); 1382 iwl_tx_cmd_complete(priv, rxb);
1440 else 1383 else
1441 IWL_WARN(priv, "Claim null rxb?\n"); 1384 IWL_WARN(priv, "Claim null rxb?\n");
1442 } 1385 }
1443 1386
1444 /* For now we just don't re-use anything. We can tweak this 1387 /* Reuse the page if possible. For notification packets and
1445 * later to try and re-use notification packets and SKBs that 1388 * SKBs that fail to Rx correctly, add them back into the
1446 * fail to Rx correctly */ 1389 * rx_free list for reuse later. */
1447 if (rxb->skb != NULL) {
1448 priv->alloc_rxb_skb--;
1449 dev_kfree_skb_any(rxb->skb);
1450 rxb->skb = NULL;
1451 }
1452
1453 spin_lock_irqsave(&rxq->lock, flags); 1390 spin_lock_irqsave(&rxq->lock, flags);
1454 list_add_tail(&rxb->list, &priv->rxq.rx_used); 1391 if (rxb->page != NULL) {
1392 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1393 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1394 PCI_DMA_FROMDEVICE);
1395 list_add_tail(&rxb->list, &rxq->rx_free);
1396 rxq->free_count++;
1397 } else
1398 list_add_tail(&rxb->list, &rxq->rx_used);
1399
1455 spin_unlock_irqrestore(&rxq->lock, flags); 1400 spin_unlock_irqrestore(&rxq->lock, flags);
1401
1456 i = (i + 1) & RX_QUEUE_MASK; 1402 i = (i + 1) & RX_QUEUE_MASK;
1457 /* If there are a lot of unused frames, 1403 /* If there are a lot of unused frames,
1458 * restock the Rx queue so ucode won't assert. */ 1404 * restock the Rx queue so ucode won't assert. */
1459 if (fill_rx) { 1405 if (fill_rx) {
1460 count++; 1406 count++;
1461 if (count >= 8) { 1407 if (count >= 8) {
1462 priv->rxq.read = i; 1408 rxq->read = i;
1463 iwl3945_rx_replenish_now(priv); 1409 iwl3945_rx_replenish_now(priv);
1464 count = 0; 1410 count = 0;
1465 } 1411 }
@@ -1467,7 +1413,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1467 } 1413 }
1468 1414
1469 /* Backtrack one entry */ 1415 /* Backtrack one entry */
1470 priv->rxq.read = i; 1416 rxq->read = i;
1471 if (fill_rx) 1417 if (fill_rx)
1472 iwl3945_rx_replenish_now(priv); 1418 iwl3945_rx_replenish_now(priv);
1473 else 1419 else
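After each handler runs, the rx_handle hunk above re-maps the page and puts the buffer back on rx_free when the handler left rxb->page in place, and only falls back to rx_used (forcing a fresh allocation) when the handler consumed the page. A sketch of that ownership check, reusing the hypothetical rx_buf type from the allocation sketch earlier:

/* Decide how to recycle a buffer after its RX handler has run.  Returns
 * true if the page can go straight back onto the free list.  Assumes the
 * illustrative rx_buf type from the allocation sketch above. */
static bool rx_buf_recycle(struct pci_dev *pdev, struct rx_buf *rxb,
			   unsigned int order)
{
	if (!rxb->page)
		return false;	/* handler consumed the page: slot needs re-allocation */

	/* Handler left the page alone: map it again and reuse it. */
	rxb->page_dma = pci_map_page(pdev, rxb->page, 0,
				     PAGE_SIZE << order, PCI_DMA_FROMDEVICE);
	return true;
}
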
@@ -1482,7 +1428,6 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
1482 tasklet_kill(&priv->irq_tasklet); 1428 tasklet_kill(&priv->irq_tasklet);
1483} 1429}
1484 1430
1485#ifdef CONFIG_IWLWIFI_DEBUG
1486static const char *desc_lookup(int i) 1431static const char *desc_lookup(int i)
1487{ 1432{
1488 switch (i) { 1433 switch (i) {
@@ -1551,8 +1496,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1551 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1496 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1552 desc_lookup(desc), desc, time, blink1, blink2, 1497 desc_lookup(desc), desc, time, blink1, blink2,
1553 ilink1, ilink2, data1); 1498 ilink1, ilink2, data1);
1499 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
1500 0, blink1, blink2, ilink1, ilink2);
1554 } 1501 }
1555
1556} 1502}
1557 1503
1558#define EVENT_START_OFFSET (6 * sizeof(u32)) 1504#define EVENT_START_OFFSET (6 * sizeof(u32))
@@ -1561,17 +1507,19 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1561 * iwl3945_print_event_log - Dump error event log to syslog 1507 * iwl3945_print_event_log - Dump error event log to syslog
1562 * 1508 *
1563 */ 1509 */
1564static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, 1510static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1565 u32 num_events, u32 mode) 1511 u32 num_events, u32 mode,
1512 int pos, char **buf, size_t bufsz)
1566{ 1513{
1567 u32 i; 1514 u32 i;
1568 u32 base; /* SRAM byte address of event log header */ 1515 u32 base; /* SRAM byte address of event log header */
1569 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ 1516 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1570 u32 ptr; /* SRAM byte address of log data */ 1517 u32 ptr; /* SRAM byte address of log data */
1571 u32 ev, time, data; /* event log data */ 1518 u32 ev, time, data; /* event log data */
1519 unsigned long reg_flags;
1572 1520
1573 if (num_events == 0) 1521 if (num_events == 0)
1574 return; 1522 return pos;
1575 1523
1576 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1524 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1577 1525
@@ -1582,25 +1530,96 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1582 1530
1583 ptr = base + EVENT_START_OFFSET + (start_idx * event_size); 1531 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1584 1532
1533 /* Make sure device is powered up for SRAM reads */
1534 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1535 iwl_grab_nic_access(priv);
1536
1537 /* Set starting address; reads will auto-increment */
1538 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1539 rmb();
1540
1585 /* "time" is actually "data" for mode 0 (no timestamp). 1541 /* "time" is actually "data" for mode 0 (no timestamp).
1586 * place event id # at far right for easier visual parsing. */ 1542 * place event id # at far right for easier visual parsing. */
1587 for (i = 0; i < num_events; i++) { 1543 for (i = 0; i < num_events; i++) {
1588 ev = iwl_read_targ_mem(priv, ptr); 1544 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1589 ptr += sizeof(u32); 1545 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1590 time = iwl_read_targ_mem(priv, ptr);
1591 ptr += sizeof(u32);
1592 if (mode == 0) { 1546 if (mode == 0) {
1593 /* data, ev */ 1547 /* data, ev */
1594 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1548 if (bufsz) {
1549 pos += scnprintf(*buf + pos, bufsz - pos,
1550 "0x%08x:%04u\n",
1551 time, ev);
1552 } else {
1553 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1554 trace_iwlwifi_dev_ucode_event(priv, 0,
1555 time, ev);
1556 }
1595 } else { 1557 } else {
1596 data = iwl_read_targ_mem(priv, ptr); 1558 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1597 ptr += sizeof(u32); 1559 if (bufsz) {
1598 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev); 1560 pos += scnprintf(*buf + pos, bufsz - pos,
1561 "%010u:0x%08x:%04u\n",
1562 time, data, ev);
1563 } else {
1564 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
1565 time, data, ev);
1566 trace_iwlwifi_dev_ucode_event(priv, time,
1567 data, ev);
1568 }
1599 } 1569 }
1600 } 1570 }
1571
1572 /* Allow device to power down */
1573 iwl_release_nic_access(priv);
1574 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1575 return pos;
1601} 1576}
1602 1577
1603void iwl3945_dump_nic_event_log(struct iwl_priv *priv) 1578/**
1579 * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
1580 */
1581static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1582 u32 num_wraps, u32 next_entry,
1583 u32 size, u32 mode,
1584 int pos, char **buf, size_t bufsz)
1585{
1586 /*
1587 * display the newest DEFAULT_LOG_ENTRIES entries
1588 * i.e. the entries just before the next one that uCode would fill.
1589 */
1590 if (num_wraps) {
1591 if (next_entry < size) {
1592 pos = iwl3945_print_event_log(priv,
1593 capacity - (size - next_entry),
1594 size - next_entry, mode,
1595 pos, buf, bufsz);
1596 pos = iwl3945_print_event_log(priv, 0,
1597 next_entry, mode,
1598 pos, buf, bufsz);
1599 } else
1600 pos = iwl3945_print_event_log(priv, next_entry - size,
1601 size, mode,
1602 pos, buf, bufsz);
1603 } else {
1604 if (next_entry < size)
1605 pos = iwl3945_print_event_log(priv, 0,
1606 next_entry, mode,
1607 pos, buf, bufsz);
1608 else
1609 pos = iwl3945_print_event_log(priv, next_entry - size,
1610 size, mode,
1611 pos, buf, bufsz);
1612 }
1613 return pos;
1614}
1615
1616/* For sanity check only. Actual size is determined by uCode, typ. 512 */
1617#define IWL3945_MAX_EVENT_LOG_SIZE (512)
1618
1619#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
1620
1621int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1622 char **buf, bool display)
1604{ 1623{
1605 u32 base; /* SRAM byte address of event log header */ 1624 u32 base; /* SRAM byte address of event log header */
1606 u32 capacity; /* event log capacity in # entries */ 1625 u32 capacity; /* event log capacity in # entries */
@@ -1608,11 +1627,13 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1608 u32 num_wraps; /* # times uCode wrapped to top of log */ 1627 u32 num_wraps; /* # times uCode wrapped to top of log */
1609 u32 next_entry; /* index of next entry to be written by uCode */ 1628 u32 next_entry; /* index of next entry to be written by uCode */
1610 u32 size; /* # entries that we'll print */ 1629 u32 size; /* # entries that we'll print */
1630 int pos = 0;
1631 size_t bufsz = 0;
1611 1632
1612 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1633 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1613 if (!iwl3945_hw_valid_rtc_data_addr(base)) { 1634 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1614 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); 1635 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1615 return; 1636 return -EINVAL;
1616 } 1637 }
1617 1638
1618 /* event log header */ 1639 /* event log header */
@@ -1621,37 +1642,72 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1621 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1642 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1622 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1643 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1623 1644
1645 if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) {
1646 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1647 capacity, IWL3945_MAX_EVENT_LOG_SIZE);
1648 capacity = IWL3945_MAX_EVENT_LOG_SIZE;
1649 }
1650
1651 if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) {
1652 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1653 next_entry, IWL3945_MAX_EVENT_LOG_SIZE);
1654 next_entry = IWL3945_MAX_EVENT_LOG_SIZE;
1655 }
1656
1624 size = num_wraps ? capacity : next_entry; 1657 size = num_wraps ? capacity : next_entry;
1625 1658
1626 /* bail out if nothing in log */ 1659 /* bail out if nothing in log */
1627 if (size == 0) { 1660 if (size == 0) {
1628 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); 1661 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1629 return; 1662 return pos;
1630 } 1663 }
1631 1664
1632 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n", 1665#ifdef CONFIG_IWLWIFI_DEBUG
1633 size, num_wraps); 1666 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1634 1667 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1635 /* if uCode has wrapped back to top of log, start at the oldest entry, 1668 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1636 * i.e the next one that uCode would fill. */
1637 if (num_wraps)
1638 iwl3945_print_event_log(priv, next_entry,
1639 capacity - next_entry, mode);
1640
1641 /* (then/else) start at top of log */
1642 iwl3945_print_event_log(priv, 0, next_entry, mode);
1643
1644}
1645#else 1669#else
1646void iwl3945_dump_nic_event_log(struct iwl_priv *priv) 1670 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1647{ 1671 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1648} 1672#endif
1649 1673
1650void iwl3945_dump_nic_error_log(struct iwl_priv *priv) 1674 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
1651{ 1675 size);
1652}
1653 1676
1677#ifdef CONFIG_IWLWIFI_DEBUG
1678 if (display) {
1679 if (full_log)
1680 bufsz = capacity * 48;
1681 else
1682 bufsz = size * 48;
1683 *buf = kmalloc(bufsz, GFP_KERNEL);
1684 if (!*buf)
1685 return -ENOMEM;
1686 }
1687 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1688 /* if uCode has wrapped back to top of log,
1689 * start at the oldest entry,
1690 * i.e the next one that uCode would fill.
1691 */
1692 if (num_wraps)
1693 pos = iwl3945_print_event_log(priv, next_entry,
1694 capacity - next_entry, mode,
1695 pos, buf, bufsz);
1696
1697 /* (then/else) start at top of log */
1698 pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
1699 pos, buf, bufsz);
1700 } else
1701 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1702 next_entry, size, mode,
1703 pos, buf, bufsz);
1704#else
1705 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1706 next_entry, size, mode,
1707 pos, buf, bufsz);
1654#endif 1708#endif
1709 return pos;
1710}
1655 1711
1656static void iwl3945_irq_tasklet(struct iwl_priv *priv) 1712static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1657{ 1713{
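The reworked iwl3945_dump_nic_event_log() above either prints to syslog or, when `display` is set, renders into a kmalloc'd buffer via scnprintf(), threading a running `pos` against `bufsz` through iwl3945_print_event_log(). A small, runnable user-space sketch of that "same formatter, two sinks" pattern (plain C with snprintf standing in for the kernel's scnprintf; names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Append one event either to a buffer (bufsz != 0) or to stderr,
 * mirroring how pos/buf/bufsz are threaded through the dump code. */
static int log_event(unsigned int time, unsigned int ev,
		     int pos, char *buf, size_t bufsz)
{
	if (bufsz) {
		int left = (int)bufsz - pos;
		int n = snprintf(buf + pos, left, "0x%08x:%04u\n", time, ev);
		/* snprintf reports what it *wanted* to write; clamp like scnprintf */
		if (n > 0)
			pos += (n < left) ? n : left - 1;
	} else {
		fprintf(stderr, "0x%08x\t%04u\n", time, ev);
	}
	return pos;
}

int main(void)
{
	char buf[128];
	int pos = 0;

	pos = log_event(0x1234, 7, pos, buf, sizeof(buf));	/* to buffer */
	pos = log_event(0x5678, 8, pos, buf, sizeof(buf));
	fputs(buf, stdout);

	log_event(0x9abc, 9, 0, NULL, 0);			/* to stderr */
	return 0;
}
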
@@ -1685,6 +1741,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1685 } 1741 }
1686#endif 1742#endif
1687 1743
1744 spin_unlock_irqrestore(&priv->lock, flags);
1745
1688 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 1746 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1689 * atomic, make sure that inta covers all the interrupts that 1747 * atomic, make sure that inta covers all the interrupts that
1690 * we've discovered, even if FH interrupt came in just after 1748 * we've discovered, even if FH interrupt came in just after
@@ -1706,8 +1764,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1706 1764
1707 handled |= CSR_INT_BIT_HW_ERR; 1765 handled |= CSR_INT_BIT_HW_ERR;
1708 1766
1709 spin_unlock_irqrestore(&priv->lock, flags);
1710
1711 return; 1767 return;
1712 } 1768 }
1713 1769
@@ -1799,7 +1855,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1799 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1855 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1800 } 1856 }
1801#endif 1857#endif
1802 spin_unlock_irqrestore(&priv->lock, flags);
1803} 1858}
1804 1859
1805static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, 1860static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
@@ -1901,7 +1956,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1901{ 1956{
1902 int i; 1957 int i;
1903 1958
1904 for (i = 0; i < IWL_RATE_COUNT; i++) { 1959 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1905 rates[i].bitrate = iwl3945_rates[i].ieee * 5; 1960 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
1906 rates[i].hw_value = i; /* Rate scaling will work on indexes */ 1961 rates[i].hw_value = i; /* Rate scaling will work on indexes */
1907 rates[i].hw_value_short = i; 1962 rates[i].hw_value_short = i;
@@ -2158,6 +2213,14 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2158 IWL_UCODE_API(priv->ucode_ver), 2213 IWL_UCODE_API(priv->ucode_ver),
2159 IWL_UCODE_SERIAL(priv->ucode_ver)); 2214 IWL_UCODE_SERIAL(priv->ucode_ver));
2160 2215
2216 snprintf(priv->hw->wiphy->fw_version,
2217 sizeof(priv->hw->wiphy->fw_version),
2218 "%u.%u.%u.%u",
2219 IWL_UCODE_MAJOR(priv->ucode_ver),
2220 IWL_UCODE_MINOR(priv->ucode_ver),
2221 IWL_UCODE_API(priv->ucode_ver),
2222 IWL_UCODE_SERIAL(priv->ucode_ver));
2223
2161 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", 2224 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
2162 priv->ucode_ver); 2225 priv->ucode_ver);
2163 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", 2226 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
@@ -2458,7 +2521,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2458 priv->active_rate = priv->rates_mask; 2521 priv->active_rate = priv->rates_mask;
2459 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 2522 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
2460 2523
2461 iwl_power_update_mode(priv, false); 2524 iwl_power_update_mode(priv, true);
2462 2525
2463 if (iwl_is_associated(priv)) { 2526 if (iwl_is_associated(priv)) {
2464 struct iwl3945_rxon_cmd *active_rxon = 2527 struct iwl3945_rxon_cmd *active_rxon =
@@ -2479,7 +2542,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2479 2542
2480 iwl3945_reg_txpower_periodic(priv); 2543 iwl3945_reg_txpower_periodic(priv);
2481 2544
2482 iwl3945_led_register(priv); 2545 iwl_leds_init(priv);
2483 2546
2484 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2547 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2485 set_bit(STATUS_READY, &priv->status); 2548 set_bit(STATUS_READY, &priv->status);
@@ -2517,7 +2580,6 @@ static void __iwl3945_down(struct iwl_priv *priv)
2517 if (!exit_pending) 2580 if (!exit_pending)
2518 set_bit(STATUS_EXIT_PENDING, &priv->status); 2581 set_bit(STATUS_EXIT_PENDING, &priv->status);
2519 2582
2520 iwl3945_led_unregister(priv);
2521 iwl_clear_stations_table(priv); 2583 iwl_clear_stations_table(priv);
2522 2584
2523 /* Unblock any waiting calls */ 2585 /* Unblock any waiting calls */
@@ -2563,23 +2625,15 @@ static void __iwl3945_down(struct iwl_priv *priv)
2563 test_bit(STATUS_EXIT_PENDING, &priv->status) << 2625 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2564 STATUS_EXIT_PENDING; 2626 STATUS_EXIT_PENDING;
2565 2627
2566 priv->cfg->ops->lib->apm_ops.reset(priv);
2567 spin_lock_irqsave(&priv->lock, flags);
2568 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2569 spin_unlock_irqrestore(&priv->lock, flags);
2570
2571 iwl3945_hw_txq_ctx_stop(priv); 2628 iwl3945_hw_txq_ctx_stop(priv);
2572 iwl3945_hw_rxq_stop(priv); 2629 iwl3945_hw_rxq_stop(priv);
2573 2630
2574 iwl_write_prph(priv, APMG_CLK_DIS_REG, 2631 /* Power-down device's busmaster DMA clocks */
2575 APMG_CLK_VAL_DMA_CLK_RQT); 2632 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2576
2577 udelay(5); 2633 udelay(5);
2578 2634
2579 if (exit_pending) 2635 /* Stop the device, and put it in low power state */
2580 priv->cfg->ops->lib->apm_ops.stop(priv); 2636 priv->cfg->ops->lib->apm_ops.stop(priv);
2581 else
2582 priv->cfg->ops->lib->apm_ops.reset(priv);
2583 2637
2584 exit: 2638 exit:
2585 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2639 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2724,19 +2778,34 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
2724 mutex_unlock(&priv->mutex); 2778 mutex_unlock(&priv->mutex);
2725} 2779}
2726 2780
2781/*
2782 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2783 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2784 * *is* readable even when device has been SW_RESET into low power mode
2785 * (e.g. during RF KILL).
2786 */
2727static void iwl3945_rfkill_poll(struct work_struct *data) 2787static void iwl3945_rfkill_poll(struct work_struct *data)
2728{ 2788{
2729 struct iwl_priv *priv = 2789 struct iwl_priv *priv =
2730 container_of(data, struct iwl_priv, rfkill_poll.work); 2790 container_of(data, struct iwl_priv, rfkill_poll.work);
2791 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2792 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2793 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2731 2794
2732 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2795 if (new_rfkill != old_rfkill) {
2733 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2796 if (new_rfkill)
2734 else 2797 set_bit(STATUS_RF_KILL_HW, &priv->status);
2735 set_bit(STATUS_RF_KILL_HW, &priv->status); 2798 else
2799 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2736 2800
2737 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 2801 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2738 test_bit(STATUS_RF_KILL_HW, &priv->status));
2739 2802
2803 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2804 new_rfkill ? "disable radio" : "enable radio");
2805 }
2806
2807 /* Keep this running, even if radio now enabled. This will be
2808 * cancelled in mac_start() if system decides to start again */
2740 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 2809 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
2741 round_jiffies_relative(2 * HZ)); 2810 round_jiffies_relative(2 * HZ));
2742 2811
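The reworked poller above only calls wiphy_rfkill_set_hw_state() and logs when the hardware kill-switch bit actually changes, instead of reporting on every 2-second tick. A small, runnable sketch of that edge-triggered polling idea in plain C (read_switch() is just a stand-in for the CSR_GP_CNTRL read):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for reading CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW. */
static bool read_switch(int tick)
{
	return tick >= 3;	/* pretend the switch flips on at tick 3 */
}

int main(void)
{
	bool rfkill = false;	/* last state we reported */

	for (int tick = 0; tick < 6; tick++) {
		bool now = read_switch(tick);

		if (now != rfkill) {	/* report edges only, not every poll */
			rfkill = now;
			printf("tick %d: RF_KILL toggled -> %s radio\n",
			       tick, rfkill ? "disable" : "enable");
		}
	}
	return 0;
}
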
@@ -2898,7 +2967,8 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2898 * is marked passive, we can do active scanning if we 2967 * is marked passive, we can do active scanning if we
2899 * detect transmissions. 2968 * detect transmissions.
2900 */ 2969 */
2901 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; 2970 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2971 IWL_GOOD_CRC_TH_DISABLED;
2902 band = IEEE80211_BAND_5GHZ; 2972 band = IEEE80211_BAND_5GHZ;
2903 } else { 2973 } else {
2904 IWL_WARN(priv, "Invalid scan band count\n"); 2974 IWL_WARN(priv, "Invalid scan band count\n");
@@ -2957,18 +3027,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2957 mutex_unlock(&priv->mutex); 3027 mutex_unlock(&priv->mutex);
2958} 3028}
2959 3029
2960static void iwl3945_bg_up(struct work_struct *data)
2961{
2962 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
2963
2964 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2965 return;
2966
2967 mutex_lock(&priv->mutex);
2968 __iwl3945_up(priv);
2969 mutex_unlock(&priv->mutex);
2970}
2971
2972static void iwl3945_bg_restart(struct work_struct *data) 3030static void iwl3945_bg_restart(struct work_struct *data)
2973{ 3031{
2974 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); 3032 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -2985,7 +3043,13 @@ static void iwl3945_bg_restart(struct work_struct *data)
2985 ieee80211_restart_hw(priv->hw); 3043 ieee80211_restart_hw(priv->hw);
2986 } else { 3044 } else {
2987 iwl3945_down(priv); 3045 iwl3945_down(priv);
2988 queue_work(priv->workqueue, &priv->up); 3046
3047 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3048 return;
3049
3050 mutex_lock(&priv->mutex);
3051 __iwl3945_up(priv);
3052 mutex_unlock(&priv->mutex);
2989 } 3053 }
2990} 3054}
2991 3055
@@ -3152,6 +3216,8 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
3152 * no need to poll the killswitch state anymore */ 3216 * no need to poll the killswitch state anymore */
3153 cancel_delayed_work(&priv->rfkill_poll); 3217 cancel_delayed_work(&priv->rfkill_poll);
3154 3218
3219 iwl_led_start(priv);
3220
3155 priv->is_open = 1; 3221 priv->is_open = 1;
3156 IWL_DEBUG_MAC80211(priv, "leave\n"); 3222 IWL_DEBUG_MAC80211(priv, "leave\n");
3157 return 0; 3223 return 0;
@@ -3487,8 +3553,6 @@ static ssize_t store_filter_flags(struct device *d,
3487static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3553static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
3488 store_filter_flags); 3554 store_filter_flags);
3489 3555
3490#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
3491
3492static ssize_t show_measurement(struct device *d, 3556static ssize_t show_measurement(struct device *d,
3493 struct device_attribute *attr, char *buf) 3557 struct device_attribute *attr, char *buf)
3494{ 3558{
@@ -3558,7 +3622,6 @@ static ssize_t store_measurement(struct device *d,
3558 3622
3559static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, 3623static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3560 show_measurement, store_measurement); 3624 show_measurement, store_measurement);
3561#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
3562 3625
3563static ssize_t store_retry_rate(struct device *d, 3626static ssize_t store_retry_rate(struct device *d,
3564 struct device_attribute *attr, 3627 struct device_attribute *attr,
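Now that the spectrum-measurement attribute is built unconditionally, it is just another show/store pair bound by DEVICE_ATTR(), like filter_flags and retry_rate around it. A stripped-down sketch of that sysfs pattern follows; the attribute name "threshold" and the module-scope variable are hypothetical, chosen only to keep the example self-contained.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static unsigned int my_threshold;

static ssize_t show_threshold(struct device *d,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", my_threshold);
}

static ssize_t store_threshold(struct device *d,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u", &val) != 1)
		return -EINVAL;
	my_threshold = val;
	return count;	/* tell sysfs the whole write was consumed */
}

/* Creates dev_attr_threshold, which can then be listed in an attribute
 * array and registered with sysfs_create_group(), as done for
 * iwl3945_sysfs_entries[] further down in this file. */
static DEVICE_ATTR(threshold, S_IRUSR | S_IWUSR,
		   show_threshold, store_threshold);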
@@ -3606,7 +3669,7 @@ static ssize_t show_statistics(struct device *d,
3606 return -EAGAIN; 3669 return -EAGAIN;
3607 3670
3608 mutex_lock(&priv->mutex); 3671 mutex_lock(&priv->mutex);
3609 rc = iwl_send_statistics_request(priv, 0); 3672 rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
3610 mutex_unlock(&priv->mutex); 3673 mutex_unlock(&priv->mutex);
3611 3674
3612 if (rc) { 3675 if (rc) {
@@ -3707,7 +3770,6 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3707 3770
3708 init_waitqueue_head(&priv->wait_command_queue); 3771 init_waitqueue_head(&priv->wait_command_queue);
3709 3772
3710 INIT_WORK(&priv->up, iwl3945_bg_up);
3711 INIT_WORK(&priv->restart, iwl3945_bg_restart); 3773 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3712 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); 3774 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3713 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 3775 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
@@ -3741,9 +3803,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
3741 &dev_attr_dump_errors.attr, 3803 &dev_attr_dump_errors.attr,
3742 &dev_attr_flags.attr, 3804 &dev_attr_flags.attr,
3743 &dev_attr_filter_flags.attr, 3805 &dev_attr_filter_flags.attr,
3744#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
3745 &dev_attr_measurement.attr, 3806 &dev_attr_measurement.attr,
3746#endif
3747 &dev_attr_retry_rate.attr, 3807 &dev_attr_retry_rate.attr,
3748 &dev_attr_statistics.attr, 3808 &dev_attr_statistics.attr,
3749 &dev_attr_status.attr, 3809 &dev_attr_status.attr,
@@ -3769,7 +3829,6 @@ static struct ieee80211_ops iwl3945_hw_ops = {
3769 .config = iwl_mac_config, 3829 .config = iwl_mac_config,
3770 .configure_filter = iwl_configure_filter, 3830 .configure_filter = iwl_configure_filter,
3771 .set_key = iwl3945_mac_set_key, 3831 .set_key = iwl3945_mac_set_key,
3772 .get_tx_stats = iwl_mac_get_tx_stats,
3773 .conf_tx = iwl_mac_conf_tx, 3832 .conf_tx = iwl_mac_conf_tx,
3774 .reset_tsf = iwl_mac_reset_tsf, 3833 .reset_tsf = iwl_mac_reset_tsf,
3775 .bss_info_changed = iwl_bss_info_changed, 3834 .bss_info_changed = iwl_bss_info_changed,
@@ -3784,23 +3843,23 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3784 priv->retry_rate = 1; 3843 priv->retry_rate = 1;
3785 priv->ibss_beacon = NULL; 3844 priv->ibss_beacon = NULL;
3786 3845
3787 spin_lock_init(&priv->lock);
3788 spin_lock_init(&priv->sta_lock); 3846 spin_lock_init(&priv->sta_lock);
3789 spin_lock_init(&priv->hcmd_lock); 3847 spin_lock_init(&priv->hcmd_lock);
3790 3848
3791 INIT_LIST_HEAD(&priv->free_frames); 3849 INIT_LIST_HEAD(&priv->free_frames);
3792 3850
3793 mutex_init(&priv->mutex); 3851 mutex_init(&priv->mutex);
3852 mutex_init(&priv->sync_cmd_mutex);
3794 3853
3795 /* Clear the driver's (not device's) station table */ 3854 /* Clear the driver's (not device's) station table */
3796 iwl_clear_stations_table(priv); 3855 iwl_clear_stations_table(priv);
3797 3856
3798 priv->data_retry_limit = -1;
3799 priv->ieee_channels = NULL; 3857 priv->ieee_channels = NULL;
3800 priv->ieee_rates = NULL; 3858 priv->ieee_rates = NULL;
3801 priv->band = IEEE80211_BAND_2GHZ; 3859 priv->band = IEEE80211_BAND_2GHZ;
3802 3860
3803 priv->iw_mode = NL80211_IFTYPE_STATION; 3861 priv->iw_mode = NL80211_IFTYPE_STATION;
3862 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3804 3863
3805 iwl_reset_qos(priv); 3864 iwl_reset_qos(priv);
3806 3865
@@ -3854,18 +3913,18 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3854 /* Tell mac80211 our characteristics */ 3913 /* Tell mac80211 our characteristics */
3855 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3914 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3856 IEEE80211_HW_NOISE_DBM | 3915 IEEE80211_HW_NOISE_DBM |
3857 IEEE80211_HW_SPECTRUM_MGMT | 3916 IEEE80211_HW_SPECTRUM_MGMT;
3858 IEEE80211_HW_SUPPORTS_PS | 3917
3859 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3918 if (!priv->cfg->broken_powersave)
3919 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3920 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3860 3921
3861 hw->wiphy->interface_modes = 3922 hw->wiphy->interface_modes =
3862 BIT(NL80211_IFTYPE_STATION) | 3923 BIT(NL80211_IFTYPE_STATION) |
3863 BIT(NL80211_IFTYPE_ADHOC); 3924 BIT(NL80211_IFTYPE_ADHOC);
3864 3925
3865 hw->wiphy->custom_regulatory = true; 3926 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3866 3927 WIPHY_FLAG_DISABLE_BEACON_HINTS;
3867 /* Firmware does not support this */
3868 hw->wiphy->disable_beacon_hints = true;
3869 3928
3870 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3929 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3871 /* we create the 802.11 header and a zero-length SSID element */ 3930 /* we create the 802.11 header and a zero-length SSID element */
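The hunk above is part of the one-time mac80211 setup: the driver advertises only what it can actually honour (the power-save flags are skipped when the config marks powersave as broken, and beacon hints are disabled because, per the removed comment, the firmware does not support them) before registering the hw. A condensed sketch of that registration step, written against the mac80211 API of this kernel generation; the broken_powersave argument is a hypothetical stand-in for priv->cfg->broken_powersave.

#include <linux/bitops.h>
#include <net/mac80211.h>

static int my_setup_mac(struct ieee80211_hw *hw, bool broken_powersave)
{
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_SPECTRUM_MGMT;

	/* Only advertise powersave when the hardware revision supports it;
	 * mac80211 will otherwise try to use it. */
	if (!broken_powersave)
		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
			     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				     BIT(NL80211_IFTYPE_ADHOC);

	/* The driver supplies its own regulatory data and cannot act on
	 * beacon-derived channel hints. */
	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS;

	return ieee80211_register_hw(hw);
}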
@@ -3977,17 +4036,18 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3977 * PCI Tx retries from interfering with C3 CPU state */ 4036 * PCI Tx retries from interfering with C3 CPU state */
3978 pci_write_config_byte(pdev, 0x41, 0x00); 4037 pci_write_config_byte(pdev, 0x41, 0x00);
3979 4038
3980 /* this spin lock will be used in apm_ops.init and EEPROM access 4039 /* these spin locks will be used in apm_ops.init and EEPROM access
3981 * we should init now 4040 * we should init now
3982 */ 4041 */
3983 spin_lock_init(&priv->reg_lock); 4042 spin_lock_init(&priv->reg_lock);
4043 spin_lock_init(&priv->lock);
3984 4044
3985 /* amp init */ 4045 /*
3986 err = priv->cfg->ops->lib->apm_ops.init(priv); 4046 * stop and reset the on-board processor just in case it is in a
3987 if (err < 0) { 4047 * strange state ... like being left stranded by a primary kernel
3988 IWL_DEBUG_INFO(priv, "Failed to init the card\n"); 4048 * and this is now the kdump kernel trying to start up
3989 goto out_iounmap; 4049 */
3990 } 4050 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3991 4051
3992 /*********************** 4052 /***********************
3993 * 4. Read EEPROM 4053 * 4. Read EEPROM
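In the probe path above, the embedded processor is forced into reset with a bare register write before anything else touches the device; as the new comment explains, a previous kernel (for example the one a kdump kernel replaced) may have left it running in an unknown state. The sketch below illustrates that "quiesce first, initialize later" step in generic form; MY_CSR_RESET and MY_CSR_RESET_SW_BIT are made-up names, not the real CSR layout.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical register offset and bit, for illustration only. */
#define MY_CSR_RESET		0x020
#define MY_CSR_RESET_SW_BIT	0x00000080

static void my_early_reset(void __iomem *regs)
{
	/*
	 * Halt the on-board CPU before reading the EEPROM or loading
	 * firmware, so stale state from a previous kernel cannot
	 * interfere with this probe.
	 */
	writel(MY_CSR_RESET_SW_BIT, regs + MY_CSR_RESET);
}

In the hunk this write replaces the old apm_ops.init() call at this point in probe and runs before step 4 (Read EEPROM).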
@@ -4054,6 +4114,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4054 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]); 4114 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
4055 iwl3945_setup_deferred_work(priv); 4115 iwl3945_setup_deferred_work(priv);
4056 iwl3945_setup_rx_handlers(priv); 4116 iwl3945_setup_rx_handlers(priv);
4117 iwl_power_initialize(priv);
4057 4118
4058 /********************************* 4119 /*********************************
4059 * 8. Setup and Register mac80211 4120 * 8. Setup and Register mac80211
@@ -4124,6 +4185,15 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4124 iwl3945_down(priv); 4185 iwl3945_down(priv);
4125 } 4186 }
4126 4187
4188 /*
4189 * Make sure device is reset to low power before unloading driver.
4190 * This may be redundant with iwl_down(), but there are paths to
4191 * run iwl_down() without calling apm_ops.stop(), and there are
4192 * paths to avoid running iwl_down() at all before leaving driver.
4193 * This (inexpensive) call *makes sure* device is reset.
4194 */
4195 priv->cfg->ops->lib->apm_ops.stop(priv);
4196
4127 /* make sure we flush any pending irq or 4197 /* make sure we flush any pending irq or
4128 * tasklet for the driver 4198 * tasklet for the driver
4129 */ 4199 */
@@ -4226,18 +4296,19 @@ static void __exit iwl3945_exit(void)
4226 4296
4227MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); 4297MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
4228 4298
4229module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444); 4299module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4230MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 4300MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4231module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444); 4301module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
4232MODULE_PARM_DESC(swcrypto, 4302MODULE_PARM_DESC(swcrypto,
4233 "using software crypto (default 1 [software])\n"); 4303 "using software crypto (default 1 [software])\n");
4234#ifdef CONFIG_IWLWIFI_DEBUG 4304#ifdef CONFIG_IWLWIFI_DEBUG
4235module_param_named(debug, iwl_debug_level, uint, 0644); 4305module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
4236MODULE_PARM_DESC(debug, "debug output mask"); 4306MODULE_PARM_DESC(debug, "debug output mask");
4237#endif 4307#endif
4238module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444); 4308module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4309 int, S_IRUGO);
4239MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 4310MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
4240module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, 0444); 4311module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4241MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); 4312MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
4242 4313
4243module_exit(iwl3945_exit); 4314module_exit(iwl3945_exit);
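The final hunk only swaps raw octal permission literals (0444, 0644) for the symbolic S_IRUGO / S_IWUSR macros; the parameters themselves are unchanged. For readers unfamiliar with the pattern, here is a self-contained sketch with a hypothetical "poll_interval" parameter exposed read-only under /sys/module/<name>/parameters/ (the two-second default merely echoes the rfkill poll interval seen earlier in this diff).

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

struct my_mod_params {
	int poll_interval;
};

static struct my_mod_params my_params = {
	.poll_interval = 2,	/* seconds */
};

/* "poll_interval" is the name seen in sysfs and on the modprobe command
 * line; my_params.poll_interval is the variable it is bound to.
 * S_IRUGO == 0444: world-readable, not writable at runtime. */
module_param_named(poll_interval, my_params.poll_interval, int, S_IRUGO);
MODULE_PARM_DESC(poll_interval, "poll interval in seconds (default 2)");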