aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-17 16:15:55 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-17 16:15:55 -0500
commit8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
treea8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/net/wireless/iwlwifi
parent406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/net/wireless/iwlwifi')
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig53
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile36
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/Makefile13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h528
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c1114
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h74
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h3995
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c2439
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h917
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c597
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c224
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h43
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c1292
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c1670
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c2175
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c387
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h47
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c3370
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h433
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c1151
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c1577
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c1188
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c1486
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c471
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c693
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h128
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c1384
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c557
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h277
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h57
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c137
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h163
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h372
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c1264
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h126
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c931
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c463
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h70
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h156
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h177
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c278
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h73
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h126
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c190
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h138
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h227
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h41
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c856
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h161
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h111
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h652
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c141
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c243
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c180
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c403
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h113
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c377
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h451
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c1297
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c1358
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c1691
65 files changed, 514 insertions, 41064 deletions
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 5cf43236421..ad3bdba6bee 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,12 +1,11 @@
1config IWLWIFI 1config IWLAGN
2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) "
3 depends on PCI && MAC80211 && HAS_IOMEM 3 depends on PCI && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 select NEW_LEDS 5 select NEW_LEDS
6 select LEDS_CLASS 6 select LEDS_CLASS
7 select LEDS_TRIGGERS 7 select LEDS_TRIGGERS
8 select MAC80211_LEDS 8 select MAC80211_LEDS
9 select IWLDVM
10 ---help--- 9 ---help---
11 Select to build the driver supporting the: 10 Select to build the driver supporting the:
12 11
@@ -40,18 +39,14 @@ config IWLWIFI
40 If you want to compile the driver as a module ( = code which can be 39 If you want to compile the driver as a module ( = code which can be
41 inserted in and removed from the running kernel whenever you want), 40 inserted in and removed from the running kernel whenever you want),
42 say M here and read <file:Documentation/kbuild/modules.txt>. The 41 say M here and read <file:Documentation/kbuild/modules.txt>. The
43 module will be called iwlwifi. 42 module will be called iwlagn.
44
45config IWLDVM
46 tristate "Intel Wireless WiFi"
47 depends on IWLWIFI
48 43
49menu "Debugging Options" 44menu "Debugging Options"
50 depends on IWLWIFI 45 depends on IWLAGN
51 46
52config IWLWIFI_DEBUG 47config IWLWIFI_DEBUG
53 bool "Enable full debugging output in the iwlwifi driver" 48 bool "Enable full debugging output in the iwlagn driver"
54 depends on IWLWIFI 49 depends on IWLAGN
55 ---help--- 50 ---help---
56 This option will enable debug tracing output for the iwlwifi drivers 51 This option will enable debug tracing output for the iwlwifi drivers
57 52
@@ -59,13 +54,13 @@ config IWLWIFI_DEBUG
59 control which debug output is sent to the kernel log by setting the 54 control which debug output is sent to the kernel log by setting the
60 value in 55 value in
61 56
62 /sys/module/iwlwifi/parameters/debug 57 /sys/class/net/wlan0/device/debug_level
63 58
64 This entry will only exist if this option is enabled. 59 This entry will only exist if this option is enabled.
65 60
66 To set a value, simply echo an 8-byte hex value to the same file: 61 To set a value, simply echo an 8-byte hex value to the same file:
67 62
68 % echo 0x43fff > /sys/module/iwlwifi/parameters/debug 63 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
69 64
70 You can find the list of debug mask values in: 65 You can find the list of debug mask values in:
71 drivers/net/wireless/iwlwifi/iwl-debug.h 66 drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -75,8 +70,8 @@ config IWLWIFI_DEBUG
75 any problems you may encounter. 70 any problems you may encounter.
76 71
77config IWLWIFI_DEBUGFS 72config IWLWIFI_DEBUGFS
78 bool "iwlwifi debugfs support" 73 bool "iwlagn debugfs support"
79 depends on IWLWIFI && MAC80211_DEBUGFS 74 depends on IWLAGN && MAC80211_DEBUGFS
80 ---help--- 75 ---help---
81 Enable creation of debugfs files for the iwlwifi drivers. This 76 Enable creation of debugfs files for the iwlwifi drivers. This
82 is a low-impact option that allows getting insight into the 77 is a low-impact option that allows getting insight into the
@@ -84,13 +79,13 @@ config IWLWIFI_DEBUGFS
84 79
85config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE 80config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
86 bool "Experimental uCode support" 81 bool "Experimental uCode support"
87 depends on IWLWIFI && IWLWIFI_DEBUG 82 depends on IWLAGN && IWLWIFI_DEBUG
88 ---help--- 83 ---help---
89 Enable use of experimental ucode for testing and debugging. 84 Enable use of experimental ucode for testing and debugging.
90 85
91config IWLWIFI_DEVICE_TRACING 86config IWLWIFI_DEVICE_TRACING
92 bool "iwlwifi device access tracing" 87 bool "iwlwifi device access tracing"
93 depends on IWLWIFI 88 depends on IWLAGN
94 depends on EVENT_TRACING 89 depends on EVENT_TRACING
95 help 90 help
96 Say Y here to trace all commands, including TX frames and IO 91 Say Y here to trace all commands, including TX frames and IO
@@ -107,20 +102,19 @@ config IWLWIFI_DEVICE_TRACING
107 occur. 102 occur.
108endmenu 103endmenu
109 104
110config IWLWIFI_DEVICE_TESTMODE 105config IWLWIFI_DEVICE_SVTOOL
111 def_bool y 106 bool "iwlwifi device svtool support"
112 depends on IWLWIFI 107 depends on IWLAGN
113 depends on NL80211_TESTMODE 108 select NL80211_TESTMODE
114 help 109 help
115 This option enables the testmode support for iwlwifi device through 110 This option enables the svtool support for iwlwifi device through
116 NL80211_TESTMODE. This provide the capabilities of enable user space 111 NL80211_TESTMODE. svtool is a software validation tool that runs in
117 validation applications to interacts with the device through the 112 the user space and interacts with the device in the kernel space
118 generic netlink message via NL80211_TESTMODE channel. 113 through the generic netlink message via NL80211_TESTMODE channel.
119 114
120config IWLWIFI_P2P 115config IWL_P2P
121 def_bool y
122 bool "iwlwifi experimental P2P support" 116 bool "iwlwifi experimental P2P support"
123 depends on IWLWIFI 117 depends on IWLAGN
124 help 118 help
125 This option enables experimental P2P support for some devices 119 This option enables experimental P2P support for some devices
126 based on microcode support. Since P2P support is still under 120 based on microcode support. Since P2P support is still under
@@ -133,3 +127,4 @@ config IWLWIFI_P2P
133 support when it is loaded. 127 support when it is loaded.
134 128
135 Say Y only if you want to experiment with P2P. 129 Say Y only if you want to experiment with P2P.
130
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 170ec330d2a..48ab9142af3 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,19 +1,25 @@
1# common 1# AGN
2obj-$(CONFIG_IWLWIFI) += iwlwifi.o 2obj-$(CONFIG_IWLAGN) += iwlagn.o
3iwlwifi-objs += iwl-io.o 3iwlagn-objs := iwl-agn.o iwl-agn-rs.o
4iwlwifi-objs += iwl-drv.o 4iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
5iwlwifi-objs += iwl-debug.o 5iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
6iwlwifi-objs += iwl-notif-wait.o 6iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
8iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
9iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
10 7
11iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 8iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-power.o
12iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o 9iwlagn-objs += iwl-rx.o iwl-sta.o
10iwlagn-objs += iwl-scan.o iwl-led.o
11iwlagn-objs += iwl-agn-rxon.o
12iwlagn-objs += iwl-5000.o
13iwlagn-objs += iwl-6000.o
14iwlagn-objs += iwl-1000.o
15iwlagn-objs += iwl-2000.o
16iwlagn-objs += iwl-pci.o
17iwlagn-objs += iwl-trans.o iwl-trans-rx-pcie.o iwl-trans-tx-pcie.o
13 18
14ccflags-y += -D__CHECK_ENDIAN__ -I$(src) 19iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
15 20iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
16 21iwlagn-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
17obj-$(CONFIG_IWLDVM) += dvm/
18 22
19CFLAGS_iwl-devtrace.o := -I$(src) 23CFLAGS_iwl-devtrace.o := -I$(src)
24
25ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
deleted file mode 100644
index 5ff76b20414..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
1# DVM
2obj-$(CONFIG_IWLDVM) += iwldvm.o
3iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
4iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
5
6iwldvm-objs += power.o
7iwldvm-objs += scan.o led.o
8iwldvm-objs += rxon.o devices.o
9
10iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
11iwldvm-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += testmode.o
12
13ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
deleted file mode 100644
index 33b3ad2e546..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ /dev/null
@@ -1,528 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_agn_h__
64#define __iwl_agn_h__
65
66#include "iwl-config.h"
67
68#include "dev.h"
69
70/* The first 11 queues (0-10) are used otherwise */
71#define IWLAGN_FIRST_AMPDU_QUEUE 11
72
73/* AUX (TX during scan dwell) queue */
74#define IWL_AUX_QUEUE 10
75
76/* device operations */
77extern struct iwl_lib_ops iwl1000_lib;
78extern struct iwl_lib_ops iwl2000_lib;
79extern struct iwl_lib_ops iwl2030_lib;
80extern struct iwl_lib_ops iwl5000_lib;
81extern struct iwl_lib_ops iwl5150_lib;
82extern struct iwl_lib_ops iwl6000_lib;
83extern struct iwl_lib_ops iwl6030_lib;
84
85
86#define TIME_UNIT 1024
87
88/*****************************************************
89* DRIVER STATUS FUNCTIONS
90******************************************************/
91#define STATUS_RF_KILL_HW 0
92#define STATUS_CT_KILL 1
93#define STATUS_ALIVE 2
94#define STATUS_READY 3
95#define STATUS_EXIT_PENDING 5
96#define STATUS_STATISTICS 6
97#define STATUS_SCANNING 7
98#define STATUS_SCAN_ABORTING 8
99#define STATUS_SCAN_HW 9
100#define STATUS_FW_ERROR 10
101#define STATUS_CHANNEL_SWITCH_PENDING 11
102#define STATUS_SCAN_COMPLETE 12
103#define STATUS_POWER_PMI 13
104#define STATUS_SCAN_ROC_EXPIRED 14
105
106struct iwl_ucode_capabilities;
107
108extern struct ieee80211_ops iwlagn_hw_ops;
109
110static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
111{
112 hdr->op_code = cmd;
113 hdr->first_group = 0;
114 hdr->groups_num = 1;
115 hdr->data_valid = 1;
116}
117
118void iwl_down(struct iwl_priv *priv);
119void iwl_cancel_deferred_work(struct iwl_priv *priv);
120void iwlagn_prepare_restart(struct iwl_priv *priv);
121int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
122 struct iwl_rx_cmd_buffer *rxb,
123 struct iwl_device_cmd *cmd);
124
125bool iwl_check_for_ct_kill(struct iwl_priv *priv);
126
127void iwlagn_lift_passive_no_rx(struct iwl_priv *priv);
128
129/* MAC80211 */
130struct ieee80211_hw *iwl_alloc_all(void);
131int iwlagn_mac_setup_register(struct iwl_priv *priv,
132 const struct iwl_ucode_capabilities *capa);
133void iwlagn_mac_unregister(struct iwl_priv *priv);
134
135/* commands */
136int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
137int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
138 u32 flags, u16 len, const void *data);
139
140/* RXON */
141void iwl_connection_init_rx_config(struct iwl_priv *priv,
142 struct iwl_rxon_context *ctx);
143int iwlagn_set_pan_params(struct iwl_priv *priv);
144int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
145void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
146int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
147void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
148 struct ieee80211_vif *vif,
149 struct ieee80211_bss_conf *bss_conf,
150 u32 changes);
151void iwlagn_config_ht40(struct ieee80211_conf *conf,
152 struct iwl_rxon_context *ctx);
153void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
154void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
155 struct iwl_rxon_context *ctx);
156void iwl_set_flags_for_band(struct iwl_priv *priv,
157 struct iwl_rxon_context *ctx,
158 enum ieee80211_band band,
159 struct ieee80211_vif *vif);
160
161/* uCode */
162int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
163void iwl_send_prio_tbl(struct iwl_priv *priv);
164int iwl_init_alive_start(struct iwl_priv *priv);
165int iwl_run_init_ucode(struct iwl_priv *priv);
166int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
167 enum iwl_ucode_type ucode_type);
168int iwl_send_calib_results(struct iwl_priv *priv);
169int iwl_calib_set(struct iwl_priv *priv,
170 const struct iwl_calib_hdr *cmd, int len);
171void iwl_calib_free_results(struct iwl_priv *priv);
172int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
173 char **buf, bool display);
174int iwlagn_hw_valid_rtc_data_addr(u32 addr);
175
176/* lib */
177int iwlagn_send_tx_power(struct iwl_priv *priv);
178void iwlagn_temperature(struct iwl_priv *priv);
179int iwlagn_txfifo_flush(struct iwl_priv *priv);
180void iwlagn_dev_txfifo_flush(struct iwl_priv *priv);
181int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
182int iwl_send_statistics_request(struct iwl_priv *priv,
183 u8 flags, bool clear);
184
185static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
186 struct iwl_priv *priv, enum ieee80211_band band)
187{
188 return priv->hw->wiphy->bands[band];
189}
190
191#ifdef CONFIG_PM_SLEEP
192int iwlagn_send_patterns(struct iwl_priv *priv,
193 struct cfg80211_wowlan *wowlan);
194int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
195#endif
196
197/* rx */
198int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
199void iwl_setup_rx_handlers(struct iwl_priv *priv);
200void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
201
202
203/* tx */
204int iwlagn_tx_skb(struct iwl_priv *priv,
205 struct ieee80211_sta *sta,
206 struct sk_buff *skb);
207int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
208 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
209int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
210 struct ieee80211_sta *sta, u16 tid, u8 buf_size);
211int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
212 struct ieee80211_sta *sta, u16 tid);
213int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
214 struct iwl_rx_cmd_buffer *rxb,
215 struct iwl_device_cmd *cmd);
216int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
217 struct iwl_device_cmd *cmd);
218
219static inline u32 iwl_tx_status_to_mac80211(u32 status)
220{
221 status &= TX_STATUS_MSK;
222
223 switch (status) {
224 case TX_STATUS_SUCCESS:
225 case TX_STATUS_DIRECT_DONE:
226 return IEEE80211_TX_STAT_ACK;
227 case TX_STATUS_FAIL_DEST_PS:
228 case TX_STATUS_FAIL_PASSIVE_NO_RX:
229 return IEEE80211_TX_STAT_TX_FILTERED;
230 default:
231 return 0;
232 }
233}
234
235static inline bool iwl_is_tx_success(u32 status)
236{
237 status &= TX_STATUS_MSK;
238 return (status == TX_STATUS_SUCCESS) ||
239 (status == TX_STATUS_DIRECT_DONE);
240}
241
242u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
243
244/* scan */
245void iwlagn_post_scan(struct iwl_priv *priv);
246void iwlagn_disable_roc(struct iwl_priv *priv);
247int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
248void iwl_init_scan_params(struct iwl_priv *priv);
249int iwl_scan_cancel(struct iwl_priv *priv);
250void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
251void iwl_force_scan_end(struct iwl_priv *priv);
252void iwl_internal_short_hw_scan(struct iwl_priv *priv);
253void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
254void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
255void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
256int __must_check iwl_scan_initiate(struct iwl_priv *priv,
257 struct ieee80211_vif *vif,
258 enum iwl_scan_type scan_type,
259 enum ieee80211_band band);
260
261void iwl_scan_roc_expired(struct iwl_priv *priv);
262void iwl_scan_offchannel_skb(struct iwl_priv *priv);
263void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);
264
265/* For faster active scanning, scan will move to the next channel if fewer than
266 * PLCP_QUIET_THRESH packets are heard on this channel within
267 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
268 * time if it's a quiet channel (nothing responded to our probe, and there's
269 * no other traffic).
270 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
271#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
272#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
273
274#define IWL_SCAN_CHECK_WATCHDOG (HZ * 15)
275
276
277/* bt coex */
278void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
279int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
280 struct iwl_rx_cmd_buffer *rxb,
281 struct iwl_device_cmd *cmd);
282void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
283void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
284void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
285void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv);
286void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
287
288static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
289{
290 return priv->cfg->bt_params &&
291 priv->cfg->bt_params->advanced_bt_coexist;
292}
293
294#ifdef CONFIG_IWLWIFI_DEBUG
295const char *iwl_get_tx_fail_reason(u32 status);
296const char *iwl_get_agg_tx_fail_reason(u16 status);
297#else
298static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
299static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; }
300#endif
301
302
303/* station management */
304int iwlagn_manage_ibss_station(struct iwl_priv *priv,
305 struct ieee80211_vif *vif, bool add);
306#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
307#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
308#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
309 being activated */
310#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
311 (this is for the IBSS BSSID stations) */
312#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
313
314
315void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
316void iwl_clear_ucode_stations(struct iwl_priv *priv,
317 struct iwl_rxon_context *ctx);
318void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
319int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
320int iwl_send_add_sta(struct iwl_priv *priv,
321 struct iwl_addsta_cmd *sta, u8 flags);
322int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
323 const u8 *addr, bool is_ap,
324 struct ieee80211_sta *sta, u8 *sta_id_r);
325int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
326 const u8 *addr);
327void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
328 const u8 *addr);
329u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
330 const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
331
332int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
333 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
334int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
335 struct iwl_device_cmd *cmd);
336int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
337 struct ieee80211_sta *sta);
338
339bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
340 struct iwl_rxon_context *ctx,
341 struct ieee80211_sta_ht_cap *ht_cap);
342
343static inline int iwl_sta_id(struct ieee80211_sta *sta)
344{
345 if (WARN_ON(!sta))
346 return IWL_INVALID_STATION;
347
348 return ((struct iwl_station_priv *)sta->drv_priv)->sta_id;
349}
350
351int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
352 struct iwl_rxon_context *ctx);
353int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
354 const u8 *addr, u8 *sta_id_r);
355int iwl_remove_default_wep_key(struct iwl_priv *priv,
356 struct iwl_rxon_context *ctx,
357 struct ieee80211_key_conf *key);
358int iwl_set_default_wep_key(struct iwl_priv *priv,
359 struct iwl_rxon_context *ctx,
360 struct ieee80211_key_conf *key);
361int iwl_restore_default_wep_keys(struct iwl_priv *priv,
362 struct iwl_rxon_context *ctx);
363int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
364 struct ieee80211_key_conf *key,
365 struct ieee80211_sta *sta);
366int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
367 struct ieee80211_key_conf *key,
368 struct ieee80211_sta *sta);
369void iwl_update_tkip_key(struct iwl_priv *priv,
370 struct ieee80211_vif *vif,
371 struct ieee80211_key_conf *keyconf,
372 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
373int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
374int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
375 int tid, u16 ssn);
376int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
377 int tid);
378void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
379int iwl_update_bcast_station(struct iwl_priv *priv,
380 struct iwl_rxon_context *ctx);
381int iwl_update_bcast_stations(struct iwl_priv *priv);
382
383/* rate */
384static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
385{
386 return BIT(ant_idx) << RATE_MCS_ANT_POS;
387}
388
389static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
390{
391 return le32_to_cpu(rate_n_flags) & RATE_MCS_RATE_MSK;
392}
393
394static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
395{
396 return cpu_to_le32(flags|(u32)rate);
397}
398
399extern int iwl_alive_start(struct iwl_priv *priv);
400
401/* testmode support */
402#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
403
404extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
405 int len);
406extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
407 struct sk_buff *skb,
408 struct netlink_callback *cb,
409 void *data, int len);
410extern void iwl_testmode_init(struct iwl_priv *priv);
411extern void iwl_testmode_free(struct iwl_priv *priv);
412
413#else
414
415static inline
416int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
417{
418 return -ENOSYS;
419}
420
421static inline
422int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
423 struct netlink_callback *cb,
424 void *data, int len)
425{
426 return -ENOSYS;
427}
428
429static inline void iwl_testmode_init(struct iwl_priv *priv)
430{
431}
432
433static inline void iwl_testmode_free(struct iwl_priv *priv)
434{
435}
436#endif
437
438#ifdef CONFIG_IWLWIFI_DEBUG
439void iwl_print_rx_config_cmd(struct iwl_priv *priv,
440 enum iwl_rxon_context_id ctxid);
441#else
442static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
443 enum iwl_rxon_context_id ctxid)
444{
445}
446#endif
447
448/* status checks */
449
450static inline int iwl_is_ready(struct iwl_priv *priv)
451{
452 /* The adapter is 'ready' if READY EXIT_PENDING is not set */
453 return test_bit(STATUS_READY, &priv->status) &&
454 !test_bit(STATUS_EXIT_PENDING, &priv->status);
455}
456
457static inline int iwl_is_alive(struct iwl_priv *priv)
458{
459 return test_bit(STATUS_ALIVE, &priv->status);
460}
461
462static inline int iwl_is_rfkill(struct iwl_priv *priv)
463{
464 return test_bit(STATUS_RF_KILL_HW, &priv->status);
465}
466
467static inline int iwl_is_ctkill(struct iwl_priv *priv)
468{
469 return test_bit(STATUS_CT_KILL, &priv->status);
470}
471
472static inline int iwl_is_ready_rf(struct iwl_priv *priv)
473{
474 if (iwl_is_rfkill(priv))
475 return 0;
476
477 return iwl_is_ready(priv);
478}
479
480static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
481{
482 if (state)
483 set_bit(STATUS_POWER_PMI, &priv->status);
484 else
485 clear_bit(STATUS_POWER_PMI, &priv->status);
486 iwl_trans_set_pmi(priv->trans, state);
487}
488
489#ifdef CONFIG_IWLWIFI_DEBUGFS
490int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
491#else
492static inline int iwl_dbgfs_register(struct iwl_priv *priv,
493 struct dentry *dbgfs_dir)
494{
495 return 0;
496}
497#endif /* CONFIG_IWLWIFI_DEBUGFS */
498
499#ifdef CONFIG_IWLWIFI_DEBUG
500#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
501do { \
502 if (!iwl_is_rfkill((m))) \
503 IWL_ERR(m, fmt, ##args); \
504 else \
505 __iwl_err((m)->dev, true, \
506 !iwl_have_debug_level(IWL_DL_RADIO), \
507 fmt, ##args); \
508} while (0)
509#else
510#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
511do { \
512 if (!iwl_is_rfkill((m))) \
513 IWL_ERR(m, fmt, ##args); \
514 else \
515 __iwl_err((m)->dev, true, true, fmt, ##args); \
516} while (0)
517#endif /* CONFIG_IWLWIFI_DEBUG */
518
519extern const char *iwl_dvm_cmd_strings[REPLY_MAX];
520
521static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
522{
523 const char *s = iwl_dvm_cmd_strings[cmd];
524 if (s)
525 return s;
526 return "UNKNOWN";
527}
528#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
deleted file mode 100644
index de54713b680..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ /dev/null
@@ -1,1114 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/slab.h>
64#include <net/mac80211.h>
65
66#include "iwl-trans.h"
67
68#include "dev.h"
69#include "calib.h"
70#include "agn.h"
71
72/*****************************************************************************
73 * INIT calibrations framework
74 *****************************************************************************/
75
76/* Opaque calibration results */
/*
 * One stored calibration result, kept on priv->calib_results so that
 * INIT-phase calibration data can be replayed to the runtime uCode.
 */
struct iwl_calib_result {
	struct list_head list;		/* link on priv->calib_results */
	size_t cmd_len;			/* bytes in hdr + trailing payload */
	struct iwl_calib_hdr hdr;	/* calibration command header */
	/* data follows */
};
83
/*
 * CPU-endian snapshot of the per-chain beacon statistics used by the
 * CCK energy-sensitivity algorithm (silence RSSI and beacon energy for
 * receive chains A/B/C).
 */
struct statistics_general_data {
	u32 beacon_silence_rssi_a;
	u32 beacon_silence_rssi_b;
	u32 beacon_silence_rssi_c;
	u32 beacon_energy_a;
	u32 beacon_energy_b;
	u32 beacon_energy_c;
};
92
93int iwl_send_calib_results(struct iwl_priv *priv)
94{
95 struct iwl_host_cmd hcmd = {
96 .id = REPLY_PHY_CALIBRATION_CMD,
97 .flags = CMD_SYNC,
98 };
99 struct iwl_calib_result *res;
100
101 list_for_each_entry(res, &priv->calib_results, list) {
102 int ret;
103
104 hcmd.len[0] = res->cmd_len;
105 hcmd.data[0] = &res->hdr;
106 hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
107 ret = iwl_dvm_send_cmd(priv, &hcmd);
108 if (ret) {
109 IWL_ERR(priv, "Error %d on calib cmd %d\n",
110 ret, res->hdr.op_code);
111 return ret;
112 }
113 }
114
115 return 0;
116}
117
118int iwl_calib_set(struct iwl_priv *priv,
119 const struct iwl_calib_hdr *cmd, int len)
120{
121 struct iwl_calib_result *res, *tmp;
122
123 res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
124 GFP_ATOMIC);
125 if (!res)
126 return -ENOMEM;
127 memcpy(&res->hdr, cmd, len);
128 res->cmd_len = len;
129
130 list_for_each_entry(tmp, &priv->calib_results, list) {
131 if (tmp->hdr.op_code == res->hdr.op_code) {
132 list_replace(&tmp->list, &res->list);
133 kfree(tmp);
134 return 0;
135 }
136 }
137
138 /* wasn't in list already */
139 list_add_tail(&res->list, &priv->calib_results);
140
141 return 0;
142}
143
144void iwl_calib_free_results(struct iwl_priv *priv)
145{
146 struct iwl_calib_result *res, *tmp;
147
148 list_for_each_entry_safe(res, tmp, &priv->calib_results, list) {
149 list_del(&res->list);
150 kfree(res);
151 }
152}
153
154/*****************************************************************************
155 * RUNTIME calibrations framework
156 *****************************************************************************/
157
/*
 * iwl_sens_energy_cck - adapt CCK energy/auto-correlation thresholds
 *
 * "False alarms" are signals that our DSP tries to lock onto, but then
 * determines that they are either noise, or transmissions from a distant
 * wireless network (also "noise", really) that get "stepped on" by
 * stronger transmissions within our own network.  This algorithm attempts
 * to set a sensitivity level that is high enough to receive all of our
 * own network traffic, but not so high that our DSP gets too busy trying
 * to lock onto non-network activity/noise.
 *
 * @norm_fa: normalized false-alarm count (false alarms + bad PLCP)
 * @rx_enable_time: actual Rx-on time of the measured period, in usecs
 * @rx_info: per-chain beacon silence-RSSI / energy snapshot
 *
 * Updates nrg_th_cck / auto_corr_cck* in priv->sensitivity_data.
 * Always returns 0.
 */
static int iwl_sens_energy_cck(struct iwl_priv *priv,
			       u32 norm_fa,
			       u32 rx_enable_time,
			       struct statistics_general_data *rx_info)
{
	u32 max_nrg_cck = 0;
	int i = 0;
	u8 max_silence_rssi = 0;
	u32 silence_ref = 0;
	u8 silence_rssi_a = 0;
	u8 silence_rssi_b = 0;
	u8 silence_rssi_c = 0;
	u32 val;

	/* "false_alarms" values below are cross-multiplications to assess the
	 * numbers of false alarms within the measured period of actual Rx
	 * (Rx is off when we're txing), vs the min/max expected false alarms
	 * (some should be expected if rx is sensitive enough) in a
	 * hypothetical listening period of 200 time units (TU), 204.8 msec:
	 *
	 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
	 *
	 * */
	u32 false_alarms = norm_fa * 200 * 1024;
	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
	struct iwl_sensitivity_data *data = NULL;
	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;

	data = &(priv->sensitivity_data);

	data->nrg_auto_corr_silence_diff = 0;

	/* Find max silence rssi among all 3 receivers.
	 * This is background noise, which may include transmissions from other
	 * networks, measured during silence before our network's beacon */
	silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
			ALL_BAND_FILTER) >> 8);
	silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
			ALL_BAND_FILTER) >> 8);
	silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
			ALL_BAND_FILTER) >> 8);

	val = max(silence_rssi_b, silence_rssi_c);
	max_silence_rssi = max(silence_rssi_a, (u8) val);

	/* Store silence rssi in 20-beacon history table */
	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
	data->nrg_silence_idx++;
	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
		data->nrg_silence_idx = 0;

	/* Find max silence rssi across 20 beacon history */
	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
		val = data->nrg_silence_rssi[i];
		silence_ref = max(silence_ref, val);
	}
	IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
			silence_rssi_a, silence_rssi_b, silence_rssi_c,
			silence_ref);

	/* Find max rx energy (min value!) among all 3 receivers,
	 * measured during beacon frame.
	 * Save it in 10-beacon history table. */
	i = data->nrg_energy_idx;
	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);

	data->nrg_energy_idx++;
	if (data->nrg_energy_idx >= 10)
		data->nrg_energy_idx = 0;

	/* Find min rx energy (max value) across 10 beacon history.
	 * This is the minimum signal level that we want to receive well.
	 * Add backoff (margin so we don't miss slightly lower energy frames).
	 * This establishes an upper bound (min value) for energy threshold. */
	max_nrg_cck = data->nrg_value[0];
	for (i = 1; i < 10; i++)
		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
	max_nrg_cck += 6;

	IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
			rx_info->beacon_energy_a, rx_info->beacon_energy_b,
			rx_info->beacon_energy_c, max_nrg_cck - 6);

	/* Count number of consecutive beacons with fewer-than-desired
	 * false alarms. */
	if (false_alarms < min_false_alarms)
		data->num_in_cck_no_fa++;
	else
		data->num_in_cck_no_fa = 0;
	IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
			data->num_in_cck_no_fa);

	/* If we got too many false alarms this time, reduce sensitivity */
	if ((false_alarms > max_false_alarms) &&
		(data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
		IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
		     false_alarms, max_false_alarms);
		IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
		data->nrg_curr_state = IWL_FA_TOO_MANY;
		/* Store for "fewer than desired" on later beacon */
		data->nrg_silence_ref = silence_ref;

		/* increase energy threshold (reduce nrg value)
		 * to decrease sensitivity */
		data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
	/* Else if we got fewer than desired, increase sensitivity */
	} else if (false_alarms < min_false_alarms) {
		data->nrg_curr_state = IWL_FA_TOO_FEW;

		/* Compare silence level with silence level for most recent
		 * healthy number or too many false alarms */
		data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
						   (s32)silence_ref;

		IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u, silence diff %d\n",
			 false_alarms, min_false_alarms,
			 data->nrg_auto_corr_silence_diff);

		/* Increase value to increase sensitivity, but only if:
		 * 1a) previous beacon did *not* have *too many* false alarms
		 * 1b) AND there's a significant difference in Rx levels
		 *      from a previous beacon with too many, or healthy # FAs
		 * OR 2) We've seen a lot of beacons (100) with too few
		 *       false alarms */
		if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
			((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
			(data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {

			IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
			/* Increase nrg value to increase sensitivity */
			val = data->nrg_th_cck + NRG_STEP_CCK;
			data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
		} else {
			IWL_DEBUG_CALIB(priv, "... but not changing sensitivity\n");
		}

	/* Else we got a healthy number of false alarms, keep status quo */
	} else {
		IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
		data->nrg_curr_state = IWL_FA_GOOD_RANGE;

		/* Store for use in "fewer than desired" with later beacon */
		data->nrg_silence_ref = silence_ref;

		/* If previous beacon had too many false alarms,
		 * give it some extra margin by reducing sensitivity again
		 * (but don't go below measured energy of desired Rx) */
		if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
			IWL_DEBUG_CALIB(priv, "... increasing margin\n");
			if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
				data->nrg_th_cck -= NRG_MARGIN;
			else
				data->nrg_th_cck = max_nrg_cck;
		}
	}

	/* Make sure the energy threshold does not go above the measured
	 * energy of the desired Rx signals (reduced by backoff margin),
	 * or else we might start missing Rx frames.
	 * Lower value is higher energy, so we use max()!
	 */
	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
	IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);

	data->nrg_prev_state = data->nrg_curr_state;

	/* Auto-correlation CCK algorithm */
	if (false_alarms > min_false_alarms) {

		/* increase auto_corr values to decrease sensitivity
		 * so the DSP won't be disturbed by the noise
		 */
		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
		else {
			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
			data->auto_corr_cck =
				min((u32)ranges->auto_corr_max_cck, val);
		}
		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
		data->auto_corr_cck_mrc =
			min((u32)ranges->auto_corr_max_cck_mrc, val);
	} else if ((false_alarms < min_false_alarms) &&
	   ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
	   (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {

		/* Decrease auto_corr values to increase sensitivity */
		val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
		data->auto_corr_cck =
			max((u32)ranges->auto_corr_min_cck, val);
		val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
		data->auto_corr_cck_mrc =
			max((u32)ranges->auto_corr_min_cck_mrc, val);
	}

	return 0;
}
365
366
367static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
368 u32 norm_fa,
369 u32 rx_enable_time)
370{
371 u32 val;
372 u32 false_alarms = norm_fa * 200 * 1024;
373 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
374 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
375 struct iwl_sensitivity_data *data = NULL;
376 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
377
378 data = &(priv->sensitivity_data);
379
380 /* If we got too many false alarms this time, reduce sensitivity */
381 if (false_alarms > max_false_alarms) {
382
383 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
384 false_alarms, max_false_alarms);
385
386 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
387 data->auto_corr_ofdm =
388 min((u32)ranges->auto_corr_max_ofdm, val);
389
390 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
391 data->auto_corr_ofdm_mrc =
392 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
393
394 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
395 data->auto_corr_ofdm_x1 =
396 min((u32)ranges->auto_corr_max_ofdm_x1, val);
397
398 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
399 data->auto_corr_ofdm_mrc_x1 =
400 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
401 }
402
403 /* Else if we got fewer than desired, increase sensitivity */
404 else if (false_alarms < min_false_alarms) {
405
406 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
407 false_alarms, min_false_alarms);
408
409 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
410 data->auto_corr_ofdm =
411 max((u32)ranges->auto_corr_min_ofdm, val);
412
413 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
414 data->auto_corr_ofdm_mrc =
415 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
416
417 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
418 data->auto_corr_ofdm_x1 =
419 max((u32)ranges->auto_corr_min_ofdm_x1, val);
420
421 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
422 data->auto_corr_ofdm_mrc_x1 =
423 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
424 } else {
425 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
426 min_false_alarms, false_alarms, max_false_alarms);
427 }
428 return 0;
429}
430
/*
 * iwl_prepare_legacy_sensitivity_tbl - serialize sensitivity state
 *
 * Copies the current driver sensitivity values from @data into the
 * little-endian HD table @tbl at the uCode-defined indices, ready to be
 * sent inside a SENSITIVITY_CMD.
 */
static void iwl_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
					       struct iwl_sensitivity_data *data,
					       __le16 *tbl)
{
	/* OFDM auto-correlation thresholds (plain, MRC, x1, MRC x1) */
	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm);
	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm_x1);
	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);

	/* CCK auto-correlation thresholds */
	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
				cpu_to_le16((u16)data->auto_corr_cck);
	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16((u16)data->auto_corr_cck_mrc);

	/* Minimum detection energies */
	tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
				cpu_to_le16((u16)data->nrg_th_cck);
	tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
				cpu_to_le16((u16)data->nrg_th_ofdm);

	/* Barker correlation and CCA thresholds */
	tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
				cpu_to_le16(data->barker_corr_th_min);
	tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16(data->barker_corr_th_min_mrc);
	tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
				cpu_to_le16(data->nrg_th_cca);

	IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
			data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
			data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
			data->nrg_th_ofdm);

	IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
			data->auto_corr_cck, data->auto_corr_cck_mrc,
			data->nrg_th_cck);
}
470
471/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
472static int iwl_sensitivity_write(struct iwl_priv *priv)
473{
474 struct iwl_sensitivity_cmd cmd;
475 struct iwl_sensitivity_data *data = NULL;
476 struct iwl_host_cmd cmd_out = {
477 .id = SENSITIVITY_CMD,
478 .len = { sizeof(struct iwl_sensitivity_cmd), },
479 .flags = CMD_ASYNC,
480 .data = { &cmd, },
481 };
482
483 data = &(priv->sensitivity_data);
484
485 memset(&cmd, 0, sizeof(cmd));
486
487 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
488
489 /* Update uCode's "work" table, and copy it to DSP */
490 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
491
492 /* Don't send command to uCode if nothing has changed */
493 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
494 sizeof(u16)*HD_TABLE_SIZE)) {
495 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
496 return 0;
497 }
498
499 /* Copy table for comparison next time */
500 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
501 sizeof(u16)*HD_TABLE_SIZE);
502
503 return iwl_dvm_send_cmd(priv, &cmd_out);
504}
505
/*
 * iwl_enhance_sensitivity_write - send the enhanced SENSITIVITY_CMD
 *
 * Like iwl_sensitivity_write() but for firmware exposing the enhanced
 * (HD v1/v2) sensitivity table: the legacy entries are followed by a
 * block of fixed non-square-detection constants whose values depend on
 * the device's hd_v2 capability.  The command is sent asynchronously
 * and skipped entirely when neither the legacy nor the enhanced part
 * changed since the last send.
 */
static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
{
	struct iwl_enhance_sensitivity_cmd cmd;
	struct iwl_sensitivity_data *data = NULL;
	struct iwl_host_cmd cmd_out = {
		.id = SENSITIVITY_CMD,
		.len = { sizeof(struct iwl_enhance_sensitivity_cmd), },
		.flags = CMD_ASYNC,
		.data = { &cmd, },
	};

	data = &(priv->sensitivity_data);

	memset(&cmd, 0, sizeof(cmd));

	iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);

	/* Fixed constants for the enhanced entries; V2 vs V1 is a
	 * device capability, not a runtime-adapted value. */
	if (priv->cfg->base_params->hd_v2) {
		cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
			HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
		cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
			HD_INA_NON_SQUARE_DET_CCK_DATA_V2;
		cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
			HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
			HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
			HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
			HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
			HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
			HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
			HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
			HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
			HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2;
	} else {
		cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
			HD_INA_NON_SQUARE_DET_OFDM_DATA_V1;
		cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
			HD_INA_NON_SQUARE_DET_CCK_DATA_V1;
		cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
			HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
			HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
			HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
			HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
			HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
			HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
			HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
			HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
			HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1;
	}

	/* Update uCode's "work" table, and copy it to DSP */
	cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;

	/* Don't send command to uCode if nothing has changed */
	if (!memcmp(&cmd.enhance_table[0], &(priv->sensitivity_tbl[0]),
		    sizeof(u16)*HD_TABLE_SIZE) &&
	    !memcmp(&cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX],
		    &(priv->enhance_sensitivity_tbl[0]),
		    sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES)) {
		IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
		return 0;
	}

	/* Copy table for comparison next time */
	memcpy(&(priv->sensitivity_tbl[0]), &(cmd.enhance_table[0]),
	       sizeof(u16)*HD_TABLE_SIZE);
	memcpy(&(priv->enhance_sensitivity_tbl[0]),
	       &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
	       sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);

	return iwl_dvm_send_cmd(priv, &cmd_out);
}
594
595void iwl_init_sensitivity(struct iwl_priv *priv)
596{
597 int ret = 0;
598 int i;
599 struct iwl_sensitivity_data *data = NULL;
600 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
601
602 if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
603 return;
604
605 IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n");
606
607 /* Clear driver's sensitivity algo data */
608 data = &(priv->sensitivity_data);
609
610 if (ranges == NULL)
611 return;
612
613 memset(data, 0, sizeof(struct iwl_sensitivity_data));
614
615 data->num_in_cck_no_fa = 0;
616 data->nrg_curr_state = IWL_FA_TOO_MANY;
617 data->nrg_prev_state = IWL_FA_TOO_MANY;
618 data->nrg_silence_ref = 0;
619 data->nrg_silence_idx = 0;
620 data->nrg_energy_idx = 0;
621
622 for (i = 0; i < 10; i++)
623 data->nrg_value[i] = 0;
624
625 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
626 data->nrg_silence_rssi[i] = 0;
627
628 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
629 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
630 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
631 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
632 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
633 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
634 data->nrg_th_cck = ranges->nrg_th_cck;
635 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
636 data->barker_corr_th_min = ranges->barker_corr_th_min;
637 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
638 data->nrg_th_cca = ranges->nrg_th_cca;
639
640 data->last_bad_plcp_cnt_ofdm = 0;
641 data->last_fa_cnt_ofdm = 0;
642 data->last_bad_plcp_cnt_cck = 0;
643 data->last_fa_cnt_cck = 0;
644
645 if (priv->fw->enhance_sensitivity_table)
646 ret |= iwl_enhance_sensitivity_write(priv);
647 else
648 ret |= iwl_sensitivity_write(priv);
649 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
650}
651
/*
 * iwl_sensitivity_calibration - periodic Rx sensitivity adaptation
 *
 * Snapshots the uCode Rx statistics (under priv->statistics.lock),
 * converts the monotonically increasing false-alarm and bad-PLCP
 * counters into per-period deltas, then runs the OFDM and CCK
 * adaptation algorithms and pushes any resulting table change to the
 * uCode.  Bails out when calibration is disabled, when not associated,
 * when the statistics carry no interference data, or when no Rx time
 * was accumulated.
 */
void iwl_sensitivity_calibration(struct iwl_priv *priv)
{
	u32 rx_enable_time;
	u32 fa_cck;
	u32 fa_ofdm;
	u32 bad_plcp_cck;
	u32 bad_plcp_ofdm;
	u32 norm_fa_ofdm;
	u32 norm_fa_cck;
	struct iwl_sensitivity_data *data = NULL;
	struct statistics_rx_non_phy *rx_info;
	struct statistics_rx_phy *ofdm, *cck;
	struct statistics_general_data statis;

	if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
		return;

	data = &(priv->sensitivity_data);

	if (!iwl_is_any_associated(priv)) {
		IWL_DEBUG_CALIB(priv, "<< - not associated\n");
		return;
	}

	/* Copy everything needed out of the statistics under the lock */
	spin_lock_bh(&priv->statistics.lock);
	rx_info = &priv->statistics.rx_non_phy;
	ofdm = &priv->statistics.rx_ofdm;
	cck = &priv->statistics.rx_cck;
	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
		spin_unlock_bh(&priv->statistics.lock);
		return;
	}

	/* Extract Statistics: */
	rx_enable_time = le32_to_cpu(rx_info->channel_load);
	fa_cck = le32_to_cpu(cck->false_alarm_cnt);
	fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
	bad_plcp_cck = le32_to_cpu(cck->plcp_err);
	bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);

	statis.beacon_silence_rssi_a =
			le32_to_cpu(rx_info->beacon_silence_rssi_a);
	statis.beacon_silence_rssi_b =
			le32_to_cpu(rx_info->beacon_silence_rssi_b);
	statis.beacon_silence_rssi_c =
			le32_to_cpu(rx_info->beacon_silence_rssi_c);
	statis.beacon_energy_a =
			le32_to_cpu(rx_info->beacon_energy_a);
	statis.beacon_energy_b =
			le32_to_cpu(rx_info->beacon_energy_b);
	statis.beacon_energy_c =
			le32_to_cpu(rx_info->beacon_energy_c);

	spin_unlock_bh(&priv->statistics.lock);

	IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);

	if (!rx_enable_time) {
		IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
		return;
	}

	/* These statistics increase monotonically, and do not reset
	 * at each beacon. Calculate difference from last value, or just
	 * use the new statistics value if it has reset or wrapped around. */
	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
	else {
		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
	}

	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
	else {
		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
	}

	if (data->last_fa_cnt_ofdm > fa_ofdm)
		data->last_fa_cnt_ofdm = fa_ofdm;
	else {
		fa_ofdm -= data->last_fa_cnt_ofdm;
		data->last_fa_cnt_ofdm += fa_ofdm;
	}

	if (data->last_fa_cnt_cck > fa_cck)
		data->last_fa_cnt_cck = fa_cck;
	else {
		fa_cck -= data->last_fa_cnt_cck;
		data->last_fa_cnt_cck += fa_cck;
	}

	/* Total aborted signal locks */
	norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
	norm_fa_cck = fa_cck + bad_plcp_cck;

	IWL_DEBUG_CALIB(priv, "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
			bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);

	iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
	iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
	if (priv->fw->enhance_sensitivity_table)
		iwl_enhance_sensitivity_write(priv);
	else
		iwl_sensitivity_write(priv);
}
760
761static inline u8 find_first_chain(u8 mask)
762{
763 if (mask & ANT_A)
764 return CHAIN_A;
765 if (mask & ANT_B)
766 return CHAIN_B;
767 return CHAIN_C;
768}
769
/**
 * iwl_find_disconn_antenna - detect disconnected Rx antennas
 * @average_sig: out: per-chain average signal over IWL_CAL_NUM_BEACONS
 * @data: chain-noise state; disconn_array[] and active_chains updated
 *
 * Averages the accumulated per-chain signal, marks any chain whose
 * signal is more than MAXIMUM_ALLOWED_PATHLOSS below the strongest one
 * as disconnected, masks the result by the antennas the NVM says exist,
 * and applies a workaround so at least one valid Tx chain stays
 * connected.  The result is saved in data->active_chains for use by
 * RXON/TX/SCAN commands.
 */
static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
				     struct iwl_chain_noise_data *data)
{
	u32 active_chains = 0;
	u32 max_average_sig;
	u16 max_average_sig_antenna_i;
	u8 num_tx_chains;
	u8 first_chain;
	u16 i = 0;

	average_sig[0] = data->chain_signal_a / IWL_CAL_NUM_BEACONS;
	average_sig[1] = data->chain_signal_b / IWL_CAL_NUM_BEACONS;
	average_sig[2] = data->chain_signal_c / IWL_CAL_NUM_BEACONS;

	/* Pick the strongest chain as the initial "active" reference */
	if (average_sig[0] >= average_sig[1]) {
		max_average_sig = average_sig[0];
		max_average_sig_antenna_i = 0;
		active_chains = (1 << max_average_sig_antenna_i);
	} else {
		max_average_sig = average_sig[1];
		max_average_sig_antenna_i = 1;
		active_chains = (1 << max_average_sig_antenna_i);
	}

	if (average_sig[2] >= max_average_sig) {
		max_average_sig = average_sig[2];
		max_average_sig_antenna_i = 2;
		active_chains = (1 << max_average_sig_antenna_i);
	}

	IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
		     average_sig[0], average_sig[1], average_sig[2]);
	IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
		     max_average_sig, max_average_sig_antenna_i);

	/* Compare signal strengths for all 3 receivers. */
	for (i = 0; i < NUM_RX_CHAINS; i++) {
		if (i != max_average_sig_antenna_i) {
			s32 rssi_delta = (max_average_sig - average_sig[i]);

			/* If signal is very weak, compared with
			 * strongest, mark it as disconnected. */
			if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
				data->disconn_array[i] = 1;
			else
				active_chains |= (1 << i);
			IWL_DEBUG_CALIB(priv, "i = %d  rssiDelta = %d  "
			     "disconn_array[i] = %d\n",
			     i, rssi_delta, data->disconn_array[i]);
		}
	}

	/*
	 * The above algorithm sometimes fails when the ucode
	 * reports 0 for all chains. It's not clear why that
	 * happens to start with, but it is then causing trouble
	 * because this can make us enable more chains than the
	 * hardware really has.
	 *
	 * To be safe, simply mask out any chains that we know
	 * are not on the device.
	 */
	active_chains &= priv->nvm_data->valid_rx_ant;

	num_tx_chains = 0;
	for (i = 0; i < NUM_RX_CHAINS; i++) {
		/* loops on all the bits of
		 * priv->hw_setting.valid_tx_ant */
		u8 ant_msk = (1 << i);
		if (!(priv->nvm_data->valid_tx_ant & ant_msk))
			continue;

		num_tx_chains++;
		if (data->disconn_array[i] == 0)
			/* there is a Tx antenna connected */
			break;
		if (num_tx_chains == priv->hw_params.tx_chains_num &&
		    data->disconn_array[i]) {
			/*
			 * If all chains are disconnected
			 * connect the first valid tx chain
			 */
			first_chain =
				find_first_chain(priv->nvm_data->valid_tx_ant);
			data->disconn_array[first_chain] = 0;
			active_chains |= BIT(first_chain);
			IWL_DEBUG_CALIB(priv,
					"All Tx chains are disconnected W/A - declare %d as connected\n",
					first_chain);
			break;
		}
	}

	if (active_chains != priv->nvm_data->valid_rx_ant &&
	    active_chains != priv->chain_noise_data.active_chains)
		IWL_DEBUG_CALIB(priv,
				"Detected that not all antennas are connected! "
				"Connected: %#x, valid: %#x.\n",
				active_chains,
				priv->nvm_data->valid_rx_ant);

	/* Save for use within RXON, TX, SCAN commands, etc. */
	data->active_chains = active_chains;
	IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
			active_chains);
}
880
/*
 * iwlagn_gain_computation - balance Rx chains via delta gain codes
 *
 * For every connected chain after @default_chain, derives a delta gain
 * code from the noise difference to the default chain (scaled by the
 * device's chain_noise_scale / 1500), encodes it as a 2-bit magnitude
 * plus sign bit (bit 2) in the uCode API format, and — once per
 * calibration cycle — sends the result to the uCode and marks the
 * chain-noise state IWL_CHAIN_NOISE_CALIBRATED.
 */
static void iwlagn_gain_computation(struct iwl_priv *priv,
				    u32 average_noise[NUM_RX_CHAINS],
				    u8 default_chain)
{
	int i;
	s32 delta_g;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	/*
	 * Find Gain Code for the chains based on "default chain"
	 */
	for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
		/* Disconnected chains get no gain correction */
		if ((data->disconn_array[i])) {
			data->delta_gain_code[i] = 0;
			continue;
		}

		delta_g = (priv->cfg->base_params->chain_noise_scale *
			((s32)average_noise[default_chain] -
			(s32)average_noise[i])) / 1500;

		/* bound gain by 2 bits value max, 3rd bit is sign */
		data->delta_gain_code[i] =
			min(abs(delta_g),
			(long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

		if (delta_g < 0)
			/*
			 * set negative sign ...
			 * note to Intel developers:  This is uCode API format,
			 * not the format of any internal device registers.
			 * Do not change this format for e.g. 6050 or similar
			 * devices.  Change format only if more resolution
			 * (i.e. more than 2 bits magnitude) is needed.
			 */
			data->delta_gain_code[i] |= (1 << 2);
	}

	IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d  ANT_C = %d\n",
			data->delta_gain_code[1], data->delta_gain_code[2]);

	/* Send the gains to the uCode only once per calibration cycle */
	if (!data->radio_write) {
		struct iwl_calib_chain_noise_gain_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));

		iwl_set_calib_hdr(&cmd.hdr,
			priv->phy_calib_chain_noise_gain_cmd);
		cmd.delta_gain_1 = data->delta_gain_code[1];
		cmd.delta_gain_2 = data->delta_gain_code[2];
		iwl_dvm_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
			CMD_ASYNC, sizeof(cmd), &cmd);

		data->radio_write = 1;
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
}
938
/*
 * Accumulate 16 beacons of signal and noise statistics for each of
 * 3 receivers/antennas/rx-chains, then figure out:
 * 1) Which antennas are connected.
 * 2) Differential rx gain settings to balance the 3 receivers.
 *
 * Called on each statistics notification while in the
 * IWL_CHAIN_NOISE_ACCUMULATE state; once IWL_CAL_NUM_BEACONS samples have
 * been gathered, the disconnected-antenna analysis and gain computation run
 * exactly once and the state advances to IWL_CHAIN_NOISE_DONE.
 */
void iwl_chain_noise_calibration(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = NULL;

	u32 chain_noise_a;
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_sig_a;
	u32 chain_sig_b;
	u32 chain_sig_c;
	u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
	u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
	u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
	u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
	u16 i = 0;
	u16 rxon_chnum = INITIALIZATION_VALUE;
	u16 stat_chnum = INITIALIZATION_VALUE;
	u8 rxon_band24;
	u8 stat_band24;
	struct statistics_rx_non_phy *rx_info;

	/*
	 * MULTI-FIXME:
	 * When we support multiple interfaces on different channels,
	 * this must be modified/fixed.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
		return;

	data = &(priv->chain_noise_data);

	/*
	 * Accumulate just the first "chain_noise_num_beacons" after
	 * the first association, then we're done forever.
	 */
	if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
		if (data->state == IWL_CHAIN_NOISE_ALIVE)
			IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
		return;
	}

	/* protect priv->statistics while snapshotting the rx counters */
	spin_lock_bh(&priv->statistics.lock);

	rx_info = &priv->statistics.rx_non_phy;

	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
		spin_unlock_bh(&priv->statistics.lock);
		return;
	}

	rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
	rxon_chnum = le16_to_cpu(ctx->staging.channel);
	stat_band24 =
		!!(priv->statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK);
	/* channel number lives in the upper 16 bits of the statistics flag */
	stat_chnum = le32_to_cpu(priv->statistics.flag) >> 16;

	/* Make sure we accumulate data for just the associated channel
	 * (even if scanning). */
	if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
		IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
				rxon_chnum, rxon_band24);
		spin_unlock_bh(&priv->statistics.lock);
		return;
	}

	/*
	 * Accumulate beacon statistics values across
	 * "chain_noise_num_beacons"
	 */
	chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
				IN_BAND_FILTER;
	chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
				IN_BAND_FILTER;
	chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
				IN_BAND_FILTER;

	chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
	chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
	chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;

	spin_unlock_bh(&priv->statistics.lock);

	data->beacon_count++;

	/* running sums; divided by IWL_CAL_NUM_BEACONS below for averages */
	data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
	data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
	data->chain_noise_c = (chain_noise_c + data->chain_noise_c);

	data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
	data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
	data->chain_signal_c = (chain_sig_c + data->chain_signal_c);

	IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
			rxon_chnum, rxon_band24, data->beacon_count);
	IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
			chain_sig_a, chain_sig_b, chain_sig_c);
	IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
			chain_noise_a, chain_noise_b, chain_noise_c);

	/* If this is the "chain_noise_num_beacons", determine:
	 * 1) Disconnected antennas (using signal strengths)
	 * 2) Differential gain (using silence noise) to balance receivers */
	if (data->beacon_count != IWL_CAL_NUM_BEACONS)
		return;

	/* Analyze signal for disconnected antenna */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/* Disable disconnected antenna algorithm for advanced
		   bt coex, assuming valid antennas are connected */
		data->active_chains = priv->nvm_data->valid_rx_ant;
		for (i = 0; i < NUM_RX_CHAINS; i++)
			if (!(data->active_chains & (1<<i)))
				data->disconn_array[i] = 1;
	} else
		iwl_find_disconn_antenna(priv, average_sig, data);

	/* Analyze noise for rx balance */
	average_noise[0] = data->chain_noise_a / IWL_CAL_NUM_BEACONS;
	average_noise[1] = data->chain_noise_b / IWL_CAL_NUM_BEACONS;
	average_noise[2] = data->chain_noise_c / IWL_CAL_NUM_BEACONS;

	/* pick the connected chain with the lowest average noise
	 * (<= keeps the last such chain on ties) */
	for (i = 0; i < NUM_RX_CHAINS; i++) {
		if (!(data->disconn_array[i]) &&
		    (average_noise[i] <= min_average_noise)) {
			/* This means that chain i is active and has
			 * lower noise values so far: */
			min_average_noise = average_noise[i];
			min_average_noise_antenna_i = i;
		}
	}

	IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
			average_noise[0], average_noise[1],
			average_noise[2]);

	IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
			min_average_noise, min_average_noise_antenna_i);

	iwlagn_gain_computation(
		priv, average_noise,
		find_first_chain(priv->nvm_data->valid_rx_ant));

	/* Some power changes may have been made during the calibration.
	 * Update and commit the RXON
	 */
	iwl_update_chain_flags(priv);

	data->state = IWL_CHAIN_NOISE_DONE;
	iwl_power_update_mode(priv, false);
}
1099
1100void iwl_reset_run_time_calib(struct iwl_priv *priv)
1101{
1102 int i;
1103 memset(&(priv->sensitivity_data), 0,
1104 sizeof(struct iwl_sensitivity_data));
1105 memset(&(priv->chain_noise_data), 0,
1106 sizeof(struct iwl_chain_noise_data));
1107 for (i = 0; i < NUM_RX_CHAINS; i++)
1108 priv->chain_noise_data.delta_gain_code[i] =
1109 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1110
1111 /* Ask for statistics now, the uCode will send notification
1112 * periodically after association */
1113 iwl_send_statistics_request(priv, CMD_ASYNC, true);
1114}
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
deleted file mode 100644
index 2349f393cc4..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_calib_h__
63#define __iwl_calib_h__
64
65#include "dev.h"
66#include "commands.h"
67
68void iwl_chain_noise_calibration(struct iwl_priv *priv);
69void iwl_sensitivity_calibration(struct iwl_priv *priv);
70
71void iwl_init_sensitivity(struct iwl_priv *priv);
72void iwl_reset_run_time_calib(struct iwl_priv *priv);
73
74#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
deleted file mode 100644
index 71ab76b2b39..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ /dev/null
@@ -1,3995 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_commands_h__
70#define __iwl_commands_h__
71
72#include <linux/ieee80211.h>
73#include <linux/types.h>
74
75
76enum {
77 REPLY_ALIVE = 0x1,
78 REPLY_ERROR = 0x2,
79 REPLY_ECHO = 0x3, /* test command */
80
81 /* RXON and QOS commands */
82 REPLY_RXON = 0x10,
83 REPLY_RXON_ASSOC = 0x11,
84 REPLY_QOS_PARAM = 0x13,
85 REPLY_RXON_TIMING = 0x14,
86
87 /* Multi-Station support */
88 REPLY_ADD_STA = 0x18,
89 REPLY_REMOVE_STA = 0x19,
90 REPLY_REMOVE_ALL_STA = 0x1a, /* not used */
91 REPLY_TXFIFO_FLUSH = 0x1e,
92
93 /* Security */
94 REPLY_WEPKEY = 0x20,
95
96 /* RX, TX, LEDs */
97 REPLY_TX = 0x1c,
98 REPLY_LEDS_CMD = 0x48,
99 REPLY_TX_LINK_QUALITY_CMD = 0x4e,
100
101 /* WiMAX coexistence */
102 COEX_PRIORITY_TABLE_CMD = 0x5a,
103 COEX_MEDIUM_NOTIFICATION = 0x5b,
104 COEX_EVENT_CMD = 0x5c,
105
106 /* Calibration */
107 TEMPERATURE_NOTIFICATION = 0x62,
108 CALIBRATION_CFG_CMD = 0x65,
109 CALIBRATION_RES_NOTIFICATION = 0x66,
110 CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
111
112 /* 802.11h related */
113 REPLY_QUIET_CMD = 0x71, /* not used */
114 REPLY_CHANNEL_SWITCH = 0x72,
115 CHANNEL_SWITCH_NOTIFICATION = 0x73,
116 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
117 SPECTRUM_MEASURE_NOTIFICATION = 0x75,
118
119 /* Power Management */
120 POWER_TABLE_CMD = 0x77,
121 PM_SLEEP_NOTIFICATION = 0x7A,
122 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
123
124 /* Scan commands and notifications */
125 REPLY_SCAN_CMD = 0x80,
126 REPLY_SCAN_ABORT_CMD = 0x81,
127 SCAN_START_NOTIFICATION = 0x82,
128 SCAN_RESULTS_NOTIFICATION = 0x83,
129 SCAN_COMPLETE_NOTIFICATION = 0x84,
130
131 /* IBSS/AP commands */
132 BEACON_NOTIFICATION = 0x90,
133 REPLY_TX_BEACON = 0x91,
134 WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */
135
136 /* Miscellaneous commands */
137 REPLY_TX_POWER_DBM_CMD = 0x95,
138 QUIET_NOTIFICATION = 0x96, /* not used */
139 REPLY_TX_PWR_TABLE_CMD = 0x97,
140 REPLY_TX_POWER_DBM_CMD_V1 = 0x98, /* old version of API */
141 TX_ANT_CONFIGURATION_CMD = 0x98,
142 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
143
144 /* Bluetooth device coexistence config command */
145 REPLY_BT_CONFIG = 0x9b,
146
147 /* Statistics */
148 REPLY_STATISTICS_CMD = 0x9c,
149 STATISTICS_NOTIFICATION = 0x9d,
150
151 /* RF-KILL commands and notifications */
152 REPLY_CARD_STATE_CMD = 0xa0,
153 CARD_STATE_NOTIFICATION = 0xa1,
154
155 /* Missed beacons notification */
156 MISSED_BEACONS_NOTIFICATION = 0xa2,
157
158 REPLY_CT_KILL_CONFIG_CMD = 0xa4,
159 SENSITIVITY_CMD = 0xa8,
160 REPLY_PHY_CALIBRATION_CMD = 0xb0,
161 REPLY_RX_PHY_CMD = 0xc0,
162 REPLY_RX_MPDU_CMD = 0xc1,
163 REPLY_RX = 0xc3,
164 REPLY_COMPRESSED_BA = 0xc5,
165
166 /* BT Coex */
167 REPLY_BT_COEX_PRIO_TABLE = 0xcc,
168 REPLY_BT_COEX_PROT_ENV = 0xcd,
169 REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
170
171 /* PAN commands */
172 REPLY_WIPAN_PARAMS = 0xb2,
173 REPLY_WIPAN_RXON = 0xb3, /* use REPLY_RXON structure */
174 REPLY_WIPAN_RXON_TIMING = 0xb4, /* use REPLY_RXON_TIMING structure */
175 REPLY_WIPAN_RXON_ASSOC = 0xb6, /* use REPLY_RXON_ASSOC structure */
176 REPLY_WIPAN_QOS_PARAM = 0xb7, /* use REPLY_QOS_PARAM structure */
177 REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
178 REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
179 REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
180 REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
181
182 REPLY_WOWLAN_PATTERNS = 0xe0,
183 REPLY_WOWLAN_WAKEUP_FILTER = 0xe1,
184 REPLY_WOWLAN_TSC_RSC_PARAMS = 0xe2,
185 REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
186 REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
187 REPLY_WOWLAN_GET_STATUS = 0xe5,
188 REPLY_D3_CONFIG = 0xd3,
189
190 REPLY_MAX = 0xff
191};
192
193/*
194 * Minimum number of queues. MAX_NUM is defined in hw specific files.
195 * Set the minimum to accommodate
196 * - 4 standard TX queues
197 * - the command queue
198 * - 4 PAN TX queues
199 * - the PAN multicast queue, and
200 * - the AUX (TX during scan dwell) queue.
201 */
202#define IWL_MIN_NUM_QUEUES 11
203
204/*
205 * Command queue depends on iPAN support.
206 */
207#define IWL_DEFAULT_CMD_QUEUE_NUM 4
208#define IWL_IPAN_CMD_QUEUE_NUM 9
209
210#define IWL_TX_FIFO_BK 0 /* shared */
211#define IWL_TX_FIFO_BE 1
212#define IWL_TX_FIFO_VI 2 /* shared */
213#define IWL_TX_FIFO_VO 3
214#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
215#define IWL_TX_FIFO_BE_IPAN 4
216#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
217#define IWL_TX_FIFO_VO_IPAN 5
218/* re-uses the VO FIFO, uCode will properly flush/schedule */
219#define IWL_TX_FIFO_AUX 5
220#define IWL_TX_FIFO_UNUSED 255
221
222#define IWLAGN_CMD_FIFO_NUM 7
223
224/*
225 * This queue number is required for proper operation
226 * because the ucode will stop/start the scheduler as
227 * required.
228 */
229#define IWL_IPAN_MCAST_QUEUE 8
230
231/******************************************************************************
232 * (0)
233 * Commonly used structures and definitions:
234 * Command header, rate_n_flags, txpower
235 *
236 *****************************************************************************/
237
238/**
239 * iwlagn rate_n_flags bit fields
240 *
241 * rate_n_flags format is used in following iwlagn commands:
242 * REPLY_RX (response only)
243 * REPLY_RX_MPDU (response only)
244 * REPLY_TX (both command and response)
245 * REPLY_TX_LINK_QUALITY_CMD
246 *
247 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
248 * 2-0: 0) 6 Mbps
249 * 1) 12 Mbps
250 * 2) 18 Mbps
251 * 3) 24 Mbps
252 * 4) 36 Mbps
253 * 5) 48 Mbps
254 * 6) 54 Mbps
255 * 7) 60 Mbps
256 *
257 * 4-3: 0) Single stream (SISO)
258 * 1) Dual stream (MIMO)
259 * 2) Triple stream (MIMO)
260 *
261 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
262 *
263 * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
264 * 3-0: 0xD) 6 Mbps
265 * 0xF) 9 Mbps
266 * 0x5) 12 Mbps
267 * 0x7) 18 Mbps
268 * 0x9) 24 Mbps
269 * 0xB) 36 Mbps
270 * 0x1) 48 Mbps
271 * 0x3) 54 Mbps
272 *
273 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
274 * 6-0: 10) 1 Mbps
275 * 20) 2 Mbps
276 * 55) 5.5 Mbps
277 * 110) 11 Mbps
278 */
279#define RATE_MCS_CODE_MSK 0x7
280#define RATE_MCS_SPATIAL_POS 3
281#define RATE_MCS_SPATIAL_MSK 0x18
282#define RATE_MCS_HT_DUP_POS 5
283#define RATE_MCS_HT_DUP_MSK 0x20
284/* Both legacy and HT use bits 7:0 as the CCK/OFDM rate or HT MCS */
285#define RATE_MCS_RATE_MSK 0xff
286
287/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
288#define RATE_MCS_FLAGS_POS 8
289#define RATE_MCS_HT_POS 8
290#define RATE_MCS_HT_MSK 0x100
291
292/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
293#define RATE_MCS_CCK_POS 9
294#define RATE_MCS_CCK_MSK 0x200
295
296/* Bit 10: (1) Use Green Field preamble */
297#define RATE_MCS_GF_POS 10
298#define RATE_MCS_GF_MSK 0x400
299
300/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
301#define RATE_MCS_HT40_POS 11
302#define RATE_MCS_HT40_MSK 0x800
303
304/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
305#define RATE_MCS_DUP_POS 12
306#define RATE_MCS_DUP_MSK 0x1000
307
308/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
309#define RATE_MCS_SGI_POS 13
310#define RATE_MCS_SGI_MSK 0x2000
311
312/**
313 * rate_n_flags Tx antenna masks
314 * 4965 has 2 transmitters
315 * 5100 has 1 transmitter B
316 * 5150 has 1 transmitter A
317 * 5300 has 3 transmitters
318 * 5350 has 3 transmitters
319 * bit14:16
320 */
321#define RATE_MCS_ANT_POS 14
322#define RATE_MCS_ANT_A_MSK 0x04000
323#define RATE_MCS_ANT_B_MSK 0x08000
324#define RATE_MCS_ANT_C_MSK 0x10000
325#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
326#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
327#define RATE_ANT_NUM 3
328
329#define POWER_TABLE_NUM_ENTRIES 33
330#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
331#define POWER_TABLE_CCK_ENTRY 32
332
333#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
334#define IWL_PWR_CCK_ENTRIES 2
335
336/**
337 * struct tx_power_dual_stream
338 *
339 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
340 *
341 * Same format as iwl_tx_power_dual_stream, but __le32
342 */
343struct tx_power_dual_stream {
344 __le32 dw;
345} __packed;
346
347/**
348 * Command REPLY_TX_POWER_DBM_CMD = 0x98
349 * struct iwlagn_tx_power_dbm_cmd
350 */
351#define IWLAGN_TX_POWER_AUTO 0x7f
352#define IWLAGN_TX_POWER_NO_CLOSED (0x1 << 6)
353
354struct iwlagn_tx_power_dbm_cmd {
355 s8 global_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
356 u8 flags;
357 s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
358 u8 reserved;
359} __packed;
360
361/**
362 * Command TX_ANT_CONFIGURATION_CMD = 0x98
363 * This command is used to configure valid Tx antenna.
364 * By default uCode concludes the valid antenna according to the radio flavor.
365 * This command enables the driver to override/modify this conclusion.
366 */
367struct iwl_tx_ant_config_cmd {
368 __le32 valid;
369} __packed;
370
371/******************************************************************************
372 * (0a)
373 * Alive and Error Commands & Responses:
374 *
375 *****************************************************************************/
376
377#define UCODE_VALID_OK cpu_to_le32(0x1)
378
379/**
380 * REPLY_ALIVE = 0x1 (response only, not a command)
381 *
382 * uCode issues this "alive" notification once the runtime image is ready
383 * to receive commands from the driver. This is the *second* "alive"
384 * notification that the driver will receive after rebooting uCode;
385 * this "alive" is indicated by subtype field != 9.
386 *
387 * See comments documenting "BSM" (bootstrap state machine).
388 *
389 * This response includes two pointers to structures within the device's
390 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
391 *
392 * 1) log_event_table_ptr indicates base of the event log. This traces
393 * a 256-entry history of uCode execution within a circular buffer.
394 * Its header format is:
395 *
396 * __le32 log_size; log capacity (in number of entries)
397 * __le32 type; (1) timestamp with each entry, (0) no timestamp
398 * __le32 wraps; # times uCode has wrapped to top of circular buffer
399 * __le32 write_index; next circular buffer entry that uCode would fill
400 *
401 * The header is followed by the circular buffer of log entries. Entries
402 * with timestamps have the following format:
403 *
404 * __le32 event_id; range 0 - 1500
405 * __le32 timestamp; low 32 bits of TSF (of network, if associated)
406 * __le32 data; event_id-specific data value
407 *
408 * Entries without timestamps contain only event_id and data.
409 *
410 *
411 * 2) error_event_table_ptr indicates base of the error log. This contains
412 * information about any uCode error that occurs. For agn, the format
413 * of the error log is defined by struct iwl_error_event_table.
414 *
415 * The Linux driver can print both logs to the system log when a uCode error
416 * occurs.
417 */
418
419/*
420 * Note: This structure is read from the device with IO accesses,
421 * and the reading already does the endian conversion. As it is
422 * read with u32-sized accesses, any members with a different size
423 * need to be ordered correctly though!
424 */
425struct iwl_error_event_table {
426 u32 valid; /* (nonzero) valid, (0) log is empty */
427 u32 error_id; /* type of error */
428 u32 pc; /* program counter */
429 u32 blink1; /* branch link */
430 u32 blink2; /* branch link */
431 u32 ilink1; /* interrupt link */
432 u32 ilink2; /* interrupt link */
433 u32 data1; /* error-specific data */
434 u32 data2; /* error-specific data */
435 u32 line; /* source code line of error */
436 u32 bcon_time; /* beacon timer */
437 u32 tsf_low; /* network timestamp function timer */
438 u32 tsf_hi; /* network timestamp function timer */
439 u32 gp1; /* GP1 timer register */
440 u32 gp2; /* GP2 timer register */
441 u32 gp3; /* GP3 timer register */
442 u32 ucode_ver; /* uCode version */
443 u32 hw_ver; /* HW Silicon version */
444 u32 brd_ver; /* HW board version */
445 u32 log_pc; /* log program counter */
446 u32 frame_ptr; /* frame pointer */
447 u32 stack_ptr; /* stack pointer */
448 u32 hcmd; /* last host command header */
449 u32 isr0; /* isr status register LMPM_NIC_ISR0:
450 * rxtx_flag */
451 u32 isr1; /* isr status register LMPM_NIC_ISR1:
452 * host_flag */
453 u32 isr2; /* isr status register LMPM_NIC_ISR2:
454 * enc_flag */
455 u32 isr3; /* isr status register LMPM_NIC_ISR3:
456 * time_flag */
457 u32 isr4; /* isr status register LMPM_NIC_ISR4:
458 * wico interrupt */
459 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
460 u32 wait_event; /* wait event() caller address */
461 u32 l2p_control; /* L2pControlField */
462 u32 l2p_duration; /* L2pDurationField */
463 u32 l2p_mhvalid; /* L2pMhValidBits */
464 u32 l2p_addr_match; /* L2pAddrMatchStat */
465 u32 lmpm_pmg_sel; /* indicate which clocks are turned on
466 * (LMPM_PMG_SEL) */
467 u32 u_timestamp; /* indicate when the date and time of the
468 * compilation */
469 u32 flow_handler; /* FH read/write pointers, RX credit */
470} __packed;
471
472struct iwl_alive_resp {
473 u8 ucode_minor;
474 u8 ucode_major;
475 __le16 reserved1;
476 u8 sw_rev[8];
477 u8 ver_type;
478 u8 ver_subtype; /* not "9" for runtime alive */
479 __le16 reserved2;
480 __le32 log_event_table_ptr; /* SRAM address for event log */
481 __le32 error_event_table_ptr; /* SRAM address for error log */
482 __le32 timestamp;
483 __le32 is_valid;
484} __packed;
485
486/*
487 * REPLY_ERROR = 0x2 (response only, not a command)
488 */
489struct iwl_error_resp {
490 __le32 error_type;
491 u8 cmd_id;
492 u8 reserved1;
493 __le16 bad_cmd_seq_num;
494 __le32 error_info;
495 __le64 timestamp;
496} __packed;
497
498/******************************************************************************
499 * (1)
500 * RXON Commands & Responses:
501 *
502 *****************************************************************************/
503
504/*
505 * Rx config defines & structure
506 */
507/* rx_config device types */
508enum {
509 RXON_DEV_TYPE_AP = 1,
510 RXON_DEV_TYPE_ESS = 3,
511 RXON_DEV_TYPE_IBSS = 4,
512 RXON_DEV_TYPE_SNIFFER = 6,
513 RXON_DEV_TYPE_CP = 7,
514 RXON_DEV_TYPE_2STA = 8,
515 RXON_DEV_TYPE_P2P = 9,
516};
517
518
519#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
520#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
521#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
522#define RXON_RX_CHAIN_VALID_POS (1)
523#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
524#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
525#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
526#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
527#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
528#define RXON_RX_CHAIN_CNT_POS (10)
529#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
530#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
531#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
532#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
533
534/* rx_config flags */
535/* band & modulation selection */
536#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
537#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
538/* auto detection enable */
539#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
540/* TGg protection when tx */
541#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
542/* cck short slot & preamble */
543#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
/* short preamble enable */
#define RXON_FLG_SHORT_PREAMBLE_MSK	cpu_to_le32(1 << 5)
/* antenna selection */
#define RXON_FLG_DIS_DIV_MSK		cpu_to_le32(1 << 7)
#define RXON_FLG_ANT_SEL_MSK		cpu_to_le32(0x0f00)
#define RXON_FLG_ANT_A_MSK		cpu_to_le32(1 << 8)
#define RXON_FLG_ANT_B_MSK		cpu_to_le32(1 << 9)
/* radar detection enable */
#define RXON_FLG_RADAR_DETECT_MSK	cpu_to_le32(1 << 12)
#define RXON_FLG_TGJ_NARROW_BAND_MSK	cpu_to_le32(1 << 13)
/* rx response to host with 8-byte TSF
 * (according to ON_AIR deassertion) */
#define RXON_FLG_TSF2HOST_MSK		cpu_to_le32(1 << 15)


/* HT flags */
#define RXON_FLG_CTRL_CHANNEL_LOC_POS		(22)
#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK	cpu_to_le32(0x1 << 22)

#define RXON_FLG_HT_OPERATING_MODE_POS		(23)

#define RXON_FLG_HT_PROT_MSK			cpu_to_le32(0x1 << 23)
#define RXON_FLG_HT40_PROT_MSK			cpu_to_le32(0x2 << 23)

#define RXON_FLG_CHANNEL_MODE_POS		(25)
#define RXON_FLG_CHANNEL_MODE_MSK		cpu_to_le32(0x3 << 25)

/* channel mode: values for the 2-bit RXON_FLG_CHANNEL_MODE field */
enum {
	CHANNEL_MODE_LEGACY = 0,
	CHANNEL_MODE_PURE_40 = 1,
	CHANNEL_MODE_MIXED = 2,
	CHANNEL_MODE_RESERVED = 3,
};
#define RXON_FLG_CHANNEL_MODE_LEGACY	cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
#define RXON_FLG_CHANNEL_MODE_PURE_40	cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
#define RXON_FLG_CHANNEL_MODE_MIXED	cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)

/* CTS to self (if spec allows) flag */
#define RXON_FLG_SELF_CTS_EN		cpu_to_le32(0x1<<30)

/* rx_config filter flags (struct iwl_rxon_cmd filter_flags field) */
/* accept all data frames */
#define RXON_FILTER_PROMISC_MSK		cpu_to_le32(1 << 0)
/* pass control & management to host */
#define RXON_FILTER_CTL2HOST_MSK	cpu_to_le32(1 << 1)
/* accept multi-cast */
#define RXON_FILTER_ACCEPT_GRP_MSK	cpu_to_le32(1 << 2)
/* don't decrypt uni-cast frames */
#define RXON_FILTER_DIS_DECRYPT_MSK	cpu_to_le32(1 << 3)
/* don't decrypt multi-cast frames */
#define RXON_FILTER_DIS_GRP_DECRYPT_MSK	cpu_to_le32(1 << 4)
/* STA is associated */
#define RXON_FILTER_ASSOC_MSK		cpu_to_le32(1 << 5)
/* transfer to host non bssid beacons in associated state */
#define RXON_FILTER_BCON_AWARE_MSK	cpu_to_le32(1 << 6)
599
/**
 * REPLY_RXON = 0x10 (command, has simple generic response)
 *
 * RXON tunes the radio tuner to a service channel, and sets up a number
 * of parameters that are used primarily for Rx, but also for Tx operations.
 *
 * NOTE: When tuning to a new channel, driver must set the
 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
 * info within the device, including the station tables, tx retry
 * rate tables, and txpower tables. Driver must build a new station
 * table and txpower table before transmitting anything on the RXON
 * channel.
 *
 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
 */

struct iwl_rxon_cmd {
	u8 node_addr[6];	/* this station's MAC address */
	__le16 reserved1;
	u8 bssid_addr[6];	/* BSSID of the network */
	__le16 reserved2;
	u8 wlap_bssid_addr[6];
	__le16 reserved3;
	u8 dev_type;		/* operating mode; values defined elsewhere in this file */
	u8 air_propagation;
	__le16 rx_chain;	/* Rx chain configuration bits */
	u8 ofdm_basic_rates;	/* basic rate bitmap, OFDM rates */
	u8 cck_basic_rates;	/* basic rate bitmap, CCK rates */
	__le16 assoc_id;	/* association ID from the AP */
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	__le16 channel;		/* channel number to tune to */
	u8 ofdm_ht_single_stream_basic_rates;
	u8 ofdm_ht_dual_stream_basic_rates;
	u8 ofdm_ht_triple_stream_basic_rates;
	u8 reserved5;
	__le16 acquisition_data;
	__le16 reserved6;
} __packed;
641
/*
 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
 *
 * NOTE(review): carries the rate/chain subset of the RXON parameters
 * (compare struct iwl_rxon_cmd above) — presumably used to update these
 * without a full RXON; confirm against the driver code that sends it.
 */
struct iwl_rxon_assoc_cmd {
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	__le16 reserved1;
	u8 ofdm_ht_single_stream_basic_rates;
	u8 ofdm_ht_dual_stream_basic_rates;
	u8 ofdm_ht_triple_stream_basic_rates;
	u8 reserved2;
	__le16 rx_chain_select_flags;
	__le16 acquisition_data;
	__le32 reserved3;
} __packed;
659
#define IWL_CONN_MAX_LISTEN_INTERVAL	10
#define IWL_MAX_UCODE_BEACON_INTERVAL	4 /* 4096 */

/*
 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
 *
 * Beacon/DTIM timing parameters for the current RXON context.
 */
struct iwl_rxon_time_cmd {
	__le64 timestamp;
	__le16 beacon_interval;
	__le16 atim_window;
	__le32 beacon_init_val;
	__le16 listen_interval;
	u8 dtim_period;
	u8 delta_cp_bss_tbtts;
} __packed;
675
/*
 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
 */
/**
 * struct iwl5000_channel_switch_cmd
 * @band: 0- 5.2GHz, 1- 2.4GHz
 * @expect_beacon: 0- resume transmits after channel switch
 *                 1- wait for beacon to resume transmits
 * @channel: new channel number
 * @rxon_flags: Rx on flags
 * @rxon_filter_flags: filtering parameters
 * @switch_time: switch time in extended beacon format
 * @reserved: reserved bytes
 */
struct iwl5000_channel_switch_cmd {
	u8 band;
	u8 expect_beacon;
	__le16 channel;
	__le32 rxon_flags;
	__le32 rxon_filter_flags;
	__le32 switch_time;
	/* sized to match the 5000-series txpower table layout */
	__le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
} __packed;
699
/**
 * struct iwl6000_channel_switch_cmd
 * @band: 0- 5.2GHz, 1- 2.4GHz
 * @expect_beacon: 0- resume transmits after channel switch
 *                 1- wait for beacon to resume transmits
 * @channel: new channel number
 * @rxon_flags: Rx on flags
 * @rxon_filter_flags: filtering parameters
 * @switch_time: switch time in extended beacon format
 * @reserved: reserved bytes
 */
struct iwl6000_channel_switch_cmd {
	u8 band;
	u8 expect_beacon;
	__le16 channel;
	__le32 rxon_flags;
	__le32 rxon_filter_flags;
	__le32 switch_time;
	/* 6000-series variant differs from 5000 only in this table size */
	__le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
} __packed;
720
/*
 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
 *
 * Sent by the uCode to report the outcome of a channel switch.
 */
struct iwl_csa_notification {
	__le16 band;
	__le16 channel;
	__le32 status; /* 0 - OK, 1 - fail */
} __packed;
729
730/******************************************************************************
731 * (2)
732 * Quality-of-Service (QOS) Commands & Responses:
733 *
734 *****************************************************************************/
735
/**
 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
 *
 * @cw_min: Contention window, start value in numbers of slots.
 *          Should be a power-of-2, minus 1. Device's default is 0x0f.
 * @cw_max: Contention window, max value in numbers of slots.
 *          Should be a power-of-2, minus 1. Device's default is 0x3f.
 * @aifsn:  Number of slots in Arbitration Interframe Space (before
 *          performing random backoff timing prior to Tx). Device default 1.
 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
 *
 * Device will automatically increase contention window by (2*CW) + 1 for each
 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
 * value, to cap the CW value.
 */
struct iwl_ac_qos {
	__le16 cw_min;
	__le16 cw_max;
	u8 aifsn;
	u8 reserved1;
	__le16 edca_txop;
} __packed;
759
/* QoS flags defines (qos_flags field below) */
#define QOS_PARAM_FLG_UPDATE_EDCA_MSK	cpu_to_le32(0x01)
#define QOS_PARAM_FLG_TGN_MSK		cpu_to_le32(0x02)
#define QOS_PARAM_FLG_TXOP_TYPE_MSK	cpu_to_le32(0x10)

/* Number of Access Categories (AC) (EDCA), queues 0..3 */
#define AC_NUM	4

/*
 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
 *
 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
 */
struct iwl_qosparam_cmd {
	__le32 qos_flags;		/* QOS_PARAM_FLG_* */
	struct iwl_ac_qos ac[AC_NUM];	/* one entry per access category */
} __packed;
778
779/******************************************************************************
780 * (3)
781 * Add/Modify Stations Commands & Responses:
782 *
783 *****************************************************************************/
/*
 * Multi station support
 */

/* Special, dedicated locations within device's station table */
#define IWL_AP_ID		0
#define IWL_AP_ID_PAN		1
#define IWL_STA_ID		2
#define IWLAGN_PAN_BCAST_ID	14
#define IWLAGN_BROADCAST_ID	15
#define IWLAGN_STATION_COUNT	16

/* TID used for non-QoS frames (one past the real TID range) */
#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT

/* station_flags bits for struct iwl_addsta_cmd */
#define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK		cpu_to_le32(1 << 8)
#define STA_FLG_PAN_STATION		cpu_to_le32(1 << 13)
#define STA_FLG_RTS_MIMO_PROT_MSK	cpu_to_le32(1 << 17)
#define STA_FLG_AGG_MPDU_8US_MSK	cpu_to_le32(1 << 18)
#define STA_FLG_MAX_AGG_SIZE_POS	(19)
#define STA_FLG_MAX_AGG_SIZE_MSK	cpu_to_le32(3 << 19)
#define STA_FLG_HT40_EN_MSK		cpu_to_le32(1 << 21)
#define STA_FLG_MIMO_DIS_MSK		cpu_to_le32(1 << 22)
#define STA_FLG_AGG_MPDU_DENSITY_POS	(23)
#define STA_FLG_AGG_MPDU_DENSITY_MSK	cpu_to_le32(7 << 23)

/* Use in mode field. 1: modify existing entry, 0: add new station entry */
#define STA_CONTROL_MODIFY_MSK		0x01

/* key flags __le16 (struct iwl_keyinfo key_flags field) */
#define STA_KEY_FLG_ENCRYPT_MSK	cpu_to_le16(0x0007)
#define STA_KEY_FLG_NO_ENC	cpu_to_le16(0x0000)
#define STA_KEY_FLG_WEP		cpu_to_le16(0x0001)
#define STA_KEY_FLG_CCMP	cpu_to_le16(0x0002)
#define STA_KEY_FLG_TKIP	cpu_to_le16(0x0003)

#define STA_KEY_FLG_KEYID_POS	8
#define STA_KEY_FLG_INVALID	cpu_to_le16(0x0800)
/* wep key is either from global key (0) or from station info array (1) */
#define STA_KEY_FLG_MAP_KEY_MSK	cpu_to_le16(0x0008)

/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
#define STA_KEY_FLG_KEY_SIZE_MSK	cpu_to_le16(0x1000)
#define STA_KEY_MULTICAST_MSK		cpu_to_le16(0x4000)
#define STA_KEY_MAX_NUM		8
#define STA_KEY_MAX_NUM_PAN	16
/* must not match WEP_INVALID_OFFSET */
#define IWLAGN_HW_KEY_DEFAULT	0xfe

/* Flags indicate whether to modify vs. don't change various station params */
#define STA_MODIFY_KEY_MASK		0x01
#define STA_MODIFY_TID_DISABLE_TX	0x02
#define STA_MODIFY_TX_RATE_MSK		0x04
#define STA_MODIFY_ADDBA_TID_MSK	0x08
#define STA_MODIFY_DELBA_TID_MSK	0x10
#define STA_MODIFY_SLEEP_TX_COUNT_MSK	0x20

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
844
/* agn */
/* Security key material for one station (embedded in REPLY_ADD_STA) */
struct iwl_keyinfo {
	__le16 key_flags;	/* STA_KEY_FLG_* */
	u8 tkip_rx_tsc_byte2;	/* TSC[2] for key mix ph1 detection */
	u8 reserved1;
	__le16 tkip_rx_ttak[5];	/* 10-byte unicast TKIP TTAK */
	u8 key_offset;		/* slot in device key table */
	u8 reserved2;
	u8 key[16];		/* 16-byte unicast decryption key */
	__le64 tx_secur_seq_cnt;
	__le64 hw_tkip_mic_rx_key;
	__le64 hw_tkip_mic_tx_key;
} __packed;
858
/**
 * struct sta_id_modify
 * @addr[ETH_ALEN]: station's MAC address
 * @sta_id: index of station in uCode's station table
 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
 *
 * Driver selects unused table index when adding new station,
 * or the index to a pre-existing station entry when modifying that station.
 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
 *
 * modify_mask flags select which parameters to modify vs. leave alone.
 */
struct sta_id_modify {
	u8 addr[ETH_ALEN];
	__le16 reserved1;
	u8 sta_id;
	u8 modify_mask;
	__le16 reserved2;
} __packed;
878
/*
 * REPLY_ADD_STA = 0x18 (command)
 *
 * The device contains an internal table of per-station information,
 * with info on security keys, aggregation parameters, and Tx rates for
 * initial Tx attempt and any retries (agn devices uses
 * REPLY_TX_LINK_QUALITY_CMD,
 *
 * REPLY_ADD_STA sets up the table entry for one station, either creating
 * a new entry, or modifying a pre-existing one.
 *
 * NOTE: RXON command (without "associated" bit set) wipes the station table
 * clean. Moving into RF_KILL state does this also. Driver must set up
 * new station table before transmitting anything on the RXON channel
 * (except active scans or active measurements; those commands carry
 * their own txpower/rate setup data).
 *
 * When getting started on a new channel, driver must set up the
 * IWL_BROADCAST_ID entry (last entry in the table). For a client
 * station in a BSS, once an AP is selected, driver sets up the AP STA
 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
 * are all that are needed for a BSS client station. If the device is
 * used as AP, or in an IBSS network, driver must set up station table
 * entries for all STAs in network, starting with index IWL_STA_ID.
 */

struct iwl_addsta_cmd {
	u8 mode;		/* 1: modify existing, 0: add new station */
	u8 reserved[3];
	struct sta_id_modify sta;
	struct iwl_keyinfo key;
	__le32 station_flags;		/* STA_FLG_* */
	__le32 station_flags_msk;	/* STA_FLG_* */

	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
	 * corresponding to bit (e.g. bit 5 controls TID 5).
	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
	__le16 tid_disable_tx;
	__le16 legacy_reserved;

	/* TID for which to add block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	u8 add_immediate_ba_tid;

	/* TID for which to remove block-ack support.
	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
	u8 remove_immediate_ba_tid;

	/* Starting Sequence Number for added block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	__le16 add_immediate_ba_ssn;

	/*
	 * Number of packets OK to transmit to station even though
	 * it is asleep -- used to synchronise PS-poll and u-APSD
	 * responses while ucode keeps track of STA sleep state.
	 */
	__le16 sleep_tx_count;

	__le16 reserved2;
} __packed;
940
941
/* status values returned in struct iwl_add_sta_resp */
#define ADD_STA_SUCCESS_MSK		0x1
#define ADD_STA_NO_ROOM_IN_TABLE	0x2
#define ADD_STA_NO_BLOCK_ACK_RESOURCE	0x4
#define ADD_STA_MODIFY_NON_EXIST_STA	0x8
/*
 * REPLY_ADD_STA = 0x18 (response)
 */
struct iwl_add_sta_resp {
	u8 status;	/* ADD_STA_* */
} __packed;
952
#define REM_STA_SUCCESS_MSK	0x1
/*
 * REPLY_REM_STA = 0x19 (response)
 */
struct iwl_rem_sta_resp {
	u8 status;	/* REM_STA_SUCCESS_MSK on success */
} __packed;
960
/*
 * REPLY_REM_STA = 0x19 (command)
 */
struct iwl_rem_sta_cmd {
	u8 num_sta;	/* number of removed stations */
	u8 reserved[3];
	u8 addr[ETH_ALEN]; /* MAC addr of the first station */
	u8 reserved2[2];
} __packed;
970
971
/* WiFi queues mask (queue_control field of REPLY_TXFIFO_FLUSH) */
#define IWL_SCD_BK_MSK		cpu_to_le32(BIT(0))
#define IWL_SCD_BE_MSK		cpu_to_le32(BIT(1))
#define IWL_SCD_VI_MSK		cpu_to_le32(BIT(2))
#define IWL_SCD_VO_MSK		cpu_to_le32(BIT(3))
/* NOTE(review): MGMT shares BIT(3) with VO — appears intentional
 * (management frames use the VO queue); confirm against uCode docs. */
#define IWL_SCD_MGMT_MSK	cpu_to_le32(BIT(3))

/* PAN queues mask */
#define IWL_PAN_SCD_BK_MSK	cpu_to_le32(BIT(4))
#define IWL_PAN_SCD_BE_MSK	cpu_to_le32(BIT(5))
#define IWL_PAN_SCD_VI_MSK	cpu_to_le32(BIT(6))
#define IWL_PAN_SCD_VO_MSK	cpu_to_le32(BIT(7))
/* NOTE(review): PAN MGMT likewise shares BIT(7) with PAN VO. */
#define IWL_PAN_SCD_MGMT_MSK	cpu_to_le32(BIT(7))
#define IWL_PAN_SCD_MULTICAST_MSK	cpu_to_le32(BIT(8))

/* aggregation queues (bits 10..19) */
#define IWL_AGG_TX_QUEUE_MSK	cpu_to_le32(0xffc00)

#define IWL_DROP_ALL		BIT(1)

/*
 * REPLY_TXFIFO_FLUSH = 0x1e(command and response)
 *
 * When using full FIFO flush this command checks the scheduler HW block WR/RD
 * pointers to check if all the frames were transferred by DMA into the
 * relevant TX FIFO queue. Only when the DMA is finished and the queue is
 * empty the command can finish.
 * This command is used to flush the TXFIFO from transmit commands, it may
 * operate on single or multiple queues, the command queue can't be flushed by
 * this command. The command response is returned when all the queue flush
 * operations are done. Each TX command flushed return response with the FLUSH
 * status set in the TX response status. When FIFO flush operation is used,
 * the flush operation ends when both the scheduler DMA done and TXFIFO empty
 * are set.
 *
 * @queue_control: bit mask for which queues to flush
 * @flush_control: flush controls
 *	0: Dump single MSDU
 *	1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
 *	2: Dump all FIFO
 */
struct iwl_txfifo_flush_cmd {
	__le32 queue_control;
	__le16 flush_control;
	__le16 reserved;
} __packed;
1017
/*
 * REPLY_WEP_KEY = 0x20
 *
 * One global WEP key entry; an array of these follows struct iwl_wep_cmd.
 */
struct iwl_wep_key {
	u8 key_index;		/* WEP key index, 0..WEP_KEYS_MAX-1 */
	u8 key_offset;		/* slot in device key table */
	u8 reserved1[2];
	u8 key_size;		/* WEP_KEY_LEN_64 or WEP_KEY_LEN_128 */
	u8 reserved2[3];
	u8 key[16];
} __packed;
1029
/* Header for REPLY_WEP_KEY; followed by num_keys iwl_wep_key entries */
struct iwl_wep_cmd {
	u8 num_keys;		/* number of iwl_wep_key entries that follow */
	u8 global_key_type;	/* WEP_KEY_WEP_TYPE */
	u8 flags;
	u8 reserved;
	struct iwl_wep_key key[0];	/* variable-length trailer */
} __packed;

#define WEP_KEY_WEP_TYPE	1
#define WEP_KEYS_MAX		4
#define WEP_INVALID_OFFSET	0xff
#define WEP_KEY_LEN_64		5
#define WEP_KEY_LEN_128		13
1043
1044/******************************************************************************
1045 * (4)
1046 * Rx Responses:
1047 *
1048 *****************************************************************************/
1049
/* Rx frame status bits (REPLY_RX status word) */
#define RX_RES_STATUS_NO_CRC32_ERROR	cpu_to_le32(1 << 0)
#define RX_RES_STATUS_NO_RXE_OVERFLOW	cpu_to_le32(1 << 1)

/* Rx phy_flags bits (struct iwl_rx_phy_res phy_flags field) */
#define RX_RES_PHY_FLAGS_BAND_24_MSK		cpu_to_le16(1 << 0)
#define RX_RES_PHY_FLAGS_MOD_CCK_MSK		cpu_to_le16(1 << 1)
#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK	cpu_to_le16(1 << 2)
#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK	cpu_to_le16(1 << 3)
#define RX_RES_PHY_FLAGS_ANTENNA_MSK		0x70
#define RX_RES_PHY_FLAGS_ANTENNA_POS		4
#define RX_RES_PHY_FLAGS_AGG_MSK		cpu_to_le16(1 << 7)

/* security type of the received frame (bits 8..10 of the status word) */
#define RX_RES_STATUS_SEC_TYPE_MSK	(0x7 << 8)
#define RX_RES_STATUS_SEC_TYPE_NONE	(0x0 << 8)
#define RX_RES_STATUS_SEC_TYPE_WEP	(0x1 << 8)
#define RX_RES_STATUS_SEC_TYPE_CCMP	(0x2 << 8)
#define RX_RES_STATUS_SEC_TYPE_TKIP	(0x3 << 8)
#define RX_RES_STATUS_SEC_TYPE_ERR	(0x7 << 8)

#define RX_RES_STATUS_STATION_FOUND	(1<<6)
#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH	(1<<7)

/* decryption result (bits 11..12 of the status word) */
#define RX_RES_STATUS_DECRYPT_TYPE_MSK	(0x3 << 11)
#define RX_RES_STATUS_NOT_DECRYPT	(0x0 << 11)
#define RX_RES_STATUS_DECRYPT_OK	(0x3 << 11)
#define RX_RES_STATUS_BAD_ICV_MIC	(0x1 << 11)
#define RX_RES_STATUS_BAD_KEY_TTAK	(0x2 << 11)

/* MPDU decryption status bits */
#define RX_MPDU_RES_STATUS_ICV_OK	(0x20)
#define RX_MPDU_RES_STATUS_MIC_OK	(0x40)
#define RX_MPDU_RES_STATUS_TTAK_OK	(1 << 7)
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK	(0x800)


/* layout of the non_cfg_phy[] words in struct iwlagn_non_cfg_phy */
#define IWLAGN_RX_RES_PHY_CNT		8
#define IWLAGN_RX_RES_AGC_IDX		1
#define IWLAGN_RX_RES_RSSI_AB_IDX	2
#define IWLAGN_RX_RES_RSSI_C_IDX	3
#define IWLAGN_OFDM_AGC_MSK		0xfe00
#define IWLAGN_OFDM_AGC_BIT_POS		9
#define IWLAGN_OFDM_RSSI_INBAND_A_BITMSK	0x00ff
#define IWLAGN_OFDM_RSSI_ALLBAND_A_BITMSK	0xff00
#define IWLAGN_OFDM_RSSI_A_BIT_POS	0
#define IWLAGN_OFDM_RSSI_INBAND_B_BITMSK	0xff0000
#define IWLAGN_OFDM_RSSI_ALLBAND_B_BITMSK	0xff000000
#define IWLAGN_OFDM_RSSI_B_BIT_POS	16
#define IWLAGN_OFDM_RSSI_INBAND_C_BITMSK	0x00ff
#define IWLAGN_OFDM_RSSI_ALLBAND_C_BITMSK	0xff00
#define IWLAGN_OFDM_RSSI_C_BIT_POS	0
1098
/* Non-configurable DSP phy data, decoded via the IWLAGN_*_IDX/_MSK above */
struct iwlagn_non_cfg_phy {
	__le32 non_cfg_phy[IWLAGN_RX_RES_PHY_CNT];  /* up to 8 phy entries */
} __packed;
1102
1103
/*
 * REPLY_RX = 0xc3 (response only, not a command)
 * Used only for legacy (non 11n) frames.
 */
struct iwl_rx_phy_res {
	u8 non_cfg_phy_cnt;	/* non configurable DSP phy data byte count */
	u8 cfg_phy_cnt;		/* configurable DSP phy data byte count */
	u8 stat_id;		/* configurable DSP phy data set ID */
	u8 reserved1;
	__le64 timestamp;	/* TSF at on air rise */
	__le32 beacon_time_stamp; /* beacon at on-air rise */
	__le16 phy_flags;	/* general phy flags: band, modulation, ... */
	__le16 channel;		/* channel number */
	u8 non_cfg_phy_buf[32];	/* for various implementations of non_cfg_phy */
	__le32 rate_n_flags;	/* RATE_MCS_* */
	__le16 byte_count;	/* frame's byte-count */
	__le16 frame_time;	/* frame's time on the air */
} __packed;
1122
/* Header preceding an Rx MPDU; gives the length of the frame that follows */
struct iwl_rx_mpdu_res_start {
	__le16 byte_count;
	__le16 reserved;
} __packed;
1127
1128
1129/******************************************************************************
1130 * (5)
1131 * Tx Commands & Responses:
1132 *
1133 * Driver must place each REPLY_TX command into one of the prioritized Tx
1134 * queues in host DRAM, shared between driver and device (see comments for
1135 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1136 * are preparing to transmit, the device pulls the Tx command over the PCI
1137 * bus via one of the device's Tx DMA channels, to fill an internal FIFO
1138 * from which data will be transmitted.
1139 *
1140 * uCode handles all timing and protocol related to control frames
1141 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1142 * handle reception of block-acks; uCode updates the host driver via
1143 * REPLY_COMPRESSED_BA.
1144 *
1145 * uCode handles retrying Tx when an ACK is expected but not received.
1146 * This includes trying lower data rates than the one requested in the Tx
1147 * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
1148 *
1149 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1150 * This command must be executed after every RXON command, before Tx can occur.
1151 *****************************************************************************/
1152
/* REPLY_TX Tx flags field (struct iwl_tx_cmd tx_flags) */

/*
 * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
 * before this frame. if CTS-to-self required check
 * RXON_FLG_SELF_CTS_EN status.
 */
#define TX_CMD_FLG_PROT_REQUIRE_MSK	cpu_to_le32(1 << 0)

/* 1: Expect ACK from receiving station
 * 0: Don't expect ACK (MAC header's duration field s/b 0)
 * Set this for unicast frames, but not broadcast/multicast. */
#define TX_CMD_FLG_ACK_MSK		cpu_to_le32(1 << 3)

/* For agn devices:
 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
 *    Tx command's initial_rate_index indicates first rate to try;
 *    uCode walks through table for additional Tx attempts.
 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
 *    This rate will be used for all Tx attempts; it will not be scaled. */
#define TX_CMD_FLG_STA_RATE_MSK		cpu_to_le32(1 << 4)

/* 1: Expect immediate block-ack.
 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
#define TX_CMD_FLG_IMM_BA_RSP_MASK	cpu_to_le32(1 << 6)

/* Tx antenna selection field; reserved (0) for agn devices. */
#define TX_CMD_FLG_ANT_SEL_MSK		cpu_to_le32(0xf00)

/* 1: Ignore Bluetooth priority for this frame.
 * 0: Delay Tx until Bluetooth device is done (normal usage). */
#define TX_CMD_FLG_IGNORE_BT		cpu_to_le32(1 << 12)

/* 1: uCode overrides sequence control field in MAC header.
 * 0: Driver provides sequence control field in MAC header.
 * Set this for management frames, non-QOS data frames, non-unicast frames,
 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
#define TX_CMD_FLG_SEQ_CTL_MSK		cpu_to_le32(1 << 13)

/* 1: This frame is non-last MPDU; more fragments are coming.
 * 0: Last fragment, or not using fragmentation. */
#define TX_CMD_FLG_MORE_FRAG_MSK	cpu_to_le32(1 << 14)

/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
 * 0: No TSF required in outgoing frame.
 * Set this for transmitting beacons and probe responses. */
#define TX_CMD_FLG_TSF_MSK		cpu_to_le32(1 << 16)

/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
 *    alignment of frame's payload data field.
 * 0: No pad
 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
 * field (but not both). Driver must align frame data (i.e. data following
 * MAC header) to DWORD boundary. */
#define TX_CMD_FLG_MH_PAD_MSK		cpu_to_le32(1 << 20)

/* accelerate aggregation support
 * 0 - no CCMP encryption; 1 - CCMP encryption */
#define TX_CMD_FLG_AGG_CCMP_MSK		cpu_to_le32(1 << 22)

/* HCCA-AP - disable duration overwriting. */
#define TX_CMD_FLG_DUR_MSK		cpu_to_le32(1 << 25)


/*
 * TX command security control (struct iwl_tx_cmd sec_ctl field)
 */
#define TX_CMD_SEC_WEP		0x01
#define TX_CMD_SEC_CCM		0x02
#define TX_CMD_SEC_TKIP		0x03
#define TX_CMD_SEC_MSK		0x03
#define TX_CMD_SEC_SHIFT	6
#define TX_CMD_SEC_KEY128	0x08

/*
 * security overhead sizes (bytes)
 */
#define WEP_IV_LEN	4
#define WEP_ICV_LEN	4
#define CCMP_MIC_LEN	8
#define TKIP_ICV_LEN	4
1234
/*
 * REPLY_TX = 0x1c (command)
 */

/*
 * 4965 uCode updates these Tx attempt count values in host DRAM.
 * Used for managing Tx retries when expecting block-acks.
 * Driver should set these fields to 0.
 */
struct iwl_dram_scratch {
	u8 try_cnt;		/* Tx attempts */
	u8 bt_kill_cnt;		/* Tx attempts blocked by Bluetooth device */
	__le16 reserved;
} __packed;
1249
struct iwl_tx_cmd {
	/*
	 * MPDU byte count:
	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
	 * + 8 byte IV for CCM or TKIP (not used for WEP)
	 * + Data payload
	 * + 8-byte MIC (not used for CCM/WEP)
	 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
	 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
	 * Range: 14-2342 bytes.
	 */
	__le16 len;

	/*
	 * MPDU or MSDU byte count for next frame.
	 * Used for fragmentation and bursting, but not 11n aggregation.
	 * Same as "len", but for next frame. Set to 0 if not applicable.
	 */
	__le16 next_frame_len;

	__le32 tx_flags;	/* TX_CMD_FLG_* */

	/* uCode may modify this field of the Tx command (in host DRAM!).
	 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
	struct iwl_dram_scratch scratch;

	/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
	__le32 rate_n_flags;	/* RATE_MCS_* */

	/* Index of destination station in uCode's station table */
	u8 sta_id;

	/* Type of security encryption: CCM or TKIP */
	u8 sec_ctl;		/* TX_CMD_SEC_* */

	/*
	 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
	 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
	 * data frames, this field may be used to selectively reduce initial
	 * rate (via non-0 value) for special frames (e.g. management), while
	 * still supporting rate scaling for all frames.
	 */
	u8 initial_rate_index;
	u8 reserved;
	u8 key[16];		/* encryption key material */
	__le16 next_frame_flags;
	__le16 reserved2;
	union {
		__le32 life_time;
		__le32 attempt;
	} stop_time;

	/* Host DRAM physical address pointer to "scratch" in this command.
	 * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
	__le32 dram_lsb_ptr;
	u8 dram_msb_ptr;

	u8 rts_retry_limit;	/*byte 50 */
	u8 data_retry_limit;	/*byte 51 */
	u8 tid_tspec;
	union {
		__le16 pm_frame_timeout;
		__le16 attempt_duration;
	} timeout;

	/*
	 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
	 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
	 */
	__le16 driver_txop;

	/*
	 * MAC header goes here, followed by 2 bytes padding if MAC header
	 * length is 26 or 30 bytes, followed by payload data
	 * (two zero-length trailers view the same bytes two ways)
	 */
	u8 payload[0];
	struct ieee80211_hdr hdr[0];
} __packed;
1328
/*
 * TX command response is sent after *agn* transmission attempts.
 *
 * both postpone and abort status are expected behavior from uCode. there is
 * no special operation required from driver; except for RFKILL_FLUSH,
 * which required tx flush host command to flush all the tx frames in queues
 */
enum {
	TX_STATUS_SUCCESS = 0x01,
	TX_STATUS_DIRECT_DONE = 0x02,
	/* postpone TX */
	TX_STATUS_POSTPONE_DELAY = 0x40,
	TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
	TX_STATUS_POSTPONE_BT_PRIO = 0x42,
	TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
	TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
	/* abort TX */
	TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
	TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
	TX_STATUS_FAIL_LONG_LIMIT = 0x83,
	TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
	TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
	TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
	TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
	TX_STATUS_FAIL_DEST_PS = 0x88,
	TX_STATUS_FAIL_HOST_ABORTED = 0x89,
	TX_STATUS_FAIL_BT_RETRY = 0x8a,
	TX_STATUS_FAIL_STA_INVALID = 0x8b,
	TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
	TX_STATUS_FAIL_TID_DISABLE = 0x8d,
	TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
	TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
	TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
	TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
};
1364
/* packet mode field within the Tx response status word (bits 8:15) */
#define TX_PACKET_MODE_REGULAR		0x0000
#define TX_PACKET_MODE_BURST_SEQ	0x0100
#define TX_PACKET_MODE_BURST_FIRST	0x0200

enum {
	TX_POWER_PA_NOT_ACTIVE = 0x0,
};

/* bit-field layout of the 32-bit Tx response status word */
enum {
	TX_STATUS_MSK = 0x000000ff,		/* bits 0:7 */
	TX_STATUS_DELAY_MSK = 0x00000040,
	TX_STATUS_ABORT_MSK = 0x00000080,
	TX_PACKET_MODE_MSK = 0x0000ff00,	/* bits 8:15 */
	TX_FIFO_NUMBER_MSK = 0x00070000,	/* bits 16:18 */
	TX_RESERVED = 0x00780000,		/* bits 19:22 */
	TX_POWER_PA_DETECT_MSK = 0x7f800000,	/* bits 23:30 */
	TX_ABORT_REQUIRED_MSK = 0x80000000,	/* bits 31:31 */
};
1383
/* *******************************
 * TX aggregation status
 ******************************* */

/* per-frame status codes within an aggregation Tx response */
enum {
	AGG_TX_STATE_TRANSMITTED = 0x00,
	AGG_TX_STATE_UNDERRUN_MSK = 0x01,
	AGG_TX_STATE_BT_PRIO_MSK = 0x02,
	AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
	AGG_TX_STATE_ABORT_MSK = 0x08,
	AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
	AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
	AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40,
	AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
	AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
	AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
	AGG_TX_STATE_DUMP_TX_MSK = 0x200,
	AGG_TX_STATE_DELAY_TX_MSK = 0x400
};

#define AGG_TX_STATUS_MSK	0x00000fff	/* bits 0:11 */
#define AGG_TX_TRY_MSK		0x0000f000	/* bits 12:15 */

/* any of the "this was the last frame sent" reasons */
#define AGG_TX_STATE_LAST_SENT_MSK  (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
				     AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
				     AGG_TX_STATE_LAST_SENT_BT_KILL_MSK)

/* # tx attempts for first frame in aggregation */
#define AGG_TX_STATE_TRY_CNT_POS	12
#define AGG_TX_STATE_TRY_CNT_MSK	0xf000

/* Command ID and sequence number of Tx command for this frame */
#define AGG_TX_STATE_SEQ_NUM_POS	16
#define AGG_TX_STATE_SEQ_NUM_MSK	0xffff0000
1418
/*
 * REPLY_TX = 0x1c (response)
 *
 * This response may be in one of two slightly different formats, indicated
 * by the frame_count field:
 *
 * 1)  No aggregation (frame_count == 1).  This reports Tx results for
 *     a single frame.  Multiple attempts, at various bit rates, may have
 *     been made for this frame.
 *
 * 2)  Aggregation (frame_count > 1).  This reports Tx results for
 *     2 or more frames that used block-acknowledge.  All frames were
 *     transmitted at same rate.  Rate scaling may have been used if first
 *     frame in this new agg block failed in previous agg block(s).
 *
 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
 * block-ack has not been received by the time the agn device records
 * this status.
 * This status relates to reasons the tx might have been blocked or aborted
 * within the sending station (this agn device), rather than whether it was
 * received successfully by the destination station.
 */
struct agg_tx_status {
	__le16 status;		/* AGG_TX_STATE_* */
	__le16 sequence;	/* Tx cmd sequence number for this frame */
} __packed;
1445
/*
 * definitions for initial rate index field (tlc_info in iwlagn_tx_resp)
 * bits [3:0] initial rate index
 * bits [6:4] rate table color, used for the initial rate
 * bit-7 invalid rate indication
 *	i.e. rate was not chosen from rate table
 *	or rate table color was changed during frame retries
 * refer tlc rate info
 */

#define IWL50_TX_RES_INIT_RATE_INDEX_POS	0
#define IWL50_TX_RES_INIT_RATE_INDEX_MSK	0x0f
#define IWL50_TX_RES_RATE_TABLE_COLOR_POS	4
#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK	0x70
#define IWL50_TX_RES_INV_RATE_INDEX_MSK		0x80

/* refer to ra_tid (in iwlagn_tx_resp below) */
#define IWLAGN_TX_RES_TID_POS	0
#define IWLAGN_TX_RES_TID_MSK	0x0f
#define IWLAGN_TX_RES_RA_POS	4
#define IWLAGN_TX_RES_RA_MSK	0xf0
1467
struct iwlagn_tx_resp {
	u8 frame_count;		/* 1 no aggregation, >1 aggregation */
	u8 bt_kill_count;	/* # blocked by bluetooth (unused for agg) */
	u8 failure_rts;		/* # failures due to unsuccessful RTS */
	u8 failure_frame;	/* # failures due to no ACK (unused for agg) */

	/* For non-agg:  Rate at which frame was successful.
	 * For agg:  Rate at which all frames were transmitted. */
	__le32 rate_n_flags;	/* RATE_MCS_* */

	/* For non-agg:  RTS + CTS + frame tx attempts time + ACK.
	 * For agg:  RTS + CTS + aggregation tx time + block-ack time. */
	__le16 wireless_media_time;	/* uSecs */

	u8 pa_status;		/* RF power amplifier measurement (not used) */
	u8 pa_integ_res_a[3];
	u8 pa_integ_res_b[3];
	u8 pa_integ_res_C[3];

	__le32 tfd_info;
	__le16 seq_ctl;
	__le16 byte_cnt;
	u8 tlc_info;		/* see IWL50_TX_RES_* above */
	u8 ra_tid;		/* tid (0:3), sta_id (4:7) */
	__le16 frame_ctrl;
	/*
	 * For non-agg:  frame status TX_STATUS_*
	 * For agg:  status of 1st frame, AGG_TX_STATE_*; other frame status
	 *           fields follow this one, up to frame_count.
	 *           Bit fields:
	 *           11- 0:  AGG_TX_STATE_* status code
	 *           15-12:  Retry count for 1st frame in aggregation (retries
	 *                   occur if tx failed for this frame when it was a
	 *                   member of a previous aggregation block). If rate
	 *                   scaling is used, retry count indicates the rate
	 *                   table entry used for all frames in the new agg.
	 *           31-16:  Sequence # for this frame's Tx cmd (not SSN!)
	 */
	struct agg_tx_status status;	/* TX status (in aggregation -
					 * status of 1st frame) */
} __packed;
1509/*
1510 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1511 *
1512 * Reports Block-Acknowledge from recipient station
1513 */
struct iwl_compressed_ba_resp {
	__le32 sta_addr_lo32;	/* low 32 bits of the station MAC address */
	__le16 sta_addr_hi16;	/* high 16 bits of the station MAC address */
	__le16 reserved;

	/* Index of recipient (BA-sending) station in uCode's station table */
	u8 sta_id;
	u8 tid;			/* traffic identifier of the BA session */
	__le16 seq_ctl;
	__le64 bitmap;		/* block-ack bitmap of acknowledged frames */
	__le16 scd_flow;	/* scheduler flow id (per field name) */
	__le16 scd_ssn;		/* scheduler start sequence number */
	u8 txed;	/* number of frames sent */
	u8 txed_2_done;	/* number of frames acked */
} __packed;
1529
1530/*
1531 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
1532 *
1533 */
1534
/* RS_NEW_API: only TLC_RTS remains and has moved to bit 0 */
1536#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1537
1538/* # of EDCA prioritized tx fifos */
1539#define LINK_QUAL_AC_NUM AC_NUM
1540
1541/* # entries in rate scale table to support Tx retries */
1542#define LINK_QUAL_MAX_RETRY_NUM 16
1543
1544/* Tx antenna selection values */
1545#define LINK_QUAL_ANT_A_MSK (1 << 0)
1546#define LINK_QUAL_ANT_B_MSK (1 << 1)
1547#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1548
1549
1550/**
1551 * struct iwl_link_qual_general_params
1552 *
1553 * Used in REPLY_TX_LINK_QUALITY_CMD
1554 */
struct iwl_link_qual_general_params {
	u8 flags;		/* LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK */

	/* No entries at or above this (driver chosen) index contain MIMO */
	u8 mimo_delimiter;

	/* Best single antenna to use for single stream (legacy, SISO). */
	u8 single_stream_ant_msk;	/* LINK_QUAL_ANT_* */

	/* Best antennas to use for MIMO (unused for 4965, assumes both). */
	u8 dual_stream_ant_msk;		/* LINK_QUAL_ANT_* */

	/*
	 * If driver needs to use different initial rates for different
	 * EDCA QOS access categories (as implemented by tx fifos 0-3),
	 * this table will set that up, by indicating the indexes in the
	 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
	 * Otherwise, driver should set all entries to 0.
	 *
	 * Entry usage:
	 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
	 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
	 */
	u8 start_rate_index[LINK_QUAL_AC_NUM];
} __packed;
1580
1581#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
1582#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
1583#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
1584
1585#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
1586#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
1587#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
1588
1589#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
1590#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
1591#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
1592
1593/**
1594 * struct iwl_link_qual_agg_params
1595 *
1596 * Used in REPLY_TX_LINK_QUALITY_CMD
1597 */
struct iwl_link_qual_agg_params {

	/*
	 * Maximum number of uSec in aggregation.
	 * default set to 4000 (4 milliseconds) if not configured in .cfg
	 * (see LINK_QUAL_AGG_TIME_LIMIT_* bounds above)
	 */
	__le16 agg_time_limit;

	/*
	 * Number of Tx retries allowed for a frame, before that frame will
	 * no longer be considered for the start of an aggregation sequence
	 * (scheduler will then try to tx it as single frame).
	 * Driver should set this to 3.
	 */
	u8 agg_dis_start_th;

	/*
	 * Maximum number of frames in aggregation.
	 * 0 = no limit (default).  1 = no aggregation.
	 * Other values = max # frames in aggregation.
	 */
	u8 agg_frame_cnt_limit;

	__le32 reserved;
} __packed;
1623
1624/*
1625 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1626 *
1627 * For agn devices
1628 *
1629 * Each station in the agn device's internal station table has its own table
1630 * of 16
1631 * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when
1632 * an ACK is not received. This command replaces the entire table for
1633 * one station.
1634 *
1635 * NOTE: Station must already be in agn device's station table.
1636 * Use REPLY_ADD_STA.
1637 *
1638 * The rate scaling procedures described below work well. Of course, other
1639 * procedures are possible, and may work better for particular environments.
1640 *
1641 *
1642 * FILLING THE RATE TABLE
1643 *
1644 * Given a particular initial rate and mode, as determined by the rate
1645 * scaling algorithm described below, the Linux driver uses the following
1646 * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
1647 * Link Quality command:
1648 *
1649 *
1650 * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
1651 * a) Use this same initial rate for first 3 entries.
1652 * b) Find next lower available rate using same mode (SISO or MIMO),
1653 * use for next 3 entries. If no lower rate available, switch to
1654 * legacy mode (no HT40 channel, no MIMO, no short guard interval).
1655 * c) If using MIMO, set command's mimo_delimiter to number of entries
1656 * using MIMO (3 or 6).
1657 * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
1658 * no MIMO, no short guard interval), at the next lower bit rate
1659 * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
1660 * legacy procedure for remaining table entries.
1661 *
1662 * 2) If using legacy initial rate:
1663 * a) Use the initial rate for only one entry.
1664 * b) For each following entry, reduce the rate to next lower available
1665 * rate, until reaching the lowest available rate.
1666 * c) When reducing rate, also switch antenna selection.
1667 * d) Once lowest available rate is reached, repeat this rate until
1668 * rate table is filled (16 entries), switching antenna each entry.
1669 *
1670 *
1671 * ACCUMULATING HISTORY
1672 *
1673 * The rate scaling algorithm for agn devices, as implemented in Linux driver,
1674 * uses two sets of frame Tx success history: One for the current/active
1675 * modulation mode, and one for a speculative/search mode that is being
1676 * attempted. If the speculative mode turns out to be more effective (i.e.
1677 * actual transfer rate is better), then the driver continues to use the
1678 * speculative mode as the new current active mode.
1679 *
1680 * Each history set contains, separately for each possible rate, data for a
1681 * sliding window of the 62 most recent tx attempts at that rate. The data
1682 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1683 * and attempted frames, from which the driver can additionally calculate a
1684 * success ratio (success / attempted) and number of failures
1685 * (attempted - success), and control the size of the window (attempted).
1686 * The driver uses the bit map to remove successes from the success sum, as
1687 * the oldest tx attempts fall out of the window.
1688 *
1689 * When the agn device makes multiple tx attempts for a given frame, each
1690 * attempt might be at a different rate, and have different modulation
1691 * characteristics (e.g. antenna, fat channel, short guard interval), as set
1692 * up in the rate scaling table in the Link Quality command. The driver must
1693 * determine which rate table entry was used for each tx attempt, to determine
1694 * which rate-specific history to update, and record only those attempts that
1695 * match the modulation characteristics of the history set.
1696 *
1697 * When using block-ack (aggregation), all frames are transmitted at the same
1698 * rate, since there is no per-attempt acknowledgment from the destination
1699 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
1700 * rate_n_flags field. After receiving a block-ack, the driver can update
1701 * history for the entire block all at once.
1702 *
1703 *
1704 * FINDING BEST STARTING RATE:
1705 *
1706 * When working with a selected initial modulation mode (see below), the
1707 * driver attempts to find a best initial rate. The initial rate is the
1708 * first entry in the Link Quality command's rate table.
1709 *
1710 * 1) Calculate actual throughput (success ratio * expected throughput, see
1711 * table below) for current initial rate. Do this only if enough frames
1712 * have been attempted to make the value meaningful: at least 6 failed
1713 * tx attempts, or at least 8 successes. If not enough, don't try rate
1714 * scaling yet.
1715 *
1716 * 2) Find available rates adjacent to current initial rate. Available means:
1717 * a) supported by hardware &&
1718 * b) supported by association &&
1719 * c) within any constraints selected by user
1720 *
1721 * 3) Gather measured throughputs for adjacent rates. These might not have
1722 * enough history to calculate a throughput. That's okay, we might try
1723 * using one of them anyway!
1724 *
1725 * 4) Try decreasing rate if, for current rate:
1726 * a) success ratio is < 15% ||
1727 * b) lower adjacent rate has better measured throughput ||
1728 * c) higher adjacent rate has worse throughput, and lower is unmeasured
1729 *
1730 * As a sanity check, if decrease was determined above, leave rate
1731 * unchanged if:
1732 * a) lower rate unavailable
1733 * b) success ratio at current rate > 85% (very good)
1734 * c) current measured throughput is better than expected throughput
1735 * of lower rate (under perfect 100% tx conditions, see table below)
1736 *
1737 * 5) Try increasing rate if, for current rate:
1738 * a) success ratio is < 15% ||
1739 * b) both adjacent rates' throughputs are unmeasured (try it!) ||
1740 * b) higher adjacent rate has better measured throughput ||
1741 * c) lower adjacent rate has worse throughput, and higher is unmeasured
1742 *
1743 * As a sanity check, if increase was determined above, leave rate
1744 * unchanged if:
1745 * a) success ratio at current rate < 70%. This is not particularly
1746 * good performance; higher rate is sure to have poorer success.
1747 *
1748 * 6) Re-evaluate the rate after each tx frame. If working with block-
1749 * acknowledge, history and statistics may be calculated for the entire
1750 * block (including prior history that fits within the history windows),
1751 * before re-evaluation.
1752 *
1753 * FINDING BEST STARTING MODULATION MODE:
1754 *
1755 * After working with a modulation mode for a "while" (and doing rate scaling),
1756 * the driver searches for a new initial mode in an attempt to improve
1757 * throughput. The "while" is measured by numbers of attempted frames:
1758 *
1759 * For legacy mode, search for new mode after:
1760 * 480 successful frames, or 160 failed frames
1761 * For high-throughput modes (SISO or MIMO), search for new mode after:
1762 * 4500 successful frames, or 400 failed frames
1763 *
1764 * Mode switch possibilities are (3 for each mode):
1765 *
1766 * For legacy:
1767 * Change antenna, try SISO (if HT association), try MIMO (if HT association)
1768 * For SISO:
1769 * Change antenna, try MIMO, try shortened guard interval (SGI)
1770 * For MIMO:
1771 * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
1772 *
1773 * When trying a new mode, use the same bit rate as the old/current mode when
1774 * trying antenna switches and shortened guard interval. When switching to
1775 * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
1776 * for which the expected throughput (under perfect conditions) is about the
1777 * same or slightly better than the actual measured throughput delivered by
1778 * the old/current mode.
1779 *
1780 * Actual throughput can be estimated by multiplying the expected throughput
1781 * by the success ratio (successful / attempted tx frames). Frame size is
1782 * not considered in this calculation; it assumes that frame size will average
1783 * out to be fairly consistent over several samples. The following are
1784 * metric values for expected throughput assuming 100% success ratio.
1785 * Only G band has support for CCK rates:
1786 *
1787 * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
1788 *
1789 * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
1790 * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
1791 * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
1792 * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
1793 * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
1794 * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
1795 * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
1796 * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
1797 * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
1798 * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
1799 *
1800 * After the new mode has been tried for a short while (minimum of 6 failed
1801 * frames or 8 successful frames), compare success ratio and actual throughput
1802 * estimate of the new mode with the old. If either is better with the new
1803 * mode, continue to use the new mode.
1804 *
1805 * Continue comparing modes until all 3 possibilities have been tried.
1806 * If moving from legacy to HT, try all 3 possibilities from the new HT
1807 * mode. After trying all 3, a best mode is found. Continue to use this mode
1808 * for the longer "while" described above (e.g. 480 successful frames for
1809 * legacy), and then repeat the search process.
1810 *
1811 */
/* Body of REPLY_TX_LINK_QUALITY_CMD (0x4e); replaces the entire per-station
 * rate-scale table -- see the long description above. */
struct iwl_link_quality_cmd {

	/* Index of destination/recipient station in uCode's station table */
	u8 sta_id;
	u8 reserved1;
	__le16 control;			/* not used */
	struct iwl_link_qual_general_params general_params;
	struct iwl_link_qual_agg_params agg_params;

	/*
	 * Rate info; when using rate-scaling, Tx command's initial_rate_index
	 * specifies 1st Tx rate attempted, via index into this table.
	 * agn devices works its way through table when retrying Tx.
	 */
	struct {
		__le32 rate_n_flags;	/* RATE_MCS_*, IWL_RATE_* */
	} rs_table[LINK_QUAL_MAX_RETRY_NUM];
	__le32 reserved2;
} __packed;
1831
1832/*
1833 * BT configuration enable flags:
1834 * bit 0 - 1: BT channel announcement enabled
1835 * 0: disable
1836 * bit 1 - 1: priority of BT device enabled
1837 * 0: disable
1838 * bit 2 - 1: BT 2 wire support enabled
1839 * 0: disable
1840 */
/* BT coexistence completely disabled (no enable bits set) */
#define BT_COEX_DISABLE		(0x0)
#define BT_ENABLE_CHANNEL_ANNOUNCE	BIT(0)
#define BT_ENABLE_PRIORITY	BIT(1)
#define BT_ENABLE_2_WIRE	BIT(2)

/* Default "enabled" flags: channel announcement + BT priority
 * (BT_COEX_DISABLE was previously #defined twice; duplicate removed) */
#define BT_COEX_ENABLE		(BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
1848
1849#define BT_LEAD_TIME_MIN (0x0)
1850#define BT_LEAD_TIME_DEF (0x1E)
1851#define BT_LEAD_TIME_MAX (0xFF)
1852
1853#define BT_MAX_KILL_MIN (0x1)
1854#define BT_MAX_KILL_DEF (0x5)
1855#define BT_MAX_KILL_MAX (0xFF)
1856
1857#define BT_DURATION_LIMIT_DEF 625
1858#define BT_DURATION_LIMIT_MAX 1250
1859#define BT_DURATION_LIMIT_MIN 625
1860
1861#define BT_ON_THRESHOLD_DEF 4
1862#define BT_ON_THRESHOLD_MAX 1000
1863#define BT_ON_THRESHOLD_MIN 1
1864
1865#define BT_FRAG_THRESHOLD_DEF 0
1866#define BT_FRAG_THRESHOLD_MAX 0
1867#define BT_FRAG_THRESHOLD_MIN 0
1868
1869#define BT_AGG_THRESHOLD_DEF 1200
1870#define BT_AGG_THRESHOLD_MAX 8000
1871#define BT_AGG_THRESHOLD_MIN 400
1872
1873/*
1874 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
1875 *
1876 * agn devices support hardware handshake with Bluetooth device on
1877 * same platform. Bluetooth device alerts wireless device when it will Tx;
1878 * wireless device can delay or kill its own Tx to accommodate.
1879 */
struct iwl_bt_cmd {
	u8 flags;		/* BT_COEX_DISABLE or BT_ENABLE_* bits above */
	u8 lead_time;		/* BT_LEAD_TIME_MIN..MAX, default BT_LEAD_TIME_DEF */
	u8 max_kill;		/* BT_MAX_KILL_MIN..MAX, default BT_MAX_KILL_DEF */
	u8 reserved;
	__le32 kill_ack_mask;	/* ACKs the device may kill to yield to BT */
	__le32 kill_cts_mask;	/* CTSs the device may kill to yield to BT */
} __packed;
1888
1889#define IWLAGN_BT_FLAG_CHANNEL_INHIBITION BIT(0)
1890
1891#define IWLAGN_BT_FLAG_COEX_MODE_MASK (BIT(3)|BIT(4)|BIT(5))
1892#define IWLAGN_BT_FLAG_COEX_MODE_SHIFT 3
1893#define IWLAGN_BT_FLAG_COEX_MODE_DISABLED 0
1894#define IWLAGN_BT_FLAG_COEX_MODE_LEGACY_2W 1
1895#define IWLAGN_BT_FLAG_COEX_MODE_3W 2
1896#define IWLAGN_BT_FLAG_COEX_MODE_4W 3
1897
1898#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6)
1899/* Disable Sync PSPoll on SCO/eSCO */
1900#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE BIT(7)
1901
1902#define IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD -75 /* dBm */
1903#define IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD -65 /* dBm */
1904
1905#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
1906#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
1907#define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0
1908#define IWLAGN_BT_PRIO_BOOST_DEFAULT32 0xF0F0F0F0
1909
1910#define IWLAGN_BT_MAX_KILL_DEFAULT 5
1911
1912#define IWLAGN_BT3_T7_DEFAULT 1
1913
/*
 * NOTE(review): presumably selects among the kill_ack/kill_cts mask
 * variants below (DEFAULT / SCO-override / REDUCE) -- confirm with callers.
 */
enum iwl_bt_kill_idx {
	IWL_BT_KILL_DEFAULT = 0,
	IWL_BT_KILL_OVERRIDE = 1,
	IWL_BT_KILL_REDUCE = 2,
};
1919
1920#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000)
1921#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000)
1922#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO cpu_to_le32(0xffffffff)
1923#define IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE cpu_to_le32(0)
1924
1925#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2
1926
1927#define IWLAGN_BT3_T2_DEFAULT 0xc
1928
1929#define IWLAGN_BT_VALID_ENABLE_FLAGS cpu_to_le16(BIT(0))
1930#define IWLAGN_BT_VALID_BOOST cpu_to_le16(BIT(1))
1931#define IWLAGN_BT_VALID_MAX_KILL cpu_to_le16(BIT(2))
1932#define IWLAGN_BT_VALID_3W_TIMERS cpu_to_le16(BIT(3))
1933#define IWLAGN_BT_VALID_KILL_ACK_MASK cpu_to_le16(BIT(4))
1934#define IWLAGN_BT_VALID_KILL_CTS_MASK cpu_to_le16(BIT(5))
1935#define IWLAGN_BT_VALID_REDUCED_TX_PWR cpu_to_le16(BIT(6))
1936#define IWLAGN_BT_VALID_3W_LUT cpu_to_le16(BIT(7))
1937
1938#define IWLAGN_BT_ALL_VALID_MSK (IWLAGN_BT_VALID_ENABLE_FLAGS | \
1939 IWLAGN_BT_VALID_BOOST | \
1940 IWLAGN_BT_VALID_MAX_KILL | \
1941 IWLAGN_BT_VALID_3W_TIMERS | \
1942 IWLAGN_BT_VALID_KILL_ACK_MASK | \
1943 IWLAGN_BT_VALID_KILL_CTS_MASK | \
1944 IWLAGN_BT_VALID_REDUCED_TX_PWR | \
1945 IWLAGN_BT_VALID_3W_LUT)
1946
1947#define IWLAGN_BT_REDUCED_TX_PWR BIT(0)
1948
1949#define IWLAGN_BT_DECISION_LUT_SIZE 12
1950
/*
 * Common part of the advanced BT config command; embedded at the start of
 * struct iwl_bt_cmd_v1 and struct iwl_bt_cmd_v2 below.
 * NOTE(review): unlike neighboring host-command structs this one is not
 * marked __packed; all members appear naturally aligned, but confirm the
 * omission is intentional.
 */
struct iwl_basic_bt_cmd {
	u8 flags;		/* IWLAGN_BT_FLAG_* bits above */
	u8 ledtime; /* unused */
	u8 max_kill;		/* e.g. IWLAGN_BT_MAX_KILL_DEFAULT */
	u8 bt3_timer_t7_value;	/* e.g. IWLAGN_BT3_T7_DEFAULT */
	__le32 kill_ack_mask;	/* e.g. IWLAGN_BT_KILL_ACK_MASK_DEFAULT */
	__le32 kill_cts_mask;	/* e.g. IWLAGN_BT_KILL_CTS_MASK_DEFAULT */
	u8 bt3_prio_sample_time;	/* e.g. IWLAGN_BT3_PRIO_SAMPLE_DEFAULT */
	u8 bt3_timer_t2_value;		/* e.g. IWLAGN_BT3_T2_DEFAULT */
	__le16 bt4_reaction_time; /* unused */
	__le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE];
	/*
	 * bit 0: use reduced tx power for control frame
	 * bit 1 - 7: reserved
	 */
	u8 reduce_txpower;
	u8 reserved;
	__le16 valid;		/* IWLAGN_BT_VALID_* bits above */
};
1970
/* Advanced BT config command, version 1 (8-bit prio_boost). */
struct iwl_bt_cmd_v1 {
	struct iwl_basic_bt_cmd basic;
	u8 prio_boost;		/* IWLAGN_BT_PRIO_BOOST_MIN..MAX */
	/*
	 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
	 * if configure the following patterns
	 */
	u8 tx_prio_boost;	/* SW boost of wifi tx priority */
	__le16 rx_prio_boost;	/* SW boost of wifi rx priority */
};
1981
/* Advanced BT config command, version 2 (32-bit prio_boost,
 * e.g. IWLAGN_BT_PRIO_BOOST_DEFAULT32). */
struct iwl_bt_cmd_v2 {
	struct iwl_basic_bt_cmd basic;
	__le32 prio_boost;
	/*
	 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
	 * if configure the following patterns
	 */
	u8 reserved;
	u8 tx_prio_boost;	/* SW boost of wifi tx priority */
	__le16 rx_prio_boost;	/* SW boost of wifi rx priority */
};
1993
1994#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0))
1995
/* BT SCO state; "flags" carries IWLAGN_BT_SCO_ACTIVE. */
struct iwlagn_bt_sco_cmd {
	__le32 flags;		/* IWLAGN_BT_SCO_ACTIVE */
};
1999
2000/******************************************************************************
2001 * (6)
2002 * Spectrum Management (802.11h) Commands, Responses, Notifications:
2003 *
2004 *****************************************************************************/
2005
2006/*
2007 * Spectrum Management
2008 */
2009#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
2010 RXON_FILTER_CTL2HOST_MSK | \
2011 RXON_FILTER_ACCEPT_GRP_MSK | \
2012 RXON_FILTER_DIS_DECRYPT_MSK | \
2013 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
2014 RXON_FILTER_ASSOC_MSK | \
2015 RXON_FILTER_BCON_AWARE_MSK)
2016
/* One channel entry in the channels[] table of struct iwl_spectrum_cmd. */
struct iwl_measure_channel {
	__le32 duration;	/* measurement duration in extended beacon
				 * format */
	u8 channel;		/* channel to measure */
	u8 type;		/* see enum iwl_measure_type */
	__le16 reserved;
} __packed;
2024
2025/*
2026 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
2027 */
/* Body of REPLY_SPECTRUM_MEASUREMENT_CMD (0x74); requests up to 10
 * channel measurements (see channel_count / channels[] below). */
struct iwl_spectrum_cmd {
	__le16 len;		/* number of bytes starting from token */
	u8 token;		/* token id */
	u8 id;			/* measurement id -- 0 or 1 */
	u8 origin;		/* 0 = TGh, 1 = other, 2 = TGk */
	u8 periodic;		/* 1 = periodic */
	__le16 path_loss_timeout;
	__le32 start_time;	/* start time in extended beacon format */
	__le32 reserved2;
	__le32 flags;		/* rxon flags */
	__le32 filter_flags;	/* rxon filter flags */
	__le16 channel_count;	/* minimum 1, maximum 10 */
	__le16 reserved3;
	struct iwl_measure_channel channels[10];
} __packed;
2043
2044/*
2045 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
2046 */
/* Response to REPLY_SPECTRUM_MEASUREMENT_CMD (0x74). */
struct iwl_spectrum_resp {
	u8 token;
	u8 id;			/* id of the prior command replaced, or 0xff */
	__le16 status;		/* 0 - command will be handled
				 * 1 - cannot handle (conflicts with another
				 *     measurement) */
} __packed;
2054
/* Values reported in the "state" field of struct iwl_spectrum_notification. */
enum iwl_measurement_state {
	IWL_MEASUREMENT_START = 0,
	IWL_MEASUREMENT_STOP = 1,
};
2059
/* Values reported in the "status" field of struct iwl_spectrum_notification. */
enum iwl_measurement_status {
	IWL_MEASUREMENT_OK = 0,
	IWL_MEASUREMENT_CONCURRENT = 1,
	IWL_MEASUREMENT_CSA_CONFLICT = 2,
	IWL_MEASUREMENT_TGH_CONFLICT = 3,
	/* 4-5 reserved */
	IWL_MEASUREMENT_STOPPED = 6,
	IWL_MEASUREMENT_TIMEOUT = 7,
	IWL_MEASUREMENT_PERIODIC_FAILED = 8,
};
2070
2071#define NUM_ELEMENTS_IN_HISTOGRAM 8
2072
/* RPI/noise histogram embedded in struct iwl_spectrum_notification. */
struct iwl_measurement_histogram {
	__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 0.8usec counts */
	__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 1usec counts */
} __packed;
2077
2078/* clear channel availability counters */
/* clear channel availability counters */
struct iwl_measurement_cca_counters {
	__le32 ofdm;	/* CCA counter for OFDM */
	__le32 cck;	/* CCA counter for CCK */
} __packed;
2083
/* Bit flags for the "type" field of struct iwl_measure_channel. */
enum iwl_measure_type {
	IWL_MEASURE_BASIC = (1 << 0),
	IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
	IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
	IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
	IWL_MEASURE_FRAME = (1 << 4),
	/* bits 5:6 are reserved */
	IWL_MEASURE_IDLE = (1 << 7),
};
2093
2094/*
2095 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
2096 */
struct iwl_spectrum_notification {
	u8 id;			/* measurement id -- 0 or 1 */
	u8 token;
	u8 channel_index;	/* index in measurement channel list */
	u8 state;		/* 0 - start, 1 - stop */
	__le32 start_time;	/* lower 32-bits of TSF */
	u8 band;		/* 0 - 5.2GHz, 1 - 2.4GHz */
	u8 channel;
	u8 type;		/* see enum iwl_measurement_type */
	u8 reserved1;
	/* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only
	 * valid if applicable for measurement type requested. */
	__le32 cca_ofdm;	/* cca fraction time in 40Mhz clock periods */
	__le32 cca_cck;		/* cca fraction time in 44Mhz clock periods */
	__le32 cca_time;	/* channel load time in usecs */
	u8 basic_type;		/* 0 - bss, 1 - ofdm preamble, 2 -
				 * unidentified */
	u8 reserved2[3];
	struct iwl_measurement_histogram histogram;
	__le32 stop_time;	/* lower 32-bits of TSF */
	__le32 status;		/* see iwl_measurement_status */
} __packed;
2119
2120/******************************************************************************
2121 * (7)
2122 * Power Management Commands, Responses, Notifications:
2123 *
2124 *****************************************************************************/
2125
2126/**
2127 * struct iwl_powertable_cmd - Power Table Command
2128 * @flags: See below:
2129 *
2130 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
2131 *
2132 * PM allow:
2133 * bit 0 - '0' Driver not allow power management
2134 * '1' Driver allow PM (use rest of parameters)
2135 *
2136 * uCode send sleep notifications:
2137 * bit 1 - '0' Don't send sleep notification
2138 * '1' send sleep notification (SEND_PM_NOTIFICATION)
2139 *
2140 * Sleep over DTIM
 *	bit 2 - '0' PM has to wake up every DTIM
2142 * '1' PM could sleep over DTIM till listen Interval.
2143 *
2144 * PCI power managed
2145 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2146 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2147 *
2148 * Fast PD
2149 * bit 4 - '1' Put radio to sleep when receiving frame for others
2150 *
2151 * Force sleep Modes
2152 * bit 31/30- '00' use both mac/xtal sleeps
2153 * '01' force Mac sleep
2154 * '10' force xtal sleep
2155 * '11' Illegal set
2156 *
2157 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
2158 * ucode assume sleep over DTIM is allowed and we don't need to wake up
2159 * for every DTIM.
2160 */
2161#define IWL_POWER_VEC_SIZE 5
2162
2163#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2164#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
2165#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
2166#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
2167#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2168#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
2169#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
2170#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
2171#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
2172#define IWL_POWER_BT_SCO_ENA cpu_to_le16(BIT(8))
2173#define IWL_POWER_ADVANCE_PM_ENA_MSK cpu_to_le16(BIT(9))
2174
/* Body of POWER_TABLE_CMD (0x77); see flag-bit description above. */
struct iwl_powertable_cmd {
	__le16 flags;		/* IWL_POWER_* bits above */
	u8 keep_alive_seconds;
	u8 debug_flags;
	__le32 rx_data_timeout;
	__le32 tx_data_timeout;
	/* If the last entry > DTIM period, uCode assumes sleep over DTIM is
	 * allowed (see NOTE above). */
	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
	__le32 keep_alive_beacons;
} __packed;
2184
2185/*
2186 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
2187 * all devices identical.
2188 */
/* Body of PM_SLEEP_NOTIFICATION (0x7A); all devices identical. */
struct iwl_sleep_notification {
	u8 pm_sleep_mode;	/* IWL_PM_* sleep state, see enum below */
	u8 pm_wakeup_src;	/* IWL_PM_WAKEUP_BY_* value, see enum below */
	__le16 reserved;
	__le32 sleep_time;
	__le32 tsf_low;
	__le32 bcon_timer;
} __packed;
2197
2198/* Sleep states. all devices identical. */
/* Sleep states and wakeup sources. all devices identical. */
enum {
	IWL_PM_NO_SLEEP = 0,
	IWL_PM_SLP_MAC = 1,
	IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
	IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
	IWL_PM_SLP_PHY = 4,
	IWL_PM_SLP_REPENT = 5,
	IWL_PM_WAKEUP_BY_TIMER = 6,
	IWL_PM_WAKEUP_BY_DRIVER = 7,
	IWL_PM_WAKEUP_BY_RFKILL = 8,
	/* values 9-11 reserved */
	IWL_PM_NUM_OF_MODES = 12,
};
2212
2213/*
2214 * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response)
2215 */
2216#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */
2217#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */
2218#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */
/* Body of REPLY_CARD_STATE_CMD (0xa0). */
struct iwl_card_state_cmd {
	__le32 status;		/* CARD_STATE_CMD_* request new power state */
} __packed;
2222
2223/*
2224 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
2225 */
/* Body of CARD_STATE_NOTIFICATION (0xa1). */
struct iwl_card_state_notif {
	__le32 flags;		/* *_CARD_DISABLED bits, defined below */
} __packed;
2229
2230#define HW_CARD_DISABLED 0x01
2231#define SW_CARD_DISABLED 0x02
2232#define CT_CARD_DISABLED 0x04
2233#define RXON_CARD_DISABLED 0x10
2234
/* Thermal "CT kill" configuration.
 * NOTE(review): _M/_R suffixes are presumably measured vs. reference
 * critical-temperature values -- confirm against the uCode interface. */
struct iwl_ct_kill_config {
	__le32   reserved;
	__le32   critical_temperature_M;
	__le32   critical_temperature_R;
} __packed;
2240
2241/* 1000, and 6x00 */
/* 1000, and 6x00 */
struct iwl_ct_kill_throttling_config {
	__le32   critical_temperature_exit;	/* leave CT-kill below this */
	__le32   reserved;
	__le32   critical_temperature_enter;	/* enter CT-kill above this */
} __packed;
2247
2248/******************************************************************************
2249 * (8)
2250 * Scan Commands, Responses, Notifications:
2251 *
2252 *****************************************************************************/
2253
2254#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
2255#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2256
2257/**
2258 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
2259 *
2260 * One for each channel in the scan list.
2261 * Each channel can independently select:
2262 * 1) SSID for directed active scans
2263 * 2) Txpower setting (for rate specified within Tx command)
2264 * 3) How long to stay on-channel (behavior may be modified by quiet_time,
2265 * quiet_plcp_th, good_CRC_th)
2266 *
2267 * To avoid uCode errors, make sure the following are true (see comments
2268 * under struct iwl_scan_cmd about max_out_time and quiet_time):
2269 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2270 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2271 * 2) quiet_time <= active_dwell
2272 * 3) If restricting off-channel time (i.e. max_out_time !=0):
2273 * passive_dwell < max_out_time
2274 * active_dwell < max_out_time
2275 */
2276
/* One entry of the REPLY_SCAN_CMD channel table (see comment above). */
struct iwl_scan_channel {
	/*
	 * type is defined as:
	 * 0:0 1 = active, 0 = passive
	 * 1:20 SSID direct bit map; if a bit is set, then corresponding
	 *      SSID IE is transmitted in probe request.
	 * 21:31 reserved
	 */
	__le32 type;
	__le16 channel;	/* band is selected by iwl_scan_cmd "flags" field */
	u8 tx_gain;		/* gain for analog radio */
	u8 dsp_atten;		/* gain for DSP */
	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
} __packed;
2292
2293/* set number of direct probes __le32 type */
2294#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
2295
2296/**
2297 * struct iwl_ssid_ie - directed scan network information element
2298 *
2299 * Up to 20 of these may appear in REPLY_SCAN_CMD,
2300 * selected by "type" bit field in struct iwl_scan_channel;
2301 * each channel may select different ssids from among the 20 entries.
2302 * SSID IEs get transmitted in reverse order of entry.
2303 */
struct iwl_ssid_ie {
	u8 id;		/* 802.11 information element id (SSID) --
			 * NOTE(review): confirm value used by callers */
	u8 len;		/* length of ssid[] actually used */
	u8 ssid[32];	/* SSID bytes, up to 32 (not NUL-terminated) */
} __packed;
2309
2310#define PROBE_OPTION_MAX 20
2311#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2312#define IWL_GOOD_CRC_TH_DISABLED 0
2313#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2314#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2315#define IWL_MAX_CMD_SIZE 4096
2316
2317/*
2318 * REPLY_SCAN_CMD = 0x80 (command)
2319 *
2320 * The hardware scan command is very powerful; the driver can set it up to
2321 * maintain (relatively) normal network traffic while doing a scan in the
2322 * background. The max_out_time and suspend_time control the ratio of how
2323 * long the device stays on an associated network channel ("service channel")
2324 * vs. how long it's away from the service channel, i.e. tuned to other channels
2325 * for scanning.
2326 *
2327 * max_out_time is the max time off-channel (in usec), and suspend_time
2328 * is how long (in "extended beacon" format) that the scan is "suspended"
2329 * after returning to the service channel. That is, suspend_time is the
2330 * time that we stay on the service channel, doing normal work, between
2331 * scan segments. The driver may set these parameters differently to support
2332 * scanning when associated vs. not associated, and light vs. heavy traffic
2333 * loads when associated.
2334 *
 * After receiving this command, the device's scan engine does the following:
2336 *
2337 * 1) Sends SCAN_START notification to driver
2338 * 2) Checks to see if it has time to do scan for one channel
2339 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
2340 * to tell AP that we're going off-channel
2341 * 4) Tunes to first channel in scan list, does active or passive scan
2342 * 5) Sends SCAN_RESULT notification to driver
2343 * 6) Checks to see if it has time to do scan on *next* channel in list
2344 * 7) Repeats 4-6 until it no longer has time to scan the next channel
2345 * before max_out_time expires
2346 * 8) Returns to service channel
2347 * 9) Sends NULL packet with PS=0 to tell AP that we're back
2348 * 10) Stays on service channel until suspend_time expires
2349 * 11) Repeats entire process 2-10 until list is complete
2350 * 12) Sends SCAN_COMPLETE notification
2351 *
2352 * For fast, efficient scans, the scan command also has support for staying on
2353 * a channel for just a short time, if doing active scanning and getting no
2354 * responses to the transmitted probe request. This time is controlled by
2355 * quiet_time, and the number of received packets below which a channel is
2356 * considered "quiet" is controlled by quiet_plcp_threshold.
2357 *
2358 * For active scanning on channels that have regulatory restrictions against
2359 * blindly transmitting, the scan can listen before transmitting, to make sure
2360 * that there is already legitimate activity on the channel. If enough
2361 * packets are cleanly received on the channel (controlled by good_CRC_th,
2362 * typical value 1), the scan engine starts transmitting probe requests.
2363 *
2364 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2365 *
2366 * To avoid uCode errors, see timing restrictions described under
2367 * struct iwl_scan_channel.
2368 */
2369
/* values for the scan_flags byte of struct iwl_scan_cmd */
enum iwl_scan_flags {
	/* BIT(0) currently unused */
	IWL_SCAN_FLAGS_ACTION_FRAME_TX	= BIT(1),
	/* bits 2-7 reserved */
};
2375
2376struct iwl_scan_cmd {
2377 __le16 len;
2378 u8 scan_flags; /* scan flags: see enum iwl_scan_flags */
2379 u8 channel_count; /* # channels in channel list */
2380 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2381 * (only for active scan) */
2382 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2383 __le16 good_CRC_th; /* passive -> active promotion threshold */
2384 __le16 rx_chain; /* RXON_RX_CHAIN_* */
2385 __le32 max_out_time; /* max usec to be away from associated (service)
2386 * channel */
2387 __le32 suspend_time; /* pause scan this long (in "extended beacon
2388 * format") when returning to service chnl:
2389 */
2390 __le32 flags; /* RXON_FLG_* */
2391 __le32 filter_flags; /* RXON_FILTER_* */
2392
2393 /* For active scans (set to all-0s for passive scans).
2394 * Does not include payload. Must specify Tx rate; no rate scaling. */
2395 struct iwl_tx_cmd tx_cmd;
2396
2397 /* For directed active scans (set to all-0s otherwise) */
2398 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
2399
2400 /*
2401 * Probe request frame, followed by channel list.
2402 *
2403 * Size of probe request frame is specified by byte count in tx_cmd.
2404 * Channel list follows immediately after probe request frame.
2405 * Number of channels in list is specified by channel_count.
2406 * Each channel in list is of type:
2407 *
2408 * struct iwl_scan_channel channels[0];
2409 *
2410 * NOTE: Only one band of channels can be scanned per pass. You
2411 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2412 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2413 * before requesting another scan.
2414 */
2415 u8 data[0];
2416} __packed;
2417
/* If the scan can be aborted, completion is reported via the complete
 * notification carrying the abort status below. */
#define CAN_ABORT_STATUS	cpu_to_le32(0x1)
/* complete notification statuses */
#define ABORT_STATUS		0x2

/*
 * REPLY_SCAN_CMD = 0x80 (response)
 *
 * Immediate response to the scan request command.
 */
struct iwl_scanreq_notification {
	__le32 status;		/* 1: okay, 2: cannot fulfill request */
} __packed;
2429
2430/*
2431 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
2432 */
2433struct iwl_scanstart_notification {
2434 __le32 tsf_low;
2435 __le32 tsf_high;
2436 __le32 beacon_timer;
2437 u8 channel;
2438 u8 band;
2439 u8 reserved[2];
2440 __le32 status;
2441} __packed;
2442
/* owner of the current off-channel activity (scan vs. measurement) */
#define  SCAN_OWNER_STATUS 0x1
#define  MEASURE_OWNER_STATUS 0x2

/* values for the probe_status field below */
#define IWL_PROBE_STATUS_OK		0
#define IWL_PROBE_STATUS_TX_FAILED	BIT(0)
/* error statuses combined with TX_FAILED */
#define IWL_PROBE_STATUS_FAIL_TTL	BIT(1)
#define IWL_PROBE_STATUS_FAIL_BT	BIT(2)

#define NUMBER_OF_STATISTICS 1	/* first __le32 is good CRC */
/*
 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
 *
 * Sent by uCode after scanning each channel (step 5 in the sequence
 * described above REPLY_SCAN_CMD).
 */
struct iwl_scanresults_notification {
	u8 channel;		/* channel that was just scanned */
	u8 band;
	u8 probe_status;	/* IWL_PROBE_STATUS_* */
	u8 num_probe_not_sent; /* not enough time to send */
	__le32 tsf_low;
	__le32 tsf_high;
	__le32 statistics[NUMBER_OF_STATISTICS];
} __packed;
2465
2466/*
2467 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
2468 */
2469struct iwl_scancomplete_notification {
2470 u8 scanned_channels;
2471 u8 status;
2472 u8 bt_status; /* BT On/Off status */
2473 u8 last_channel;
2474 __le32 tsf_low;
2475 __le32 tsf_high;
2476} __packed;
2477
2478
2479/******************************************************************************
2480 * (9)
2481 * IBSS/AP Commands and Notifications:
2482 *
2483 *****************************************************************************/
2484
/* whether this device is currently the beacon manager of its IBSS */
enum iwl_ibss_manager {
	IWL_NOT_IBSS_MANAGER = 0,
	IWL_IBSS_MANAGER = 1,
};
2489
2490/*
2491 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
2492 */
2493
2494struct iwlagn_beacon_notif {
2495 struct iwlagn_tx_resp beacon_notify_hdr;
2496 __le32 low_tsf;
2497 __le32 high_tsf;
2498 __le32 ibss_mgr_status;
2499} __packed;
2500
2501/*
2502 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2503 */
2504
2505struct iwl_tx_beacon_cmd {
2506 struct iwl_tx_cmd tx;
2507 __le16 tim_idx;
2508 u8 tim_size;
2509 u8 reserved1;
2510 struct ieee80211_hdr frame[0]; /* beacon frame */
2511} __packed;
2512
2513/******************************************************************************
2514 * (10)
2515 * Statistics Commands and Notifications:
2516 *
2517 *****************************************************************************/
2518
#define IWL_TEMP_CONVERT 260	/* temperature conversion constant */

/* per-band channel counts used to size the histogram arrays below */
#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
#define SUP_RATE_11G_MAX_NUM_CHANNELS  12

/* Used for passing to driver number of successes and failures per rate */
struct rate_histogram {
	union {
		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
	} success;	/* per-rate success counts, one union member per band */
	union {
		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
	} failed;	/* per-rate failure counts, one union member per band */
} __packed;
2538
/* statistics command response */

/* uCode debug counters, embedded in struct statistics_general_common */
struct statistics_dbg {
	__le32 burst_check;
	__le32 burst_count;
	__le32 wait_for_silence_timeout_cnt;
	__le32 reserved[3];
} __packed;
2547
/*
 * Rx PHY-level counters; one instance each for OFDM and CCK
 * (see struct statistics_rx). false_alarm_cnt and plcp_err feed the
 * sensitivity calibration described under SENSITIVITY_CMD below.
 */
struct statistics_rx_phy {
	__le32 ina_cnt;
	__le32 fina_cnt;
	__le32 plcp_err;	/* signal locks abandoned during PLCP header */
	__le32 crc32_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 false_alarm_cnt;	/* signal locks abandoned before PLCP header */
	__le32 fina_sync_err_cnt;
	__le32 sfd_timeout;
	__le32 fina_timeout;
	__le32 unresponded_rts;
	__le32 rxe_frame_limit_overrun;
	__le32 sent_ack_cnt;
	__le32 sent_cts_cnt;
	__le32 sent_ba_rsp_cnt;
	__le32 dsp_self_kill;
	__le32 mh_format_err;
	__le32 re_acq_main_rssi_sum;
	__le32 reserved3;
} __packed;
2570
/* Rx PHY-level counters for HT (OFDM-HT) frames; see struct statistics_rx */
struct statistics_rx_ht_phy {
	__le32 plcp_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 crc32_err;
	__le32 mh_format_err;
	__le32 agg_crc32_good;	/* aggregated (A-MPDU) frames with good CRC */
	__le32 agg_mpdu_cnt;	/* MPDUs received within aggregations */
	__le32 agg_cnt;		/* number of aggregations received */
	__le32 unsupport_mcs;
} __packed;
2583
/* value of interference_data_flag when interference data is valid */
#define INTERFERENCE_DATA_AVAILABLE	cpu_to_le32(1)

/*
 * General (non-PHY) Rx counters. The beacon_silence_rssi_* and
 * beacon_energy_* fields feed the sensitivity and chain-balance
 * calibrations described under SENSITIVITY_CMD / REPLY_PHY_CALIBRATION_CMD.
 */
struct statistics_rx_non_phy {
	__le32 bogus_cts;	/* CTS received when not expecting CTS */
	__le32 bogus_ack;	/* ACK received when not expecting ACK */
	__le32 non_bssid_frames;	/* number of frames with BSSID that
					 * doesn't belong to the STA BSSID */
	__le32 filtered_frames;	/* count frames that were dumped in the
				 * filtering process */
	__le32 non_channel_beacons;	/* beacons with our bss id but not on
					 * our serving channel */
	__le32 channel_beacons;	/* beacons with our bss id and in our
				 * serving channel */
	__le32 num_missed_bcon;	/* number of missed beacons */
	__le32 adc_rx_saturation_time;	/* count in 0.8us units the time the
					 * ADC was in saturation */
	__le32 ina_detection_search_time;/* total time (in 0.8us) searched
					  * for INA */
	__le32 beacon_silence_rssi_a;	/* RSSI silence after beacon frame */
	__le32 beacon_silence_rssi_b;	/* RSSI silence after beacon frame */
	__le32 beacon_silence_rssi_c;	/* RSSI silence after beacon frame */
	__le32 interference_data_flag;	/* flag for interference data
					 * availability. 1 when data is
					 * available. */
	__le32 channel_load;		/* counts RX Enable time in uSec */
	__le32 dsp_false_alarms;	/* DSP false alarm (both OFDM
					 * and CCK) counter */
	__le32 beacon_rssi_a;	/* beacon RSSI, Rx chain A */
	__le32 beacon_rssi_b;	/* beacon RSSI, Rx chain B */
	__le32 beacon_rssi_c;	/* beacon RSSI, Rx chain C */
	__le32 beacon_energy_a;	/* beacon energy, Rx chain A */
	__le32 beacon_energy_b;	/* beacon energy, Rx chain B */
	__le32 beacon_energy_c;	/* beacon energy, Rx chain C */
} __packed;
2618
/* BT-coexistence variant: common non-PHY Rx counters plus BT extras */
struct statistics_rx_non_phy_bt {
	struct statistics_rx_non_phy common;
	/* additional stats for bt */
	__le32 num_bt_kills;
	__le32 reserved[2];
} __packed;
2625
/* all Rx statistics, as carried in struct iwl_notif_statistics */
struct statistics_rx {
	struct statistics_rx_phy ofdm;		/* legacy OFDM counters */
	struct statistics_rx_phy cck;		/* CCK counters */
	struct statistics_rx_non_phy general;	/* non-PHY counters */
	struct statistics_rx_ht_phy ofdm_ht;	/* HT counters */
} __packed;
2632
/* BT-coexistence variant of struct statistics_rx; differs only in the
 * general member (struct statistics_rx_non_phy_bt) */
struct statistics_rx_bt {
	struct statistics_rx_phy ofdm;
	struct statistics_rx_phy cck;
	struct statistics_rx_non_phy_bt general;
	struct statistics_rx_ht_phy ofdm_ht;
} __packed;
2639
2640/**
2641 * struct statistics_tx_power - current tx power
2642 *
2643 * @ant_a: current tx power on chain a in 1/2 dB step
2644 * @ant_b: current tx power on chain b in 1/2 dB step
2645 * @ant_c: current tx power on chain c in 1/2 dB step
2646 */
2647struct statistics_tx_power {
2648 u8 ant_a;
2649 u8 ant_b;
2650 u8 ant_c;
2651 u8 reserved;
2652} __packed;
2653
/* Tx aggregation (block-ack) counters, embedded in struct statistics_tx */
struct statistics_tx_non_phy_agg {
	__le32 ba_timeout;		/* block-ack response timeouts */
	__le32 ba_reschedule_frames;
	__le32 scd_query_agg_frame_cnt;
	__le32 scd_query_no_agg;
	__le32 scd_query_agg;
	__le32 scd_query_mismatch;
	__le32 frame_not_ready;
	__le32 underrun;
	__le32 bt_prio_kill;
	__le32 rx_ba_rsp_cnt;		/* block-ack responses received */
} __packed;
2666
/* all Tx statistics, as carried in struct iwl_notif_statistics */
struct statistics_tx {
	__le32 preamble_cnt;
	__le32 rx_detected_cnt;
	__le32 bt_prio_defer_cnt;	/* Tx deferred for BT priority */
	__le32 bt_prio_kill_cnt;	/* Tx killed for BT priority */
	__le32 few_bytes_cnt;
	__le32 cts_timeout;
	__le32 ack_timeout;
	__le32 expected_ack_cnt;
	__le32 actual_ack_cnt;
	__le32 dump_msdu_cnt;
	__le32 burst_abort_next_frame_mismatch_cnt;
	__le32 burst_abort_missing_next_frame_cnt;
	__le32 cts_timeout_collision;
	__le32 ack_or_ba_timeout_collision;
	struct statistics_tx_non_phy_agg agg;
	/*
	 * "tx_power" are optional parameters provided by uCode,
	 * 6000 series is the only device provide the information,
	 * Those are reserved fields for all the other devices
	 */
	struct statistics_tx_power tx_power;
	__le32 reserved1;
} __packed;
2691
2692
/* antenna diversity counters, embedded in struct statistics_general_common */
struct statistics_div {
	__le32 tx_on_a;		/* Tx time on antenna A */
	__le32 tx_on_b;		/* Tx time on antenna B */
	__le32 exec_time;
	__le32 probe_time;
	__le32 reserved1;
	__le32 reserved2;
} __packed;
2701
/* general device statistics shared by the BT and non-BT layouts below */
struct statistics_general_common {
	__le32 temperature;   /* radio temperature */
	__le32 temperature_m; /* radio voltage */
	struct statistics_dbg dbg;
	__le32 sleep_time;
	__le32 slots_out;
	__le32 slots_idle;
	__le32 ttl_timestamp;
	struct statistics_div div;
	__le32 rx_enable_counter;
	/*
	 * num_of_sos_states:
	 *  count the number of times we have to re-tune
	 *  in order to get out of bad PHY status
	 */
	__le32 num_of_sos_states;
} __packed;
2719
/* BT-coexistence activity counters, embedded in struct statistics_general_bt */
struct statistics_bt_activity {
	/* Tx statistics */
	__le32 hi_priority_tx_req_cnt;
	__le32 hi_priority_tx_denied_cnt;
	__le32 lo_priority_tx_req_cnt;
	__le32 lo_priority_tx_denied_cnt;
	/* Rx statistics */
	__le32 hi_priority_rx_req_cnt;
	__le32 hi_priority_rx_denied_cnt;
	__le32 lo_priority_rx_req_cnt;
	__le32 lo_priority_rx_denied_cnt;
} __packed;
2732
/* general statistics, non-BT layout (see struct iwl_notif_statistics) */
struct statistics_general {
	struct statistics_general_common common;
	__le32 reserved2;
	__le32 reserved3;
} __packed;
2738
/* general statistics, BT layout: adds BT activity counters to the common part */
struct statistics_general_bt {
	struct statistics_general_common common;
	struct statistics_bt_activity activity;
	__le32 reserved2;
	__le32 reserved3;
} __packed;
2745
/* host-order statistics configuration bit positions
 * (NOTE(review): relationship to the __le32 IWL_STATS_CONF_* flags below
 * is not shown here - confirm against the users of these masks) */
#define UCODE_STATISTICS_CLEAR_MSK		(0x1 << 0)
#define UCODE_STATISTICS_FREQUENCY_MSK		(0x1 << 1)
#define UCODE_STATISTICS_NARROW_BAND_MSK	(0x1 << 2)

/*
 * REPLY_STATISTICS_CMD = 0x9c,
 * all devices identical.
 *
 * This command triggers an immediate response containing uCode statistics.
 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
 *
 * If the CLEAR_STATS configuration flag is set, uCode will clear its
 * internal copy of the statistics (counters) after issuing the response.
 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
 *
 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
 * STATISTICS_NOTIFICATIONs after received beacons (see below).  This flag
 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
 */
#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1)	/* see above */
#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
struct iwl_statistics_cmd {
	__le32 configuration_flags;	/* IWL_STATS_CONF_* */
} __packed;
2770
2771/*
2772 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
2773 *
2774 * By default, uCode issues this notification after receiving a beacon
2775 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2776 * REPLY_STATISTICS_CMD 0x9c, above.
2777 *
2778 * Statistics counters continue to increment beacon after beacon, but are
2779 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
2780 * 0x9c with CLEAR_STATS bit set (see above).
2781 *
2782 * uCode also issues this notification during scans. uCode clears statistics
2783 * appropriately so that each notification contains statistics for only the
2784 * one channel that has just been scanned.
2785 */
/* bits in the flag field below */
#define STATISTICS_REPLY_FLG_BAND_24G_MSK         cpu_to_le32(0x2)
#define STATISTICS_REPLY_FLG_HT40_MODE_MSK        cpu_to_le32(0x8)

/* payload of STATISTICS_NOTIFICATION 0x9d and of the REPLY_STATISTICS_CMD
 * 0x9c response (see above), for devices without BT statistics */
struct iwl_notif_statistics {
	__le32 flag;			/* STATISTICS_REPLY_FLG_* */
	struct statistics_rx rx;
	struct statistics_tx tx;
	struct statistics_general general;
} __packed;
2795
/* BT variant of struct iwl_notif_statistics: uses the _bt rx/general
 * layouts, which carry the extra BT-coexistence counters */
struct iwl_bt_notif_statistics {
	__le32 flag;			/* STATISTICS_REPLY_FLG_* */
	struct statistics_rx_bt rx;
	struct statistics_tx tx;
	struct statistics_general_bt general;
} __packed;
2802
2803/*
2804 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
2805 *
 * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
 * missed beacons, regardless of how many were missed. Inside the
 * notification the driver can find all the beacon information: the total
 * number of missed beacons, the number of consecutive missed beacons, the
 * number of beacons received, and the number of beacons expected to be
 * received.
2812 *
2813 * If uCode detected consecutive_missed_beacons > 5, it will reset the radio
2814 * in order to bring the radio/PHY back to working state; which has no relation
2815 * to when driver will perform sensitivity calibration.
2816 *
2817 * Driver should set it own missed_beacon_threshold to decide when to perform
2818 * sensitivity calibration based on number of consecutive missed beacons in
2819 * order to improve overall performance, especially in noisy environment.
2820 *
2821 */
2822
/* driver-side bounds for missed_beacon_threshold (see comment above) */
#define IWL_MISSED_BEACON_THRESHOLD_MIN	(1)
#define IWL_MISSED_BEACON_THRESHOLD_DEF	(5)
#define IWL_MISSED_BEACON_THRESHOLD_MAX	IWL_MISSED_BEACON_THRESHOLD_DEF

/* MISSED_BEACONS_NOTIFICATION = 0xa2 payload (see comment above) */
struct iwl_missed_beacon_notif {
	__le32 consecutive_missed_beacons;
	/* "becons" is a typo in the firmware API field name; kept as-is
	 * for source compatibility with all users of this struct */
	__le32 total_missed_becons;
	__le32 num_expected_beacons;
	__le32 num_recvd_beacons;
} __packed;
2833
2834
2835/******************************************************************************
2836 * (11)
2837 * Rx Calibration Commands:
2838 *
2839 * With the uCode used for open source drivers, most Tx calibration (except
2840 * for Tx Power) and most Rx calibration is done by uCode during the
2841 * "initialize" phase of uCode boot. Driver must calibrate only:
2842 *
2843 * 1) Tx power (depends on temperature), described elsewhere
2844 * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
2845 * 3) Receiver sensitivity (to optimize signal detection)
2846 *
2847 *****************************************************************************/
2848
2849/**
2850 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
2851 *
2852 * This command sets up the Rx signal detector for a sensitivity level that
2853 * is high enough to lock onto all signals within the associated network,
2854 * but low enough to ignore signals that are below a certain threshold, so as
2855 * not to have too many "false alarms". False alarms are signals that the
2856 * Rx DSP tries to lock onto, but then discards after determining that they
2857 * are noise.
2858 *
2859 * The optimum number of false alarms is between 5 and 50 per 200 TUs
2860 * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
2861 * time listening, not transmitting). Driver must adjust sensitivity so that
2862 * the ratio of actual false alarms to actual Rx time falls within this range.
2863 *
2864 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
2865 * received beacon. These provide information to the driver to analyze the
2866 * sensitivity. Don't analyze statistics that come in from scanning, or any
2867 * other non-associated-network source. Pertinent statistics include:
2868 *
2869 * From "general" statistics (struct statistics_rx_non_phy):
2870 *
2871 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
2872 * Measure of energy of desired signal. Used for establishing a level
2873 * below which the device does not detect signals.
2874 *
2875 * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
2876 * Measure of background noise in silent period after beacon.
2877 *
2878 * channel_load
2879 * uSecs of actual Rx time during beacon period (varies according to
2880 * how much time was spent transmitting).
2881 *
2882 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
2883 *
2884 * false_alarm_cnt
2885 * Signal locks abandoned early (before phy-level header).
2886 *
2887 * plcp_err
2888 * Signal locks abandoned late (during phy-level header).
2889 *
2890 * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
2891 * beacon to beacon, i.e. each value is an accumulation of all errors
2892 * before and including the latest beacon. Values will wrap around to 0
2893 * after counting up to 2^32 - 1. Driver must differentiate vs.
2894 * previous beacon's values to determine # false alarms in the current
2895 * beacon period.
2896 *
2897 * Total number of false alarms = false_alarms + plcp_errs
2898 *
2899 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
2900 * (notice that the start points for OFDM are at or close to settings for
2901 * maximum sensitivity):
2902 *
2903 * START / MIN / MAX
2904 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
2905 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
2906 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
2907 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
2908 *
2909 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
2910 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
2911 * by *adding* 1 to all 4 of the table entries above, up to the max for
2912 * each entry. Conversely, if false alarm rate is too low (less than 5
2913 * for each 204.8 msecs listening), *subtract* 1 from each entry to
2914 * increase sensitivity.
2915 *
2916 * For CCK sensitivity, keep track of the following:
2917 *
2918 * 1). 20-beacon history of maximum background noise, indicated by
2919 * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
2920 * 3 receivers. For any given beacon, the "silence reference" is
2921 * the maximum of last 60 samples (20 beacons * 3 receivers).
2922 *
2923 * 2). 10-beacon history of strongest signal level, as indicated
2924 * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
2925 * i.e. the strength of the signal through the best receiver at the
2926 * moment. These measurements are "upside down", with lower values
2927 * for stronger signals, so max energy will be *minimum* value.
2928 *
2929 * Then for any given beacon, the driver must determine the *weakest*
2930 * of the strongest signals; this is the minimum level that needs to be
2931 * successfully detected, when using the best receiver at the moment.
2932 * "Max cck energy" is the maximum (higher value means lower energy!)
2933 * of the last 10 minima. Once this is determined, driver must add
2934 * a little margin by adding "6" to it.
2935 *
2936 * 3). Number of consecutive beacon periods with too few false alarms.
2937 * Reset this to 0 at the first beacon period that falls within the
2938 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
2939 *
2940 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
2941 * (notice that the start points for CCK are at maximum sensitivity):
2942 *
2943 * START / MIN / MAX
2944 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
2945 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
2946 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
2947 *
2948 * If actual rate of CCK false alarms (+ plcp_errors) is too high
2949 * (greater than 50 for each 204.8 msecs listening), method for reducing
2950 * sensitivity is:
2951 *
2952 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
2953 * up to max 400.
2954 *
2955 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
2956 * sensitivity has been reduced a significant amount; bring it up to
2957 * a moderate 161. Otherwise, *add* 3, up to max 200.
2958 *
2959 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
2960 * sensitivity has been reduced only a moderate or small amount;
2961 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
2962 * down to min 0. Otherwise (if gain has been significantly reduced),
2963 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
2964 *
2965 * b) Save a snapshot of the "silence reference".
2966 *
2967 * If actual rate of CCK false alarms (+ plcp_errors) is too low
2968 * (less than 5 for each 204.8 msecs listening), method for increasing
2969 * sensitivity is used only if:
2970 *
2971 * 1a) Previous beacon did not have too many false alarms
2972 * 1b) AND difference between previous "silence reference" and current
2973 * "silence reference" (prev - current) is 2 or more,
2974 * OR 2) 100 or more consecutive beacon periods have had rate of
2975 * less than 5 false alarms per 204.8 milliseconds rx time.
2976 *
2977 * Method for increasing sensitivity:
2978 *
2979 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
2980 * down to min 125.
2981 *
2982 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
2983 * down to min 200.
2984 *
2985 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
2986 *
2987 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
2988 * (between 5 and 50 for each 204.8 msecs listening):
2989 *
2990 * 1) Save a snapshot of the silence reference.
2991 *
2992 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
2993 * give some extra margin to energy threshold by *subtracting* 8
2994 * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
2995 *
2996 * For all cases (too few, too many, good range), make sure that the CCK
2997 * detection threshold (energy) is below the energy level for robust
2998 * detection over the past 10 beacon periods, the "Max cck energy".
2999 * Lower values mean higher energy; this means making sure that the value
3000 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3001 *
3002 */
3003
3004/*
3005 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
3006 */
3007#define HD_TABLE_SIZE (11) /* number of entries */
3008#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
3009#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
3010#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
3011#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
3012#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
3013#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
3014#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
3015#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
3016#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
3017#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
3018#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
3019
3020/*
3021 * Additional table entries in enhance SENSITIVITY_CMD
3022 */
3023#define HD_INA_NON_SQUARE_DET_OFDM_INDEX (11)
3024#define HD_INA_NON_SQUARE_DET_CCK_INDEX (12)
3025#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX (13)
3026#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX (14)
3027#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (15)
3028#define HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX (16)
3029#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX (17)
3030#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX (18)
3031#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (19)
3032#define HD_CCK_NON_SQUARE_DET_SLOPE_INDEX (20)
3033#define HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX (21)
3034#define HD_RESERVED (22)
3035
3036/* number of entries for enhanced tbl */
3037#define ENHANCE_HD_TABLE_SIZE (23)
3038
3039/* number of additional entries for enhanced tbl */
3040#define ENHANCE_HD_TABLE_ENTRIES (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE)
3041
/* default values for the enhanced-table entries, version 1 */
#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V1		cpu_to_le16(0)
#define HD_INA_NON_SQUARE_DET_CCK_DATA_V1		cpu_to_le16(0)
#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1		cpu_to_le16(0)
#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1	cpu_to_le16(668)
#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1	cpu_to_le16(4)
#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1		cpu_to_le16(486)
#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1	cpu_to_le16(37)
#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1		cpu_to_le16(853)
#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1	cpu_to_le16(4)
#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1		cpu_to_le16(476)
#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1		cpu_to_le16(99)

/* default values for the enhanced-table entries, version 2 */
#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V2		cpu_to_le16(1)
#define HD_INA_NON_SQUARE_DET_CCK_DATA_V2		cpu_to_le16(1)
#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2		cpu_to_le16(1)
#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2	cpu_to_le16(600)
#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2	cpu_to_le16(40)
#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2		cpu_to_le16(486)
#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2	cpu_to_le16(45)
#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2		cpu_to_le16(853)
#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2	cpu_to_le16(60)
#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2		cpu_to_le16(476)
#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2		cpu_to_le16(99)


/* Control field in struct iwl_sensitivity_cmd */
#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE	cpu_to_le16(0)
#define SENSITIVITY_CMD_CONTROL_WORK_TABLE	cpu_to_le16(1)
3070
3071/**
3072 * struct iwl_sensitivity_cmd
3073 * @control: (1) updates working table, (0) updates default table
3074 * @table: energy threshold values, use HD_* as index into table
3075 *
3076 * Always use "1" in "control" to update uCode's working table and DSP.
3077 */
3078struct iwl_sensitivity_cmd {
3079 __le16 control; /* always use "1" */
3080 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
3081} __packed;
3082
3083/*
3084 *
3085 */
3086struct iwl_enhance_sensitivity_cmd {
3087 __le16 control; /* always use "1" */
3088 __le16 enhance_table[ENHANCE_HD_TABLE_SIZE]; /* use HD_* as index */
3089} __packed;
3090
3091
3092/**
3093 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
3094 *
3095 * This command sets the relative gains of agn device's 3 radio receiver chains.
3096 *
3097 * After the first association, driver should accumulate signal and noise
3098 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
3099 * beacons from the associated network (don't collect statistics that come
3100 * in from scanning, or any other non-network source).
3101 *
3102 * DISCONNECTED ANTENNA:
3103 *
3104 * Driver should determine which antennas are actually connected, by comparing
3105 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3106 * following values over 20 beacons, one accumulator for each of the chains
3107 * a/b/c, from struct statistics_rx_non_phy:
3108 *
3109 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3110 *
3111 * Find the strongest signal from among a/b/c. Compare the other two to the
3112 * strongest. If any signal is more than 15 dB (times 20, unless you
3113 * divide the accumulated values by 20) below the strongest, the driver
3114 * considers that antenna to be disconnected, and should not try to use that
3115 * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
3116 * driver should declare the stronger one as connected, and attempt to use it
3117 * (A and B are the only 2 Tx chains!).
3118 *
3119 *
3120 * RX BALANCE:
3121 *
3122 * Driver should balance the 3 receivers (but just the ones that are connected
3123 * to antennas, see above) for gain, by comparing the average signal levels
3124 * detected during the silence after each beacon (background noise).
3125 * Accumulate (add) the following values over 20 beacons, one accumulator for
3126 * each of the chains a/b/c, from struct statistics_rx_non_phy:
3127 *
3128 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3129 *
3130 * Find the weakest background noise level from among a/b/c. This Rx chain
3131 * will be the reference, with 0 gain adjustment. Attenuate other channels by
3132 * finding noise difference:
3133 *
3134 * (accum_noise[i] - accum_noise[reference]) / 30
3135 *
3136 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3137 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
3138 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3139 * and set bit 2 to indicate "reduce gain". The value for the reference
3140 * (weakest) chain should be "0".
3141 *
3142 * diff_gain_[abc] bit fields:
3143 * 2: (1) reduce gain, (0) increase gain
3144 * 1-0: amount of gain, units of 1.5 dB
3145 */
3146
/* Phy calibration command for series */
/*
 * NOTE(review): the gaps in the numbering (10, 12..14) are presumably
 * calibration opcodes this driver does not use — confirm against the
 * firmware documentation before relying on that.
 */
enum {
	IWL_PHY_CALIBRATE_DC_CMD		= 8,
	IWL_PHY_CALIBRATE_LO_CMD		= 9,
	IWL_PHY_CALIBRATE_TX_IQ_CMD		= 11,
	IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD	= 15,
	IWL_PHY_CALIBRATE_BASE_BAND_CMD		= 16,
	IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD	= 17,
	IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD	= 18,
};

/* This enum defines the bitmap of various calibrations to enable in both
 * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
 */
enum iwl_ucode_calib_cfg {
	IWL_CALIB_CFG_RX_BB_IDX			= BIT(0),
	IWL_CALIB_CFG_DC_IDX			= BIT(1),
	IWL_CALIB_CFG_LO_IDX			= BIT(2),
	IWL_CALIB_CFG_TX_IQ_IDX			= BIT(3),
	IWL_CALIB_CFG_RX_IQ_IDX			= BIT(4),
	IWL_CALIB_CFG_NOISE_IDX			= BIT(5),
	IWL_CALIB_CFG_CRYSTAL_IDX		= BIT(6),
	IWL_CALIB_CFG_TEMPERATURE_IDX		= BIT(7),
	IWL_CALIB_CFG_PAPD_IDX			= BIT(8),
	IWL_CALIB_CFG_SENSITIVITY_IDX		= BIT(9),
	IWL_CALIB_CFG_TX_PWR_IDX		= BIT(10),
};
3174
3175#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \
3176 IWL_CALIB_CFG_DC_IDX | \
3177 IWL_CALIB_CFG_LO_IDX | \
3178 IWL_CALIB_CFG_TX_IQ_IDX | \
3179 IWL_CALIB_CFG_RX_IQ_IDX | \
3180 IWL_CALIB_CFG_CRYSTAL_IDX)
3181
3182#define IWL_CALIB_RT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \
3183 IWL_CALIB_CFG_DC_IDX | \
3184 IWL_CALIB_CFG_LO_IDX | \
3185 IWL_CALIB_CFG_TX_IQ_IDX | \
3186 IWL_CALIB_CFG_RX_IQ_IDX | \
3187 IWL_CALIB_CFG_TEMPERATURE_IDX | \
3188 IWL_CALIB_CFG_PAPD_IDX | \
3189 IWL_CALIB_CFG_TX_PWR_IDX | \
3190 IWL_CALIB_CFG_CRYSTAL_IDX)
3191
3192#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0))
3193
/* Per-calibration-type flags (all little-endian, sent to the uCode). */
struct iwl_calib_cfg_elmnt_s {
	__le32 is_enable;
	__le32 start;
	__le32 send_res;
	__le32 apply_res;
	__le32 reserved;
} __packed;

/* Calibration configuration for the one-shot ("once") and periodic
 * ("perd") phases, plus global flags
 * (e.g. IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK defined above). */
struct iwl_calib_cfg_status_s {
	struct iwl_calib_cfg_elmnt_s once;
	struct iwl_calib_cfg_elmnt_s perd;
	__le32 flags;
} __packed;

/* CALIBRATION_CFG_CMD payload. Field name prefixes suggest a uCode-side
 * ("ucd_") and a driver-side ("drv_") configuration — TODO confirm. */
struct iwl_calib_cfg_cmd {
	struct iwl_calib_cfg_status_s ucd_calib_cfg;
	struct iwl_calib_cfg_status_s drv_calib_cfg;
	__le32 reserved1;
} __packed;

/* Common header carried by every calibration command/result payload below. */
struct iwl_calib_hdr {
	u8 op_code;	/* presumably an IWL_PHY_CALIBRATE_* value — confirm */
	u8 first_group;
	u8 groups_num;
	u8 data_valid;
} __packed;
3220
3221struct iwl_calib_cmd {
3222 struct iwl_calib_hdr hdr;
3223 u8 data[0];
3224} __packed;
3225
/* IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD: crystal capacitor trim values. */
struct iwl_calib_xtal_freq_cmd {
	struct iwl_calib_hdr hdr;
	u8 cap_pin1;
	u8 cap_pin2;
	u8 pad[2];
} __packed;

/* IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD: radio temperature sensor offset. */
#define DEFAULT_RADIO_SENSOR_OFFSET    cpu_to_le16(2700)
struct iwl_calib_temperature_offset_cmd {
	struct iwl_calib_hdr hdr;
	__le16 radio_sensor_offset;
	__le16 reserved;
} __packed;

/* v2 variant of the temperature-offset calibration payload. */
struct iwl_calib_temperature_offset_v2_cmd {
	struct iwl_calib_hdr hdr;
	__le16 radio_sensor_offset_high;
	__le16 radio_sensor_offset_low;
	/* non-kernel-style camelCase name kept as-is: renaming would
	 * churn every caller for no functional gain */
	__le16 burntVoltageRef;
	__le16 reserved;
} __packed;
3247
3248/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
3249struct iwl_calib_chain_noise_reset_cmd {
3250 struct iwl_calib_hdr hdr;
3251 u8 data[0];
3252};
3253
/* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */
/* Per-chain gain deltas; encoding of the delta values is described in
 * the "RX BALANCE" / diff_gain_[abc] comment earlier in this file. */
struct iwl_calib_chain_noise_gain_cmd {
	struct iwl_calib_hdr hdr;
	u8 delta_gain_1;
	u8 delta_gain_2;
	u8 pad[2];
} __packed;
3261
3262/******************************************************************************
3263 * (12)
3264 * Miscellaneous Commands:
3265 *
3266 *****************************************************************************/
3267
3268/*
3269 * LEDs Command & Response
3270 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
3271 *
3272 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3273 * this command turns it on or off, or sets up a periodic blinking cycle.
3274 */
3275struct iwl_led_cmd {
3276 __le32 interval; /* "interval" in uSec */
3277 u8 id; /* 1: Activity, 2: Link, 3: Tech */
3278 u8 off; /* # intervals off while blinking;
3279 * "0", with >0 "on" value, turns LED on */
3280 u8 on; /* # intervals on while blinking;
3281 * "0", regardless of "off", turns LED off */
3282 u8 reserved;
3283} __packed;
3284
3285/*
3286 * station priority table entries
3287 * also used as potential "events" value for both
3288 * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD
3289 */
3290
3291/*
3292 * COEX events entry flag masks
3293 * RP - Requested Priority
3294 * WP - Win Medium Priority: priority assigned when the contention has been won
3295 */
3296#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG (0x1)
3297#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG (0x2)
3298#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG (0x4)
3299
3300#define COEX_CU_UNASSOC_IDLE_RP 4
3301#define COEX_CU_UNASSOC_MANUAL_SCAN_RP 4
3302#define COEX_CU_UNASSOC_AUTO_SCAN_RP 4
3303#define COEX_CU_CALIBRATION_RP 4
3304#define COEX_CU_PERIODIC_CALIBRATION_RP 4
3305#define COEX_CU_CONNECTION_ESTAB_RP 4
3306#define COEX_CU_ASSOCIATED_IDLE_RP 4
3307#define COEX_CU_ASSOC_MANUAL_SCAN_RP 4
3308#define COEX_CU_ASSOC_AUTO_SCAN_RP 4
3309#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP 4
3310#define COEX_CU_RF_ON_RP 6
3311#define COEX_CU_RF_OFF_RP 4
3312#define COEX_CU_STAND_ALONE_DEBUG_RP 6
3313#define COEX_CU_IPAN_ASSOC_LEVEL_RP 4
3314#define COEX_CU_RSRVD1_RP 4
3315#define COEX_CU_RSRVD2_RP 4
3316
3317#define COEX_CU_UNASSOC_IDLE_WP 3
3318#define COEX_CU_UNASSOC_MANUAL_SCAN_WP 3
3319#define COEX_CU_UNASSOC_AUTO_SCAN_WP 3
3320#define COEX_CU_CALIBRATION_WP 3
3321#define COEX_CU_PERIODIC_CALIBRATION_WP 3
3322#define COEX_CU_CONNECTION_ESTAB_WP 3
3323#define COEX_CU_ASSOCIATED_IDLE_WP 3
3324#define COEX_CU_ASSOC_MANUAL_SCAN_WP 3
3325#define COEX_CU_ASSOC_AUTO_SCAN_WP 3
3326#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP 3
3327#define COEX_CU_RF_ON_WP 3
3328#define COEX_CU_RF_OFF_WP 3
3329#define COEX_CU_STAND_ALONE_DEBUG_WP 6
3330#define COEX_CU_IPAN_ASSOC_LEVEL_WP 3
3331#define COEX_CU_RSRVD1_WP 3
3332#define COEX_CU_RSRVD2_WP 3
3333
3334#define COEX_UNASSOC_IDLE_FLAGS 0
3335#define COEX_UNASSOC_MANUAL_SCAN_FLAGS \
3336 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3337 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3338#define COEX_UNASSOC_AUTO_SCAN_FLAGS \
3339 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3340 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3341#define COEX_CALIBRATION_FLAGS \
3342 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3343 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3344#define COEX_PERIODIC_CALIBRATION_FLAGS 0
3345/*
3346 * COEX_CONNECTION_ESTAB:
3347 * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
3348 */
3349#define COEX_CONNECTION_ESTAB_FLAGS \
3350 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3351 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3352 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3353#define COEX_ASSOCIATED_IDLE_FLAGS 0
3354#define COEX_ASSOC_MANUAL_SCAN_FLAGS \
3355 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3356 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3357#define COEX_ASSOC_AUTO_SCAN_FLAGS \
3358 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3359 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3360#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0
3361#define COEX_RF_ON_FLAGS 0
3362#define COEX_RF_OFF_FLAGS 0
3363#define COEX_STAND_ALONE_DEBUG_FLAGS \
3364 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3365 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3366#define COEX_IPAN_ASSOC_LEVEL_FLAGS \
3367 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3368 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3369 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3370#define COEX_RSRVD1_FLAGS 0
3371#define COEX_RSRVD2_FLAGS 0
3372/*
3373 * COEX_CU_RF_ON is the event wrapping all radio ownership.
3374 * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
3375 */
3376#define COEX_CU_RF_ON_FLAGS \
3377 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3378 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3379 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3380
3381
/* WiFi activity states indexing the WiMAX-coex priority table
 * (sta_prio[] in struct iwl_wimax_coex_cmd below); also used as the
 * "events" values in COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD. */
enum {
	/* un-association part */
	COEX_UNASSOC_IDLE		= 0,
	COEX_UNASSOC_MANUAL_SCAN	= 1,
	COEX_UNASSOC_AUTO_SCAN		= 2,
	/* calibration */
	COEX_CALIBRATION		= 3,
	COEX_PERIODIC_CALIBRATION	= 4,
	/* connection */
	COEX_CONNECTION_ESTAB		= 5,
	/* association part */
	COEX_ASSOCIATED_IDLE		= 6,
	COEX_ASSOC_MANUAL_SCAN		= 7,
	COEX_ASSOC_AUTO_SCAN		= 8,
	COEX_ASSOC_ACTIVE_LEVEL		= 9,
	/* RF ON/OFF */
	COEX_RF_ON			= 10,
	COEX_RF_OFF			= 11,
	COEX_STAND_ALONE_DEBUG		= 12,
	/* IPAN */
	COEX_IPAN_ASSOC_LEVEL		= 13,
	/* reserved */
	COEX_RSRVD1			= 14,
	COEX_RSRVD2			= 15,
	COEX_NUM_OF_EVENTS		= 16	/* keep last: sizes sta_prio[] */
};

/*
 * Coexistence WIFI/WIMAX Command
 * COEX_PRIORITY_TABLE_CMD = 0x5a
 *
 */
/* One priority-table entry, one per COEX_* event above. */
struct iwl_wimax_coex_event_entry {
	u8 request_prio;	/* RP: requested priority */
	u8 win_medium_prio;	/* WP: priority once the medium is won */
	u8 reserved;
	u8 flags;		/* COEX_EVT_FLAG_* */
} __packed;

/* COEX flag masks */

/* Station table is valid */
#define COEX_FLAGS_STA_TABLE_VALID_MSK		(0x1)
/* UnMask wake up src at unassociated sleep */
#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK	(0x4)
/* UnMask wake up src at associated sleep */
#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK		(0x8)
/* Enable CoEx feature. */
#define COEX_FLAGS_COEX_ENABLE_MSK		(0x80)

struct iwl_wimax_coex_cmd {
	u8 flags;		/* COEX_FLAGS_* */
	u8 reserved[3];
	struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
} __packed;
3437
3438/*
3439 * Coexistence MEDIUM NOTIFICATION
3440 * COEX_MEDIUM_NOTIFICATION = 0x5b
3441 *
3442 * notification from uCode to host to indicate medium changes
3443 *
3444 */
3445/*
3446 * status field
3447 * bit 0 - 2: medium status
3448 * bit 3: medium change indication
3449 * bit 4 - 31: reserved
3450 */
3451/* status option values, (0 - 2 bits) */
3452#define COEX_MEDIUM_BUSY (0x0) /* radio belongs to WiMAX */
3453#define COEX_MEDIUM_ACTIVE (0x1) /* radio belongs to WiFi */
3454#define COEX_MEDIUM_PRE_RELEASE (0x2) /* received radio release */
3455#define COEX_MEDIUM_MSK (0x7)
3456
3457/* send notification status (1 bit) */
3458#define COEX_MEDIUM_CHANGED (0x8)
3459#define COEX_MEDIUM_CHANGED_MSK (0x8)
3460#define COEX_MEDIUM_SHIFT (3)
3461
struct iwl_coex_medium_notification {
	__le32 status;	/* COEX_MEDIUM_* bits, see masks above */
	__le32 events;	/* COEX_* event indices, see enum above */
} __packed;

/*
 * Coexistence EVENT Command
 * COEX_EVENT_CMD = 0x5c
 *
 * send from host to uCode for coex event request.
 */
/* flags options */
#define COEX_EVENT_REQUEST_MSK	(0x1)

struct iwl_coex_event_cmd {
	u8 flags;	/* COEX_EVENT_REQUEST_MSK */
	u8 event;	/* one of the COEX_* event indices above */
	__le16 reserved;
} __packed;

struct iwl_coex_event_resp {
	__le32 status;
} __packed;
3485
3486
3487/******************************************************************************
3488 * Bluetooth Coexistence commands
3489 *
3490 *****************************************************************************/
3491
3492/*
3493 * BT Status notification
3494 * REPLY_BT_COEX_PROFILE_NOTIF = 0xce
3495 */
3496enum iwl_bt_coex_profile_traffic_load {
3497 IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0,
3498 IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1,
3499 IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2,
3500 IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3,
3501/*
3502 * There are no more even though below is a u8, the
3503 * indication from the BT device only has two bits.
3504 */
3505};
3506
3507#define BT_SESSION_ACTIVITY_1_UART_MSG 0x1
3508#define BT_SESSION_ACTIVITY_2_UART_MSG 0x2
3509
3510/* BT UART message - Share Part (BT -> WiFi) */
3511#define BT_UART_MSG_FRAME1MSGTYPE_POS (0)
3512#define BT_UART_MSG_FRAME1MSGTYPE_MSK \
3513 (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
3514#define BT_UART_MSG_FRAME1SSN_POS (3)
3515#define BT_UART_MSG_FRAME1SSN_MSK \
3516 (0x3 << BT_UART_MSG_FRAME1SSN_POS)
3517#define BT_UART_MSG_FRAME1UPDATEREQ_POS (5)
3518#define BT_UART_MSG_FRAME1UPDATEREQ_MSK \
3519 (0x1 << BT_UART_MSG_FRAME1UPDATEREQ_POS)
3520#define BT_UART_MSG_FRAME1RESERVED_POS (6)
3521#define BT_UART_MSG_FRAME1RESERVED_MSK \
3522 (0x3 << BT_UART_MSG_FRAME1RESERVED_POS)
3523
3524#define BT_UART_MSG_FRAME2OPENCONNECTIONS_POS (0)
3525#define BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK \
3526 (0x3 << BT_UART_MSG_FRAME2OPENCONNECTIONS_POS)
3527#define BT_UART_MSG_FRAME2TRAFFICLOAD_POS (2)
3528#define BT_UART_MSG_FRAME2TRAFFICLOAD_MSK \
3529 (0x3 << BT_UART_MSG_FRAME2TRAFFICLOAD_POS)
3530#define BT_UART_MSG_FRAME2CHLSEQN_POS (4)
3531#define BT_UART_MSG_FRAME2CHLSEQN_MSK \
3532 (0x1 << BT_UART_MSG_FRAME2CHLSEQN_POS)
3533#define BT_UART_MSG_FRAME2INBAND_POS (5)
3534#define BT_UART_MSG_FRAME2INBAND_MSK \
3535 (0x1 << BT_UART_MSG_FRAME2INBAND_POS)
3536#define BT_UART_MSG_FRAME2RESERVED_POS (6)
3537#define BT_UART_MSG_FRAME2RESERVED_MSK \
3538 (0x3 << BT_UART_MSG_FRAME2RESERVED_POS)
3539
3540#define BT_UART_MSG_FRAME3SCOESCO_POS (0)
3541#define BT_UART_MSG_FRAME3SCOESCO_MSK \
3542 (0x1 << BT_UART_MSG_FRAME3SCOESCO_POS)
3543#define BT_UART_MSG_FRAME3SNIFF_POS (1)
3544#define BT_UART_MSG_FRAME3SNIFF_MSK \
3545 (0x1 << BT_UART_MSG_FRAME3SNIFF_POS)
3546#define BT_UART_MSG_FRAME3A2DP_POS (2)
3547#define BT_UART_MSG_FRAME3A2DP_MSK \
3548 (0x1 << BT_UART_MSG_FRAME3A2DP_POS)
3549#define BT_UART_MSG_FRAME3ACL_POS (3)
3550#define BT_UART_MSG_FRAME3ACL_MSK \
3551 (0x1 << BT_UART_MSG_FRAME3ACL_POS)
3552#define BT_UART_MSG_FRAME3MASTER_POS (4)
3553#define BT_UART_MSG_FRAME3MASTER_MSK \
3554 (0x1 << BT_UART_MSG_FRAME3MASTER_POS)
3555#define BT_UART_MSG_FRAME3OBEX_POS (5)
3556#define BT_UART_MSG_FRAME3OBEX_MSK \
3557 (0x1 << BT_UART_MSG_FRAME3OBEX_POS)
3558#define BT_UART_MSG_FRAME3RESERVED_POS (6)
3559#define BT_UART_MSG_FRAME3RESERVED_MSK \
3560 (0x3 << BT_UART_MSG_FRAME3RESERVED_POS)
3561
3562#define BT_UART_MSG_FRAME4IDLEDURATION_POS (0)
3563#define BT_UART_MSG_FRAME4IDLEDURATION_MSK \
3564 (0x3F << BT_UART_MSG_FRAME4IDLEDURATION_POS)
3565#define BT_UART_MSG_FRAME4RESERVED_POS (6)
3566#define BT_UART_MSG_FRAME4RESERVED_MSK \
3567 (0x3 << BT_UART_MSG_FRAME4RESERVED_POS)
3568
3569#define BT_UART_MSG_FRAME5TXACTIVITY_POS (0)
3570#define BT_UART_MSG_FRAME5TXACTIVITY_MSK \
3571 (0x3 << BT_UART_MSG_FRAME5TXACTIVITY_POS)
3572#define BT_UART_MSG_FRAME5RXACTIVITY_POS (2)
3573#define BT_UART_MSG_FRAME5RXACTIVITY_MSK \
3574 (0x3 << BT_UART_MSG_FRAME5RXACTIVITY_POS)
3575#define BT_UART_MSG_FRAME5ESCORETRANSMIT_POS (4)
3576#define BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK \
3577 (0x3 << BT_UART_MSG_FRAME5ESCORETRANSMIT_POS)
3578#define BT_UART_MSG_FRAME5RESERVED_POS (6)
3579#define BT_UART_MSG_FRAME5RESERVED_MSK \
3580 (0x3 << BT_UART_MSG_FRAME5RESERVED_POS)
3581
3582#define BT_UART_MSG_FRAME6SNIFFINTERVAL_POS (0)
3583#define BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK \
3584 (0x1F << BT_UART_MSG_FRAME6SNIFFINTERVAL_POS)
3585#define BT_UART_MSG_FRAME6DISCOVERABLE_POS (5)
3586#define BT_UART_MSG_FRAME6DISCOVERABLE_MSK \
3587 (0x1 << BT_UART_MSG_FRAME6DISCOVERABLE_POS)
3588#define BT_UART_MSG_FRAME6RESERVED_POS (6)
3589#define BT_UART_MSG_FRAME6RESERVED_MSK \
3590 (0x3 << BT_UART_MSG_FRAME6RESERVED_POS)
3591
3592#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0)
3593#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \
3594 (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
3595#define BT_UART_MSG_FRAME7PAGE_POS (3)
3596#define BT_UART_MSG_FRAME7PAGE_MSK \
3597 (0x1 << BT_UART_MSG_FRAME7PAGE_POS)
3598#define BT_UART_MSG_FRAME7INQUIRY_POS (4)
3599#define BT_UART_MSG_FRAME7INQUIRY_MSK \
3600 (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
3601#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5)
3602#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \
3603 (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
3604#define BT_UART_MSG_FRAME7RESERVED_POS (6)
3605#define BT_UART_MSG_FRAME7RESERVED_MSK \
3606 (0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
3607
3608/* BT Session Activity 2 UART message (BT -> WiFi) */
3609#define BT_UART_MSG_2_FRAME1RESERVED1_POS (5)
3610#define BT_UART_MSG_2_FRAME1RESERVED1_MSK \
3611 (0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
3612#define BT_UART_MSG_2_FRAME1RESERVED2_POS (6)
3613#define BT_UART_MSG_2_FRAME1RESERVED2_MSK \
3614 (0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)
3615
3616#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS (0)
3617#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK \
3618 (0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
3619#define BT_UART_MSG_2_FRAME2RESERVED_POS (6)
3620#define BT_UART_MSG_2_FRAME2RESERVED_MSK \
3621 (0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)
3622
3623#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS (0)
3624#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK \
3625 (0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
3626#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS (4)
3627#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK \
3628 (0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
3629#define BT_UART_MSG_2_FRAME3LEMASTER_POS (5)
3630#define BT_UART_MSG_2_FRAME3LEMASTER_MSK \
3631 (0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
3632#define BT_UART_MSG_2_FRAME3RESERVED_POS (6)
3633#define BT_UART_MSG_2_FRAME3RESERVED_MSK \
3634 (0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)
3635
3636#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS (0)
3637#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK \
3638 (0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
3639#define BT_UART_MSG_2_FRAME4NUMLECONN_POS (4)
3640#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK \
3641 (0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
3642#define BT_UART_MSG_2_FRAME4RESERVED_POS (6)
3643#define BT_UART_MSG_2_FRAME4RESERVED_MSK \
3644 (0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)
3645
3646#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS (0)
3647#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK \
3648 (0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
3649#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS (4)
3650#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK \
3651 (0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
3652#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS (5)
3653#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK \
3654 (0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
3655#define BT_UART_MSG_2_FRAME5RESERVED_POS (6)
3656#define BT_UART_MSG_2_FRAME5RESERVED_MSK \
3657 (0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)
3658
3659#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS (0)
3660#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK \
3661 (0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
3662#define BT_UART_MSG_2_FRAME6RFU_POS (5)
3663#define BT_UART_MSG_2_FRAME6RFU_MSK \
3664 (0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
3665#define BT_UART_MSG_2_FRAME6RESERVED_POS (6)
3666#define BT_UART_MSG_2_FRAME6RESERVED_MSK \
3667 (0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)
3668
3669#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS (0)
3670#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK \
3671 (0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
3672#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS (3)
3673#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK \
3674 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
3675#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS (4)
3676#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK \
3677 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
3678#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS (5)
3679#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK \
3680 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
3681#define BT_UART_MSG_2_FRAME7RESERVED_POS (6)
3682#define BT_UART_MSG_2_FRAME7RESERVED_MSK \
3683 (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
3684
3685
3686#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62)
3687#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
3688
3689struct iwl_bt_uart_msg {
3690 u8 header;
3691 u8 frame1;
3692 u8 frame2;
3693 u8 frame3;
3694 u8 frame4;
3695 u8 frame5;
3696 u8 frame6;
3697 u8 frame7;
3698} __attribute__((packed));
3699
3700struct iwl_bt_coex_profile_notif {
3701 struct iwl_bt_uart_msg last_bt_uart_msg;
3702 u8 bt_status; /* 0 - off, 1 - on */
3703 u8 bt_traffic_load; /* 0 .. 3? */
3704 u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */
3705 u8 reserved;
3706} __attribute__((packed));
3707
3708#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS 0
3709#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK 0x1
3710#define IWL_BT_COEX_PRIO_TBL_PRIO_POS 1
3711#define IWL_BT_COEX_PRIO_TBL_PRIO_MASK 0x0e
3712#define IWL_BT_COEX_PRIO_TBL_RESERVED_POS 4
3713#define IWL_BT_COEX_PRIO_TBL_RESERVED_MASK 0xf0
3714#define IWL_BT_COEX_PRIO_TBL_PRIO_SHIFT 1
3715
3716/*
3717 * BT Coexistence Priority table
3718 * REPLY_BT_COEX_PRIO_TABLE = 0xcc
3719 */
3720enum bt_coex_prio_table_events {
3721 BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
3722 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
3723 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
3724 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, /* DC calib */
3725 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
3726 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
3727 BT_COEX_PRIO_TBL_EVT_DTIM = 6,
3728 BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
3729 BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
3730 BT_COEX_PRIO_TBL_EVT_RESERVED0 = 9,
3731 BT_COEX_PRIO_TBL_EVT_RESERVED1 = 10,
3732 BT_COEX_PRIO_TBL_EVT_RESERVED2 = 11,
3733 BT_COEX_PRIO_TBL_EVT_RESERVED3 = 12,
3734 BT_COEX_PRIO_TBL_EVT_RESERVED4 = 13,
3735 BT_COEX_PRIO_TBL_EVT_RESERVED5 = 14,
3736 BT_COEX_PRIO_TBL_EVT_RESERVED6 = 15,
3737 /* BT_COEX_PRIO_TBL_EVT_MAX should always be last */
3738 BT_COEX_PRIO_TBL_EVT_MAX,
3739};
3740
3741enum bt_coex_prio_table_priorities {
3742 BT_COEX_PRIO_TBL_DISABLED = 0,
3743 BT_COEX_PRIO_TBL_PRIO_LOW = 1,
3744 BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
3745 BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
3746 BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
3747 BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
3748 BT_COEX_PRIO_TBL_PRIO_RSRVD1 = 6,
3749 BT_COEX_PRIO_TBL_PRIO_RSRVD2 = 7,
3750 BT_COEX_PRIO_TBL_MAX,
3751};
3752
3753struct iwl_bt_coex_prio_table_cmd {
3754 u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
3755} __attribute__((packed));
3756
3757#define IWL_BT_COEX_ENV_CLOSE 0
3758#define IWL_BT_COEX_ENV_OPEN 1
3759/*
3760 * BT Protection Envelope
3761 * REPLY_BT_COEX_PROT_ENV = 0xcd
3762 */
3763struct iwl_bt_coex_prot_env_cmd {
3764 u8 action; /* 0 = closed, 1 = open */
3765 u8 type; /* 0 .. 15 */
3766 u8 reserved[2];
3767} __attribute__((packed));
3768
3769/*
3770 * REPLY_D3_CONFIG
3771 */
3772enum iwlagn_d3_wakeup_filters {
3773 IWLAGN_D3_WAKEUP_RFKILL = BIT(0),
3774 IWLAGN_D3_WAKEUP_SYSASSERT = BIT(1),
3775};
3776
3777struct iwlagn_d3_config_cmd {
3778 __le32 min_sleep_time;
3779 __le32 wakeup_flags;
3780} __packed;
3781
3782/*
3783 * REPLY_WOWLAN_PATTERNS
3784 */
3785#define IWLAGN_WOWLAN_MIN_PATTERN_LEN 16
3786#define IWLAGN_WOWLAN_MAX_PATTERN_LEN 128
3787
3788struct iwlagn_wowlan_pattern {
3789 u8 mask[IWLAGN_WOWLAN_MAX_PATTERN_LEN / 8];
3790 u8 pattern[IWLAGN_WOWLAN_MAX_PATTERN_LEN];
3791 u8 mask_size;
3792 u8 pattern_size;
3793 __le16 reserved;
3794} __packed;
3795
3796#define IWLAGN_WOWLAN_MAX_PATTERNS 20
3797
3798struct iwlagn_wowlan_patterns_cmd {
3799 __le32 n_patterns;
3800 struct iwlagn_wowlan_pattern patterns[];
3801} __packed;
3802
3803/*
3804 * REPLY_WOWLAN_WAKEUP_FILTER
3805 */
3806enum iwlagn_wowlan_wakeup_filters {
3807 IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
3808 IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
3809 IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
3810 IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
3811 IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
3812 IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
3813 IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
3814 IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(7),
3815 IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(8),
3816};
3817
3818struct iwlagn_wowlan_wakeup_filter_cmd {
3819 __le32 enabled;
3820 __le16 non_qos_seq;
3821 __le16 reserved;
3822 __le16 qos_seq[8];
3823};
3824
3825/*
3826 * REPLY_WOWLAN_TSC_RSC_PARAMS
3827 */
3828#define IWLAGN_NUM_RSC 16
3829
3830struct tkip_sc {
3831 __le16 iv16;
3832 __le16 pad;
3833 __le32 iv32;
3834} __packed;
3835
3836struct iwlagn_tkip_rsc_tsc {
3837 struct tkip_sc unicast_rsc[IWLAGN_NUM_RSC];
3838 struct tkip_sc multicast_rsc[IWLAGN_NUM_RSC];
3839 struct tkip_sc tsc;
3840} __packed;
3841
3842struct aes_sc {
3843 __le64 pn;
3844} __packed;
3845
3846struct iwlagn_aes_rsc_tsc {
3847 struct aes_sc unicast_rsc[IWLAGN_NUM_RSC];
3848 struct aes_sc multicast_rsc[IWLAGN_NUM_RSC];
3849 struct aes_sc tsc;
3850} __packed;
3851
3852union iwlagn_all_tsc_rsc {
3853 struct iwlagn_tkip_rsc_tsc tkip;
3854 struct iwlagn_aes_rsc_tsc aes;
3855};
3856
3857struct iwlagn_wowlan_rsc_tsc_params_cmd {
3858 union iwlagn_all_tsc_rsc all_tsc_rsc;
3859} __packed;
3860
3861/*
3862 * REPLY_WOWLAN_TKIP_PARAMS
3863 */
3864#define IWLAGN_MIC_KEY_SIZE 8
3865#define IWLAGN_P1K_SIZE 5
3866struct iwlagn_mic_keys {
3867 u8 tx[IWLAGN_MIC_KEY_SIZE];
3868 u8 rx_unicast[IWLAGN_MIC_KEY_SIZE];
3869 u8 rx_mcast[IWLAGN_MIC_KEY_SIZE];
3870} __packed;
3871
3872struct iwlagn_p1k_cache {
3873 __le16 p1k[IWLAGN_P1K_SIZE];
3874} __packed;
3875
3876#define IWLAGN_NUM_RX_P1K_CACHE 2
3877
3878struct iwlagn_wowlan_tkip_params_cmd {
3879 struct iwlagn_mic_keys mic_keys;
3880 struct iwlagn_p1k_cache tx;
3881 struct iwlagn_p1k_cache rx_uni[IWLAGN_NUM_RX_P1K_CACHE];
3882 struct iwlagn_p1k_cache rx_multi[IWLAGN_NUM_RX_P1K_CACHE];
3883} __packed;
3884
3885/*
3886 * REPLY_WOWLAN_KEK_KCK_MATERIAL
3887 */
3888
3889#define IWLAGN_KCK_MAX_SIZE 32
3890#define IWLAGN_KEK_MAX_SIZE 32
3891
3892struct iwlagn_wowlan_kek_kck_material_cmd {
3893 u8 kck[IWLAGN_KCK_MAX_SIZE];
3894 u8 kek[IWLAGN_KEK_MAX_SIZE];
3895 __le16 kck_len;
3896 __le16 kek_len;
3897 __le64 replay_ctr;
3898} __packed;
3899
3900/*
3901 * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
3902 */
3903
3904/*
3905 * Minimum slot time in TU
3906 */
3907#define IWL_MIN_SLOT_TIME 20
3908
3909/**
3910 * struct iwl_wipan_slot
3911 * @width: Time in TU
3912 * @type:
3913 * 0 - BSS
3914 * 1 - PAN
3915 */
3916struct iwl_wipan_slot {
3917 __le16 width;
3918 u8 type;
3919 u8 reserved;
3920} __packed;
3921
3922#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_CTS BIT(1) /* reserved */
3923#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_QUIET BIT(2) /* reserved */
3924#define IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE BIT(3) /* reserved */
3925#define IWL_WIPAN_PARAMS_FLG_FILTER_BEACON_NOTIF BIT(4)
3926#define IWL_WIPAN_PARAMS_FLG_FULL_SLOTTED_MODE BIT(5)
3927
3928/**
3929 * struct iwl_wipan_params_cmd
3930 * @flags:
3931 * bit0: reserved
3932 * bit1: CP leave channel with CTS
3933 * bit2: CP leave channel qith Quiet
3934 * bit3: slotted mode
3935 * 1 - work in slotted mode
3936 * 0 - work in non slotted mode
3937 * bit4: filter beacon notification
3938 * bit5: full tx slotted mode. if this flag is set,
3939 * uCode will perform leaving channel methods in context switch
3940 * also when working in same channel mode
3941 * @num_slots: 1 - 10
3942 */
3943struct iwl_wipan_params_cmd {
3944 __le16 flags;
3945 u8 reserved;
3946 u8 num_slots;
3947 struct iwl_wipan_slot slots[10];
3948} __packed;
3949
3950/*
3951 * REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9
3952 *
3953 * TODO: Figure out what this is used for,
3954 * it can only switch between 2.4 GHz
3955 * channels!!
3956 */
3957
3958struct iwl_wipan_p2p_channel_switch_cmd {
3959 __le16 channel;
3960 __le16 reserved;
3961};
3962
3963/*
3964 * REPLY_WIPAN_NOA_NOTIFICATION = 0xbc
3965 *
3966 * This is used by the device to notify us of the
3967 * NoA schedule it determined so we can forward it
3968 * to userspace for inclusion in probe responses.
3969 *
3970 * In beacons, the NoA schedule is simply appended
3971 * to the frame we give the device.
3972 */
3973
/* One P2P NoA (Notice of Absence) schedule descriptor. */
struct iwl_wipan_noa_descriptor {
	u8 count;
	__le32 duration;
	__le32 interval;
	__le32 starttime;
} __packed;

/* NoA attribute as carried in the notification: header fields plus two
 * descriptors and a trailing pad byte. */
struct iwl_wipan_noa_attribute {
	u8 id;
	__le16 length;
	u8 index;
	u8 ct_window;
	struct iwl_wipan_noa_descriptor descr0, descr1;
	u8 reserved;
} __packed;

struct iwl_wipan_noa_notification {
	/* NOTE(review): plain u32 while every sibling firmware field in
	 * this file is __le32 — looks like a missing endianness
	 * annotation; check how the driver reads this before changing */
	u32 noa_active;
	struct iwl_wipan_noa_attribute noa_attribute;
} __packed;
3994
3995#endif /* __iwl_commands_h__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
deleted file mode 100644
index 5b9533eef54..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ /dev/null
@@ -1,2439 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/slab.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/debugfs.h>
33#include <linux/ieee80211.h>
34#include <net/mac80211.h>
35#include "iwl-debug.h"
36#include "iwl-io.h"
37#include "dev.h"
38#include "agn.h"
39
40/* create and remove of files */
/*
 * debugfs entry creation helpers.  Each expands to code that jumps to a
 * local "err" label on failure and (for DEBUGFS_ADD_FILE) references a
 * local "priv" variable, so they may only be used inside a function that
 * provides both.
 */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, priv,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,		\
				    parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,		\
				   parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do {			\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_u32(#name, mode,				\
				   parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)
70
71/* file operation */
/*
 * Boilerplate generators: for a given debugfs file "name", declare the
 * read and/or write handler prototypes and instantiate the matching
 * struct file_operations (iwl_dbgfs_<name>_ops).
 */
#define DEBUGFS_READ_FUNC(name)                                         \
static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
					char __user *user_buf,          \
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)                                        \
static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
					const char __user *user_buf,    \
					size_t count, loff_t *ppos);


#define DEBUGFS_READ_FILE_OPS(name)                                     \
	DEBUGFS_READ_FUNC(name);                                        \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
	.read = iwl_dbgfs_##name##_read,                                \
	.open = simple_open,                                            \
	.llseek = generic_file_llseek,                                  \
};

#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
	DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
	.write = iwl_dbgfs_##name##_write,                              \
	.open = simple_open,                                            \
	.llseek = generic_file_llseek,                                  \
};


#define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
	DEBUGFS_READ_FUNC(name);                                        \
	DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
	.write = iwl_dbgfs_##name##_write,                              \
	.read = iwl_dbgfs_##name##_read,                                \
	.open = simple_open,                                            \
	.llseek = generic_file_llseek,                                  \
};
109
/*
 * Dump a region of device SRAM as hex text.
 *
 * The region is chosen via iwl_dbgfs_sram_write() (stored in
 * priv->dbgfs_sram_offset / priv->dbgfs_sram_len); when both are zero,
 * the entire data section of the currently loaded uCode image is dumped.
 * A stored length of -4 is a sentinel set by the write handler meaning
 * "one 32-bit word, printed in device byte order".
 */
static ssize_t iwl_dbgfs_sram_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	u32 val = 0;
	char *buf;
	ssize_t ret;
	int i = 0;
	bool device_format = false;
	int offset = 0;
	int len = 0;
	int pos = 0;
	int sram;
	struct iwl_priv *priv = file->private_data;
	const struct fw_img *img;
	size_t bufsz;

	if (!iwl_is_ready_rf(priv))
		return -EAGAIN;

	/* default is to dump the entire data segment */
	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
		priv->dbgfs_sram_offset = 0x800000;
		if (!priv->ucode_loaded)
			return -EINVAL;
		img = &priv->fw->img[priv->cur_ucode];
		priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
	}
	len = priv->dbgfs_sram_len;

	/* -4 sentinel: single word, device byte order */
	if (len == -4) {
		device_format = true;
		len = 4;
	}

	/* up to 4 output chars per byte, plus headroom for the header lines */
	bufsz =  50 + len * 4;
	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
			 len);
	pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
			priv->dbgfs_sram_offset);

	/* adjust sram address since reads are only on even u32 boundaries */
	offset = priv->dbgfs_sram_offset & 0x3;
	sram = priv->dbgfs_sram_offset & ~0x3;

	/* read the first u32 from sram */
	val = iwl_read_targ_mem(priv->trans, sram);

	for (; len; len--) {
		/* put the address at the start of every line */
		if (i == 0)
			pos += scnprintf(buf + pos, bufsz - pos,
				"%08X: ", sram + offset);

		if (device_format)
			pos += scnprintf(buf + pos, bufsz - pos,
				"%02x", (val >> (8 * (3 - offset))) & 0xff);
		else
			pos += scnprintf(buf + pos, bufsz - pos,
				"%02x ", (val >> (8 * offset)) & 0xff);

		/* if all bytes processed, read the next u32 from sram */
		if (++offset == 4) {
			sram += 4;
			offset = 0;
			val = iwl_read_targ_mem(priv->trans, sram);
		}

		/* put in extra spaces and split lines for human readability */
		if (++i == 16) {
			i = 0;
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		} else if (!(i & 7)) {
			pos += scnprintf(buf + pos, bufsz - pos, "   ");
		} else if (!(i & 3)) {
			pos += scnprintf(buf + pos, bufsz - pos, " ");
		}
	}
	/* terminate a partial final line */
	if (i)
		pos += scnprintf(buf + pos, bufsz - pos, "\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
199
/*
 * Select the SRAM region dumped by iwl_dbgfs_sram_read().
 * Accepted input (hex):
 *   "offset,len"  - dump len bytes starting at offset
 *   "offset"      - store the -4 sentinel: one word, device format
 *   anything else - reset to the default (full data segment)
 */
static ssize_t iwl_dbgfs_sram_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[64];
	int buf_size;
	u32 offset, len;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) -  1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
		priv->dbgfs_sram_offset = offset;
		priv->dbgfs_sram_len = len;
	} else if (sscanf(buf, "%x", &offset) == 1) {
		priv->dbgfs_sram_offset = offset;
		priv->dbgfs_sram_len = -4;
	} else {
		priv->dbgfs_sram_offset = 0;
		priv->dbgfs_sram_len = 0;
	}

	return count;
}
227
/*
 * Expose the saved WoWLAN SRAM snapshot (presumably captured on resume —
 * confirm against the code that fills priv->wowlan_sram); its size equals
 * the data section of the WoWLAN uCode image.
 */
static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
					  char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	const struct fw_img *img = &priv->fw->img[IWL_UCODE_WOWLAN];

	if (!priv->wowlan_sram)
		return -ENODATA;

	return simple_read_from_buffer(user_buf, count, ppos,
				       priv->wowlan_sram,
				       img->sec[IWL_UCODE_SECTION_DATA].len);
}
/*
 * Dump the driver's station table: per-station address and flags plus
 * per-TID sequence/aggregation state.
 */
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_station_entry *station;
	struct iwl_tid_data *tid_data;
	char *buf;
	int i, j, pos = 0;
	ssize_t ret;
	/* Add 30 for initial string; 500 chars is a per-station estimate */
	const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);

	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
			priv->num_stations);

	for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
		station = &priv->stations[i];
		if (!station->used)
			continue;	/* skip empty table slots */
		pos += scnprintf(buf + pos, bufsz - pos,
				 "station %d - addr: %pM, flags: %#x\n",
				 i, station->sta.sta.addr,
				 station->sta.station_flags_msk);
		pos += scnprintf(buf + pos, bufsz - pos,
				"TID seqno  next_rclmd "
				"rate_n_flags state txq\n");

		for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
			tid_data = &priv->tid_data[i][j];
			pos += scnprintf(buf + pos, bufsz - pos,
				"%d:  0x%.4x 0x%.4x     0x%.8x   "
				"%d     %.2d",
				j, tid_data->seq_number,
				tid_data->next_reclaimed,
				tid_data->agg.rate_n_flags,
				tid_data->agg.state,
				tid_data->agg.txq_id);

			if (tid_data->agg.wait_for_ba)
				pos += scnprintf(buf + pos, bufsz - pos,
						 " - waitforba");
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		}

		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
297
/*
 * Hex-dump the EEPROM/OTP blob, 16 bytes per line, preceded by the NVM
 * version.  The blob length must be a multiple of 16.
 */
static ssize_t iwl_dbgfs_nvm_read(struct file *file,
				       char __user *user_buf,
				       size_t count,
				       loff_t *ppos)
{
	ssize_t ret;
	struct iwl_priv *priv = file->private_data;
	int pos = 0, ofs = 0, buf_size = 0;
	const u8 *ptr;
	char *buf;
	u16 nvm_ver;
	size_t eeprom_len = priv->eeprom_blob_size;
	/* 4 output characters per byte ("YY " plus slack) + header room */
	buf_size = 4 * eeprom_len + 256;

	if (eeprom_len % 16)
		return -ENODATA;

	ptr = priv->eeprom_blob;
	/* NOTE(review): -ENOMEM for a missing blob looks odd; -ENODATA
	 * would seem more accurate — confirm before changing userspace-
	 * visible errno. */
	if (!ptr)
		return -ENOMEM;

	/* 4 characters for byte 0xYY */
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	nvm_ver = priv->nvm_data->nvm_version;
	pos += scnprintf(buf + pos, buf_size - pos,
			 "NVM version: 0x%x\n", nvm_ver);
	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
		hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
				   buf_size - pos, 0);
		pos += strlen(buf + pos);
		if (buf_size - pos > 0)
			buf[pos++] = '\n';
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
340
341static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
342 size_t count, loff_t *ppos)
343{
344 struct iwl_priv *priv = file->private_data;
345 struct ieee80211_channel *channels = NULL;
346 const struct ieee80211_supported_band *supp_band = NULL;
347 int pos = 0, i, bufsz = PAGE_SIZE;
348 char *buf;
349 ssize_t ret;
350
351 buf = kzalloc(bufsz, GFP_KERNEL);
352 if (!buf)
353 return -ENOMEM;
354
355 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
356 if (supp_band) {
357 channels = supp_band->channels;
358
359 pos += scnprintf(buf + pos, bufsz - pos,
360 "Displaying %d channels in 2.4GHz band 802.11bg):\n",
361 supp_band->n_channels);
362
363 for (i = 0; i < supp_band->n_channels; i++)
364 pos += scnprintf(buf + pos, bufsz - pos,
365 "%d: %ddBm: BSS%s%s, %s.\n",
366 channels[i].hw_value,
367 channels[i].max_power,
368 channels[i].flags & IEEE80211_CHAN_RADAR ?
369 " (IEEE 802.11h required)" : "",
370 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
371 || (channels[i].flags &
372 IEEE80211_CHAN_RADAR)) ? "" :
373 ", IBSS",
374 channels[i].flags &
375 IEEE80211_CHAN_PASSIVE_SCAN ?
376 "passive only" : "active/passive");
377 }
378 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
379 if (supp_band) {
380 channels = supp_band->channels;
381
382 pos += scnprintf(buf + pos, bufsz - pos,
383 "Displaying %d channels in 5.2GHz band (802.11a)\n",
384 supp_band->n_channels);
385
386 for (i = 0; i < supp_band->n_channels; i++)
387 pos += scnprintf(buf + pos, bufsz - pos,
388 "%d: %ddBm: BSS%s%s, %s.\n",
389 channels[i].hw_value,
390 channels[i].max_power,
391 channels[i].flags & IEEE80211_CHAN_RADAR ?
392 " (IEEE 802.11h required)" : "",
393 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
394 || (channels[i].flags &
395 IEEE80211_CHAN_RADAR)) ? "" :
396 ", IBSS",
397 channels[i].flags &
398 IEEE80211_CHAN_PASSIVE_SCAN ?
399 "passive only" : "active/passive");
400 }
401 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
402 kfree(buf);
403 return ret;
404}
405
/* Report the driver's STATUS_* state bits, one per line, as 0/1. */
static ssize_t iwl_dbgfs_status_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char buf[512];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
		test_bit(STATUS_RF_KILL_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
		test_bit(STATUS_CT_KILL, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
		test_bit(STATUS_ALIVE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
		test_bit(STATUS_READY, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
		test_bit(STATUS_EXIT_PENDING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
		test_bit(STATUS_STATISTICS, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
		test_bit(STATUS_SCANNING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
		test_bit(STATUS_SCAN_ABORTING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
		test_bit(STATUS_SCAN_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
		test_bit(STATUS_POWER_PMI, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
		test_bit(STATUS_FW_ERROR, &priv->status));
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
439
/* Show per-notification Rx handler hit counters; only non-zero entries
 * are printed. */
static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;

	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < REPLY_MAX; cnt++) {
		if (priv->rx_handlers_stats[cnt] > 0)
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tRx handler[%36s]:\t\t %u\n",
				iwl_dvm_get_cmd_string(cnt),
				priv->rx_handlers_stats[cnt]);
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
468
469static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
470 const char __user *user_buf,
471 size_t count, loff_t *ppos)
472{
473 struct iwl_priv *priv = file->private_data;
474
475 char buf[8];
476 int buf_size;
477 u32 reset_flag;
478
479 memset(buf, 0, sizeof(buf));
480 buf_size = min(count, sizeof(buf) - 1);
481 if (copy_from_user(buf, user_buf, buf_size))
482 return -EFAULT;
483 if (sscanf(buf, "%x", &reset_flag) != 1)
484 return -EFAULT;
485 if (reset_flag == 0)
486 memset(&priv->rx_handlers_stats[0], 0,
487 sizeof(priv->rx_handlers_stats));
488
489 return count;
490}
491
/* Dump the default EDCA/QoS parameters (cw_min/cw_max/aifsn/txop) for
 * each access category of every RXON context. */
static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_rxon_context *ctx;
	int pos = 0, i;
	char buf[256 * NUM_IWL_RXON_CTX];
	const size_t bufsz = sizeof(buf);

	for_each_context(priv, ctx) {
		pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
				 ctx->ctxid);
		for (i = 0; i < AC_NUM; i++) {
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tcw_min\tcw_max\taifsn\ttxop\n");
			pos += scnprintf(buf + pos, bufsz - pos,
				"AC[%d]\t%u\t%u\t%u\t%u\n", i,
				ctx->qos_data.def_qos_parm.ac[i].cw_min,
				ctx->qos_data.def_qos_parm.ac[i].cw_max,
				ctx->qos_data.def_qos_parm.ac[i].aifsn,
				ctx->qos_data.def_qos_parm.ac[i].edca_txop);
		}
		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
518
/* Report thermal throttling mode and state; in advanced mode also dump
 * the Tx/Rx stream and HT restrictions of the current state. */
static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
				char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	struct iwl_tt_restriction *restriction;
	char buf[100];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos,
			"Thermal Throttling Mode: %s\n",
			tt->advanced_tt ? "Advance" : "Legacy");
	pos += scnprintf(buf + pos, bufsz - pos,
			"Thermal Throttling State: %d\n",
			tt->state);
	if (tt->advanced_tt) {
		restriction = tt->restriction + tt->state;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Tx mode: %d\n",
				restriction->tx_stream);
		pos += scnprintf(buf + pos, bufsz - pos,
				"Rx mode: %d\n",
				restriction->rx_stream);
		pos += scnprintf(buf + pos, bufsz - pos,
				"HT mode: %d\n",
				restriction->is_ht);
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
550
551static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
552 const char __user *user_buf,
553 size_t count, loff_t *ppos)
554{
555 struct iwl_priv *priv = file->private_data;
556 char buf[8];
557 int buf_size;
558 int ht40;
559
560 memset(buf, 0, sizeof(buf));
561 buf_size = min(count, sizeof(buf) - 1);
562 if (copy_from_user(buf, user_buf, buf_size))
563 return -EFAULT;
564 if (sscanf(buf, "%d", &ht40) != 1)
565 return -EFAULT;
566 if (!iwl_is_any_associated(priv))
567 priv->disable_ht40 = ht40 ? true : false;
568 else
569 return -EINVAL;
570
571 return count;
572}
573
574static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
575 char __user *user_buf,
576 size_t count, loff_t *ppos)
577{
578 struct iwl_priv *priv = file->private_data;
579 char buf[100];
580 int pos = 0;
581 const size_t bufsz = sizeof(buf);
582
583 pos += scnprintf(buf + pos, bufsz - pos,
584 "11n 40MHz Mode: %s\n",
585 priv->disable_ht40 ? "Disabled" : "Enabled");
586 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
587}
588
589static ssize_t iwl_dbgfs_temperature_read(struct file *file,
590 char __user *user_buf,
591 size_t count, loff_t *ppos)
592{
593 struct iwl_priv *priv = file->private_data;
594 char buf[8];
595 int pos = 0;
596 const size_t bufsz = sizeof(buf);
597
598 pos += scnprintf(buf + pos, bufsz - pos, "%d\n", priv->temperature);
599 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
600}
601
602
/*
 * Override the power-save sleep level: -1 clears the override, 1..N maps
 * to power index 0..N-1 (0 is rejected; see the comment below), then the
 * power mode is re-applied under priv->mutex.
 */
static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
						    const char __user *user_buf,
						    size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	int value;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;

	/*
	 * Our users expect 0 to be "CAM", but 0 isn't actually
	 * valid here. However, let's not confuse them and present
	 * IWL_POWER_INDEX_1 as "1", not "0".
	 */
	if (value == 0)
		return -EINVAL;
	else if (value > 0)
		value -= 1;

	if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
		return -EINVAL;

	if (!iwl_is_ready_rf(priv))
		return -EAGAIN;

	priv->power_data.debug_sleep_level_override = value;

	mutex_lock(&priv->mutex);
	iwl_power_update_mode(priv, true);
	mutex_unlock(&priv->mutex);

	return count;
}
644
/* Report the current sleep level override, shifted back to the 1-based
 * user-facing numbering established by the write handler. */
static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
						   char __user *user_buf,
						   size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[10];
	int pos, value;
	const size_t bufsz = sizeof(buf);

	/* see the write function */
	value = priv->power_data.debug_sleep_level_override;
	if (value >= 0)
		value += 1;

	pos = scnprintf(buf, bufsz, "%d\n", value);
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
662
/* Dump the last power table command sent to the device: flags, Rx/Tx
 * data timeouts and the per-level sleep interval vector. */
static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
						    char __user *user_buf,
						    size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[200];
	int pos = 0, i;
	const size_t bufsz = sizeof(buf);
	struct iwl_powertable_cmd *cmd = &priv->power_data.sleep_cmd;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "flags: %#.2x\n", le16_to_cpu(cmd->flags));
	pos += scnprintf(buf + pos, bufsz - pos,
			 "RX/TX timeout: %d/%d usec\n",
			 le32_to_cpu(cmd->rx_data_timeout),
			 le32_to_cpu(cmd->tx_data_timeout));
	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
		pos += scnprintf(buf + pos, bufsz - pos,
				 "sleep_interval[%d]: %d\n", i,
				 le32_to_cpu(cmd->sleep_interval[i]));

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
686
/* Instantiate handler prototypes and file_operations for all plain
 * debugfs files defined above. */
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_FILE_OPS(wowlan_sram);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_FILE_OPS(thermal_throttling);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
DEBUGFS_READ_FILE_OPS(temperature);
DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
DEBUGFS_READ_FILE_OPS(current_sleep_command);
700
/* Shared row formats for the uCode statistics dumps below: a single
 * value, a hex value, a current/cumulative/delta/max table row, and the
 * table header. */
static const char *fmt_value = "  %-30s %10u\n";
static const char *fmt_hex = "  %-30s      0x%02X\n";
static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
static const char *fmt_header =
	"%-32s    current  cumulative       delta         max\n";
706
/*
 * Format the uCode statistics flag word into buf; returns the number of
 * characters written.  Caller must hold priv->statistics.lock (asserted
 * via lockdep below).
 */
static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
{
	int p = 0;
	u32 flag;

	lockdep_assert_held(&priv->statistics.lock);

	flag = le32_to_cpu(priv->statistics.flag);

	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
	if (flag & UCODE_STATISTICS_CLEAR_MSK)
		p += scnprintf(buf + p, bufsz - p,
			       "\tStatistics have been cleared\n");
	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
		       (flag & UCODE_STATISTICS_FREQUENCY_MSK)
		       ? "2.4 GHz" : "5.2 GHz");
	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
		       (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
		       ? "enabled" : "disabled");

	return p;
}
729
730static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
731 char __user *user_buf,
732 size_t count, loff_t *ppos)
733{
734 struct iwl_priv *priv = file->private_data;
735 int pos = 0;
736 char *buf;
737 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
738 sizeof(struct statistics_rx_non_phy) * 40 +
739 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
740 ssize_t ret;
741 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
742 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
743 struct statistics_rx_non_phy *general, *accum_general;
744 struct statistics_rx_non_phy *delta_general, *max_general;
745 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
746
747 if (!iwl_is_alive(priv))
748 return -EAGAIN;
749
750 buf = kzalloc(bufsz, GFP_KERNEL);
751 if (!buf)
752 return -ENOMEM;
753
754 /*
755 * the statistic information display here is based on
756 * the last statistics notification from uCode
757 * might not reflect the current uCode activity
758 */
759 spin_lock_bh(&priv->statistics.lock);
760 ofdm = &priv->statistics.rx_ofdm;
761 cck = &priv->statistics.rx_cck;
762 general = &priv->statistics.rx_non_phy;
763 ht = &priv->statistics.rx_ofdm_ht;
764 accum_ofdm = &priv->accum_stats.rx_ofdm;
765 accum_cck = &priv->accum_stats.rx_cck;
766 accum_general = &priv->accum_stats.rx_non_phy;
767 accum_ht = &priv->accum_stats.rx_ofdm_ht;
768 delta_ofdm = &priv->delta_stats.rx_ofdm;
769 delta_cck = &priv->delta_stats.rx_cck;
770 delta_general = &priv->delta_stats.rx_non_phy;
771 delta_ht = &priv->delta_stats.rx_ofdm_ht;
772 max_ofdm = &priv->max_delta_stats.rx_ofdm;
773 max_cck = &priv->max_delta_stats.rx_cck;
774 max_general = &priv->max_delta_stats.rx_non_phy;
775 max_ht = &priv->max_delta_stats.rx_ofdm_ht;
776
777 pos += iwl_statistics_flag(priv, buf, bufsz);
778 pos += scnprintf(buf + pos, bufsz - pos,
779 fmt_header, "Statistics_Rx - OFDM:");
780 pos += scnprintf(buf + pos, bufsz - pos,
781 fmt_table, "ina_cnt:",
782 le32_to_cpu(ofdm->ina_cnt),
783 accum_ofdm->ina_cnt,
784 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
785 pos += scnprintf(buf + pos, bufsz - pos,
786 fmt_table, "fina_cnt:",
787 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
788 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
789 pos += scnprintf(buf + pos, bufsz - pos,
790 fmt_table, "plcp_err:",
791 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
792 delta_ofdm->plcp_err, max_ofdm->plcp_err);
793 pos += scnprintf(buf + pos, bufsz - pos,
794 fmt_table, "crc32_err:",
795 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
796 delta_ofdm->crc32_err, max_ofdm->crc32_err);
797 pos += scnprintf(buf + pos, bufsz - pos,
798 fmt_table, "overrun_err:",
799 le32_to_cpu(ofdm->overrun_err),
800 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
801 max_ofdm->overrun_err);
802 pos += scnprintf(buf + pos, bufsz - pos,
803 fmt_table, "early_overrun_err:",
804 le32_to_cpu(ofdm->early_overrun_err),
805 accum_ofdm->early_overrun_err,
806 delta_ofdm->early_overrun_err,
807 max_ofdm->early_overrun_err);
808 pos += scnprintf(buf + pos, bufsz - pos,
809 fmt_table, "crc32_good:",
810 le32_to_cpu(ofdm->crc32_good),
811 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
812 max_ofdm->crc32_good);
813 pos += scnprintf(buf + pos, bufsz - pos,
814 fmt_table, "false_alarm_cnt:",
815 le32_to_cpu(ofdm->false_alarm_cnt),
816 accum_ofdm->false_alarm_cnt,
817 delta_ofdm->false_alarm_cnt,
818 max_ofdm->false_alarm_cnt);
819 pos += scnprintf(buf + pos, bufsz - pos,
820 fmt_table, "fina_sync_err_cnt:",
821 le32_to_cpu(ofdm->fina_sync_err_cnt),
822 accum_ofdm->fina_sync_err_cnt,
823 delta_ofdm->fina_sync_err_cnt,
824 max_ofdm->fina_sync_err_cnt);
825 pos += scnprintf(buf + pos, bufsz - pos,
826 fmt_table, "sfd_timeout:",
827 le32_to_cpu(ofdm->sfd_timeout),
828 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
829 max_ofdm->sfd_timeout);
830 pos += scnprintf(buf + pos, bufsz - pos,
831 fmt_table, "fina_timeout:",
832 le32_to_cpu(ofdm->fina_timeout),
833 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
834 max_ofdm->fina_timeout);
835 pos += scnprintf(buf + pos, bufsz - pos,
836 fmt_table, "unresponded_rts:",
837 le32_to_cpu(ofdm->unresponded_rts),
838 accum_ofdm->unresponded_rts,
839 delta_ofdm->unresponded_rts,
840 max_ofdm->unresponded_rts);
841 pos += scnprintf(buf + pos, bufsz - pos,
842 fmt_table, "rxe_frame_lmt_ovrun:",
843 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
844 accum_ofdm->rxe_frame_limit_overrun,
845 delta_ofdm->rxe_frame_limit_overrun,
846 max_ofdm->rxe_frame_limit_overrun);
847 pos += scnprintf(buf + pos, bufsz - pos,
848 fmt_table, "sent_ack_cnt:",
849 le32_to_cpu(ofdm->sent_ack_cnt),
850 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
851 max_ofdm->sent_ack_cnt);
852 pos += scnprintf(buf + pos, bufsz - pos,
853 fmt_table, "sent_cts_cnt:",
854 le32_to_cpu(ofdm->sent_cts_cnt),
855 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
856 max_ofdm->sent_cts_cnt);
857 pos += scnprintf(buf + pos, bufsz - pos,
858 fmt_table, "sent_ba_rsp_cnt:",
859 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
860 accum_ofdm->sent_ba_rsp_cnt,
861 delta_ofdm->sent_ba_rsp_cnt,
862 max_ofdm->sent_ba_rsp_cnt);
863 pos += scnprintf(buf + pos, bufsz - pos,
864 fmt_table, "dsp_self_kill:",
865 le32_to_cpu(ofdm->dsp_self_kill),
866 accum_ofdm->dsp_self_kill,
867 delta_ofdm->dsp_self_kill,
868 max_ofdm->dsp_self_kill);
869 pos += scnprintf(buf + pos, bufsz - pos,
870 fmt_table, "mh_format_err:",
871 le32_to_cpu(ofdm->mh_format_err),
872 accum_ofdm->mh_format_err,
873 delta_ofdm->mh_format_err,
874 max_ofdm->mh_format_err);
875 pos += scnprintf(buf + pos, bufsz - pos,
876 fmt_table, "re_acq_main_rssi_sum:",
877 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
878 accum_ofdm->re_acq_main_rssi_sum,
879 delta_ofdm->re_acq_main_rssi_sum,
880 max_ofdm->re_acq_main_rssi_sum);
881
882 pos += scnprintf(buf + pos, bufsz - pos,
883 fmt_header, "Statistics_Rx - CCK:");
884 pos += scnprintf(buf + pos, bufsz - pos,
885 fmt_table, "ina_cnt:",
886 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
887 delta_cck->ina_cnt, max_cck->ina_cnt);
888 pos += scnprintf(buf + pos, bufsz - pos,
889 fmt_table, "fina_cnt:",
890 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
891 delta_cck->fina_cnt, max_cck->fina_cnt);
892 pos += scnprintf(buf + pos, bufsz - pos,
893 fmt_table, "plcp_err:",
894 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
895 delta_cck->plcp_err, max_cck->plcp_err);
896 pos += scnprintf(buf + pos, bufsz - pos,
897 fmt_table, "crc32_err:",
898 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
899 delta_cck->crc32_err, max_cck->crc32_err);
900 pos += scnprintf(buf + pos, bufsz - pos,
901 fmt_table, "overrun_err:",
902 le32_to_cpu(cck->overrun_err),
903 accum_cck->overrun_err, delta_cck->overrun_err,
904 max_cck->overrun_err);
905 pos += scnprintf(buf + pos, bufsz - pos,
906 fmt_table, "early_overrun_err:",
907 le32_to_cpu(cck->early_overrun_err),
908 accum_cck->early_overrun_err,
909 delta_cck->early_overrun_err,
910 max_cck->early_overrun_err);
911 pos += scnprintf(buf + pos, bufsz - pos,
912 fmt_table, "crc32_good:",
913 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
914 delta_cck->crc32_good, max_cck->crc32_good);
915 pos += scnprintf(buf + pos, bufsz - pos,
916 fmt_table, "false_alarm_cnt:",
917 le32_to_cpu(cck->false_alarm_cnt),
918 accum_cck->false_alarm_cnt,
919 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
920 pos += scnprintf(buf + pos, bufsz - pos,
921 fmt_table, "fina_sync_err_cnt:",
922 le32_to_cpu(cck->fina_sync_err_cnt),
923 accum_cck->fina_sync_err_cnt,
924 delta_cck->fina_sync_err_cnt,
925 max_cck->fina_sync_err_cnt);
926 pos += scnprintf(buf + pos, bufsz - pos,
927 fmt_table, "sfd_timeout:",
928 le32_to_cpu(cck->sfd_timeout),
929 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
930 max_cck->sfd_timeout);
931 pos += scnprintf(buf + pos, bufsz - pos,
932 fmt_table, "fina_timeout:",
933 le32_to_cpu(cck->fina_timeout),
934 accum_cck->fina_timeout, delta_cck->fina_timeout,
935 max_cck->fina_timeout);
936 pos += scnprintf(buf + pos, bufsz - pos,
937 fmt_table, "unresponded_rts:",
938 le32_to_cpu(cck->unresponded_rts),
939 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
940 max_cck->unresponded_rts);
941 pos += scnprintf(buf + pos, bufsz - pos,
942 fmt_table, "rxe_frame_lmt_ovrun:",
943 le32_to_cpu(cck->rxe_frame_limit_overrun),
944 accum_cck->rxe_frame_limit_overrun,
945 delta_cck->rxe_frame_limit_overrun,
946 max_cck->rxe_frame_limit_overrun);
947 pos += scnprintf(buf + pos, bufsz - pos,
948 fmt_table, "sent_ack_cnt:",
949 le32_to_cpu(cck->sent_ack_cnt),
950 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
951 max_cck->sent_ack_cnt);
952 pos += scnprintf(buf + pos, bufsz - pos,
953 fmt_table, "sent_cts_cnt:",
954 le32_to_cpu(cck->sent_cts_cnt),
955 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
956 max_cck->sent_cts_cnt);
957 pos += scnprintf(buf + pos, bufsz - pos,
958 fmt_table, "sent_ba_rsp_cnt:",
959 le32_to_cpu(cck->sent_ba_rsp_cnt),
960 accum_cck->sent_ba_rsp_cnt,
961 delta_cck->sent_ba_rsp_cnt,
962 max_cck->sent_ba_rsp_cnt);
963 pos += scnprintf(buf + pos, bufsz - pos,
964 fmt_table, "dsp_self_kill:",
965 le32_to_cpu(cck->dsp_self_kill),
966 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
967 max_cck->dsp_self_kill);
968 pos += scnprintf(buf + pos, bufsz - pos,
969 fmt_table, "mh_format_err:",
970 le32_to_cpu(cck->mh_format_err),
971 accum_cck->mh_format_err, delta_cck->mh_format_err,
972 max_cck->mh_format_err);
973 pos += scnprintf(buf + pos, bufsz - pos,
974 fmt_table, "re_acq_main_rssi_sum:",
975 le32_to_cpu(cck->re_acq_main_rssi_sum),
976 accum_cck->re_acq_main_rssi_sum,
977 delta_cck->re_acq_main_rssi_sum,
978 max_cck->re_acq_main_rssi_sum);
979
980 pos += scnprintf(buf + pos, bufsz - pos,
981 fmt_header, "Statistics_Rx - GENERAL:");
982 pos += scnprintf(buf + pos, bufsz - pos,
983 fmt_table, "bogus_cts:",
984 le32_to_cpu(general->bogus_cts),
985 accum_general->bogus_cts, delta_general->bogus_cts,
986 max_general->bogus_cts);
987 pos += scnprintf(buf + pos, bufsz - pos,
988 fmt_table, "bogus_ack:",
989 le32_to_cpu(general->bogus_ack),
990 accum_general->bogus_ack, delta_general->bogus_ack,
991 max_general->bogus_ack);
992 pos += scnprintf(buf + pos, bufsz - pos,
993 fmt_table, "non_bssid_frames:",
994 le32_to_cpu(general->non_bssid_frames),
995 accum_general->non_bssid_frames,
996 delta_general->non_bssid_frames,
997 max_general->non_bssid_frames);
998 pos += scnprintf(buf + pos, bufsz - pos,
999 fmt_table, "filtered_frames:",
1000 le32_to_cpu(general->filtered_frames),
1001 accum_general->filtered_frames,
1002 delta_general->filtered_frames,
1003 max_general->filtered_frames);
1004 pos += scnprintf(buf + pos, bufsz - pos,
1005 fmt_table, "non_channel_beacons:",
1006 le32_to_cpu(general->non_channel_beacons),
1007 accum_general->non_channel_beacons,
1008 delta_general->non_channel_beacons,
1009 max_general->non_channel_beacons);
1010 pos += scnprintf(buf + pos, bufsz - pos,
1011 fmt_table, "channel_beacons:",
1012 le32_to_cpu(general->channel_beacons),
1013 accum_general->channel_beacons,
1014 delta_general->channel_beacons,
1015 max_general->channel_beacons);
1016 pos += scnprintf(buf + pos, bufsz - pos,
1017 fmt_table, "num_missed_bcon:",
1018 le32_to_cpu(general->num_missed_bcon),
1019 accum_general->num_missed_bcon,
1020 delta_general->num_missed_bcon,
1021 max_general->num_missed_bcon);
1022 pos += scnprintf(buf + pos, bufsz - pos,
1023 fmt_table, "adc_rx_saturation_time:",
1024 le32_to_cpu(general->adc_rx_saturation_time),
1025 accum_general->adc_rx_saturation_time,
1026 delta_general->adc_rx_saturation_time,
1027 max_general->adc_rx_saturation_time);
1028 pos += scnprintf(buf + pos, bufsz - pos,
1029 fmt_table, "ina_detect_search_tm:",
1030 le32_to_cpu(general->ina_detection_search_time),
1031 accum_general->ina_detection_search_time,
1032 delta_general->ina_detection_search_time,
1033 max_general->ina_detection_search_time);
1034 pos += scnprintf(buf + pos, bufsz - pos,
1035 fmt_table, "beacon_silence_rssi_a:",
1036 le32_to_cpu(general->beacon_silence_rssi_a),
1037 accum_general->beacon_silence_rssi_a,
1038 delta_general->beacon_silence_rssi_a,
1039 max_general->beacon_silence_rssi_a);
1040 pos += scnprintf(buf + pos, bufsz - pos,
1041 fmt_table, "beacon_silence_rssi_b:",
1042 le32_to_cpu(general->beacon_silence_rssi_b),
1043 accum_general->beacon_silence_rssi_b,
1044 delta_general->beacon_silence_rssi_b,
1045 max_general->beacon_silence_rssi_b);
1046 pos += scnprintf(buf + pos, bufsz - pos,
1047 fmt_table, "beacon_silence_rssi_c:",
1048 le32_to_cpu(general->beacon_silence_rssi_c),
1049 accum_general->beacon_silence_rssi_c,
1050 delta_general->beacon_silence_rssi_c,
1051 max_general->beacon_silence_rssi_c);
1052 pos += scnprintf(buf + pos, bufsz - pos,
1053 fmt_table, "interference_data_flag:",
1054 le32_to_cpu(general->interference_data_flag),
1055 accum_general->interference_data_flag,
1056 delta_general->interference_data_flag,
1057 max_general->interference_data_flag);
1058 pos += scnprintf(buf + pos, bufsz - pos,
1059 fmt_table, "channel_load:",
1060 le32_to_cpu(general->channel_load),
1061 accum_general->channel_load,
1062 delta_general->channel_load,
1063 max_general->channel_load);
1064 pos += scnprintf(buf + pos, bufsz - pos,
1065 fmt_table, "dsp_false_alarms:",
1066 le32_to_cpu(general->dsp_false_alarms),
1067 accum_general->dsp_false_alarms,
1068 delta_general->dsp_false_alarms,
1069 max_general->dsp_false_alarms);
1070 pos += scnprintf(buf + pos, bufsz - pos,
1071 fmt_table, "beacon_rssi_a:",
1072 le32_to_cpu(general->beacon_rssi_a),
1073 accum_general->beacon_rssi_a,
1074 delta_general->beacon_rssi_a,
1075 max_general->beacon_rssi_a);
1076 pos += scnprintf(buf + pos, bufsz - pos,
1077 fmt_table, "beacon_rssi_b:",
1078 le32_to_cpu(general->beacon_rssi_b),
1079 accum_general->beacon_rssi_b,
1080 delta_general->beacon_rssi_b,
1081 max_general->beacon_rssi_b);
1082 pos += scnprintf(buf + pos, bufsz - pos,
1083 fmt_table, "beacon_rssi_c:",
1084 le32_to_cpu(general->beacon_rssi_c),
1085 accum_general->beacon_rssi_c,
1086 delta_general->beacon_rssi_c,
1087 max_general->beacon_rssi_c);
1088 pos += scnprintf(buf + pos, bufsz - pos,
1089 fmt_table, "beacon_energy_a:",
1090 le32_to_cpu(general->beacon_energy_a),
1091 accum_general->beacon_energy_a,
1092 delta_general->beacon_energy_a,
1093 max_general->beacon_energy_a);
1094 pos += scnprintf(buf + pos, bufsz - pos,
1095 fmt_table, "beacon_energy_b:",
1096 le32_to_cpu(general->beacon_energy_b),
1097 accum_general->beacon_energy_b,
1098 delta_general->beacon_energy_b,
1099 max_general->beacon_energy_b);
1100 pos += scnprintf(buf + pos, bufsz - pos,
1101 fmt_table, "beacon_energy_c:",
1102 le32_to_cpu(general->beacon_energy_c),
1103 accum_general->beacon_energy_c,
1104 delta_general->beacon_energy_c,
1105 max_general->beacon_energy_c);
1106
1107 pos += scnprintf(buf + pos, bufsz - pos,
1108 fmt_header, "Statistics_Rx - OFDM_HT:");
1109 pos += scnprintf(buf + pos, bufsz - pos,
1110 fmt_table, "plcp_err:",
1111 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
1112 delta_ht->plcp_err, max_ht->plcp_err);
1113 pos += scnprintf(buf + pos, bufsz - pos,
1114 fmt_table, "overrun_err:",
1115 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
1116 delta_ht->overrun_err, max_ht->overrun_err);
1117 pos += scnprintf(buf + pos, bufsz - pos,
1118 fmt_table, "early_overrun_err:",
1119 le32_to_cpu(ht->early_overrun_err),
1120 accum_ht->early_overrun_err,
1121 delta_ht->early_overrun_err,
1122 max_ht->early_overrun_err);
1123 pos += scnprintf(buf + pos, bufsz - pos,
1124 fmt_table, "crc32_good:",
1125 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
1126 delta_ht->crc32_good, max_ht->crc32_good);
1127 pos += scnprintf(buf + pos, bufsz - pos,
1128 fmt_table, "crc32_err:",
1129 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
1130 delta_ht->crc32_err, max_ht->crc32_err);
1131 pos += scnprintf(buf + pos, bufsz - pos,
1132 fmt_table, "mh_format_err:",
1133 le32_to_cpu(ht->mh_format_err),
1134 accum_ht->mh_format_err,
1135 delta_ht->mh_format_err, max_ht->mh_format_err);
1136 pos += scnprintf(buf + pos, bufsz - pos,
1137 fmt_table, "agg_crc32_good:",
1138 le32_to_cpu(ht->agg_crc32_good),
1139 accum_ht->agg_crc32_good,
1140 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
1141 pos += scnprintf(buf + pos, bufsz - pos,
1142 fmt_table, "agg_mpdu_cnt:",
1143 le32_to_cpu(ht->agg_mpdu_cnt),
1144 accum_ht->agg_mpdu_cnt,
1145 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
1146 pos += scnprintf(buf + pos, bufsz - pos,
1147 fmt_table, "agg_cnt:",
1148 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
1149 delta_ht->agg_cnt, max_ht->agg_cnt);
1150 pos += scnprintf(buf + pos, bufsz - pos,
1151 fmt_table, "unsupport_mcs:",
1152 le32_to_cpu(ht->unsupport_mcs),
1153 accum_ht->unsupport_mcs,
1154 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
1155
1156 spin_unlock_bh(&priv->statistics.lock);
1157
1158 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1159 kfree(buf);
1160 return ret;
1161}
1162
/*
 * debugfs read handler for the uCode TX statistics file.
 *
 * Formats the most recently received TX statistics snapshot into a
 * four-column text table (current / accumulated / delta / max-delta,
 * per the shared fmt_table format) and copies it to userspace.
 *
 * Returns the number of bytes copied into @user_buf, -EAGAIN if the
 * uCode is not alive, or -ENOMEM if the format buffer cannot be
 * allocated.
 */
static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char *buf;
	/* generous worst case: one formatted line per counter plus headers */
	int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
	ssize_t ret;
	struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* the statistic information display here is based on
	 * the last statistics notification from uCode
	 * might not reflect the current uCode activity
	 */
	/* lock out statistics updates so the four views stay consistent */
	spin_lock_bh(&priv->statistics.lock);

	tx = &priv->statistics.tx;
	accum_tx = &priv->accum_stats.tx;
	delta_tx = &priv->delta_stats.tx;
	max_tx = &priv->max_delta_stats.tx;

	pos += iwl_statistics_flag(priv, buf, bufsz);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_header, "Statistics_Tx:");
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "preamble:",
			 le32_to_cpu(tx->preamble_cnt),
			 accum_tx->preamble_cnt,
			 delta_tx->preamble_cnt, max_tx->preamble_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "rx_detected_cnt:",
			 le32_to_cpu(tx->rx_detected_cnt),
			 accum_tx->rx_detected_cnt,
			 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "bt_prio_defer_cnt:",
			 le32_to_cpu(tx->bt_prio_defer_cnt),
			 accum_tx->bt_prio_defer_cnt,
			 delta_tx->bt_prio_defer_cnt,
			 max_tx->bt_prio_defer_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "bt_prio_kill_cnt:",
			 le32_to_cpu(tx->bt_prio_kill_cnt),
			 accum_tx->bt_prio_kill_cnt,
			 delta_tx->bt_prio_kill_cnt,
			 max_tx->bt_prio_kill_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "few_bytes_cnt:",
			 le32_to_cpu(tx->few_bytes_cnt),
			 accum_tx->few_bytes_cnt,
			 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "cts_timeout:",
			 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
			 delta_tx->cts_timeout, max_tx->cts_timeout);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "ack_timeout:",
			 le32_to_cpu(tx->ack_timeout),
			 accum_tx->ack_timeout,
			 delta_tx->ack_timeout, max_tx->ack_timeout);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "expected_ack_cnt:",
			 le32_to_cpu(tx->expected_ack_cnt),
			 accum_tx->expected_ack_cnt,
			 delta_tx->expected_ack_cnt,
			 max_tx->expected_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "actual_ack_cnt:",
			 le32_to_cpu(tx->actual_ack_cnt),
			 accum_tx->actual_ack_cnt,
			 delta_tx->actual_ack_cnt,
			 max_tx->actual_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "dump_msdu_cnt:",
			 le32_to_cpu(tx->dump_msdu_cnt),
			 accum_tx->dump_msdu_cnt,
			 delta_tx->dump_msdu_cnt,
			 max_tx->dump_msdu_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "abort_nxt_frame_mismatch:",
			 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
			 accum_tx->burst_abort_next_frame_mismatch_cnt,
			 delta_tx->burst_abort_next_frame_mismatch_cnt,
			 max_tx->burst_abort_next_frame_mismatch_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "abort_missing_nxt_frame:",
			 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
			 accum_tx->burst_abort_missing_next_frame_cnt,
			 delta_tx->burst_abort_missing_next_frame_cnt,
			 max_tx->burst_abort_missing_next_frame_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "cts_timeout_collision:",
			 le32_to_cpu(tx->cts_timeout_collision),
			 accum_tx->cts_timeout_collision,
			 delta_tx->cts_timeout_collision,
			 max_tx->cts_timeout_collision);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "ack_ba_timeout_collision:",
			 le32_to_cpu(tx->ack_or_ba_timeout_collision),
			 accum_tx->ack_or_ba_timeout_collision,
			 delta_tx->ack_or_ba_timeout_collision,
			 max_tx->ack_or_ba_timeout_collision);
	/* aggregation (block-ack) counters live in the nested agg struct */
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg ba_timeout:",
			 le32_to_cpu(tx->agg.ba_timeout),
			 accum_tx->agg.ba_timeout,
			 delta_tx->agg.ba_timeout,
			 max_tx->agg.ba_timeout);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg ba_resched_frames:",
			 le32_to_cpu(tx->agg.ba_reschedule_frames),
			 accum_tx->agg.ba_reschedule_frames,
			 delta_tx->agg.ba_reschedule_frames,
			 max_tx->agg.ba_reschedule_frames);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg scd_query_agg_frame:",
			 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
			 accum_tx->agg.scd_query_agg_frame_cnt,
			 delta_tx->agg.scd_query_agg_frame_cnt,
			 max_tx->agg.scd_query_agg_frame_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg scd_query_no_agg:",
			 le32_to_cpu(tx->agg.scd_query_no_agg),
			 accum_tx->agg.scd_query_no_agg,
			 delta_tx->agg.scd_query_no_agg,
			 max_tx->agg.scd_query_no_agg);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg scd_query_agg:",
			 le32_to_cpu(tx->agg.scd_query_agg),
			 accum_tx->agg.scd_query_agg,
			 delta_tx->agg.scd_query_agg,
			 max_tx->agg.scd_query_agg);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg scd_query_mismatch:",
			 le32_to_cpu(tx->agg.scd_query_mismatch),
			 accum_tx->agg.scd_query_mismatch,
			 delta_tx->agg.scd_query_mismatch,
			 max_tx->agg.scd_query_mismatch);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg frame_not_ready:",
			 le32_to_cpu(tx->agg.frame_not_ready),
			 accum_tx->agg.frame_not_ready,
			 delta_tx->agg.frame_not_ready,
			 max_tx->agg.frame_not_ready);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg underrun:",
			 le32_to_cpu(tx->agg.underrun),
			 accum_tx->agg.underrun,
			 delta_tx->agg.underrun, max_tx->agg.underrun);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg bt_prio_kill:",
			 le32_to_cpu(tx->agg.bt_prio_kill),
			 accum_tx->agg.bt_prio_kill,
			 delta_tx->agg.bt_prio_kill,
			 max_tx->agg.bt_prio_kill);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg rx_ba_rsp_cnt:",
			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
			 accum_tx->agg.rx_ba_rsp_cnt,
			 delta_tx->agg.rx_ba_rsp_cnt,
			 max_tx->agg.rx_ba_rsp_cnt);

	/* report per-antenna tx power only for antennas that are both
	 * valid in the NVM data and actually reported by the uCode */
	if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"tx power: (1/2 dB step)\n");
		if ((priv->nvm_data->valid_tx_ant & ANT_A) &&
		    tx->tx_power.ant_a)
			pos += scnprintf(buf + pos, bufsz - pos,
					fmt_hex, "antenna A:",
					tx->tx_power.ant_a);
		if ((priv->nvm_data->valid_tx_ant & ANT_B) &&
		    tx->tx_power.ant_b)
			pos += scnprintf(buf + pos, bufsz - pos,
					fmt_hex, "antenna B:",
					tx->tx_power.ant_b);
		if ((priv->nvm_data->valid_tx_ant & ANT_C) &&
		    tx->tx_power.ant_c)
			pos += scnprintf(buf + pos, bufsz - pos,
					fmt_hex, "antenna C:",
					tx->tx_power.ant_c);
	}

	spin_unlock_bh(&priv->statistics.lock);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1359
/*
 * debugfs read handler for the uCode general statistics file.
 *
 * Dumps the "general" section of the last uCode statistics
 * notification: temperature and timestamp values (fmt_value rows) plus
 * debug (dbg) and antenna-diversity (div) counters as four-column
 * current/accumulated/delta/max tables (fmt_table rows).
 *
 * Returns the number of bytes copied into @user_buf, -EAGAIN if the
 * uCode is not alive, or -ENOMEM on allocation failure.
 */
static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char *buf;
	/* generous worst case for all formatted rows plus headers */
	int bufsz = sizeof(struct statistics_general) * 10 + 300;
	ssize_t ret;
	struct statistics_general_common *general, *accum_general;
	struct statistics_general_common *delta_general, *max_general;
	struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
	struct statistics_div *div, *accum_div, *delta_div, *max_div;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* the statistic information display here is based on
	 * the last statistics notification from uCode
	 * might not reflect the current uCode activity
	 */

	/* lock out statistics updates so the four views stay consistent */
	spin_lock_bh(&priv->statistics.lock);

	/* dbg and div are nested inside the common general statistics */
	general = &priv->statistics.common;
	dbg = &priv->statistics.common.dbg;
	div = &priv->statistics.common.div;
	accum_general = &priv->accum_stats.common;
	accum_dbg = &priv->accum_stats.common.dbg;
	accum_div = &priv->accum_stats.common.div;
	delta_general = &priv->delta_stats.common;
	max_general = &priv->max_delta_stats.common;
	delta_dbg = &priv->delta_stats.common.dbg;
	max_dbg = &priv->max_delta_stats.common.dbg;
	delta_div = &priv->delta_stats.common.div;
	max_div = &priv->max_delta_stats.common.div;

	pos += iwl_statistics_flag(priv, buf, bufsz);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_header, "Statistics_General:");
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_value, "temperature:",
			 le32_to_cpu(general->temperature));
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_value, "temperature_m:",
			 le32_to_cpu(general->temperature_m));
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_value, "ttl_timestamp:",
			 le32_to_cpu(general->ttl_timestamp));
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "burst_check:",
			 le32_to_cpu(dbg->burst_check),
			 accum_dbg->burst_check,
			 delta_dbg->burst_check, max_dbg->burst_check);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "burst_count:",
			 le32_to_cpu(dbg->burst_count),
			 accum_dbg->burst_count,
			 delta_dbg->burst_count, max_dbg->burst_count);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "wait_for_silence_timeout_count:",
			 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
			 accum_dbg->wait_for_silence_timeout_cnt,
			 delta_dbg->wait_for_silence_timeout_cnt,
			 max_dbg->wait_for_silence_timeout_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "sleep_time:",
			 le32_to_cpu(general->sleep_time),
			 accum_general->sleep_time,
			 delta_general->sleep_time, max_general->sleep_time);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "slots_out:",
			 le32_to_cpu(general->slots_out),
			 accum_general->slots_out,
			 delta_general->slots_out, max_general->slots_out);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "slots_idle:",
			 le32_to_cpu(general->slots_idle),
			 accum_general->slots_idle,
			 delta_general->slots_idle, max_general->slots_idle);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "tx_on_a:",
			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
			 delta_div->tx_on_a, max_div->tx_on_a);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "tx_on_b:",
			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
			 delta_div->tx_on_b, max_div->tx_on_b);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "exec_time:",
			 le32_to_cpu(div->exec_time), accum_div->exec_time,
			 delta_div->exec_time, max_div->exec_time);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "probe_time:",
			 le32_to_cpu(div->probe_time), accum_div->probe_time,
			 delta_div->probe_time, max_div->probe_time);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "rx_enable_counter:",
			 le32_to_cpu(general->rx_enable_counter),
			 accum_general->rx_enable_counter,
			 delta_general->rx_enable_counter,
			 max_general->rx_enable_counter);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "num_of_sos_states:",
			 le32_to_cpu(general->num_of_sos_states),
			 accum_general->num_of_sos_states,
			 delta_general->num_of_sos_states,
			 max_general->num_of_sos_states);

	spin_unlock_bh(&priv->statistics.lock);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1479
1480static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1481 char __user *user_buf,
1482 size_t count, loff_t *ppos)
1483{
1484 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1485 int pos = 0;
1486 char *buf;
1487 int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200;
1488 ssize_t ret;
1489 struct statistics_bt_activity *bt, *accum_bt;
1490
1491 if (!iwl_is_alive(priv))
1492 return -EAGAIN;
1493
1494 if (!priv->bt_enable_flag)
1495 return -EINVAL;
1496
1497 /* make request to uCode to retrieve statistics information */
1498 mutex_lock(&priv->mutex);
1499 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1500 mutex_unlock(&priv->mutex);
1501
1502 if (ret)
1503 return -EAGAIN;
1504 buf = kzalloc(bufsz, GFP_KERNEL);
1505 if (!buf)
1506 return -ENOMEM;
1507
1508 /*
1509 * the statistic information display here is based on
1510 * the last statistics notification from uCode
1511 * might not reflect the current uCode activity
1512 */
1513
1514 spin_lock_bh(&priv->statistics.lock);
1515
1516 bt = &priv->statistics.bt_activity;
1517 accum_bt = &priv->accum_stats.bt_activity;
1518
1519 pos += iwl_statistics_flag(priv, buf, bufsz);
1520 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n");
1521 pos += scnprintf(buf + pos, bufsz - pos,
1522 "\t\t\tcurrent\t\t\taccumulative\n");
1523 pos += scnprintf(buf + pos, bufsz - pos,
1524 "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
1525 le32_to_cpu(bt->hi_priority_tx_req_cnt),
1526 accum_bt->hi_priority_tx_req_cnt);
1527 pos += scnprintf(buf + pos, bufsz - pos,
1528 "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
1529 le32_to_cpu(bt->hi_priority_tx_denied_cnt),
1530 accum_bt->hi_priority_tx_denied_cnt);
1531 pos += scnprintf(buf + pos, bufsz - pos,
1532 "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
1533 le32_to_cpu(bt->lo_priority_tx_req_cnt),
1534 accum_bt->lo_priority_tx_req_cnt);
1535 pos += scnprintf(buf + pos, bufsz - pos,
1536 "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
1537 le32_to_cpu(bt->lo_priority_tx_denied_cnt),
1538 accum_bt->lo_priority_tx_denied_cnt);
1539 pos += scnprintf(buf + pos, bufsz - pos,
1540 "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
1541 le32_to_cpu(bt->hi_priority_rx_req_cnt),
1542 accum_bt->hi_priority_rx_req_cnt);
1543 pos += scnprintf(buf + pos, bufsz - pos,
1544 "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
1545 le32_to_cpu(bt->hi_priority_rx_denied_cnt),
1546 accum_bt->hi_priority_rx_denied_cnt);
1547 pos += scnprintf(buf + pos, bufsz - pos,
1548 "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
1549 le32_to_cpu(bt->lo_priority_rx_req_cnt),
1550 accum_bt->lo_priority_rx_req_cnt);
1551 pos += scnprintf(buf + pos, bufsz - pos,
1552 "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
1553 le32_to_cpu(bt->lo_priority_rx_denied_cnt),
1554 accum_bt->lo_priority_rx_denied_cnt);
1555
1556 pos += scnprintf(buf + pos, bufsz - pos,
1557 "(rx)num_bt_kills:\t\t%u\t\t\t%u\n",
1558 le32_to_cpu(priv->statistics.num_bt_kills),
1559 priv->statistics.accum_num_bt_kills);
1560
1561 spin_unlock_bh(&priv->statistics.lock);
1562
1563 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1564 kfree(buf);
1565 return ret;
1566}
1567
1568static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
1569 char __user *user_buf,
1570 size_t count, loff_t *ppos)
1571{
1572 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1573 int pos = 0;
1574 char *buf;
1575 int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) +
1576 (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
1577 ssize_t ret;
1578
1579 if (!iwl_is_alive(priv))
1580 return -EAGAIN;
1581
1582 buf = kzalloc(bufsz, GFP_KERNEL);
1583 if (!buf)
1584 return -ENOMEM;
1585
1586 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
1587 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
1588 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
1589 priv->reply_tx_stats.pp_delay);
1590 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1591 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
1592 priv->reply_tx_stats.pp_few_bytes);
1593 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1594 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
1595 priv->reply_tx_stats.pp_bt_prio);
1596 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1597 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
1598 priv->reply_tx_stats.pp_quiet_period);
1599 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1600 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
1601 priv->reply_tx_stats.pp_calc_ttak);
1602 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1603 iwl_get_tx_fail_reason(
1604 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
1605 priv->reply_tx_stats.int_crossed_retry);
1606 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1607 iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
1608 priv->reply_tx_stats.short_limit);
1609 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1610 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
1611 priv->reply_tx_stats.long_limit);
1612 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1613 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
1614 priv->reply_tx_stats.fifo_underrun);
1615 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1616 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
1617 priv->reply_tx_stats.drain_flow);
1618 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1619 iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
1620 priv->reply_tx_stats.rfkill_flush);
1621 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1622 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
1623 priv->reply_tx_stats.life_expire);
1624 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1625 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
1626 priv->reply_tx_stats.dest_ps);
1627 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1628 iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
1629 priv->reply_tx_stats.host_abort);
1630 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1631 iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
1632 priv->reply_tx_stats.pp_delay);
1633 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1634 iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
1635 priv->reply_tx_stats.sta_invalid);
1636 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1637 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
1638 priv->reply_tx_stats.frag_drop);
1639 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1640 iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
1641 priv->reply_tx_stats.tid_disable);
1642 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1643 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
1644 priv->reply_tx_stats.fifo_flush);
1645 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1646 iwl_get_tx_fail_reason(
1647 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
1648 priv->reply_tx_stats.insuff_cf_poll);
1649 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1650 iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
1651 priv->reply_tx_stats.fail_hw_drop);
1652 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1653 iwl_get_tx_fail_reason(
1654 TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
1655 priv->reply_tx_stats.sta_color_mismatch);
1656 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
1657 priv->reply_tx_stats.unknown);
1658
1659 pos += scnprintf(buf + pos, bufsz - pos,
1660 "\nStatistics_Agg_TX_Error:\n");
1661
1662 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1663 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
1664 priv->reply_agg_tx_stats.underrun);
1665 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1666 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
1667 priv->reply_agg_tx_stats.bt_prio);
1668 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1669 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
1670 priv->reply_agg_tx_stats.few_bytes);
1671 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1672 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
1673 priv->reply_agg_tx_stats.abort);
1674 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1675 iwl_get_agg_tx_fail_reason(
1676 AGG_TX_STATE_LAST_SENT_TTL_MSK),
1677 priv->reply_agg_tx_stats.last_sent_ttl);
1678 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1679 iwl_get_agg_tx_fail_reason(
1680 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
1681 priv->reply_agg_tx_stats.last_sent_try);
1682 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1683 iwl_get_agg_tx_fail_reason(
1684 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
1685 priv->reply_agg_tx_stats.last_sent_bt_kill);
1686 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1687 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
1688 priv->reply_agg_tx_stats.scd_query);
1689 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1690 iwl_get_agg_tx_fail_reason(
1691 AGG_TX_STATE_TEST_BAD_CRC32_MSK),
1692 priv->reply_agg_tx_stats.bad_crc32);
1693 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1694 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
1695 priv->reply_agg_tx_stats.response);
1696 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1697 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
1698 priv->reply_agg_tx_stats.dump_tx);
1699 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1700 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
1701 priv->reply_agg_tx_stats.delay_tx);
1702 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
1703 priv->reply_agg_tx_stats.unknown);
1704
1705 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1706 kfree(buf);
1707 return ret;
1708}
1709
1710static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
1711 char __user *user_buf,
1712 size_t count, loff_t *ppos) {
1713
1714 struct iwl_priv *priv = file->private_data;
1715 int pos = 0;
1716 int cnt = 0;
1717 char *buf;
1718 int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
1719 ssize_t ret;
1720 struct iwl_sensitivity_data *data;
1721
1722 data = &priv->sensitivity_data;
1723 buf = kzalloc(bufsz, GFP_KERNEL);
1724 if (!buf)
1725 return -ENOMEM;
1726
1727 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
1728 data->auto_corr_ofdm);
1729 pos += scnprintf(buf + pos, bufsz - pos,
1730 "auto_corr_ofdm_mrc:\t\t %u\n",
1731 data->auto_corr_ofdm_mrc);
1732 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
1733 data->auto_corr_ofdm_x1);
1734 pos += scnprintf(buf + pos, bufsz - pos,
1735 "auto_corr_ofdm_mrc_x1:\t\t %u\n",
1736 data->auto_corr_ofdm_mrc_x1);
1737 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
1738 data->auto_corr_cck);
1739 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
1740 data->auto_corr_cck_mrc);
1741 pos += scnprintf(buf + pos, bufsz - pos,
1742 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
1743 data->last_bad_plcp_cnt_ofdm);
1744 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
1745 data->last_fa_cnt_ofdm);
1746 pos += scnprintf(buf + pos, bufsz - pos,
1747 "last_bad_plcp_cnt_cck:\t\t %u\n",
1748 data->last_bad_plcp_cnt_cck);
1749 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
1750 data->last_fa_cnt_cck);
1751 pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
1752 data->nrg_curr_state);
1753 pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
1754 data->nrg_prev_state);
1755 pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
1756 for (cnt = 0; cnt < 10; cnt++) {
1757 pos += scnprintf(buf + pos, bufsz - pos, " %u",
1758 data->nrg_value[cnt]);
1759 }
1760 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1761 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
1762 for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
1763 pos += scnprintf(buf + pos, bufsz - pos, " %u",
1764 data->nrg_silence_rssi[cnt]);
1765 }
1766 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1767 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
1768 data->nrg_silence_ref);
1769 pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
1770 data->nrg_energy_idx);
1771 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
1772 data->nrg_silence_idx);
1773 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
1774 data->nrg_th_cck);
1775 pos += scnprintf(buf + pos, bufsz - pos,
1776 "nrg_auto_corr_silence_diff:\t %u\n",
1777 data->nrg_auto_corr_silence_diff);
1778 pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
1779 data->num_in_cck_no_fa);
1780 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
1781 data->nrg_th_ofdm);
1782
1783 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1784 kfree(buf);
1785 return ret;
1786}
1787
1788
/*
 * debugfs read handler: dump the chain noise (differential Rx gain)
 * calibration state from priv->chain_noise_data as "label:\t value"
 * text lines, one field per line, arrays space-separated.
 */
static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	/* generous upper bound: labels plus every field printed as text */
	int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
	ssize_t ret;
	struct iwl_chain_noise_data *data;

	data = &priv->chain_noise_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
			data->active_chains);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
			data->chain_noise_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
			data->chain_noise_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
			data->chain_noise_c);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
			data->chain_signal_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
			data->chain_signal_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
			data->chain_signal_c);
	pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
			data->beacon_count);

	/* one entry per Rx chain */
	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->disconn_array[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->delta_gain_code[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
			data->radio_write);
	pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
			data->state);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1844
1845static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
1846 char __user *user_buf,
1847 size_t count, loff_t *ppos)
1848{
1849 struct iwl_priv *priv = file->private_data;
1850 char buf[60];
1851 int pos = 0;
1852 const size_t bufsz = sizeof(buf);
1853 u32 pwrsave_status;
1854
1855 pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) &
1856 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1857
1858 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1859 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1860 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1861 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1862 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1863 "error");
1864
1865 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1866}
1867
1868static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
1869 const char __user *user_buf,
1870 size_t count, loff_t *ppos)
1871{
1872 struct iwl_priv *priv = file->private_data;
1873 char buf[8];
1874 int buf_size;
1875 int clear;
1876
1877 memset(buf, 0, sizeof(buf));
1878 buf_size = min(count, sizeof(buf) - 1);
1879 if (copy_from_user(buf, user_buf, buf_size))
1880 return -EFAULT;
1881 if (sscanf(buf, "%d", &clear) != 1)
1882 return -EFAULT;
1883
1884 /* make request to uCode to retrieve statistics information */
1885 mutex_lock(&priv->mutex);
1886 iwl_send_statistics_request(priv, CMD_SYNC, true);
1887 mutex_unlock(&priv->mutex);
1888
1889 return count;
1890}
1891
/*
 * debugfs read handler: show whether the uCode event-trace timer is
 * running plus the event-log wrap counters kept in priv->event_log.
 */
static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char buf[128];
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
			priv->event_log.ucode_trace ? "On" : "Off");
	pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
			priv->event_log.non_wraps_count);
	pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
			priv->event_log.wraps_once_count);
	pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
			priv->event_log.wraps_more_count);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
1912
1913static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
1914 const char __user *user_buf,
1915 size_t count, loff_t *ppos)
1916{
1917 struct iwl_priv *priv = file->private_data;
1918 char buf[8];
1919 int buf_size;
1920 int trace;
1921
1922 memset(buf, 0, sizeof(buf));
1923 buf_size = min(count, sizeof(buf) - 1);
1924 if (copy_from_user(buf, user_buf, buf_size))
1925 return -EFAULT;
1926 if (sscanf(buf, "%d", &trace) != 1)
1927 return -EFAULT;
1928
1929 if (trace) {
1930 priv->event_log.ucode_trace = true;
1931 if (iwl_is_alive(priv)) {
1932 /* start collecting data now */
1933 mod_timer(&priv->ucode_trace, jiffies);
1934 }
1935 } else {
1936 priv->event_log.ucode_trace = false;
1937 del_timer_sync(&priv->ucode_trace);
1938 }
1939
1940 return count;
1941}
1942
1943static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
1944 char __user *user_buf,
1945 size_t count, loff_t *ppos) {
1946
1947 struct iwl_priv *priv = file->private_data;
1948 int len = 0;
1949 char buf[20];
1950
1951 len = sprintf(buf, "0x%04X\n",
1952 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1953 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1954}
1955
1956static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
1957 char __user *user_buf,
1958 size_t count, loff_t *ppos) {
1959
1960 struct iwl_priv *priv = file->private_data;
1961 int len = 0;
1962 char buf[20];
1963
1964 len = sprintf(buf, "0x%04X\n",
1965 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1966 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1967}
1968
1969static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
1970 char __user *user_buf,
1971 size_t count, loff_t *ppos) {
1972
1973 struct iwl_priv *priv = file->private_data;
1974 int pos = 0;
1975 char buf[12];
1976 const size_t bufsz = sizeof(buf);
1977
1978 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1979 priv->missed_beacon_threshold);
1980
1981 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1982}
1983
1984static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
1985 const char __user *user_buf,
1986 size_t count, loff_t *ppos)
1987{
1988 struct iwl_priv *priv = file->private_data;
1989 char buf[8];
1990 int buf_size;
1991 int missed;
1992
1993 memset(buf, 0, sizeof(buf));
1994 buf_size = min(count, sizeof(buf) - 1);
1995 if (copy_from_user(buf, user_buf, buf_size))
1996 return -EFAULT;
1997 if (sscanf(buf, "%d", &missed) != 1)
1998 return -EINVAL;
1999
2000 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
2001 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
2002 priv->missed_beacon_threshold =
2003 IWL_MISSED_BEACON_THRESHOLD_DEF;
2004 else
2005 priv->missed_beacon_threshold = missed;
2006
2007 return count;
2008}
2009
2010static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
2011 char __user *user_buf,
2012 size_t count, loff_t *ppos) {
2013
2014 struct iwl_priv *priv = file->private_data;
2015 int pos = 0;
2016 char buf[12];
2017 const size_t bufsz = sizeof(buf);
2018
2019 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
2020 priv->plcp_delta_threshold);
2021
2022 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2023}
2024
2025static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
2026 const char __user *user_buf,
2027 size_t count, loff_t *ppos) {
2028
2029 struct iwl_priv *priv = file->private_data;
2030 char buf[8];
2031 int buf_size;
2032 int plcp;
2033
2034 memset(buf, 0, sizeof(buf));
2035 buf_size = min(count, sizeof(buf) - 1);
2036 if (copy_from_user(buf, user_buf, buf_size))
2037 return -EFAULT;
2038 if (sscanf(buf, "%d", &plcp) != 1)
2039 return -EINVAL;
2040 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
2041 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
2042 priv->plcp_delta_threshold =
2043 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
2044 else
2045 priv->plcp_delta_threshold = plcp;
2046 return count;
2047}
2048
/*
 * debugfs read handler: dump the RF-reset request/success/reject
 * counters kept in priv->rf_reset.
 */
static ssize_t iwl_dbgfs_rf_reset_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char buf[300];
	const size_t bufsz = sizeof(buf);
	struct iwl_rf_reset *rf_reset = &priv->rf_reset;

	pos += scnprintf(buf + pos, bufsz - pos,
			"RF reset statistics\n");
	pos += scnprintf(buf + pos, bufsz - pos,
			"\tnumber of reset request: %d\n",
			rf_reset->reset_request_count);
	pos += scnprintf(buf + pos, bufsz - pos,
			"\tnumber of reset request success: %d\n",
			rf_reset->reset_success_count);
	pos += scnprintf(buf + pos, bufsz - pos,
			"\tnumber of reset request reject: %d\n",
			rf_reset->reset_reject_count);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
2073
2074static ssize_t iwl_dbgfs_rf_reset_write(struct file *file,
2075 const char __user *user_buf,
2076 size_t count, loff_t *ppos) {
2077
2078 struct iwl_priv *priv = file->private_data;
2079 int ret;
2080
2081 ret = iwl_force_rf_reset(priv, true);
2082 return ret ? ret : count;
2083}
2084
/*
 * debugfs write handler: any numeric input flushes the TX FIFOs,
 * unless RF-kill is asserted.
 */
static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	int flush;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &flush) != 1)
		return -EINVAL;

	/*
	 * NOTE(review): -EFAULT here is questionable — the copy above
	 * succeeded; "device unavailable due to rfkill" reads more like
	 * -EAGAIN/-EIO. Kept as-is in case userspace depends on it.
	 */
	if (iwl_is_rfkill(priv))
		return -EFAULT;

	iwlagn_dev_txfifo_flush(priv);

	return count;
}
2108
/*
 * debugfs read handler: report Bluetooth coexistence state — enable
 * flag, concurrency mode, status, kill masks and current BT traffic
 * load. Prints only "BT coex disabled" when coex is off.
 */
static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
	int pos = 0;
	char buf[200];
	const size_t bufsz = sizeof(buf);

	/* short-circuit: nothing else is meaningful with coex disabled */
	if (!priv->bt_enable_flag) {
		pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n");
		return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n",
		priv->bt_enable_flag);
	pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n",
		priv->bt_full_concurrent ? "full concurrency" : "3-wire");
	pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
			 "last traffic notif: %d\n",
		priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
	pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
			 "kill_ack_mask: %x, kill_cts_mask: %x\n",
		priv->bt_ch_announce, priv->kill_ack_mask,
		priv->kill_cts_mask);

	/* translate the numeric traffic-load code into readable text */
	pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n");
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
		pos += scnprintf(buf + pos, bufsz - pos, "High\n");
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		pos += scnprintf(buf + pos, bufsz - pos, "Low\n");
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
	default:
		pos += scnprintf(buf + pos, bufsz - pos, "None\n");
		break;
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
2153
/*
 * debugfs read handler: report whether RTS/CTS or CTS-to-self is used
 * to protect aggregation; "N/A" when the device has no HT support.
 */
static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;

	int pos = 0;
	char buf[40];
	const size_t bufsz = sizeof(buf);

	if (priv->cfg->ht_params)
		pos += scnprintf(buf + pos, bufsz - pos,
			 "use %s for aggregation\n",
			 (priv->hw_params.use_rts_for_aggregation) ?
				"rts/cts" : "cts-to-self");
	else
		/* NOTE(review): unlike the branch above, no trailing '\n' */
		pos += scnprintf(buf + pos, bufsz - pos, "N/A");

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
2174
2175static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
2176 const char __user *user_buf,
2177 size_t count, loff_t *ppos) {
2178
2179 struct iwl_priv *priv = file->private_data;
2180 char buf[8];
2181 int buf_size;
2182 int rts;
2183
2184 if (!priv->cfg->ht_params)
2185 return -EINVAL;
2186
2187 memset(buf, 0, sizeof(buf));
2188 buf_size = min(count, sizeof(buf) - 1);
2189 if (copy_from_user(buf, user_buf, buf_size))
2190 return -EFAULT;
2191 if (sscanf(buf, "%d", &rts) != 1)
2192 return -EINVAL;
2193 if (rts)
2194 priv->hw_params.use_rts_for_aggregation = true;
2195 else
2196 priv->hw_params.use_rts_for_aggregation = false;
2197 return count;
2198}
2199
2200static int iwl_cmd_echo_test(struct iwl_priv *priv)
2201{
2202 int ret;
2203 struct iwl_host_cmd cmd = {
2204 .id = REPLY_ECHO,
2205 .len = { 0 },
2206 .flags = CMD_SYNC,
2207 };
2208
2209 ret = iwl_dvm_send_cmd(priv, &cmd);
2210 if (ret)
2211 IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
2212 else
2213 IWL_DEBUG_INFO(priv, "echo testing pass\n");
2214 return ret;
2215}
2216
2217static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
2218 const char __user *user_buf,
2219 size_t count, loff_t *ppos)
2220{
2221 struct iwl_priv *priv = file->private_data;
2222 char buf[8];
2223 int buf_size;
2224
2225 memset(buf, 0, sizeof(buf));
2226 buf_size = min(count, sizeof(buf) - 1);
2227 if (copy_from_user(buf, user_buf, buf_size))
2228 return -EFAULT;
2229
2230 iwl_cmd_echo_test(priv);
2231 return count;
2232}
2233
2234#ifdef CONFIG_IWLWIFI_DEBUG
2235static ssize_t iwl_dbgfs_log_event_read(struct file *file,
2236 char __user *user_buf,
2237 size_t count, loff_t *ppos)
2238{
2239 struct iwl_priv *priv = file->private_data;
2240 char *buf;
2241 int pos = 0;
2242 ssize_t ret = -ENOMEM;
2243
2244 ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
2245 if (buf) {
2246 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2247 kfree(buf);
2248 }
2249 return ret;
2250}
2251
2252static ssize_t iwl_dbgfs_log_event_write(struct file *file,
2253 const char __user *user_buf,
2254 size_t count, loff_t *ppos)
2255{
2256 struct iwl_priv *priv = file->private_data;
2257 u32 event_log_flag;
2258 char buf[8];
2259 int buf_size;
2260
2261 /* check that the interface is up */
2262 if (!iwl_is_ready(priv))
2263 return -EAGAIN;
2264
2265 memset(buf, 0, sizeof(buf));
2266 buf_size = min(count, sizeof(buf) - 1);
2267 if (copy_from_user(buf, user_buf, buf_size))
2268 return -EFAULT;
2269 if (sscanf(buf, "%d", &event_log_flag) != 1)
2270 return -EFAULT;
2271 if (event_log_flag == 1)
2272 iwl_dump_nic_event_log(priv, true, NULL, false);
2273
2274 return count;
2275}
2276#endif
2277
/*
 * debugfs read handler: report, per calibration type, whether the
 * corresponding bit is set in the priv->calib_disabled bitmask.
 */
static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[120];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Sensitivity calibrations %s\n",
			 (priv->calib_disabled &
			  IWL_SENSITIVITY_CALIB_DISABLED) ?
			 "DISABLED" : "ENABLED");
	pos += scnprintf(buf + pos, bufsz - pos,
			 "Chain noise calibrations %s\n",
			 (priv->calib_disabled &
			  IWL_CHAIN_NOISE_CALIB_DISABLED) ?
			 "DISABLED" : "ENABLED");
	pos += scnprintf(buf + pos, bufsz - pos,
			 "Tx power calibrations %s\n",
			 (priv->calib_disabled &
			  IWL_TX_POWER_CALIB_DISABLED) ?
			 "DISABLED" : "ENABLED");

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
2305
2306static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
2307 const char __user *user_buf,
2308 size_t count, loff_t *ppos)
2309{
2310 struct iwl_priv *priv = file->private_data;
2311 char buf[8];
2312 u32 calib_disabled;
2313 int buf_size;
2314
2315 memset(buf, 0, sizeof(buf));
2316 buf_size = min(count, sizeof(buf) - 1);
2317 if (copy_from_user(buf, user_buf, buf_size))
2318 return -EFAULT;
2319 if (sscanf(buf, "%x", &calib_disabled) != 1)
2320 return -EFAULT;
2321
2322 priv->calib_disabled = calib_disabled;
2323
2324 return count;
2325}
2326
/*
 * Instantiate the struct file_operations for each debugfs file; the
 * DEBUGFS_*_FILE_OPS macros expand to iwl_dbgfs_<name>_ops wired to
 * the iwl_dbgfs_<name>_read/_write handlers defined above.
 */
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(rf_reset);
DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
DEBUGFS_READ_FILE_OPS(bt_traffic);
DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
DEBUGFS_WRITE_FILE_OPS(echo_test);
#ifdef CONFIG_IWLWIFI_DEBUG
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
#endif
DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
2350
/*
 * Create the debugfs files and directories.
 *
 * Builds data/, rf/ and debug/ subdirectories under @dbgfs_dir and
 * populates them with the files declared above, then links the tree
 * into mac80211's debugfs directory. Returns 0 on success, -ENOMEM
 * if any entry could not be created.
 */
int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
{
	struct dentry *dir_data, *dir_rf, *dir_debug;

	priv->debugfs_dir = dbgfs_dir;

	dir_data = debugfs_create_dir("data", dbgfs_dir);
	if (!dir_data)
		goto err;
	dir_rf = debugfs_create_dir("rf", dbgfs_dir);
	if (!dir_rf)
		goto err;
	dir_debug = debugfs_create_dir("debug", dbgfs_dir);
	if (!dir_debug)
		goto err;

	/*
	 * NOTE(review): DEBUGFS_ADD_FILE presumably bails out to err on
	 * failure (its definition is outside this view) — confirm in the
	 * macro block at the top of the file.
	 */
	DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR);

	DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
#ifdef CONFIG_IWLWIFI_DEBUG
	DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
#endif

	/* bt_traffic is only meaningful on advanced BT-coex devices */
	if (iwl_advanced_bt_coexist(priv))
		DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);

	/* Calibrations disabled/enabled status*/
	DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);

	/*
	 * Create a symlink with mac80211. This is not very robust, as it does
	 * not remove the symlink created. The implicit assumption is that
	 * when the opmode exits, mac80211 will also exit, and will remove
	 * this symlink as part of its cleanup.
	 */
	if (priv->mac80211_registered) {
		char buf[100];
		struct dentry *mac80211_dir, *dev_dir, *root_dir;

		/* build a ../../<root>/<dev> relative path to our dir */
		dev_dir = dbgfs_dir->d_parent;
		root_dir = dev_dir->d_parent;
		mac80211_dir = priv->hw->wiphy->debugfsdir;

		snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
			 dev_dir->d_name.name);

		if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
			goto err;
	}

	return 0;

err:
	/* directories already created under dbgfs_dir are removed by the
	 * caller tearing down dbgfs_dir itself */
	IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
	return -ENOMEM;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
deleted file mode 100644
index 2653a891cc7..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ /dev/null
@@ -1,917 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (dev.h) for driver implementation definitions.
28 * Please use commands.h for uCode API definitions.
29 */
30
31#ifndef __iwl_dev_h__
32#define __iwl_dev_h__
33
34#include <linux/interrupt.h>
35#include <linux/kernel.h>
36#include <linux/wait.h>
37#include <linux/leds.h>
38#include <linux/slab.h>
39#include <linux/mutex.h>
40
41#include "iwl-fw.h"
42#include "iwl-eeprom-parse.h"
43#include "iwl-csr.h"
44#include "iwl-debug.h"
45#include "iwl-agn-hw.h"
46#include "iwl-op-mode.h"
47#include "iwl-notif-wait.h"
48#include "iwl-trans.h"
49
50#include "led.h"
51#include "power.h"
52#include "rs.h"
53#include "tt.h"
54
55#include "iwl-test.h"
56
57/* CT-KILL constants */
58#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
59#define CT_KILL_THRESHOLD 114 /* in Celsius */
60#define CT_KILL_EXIT_THRESHOLD 95 /* in Celsius */
61
62/* Default noise level to report when noise measurement is not available.
63 * This may be because we're:
 * 1) Not associated (no beacon statistics being sent to driver)
65 * 2) Scanning (noise measurement does not apply to associated channel)
66 * Use default noise value of -127 ... this is below the range of measurable
67 * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
68 * Also, -127 works better than 0 when averaging frames with/without
69 * noise info (e.g. averaging might be done in app); measured dBm values are
70 * always negative ... using a negative value as the default keeps all
71 * averages within an s8's (used in some apps) range of negative values. */
72#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
73
74/*
75 * RTS threshold here is total size [2347] minus 4 FCS bytes
76 * Per spec:
77 * a value of 0 means RTS on all data/management packets
78 * a value > max MSDU size means no RTS
79 * else RTS for data/management frames where MPDU is larger
80 * than RTS value.
81 */
82#define DEFAULT_RTS_THRESHOLD 2347U
83#define MIN_RTS_THRESHOLD 0U
84#define MAX_RTS_THRESHOLD 2347U
85#define MAX_MSDU_SIZE 2304U
86#define MAX_MPDU_SIZE 2346U
87#define DEFAULT_BEACON_INTERVAL 200U
88#define DEFAULT_SHORT_RETRY_LIMIT 7U
89#define DEFAULT_LONG_RETRY_LIMIT 4U
90
91#define IWL_NUM_SCAN_RATES (2)
92
93
94#define IEEE80211_DATA_LEN 2304
95#define IEEE80211_4ADDR_LEN 30
96#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
97#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
98
99#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
100#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
101#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
102
103#define IWL_SUPPORTED_RATES_IE_LEN 8
104
105#define IWL_INVALID_RATE 0xFF
106#define IWL_INVALID_VALUE -1
107
/*
 * HT rate support mask, accessible either as one 16-bit word or as
 * the separate SISO / MIMO rate bytes it is composed of.
 */
union iwl_ht_rate_supp {
	u16 rates;
	struct {
		u8 siso_rate;	/* low byte: SISO rates */
		u8 mimo_rate;	/* high byte: MIMO rates */
	};
};
115
/* HT configuration state */
struct iwl_ht_config {
	/* true if one Rx chain suffices for the link — TODO confirm vs callers */
	bool single_chain_sufficient;
	enum ieee80211_smps_mode smps; /* current smps mode */
};
120
/* QoS structures */
struct iwl_qos_info {
	int qos_active;				/* non-zero when QoS is active */
	struct iwl_qosparam_cmd def_qos_parm;	/* default QoS parameter command */
};
126
/**
 * enum iwl_agg_state
 *
 * The state machine of the BA agreement establishment / tear down.
 * These states relate to a specific RA / TID.
 *
 * @IWL_AGG_OFF: aggregation is not used
 * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
 * @IWL_AGG_ON: aggregation session is up
 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
 *	HW queue to be empty from packets for this RA /TID.
 * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
 *	HW queue to be empty from packets for this RA /TID.
 */
enum iwl_agg_state {
	IWL_AGG_OFF = 0,	/* initial/default state */
	IWL_AGG_STARTING,
	IWL_AGG_ON,
	IWL_EMPTYING_HW_QUEUE_ADDBA,
	IWL_EMPTYING_HW_QUEUE_DELBA,
};
148
/**
 * struct iwl_ht_agg - aggregation state machine
 *
 * This struct holds the states for the BA agreement establishment and tear
 * down. It also holds the state during the BA session itself. This struct is
 * duplicated for each RA / TID.
 *
 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
 *	Tx response (REPLY_TX), and the block ack notification
 *	(REPLY_COMPRESSED_BA).
 * @state: state of the BA agreement establishment / tear down.
 * @txq_id: Tx queue used by the BA session
 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
 *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
 *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
 *	we are ready to finish the Tx AGG stop / start flow.
 * @wait_for_ba: Expect block-ack before next Tx reply
 */
struct iwl_ht_agg {
	u32 rate_n_flags;
	enum iwl_agg_state state;
	u16 txq_id;
	u16 ssn;
	bool wait_for_ba;
};
174
/**
 * struct iwl_tid_data - one for each RA / TID
 *
 * This struct holds the states for each RA / TID.
 *
 * @seq_number: the next WiFi sequence number to use
 * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
 *	This is basically (last acked packet++).
 * @agg: aggregation state machine
 */
struct iwl_tid_data {
	u16 seq_number;
	u16 next_reclaimed;
	struct iwl_ht_agg agg;
};
190
/*
 * Structure should be accessed with sta_lock held. When station addition
 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
 * the commands (iwl_addsta_cmd and iwl_link_quality_cmd) without sta_lock
 * held.
 */
struct iwl_station_entry {
	struct iwl_addsta_cmd sta;	/* ADD_STA command buffer for this station */
	u8 used, ctxid;			/* usage flags; owning RXON context id */
	struct iwl_link_quality_cmd *lq; /* link quality (rate scaling) command */
};
202
/*
 * iwl_station_priv: Driver's private station information
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by driver. This structure is placed in that
 * space.
 */
struct iwl_station_priv {
	struct iwl_rxon_context *ctx;	/* RXON context this station belongs to */
	struct iwl_lq_sta lq_sta;	/* rate-scaling state */
	atomic_t pending_frames;	/* frames queued to HW, not yet completed */
	bool client;
	bool asleep;			/* station is in PS mode */
	u8 max_agg_bufsize;
	u8 sta_id;			/* uCode station table index */
};
219
/**
 * struct iwl_vif_priv - driver's private per-interface information
 *
 * When mac80211 allocates a virtual interface, it can allocate
 * space for us to put data into.
 */
struct iwl_vif_priv {
	struct iwl_rxon_context *ctx;	/* RXON context used by this interface */
	u8 ibss_bssid_sta_id;		/* station id of the IBSS BSSID entry */
};
230
/*
 * Per-device min/max bounds used by the sensitivity (auto-correlation)
 * calibration algorithm; values are filled in by device-specific code.
 */
struct iwl_sensitivity_ranges {
	u16 min_nrg_cck;

	/* energy detect thresholds */
	u16 nrg_th_cck;
	u16 nrg_th_ofdm;

	/* OFDM auto-correlation lower bounds (mrc = maximal ratio combining) */
	u16 auto_corr_min_ofdm;
	u16 auto_corr_min_ofdm_mrc;
	u16 auto_corr_min_ofdm_x1;
	u16 auto_corr_min_ofdm_mrc_x1;

	/* OFDM auto-correlation upper bounds */
	u16 auto_corr_max_ofdm;
	u16 auto_corr_max_ofdm_mrc;
	u16 auto_corr_max_ofdm_x1;
	u16 auto_corr_max_ofdm_mrc_x1;

	/* CCK auto-correlation bounds */
	u16 auto_corr_max_cck;
	u16 auto_corr_max_cck_mrc;
	u16 auto_corr_min_cck;
	u16 auto_corr_min_cck_mrc;

	/* Barker correlation / CCA thresholds */
	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
256
257
258#define KELVIN_TO_CELSIUS(x) ((x)-273)
259#define CELSIUS_TO_KELVIN(x) ((x)+273)
260
261
262/******************************************************************************
263 *
264 * Functions implemented in core module which are forward declared here
265 * for use by iwl-[4-5].c
266 *
267 * NOTE: The implementation of these functions are not hardware specific
268 * which is why they are in the core module files.
269 *
270 * Naming convention --
271 * iwl_ <-- Is part of iwlwifi
272 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
273 *
274 ****************************************************************************/
275extern void iwl_update_chain_flags(struct iwl_priv *priv);
276extern const u8 iwl_bcast_addr[ETH_ALEN];
277
278#define IWL_OPERATION_MODE_AUTO 0
279#define IWL_OPERATION_MODE_HT_ONLY 1
280#define IWL_OPERATION_MODE_MIXED 2
281#define IWL_OPERATION_MODE_20MHZ 3
282
283#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
284
285/* Sensitivity and chain noise calibration */
286#define INITIALIZATION_VALUE 0xFFFF
287#define IWL_CAL_NUM_BEACONS 16
288#define MAXIMUM_ALLOWED_PATHLOSS 15
289
290#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
291
292#define MAX_FA_OFDM 50
293#define MIN_FA_OFDM 5
294#define MAX_FA_CCK 50
295#define MIN_FA_CCK 5
296
297#define AUTO_CORR_STEP_OFDM 1
298
299#define AUTO_CORR_STEP_CCK 3
300#define AUTO_CORR_MAX_TH_CCK 160
301
302#define NRG_DIFF 2
303#define NRG_STEP_CCK 2
304#define NRG_MARGIN 8
305#define MAX_NUMBER_CCK_NO_FA 100
306
307#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
308
309#define CHAIN_A 0
310#define CHAIN_B 1
311#define CHAIN_C 2
312#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
313#define ALL_BAND_FILTER 0xFF00
314#define IN_BAND_FILTER 0xFF
315#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
316
317#define NRG_NUM_PREV_STAT_L 20
318#define NUM_RX_CHAINS 3
319
/* classification of the observed false-alarm rate for sensitivity calib */
enum iwlagn_false_alarm_state {
	IWL_FA_TOO_MANY = 0,
	IWL_FA_TOO_FEW = 1,
	IWL_FA_GOOD_RANGE = 2,
};
325
/* progress of the chain-noise (differential Rx gain) calibration */
enum iwlagn_chain_noise_state {
	IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
	IWL_CHAIN_NOISE_ACCUMULATE, /* collecting beacon statistics */
	IWL_CHAIN_NOISE_CALIBRATED, /* gain deltas computed */
	IWL_CHAIN_NOISE_DONE,
};
332
/* Sensitivity calib data */
struct iwl_sensitivity_data {
	/* current auto-correlation thresholds (see iwl_sensitivity_ranges) */
	u32 auto_corr_ofdm;
	u32 auto_corr_ofdm_mrc;
	u32 auto_corr_ofdm_x1;
	u32 auto_corr_ofdm_mrc_x1;
	u32 auto_corr_cck;
	u32 auto_corr_cck_mrc;

	/* counters from the previous statistics notification */
	u32 last_bad_plcp_cnt_ofdm;
	u32 last_fa_cnt_ofdm;
	u32 last_bad_plcp_cnt_cck;
	u32 last_fa_cnt_cck;

	/* background energy / silence tracking */
	u32 nrg_curr_state;
	u32 nrg_prev_state;
	u32 nrg_value[10];
	u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; /* rolling RSSI history */
	u32 nrg_silence_ref;
	u32 nrg_energy_idx;	/* write index into nrg_value[] */
	u32 nrg_silence_idx;	/* write index into nrg_silence_rssi[] */
	u32 nrg_th_cck;
	s32 nrg_auto_corr_silence_diff;
	u32 num_in_cck_no_fa;	/* consecutive periods without CCK false alarms */
	u32 nrg_th_ofdm;

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
363
/* Chain noise (differential Rx gain) calib data
 *
 * Accumulates per-chain noise and signal readings across beacons,
 * then derives a delta-gain code per chain and marks disconnected
 * antennas. @state follows enum iwlagn_chain_noise_state.
 */
struct iwl_chain_noise_data {
	u32 active_chains;		/* bitmap of chains found connected */
	u32 chain_noise_a;		/* accumulated noise, chain A */
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_signal_a;		/* accumulated signal, chain A */
	u32 chain_signal_b;
	u32 chain_signal_c;
	u16 beacon_count;		/* beacons accumulated so far */
	u8 disconn_array[NUM_RX_CHAINS];   /* 1 = antenna disconnected */
	u8 delta_gain_code[NUM_RX_CHAINS]; /* computed gain adjustment */
	u8 radio_write;			/* gains already sent to radio */
	u8 state;			/* enum iwlagn_chain_noise_state */
};
379
/* Flags for iwl_priv::measurement_status (spectrum measurement) */
enum {
	MEASUREMENT_READY = (1 << 0),	/* a cached report is available */
	MEASUREMENT_ACTIVE = (1 << 1),	/* a measurement is in progress */
};
384
/* reply_tx_statistics (for _agn devices)
 *
 * Driver-side counters of TX completion failure reasons reported by
 * the uCode in REPLY_TX status; one counter per status code.
 */
struct reply_tx_error_statistics {
	u32 pp_delay;		/* postponed: delay */
	u32 pp_few_bytes;	/* postponed: few bytes */
	u32 pp_bt_prio;		/* postponed: BT priority */
	u32 pp_quiet_period;	/* postponed: quiet period */
	u32 pp_calc_ttak;	/* postponed: TTAK calculation */
	u32 int_crossed_retry;
	u32 short_limit;	/* short retry limit reached */
	u32 long_limit;		/* long retry limit reached */
	u32 fifo_underrun;
	u32 drain_flow;
	u32 rfkill_flush;
	u32 life_expire;	/* frame lifetime expired */
	u32 dest_ps;		/* destination in power save */
	u32 host_abort;
	u32 bt_retry;
	u32 sta_invalid;
	u32 frag_drop;
	u32 tid_disable;
	u32 fifo_flush;
	u32 insuff_cf_poll;
	u32 fail_hw_drop;
	u32 sta_color_mismatch;
	u32 unknown;		/* any status code not recognized above */
};
411
/* reply_agg_tx_statistics (for _agn devices)
 *
 * Counters of per-frame failure reasons inside aggregation (A-MPDU)
 * TX responses; one counter per aggregation status code.
 */
struct reply_agg_tx_error_statistics {
	u32 underrun;
	u32 bt_prio;
	u32 few_bytes;
	u32 abort;
	u32 last_sent_ttl;
	u32 last_sent_try;
	u32 last_sent_bt_kill;
	u32 scd_query;		/* scheduler query mismatch */
	u32 bad_crc32;
	u32 response;
	u32 dump_tx;
	u32 delay_tx;
	u32 unknown;		/* unrecognized aggregation status */
};
428
/*
 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
 * to perform continuous uCode event logging operation if enabled
 */
#define UCODE_TRACE_PERIOD (10)

/**
 * struct iwl_event_log - current uCode event log position
 *
 * @ucode_trace: enable/disable ucode continuous trace timer
 * @num_wraps: how many times the event buffer wraps
 * @next_entry: the entry just before the next one that uCode would fill
 * @non_wraps_count: counter for no wrap detected when dump ucode events
 * @wraps_once_count: counter for wrap once detected when dump ucode events
 * @wraps_more_count: counter for wrap more than once detected
 *	when dump ucode events
 */
struct iwl_event_log {
	bool ucode_trace;
	u32 num_wraps;
	u32 next_entry;
	int non_wraps_count;
	int wraps_once_count;
	int wraps_more_count;
};
454
/* minimum delay between two forced RF resets (jiffies) */
#define IWL_DELAY_NEXT_FORCE_RF_RESET  (HZ*3)

/* BT Antenna Coupling Threshold (dB) */
#define IWL_BT_ANTENNA_COUPLING_THRESHOLD	(35)

/* Firmware reload counter and Timestamp */
#define IWL_MIN_RELOAD_DURATION		1000 /* 1000 ms */
#define IWL_MAX_CONTINUE_RELOAD_CNT	4


/* Bookkeeping for RF-reset requests, used to rate-limit resets
 * (see IWL_DELAY_NEXT_FORCE_RF_RESET). */
struct iwl_rf_reset {
	int reset_request_count;	/* total resets requested */
	int reset_success_count;	/* resets actually performed */
	int reset_reject_count;		/* resets rejected (too soon etc.) */
	unsigned long last_reset_jiffies; /* time of last accepted reset */
};
471
/* Identifiers for the (at most two) RXON contexts a device supports:
 * the regular BSS context and the PAN (P2P/WiFi-Direct) context. */
enum iwl_rxon_context_id {
	IWL_RXON_CTX_BSS,
	IWL_RXON_CTX_PAN,

	NUM_IWL_RXON_CTX
};
478
/* extend beacon time format bit shifting */
/*
 * for _agn devices
 * bits 31:22 - extended (beacon count)
 * bits 21:0  - interval (usec within one beacon interval)
 */
#define IWLAGN_EXT_BEACON_TIME_POS	22

/* Per-context RXON state: one instance per entry of
 * enum iwl_rxon_context_id (BSS and PAN). Holds the active and staging
 * RXON commands plus the queue/station/QoS bookkeeping tied to the
 * context's virtual interface. */
struct iwl_rxon_context {
	struct ieee80211_vif *vif;	/* mac80211 interface bound to this ctx */

	u8 mcast_queue;			/* HW queue used for multicast */
	u8 ac_to_queue[IEEE80211_NUM_ACS];	/* AC -> HW queue mapping */
	u8 ac_to_fifo[IEEE80211_NUM_ACS];	/* AC -> TX FIFO mapping */

	/*
	 * We could use the vif to indicate active, but we
	 * also need it to be active during disabling when
	 * we already removed the vif for type setting.
	 */
	bool always_active, is_active;

	bool ht_need_multiple_chains;

	enum iwl_rxon_context_id ctxid;

	u32 interface_modes, exclusive_interface_modes;
	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;

	/*
	 * We declare this const so it can only be
	 * changed via explicit cast within the
	 * routines that actually update the physical
	 * hardware.
	 */
	const struct iwl_rxon_cmd active;	/* RXON as committed to uCode */
	struct iwl_rxon_cmd staging;		/* RXON being prepared */

	struct iwl_rxon_time_cmd timing;

	struct iwl_qos_info qos_data;

	u8 bcast_sta_id, ap_sta_id;

	/* uCode command IDs specific to this context */
	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
	u8 qos_cmd;
	u8 wep_key_cmd;

	struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
	u8 key_mapping_keys;

	__le32 station_flags;

	int beacon_int;

	/* HT protection state for this context */
	struct {
		bool non_gf_sta_present;	/* non-greenfield STA seen */
		u8 protection;
		bool enabled, is_40mhz;
		u8 extension_chan_offset;
	} ht;
};
541
/* Reason a scan was started; affects how the scan command is built */
enum iwl_scan_type {
	IWL_SCAN_NORMAL,	/* regular mac80211-requested scan */
	IWL_SCAN_RADIO_RESET,	/* internal scan used to reset the radio */
	IWL_SCAN_ROC,		/* remain-on-channel emulated via scan */
};
547
/**
 * struct iwl_hw_params
 *
 * Holds the module parameters
 *
 * @tx_chains_num: Number of TX chains
 * @rx_chains_num: Number of RX chains
 * @ct_kill_threshold: temperature threshold - in hw dependent unit
 * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
 *	relevant for 1000, 6000 and up
 * @sens: range of sensitivity values (&struct iwl_sensitivity_ranges)
 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
 */
struct iwl_hw_params {
	u8  tx_chains_num;
	u8  rx_chains_num;
	bool use_rts_for_aggregation;
	u32 ct_kill_threshold;
	u32 ct_kill_exit_threshold;

	const struct iwl_sensitivity_ranges *sens;
};
570
/* Per-device-family operations; each family provides its own instance
 * (e.g. iwl1000_lib, iwl6000_lib). Optional hooks may be NULL. */
struct iwl_lib_ops {
	/* set hw dependent parameters */
	void (*set_hw_params)(struct iwl_priv *priv);
	/* channel switch offload; not implemented by every family */
	int (*set_channel_switch)(struct iwl_priv *priv,
				  struct ieee80211_channel_switch *ch_switch);
	/* device specific configuration */
	void (*nic_config)(struct iwl_priv *priv);

	/* temperature */
	void (*temperature)(struct iwl_priv *priv);
};
582
/* P2P Notice-of-Absence attribute, RCU-managed (see iwl_priv::noa_data).
 * @data is a flexible array of @length bytes. */
struct iwl_wipan_noa_data {
	struct rcu_head rcu_head;
	u32 length;
	u8 data[];
};
588
/* Calibration disabling bit mask (for iwl_priv::calib_disabled) */
enum {
	IWL_CALIB_ENABLE_ALL			= 0,

	IWL_SENSITIVITY_CALIB_DISABLED		= BIT(0),
	IWL_CHAIN_NOISE_CALIB_DISABLED		= BIT(1),
	IWL_TX_POWER_CALIB_DISABLED		= BIT(2),

	IWL_CALIB_DISABLE_ALL			= 0xFFFFFFFF,
};
599
/* Recover the DVM private data from an op-mode pointer */
#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \
	((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific))

/* Recover the DVM private data from a mac80211 hw pointer */
#define IWL_MAC80211_GET_DVM(_hw) \
	((struct iwl_priv *) ((struct iwl_op_mode *) \
			      (_hw)->priv)->op_mode_specific)
606
/* Main per-device runtime state for the DVM op-mode. One instance per
 * NIC; lives in the op-mode-specific area of the transport's op_mode. */
struct iwl_priv {

	struct iwl_trans *trans;
	struct device *dev;		/* for debug prints only */
	const struct iwl_cfg *cfg;
	const struct iwl_fw *fw;
	const struct iwl_lib_ops *lib;	/* device-family ops (devices.c) */
	unsigned long status;

	spinlock_t sta_lock;		/* protects the station table */
	struct mutex mutex;

	/* queue flow control between transport and mac80211 */
	unsigned long transport_queue_stop;
	bool passive_no_rx;
#define IWL_INVALID_MAC80211_QUEUE	0xff
	u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
	atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];

	unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* ieee device used by generic ieee processing code */
	struct ieee80211_hw *hw;

	struct list_head calib_results;

	struct workqueue_struct *workqueue;

	struct iwl_hw_params hw_params;

	enum ieee80211_band band;
	u8 valid_contexts;		/* bitmap of usable RXON contexts */

	/* dispatch table for uCode notifications, indexed by command ID */
	int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
				       struct iwl_rx_cmd_buffer *rxb,
				       struct iwl_device_cmd *cmd);

	struct iwl_notif_wait_data notif_wait;

	/* spectrum measurement report caching */
	struct iwl_spectrum_notification measure_report;
	u8 measurement_status;		/* MEASUREMENT_* flags */

/* who currently drives the uCode: the driver or testmode */
#define IWL_OWNERSHIP_DRIVER	0
#define IWL_OWNERSHIP_TM	1
	u8 ucode_owner;

	/* ucode beacon time */
	u32 ucode_beacon_time;
	int missed_beacon_threshold;

	/* track IBSS manager (last beacon) status */
	u32 ibss_manager;

	/* jiffies when last recovery from statistics was performed */
	unsigned long rx_statistics_jiffies;

	/* counters */
	u32 rx_handlers_stats[REPLY_MAX];

	/* rf reset */
	struct iwl_rf_reset rf_reset;

	/* firmware reload counter and timestamp */
	unsigned long reload_jiffies;
	int reload_count;
	bool ucode_loaded;
	bool init_ucode_run;		/* Don't run init uCode again */

	u8 plcp_delta_threshold;

	/* thermal calibration */
	s32 temperature;	/* Celsius */
	s32 last_temperature;

	struct iwl_wipan_noa_data __rcu *noa_data;

	/* Scan related variables */
	unsigned long scan_start;
	unsigned long scan_start_tsf;
	void *scan_cmd;
	enum ieee80211_band scan_band;
	struct cfg80211_scan_request *scan_request;
	struct ieee80211_vif *scan_vif;
	enum iwl_scan_type scan_type;
	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
	u8 mgmt_tx_ant;

	/* max number of station keys */
	u8 sta_key_max_num;

	bool new_scan_threshold_behaviour;

	bool wowlan;

	/* EEPROM MAC addresses */
	struct mac_address addresses[2];

	struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];

	__le16 switch_channel;		/* channel switch in progress target */

	/* sensitivity / chain-noise calibration state */
	u8 start_calib;
	struct iwl_sensitivity_data sensitivity_data;
	struct iwl_chain_noise_data chain_noise_data;
	__le16 sensitivity_tbl[HD_TABLE_SIZE];
	__le16 enhance_sensitivity_tbl[ENHANCE_HD_TABLE_ENTRIES];

	struct iwl_ht_config current_ht_config;

	/* Rate scaling data */
	u8 retry_rate;

	int activity_timer_active;

	struct iwl_power_mgr power_data;
	struct iwl_tt_mgmt thermal_throttle;

	/* station table variables */
	int num_stations;
	struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
	unsigned long ucode_key_table;
	struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
	atomic_t num_aux_in_flight;

	u8 mac80211_registered;

	/* Indication if ieee80211_ops->open has been called */
	u8 is_open;

	enum nl80211_iftype iw_mode;

	/* Last Rx'd beacon timestamp */
	u64 timestamp;

	/* most recent statistics notification from the uCode */
	struct {
		__le32 flag;
		struct statistics_general_common common;
		struct statistics_rx_non_phy rx_non_phy;
		struct statistics_rx_phy rx_ofdm;
		struct statistics_rx_ht_phy rx_ofdm_ht;
		struct statistics_rx_phy rx_cck;
		struct statistics_tx tx;
#ifdef CONFIG_IWLWIFI_DEBUGFS
		struct statistics_bt_activity bt_activity;
		__le32 num_bt_kills, accum_num_bt_kills;
#endif
		spinlock_t lock;
	} statistics;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* accumulated / delta views of the statistics, for debugfs only */
	struct {
		struct statistics_general_common common;
		struct statistics_rx_non_phy rx_non_phy;
		struct statistics_rx_phy rx_ofdm;
		struct statistics_rx_ht_phy rx_ofdm_ht;
		struct statistics_rx_phy rx_cck;
		struct statistics_tx tx;
		struct statistics_bt_activity bt_activity;
	} accum_stats, delta_stats, max_delta_stats;
#endif

	/*
	 * reporting the number of tids has AGG on. 0 means
	 * no AGGREGATION
	 */
	u8 agg_tids_count;

	struct iwl_rx_phy_res last_phy_res;
	u32 ampdu_ref;
	bool last_phy_res_valid;

	/*
	 * chain noise reset and gain commands are the
	 * two extra calibration commands follows the standard
	 * phy calibration commands
	 */
	u8 phy_calib_chain_noise_reset_cmd;
	u8 phy_calib_chain_noise_gain_cmd;

	/* counts reply_tx error */
	struct reply_tx_error_statistics reply_tx_stats;
	struct reply_agg_tx_error_statistics reply_agg_tx_stats;

	/* remain-on-channel offload support */
	struct ieee80211_channel *hw_roc_channel;
	struct delayed_work hw_roc_disable_work;
	int hw_roc_duration;
	bool hw_roc_setup, hw_roc_start_notified;

	/* bt coex */
	u8 bt_enable_flag;
	u8 bt_status;
	u8 bt_traffic_load, last_bt_traffic_load;
	bool bt_ch_announce;
	bool bt_full_concurrent;
	bool bt_ant_couple_ok;
	__le32 kill_ack_mask;
	__le32 kill_cts_mask;
	__le16 bt_valid;
	bool reduced_txpower;
	u16 bt_on_thresh;
	u16 bt_duration;
	u16 dynamic_frag_thresh;
	u8 bt_ci_compliance;
	struct work_struct bt_traffic_change_work;
	bool bt_enable_pspoll;
	struct iwl_rxon_context *cur_rssi_ctx;
	bool bt_is_sco;

	/* deferred work items */
	struct work_struct restart;
	struct work_struct scan_completed;
	struct work_struct abort_scan;

	struct work_struct beacon_update;
	struct iwl_rxon_context *beacon_ctx;
	struct sk_buff *beacon_skb;
	void *beacon_cmd;

	struct work_struct tt_work;
	struct work_struct ct_enter;
	struct work_struct ct_exit;
	struct work_struct start_internal_scan;
	struct work_struct tx_flush;
	struct work_struct bt_full_concurrency;
	struct work_struct bt_runtime_config;

	struct delayed_work scan_check;

	/* TX Power settings */
	s8 tx_power_user_lmt;
	s8 tx_power_next;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* debugfs */
	struct dentry *debugfs_dir;
	u32 dbgfs_sram_offset, dbgfs_sram_len;
	bool disable_ht40;
	void *wowlan_sram;
#endif /* CONFIG_IWLWIFI_DEBUGFS */

	struct iwl_nvm_data *nvm_data;
	/* eeprom blob for debugfs/testmode */
	u8 *eeprom_blob;
	size_t eeprom_blob_size;

	struct work_struct txpower_work;
	u32 calib_disabled;		/* IWL_*_CALIB_DISABLED mask */
	struct work_struct run_time_calib_work;
	struct timer_list statistics_periodic;
	struct timer_list ucode_trace;

	struct iwl_event_log event_log;

	struct led_classdev led;
	unsigned long blink_on, blink_off;
	bool led_registered;

#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
	struct iwl_test tst;
	u32 tm_fixed_rate;
#endif

	/* WoWLAN GTK rekey data */
	u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
	__le64 replay_ctr;
	__le16 last_seq_ctl;
	bool have_rekey_data;

	/* device_pointers: pointers to ucode event tables */
	struct {
		u32 error_event_table;
		u32 log_event_table;
	} device_pointers;

	/* indicator of loaded ucode image */
	enum iwl_ucode_type cur_ucode;
}; /*iwl_priv */
883
884static inline struct iwl_rxon_context *
885iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
886{
887 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
888
889 return vif_priv->ctx;
890}
891
/* Iterate over all RXON contexts that are valid for this device;
 * contexts not set in priv->valid_contexts are skipped. */
#define for_each_context(priv, ctx)				\
	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
		if (priv->valid_contexts & BIT(ctx->ctxid))
896
897static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
898{
899 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
900}
901
902static inline int iwl_is_associated(struct iwl_priv *priv,
903 enum iwl_rxon_context_id ctxid)
904{
905 return iwl_is_associated_ctx(&priv->contexts[ctxid]);
906}
907
908static inline int iwl_is_any_associated(struct iwl_priv *priv)
909{
910 struct iwl_rxon_context *ctx;
911 for_each_context(priv, ctx)
912 if (iwl_is_associated_ctx(ctx))
913 return true;
914 return false;
915}
916
917#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
deleted file mode 100644
index 8c72be3f37c..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ /dev/null
@@ -1,597 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27/*
28 * DVM device-specific data & functions
29 */
30#include "iwl-io.h"
31#include "iwl-prph.h"
32#include "iwl-eeprom-parse.h"
33
34#include "agn.h"
35#include "dev.h"
36#include "commands.h"
37
38
39/*
40 * 1000 series
41 * ===========
42 */
43
44/*
45 * For 1000, use advance thermal throttling critical temperature threshold,
46 * but legacy thermal management implementation for now.
47 * This is for the reason of 1000 uCode using advance thermal throttling API
48 * but not implement ct_kill_exit based on ct_kill exit temperature
49 * so the thermal throttling will still based on legacy thermal throttling
50 * management.
51 * The code here need to be modified once 1000 uCode has the advanced thermal
52 * throttling algorithm in place
53 */
54static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
55{
56 /* want Celsius */
57 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
58 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
59}
60
/* NIC configuration for 1000 series */
static void iwl1000_nic_config(struct iwl_priv *priv)
{
	/* Setting digital SVR for 1000 card to 1.32V */
	/* locking is acquired in iwl_set_bits_mask_prph() function */
	iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
			       APMG_SVR_DIGITAL_VOLTAGE_1_32,
			       ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
70
/**
 * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
 * @priv: pointer to iwl_priv data structure (unused)
 * @tsf_bits: number of bits reserved for the "internal" (usec) part
 *
 * Returns a mask covering the low @tsf_bits bits, i.e. the usec-within-
 * interval portion of the packed beacon time value.
 */
static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
					   u16 tsf_bits)
{
	return (1 << tsf_bits) - 1;
}
81
/**
 * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
 * @priv: pointer to iwl_priv data structure (unused)
 * @tsf_bits: number of bits reserved for the "internal" (usec) part
 *
 * Returns a mask covering the top (32 - @tsf_bits) bits, i.e. the
 * beacon-count ("extended") portion of the packed beacon time value.
 */
static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
					    u16 tsf_bits)
{
	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
}
92
/*
 * extended beacon time format
 * time in usec will be changed into a 32-bit value in extended:internal format
 * the extended part is the beacon counts
 * the internal part is the time in usec within one beacon interval
 */
static u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec,
				u32 beacon_interval)
{
	u32 quot;	/* whole beacon intervals -> "extended" part */
	u32 rem;	/* leftover usec within an interval -> "internal" part */
	u32 interval = beacon_interval * TIME_UNIT;	/* interval in usec */

	/* avoid division by zero and meaningless zero input */
	if (!interval || !usec)
		return 0;

	/* beacon count, clipped to the width of the extended field */
	quot = (usec / interval) &
		(iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
		 IWLAGN_EXT_BEACON_TIME_POS);
	/* remainder, clipped to the width of the internal field */
	rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
				       IWLAGN_EXT_BEACON_TIME_POS);

	/* pack as extended:internal */
	return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
}
117
/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 *
 * Adds two extended-format beacon times (see iwl_usecs_to_beacons),
 * carrying overflow of the usec ("internal") part into the beacon-count
 * ("extended") part. Result is returned little-endian for the uCode.
 */
static __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
				  u32 addon, u32 beacon_interval)
{
	/* internal (usec) parts of both operands */
	u32 base_low = base & iwl_beacon_time_mask_low(priv,
						IWLAGN_EXT_BEACON_TIME_POS);
	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
						IWLAGN_EXT_BEACON_TIME_POS);
	u32 interval = beacon_interval * TIME_UNIT;
	/* sum of the extended (beacon count) parts */
	u32 res = (base & iwl_beacon_time_mask_high(priv,
						IWLAGN_EXT_BEACON_TIME_POS)) +
				(addon & iwl_beacon_time_mask_high(priv,
						IWLAGN_EXT_BEACON_TIME_POS));

	/* NOTE: base counts down, so base_low > addon_low means the
	 * difference still fits in the current interval; otherwise wrap
	 * into the next interval and bump the beacon count. */
	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
	} else
		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);

	return cpu_to_le32(res);
}
144
/* Default RX sensitivity calibration ranges for the 1000 series */
static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
	.min_nrg_cck = 95,
	.auto_corr_min_ofdm = 90,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 120,
	.auto_corr_min_ofdm_mrc_x1 = 240,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 155,
	.auto_corr_max_ofdm_mrc_x1 = 290,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 170,
	.auto_corr_max_cck_mrc = 400,
	.nrg_th_cck = 95,
	.nrg_th_ofdm = 95,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
168
169static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
170{
171 iwl1000_set_ct_threshold(priv);
172
173 /* Set initial sensitivity parameters */
174 priv->hw_params.sens = &iwl1000_sensitivity;
175}
176
/* Device-family ops for the 1000 series (no channel-switch offload) */
struct iwl_lib_ops iwl1000_lib = {
	.set_hw_params = iwl1000_hw_set_hw_params,
	.nic_config = iwl1000_nic_config,
	.temperature = iwlagn_temperature,
};
182
183
184/*
185 * 2000 series
186 * ===========
187 */
188
189static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
190{
191 /* want Celsius */
192 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
193 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
194}
195
/* NIC configuration for 2000 series */
static void iwl2000_nic_config(struct iwl_priv *priv)
{
	/* tell the device the radio IQ is inverted on this SKU */
	iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
202
/* Default RX sensitivity calibration ranges for the 2000 series */
static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
	.min_nrg_cck = 97,
	.auto_corr_min_ofdm = 80,
	.auto_corr_min_ofdm_mrc = 128,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 192,

	.auto_corr_max_ofdm = 145,
	.auto_corr_max_ofdm_mrc = 232,
	.auto_corr_max_ofdm_x1 = 110,
	.auto_corr_max_ofdm_mrc_x1 = 232,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 175,
	.auto_corr_min_cck_mrc = 160,
	.auto_corr_max_cck_mrc = 310,
	.nrg_th_cck = 97,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
226
227static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
228{
229 iwl2000_set_ct_threshold(priv);
230
231 /* Set initial sensitivity parameters */
232 priv->hw_params.sens = &iwl2000_sensitivity;
233}
234
/* Device-family ops for the 2000 series */
struct iwl_lib_ops iwl2000_lib = {
	.set_hw_params = iwl2000_hw_set_hw_params,
	.nic_config = iwl2000_nic_config,
	.temperature = iwlagn_temperature,
};
240
/* Device-family ops for the 2030 series (same hooks as 2000) */
struct iwl_lib_ops iwl2030_lib = {
	.set_hw_params = iwl2000_hw_set_hw_params,
	.nic_config = iwl2000_nic_config,
	.temperature = iwlagn_temperature,
};
246
247/*
248 * 5000 series
249 * ===========
250 */
251
/* Default RX sensitivity calibration ranges for the 5000 series */
static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
	.min_nrg_cck = 100,
	.auto_corr_min_ofdm = 90,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 120,
	.auto_corr_max_ofdm_mrc_x1 = 240,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,
	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
276
277static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
278 .min_nrg_cck = 95,
279 .auto_corr_min_ofdm = 90,
280 .auto_corr_min_ofdm_mrc = 170,
281 .auto_corr_min_ofdm_x1 = 105,
282 .auto_corr_min_ofdm_mrc_x1 = 220,
283
284 .auto_corr_max_ofdm = 120,
285 .auto_corr_max_ofdm_mrc = 210,
286 /* max = min for performance bug in 5150 DSP */
287 .auto_corr_max_ofdm_x1 = 105,
288 .auto_corr_max_ofdm_mrc_x1 = 220,
289
290 .auto_corr_min_cck = 125,
291 .auto_corr_max_cck = 200,
292 .auto_corr_min_cck_mrc = 170,
293 .auto_corr_max_cck_mrc = 400,
294 .nrg_th_cck = 95,
295 .nrg_th_ofdm = 95,
296
297 .barker_corr_th_min = 190,
298 .barker_corr_th_min_mrc = 390,
299 .nrg_th_cca = 62,
300};
301
/* conversion coefficient between raw voltage readings and temperature */
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF	(-5)

/* Compute the 5150 temperature calibration offset from the NVM's
 * factory-calibrated temperature/voltage pair. */
static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
	u16 temperature, voltage;

	temperature = le16_to_cpu(priv->nvm_data->kelvin_temperature);
	voltage = le16_to_cpu(priv->nvm_data->kelvin_voltage);

	/* offset = temp - volt / coeff */
	return (s32)(temperature -
			voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
315
/* Program the 5150 CT-kill threshold. The 5150 reports temperature in
 * raw voltage units, so the Celsius threshold is converted to Kelvin,
 * corrected by the NVM calibration offset, and scaled by the (negative)
 * voltage/temperature coefficient to match the device's units. */
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
	const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
	s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
			iwl_temp_calib_to_offset(priv);

	/* threshold ends up in the same raw units the uCode reports */
	priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
}
324
325static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
326{
327 /* want Celsius */
328 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
329}
330
331static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
332{
333 iwl5000_set_ct_threshold(priv);
334
335 /* Set initial sensitivity parameters */
336 priv->hw_params.sens = &iwl5000_sensitivity;
337}
338
339static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
340{
341 iwl5150_set_ct_threshold(priv);
342
343 /* Set initial sensitivity parameters */
344 priv->hw_params.sens = &iwl5150_sensitivity;
345}
346
/* Temperature hook for the 5150: convert the raw voltage-unit reading
 * from the statistics notification into Celsius, then run the thermal
 * throttling handler. */
static void iwl5150_temperature(struct iwl_priv *priv)
{
	u32 vt = 0;
	s32 offset = iwl_temp_calib_to_offset(priv);

	vt = le32_to_cpu(priv->statistics.common.temperature);
	vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
	/* now vt hold the temperature in Kelvin */
	priv->temperature = KELVIN_TO_CELSIUS(vt);
	iwl_tt_handler(priv);
}
358
/* Build and send the 5000-series REPLY_CHANNEL_SWITCH command.
 * Computes the uCode switch time from the CSA count, the current uCode
 * beacon time and the beacon interval, then issues the command
 * synchronously. Returns 0 or a negative error from iwl_dvm_send_cmd(). */
static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
				     struct ieee80211_channel_switch *ch_switch)
{
	/*
	 * MULTI-FIXME
	 * See iwlagn_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl5000_channel_switch_cmd cmd;
	u32 switch_time_in_usec, ucode_switch_time;
	u16 ch;
	u32 tsf_low;
	u8 switch_count;
	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
	struct ieee80211_vif *vif = ctx->vif;
	struct iwl_host_cmd hcmd = {
		.id = REPLY_CHANNEL_SWITCH,
		.len = { sizeof(cmd), },
		.flags = CMD_SYNC,
		.data = { &cmd, },
	};

	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
	ch = ch_switch->channel->hw_value;
	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
		      ctx->active.channel, ch);
	cmd.channel = cpu_to_le16(ch);
	/* carry the staged RXON flags over to the new channel */
	cmd.rxon_flags = ctx->staging.flags;
	cmd.rxon_filter_flags = ctx->staging.filter_flags;
	switch_count = ch_switch->count;
	/* only the low 32 bits of the TSF are compared below */
	tsf_low = ch_switch->timestamp & 0x0ffffffff;
	/*
	 * calculate the ucode channel switch time
	 * adding TSF as one of the factor for when to switch
	 */
	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
		/* reduce the count by beacons that already elapsed */
		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
		    beacon_interval)) {
			switch_count -= (priv->ucode_beacon_time -
				tsf_low) / beacon_interval;
		} else
			switch_count = 0;
	}
	if (switch_count <= 1)
		/* switch at the next beacon */
		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
	else {
		/* schedule switch_count beacon intervals ahead, in the
		 * uCode's extended beacon-time format */
		switch_time_in_usec =
			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
		ucode_switch_time = iwl_usecs_to_beacons(priv,
							 switch_time_in_usec,
							 beacon_interval);
		cmd.switch_time = iwl_add_beacon_time(priv,
						      priv->ucode_beacon_time,
						      ucode_switch_time,
						      beacon_interval);
	}
	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
		      cmd.switch_time);
	cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;

	return iwl_dvm_send_cmd(priv, &hcmd);
}
421
/* Device-family ops for the 5000 series (no nic_config needed) */
struct iwl_lib_ops iwl5000_lib = {
	.set_hw_params = iwl5000_hw_set_hw_params,
	.set_channel_switch = iwl5000_hw_channel_switch,
	.temperature = iwlagn_temperature,
};
427
/* Device-family ops for the 5150 series; uses its own temperature
 * conversion (raw voltage units, see iwl5150_temperature) */
struct iwl_lib_ops iwl5150_lib = {
	.set_hw_params = iwl5150_hw_set_hw_params,
	.set_channel_switch = iwl5000_hw_channel_switch,
	.temperature = iwl5150_temperature,
};
433
434
435
436/*
437 * 6000 series
438 * ===========
439 */
440
441static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
442{
443 /* want Celsius */
444 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
445 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
446}
447
/* NIC configuration for 6000 series: per-sub-family CSR setup.
 * 6005/6030/6000 need no extra configuration. */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
	switch (priv->cfg->device_family) {
	case IWL_DEVICE_FAMILY_6005:
	case IWL_DEVICE_FAMILY_6030:
	case IWL_DEVICE_FAMILY_6000:
		break;
	case IWL_DEVICE_FAMILY_6000i:
		/* 2x2 IPA phy type */
		iwl_write32(priv->trans, CSR_GP_DRIVER_REG,
			     CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
		break;
	case IWL_DEVICE_FAMILY_6050:
		/* Indicate calibration version to uCode. */
		if (priv->nvm_data->calib_version >= 6)
			iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
					CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
		break;
	case IWL_DEVICE_FAMILY_6150:
		/* Indicate calibration version to uCode. */
		if (priv->nvm_data->calib_version >= 6)
			iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
					CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
		/* 6150 is a 1x2 SKU */
		iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
			    CSR_GP_DRIVER_REG_BIT_6050_1x2);
		break;
	default:
		/* unknown family: nothing safe to program */
		WARN_ON(1);
	}
}
479
/* Default RX sensitivity calibration ranges for the 6000 series */
static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
	.min_nrg_cck = 110,
	.auto_corr_min_ofdm = 80,
	.auto_corr_min_ofdm_mrc = 128,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 192,

	.auto_corr_max_ofdm = 145,
	.auto_corr_max_ofdm_mrc = 232,
	.auto_corr_max_ofdm_x1 = 110,
	.auto_corr_max_ofdm_mrc_x1 = 232,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 175,
	.auto_corr_min_cck_mrc = 160,
	.auto_corr_max_cck_mrc = 310,
	.nrg_th_cck = 110,
	.nrg_th_ofdm = 110,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 336,
	.nrg_th_cca = 62,
};
503
504static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
505{
506 iwl6000_set_ct_threshold(priv);
507
508 /* Set initial sensitivity parameters */
509 priv->hw_params.sens = &iwl6000_sensitivity;
510
511}
512
/* Build and send the 6000-series REPLY_CHANNEL_SWITCH command.
 * Same switch-time computation as the 5000 variant, but the command is
 * heap-allocated (zeroed via kzalloc) and sent NOCOPY, then freed.
 * Returns 0, -ENOMEM, or a negative error from iwl_dvm_send_cmd(). */
static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
				     struct ieee80211_channel_switch *ch_switch)
{
	/*
	 * MULTI-FIXME
	 * See iwlagn_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl6000_channel_switch_cmd *cmd;
	u32 switch_time_in_usec, ucode_switch_time;
	u16 ch;
	u32 tsf_low;
	u8 switch_count;
	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
	struct ieee80211_vif *vif = ctx->vif;
	struct iwl_host_cmd hcmd = {
		.id = REPLY_CHANNEL_SWITCH,
		.len = { sizeof(*cmd), },
		.flags = CMD_SYNC,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};
	int err;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	hcmd.data[0] = cmd;

	cmd->band = priv->band == IEEE80211_BAND_2GHZ;
	ch = ch_switch->channel->hw_value;
	IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
		      ctx->active.channel, ch);
	cmd->channel = cpu_to_le16(ch);
	/* carry the staged RXON flags over to the new channel */
	cmd->rxon_flags = ctx->staging.flags;
	cmd->rxon_filter_flags = ctx->staging.filter_flags;
	switch_count = ch_switch->count;
	/* only the low 32 bits of the TSF are compared below */
	tsf_low = ch_switch->timestamp & 0x0ffffffff;
	/*
	 * calculate the ucode channel switch time
	 * adding TSF as one of the factor for when to switch
	 */
	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
		/* reduce the count by beacons that already elapsed */
		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
		    beacon_interval)) {
			switch_count -= (priv->ucode_beacon_time -
				tsf_low) / beacon_interval;
		} else
			switch_count = 0;
	}
	if (switch_count <= 1)
		/* switch at the next beacon */
		cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time);
	else {
		/* schedule switch_count beacon intervals ahead, in the
		 * uCode's extended beacon-time format */
		switch_time_in_usec =
			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
		ucode_switch_time = iwl_usecs_to_beacons(priv,
							 switch_time_in_usec,
							 beacon_interval);
		cmd->switch_time = iwl_add_beacon_time(priv,
						       priv->ucode_beacon_time,
						       ucode_switch_time,
						       beacon_interval);
	}
	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
		      cmd->switch_time);
	cmd->expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;

	err = iwl_dvm_send_cmd(priv, &hcmd);
	kfree(cmd);
	return err;
}
584
/* Device-family ops for the 6000 series */
struct iwl_lib_ops iwl6000_lib = {
	.set_hw_params = iwl6000_hw_set_hw_params,
	.set_channel_switch = iwl6000_hw_channel_switch,
	.nic_config = iwl6000_nic_config,
	.temperature = iwlagn_temperature,
};
591
/* Device-family ops for the 6030 series (same hooks as 6000) */
struct iwl_lib_ops iwl6030_lib = {
	.set_hw_params = iwl6000_hw_set_hw_params,
	.set_channel_switch = iwl6000_hw_channel_switch,
	.nic_config = iwl6000_nic_config,
	.temperature = iwlagn_temperature,
};
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
deleted file mode 100644
index bf479f70909..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/led.c
+++ /dev/null
@@ -1,224 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/delay.h>
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <net/mac80211.h>
35#include <linux/etherdevice.h>
36#include <asm/unaligned.h>
37#include "iwl-io.h"
38#include "iwl-trans.h"
39#include "iwl-modparams.h"
40#include "dev.h"
41#include "agn.h"
42
/* Throughput		OFF time(ms)	ON time (ms)
 *	>300			25		25
 *	>200 to 300		40		40
 *	>100 to 200		55		55
 *	>70 to 100		65		65
 *	>50 to 70		75		75
 *	>20 to 50		85		85
 *	>10 to 20		95		95
 *	>5 to 10		110		110
 *	>1 to 5			130		130
 *	>0 to 1			167		167
 *	<=0					SOLID ON
 */
/*
 * Throughput-to-blink mapping handed to mac80211's tpt LED trigger.
 * .throughput is in units of 1024 kbit/s; .blink_time appears to be the
 * full on+off period in ms (e.g. 334 = 167 + 167 from the table above).
 */
static const struct ieee80211_tpt_blink iwl_blink[] = {
	{ .throughput = 0, .blink_time = 334 },
	{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
	{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
	{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
	{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
	{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
	{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
	{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
	{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
	{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
68
/* Enable the LED: write CSR_LED_REG_TRUN_ON to the LED CSR register.
 * (The previous comment said "off", which contradicted the code.) */
void iwlagn_led_enable(struct iwl_priv *priv)
{
	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
74
75/*
76 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
77 * Led blink rate analysis showed an average deviation of 20% on 5000 series
78 * and up.
79 * Need to compensate on the led on/off time per HW according to the deviation
80 * to achieve the desired led frequency
81 * The calculation is: (100-averageDeviation)/100 * blinkTime
82 * For code efficiency the calculation will be:
83 * compensation = (100 - averageDeviation) * 64 / 100
84 * NewBlinkTime = (compensation * BlinkTime) / 64
85 */
86static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
87 u8 time, u16 compensation)
88{
89 if (!compensation) {
90 IWL_ERR(priv, "undefined blink compensation: "
91 "use pre-defined blinking time\n");
92 return time;
93 }
94
95 return (u8)((time * compensation) >> 6);
96}
97
98static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
99{
100 struct iwl_host_cmd cmd = {
101 .id = REPLY_LEDS_CMD,
102 .len = { sizeof(struct iwl_led_cmd), },
103 .data = { led_cmd, },
104 .flags = CMD_ASYNC,
105 };
106 u32 reg;
107
108 reg = iwl_read32(priv->trans, CSR_LED_REG);
109 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
110 iwl_write32(priv->trans, CSR_LED_REG,
111 reg & CSR_LED_BSM_CTRL_MSK);
112
113 return iwl_dvm_send_cmd(priv, &cmd);
114}
115
116/* Set led pattern command */
117static int iwl_led_cmd(struct iwl_priv *priv,
118 unsigned long on,
119 unsigned long off)
120{
121 struct iwl_led_cmd led_cmd = {
122 .id = IWL_LED_LINK,
123 .interval = IWL_DEF_LED_INTRVL
124 };
125 int ret;
126
127 if (!test_bit(STATUS_READY, &priv->status))
128 return -EBUSY;
129
130 if (priv->blink_on == on && priv->blink_off == off)
131 return 0;
132
133 if (off == 0) {
134 /* led is SOLID_ON */
135 on = IWL_LED_SOLID;
136 }
137
138 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
139 priv->cfg->base_params->led_compensation);
140 led_cmd.on = iwl_blink_compensation(priv, on,
141 priv->cfg->base_params->led_compensation);
142 led_cmd.off = iwl_blink_compensation(priv, off,
143 priv->cfg->base_params->led_compensation);
144
145 ret = iwl_send_led_cmd(priv, &led_cmd);
146 if (!ret) {
147 priv->blink_on = on;
148 priv->blink_off = off;
149 }
150 return ret;
151}
152
153static void iwl_led_brightness_set(struct led_classdev *led_cdev,
154 enum led_brightness brightness)
155{
156 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
157 unsigned long on = 0;
158
159 if (brightness > 0)
160 on = IWL_LED_SOLID;
161
162 iwl_led_cmd(priv, on, 0);
163}
164
165static int iwl_led_blink_set(struct led_classdev *led_cdev,
166 unsigned long *delay_on,
167 unsigned long *delay_off)
168{
169 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
170
171 return iwl_led_cmd(priv, *delay_on, *delay_off);
172}
173
174void iwl_leds_init(struct iwl_priv *priv)
175{
176 int mode = iwlwifi_mod_params.led_mode;
177 int ret;
178
179 if (mode == IWL_LED_DISABLE) {
180 IWL_INFO(priv, "Led disabled\n");
181 return;
182 }
183 if (mode == IWL_LED_DEFAULT)
184 mode = priv->cfg->led_mode;
185
186 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
187 wiphy_name(priv->hw->wiphy));
188 priv->led.brightness_set = iwl_led_brightness_set;
189 priv->led.blink_set = iwl_led_blink_set;
190 priv->led.max_brightness = 1;
191
192 switch (mode) {
193 case IWL_LED_DEFAULT:
194 WARN_ON(1);
195 break;
196 case IWL_LED_BLINK:
197 priv->led.default_trigger =
198 ieee80211_create_tpt_led_trigger(priv->hw,
199 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
200 iwl_blink, ARRAY_SIZE(iwl_blink));
201 break;
202 case IWL_LED_RF_STATE:
203 priv->led.default_trigger =
204 ieee80211_get_radio_led_name(priv->hw);
205 break;
206 }
207
208 ret = led_classdev_register(priv->trans->dev, &priv->led);
209 if (ret) {
210 kfree(priv->led.name);
211 return;
212 }
213
214 priv->led_registered = true;
215}
216
217void iwl_leds_exit(struct iwl_priv *priv)
218{
219 if (!priv->led_registered)
220 return;
221
222 led_classdev_unregister(&priv->led);
223 kfree(priv->led.name);
224}
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
deleted file mode 100644
index b02a853103d..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
#ifndef __iwl_leds_h__
#define __iwl_leds_h__


struct iwl_priv;

/* 'on' value requesting a solid-on LED (no blinking) */
#define IWL_LED_SOLID 11
/* default LED command interval; cpu_to_le32 because the uCode reads it */
#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)

/* LED id field values for struct iwl_led_cmd */
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)

void iwlagn_led_enable(struct iwl_priv *priv);
void iwl_leds_init(struct iwl_priv *priv);
void iwl_leds_exit(struct iwl_priv *priv);

#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
deleted file mode 100644
index 6ff46605ad4..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ /dev/null
@@ -1,1292 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37#include "iwl-agn-hw.h"
38#include "iwl-trans.h"
39#include "iwl-modparams.h"
40
41#include "dev.h"
42#include "agn.h"
43
44int iwlagn_hw_valid_rtc_data_addr(u32 addr)
45{
46 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
47 (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
48}
49
50int iwlagn_send_tx_power(struct iwl_priv *priv)
51{
52 struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
53 u8 tx_ant_cfg_cmd;
54
55 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
56 "TX Power requested while scanning!\n"))
57 return -EAGAIN;
58
59 /* half dBm need to multiply */
60 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
61
62 if (tx_power_cmd.global_lmt > priv->nvm_data->max_tx_pwr_half_dbm) {
63 /*
64 * For the newer devices which using enhanced/extend tx power
65 * table in EEPROM, the format is in half dBm. driver need to
66 * convert to dBm format before report to mac80211.
67 * By doing so, there is a possibility of 1/2 dBm resolution
68 * lost. driver will perform "round-up" operation before
69 * reporting, but it will cause 1/2 dBm tx power over the
70 * regulatory limit. Perform the checking here, if the
71 * "tx_power_user_lmt" is higher than EEPROM value (in
72 * half-dBm format), lower the tx power based on EEPROM
73 */
74 tx_power_cmd.global_lmt =
75 priv->nvm_data->max_tx_pwr_half_dbm;
76 }
77 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
78 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
79
80 if (IWL_UCODE_API(priv->fw->ucode_ver) == 1)
81 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
82 else
83 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
84
85 return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
86 sizeof(tx_power_cmd), &tx_power_cmd);
87}
88
/*
 * Cache the temperature reported by the latest statistics notification
 * and kick the thermal-throttling handler.
 * Caller must hold priv->statistics.lock (enforced by lockdep).
 */
void iwlagn_temperature(struct iwl_priv *priv)
{
	lockdep_assert_held(&priv->statistics.lock);

	/* store temperature from correct statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
	iwl_tt_handler(priv);
}
97
98int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
99{
100 int idx = 0;
101 int band_offset = 0;
102
103 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
104 if (rate_n_flags & RATE_MCS_HT_MSK) {
105 idx = (rate_n_flags & 0xff);
106 return idx;
107 /* Legacy rate format, search for match in table */
108 } else {
109 if (band == IEEE80211_BAND_5GHZ)
110 band_offset = IWL_FIRST_OFDM_RATE;
111 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
112 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
113 return idx - band_offset;
114 }
115
116 return -1;
117}
118
119int iwlagn_manage_ibss_station(struct iwl_priv *priv,
120 struct ieee80211_vif *vif, bool add)
121{
122 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
123
124 if (add)
125 return iwlagn_add_bssid_station(priv, vif_priv->ctx,
126 vif->bss_conf.bssid,
127 &vif_priv->ibss_bssid_sta_id);
128 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
129 vif->bss_conf.bssid);
130}
131
132/**
133 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
134 *
135 * pre-requirements:
136 * 1. acquire mutex before calling
137 * 2. make sure rf is on and not in exit state
138 */
139int iwlagn_txfifo_flush(struct iwl_priv *priv)
140{
141 struct iwl_txfifo_flush_cmd flush_cmd;
142 struct iwl_host_cmd cmd = {
143 .id = REPLY_TXFIFO_FLUSH,
144 .len = { sizeof(struct iwl_txfifo_flush_cmd), },
145 .flags = CMD_SYNC,
146 .data = { &flush_cmd, },
147 };
148
149 memset(&flush_cmd, 0, sizeof(flush_cmd));
150
151 flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
152 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
153 IWL_SCD_MGMT_MSK;
154 if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
155 flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK |
156 IWL_PAN_SCD_VI_MSK |
157 IWL_PAN_SCD_BE_MSK |
158 IWL_PAN_SCD_BK_MSK |
159 IWL_PAN_SCD_MGMT_MSK |
160 IWL_PAN_SCD_MULTICAST_MSK;
161
162 if (priv->nvm_data->sku_cap_11n_enable)
163 flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
164
165 IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
166 flush_cmd.queue_control);
167 flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
168
169 return iwl_dvm_send_cmd(priv, &cmd);
170}
171
172void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
173{
174 mutex_lock(&priv->mutex);
175 ieee80211_stop_queues(priv->hw);
176 if (iwlagn_txfifo_flush(priv)) {
177 IWL_ERR(priv, "flush request fail\n");
178 goto done;
179 }
180 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
181 iwl_trans_wait_tx_queue_empty(priv->trans);
182done:
183 ieee80211_wake_queues(priv->hw);
184 mutex_unlock(&priv->mutex);
185}
186
/*
 * BT coex
 */
/* Normal TDM (time-division) decision lookup table */
static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xc0004000),
	cpu_to_le32(0x00004000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0005000),
};
205
206
/* Loose Coex decision lookup table (same first 8 words as the normal
 * TDM table; differs in the last four entries) */
static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0005000),
};
222
/* Full concurrency decision lookup table (used when BT and WiFi run on
 * separate antennas — see iwlagn_send_advance_bt_config) */
static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
};
238
/*
 * Build and send the advanced BT coexistence configuration
 * (REPLY_BT_CONFIG) to the uCode. The shared "basic" part is filled in
 * first, then copied into the v1 or v2 command layout depending on the
 * device generation (bt_session_2).
 */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
	struct iwl_basic_bt_cmd basic = {
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
	};
	struct iwl_bt_cmd_v1 bt_cmd_v1;
	struct iwl_bt_cmd_v2 bt_cmd_v2;
	int ret;

	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
			sizeof(basic.bt3_lookup_table));

	if (priv->cfg->bt_params) {
		/*
		 * newer generation of devices (2000 series and newer)
		 * use the version 2 of the bt command
		 * we need to make sure sending the host command
		 * with correct data structure to avoid uCode assert
		 */
		if (priv->cfg->bt_params->bt_session_2) {
			bt_cmd_v2.prio_boost = cpu_to_le32(
				priv->cfg->bt_params->bt_prio_boost);
			bt_cmd_v2.tx_prio_boost = 0;
			bt_cmd_v2.rx_prio_boost = 0;
		} else {
			/* older version only has 8 bits */
			WARN_ON(priv->cfg->bt_params->bt_prio_boost & ~0xFF);
			bt_cmd_v1.prio_boost =
				priv->cfg->bt_params->bt_prio_boost;
			bt_cmd_v1.tx_prio_boost = 0;
			bt_cmd_v1.rx_prio_boost = 0;
		}
	} else {
		/* no bt_params: device has no advanced coex support */
		IWL_ERR(priv, "failed to construct BT Coex Config\n");
		return;
	}

	/*
	 * Possible situations when BT needs to take over for receive,
	 * at the same time where STA needs to response to AP's frame(s),
	 * reduce the tx power of the required response frames, by that,
	 * allow the concurrent BT receive & WiFi transmit
	 * (BT - ANT A, WiFi -ANT B), without interference to one another
	 *
	 * Reduced tx power apply to control frames only (ACK/Back/CTS)
	 * when indicated by the BT config command
	 */
	basic.kill_ack_mask = priv->kill_ack_mask;
	basic.kill_cts_mask = priv->kill_cts_mask;
	if (priv->reduced_txpower)
		basic.reduce_txpower = IWLAGN_BT_REDUCED_TX_PWR;
	basic.valid = priv->bt_valid;

	/*
	 * Configure BT coex mode to "no coexistence" when the
	 * user disabled BT coexistence, we have no interface
	 * (might be in monitor mode), or the interface is in
	 * IBSS mode (no proper uCode support for coex then).
	 */
	if (!iwlwifi_mod_params.bt_coex_active ||
	    priv->iw_mode == NL80211_IFTYPE_ADHOC) {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
	} else {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
				IWLAGN_BT_FLAG_COEX_MODE_SHIFT;

		if (!priv->bt_enable_pspoll)
			basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
		else
			basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;

		if (priv->bt_ch_announce)
			basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
		IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
	}
	priv->bt_enable_flag = basic.flags;
	/* pick the decision LUT matching the current antenna scheme */
	if (priv->bt_full_concurrent)
		memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
			sizeof(iwlagn_concurrent_lookup));
	else
		memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
			sizeof(iwlagn_def_3w_lookup));

	IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
		       basic.flags ? "active" : "disabled",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	/* wrap the shared part in the layout this device expects */
	if (priv->cfg->bt_params->bt_session_2) {
		memcpy(&bt_cmd_v2.basic, &basic,
			sizeof(basic));
		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2);
	} else {
		memcpy(&bt_cmd_v1.basic, &basic,
			sizeof(basic));
		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1);
	}
	if (ret)
		IWL_ERR(priv, "failed to send BT Coex Config\n");

}
345
/*
 * Enable (rssi_ena) or disable RSSI monitoring, moving it to the
 * correct interface. Monitoring is always disabled while an associated
 * AP/GO context exists. priv->cur_rssi_ctx tracks which context (if
 * any) currently has reports enabled. Caller must hold priv->mutex.
 */
void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
{
	struct iwl_rxon_context *ctx, *found_ctx = NULL;
	bool found_ap = false;

	lockdep_assert_held(&priv->mutex);

	/* Check whether AP or GO mode is active. */
	if (rssi_ena) {
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP &&
			    iwl_is_associated_ctx(ctx)) {
				found_ap = true;
				break;
			}
		}
	}

	/*
	 * If disable was received or If GO/AP mode, disable RSSI
	 * measurements.
	 */
	if (!rssi_ena || found_ap) {
		if (priv->cur_rssi_ctx) {
			ctx = priv->cur_rssi_ctx;
			ieee80211_disable_rssi_reports(ctx->vif);
			priv->cur_rssi_ctx = NULL;
		}
		return;
	}

	/*
	 * If rssi measurements need to be enabled, consider all cases now.
	 * Figure out how many contexts are active.
	 */
	for_each_context(priv, ctx) {
		if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
		    iwl_is_associated_ctx(ctx)) {
			found_ctx = ctx;
			break;
		}
	}

	/*
	 * rssi monitor already enabled for the correct interface...nothing
	 * to do.
	 */
	if (found_ctx == priv->cur_rssi_ctx)
		return;

	/*
	 * Figure out if rssi monitor is currently enabled, and needs
	 * to be changed. If rssi monitor is already enabled, disable
	 * it first else just enable rssi measurements on the
	 * interface found above.
	 */
	if (priv->cur_rssi_ctx) {
		ctx = priv->cur_rssi_ctx;
		if (ctx->vif)
			ieee80211_disable_rssi_reports(ctx->vif);
	}

	priv->cur_rssi_ctx = found_ctx;

	/* no associated station context found: leave monitoring off */
	if (!found_ctx)
		return;

	ieee80211_enable_rssi_reports(found_ctx->vif,
			IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD,
			IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD);
}
417
418static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
419{
420 return BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3 >>
421 BT_UART_MSG_FRAME3SCOESCO_POS;
422}
423
/*
 * Worker: react to a BT traffic-load change by picking a new SMPS mode
 * for station interfaces, refreshing the chain flags, and adjusting
 * RSSI monitoring. Does nothing while coex is disabled, and bails out
 * (to be rescheduled on scan complete) while a HW scan is running.
 */
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_traffic_change_work);
	struct iwl_rxon_context *ctx;
	int smps_request = -1;	/* -1: leave SMPS unchanged */

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	/*
	 * Note: bt_traffic_load can be overridden by scan complete and
	 * coex profile notifications. Ignore that since only bad consequence
	 * can be not matching debug print with actual state.
	 */
	IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
		       priv->bt_traffic_load);

	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
		if (priv->bt_status)
			smps_request = IEEE80211_SMPS_DYNAMIC;
		else
			smps_request = IEEE80211_SMPS_AUTOMATIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		smps_request = IEEE80211_SMPS_DYNAMIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		smps_request = IEEE80211_SMPS_STATIC;
		break;
	default:
		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
			priv->bt_traffic_load);
		break;
	}

	mutex_lock(&priv->mutex);

	/*
	 * We can not send command to firmware while scanning. When the scan
	 * complete we will schedule this work again. We do check with mutex
	 * locked to prevent new scan request to arrive. We do not check
	 * STATUS_SCANNING to avoid race when queue_work two times from
	 * different notifications, but quit and not perform any work at all.
	 */
	if (test_bit(STATUS_SCAN_HW, &priv->status))
		goto out;

	iwl_update_chain_flags(priv);

	if (smps_request != -1) {
		priv->current_ht_config.smps = smps_request;
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
				ieee80211_request_smps(ctx->vif, smps_request);
		}
	}

	/*
	 * Dynamic PS poll related functionality. Adjust RSSI measurements if
	 * necessary.
	 */
	iwlagn_bt_coex_rssi_monitor(priv);
out:
	mutex_unlock(&priv->mutex);
}
494
495/*
496 * If BT sco traffic, and RSSI monitor is enabled, move measurements to the
497 * correct interface or disable it if this is the last interface to be
498 * removed.
499 */
500void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv)
501{
502 if (priv->bt_is_sco &&
503 priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS)
504 iwlagn_bt_adjust_rssi_monitor(priv, true);
505 else
506 iwlagn_bt_adjust_rssi_monitor(priv, false);
507}
508
/*
 * Decode the last BT UART message (frames 1-7) into the COEX debug log.
 * Each field is extracted with its (MSK & frame) >> POS pair; debug
 * output only, no state is changed.
 */
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				 struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X\n",
		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1MSGTYPE_POS,
		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1SSN_POS,
		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X\n",
		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2CHLSEQN_POS,
		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n",
		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SCOESCO_POS,
		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SNIFF_POS,
		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3A2DP_POS,
		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3ACL_POS,
		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3MASTER_POS,
		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n",
		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
			BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X\n",
		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5TXACTIVITY_POS,
		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5RXACTIVITY_POS,
		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n",
		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
			"0x%X, Inquiry = 0x%X, Connectable = 0x%X\n",
		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
		(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7PAGE_POS,
		(BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7INQUIRY_POS,
		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
577
/*
 * Select the ACK/CTS "kill mask" pair to send to the uCode and store it
 * in priv. Index selection: REDUCE while reduced txpower is active,
 * otherwise SCO vs. default based on the BT UART frame3 SCO/eSCO bit.
 * Returns true when the masks changed and a new BT config is needed.
 */
static bool iwlagn_set_kill_msk(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	bool need_update = false;
	/* default index: "reduce" masks (applies under reduced txpower) */
	u8 kill_msk = IWL_BT_KILL_REDUCE;
	static const __le32 bt_kill_ack_msg[3] = {
		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
	static const __le32 bt_kill_cts_msg[3] = {
		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};

	if (!priv->reduced_txpower)
		kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
			? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT;
	/* record the new masks and mark them valid for the next BT config */
	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
		need_update = true;
	}
	return need_update;
}
605
/*
 * Upon RSSI changes, sends a bt config command with following changes
 *  1. enable/disable "reduced control frames tx power"
 *  2. update the "kill_ack_mask" and "kill_cts_mask"
 *
 * If "reduced tx power" is enabled, uCode shall
 *  1. ACK/Back/CTS rate shall reduced to 6Mbps
 *  2. not use duplicate 20/40MHz mode
 *
 * Returns true when priv->reduced_txpower changed, so the caller knows
 * a fresh BT config command must be sent.
 */
static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	bool need_update = false;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int ave_rssi;

	/* only meaningful for an active BSS station interface */
	if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
		IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
		return false;
	}

	ave_rssi = ieee80211_ave_rssi(ctx->vif);
	if (!ave_rssi) {
		/* no rssi data, no changes to reduce tx power */
		IWL_DEBUG_COEX(priv, "no rssi data available\n");
		return need_update;
	}
	/*
	 * Enable reduced tx power when: not already enabled, no PAN
	 * association, RSSI above the enable threshold, and BT frame3
	 * shows ACL/OBEX traffic but no SCO/sniff/A2DP.
	 */
	if (!priv->reduced_txpower &&
	    !iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
	    (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) &&
	    (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
	    BT_UART_MSG_FRAME3OBEX_MSK)) &&
	    !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
	    BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) {
		/* enabling reduced tx power */
		priv->reduced_txpower = true;
		priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
		need_update = true;
	} else if (priv->reduced_txpower &&
		   (iwl_is_associated(priv, IWL_RXON_CTX_PAN) ||
		   (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) ||
		   (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
		   BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) ||
		   !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
		   BT_UART_MSG_FRAME3OBEX_MSK)))) {
		/* disable reduced tx power */
		priv->reduced_txpower = false;
		priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
		need_update = true;
	}

	return need_update;
}
659
/*
 * Rx handler for REPLY_BT_COEX_PROFILE_NOTIF: record the BT status and
 * traffic load reported by the uCode and queue the work items that
 * react to them (SMPS adjustment, runtime BT config refresh).
 * Always returns 0.
 */
int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
				 struct iwl_rx_cmd_buffer *rxb,
				 struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return 0;
	}

	IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
	IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status);
	IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_COEX(priv, " CI compliance: %d\n",
			coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	/* remember the previous load before it may be overwritten below */
	priv->last_bt_traffic_load = priv->bt_traffic_load;
	priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg);

	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
	}

	/* schedule to send runtime bt_config */
	/* check reduce power before change ack/cts kill mask */
	if (iwlagn_fill_txpower_mode(priv, uart_msg) ||
	    iwlagn_set_kill_msk(priv, uart_msg))
		queue_work(priv->workqueue, &priv->bt_runtime_config);


	/* FIXME: based on notification, adjust the prio_boost */

	priv->bt_ci_compliance = coex->bt_ci_compliance;
	return 0;
}
717
/*
 * iwlagn_bt_rx_handler_setup - register the BT coex notification handler
 *
 * Hooks iwlagn_bt_coex_profile_notif() into the Rx dispatch table so
 * that BT profile notifications from the uCode are processed.
 */
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}
723
/*
 * iwlagn_bt_setup_deferred_work - init the BT traffic-change work item
 *
 * Must be paired with iwlagn_bt_cancel_deferred_work() on teardown.
 */
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}
729
/*
 * iwlagn_bt_cancel_deferred_work - cancel and wait for pending BT work
 */
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}
734
735static bool is_single_rx_stream(struct iwl_priv *priv)
736{
737 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
738 priv->current_ht_config.single_chain_sufficient;
739}
740
#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1

/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity.  Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	/* advanced BT coex overrides the HT-based decision below */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		return IWL_NUM_RX_CHAINS_SINGLE;
	}
	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	else
		return IWL_NUM_RX_CHAINS_MULTIPLE;
}
774
775/*
776 * When we are in power saving mode, unless device support spatial
777 * multiplexing power save, use the active count for rx chain count.
778 */
779static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
780{
781 /* # Rx chains when idling, depending on SMPS mode */
782 switch (priv->current_ht_config.smps) {
783 case IEEE80211_SMPS_STATIC:
784 case IEEE80211_SMPS_DYNAMIC:
785 return IWL_NUM_IDLE_CHAINS_SINGLE;
786 case IEEE80211_SMPS_AUTOMATIC:
787 case IEEE80211_SMPS_OFF:
788 return active_cnt;
789 default:
790 WARN(1, "invalid SMPS mode %d",
791 priv->current_ht_config.smps);
792 return active_cnt;
793 }
794}
795
796/* up to 4 chains */
797static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
798{
799 u8 res;
800 res = (chain_bitmap & BIT(0)) >> 0;
801 res += (chain_bitmap & BIT(1)) >> 1;
802 res += (chain_bitmap & BIT(2)) >> 2;
803 res += (chain_bitmap & BIT(3)) >> 3;
804 return res;
805}
806
807/**
808 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
809 *
810 * Selects how many and which Rx receivers/antennas/chains to use.
811 * This should not be used for scan command ... it puts data in wrong place.
812 */
813void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
814{
815 bool is_single = is_single_rx_stream(priv);
816 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
817 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
818 u32 active_chains;
819 u16 rx_chain;
820
821 /* Tell uCode which antennas are actually connected.
822 * Before first association, we assume all antennas are connected.
823 * Just after first association, iwl_chain_noise_calibration()
824 * checks which antennas actually *are* connected. */
825 if (priv->chain_noise_data.active_chains)
826 active_chains = priv->chain_noise_data.active_chains;
827 else
828 active_chains = priv->nvm_data->valid_rx_ant;
829
830 if (priv->cfg->bt_params &&
831 priv->cfg->bt_params->advanced_bt_coexist &&
832 (priv->bt_full_concurrent ||
833 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
834 /*
835 * only use chain 'A' in bt high traffic load or
836 * full concurrency mode
837 */
838 active_chains = first_antenna(active_chains);
839 }
840
841 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
842
843 /* How many receivers should we use? */
844 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
845 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
846
847
848 /* correct rx chain count according hw settings
849 * and chain noise calibration
850 */
851 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
852 if (valid_rx_cnt < active_rx_cnt)
853 active_rx_cnt = valid_rx_cnt;
854
855 if (valid_rx_cnt < idle_rx_cnt)
856 idle_rx_cnt = valid_rx_cnt;
857
858 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
859 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
860
861 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
862
863 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
864 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
865 else
866 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
867
868 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
869 ctx->staging.rx_chain,
870 active_rx_cnt, idle_rx_cnt);
871
872 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
873 active_rx_cnt < idle_rx_cnt);
874}
875
876u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
877{
878 int i;
879 u8 ind = ant;
880
881 if (priv->band == IEEE80211_BAND_2GHZ &&
882 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
883 return 0;
884
885 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
886 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
887 if (valid & BIT(ind))
888 return ind;
889 }
890 return ant;
891}
892
893#ifdef CONFIG_PM_SLEEP
894static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
895{
896 int i;
897
898 for (i = 0; i < IWLAGN_P1K_SIZE; i++)
899 out[i] = cpu_to_le16(p1k[i]);
900}
901
/*
 * Context passed to iwlagn_wowlan_program_keys() while iterating over the
 * installed keys during suspend; collects the key material that must be
 * uploaded to the WoWLAN uCode.
 */
struct wowlan_key_data {
	struct iwl_rxon_context *ctx;		/* BSS context the keys belong to */
	struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; /* RSC/TSC counters to upload */
	struct iwlagn_wowlan_tkip_params_cmd *tkip;	/* TKIP key material to upload */
	const u8 *bssid;	/* BSSID, used for TKIP phase-1 key derivation */
	/* error: key programming failed; use_*: which commands must be sent */
	bool error, use_rsc_tsc, use_tkip;
};
909
910
/*
 * iwlagn_wowlan_program_keys - ieee80211_iter_keys() callback for suspend
 *
 * Programs each installed key into the device and captures the key
 * material and replay counters (TSC/RSC for TKIP, PN for CCMP) that the
 * WoWLAN uCode needs, storing them into the wowlan_key_data in @_data.
 * On any key-programming failure data->error is set; the caller checks
 * it after iteration.  Called with priv->mutex NOT held (the suspend
 * path drops it around the iteration), so it is taken here.
 */
static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct ieee80211_key_conf *key,
				       void *_data)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct wowlan_key_data *data = _data;
	struct iwl_rxon_context *ctx = data->ctx;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwlagn_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWLAGN_P1K_SIZE];
	int ret, i;

	mutex_lock(&priv->mutex);

	/* WEP group keys without key-mapping go in as default keys */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	     !sta && !ctx->key_mapping_keys)
		ret = iwl_set_default_wep_key(priv, ctx, key);
	else
		ret = iwl_set_dynamic_key(priv, ctx, key, sta);

	if (ret) {
		IWL_ERR(priv, "Error setting key during suspend!\n");
		data->error = true;
	}

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			/* pairwise key: capture both Tx and Rx counters */
			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			ieee80211_get_key_tx_seq(key, &seq);
			tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);

			ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
			iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWLAGN_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			/* group key: Rx-only counters */
			tkip_sc =
				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		/* pre-compute phase-1 keys for the current and next iv32 */
		ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
		iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, data->bssid,
					  cur_rx_iv32 + 1, p1k);
		iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);

		memcpy(rx_mic_key,
		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       IWLAGN_MIC_KEY_SIZE);

		data->use_tkip = true;
		data->use_rsc_tsc = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (sta) {
			/* pn aliases seq.ccmp.pn, filled by the call below */
			u8 *pn = seq.ccmp.pn;

			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;

			ieee80211_get_key_tx_seq(key, &seq);
			aes_tx_sc->pn = cpu_to_le64(
					(u64)pn[5] |
					((u64)pn[4] << 8) |
					((u64)pn[3] << 16) |
					((u64)pn[2] << 24) |
					((u64)pn[1] << 32) |
					((u64)pn[0] << 40));
		} else
			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 for checking the IV in the frames.
		 */
		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
			u8 *pn = seq.ccmp.pn;

			ieee80211_get_key_rx_seq(key, i, &seq);
			aes_sc->pn = cpu_to_le64(
					(u64)pn[5] |
					((u64)pn[4] << 8) |
					((u64)pn[3] << 16) |
					((u64)pn[2] << 24) |
					((u64)pn[1] << 32) |
					((u64)pn[0] << 40));
		}
		data->use_rsc_tsc = true;
		break;
	}

	mutex_unlock(&priv->mutex);
}
1037
/*
 * iwlagn_send_patterns - upload WoWLAN wakeup patterns to the uCode
 *
 * Builds a REPLY_WOWLAN_PATTERNS command from the cfg80211 pattern list
 * and sends it synchronously.  Returns 0 if there are no patterns,
 * -ENOMEM on allocation failure, or the command's error code.
 */
int iwlagn_send_patterns(struct iwl_priv *priv,
			 struct cfg80211_wowlan *wowlan)
{
	struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_WOWLAN_PATTERNS,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
		.flags = CMD_SYNC,
	};
	int i, err;

	if (!wowlan->n_patterns)
		return 0;

	/* command header plus one fixed-size slot per pattern */
	cmd.len[0] = sizeof(*pattern_cmd) +
		wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);

	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
	if (!pattern_cmd)
		return -ENOMEM;

	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);

	for (i = 0; i < wowlan->n_patterns; i++) {
		/* the mask has one bit per pattern byte */
		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);

		memcpy(&pattern_cmd->patterns[i].mask,
			wowlan->patterns[i].mask, mask_len);
		memcpy(&pattern_cmd->patterns[i].pattern,
			wowlan->patterns[i].pattern,
			wowlan->patterns[i].pattern_len);
		pattern_cmd->patterns[i].mask_size = mask_len;
		pattern_cmd->patterns[i].pattern_size =
			wowlan->patterns[i].pattern_len;
	}

	cmd.data[0] = pattern_cmd;
	err = iwl_dvm_send_cmd(priv, &cmd);
	kfree(pattern_cmd);
	return err;
}
1079
/*
 * iwlagn_suspend - switch the device into WoWLAN operation
 *
 * Stops the runtime firmware, loads and starts the WoWLAN uCode, then
 * restores the RXON configuration and uploads everything the WoWLAN
 * image needs to wake the host: wakeup filters, key material and replay
 * counters, GTK rekey data and the configured patterns.
 *
 * Called from the mac80211 suspend path with priv->mutex held (the key
 * iteration below drops and re-takes it).  Returns 0 on success or a
 * negative errno; on error the caller tears down and restarts.
 */
int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
{
	struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
	struct iwl_rxon_cmd rxon;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
	struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
	struct iwlagn_d3_config_cmd d3_cfg_cmd = {};
	struct wowlan_key_data key_data = {
		.ctx = ctx,
		.bssid = ctx->active.bssid_addr,
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
	};
	int ret, i;
	u16 seq;

	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));

	/*
	 * We know the last used seqno, and the uCode expects to know that
	 * one, it will increment before TX.
	 */
	seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
	wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);

	/*
	 * For QoS counters, we store the one to use next, so subtract 0x10
	 * since the uCode will add 0x10 before using the value.
	 */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		seq = priv->tid_data[IWL_AP_ID][i].seq_number;
		seq -= 0x10;
		wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
	}

	/* translate the cfg80211 wakeup triggers into uCode filter flags */
	if (wowlan->disconnect)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
				    IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		d3_cfg_cmd.wakeup_flags |=
			cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);

	iwl_scan_cancel_timeout(priv, 200);

	/* snapshot the active RXON before the device is restarted */
	memcpy(&rxon, &ctx->active, sizeof(rxon));

	priv->ucode_loaded = false;
	iwl_trans_stop_device(priv->trans);

	priv->wowlan = true;

	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
	if (ret)
		goto out;

	/* now configure WoWLAN ucode */
	ret = iwl_alive_start(priv);
	if (ret)
		goto out;

	/* replay the pre-suspend RXON into the WoWLAN uCode */
	memcpy(&ctx->staging, &rxon, sizeof(rxon));
	ret = iwlagn_commit_rxon(priv, ctx);
	if (ret)
		goto out;

	ret = iwl_power_update_mode(priv, true);
	if (ret)
		goto out;

	if (!iwlwifi_mod_params.sw_crypto) {
		/* mark all keys clear */
		priv->ucode_key_table = 0;
		ctx->key_mapping_keys = 0;

		/*
		 * This needs to be unlocked due to lock ordering
		 * constraints. Since we're in the suspend path
		 * that isn't really a problem though.
		 */
		mutex_unlock(&priv->mutex);
		ieee80211_iter_keys(priv->hw, ctx->vif,
				    iwlagn_wowlan_program_keys,
				    &key_data);
		mutex_lock(&priv->mutex);
		if (key_data.error) {
			ret = -EIO;
			goto out;
		}

		if (key_data.use_rsc_tsc) {
			struct iwl_host_cmd rsc_tsc_cmd = {
				.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
				.flags = CMD_SYNC,
				.data[0] = key_data.rsc_tsc,
				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
				.len[0] = sizeof(*key_data.rsc_tsc),
			};

			ret = iwl_dvm_send_cmd(priv, &rsc_tsc_cmd);
			if (ret)
				goto out;
		}

		if (key_data.use_tkip) {
			ret = iwl_dvm_send_cmd_pdu(priv,
						 REPLY_WOWLAN_TKIP_PARAMS,
						 CMD_SYNC, sizeof(tkip_cmd),
						 &tkip_cmd);
			if (ret)
				goto out;
		}

		if (priv->have_rekey_data) {
			/* upload KEK/KCK so the uCode can handle GTK rekey */
			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
			memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
			memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
			kek_kck_cmd.replay_ctr = priv->replay_ctr;

			ret = iwl_dvm_send_cmd_pdu(priv,
						 REPLY_WOWLAN_KEK_KCK_MATERIAL,
						 CMD_SYNC, sizeof(kek_kck_cmd),
						 &kek_kck_cmd);
			if (ret)
				goto out;
		}
	}

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, CMD_SYNC,
				     sizeof(d3_cfg_cmd), &d3_cfg_cmd);
	if (ret)
		goto out;

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
				 CMD_SYNC, sizeof(wakeup_filter_cmd),
				 &wakeup_filter_cmd);
	if (ret)
		goto out;

	ret = iwlagn_send_patterns(priv, wowlan);
 out:
	kfree(key_data.rsc_tsc);
	return ret;
}
1248#endif
1249
/*
 * iwl_dvm_send_cmd - send a host command through the transport layer
 *
 * Refuses to send while in RF-kill/CT-kill, after a firmware error, or
 * while the testmode owns the uCode (unless CMD_ON_DEMAND is set).
 * Returns the transport's result or -EIO when sending is refused.
 */
int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			iwl_dvm_get_cmd_string(cmd->id));
		return -EIO;
	}

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex, this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC))
		lockdep_assert_held(&priv->mutex);

	if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	return iwl_trans_send_cmd(priv->trans, cmd);
}
1280
1281int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
1282 u32 flags, u16 len, const void *data)
1283{
1284 struct iwl_host_cmd cmd = {
1285 .id = id,
1286 .len = { len, },
1287 .data = { data, },
1288 .flags = flags,
1289 };
1290
1291 return iwl_dvm_send_cmd(priv, &cmd);
1292}
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
deleted file mode 100644
index 3163e0f38c2..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ /dev/null
@@ -1,1670 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/slab.h>
33#include <linux/dma-mapping.h>
34#include <linux/delay.h>
35#include <linux/sched.h>
36#include <linux/skbuff.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_arp.h>
40
41#include <net/ieee80211_radiotap.h>
42#include <net/mac80211.h>
43
44#include <asm/div64.h>
45
46#include "iwl-io.h"
47#include "iwl-trans.h"
48#include "iwl-op-mode.h"
49#include "iwl-modparams.h"
50
51#include "dev.h"
52#include "calib.h"
53#include "agn.h"
54
55/*****************************************************************************
56 *
57 * mac80211 entry point functions
58 *
59 *****************************************************************************/
60
/* one station plus one AP interface */
static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP),
	},
};

/* up to two station interfaces */
static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
};

/* one station plus one P2P-GO or AP interface */
static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_GO) |
			 BIT(NL80211_IFTYPE_AP),
	},
};

/* up to two stations plus one P2P client */
static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
};
101
/* interface combinations advertised when AP mode (no P2P) is supported */
static const struct ieee80211_iface_combination
iwlagn_iface_combinations_dualmode[] = {
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .beacon_int_infra_match = true,
	  .limits = iwlagn_sta_ap_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
	},
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .limits = iwlagn_2sta_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
	},
};

/* interface combinations advertised when P2P is supported */
static const struct ieee80211_iface_combination
iwlagn_iface_combinations_p2p[] = {
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .beacon_int_infra_match = true,
	  .limits = iwlagn_p2p_sta_go_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
	},
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .limits = iwlagn_p2p_2sta_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
	},
};
131
132/*
133 * Not a mac80211 entry point function, but it fits in with all the
134 * other mac80211 functions grouped here.
135 */
136int iwlagn_mac_setup_register(struct iwl_priv *priv,
137 const struct iwl_ucode_capabilities *capa)
138{
139 int ret;
140 struct ieee80211_hw *hw = priv->hw;
141 struct iwl_rxon_context *ctx;
142
143 hw->rate_control_algorithm = "iwl-agn-rs";
144
145 /* Tell mac80211 our characteristics */
146 hw->flags = IEEE80211_HW_SIGNAL_DBM |
147 IEEE80211_HW_AMPDU_AGGREGATION |
148 IEEE80211_HW_NEED_DTIM_PERIOD |
149 IEEE80211_HW_SPECTRUM_MGMT |
150 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
151 IEEE80211_HW_QUEUE_CONTROL |
152 IEEE80211_HW_SUPPORTS_PS |
153 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
154 IEEE80211_HW_WANT_MONITOR_VIF |
155 IEEE80211_HW_SCAN_WHILE_IDLE;
156
157 hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
158 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
159
160 /*
161 * Including the following line will crash some AP's. This
162 * workaround removes the stimulus which causes the crash until
163 * the AP software can be fixed.
164 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
165 */
166
167 if (priv->nvm_data->sku_cap_11n_enable)
168 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
169 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
170
171 /*
172 * Enable 11w if advertised by firmware and software crypto
173 * is not enabled (as the firmware will interpret some mgmt
174 * packets, so enabling it with software crypto isn't safe)
175 */
176 if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
177 !iwlwifi_mod_params.sw_crypto)
178 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
179
180 hw->sta_data_size = sizeof(struct iwl_station_priv);
181 hw->vif_data_size = sizeof(struct iwl_vif_priv);
182
183 for_each_context(priv, ctx) {
184 hw->wiphy->interface_modes |= ctx->interface_modes;
185 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
186 }
187
188 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
189
190 if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
191 hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
192 hw->wiphy->n_iface_combinations =
193 ARRAY_SIZE(iwlagn_iface_combinations_p2p);
194 } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
195 hw->wiphy->iface_combinations =
196 iwlagn_iface_combinations_dualmode;
197 hw->wiphy->n_iface_combinations =
198 ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
199 }
200
201 hw->wiphy->max_remain_on_channel_duration = 500;
202
203 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
204 WIPHY_FLAG_DISABLE_BEACON_HINTS |
205 WIPHY_FLAG_IBSS_RSN;
206
207#ifdef CONFIG_PM_SLEEP
208 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
209 priv->trans->ops->wowlan_suspend &&
210 device_can_wakeup(priv->trans->dev)) {
211 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
212 WIPHY_WOWLAN_DISCONNECT |
213 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
214 WIPHY_WOWLAN_RFKILL_RELEASE;
215 if (!iwlwifi_mod_params.sw_crypto)
216 hw->wiphy->wowlan.flags |=
217 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
218 WIPHY_WOWLAN_GTK_REKEY_FAILURE;
219
220 hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
221 hw->wiphy->wowlan.pattern_min_len =
222 IWLAGN_WOWLAN_MIN_PATTERN_LEN;
223 hw->wiphy->wowlan.pattern_max_len =
224 IWLAGN_WOWLAN_MAX_PATTERN_LEN;
225 }
226#endif
227
228 if (iwlwifi_mod_params.power_save)
229 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
230 else
231 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
232
233 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
234 /* we create the 802.11 header and a max-length SSID element */
235 hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 34;
236
237 /*
238 * We don't use all queues: 4 and 9 are unused and any
239 * aggregation queue gets mapped down to the AC queue.
240 */
241 hw->queues = IWLAGN_FIRST_AMPDU_QUEUE;
242
243 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
244
245 if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
246 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
247 &priv->nvm_data->bands[IEEE80211_BAND_2GHZ];
248 if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
249 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
250 &priv->nvm_data->bands[IEEE80211_BAND_5GHZ];
251
252 hw->wiphy->hw_version = priv->trans->hw_id;
253
254 iwl_leds_init(priv);
255
256 ret = ieee80211_register_hw(priv->hw);
257 if (ret) {
258 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
259 iwl_leds_exit(priv);
260 return ret;
261 }
262 priv->mac80211_registered = 1;
263
264 return 0;
265}
266
267void iwlagn_mac_unregister(struct iwl_priv *priv)
268{
269 if (!priv->mac80211_registered)
270 return;
271 iwl_leds_exit(priv);
272 ieee80211_unregister_hw(priv->hw);
273 priv->mac80211_registered = 0;
274}
275
/*
 * __iwl_up - bring the NIC up: broadcast stations, INIT and RT uCode
 *
 * Must be called with priv->mutex held.  On any failure the device is
 * brought back down and a negative errno is returned.
 */
static int __iwl_up(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	/* each RXON context needs its broadcast station entry */
	for_each_context(priv, ctx) {
		ret = iwlagn_alloc_bcast_station(priv, ctx);
		if (ret) {
			iwl_dealloc_bcast_stations(priv);
			return ret;
		}
	}

	ret = iwl_run_init_ucode(priv);
	if (ret) {
		IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_alive_start(priv);
	if (ret)
		goto error;
	return 0;

 error:
	/* tear the device back down; EXIT_PENDING keeps restarts away */
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	iwl_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	IWL_ERR(priv, "Unable to initialize device.\n");
	return ret;
}
321
322static int iwlagn_mac_start(struct ieee80211_hw *hw)
323{
324 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
325 int ret;
326
327 IWL_DEBUG_MAC80211(priv, "enter\n");
328
329 /* we should be verifying the device is ready to be opened */
330 mutex_lock(&priv->mutex);
331 ret = __iwl_up(priv);
332 mutex_unlock(&priv->mutex);
333 if (ret)
334 return ret;
335
336 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
337
338 /* Now we should be done, and the READY bit should be set. */
339 if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
340 ret = -EIO;
341
342 iwlagn_led_enable(priv);
343
344 priv->is_open = 1;
345 IWL_DEBUG_MAC80211(priv, "leave\n");
346 return 0;
347}
348
/*
 * iwlagn_mac_stop - mac80211 stop() callback
 *
 * Tears the device down in strict order: mark closed, bring the NIC
 * down under the mutex, cancel deferred work, flush the workqueue and
 * finally stop the transport HW (keeping RF-kill interrupts alive).
 */
static void iwlagn_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	priv->is_open = 0;

	mutex_lock(&priv->mutex);
	iwl_down(priv);
	mutex_unlock(&priv->mutex);

	iwl_cancel_deferred_work(priv);

	flush_workqueue(priv->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down, trans->down will leave the RF
	 * kill interrupt enabled
	 */
	iwl_trans_stop_hw(priv->trans, false);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
376
/*
 * iwlagn_mac_set_rekey_data - cache GTK rekey material for WoWLAN
 *
 * Stores the KEK/KCK and replay counter supplied by wpa_supplicant so
 * iwlagn_suspend() can upload them; ignored when software crypto is in
 * use or when @vif is not the BSS context's interface.
 */
static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_gtk_rekey_data *data)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);

	if (iwlwifi_mod_params.sw_crypto)
		return;

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);

	if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
		goto out;

	memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
	memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
	/* replay counter arrives big-endian; store little-endian for uCode */
	priv->replay_ctr =
		cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
	priv->have_rekey_data = true;

 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
402
403#ifdef CONFIG_PM_SLEEP
404
/*
 * iwlagn_mac_suspend - mac80211 suspend() callback (WoWLAN entry)
 *
 * Returns 1 (tear down normally) when not associated as a station,
 * 0 after a successful switch to the WoWLAN uCode, or the error from
 * iwlagn_suspend() after triggering a HW restart.
 */
static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
			      struct cfg80211_wowlan *wowlan)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int ret;

	if (WARN_ON(!wowlan))
		return -EINVAL;

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);

	/* Don't attempt WoWLAN when not associated, tear down instead. */
	if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
	    !iwl_is_associated_ctx(ctx)) {
		ret = 1;
		goto out;
	}

	ret = iwlagn_suspend(priv, wowlan);
	if (ret)
		goto error;

	iwl_trans_wowlan_suspend(priv->trans);

	goto out;

 error:
	/* WoWLAN setup failed: recover by restarting the hardware */
	priv->wowlan = false;
	iwlagn_prepare_restart(priv);
	ieee80211_restart_hw(priv->hw);
 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
443
/*
 * iwlagn_mac_resume - mac80211 resume() callback (WoWLAN exit)
 *
 * Reads the WoWLAN uCode's error-table status (and optionally snapshots
 * its SRAM for debugfs), then restarts the device back into the regular
 * uCode.  Always returns 1 so mac80211 disconnects and reconfigures.
 */
static int iwlagn_mac_resume(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif;
	unsigned long flags;
	u32 base, status = 0xffffffff;
	int ret = -EIO;

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);

	/* tell the uCode D3 configuration is complete */
	iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	base = priv->device_pointers.error_event_table;
	if (iwlagn_hw_valid_rtc_data_addr(base)) {
		/* read the first word of the error event table */
		spin_lock_irqsave(&priv->trans->reg_lock, flags);
		ret = iwl_grab_nic_access_silent(priv->trans);
		if (likely(ret == 0)) {
			iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
			status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
			iwl_release_nic_access(priv->trans);
		}
		spin_unlock_irqrestore(&priv->trans->reg_lock, flags);

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* snapshot WoWLAN SRAM for post-mortem via debugfs */
		if (ret == 0) {
			const struct fw_img *img;

			img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
			if (!priv->wowlan_sram) {
				priv->wowlan_sram =
				   kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
						GFP_KERNEL);
			}

			if (priv->wowlan_sram)
				_iwl_read_targ_mem_dwords(
				      priv->trans, 0x800000,
				      priv->wowlan_sram,
				      img->sec[IWL_UCODE_SECTION_DATA].len / 4);
		}
#endif
	}

	/* we'll clear ctx->vif during iwlagn_prepare_restart() */
	vif = ctx->vif;

	priv->wowlan = false;

	iwlagn_prepare_restart(priv);

	memset((void *)&ctx->active, 0, sizeof(ctx->active));
	iwl_connection_init_rx_config(priv, ctx);
	iwlagn_set_rxon_chain(priv, ctx);

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	ieee80211_resume_disconnect(vif);

	return 1;
}
508
509static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
510{
511 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
512
513 device_set_wakeup_enable(priv->trans->dev, enabled);
514}
515#endif
516
517static void iwlagn_mac_tx(struct ieee80211_hw *hw,
518 struct ieee80211_tx_control *control,
519 struct sk_buff *skb)
520{
521 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
522
523 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
524 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
525
526 if (iwlagn_tx_skb(priv, control->sta, skb))
527 ieee80211_free_txskb(hw, skb);
528}
529
530static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
531 struct ieee80211_vif *vif,
532 struct ieee80211_key_conf *keyconf,
533 struct ieee80211_sta *sta,
534 u32 iv32, u16 *phase1key)
535{
536 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
537
538 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
539}
540
/*
 * mac80211 set_key handler: install or remove a hardware crypto key.
 *
 * Distinguishes "default" WEP keys (legacy group-key-only WEP, programmed
 * with a dedicated uCode command) from dynamic per-station keys. IBSS
 * group keys and keys that fail RX programming are marked TX-only by
 * setting hw_key_idx to WEP_INVALID_OFFSET while still returning 0.
 * Returns -EOPNOTSUPP when software crypto is forced via module param.
 */
static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta,
			      struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *ctx = vif_priv->ctx;
	int ret;
	bool is_default_wep_key = false;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (iwlwifi_mod_params.sw_crypto) {
		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* ask mac80211 to generate MMIC/IV in software where the HW needs it */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		/* fall through */
	case WLAN_CIPHER_SUITE_CCMP:
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		break;
	default:
		break;
	}

	/*
	 * We could program these keys into the hardware as well, but we
	 * don't expect much multicast traffic in IBSS and having keys
	 * for more stations is probably more useful.
	 *
	 * Mark key TX-only and return 0.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		key->hw_key_idx = WEP_INVALID_OFFSET;
		return 0;
	}

	/* If the key was TX-only, accept deletion */
	if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
		return 0;

	mutex_lock(&priv->mutex);
	/* key programming and scanning can't run concurrently */
	iwl_scan_cancel_timeout(priv, 100);

	/* the two sentinel offsets must stay distinguishable */
	BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !ctx->key_mapping_keys;
		else
			is_default_wep_key =
				key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
	}


	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key) {
			ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
			break;
		}
		ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
		if (ret) {
			/*
			 * can't add key for RX, but we don't need it
			 * in the device for TX so still return 0
			 */
			ret = 0;
			key->hw_key_idx = WEP_INVALID_OFFSET;
		}

		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = iwl_remove_default_wep_key(priv, ctx, key);
		else
			ret = iwl_remove_dynamic_key(priv, ctx, key, sta);

		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
643
/*
 * mac80211 ampdu_action handler: start/stop RX and TX aggregation
 * sessions for a station/TID pair.
 *
 * Returns -EACCES if the NVM says 11n is not supported; -EINVAL if the
 * requested action is filtered out by module parameters or unsupported
 * by the transport. When the last TX aggregation session stops and RTS
 * was only enabled for aggregation, RTS/CTS is switched off again via
 * an async link-quality command.
 */
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum ieee80211_ampdu_mlme_action action,
				   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				   u8 buf_size)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	int ret = -EINVAL;
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	if (!(priv->nvm_data->sku_cap_11n_enable))
		return -EACCES;

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		/* honour the "disable RX aggregation" module parameter */
		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
			break;
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_START:
		/* TX aggregation needs transport support for queue setup */
		if (!priv->trans->ops->txq_enable)
			break;
		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
			break;
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
		if ((ret == 0) && (priv->agg_tids_count > 0)) {
			priv->agg_tids_count--;
			IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
				     priv->agg_tids_count);
		}
		if (!priv->agg_tids_count &&
		    priv->hw_params.use_rts_for_aggregation) {
			/*
			 * switch off RTS/CTS if it was previously enabled
			 */
			sta_priv->lq_sta.lq.general_params.flags &=
				~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
			iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
					&sta_priv->lq_sta.lq, CMD_ASYNC, false);
		}
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
		break;
	}
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return ret;
}
709
/*
 * Add a station entry to the uCode's station table.
 *
 * Note the naming twist: 'is_ap' is true when OUR vif is a client
 * (NL80211_IFTYPE_STATION), because then the peer being added is the AP.
 * On success the allocated uCode station id is stored in the station's
 * driver-private data; until then it is IWL_INVALID_STATION.
 */
static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	sta_priv->sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);
	/* on an AP vif the peer is a client of ours (PS handling cares) */
	if (vif->type == NL80211_IFTYPE_AP)
		sta_priv->client = true;

	ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
				     is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		return ret;
	}

	sta_priv->sta_id = sta_id;

	return 0;
}
742
743static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
744 struct ieee80211_vif *vif,
745 struct ieee80211_sta *sta)
746{
747 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
748 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
749 int ret;
750
751 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", sta->addr);
752
753 if (vif->type == NL80211_IFTYPE_STATION) {
754 /*
755 * Station will be removed from device when the RXON
756 * is set to unassociated -- just deactivate it here
757 * to avoid re-programming it.
758 */
759 ret = 0;
760 iwl_deactivate_station(priv, sta_priv->sta_id, sta->addr);
761 } else {
762 ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
763 if (ret)
764 IWL_DEBUG_QUIET_RFKILL(priv,
765 "Error removing station %pM\n", sta->addr);
766 }
767 return ret;
768}
769
/*
 * mac80211 sta_state handler: map mac80211 station state transitions to
 * driver operations (add/remove station, initialize rate scaling).
 *
 * On a client vif the station is added as soon as it exists (NOTEXIST ->
 * NONE) and rate scaling is set up at association; on other vif types the
 * station is added at association time (AUTH -> ASSOC) together with rate
 * init. Failures are masked under hard rfkill to keep mac80211 from
 * WARNing (see comment below).
 */
static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta,
				enum ieee80211_sta_state old_state,
				enum ieee80211_sta_state new_state)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	/* internal operation derived from the state transition */
	enum {
		NONE, ADD, REMOVE, HT_RATE_INIT, ADD_RATE_INIT,
	} op = NONE;
	int ret;

	IWL_DEBUG_MAC80211(priv, "station %pM state change %d->%d\n",
			   sta->addr, old_state, new_state);

	mutex_lock(&priv->mutex);
	if (vif->type == NL80211_IFTYPE_STATION) {
		if (old_state == IEEE80211_STA_NOTEXIST &&
		    new_state == IEEE80211_STA_NONE)
			op = ADD;
		else if (old_state == IEEE80211_STA_NONE &&
			 new_state == IEEE80211_STA_NOTEXIST)
			op = REMOVE;
		else if (old_state == IEEE80211_STA_AUTH &&
			 new_state == IEEE80211_STA_ASSOC)
			op = HT_RATE_INIT;
	} else {
		if (old_state == IEEE80211_STA_AUTH &&
		    new_state == IEEE80211_STA_ASSOC)
			op = ADD_RATE_INIT;
		else if (old_state == IEEE80211_STA_ASSOC &&
			 new_state == IEEE80211_STA_AUTH)
			op = REMOVE;
	}

	switch (op) {
	case ADD:
		ret = iwlagn_mac_sta_add(hw, vif, sta);
		if (ret)
			break;
		/*
		 * Clear the in-progress flag, the AP station entry was added
		 * but we'll initialize LQ only when we've associated (which
		 * would also clear the in-progress flag). This is necessary
		 * in case we never initialize LQ because association fails.
		 */
		spin_lock_bh(&priv->sta_lock);
		priv->stations[iwl_sta_id(sta)].used &=
			~IWL_STA_UCODE_INPROGRESS;
		spin_unlock_bh(&priv->sta_lock);
		break;
	case REMOVE:
		ret = iwlagn_mac_sta_remove(hw, vif, sta);
		break;
	case ADD_RATE_INIT:
		ret = iwlagn_mac_sta_add(hw, vif, sta);
		if (ret)
			break;
		/* Initialize rate scaling */
		IWL_DEBUG_INFO(priv,
			       "Initializing rate scaling for station %pM\n",
			       sta->addr);
		iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
		ret = 0;
		break;
	case HT_RATE_INIT:
		/* Initialize rate scaling */
		ret = iwl_sta_update_ht(priv, vif_priv->ctx, sta);
		if (ret)
			break;
		IWL_DEBUG_INFO(priv,
			       "Initializing rate scaling for station %pM\n",
			       sta->addr);
		iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
		ret = 0;
		break;
	default:
		ret = 0;
		break;
	}

	/*
	 * mac80211 might WARN if we fail, but due the way we
	 * (badly) handle hard rfkill, we might fail here
	 */
	if (iwl_is_rfkill(priv))
		ret = 0;

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
864
/*
 * mac80211 channel_switch handler: start a CSA-driven channel switch.
 *
 * Prepares the staging RXON for the new channel/HT config, marks the
 * switch pending, and hands the switch off to the device-specific
 * set_channel_switch op. If that op fails, the pending state is rolled
 * back and mac80211 is told the switch failed. Completion on success is
 * reported asynchronously via iwl_chswitch_done().
 */
static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
				struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	/*
	 * MULTI-FIXME
	 * When we add support for multiple interfaces, we need to
	 * revisit this. The channel switch command in the device
	 * only affects the BSS context, but what does that really
	 * mean? And what if we get a CSA on the second interface?
	 * This needs a lot of work.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u16 ch;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	if (iwl_is_rfkill(priv))
		goto out;

	/* refuse when shutting down, scanning, or a switch is in flight */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status) ||
	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		goto out;

	if (!iwl_is_associated_ctx(ctx))
		goto out;

	/* device-specific support is required */
	if (!priv->lib->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* nothing to do if we're already on the target channel */
	if (le16_to_cpu(ctx->active.channel) == ch)
		goto out;

	priv->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	ctx->ht.enabled = conf_is_ht(conf);
	if (ctx->ht.enabled)
		iwlagn_config_ht40(conf, ctx);
	else
		ctx->ht.is_40mhz = false;

	/* flags get re-derived for the new channel */
	if ((le16_to_cpu(ctx->staging.channel) != ch))
		ctx->staging.flags = 0;

	iwl_set_rxon_channel(priv, channel, ctx);
	iwl_set_rxon_ht(priv, ht_conf);
	iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);

	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
	priv->switch_channel = cpu_to_le16(ch);
	if (priv->lib->set_channel_switch(priv, ch_switch)) {
		/* roll back and tell mac80211 the switch failed */
		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
		priv->switch_channel = 0;
		ieee80211_chswitch_done(ctx->vif, false);
	}

out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
937
938void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
939{
940 /*
941 * MULTI-FIXME
942 * See iwlagn_mac_channel_switch.
943 */
944 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
945
946 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
947 return;
948
949 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
950 ieee80211_chswitch_done(ctx->vif, is_success);
951}
952
/*
 * mac80211 configure_filter handler: translate mac80211 FIF_* filter
 * flags into RXON filter flag set/clear masks and stage them on every
 * context. The staged flags are committed later (e.g. with the next
 * RXON commit), not here, since the hardware may be scanning.
 */
static void iwlagn_configure_filter(struct ieee80211_hw *hw,
				    unsigned int changed_flags,
				    unsigned int *total_flags,
				    u64 multicast)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	/* bits to OR into / clear from the staging filter flags */
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx;

/* accumulate 'flag' into the set-mask or the clear-mask */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	for_each_context(priv, ctx) {
		ctx->staging.filter_flags &= ~filter_nand;
		ctx->staging.filter_flags |= filter_or;

		/*
		 * Not committing directly because hardware can perform a scan,
		 * but we'll eventually commit the filter flags change anyway.
		 */
	}

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
1002
/*
 * mac80211 flush handler: make sure all pending TX frames have left the
 * device. With 'drop' set, the TX FIFOs are flushed (frames discarded)
 * via a host command first; otherwise we just wait for the queues to
 * drain. Aborts early on shutdown or rfkill.
 */
static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
		goto done;
	}
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
		goto done;
	}

	/*
	 * mac80211 will not push any more frames for transmit
	 * until the flush is completed
	 */
	if (drop) {
		IWL_DEBUG_MAC80211(priv, "send flush command\n");
		if (iwlagn_txfifo_flush(priv)) {
			IWL_ERR(priv, "flush request fail\n");
			goto done;
		}
	}
	IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
	/* block until the transport reports all TX queues empty */
	iwl_trans_wait_tx_queue_empty(priv->trans);
done:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
1036
/*
 * mac80211 remain_on_channel handler: implement ROC on top of the PAN
 * context plus a special ROC "scan".
 *
 * If the PAN context is not active yet, it is brought up as a P2P device
 * with default QoS parameters and promiscuous filtering (two RXON
 * commits: one to create the context, one to enable the filters). The
 * actual on-channel dwell is then realized by starting an IWL_SCAN_ROC
 * scan on the requested channel. Fails with -EOPNOTSUPP when no PAN/P2P
 * capability exists, -EBUSY when a real scan is in progress.
 */
static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_channel *channel,
				     int duration)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
	int err = 0;

	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
		return -EOPNOTSUPP;

	if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
		return -EOPNOTSUPP;

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		/* mac80211 should not scan while ROC or ROC while scanning */
		if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
			err = -EBUSY;
			goto out;
		}

		/* a radio-reset scan may be preempted for ROC */
		iwl_scan_cancel_timeout(priv, 100);

		if (test_bit(STATUS_SCAN_HW, &priv->status)) {
			err = -EBUSY;
			goto out;
		}
	}

	priv->hw_roc_channel = channel;
	/* convert from ms to TU */
	priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
	priv->hw_roc_start_notified = false;
	cancel_delayed_work(&priv->hw_roc_disable_work);

	if (!ctx->is_active) {
		/* default EDCA parameters for the fresh PAN context */
		static const struct iwl_qos_info default_qos_data = {
			.def_qos_parm = {
				.ac[0] = {
					.cw_min = cpu_to_le16(3),
					.cw_max = cpu_to_le16(7),
					.aifsn = 2,
					.edca_txop = cpu_to_le16(1504),
				},
				.ac[1] = {
					.cw_min = cpu_to_le16(7),
					.cw_max = cpu_to_le16(15),
					.aifsn = 2,
					.edca_txop = cpu_to_le16(3008),
				},
				.ac[2] = {
					.cw_min = cpu_to_le16(15),
					.cw_max = cpu_to_le16(1023),
					.aifsn = 3,
				},
				.ac[3] = {
					.cw_min = cpu_to_le16(15),
					.cw_max = cpu_to_le16(1023),
					.aifsn = 7,
				},
			},
		};

		ctx->is_active = true;
		ctx->qos_data = default_qos_data;
		ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
		/* reuse the BSS context's MAC address for the P2P device */
		memcpy(ctx->staging.node_addr,
		       priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
		       ETH_ALEN);
		memcpy(ctx->staging.bssid_addr,
		       priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
		       ETH_ALEN);
		/* first commit creates the context ... */
		err = iwlagn_commit_rxon(priv, ctx);
		if (err)
			goto out;
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
					     RXON_FILTER_PROMISC_MSK |
					     RXON_FILTER_CTL2HOST_MSK;

		/* ... second commit enables the ROC filter flags */
		err = iwlagn_commit_rxon(priv, ctx);
		if (err) {
			iwlagn_disable_roc(priv);
			goto out;
		}
		priv->hw_roc_setup = true;
	}

	/* the dwell itself is implemented as a single-channel ROC scan */
	err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
	if (err)
		iwlagn_disable_roc(priv);

 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return err;
}
1138
1139static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1140{
1141 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1142
1143 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
1144 return -EOPNOTSUPP;
1145
1146 IWL_DEBUG_MAC80211(priv, "enter\n");
1147 mutex_lock(&priv->mutex);
1148 iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
1149 iwlagn_disable_roc(priv);
1150 mutex_unlock(&priv->mutex);
1151 IWL_DEBUG_MAC80211(priv, "leave\n");
1152
1153 return 0;
1154}
1155
1156static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1157 enum ieee80211_rssi_event rssi_event)
1158{
1159 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1160
1161 IWL_DEBUG_MAC80211(priv, "enter\n");
1162 mutex_lock(&priv->mutex);
1163
1164 if (priv->cfg->bt_params &&
1165 priv->cfg->bt_params->advanced_bt_coexist) {
1166 if (rssi_event == RSSI_EVENT_LOW)
1167 priv->bt_enable_pspoll = true;
1168 else if (rssi_event == RSSI_EVENT_HIGH)
1169 priv->bt_enable_pspoll = false;
1170
1171 iwlagn_send_advance_bt_config(priv);
1172 } else {
1173 IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
1174 "ignoring RSSI callback\n");
1175 }
1176
1177 mutex_unlock(&priv->mutex);
1178 IWL_DEBUG_MAC80211(priv, "leave\n");
1179}
1180
1181static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
1182 struct ieee80211_sta *sta, bool set)
1183{
1184 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1185
1186 queue_work(priv->workqueue, &priv->beacon_update);
1187
1188 return 0;
1189}
1190
1191static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
1192 struct ieee80211_vif *vif, u16 queue,
1193 const struct ieee80211_tx_queue_params *params)
1194{
1195 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1196 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1197 struct iwl_rxon_context *ctx = vif_priv->ctx;
1198 int q;
1199
1200 if (WARN_ON(!ctx))
1201 return -EINVAL;
1202
1203 IWL_DEBUG_MAC80211(priv, "enter\n");
1204
1205 if (!iwl_is_ready_rf(priv)) {
1206 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1207 return -EIO;
1208 }
1209
1210 if (queue >= AC_NUM) {
1211 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1212 return 0;
1213 }
1214
1215 q = AC_NUM - 1 - queue;
1216
1217 mutex_lock(&priv->mutex);
1218
1219 ctx->qos_data.def_qos_parm.ac[q].cw_min =
1220 cpu_to_le16(params->cw_min);
1221 ctx->qos_data.def_qos_parm.ac[q].cw_max =
1222 cpu_to_le16(params->cw_max);
1223 ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
1224 ctx->qos_data.def_qos_parm.ac[q].edca_txop =
1225 cpu_to_le16((params->txop * 32));
1226
1227 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1228
1229 mutex_unlock(&priv->mutex);
1230
1231 IWL_DEBUG_MAC80211(priv, "leave\n");
1232 return 0;
1233}
1234
1235static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
1236{
1237 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1238
1239 return priv->ibss_manager == IWL_IBSS_MANAGER;
1240}
1241
/* Rebuild a context's RXON configuration from scratch (connection
 * defaults + RX chain selection) and commit it to the uCode. */
static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	iwl_connection_init_rx_config(priv, ctx);
	iwlagn_set_rxon_chain(priv, ctx);
	return iwlagn_commit_rxon(priv, ctx);
}
1250
/*
 * Bring up a context for its newly-attached vif: activate it, commit the
 * RXON, apply IBSS BT-coex workarounds, and wire up the vif's AC-to-HW
 * queue mappings. Called with priv->mutex held.
 */
static int iwl_setup_interface(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err, ac;

	lockdep_assert_held(&priv->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	priv->iw_mode = vif->type;

	ctx->is_active = true;

	err = iwl_set_mode(priv, ctx);
	if (err) {
		/* roll back activation unless the context is always active */
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
	    vif->type == NL80211_IFTYPE_ADHOC) {
		/*
		 * pretend to have high BT traffic as long as we
		 * are operating in IBSS mode, as this will cause
		 * the rate scaling etc. to behave as intended.
		 */
		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
	}

	/* set up queue mappings */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		vif->hw_queue[ac] = ctx->ac_to_queue[ac];

	/* only AP interfaces get a content-after-beacon (multicast) queue */
	if (vif->type == NL80211_IFTYPE_AP)
		vif->cab_queue = ctx->mcast_queue;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
1296
/*
 * mac80211 add_interface handler: find a usable RXON context for the new
 * vif and set it up.
 *
 * Context selection walks all contexts: a context already bound to this
 * same vif means we are re-adding after a firmware restart ('reset');
 * a busy context whose current vif mode is exclusive rejects the add;
 * otherwise the first free context supporting the requested mode wins.
 * Any active ROC is cancelled first since it occupies the PAN context.
 */
static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *tmp, *ctx = NULL;
	int err;
	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
	bool reset = false;

	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
			   viftype, vif->addr);

	cancel_delayed_work_sync(&priv->hw_roc_disable_work);

	mutex_lock(&priv->mutex);

	/* ROC holds the PAN context; release it before assigning contexts */
	iwlagn_disable_roc(priv);

	if (!iwl_is_ready_rf(priv)) {
		IWL_WARN(priv, "Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	for_each_context(priv, tmp) {
		u32 possible_modes =
			tmp->interface_modes | tmp->exclusive_interface_modes;

		if (tmp->vif) {
			/* On reset we need to add the same interface again */
			if (tmp->vif == vif) {
				reset = true;
				ctx = tmp;
				break;
			}

			/* check if this busy context is exclusive */
			if (tmp->exclusive_interface_modes &
						BIT(tmp->vif->type)) {
				err = -EINVAL;
				goto out;
			}
			continue;
		}

		if (!(possible_modes & BIT(viftype)))
			continue;

		/* have maybe usable context w/o interface */
		ctx = tmp;
		break;
	}

	if (!ctx) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = ctx;
	ctx->vif = vif;

	/*
	 * In SNIFFER device type, the firmware reports the FCS to
	 * the host, rather than snipping it off. Unfortunately,
	 * mac80211 doesn't (yet) provide a per-packet flag for
	 * this, so that we have to set the hardware flag based
	 * on the interfaces added. As the monitor interface can
	 * only be present by itself, and will be removed before
	 * other interfaces are added, this is safe.
	 */
	if (vif->type == NL80211_IFTYPE_MONITOR)
		priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
	else
		priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;

	err = iwl_setup_interface(priv, ctx);
	/* on a firmware-restart re-add, keep the binding even on error */
	if (!err || reset)
		goto out;

	/* setup failed on a fresh add: unbind the context again */
	ctx->vif = NULL;
	priv->iw_mode = NL80211_IFTYPE_STATION;
 out:
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return err;
}
1385
/*
 * Common vif teardown for remove_interface and change_interface.
 * Cancels any scan owned by this vif, and — unless this is an interface
 * mode change (where the context is reused immediately) — re-commits the
 * default RXON and deactivates the context. Called with priv->mutex held.
 */
static void iwl_teardown_interface(struct iwl_priv *priv,
				   struct ieee80211_vif *vif,
				   bool mode_change)
{
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&priv->mutex);

	if (priv->scan_vif == vif) {
		iwl_scan_cancel_timeout(priv, 200);
		iwl_force_scan_end(priv);
	}

	if (!mode_change) {
		iwl_set_mode(priv, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}

	/*
	 * When removing the IBSS interface, overwrite the
	 * BT traffic load with the stored one from the last
	 * notification, if any. If this is a device that
	 * doesn't implement this, this has no effect since
	 * both values are the same and zero.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		priv->bt_traffic_load = priv->last_bt_traffic_load;
}
1415
1416static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
1417 struct ieee80211_vif *vif)
1418{
1419 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1420 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1421
1422 IWL_DEBUG_MAC80211(priv, "enter\n");
1423
1424 mutex_lock(&priv->mutex);
1425
1426 if (WARN_ON(ctx->vif != vif)) {
1427 struct iwl_rxon_context *tmp;
1428 IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
1429 for_each_context(priv, tmp)
1430 IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
1431 tmp->ctxid, tmp, tmp->vif);
1432 }
1433 ctx->vif = NULL;
1434
1435 iwl_teardown_interface(priv, vif, false);
1436
1437 mutex_unlock(&priv->mutex);
1438
1439 IWL_DEBUG_MAC80211(priv, "leave\n");
1440
1441}
1442
/*
 * mac80211 change_interface handler: switch a vif's type in place.
 *
 * Only supported on the BSS context (the PAN context is reassigned by
 * add/remove of P2P interfaces instead). Verifies the new mode is
 * allowed on the context and, for exclusive modes, that no other context
 * is active. Once the internal switch is done, setup failures are
 * deliberately masked so mac80211 and the driver stay in sync.
 */
static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       enum nl80211_iftype newtype, bool newp2p)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx, *tmp;
	/* keep the raw nl80211 type; 'newtype' becomes the p2p-folded one */
	enum nl80211_iftype newviftype = newtype;
	u32 interface_modes;
	int err;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&priv->mutex);

	ctx = iwl_rxon_ctx_from_vif(vif);

	/*
	 * To simplify this code, only support changes on the
	 * BSS context. The PAN context is usually reassigned
	 * by creating/removing P2P interfaces anyway.
	 */
	if (ctx->ctxid != IWL_RXON_CTX_BSS) {
		err = -EBUSY;
		goto out;
	}

	if (!ctx->vif || !iwl_is_ready_rf(priv)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* Check if the switch is supported in the same context */
	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
	if (!(interface_modes & BIT(newtype))) {
		err = -EBUSY;
		goto out;
	}

	if (ctx->exclusive_interface_modes & BIT(newtype)) {
		for_each_context(priv, tmp) {
			if (ctx == tmp)
				continue;

			if (!tmp->is_active)
				continue;

			/*
			 * The current mode switch would be exclusive, but
			 * another context is active ... refuse the switch.
			 */
			err = -EBUSY;
			goto out;
		}
	}

	/* success */
	iwl_teardown_interface(priv, vif, true);
	vif->type = newviftype;
	vif->p2p = newp2p;
	err = iwl_setup_interface(priv, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return err;
}
1525
1526static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
1527 struct ieee80211_vif *vif,
1528 struct cfg80211_scan_request *req)
1529{
1530 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1531 int ret;
1532
1533 IWL_DEBUG_MAC80211(priv, "enter\n");
1534
1535 if (req->n_channels == 0)
1536 return -EINVAL;
1537
1538 mutex_lock(&priv->mutex);
1539
1540 /*
1541 * If an internal scan is in progress, just set
1542 * up the scan_request as per above.
1543 */
1544 if (priv->scan_type != IWL_SCAN_NORMAL) {
1545 IWL_DEBUG_SCAN(priv,
1546 "SCAN request during internal scan - defer\n");
1547 priv->scan_request = req;
1548 priv->scan_vif = vif;
1549 ret = 0;
1550 } else {
1551 priv->scan_request = req;
1552 priv->scan_vif = vif;
1553 /*
1554 * mac80211 will only ask for one band at a time
1555 * so using channels[0] here is ok
1556 */
1557 ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
1558 req->channels[0]->band);
1559 if (ret) {
1560 priv->scan_request = NULL;
1561 priv->scan_vif = NULL;
1562 }
1563 }
1564
1565 IWL_DEBUG_MAC80211(priv, "leave\n");
1566
1567 mutex_unlock(&priv->mutex);
1568
1569 return ret;
1570}
1571
1572static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1573{
1574 struct iwl_addsta_cmd cmd = {
1575 .mode = STA_CONTROL_MODIFY_MSK,
1576 .station_flags_msk = STA_FLG_PWR_SAVE_MSK,
1577 .sta.sta_id = sta_id,
1578 };
1579
1580 iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
1581}
1582
1583static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
1584 struct ieee80211_vif *vif,
1585 enum sta_notify_cmd cmd,
1586 struct ieee80211_sta *sta)
1587{
1588 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1589 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1590 int sta_id;
1591
1592 IWL_DEBUG_MAC80211(priv, "enter\n");
1593
1594 switch (cmd) {
1595 case STA_NOTIFY_SLEEP:
1596 WARN_ON(!sta_priv->client);
1597 sta_priv->asleep = true;
1598 if (atomic_read(&sta_priv->pending_frames) > 0)
1599 ieee80211_sta_block_awake(hw, sta, true);
1600 break;
1601 case STA_NOTIFY_AWAKE:
1602 WARN_ON(!sta_priv->client);
1603 if (!sta_priv->asleep)
1604 break;
1605 sta_priv->asleep = false;
1606 sta_id = iwl_sta_id(sta);
1607 if (sta_id != IWL_INVALID_STATION)
1608 iwl_sta_modify_ps_wake(priv, sta_id);
1609 break;
1610 default:
1611 break;
1612 }
1613 IWL_DEBUG_MAC80211(priv, "leave\n");
1614}
1615
/* mac80211 callback table for the DVM (AGN) op mode; registered via
 * ieee80211_alloc_hw() in iwl_alloc_all(). PM callbacks are only
 * present when CONFIG_PM_SLEEP is set; the testmode entries expand to
 * nothing unless nl80211 testmode support is enabled. */
struct ieee80211_ops iwlagn_hw_ops = {
	.tx = iwlagn_mac_tx,
	.start = iwlagn_mac_start,
	.stop = iwlagn_mac_stop,
#ifdef CONFIG_PM_SLEEP
	.suspend = iwlagn_mac_suspend,
	.resume = iwlagn_mac_resume,
	.set_wakeup = iwlagn_mac_set_wakeup,
#endif
	.add_interface = iwlagn_mac_add_interface,
	.remove_interface = iwlagn_mac_remove_interface,
	.change_interface = iwlagn_mac_change_interface,
	.config = iwlagn_mac_config,
	.configure_filter = iwlagn_configure_filter,
	.set_key = iwlagn_mac_set_key,
	.update_tkip_key = iwlagn_mac_update_tkip_key,
	.set_rekey_data = iwlagn_mac_set_rekey_data,
	.conf_tx = iwlagn_mac_conf_tx,
	.bss_info_changed = iwlagn_bss_info_changed,
	.ampdu_action = iwlagn_mac_ampdu_action,
	.hw_scan = iwlagn_mac_hw_scan,
	.sta_notify = iwlagn_mac_sta_notify,
	.sta_state = iwlagn_mac_sta_state,
	.channel_switch = iwlagn_mac_channel_switch,
	.flush = iwlagn_mac_flush,
	.tx_last_beacon = iwlagn_mac_tx_last_beacon,
	.remain_on_channel = iwlagn_mac_remain_on_channel,
	.cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
	.rssi_callback = iwlagn_mac_rssi_callback,
	CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
	CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
	.set_tim = iwlagn_mac_set_tim,
};
1649
1650/* This function both allocates and initializes hw and priv. */
1651struct ieee80211_hw *iwl_alloc_all(void)
1652{
1653 struct iwl_priv *priv;
1654 struct iwl_op_mode *op_mode;
1655 /* mac80211 allocates memory for this device instance, including
1656 * space for this driver's private structure */
1657 struct ieee80211_hw *hw;
1658
1659 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv) +
1660 sizeof(struct iwl_op_mode), &iwlagn_hw_ops);
1661 if (!hw)
1662 goto out;
1663
1664 op_mode = hw->priv;
1665 priv = IWL_OP_MODE_GET_DVM(op_mode);
1666 priv->hw = hw;
1667
1668out:
1669 return hw;
1670}
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
deleted file mode 100644
index faa05932efa..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ /dev/null
@@ -1,2175 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/slab.h>
36#include <linux/delay.h>
37#include <linux/sched.h>
38#include <linux/skbuff.h>
39#include <linux/netdevice.h>
40#include <linux/etherdevice.h>
41#include <linux/if_arp.h>
42
43#include <net/mac80211.h>
44
45#include <asm/div64.h>
46
47#include "iwl-eeprom-read.h"
48#include "iwl-eeprom-parse.h"
49#include "iwl-io.h"
50#include "iwl-trans.h"
51#include "iwl-op-mode.h"
52#include "iwl-drv.h"
53#include "iwl-modparams.h"
54#include "iwl-prph.h"
55
56#include "dev.h"
57#include "calib.h"
58#include "agn.h"
59
60
61/******************************************************************************
62 *
63 * module boiler plate
64 *
65 ******************************************************************************/
66
67/*
68 * module name, copyright, version, etc.
69 */
70#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
71
72#ifdef CONFIG_IWLWIFI_DEBUG
73#define VD "d"
74#else
75#define VD
76#endif
77
78#define DRV_VERSION IWLWIFI_VERSION VD
79
80
81MODULE_DESCRIPTION(DRV_DESCRIPTION);
82MODULE_VERSION(DRV_VERSION);
83MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
84MODULE_LICENSE("GPL");
85
86static const struct iwl_op_mode_ops iwl_dvm_ops;
87
88void iwl_update_chain_flags(struct iwl_priv *priv)
89{
90 struct iwl_rxon_context *ctx;
91
92 for_each_context(priv, ctx) {
93 iwlagn_set_rxon_chain(priv, ctx);
94 if (ctx->active.rx_chain != ctx->staging.rx_chain)
95 iwlagn_commit_rxon(priv, ctx);
96 }
97}
98
/*
 * iwl_set_beacon_tim - locate the TIM element in a beacon frame
 *
 * Walks the variable-length IE section of @beacon (@frame_size bytes
 * total) looking for WLAN_EID_TIM.  If found, stores the element's
 * frame-relative offset and its length byte into @tx_beacon_cmd so the
 * uCode can patch the TIM at transmit time; otherwise only warns.
 */
static void iwl_set_beacon_tim(struct iwl_priv *priv,
			       struct iwl_tx_beacon_cmd *tx_beacon_cmd,
			       u8 *beacon, u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2; /* skip EID + len + payload */

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}
125
/*
 * iwlagn_send_beacon_cmd - build and send REPLY_TX_BEACON to the uCode
 *
 * Uses the current priv->beacon_skb as the frame payload.  The command
 * buffer (priv->beacon_cmd) is allocated lazily on first use and reused
 * afterwards.  Returns 0 on success (or, with a warning, when there is
 * no beacon context) and a negative errno on failure.
 *
 * Must be called with priv->mutex held.
 */
int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TX_BEACON,
		.flags = CMD_SYNC,
	};
	struct ieee80211_tx_info *info;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
		return 0;
	}

	if (WARN_ON(!priv->beacon_skb))
		return -EINVAL;

	/* Allocate beacon command buffer on first use; kept for reuse */
	if (!priv->beacon_cmd)
		priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
	tx_beacon_cmd = priv->beacon_cmd;
	if (!tx_beacon_cmd)
		return -ENOMEM;

	frame_size = priv->beacon_skb->len;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields (TIM element offset/size) */
	iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
			   frame_size);

	/* Set up packet rate and flags */
	info = IEEE80211_SKB_CB(priv->beacon_skb);

	/*
	 * Let's set up the rate at least somewhat correctly;
	 * it will currently not actually be used by the uCode,
	 * it uses the broadcast station's rate instead.
	 */
	if (info->control.rates[0].idx < 0 ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
		rate = 0;
	else
		rate = info->control.rates[0].idx;

	/* rotate management-frame TX antenna among the valid ones */
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->nvm_data->valid_tx_ant);
	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* In mac80211, rates for 5 GHz start at 0 */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate += IWL_FIRST_OFDM_RATE;
	else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	tx_beacon_cmd->tx.rate_n_flags =
		iwl_hw_set_rate_n_flags(rate, rate_flags);

	/*
	 * Submit command: command struct and beacon frame are attached as
	 * two NOCOPY fragments, so both buffers must remain valid for the
	 * duration of the (synchronous) command — both are owned by priv.
	 */
	cmd.len[0] = sizeof(*tx_beacon_cmd);
	cmd.data[0] = tx_beacon_cmd;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	cmd.len[1] = frame_size;
	cmd.data[1] = priv->beacon_skb->data;
	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;

	return iwl_dvm_send_cmd(priv, &cmd);
}
210
/*
 * iwl_bg_beacon_update - work handler: fetch a fresh beacon from
 * mac80211 and upload it to the uCode.
 *
 * Scheduled from uCode beacon notifications; only acts when the beacon
 * context's interface is in AP mode.  Takes priv->mutex for the whole
 * operation since beacon_ctx and beacon_skb are protected by it.
 */
static void iwl_bg_beacon_update(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, beacon_update);
	struct sk_buff *beacon;

	mutex_lock(&priv->mutex);
	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "updating beacon w/o beacon context!\n");
		goto out;
	}

	if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
		/*
		 * The ucode will send beacon notifications even in
		 * IBSS mode, but we don't want to process them. But
		 * we need to defer the type check to here due to
		 * requiring locking around the beacon_ctx access.
		 */
		goto out;
	}

	/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
	beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
	if (!beacon) {
		IWL_ERR(priv, "update beacon failed -- keeping old\n");
		goto out;
	}

	/* new beacon skb is allocated every time; dispose previous.*/
	dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = beacon;

	iwlagn_send_beacon_cmd(priv);
 out:
	mutex_unlock(&priv->mutex);
}
249
250static void iwl_bg_bt_runtime_config(struct work_struct *work)
251{
252 struct iwl_priv *priv =
253 container_of(work, struct iwl_priv, bt_runtime_config);
254
255 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
256 return;
257
258 /* dont send host command if rf-kill is on */
259 if (!iwl_is_ready_rf(priv))
260 return;
261 iwlagn_send_advance_bt_config(priv);
262}
263
/*
 * iwl_bg_bt_full_concurrency - work handler for BT full-concurrency
 * mode transitions.
 *
 * Re-commits RXON for every context and then sends the new BT config,
 * all under priv->mutex; the ordering below is mandatory.
 */
static void iwl_bg_bt_full_concurrency(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_full_concurrency);
	struct iwl_rxon_context *ctx;

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		goto out;

	/* dont send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	/*
	 * LQ & RXON updated cmds must be sent before BT Config cmd
	 * to avoid 3-wire collisions
	 */
	for_each_context(priv, ctx) {
		iwlagn_set_rxon_chain(priv, ctx);
		iwlagn_commit_rxon(priv, ctx);
	}

	iwlagn_send_advance_bt_config(priv);
out:
	mutex_unlock(&priv->mutex);
}
296
297int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
298{
299 struct iwl_statistics_cmd statistics_cmd = {
300 .configuration_flags =
301 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
302 };
303
304 if (flags & CMD_ASYNC)
305 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
306 CMD_ASYNC,
307 sizeof(struct iwl_statistics_cmd),
308 &statistics_cmd);
309 else
310 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
311 CMD_SYNC,
312 sizeof(struct iwl_statistics_cmd),
313 &statistics_cmd);
314}
315
316/**
317 * iwl_bg_statistics_periodic - Timer callback to queue statistics
318 *
319 * This callback is provided in order to send a statistics request.
320 *
321 * This timer function is continually reset to execute within
322 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
323 * was received. We need to ensure we receive the statistics in order
324 * to update the temperature used for calibrating the TXPOWER.
325 */
326static void iwl_bg_statistics_periodic(unsigned long data)
327{
328 struct iwl_priv *priv = (struct iwl_priv *)data;
329
330 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
331 return;
332
333 /* dont send host command if rf-kill is on */
334 if (!iwl_is_ready_rf(priv))
335 return;
336
337 iwl_send_statistics_request(priv, CMD_ASYNC, false);
338}
339
340
/*
 * iwl_print_cont_event_trace - stream uCode event-log entries to ftrace
 *
 * Reads @num_events entries from the SRAM event log at @base, starting
 * at entry @start_idx, and emits each as an ftrace event.  @mode 0
 * means two 32-bit words per entry (no timestamp); otherwise entries
 * are three words.  @capacity is the total number of log entries and
 * bounds the read.  Acquires NIC access (device must be powered for
 * SRAM reads) under reg_lock for the whole burst.
 */
static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
				       u32 start_idx, u32 num_events,
				       u32 capacity, u32 mode)
{
	u32 i;
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	/* log header is 4 words; each entry is 2 (mode 0) or 3 words */
	if (mode == 0)
		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
	else
		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
	if (unlikely(!iwl_grab_nic_access(priv->trans))) {
		spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
		return;
	}

	/* Set starting address; reads will auto-increment */
	iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);

	/*
	 * Refuse to read more than would have fit into the log from
	 * the current start_idx. This used to happen due to the race
	 * described below, but now WARN because the code below should
	 * prevent it from happening here.
	 */
	if (WARN_ON(num_events > capacity - start_idx))
		num_events = capacity - start_idx;

	/*
	 * "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing.
	 */
	for (i = 0; i < num_events; i++) {
		/* each iwl_read32 of HBUS_TARG_MEM_RDAT pops the next word */
		ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
		time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			trace_iwlwifi_dev_ucode_cont_event(
					priv->trans->dev, 0, time, ev);
		} else {
			data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
			trace_iwlwifi_dev_ucode_cont_event(
					priv->trans->dev, time, data, ev);
		}
	}
	/* Allow device to power down */
	iwl_release_nic_access(priv->trans);
	spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
}
394
/*
 * iwl_continuous_event_trace - dump new uCode event-log entries
 *
 * Reads the log header from SRAM, works out how many entries were
 * written since the last call (handling uCode wrap-around races), and
 * forwards the new entries to ftrace via iwl_print_cont_event_trace().
 * Tracks progress in priv->event_log.
 */
static void iwl_continuous_event_trace(struct iwl_priv *priv)
{
	u32 capacity;   /* event log capacity in # entries */
	struct {
		u32 capacity;
		u32 mode;
		u32 wrap_counter;
		u32 write_counter;
	} __packed read;
	u32 base;       /* SRAM byte address of event log header */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */

	base = priv->device_pointers.log_event_table;
	if (iwlagn_hw_valid_rtc_data_addr(base)) {
		iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
		capacity = read.capacity;
		mode = read.mode;
		num_wraps = read.wrap_counter;
		next_entry = read.write_counter;
	} else
		return;

	/*
	 * Unfortunately, the uCode doesn't use temporary variables.
	 * Therefore, it can happen that we read next_entry == capacity,
	 * which really means next_entry == 0.
	 */
	if (unlikely(next_entry == capacity))
		next_entry = 0;
	/*
	 * Additionally, the uCode increases the write pointer before
	 * the wraps counter, so if the write pointer is smaller than
	 * the old write pointer (wrap occurred) but we read that no
	 * wrap occurred, we actually read between the next_entry and
	 * num_wraps update (this does happen in practice!!) -- take
	 * that into account by increasing num_wraps.
	 */
	if (unlikely(next_entry < priv->event_log.next_entry &&
		     num_wraps == priv->event_log.num_wraps))
		num_wraps++;

	if (num_wraps == priv->event_log.num_wraps) {
		/* no wrap: dump the contiguous range of new entries */
		iwl_print_cont_event_trace(
			priv, base, priv->event_log.next_entry,
			next_entry - priv->event_log.next_entry,
			capacity, mode);

		priv->event_log.non_wraps_count++;
	} else {
		if (num_wraps - priv->event_log.num_wraps > 1)
			priv->event_log.wraps_more_count++;
		else
			priv->event_log.wraps_once_count++;

		trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);

		/* wrapped: dump tail of old region, then head up to writer */
		if (next_entry < priv->event_log.next_entry) {
			iwl_print_cont_event_trace(
				priv, base, priv->event_log.next_entry,
				capacity - priv->event_log.next_entry,
				capacity, mode);

			iwl_print_cont_event_trace(
				priv, base, 0, next_entry, capacity, mode);
		} else {
			iwl_print_cont_event_trace(
				priv, base, next_entry,
				capacity - next_entry,
				capacity, mode);

			iwl_print_cont_event_trace(
				priv, base, 0, next_entry, capacity, mode);
		}
	}

	/* remember where we stopped for the next invocation */
	priv->event_log.num_wraps = num_wraps;
	priv->event_log.next_entry = next_entry;
}
477
478/**
479 * iwl_bg_ucode_trace - Timer callback to log ucode event
480 *
481 * The timer is continually set to execute every
482 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
483 * this function is to perform continuous uCode event logging operation
484 * if enabled
485 */
486static void iwl_bg_ucode_trace(unsigned long data)
487{
488 struct iwl_priv *priv = (struct iwl_priv *)data;
489
490 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
491 return;
492
493 if (priv->event_log.ucode_trace) {
494 iwl_continuous_event_trace(priv);
495 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
496 mod_timer(&priv->ucode_trace,
497 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
498 }
499}
500
501static void iwl_bg_tx_flush(struct work_struct *work)
502{
503 struct iwl_priv *priv =
504 container_of(work, struct iwl_priv, tx_flush);
505
506 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
507 return;
508
509 /* do nothing if rf-kill is on */
510 if (!iwl_is_ready_rf(priv))
511 return;
512
513 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
514 iwlagn_dev_txfifo_flush(priv);
515}
516
/*
 * queue/FIFO/AC mapping definitions
 *
 * Each table is indexed by access category: slot 0 = VO, 1 = VI,
 * 2 = BE, 3 = BK (presumably matching mac80211's AC enumeration --
 * NOTE(review): confirm against IEEE80211_AC_* ordering).
 */

/* BSS context: AC -> TX FIFO */
static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};

/* BSS context: AC -> HW queue (queues 0-3) */
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};

/* PAN context: AC -> TX FIFO (dedicated IPAN FIFOs) */
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};

/* PAN context: AC -> HW queue (queues 4-7, reverse order) */
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};
542
/*
 * iwl_init_context - initialize the BSS and PAN RXON contexts
 *
 * Fills in the per-context command IDs, station IDs, allowed interface
 * modes and AC->queue/FIFO mappings.  The PAN context is only marked
 * valid when the uCode advertises PAN support in @ucode_flags.
 */
static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
{
	int i;

	/*
	 * The default context is always valid,
	 * the PAN context depends on uCode.
	 */
	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
		priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);

	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
		priv->contexts[i].ctxid = i;

	/* BSS context: legacy RXON command set and station interface modes */
	priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
	priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
	priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MONITOR);
	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
		BIT(NL80211_IFTYPE_STATION);
	priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue,
	       iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue));
	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo,
	       iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo));

	/* PAN context: WIPAN command set, station/AP (and maybe P2P) modes */
	priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
	priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
		REPLY_WIPAN_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
		REPLY_WIPAN_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
	priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
	priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
	priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
	priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
		BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);

	if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P)
		priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
	priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
	priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue,
	       iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue));
	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo,
	       iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo));
	priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;

	/* this function only knows how to set up exactly two contexts */
	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
}
609
610static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
611{
612 struct iwl_ct_kill_config cmd;
613 struct iwl_ct_kill_throttling_config adv_cmd;
614 int ret = 0;
615
616 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
617 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
618
619 priv->thermal_throttle.ct_kill_toggle = false;
620
621 if (priv->cfg->base_params->support_ct_kill_exit) {
622 adv_cmd.critical_temperature_enter =
623 cpu_to_le32(priv->hw_params.ct_kill_threshold);
624 adv_cmd.critical_temperature_exit =
625 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
626
627 ret = iwl_dvm_send_cmd_pdu(priv,
628 REPLY_CT_KILL_CONFIG_CMD,
629 CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
630 if (ret)
631 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
632 else
633 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
634 "succeeded, critical temperature enter is %d,"
635 "exit is %d\n",
636 priv->hw_params.ct_kill_threshold,
637 priv->hw_params.ct_kill_exit_threshold);
638 } else {
639 cmd.critical_temperature_R =
640 cpu_to_le32(priv->hw_params.ct_kill_threshold);
641
642 ret = iwl_dvm_send_cmd_pdu(priv,
643 REPLY_CT_KILL_CONFIG_CMD,
644 CMD_SYNC, sizeof(cmd), &cmd);
645 if (ret)
646 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
647 else
648 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
649 "succeeded, "
650 "critical temperature is %d\n",
651 priv->hw_params.ct_kill_threshold);
652 }
653}
654
655static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
656{
657 struct iwl_calib_cfg_cmd calib_cfg_cmd;
658 struct iwl_host_cmd cmd = {
659 .id = CALIBRATION_CFG_CMD,
660 .len = { sizeof(struct iwl_calib_cfg_cmd), },
661 .data = { &calib_cfg_cmd, },
662 };
663
664 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
665 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL;
666 calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
667
668 return iwl_dvm_send_cmd(priv, &cmd);
669}
670
671
672static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
673{
674 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
675 .valid = cpu_to_le32(valid_tx_ant),
676 };
677
678 if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
679 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
680 return iwl_dvm_send_cmd_pdu(priv,
681 TX_ANT_CONFIGURATION_CMD,
682 CMD_SYNC,
683 sizeof(struct iwl_tx_ant_config_cmd),
684 &tx_ant_cmd);
685 } else {
686 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
687 return -EOPNOTSUPP;
688 }
689}
690
691static void iwl_send_bt_config(struct iwl_priv *priv)
692{
693 struct iwl_bt_cmd bt_cmd = {
694 .lead_time = BT_LEAD_TIME_DEF,
695 .max_kill = BT_MAX_KILL_DEF,
696 .kill_ack_mask = 0,
697 .kill_cts_mask = 0,
698 };
699
700 if (!iwlwifi_mod_params.bt_coex_active)
701 bt_cmd.flags = BT_COEX_DISABLE;
702 else
703 bt_cmd.flags = BT_COEX_ENABLE;
704
705 priv->bt_enable_flag = bt_cmd.flags;
706 IWL_DEBUG_INFO(priv, "BT coex %s\n",
707 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
708
709 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
710 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
711 IWL_ERR(priv, "failed to send BT Coex Config\n");
712}
713
/**
 * iwl_alive_start - called after REPLY_ALIVE notification received
 * from protocol/runtime uCode (initialization uCode's
 * Alive gets handled by iwl_init_alive_start()).
 *
 * Brings the device to operational state: BT coex setup, runtime
 * calibrations, TX antenna config, RXON commit and CT-kill config.
 * The ordering of these steps is mandatory.  Returns 0 on success or
 * a negative errno (-ERFKILL when rf-kill is asserted).
 */
int iwl_alive_start(struct iwl_priv *priv)
{
	int ret = 0;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(STATUS_ALIVE, &priv->status);

	if (iwl_is_rfkill(priv))
		return -ERFKILL;

	if (priv->event_log.ucode_trace) {
		/* start collecting data now */
		mod_timer(&priv->ucode_trace, jiffies);
	}

	/* download priority table before any calibration request */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/* Configure Bluetooth device coexistence support */
		if (priv->cfg->bt_params->bt_sco_disable)
			priv->bt_enable_pspoll = false;
		else
			priv->bt_enable_pspoll = true;

		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
		iwlagn_send_advance_bt_config(priv);
		priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
		priv->cur_rssi_ctx = NULL;

		iwl_send_prio_tbl(priv);

		/* FIXME: w/a to force change uCode BT state machine */
		ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
				      BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
		if (ret)
			return ret;
		ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
				      BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
		if (ret)
			return ret;
	} else {
		/*
		 * default is 2-wire BT coexexistence support
		 */
		iwl_send_bt_config(priv);
	}

	/*
	 * Perform runtime calibrations, including DC calibration.
	 */
	iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX);

	ieee80211_wake_queues(priv->hw);

	/* Configure Tx antenna selection based on H/W config */
	iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant);

	if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
		struct iwl_rxon_cmd *active_rxon =
				(struct iwl_rxon_cmd *)&ctx->active;
		/* apply any changes in staging */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		struct iwl_rxon_context *tmp;
		/* Initialize our rx_config data */
		for_each_context(priv, tmp)
			iwl_connection_init_rx_config(priv, tmp);

		iwlagn_set_rxon_chain(priv, ctx);
	}

	if (!priv->wowlan) {
		/* WoWLAN ucode will not reply in the same way, skip it */
		iwl_reset_run_time_calib(priv);
	}

	set_bit(STATUS_READY, &priv->status);

	/* Configure the adapter for unassociated operation */
	ret = iwlagn_commit_rxon(priv, ctx);
	if (ret)
		return ret;

	/* At this point, the NIC is initialized and operational */
	iwl_rf_kill_ct_config(priv);

	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");

	return iwl_power_update_mode(priv, true);
}
815
/**
 * iwl_clear_driver_stations - clear knowledge of all stations from driver
 * @priv: iwl priv struct
 *
 * This is called during iwl_down() to make sure that in the case
 * we're coming there from a hardware restart mac80211 will be
 * able to reconfigure stations -- if we're getting there in the
 * normal down flow then the stations will already be cleared.
 */
static void iwl_clear_driver_stations(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;

	/* station table and key state are protected by sta_lock */
	spin_lock_bh(&priv->sta_lock);
	memset(priv->stations, 0, sizeof(priv->stations));
	priv->num_stations = 0;

	priv->ucode_key_table = 0;

	for_each_context(priv, ctx) {
		/*
		 * Remove all key information that is not stored as part
		 * of station information since mac80211 may not have had
		 * a chance to remove all the keys. When device is
		 * reconfigured by mac80211 after an error all keys will
		 * be reconfigured.
		 */
		memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
		ctx->key_mapping_keys = 0;
	}

	spin_unlock_bh(&priv->sta_lock);
}
849
/*
 * iwl_down - bring the device down
 *
 * Cancels scanning and remain-on-channel, clears station/key state and
 * BT coex data, stops the transport and resets priv->status (keeping
 * only RF_KILL_HW, FW_ERROR and EXIT_PENDING, the bits that survive a
 * reset).  Must be called with priv->mutex held.
 */
void iwl_down(struct iwl_priv *priv)
{
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	lockdep_assert_held(&priv->mutex);

	iwl_scan_cancel_timeout(priv, 200);

	/*
	 * If active, scanning won't cancel it, so say it expired.
	 * No race since we hold the mutex here and a new one
	 * can't come in at this time.
	 */
	if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
		ieee80211_remain_on_channel_expired(priv->hw);

	exit_pending =
		test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_clear_ucode_stations(priv, NULL);
	iwl_dealloc_bcast_stations(priv);
	iwl_clear_driver_stations(priv);

	/* reset BT coex data */
	priv->bt_status = 0;
	priv->cur_rssi_ctx = NULL;
	priv->bt_is_sco = 0;
	if (priv->cfg->bt_params)
		priv->bt_traffic_load =
			 priv->cfg->bt_params->bt_init_traffic_load;
	else
		priv->bt_traffic_load = 0;
	priv->bt_full_concurrent = false;
	priv->bt_ci_compliance = 0;

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	priv->ucode_loaded = false;
	iwl_trans_stop_device(priv->trans);

	/* Set num_aux_in_flight must be done after the transport is stopped */
	atomic_set(&priv->num_aux_in_flight, 0);

	/* Clear out all status bits but a few that are stable across reset:
	 * each test_bit() result (0/1) is shifted back to its bit position,
	 * building a mask of the bits to preserve */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;
}
912
913/*****************************************************************************
914 *
915 * Workqueue callbacks
916 *
917 *****************************************************************************/
918
919static void iwl_bg_run_time_calib_work(struct work_struct *work)
920{
921 struct iwl_priv *priv = container_of(work, struct iwl_priv,
922 run_time_calib_work);
923
924 mutex_lock(&priv->mutex);
925
926 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
927 test_bit(STATUS_SCANNING, &priv->status)) {
928 mutex_unlock(&priv->mutex);
929 return;
930 }
931
932 if (priv->start_calib) {
933 iwl_chain_noise_calibration(priv);
934 iwl_sensitivity_calibration(priv);
935 }
936
937 mutex_unlock(&priv->mutex);
938}
939
/*
 * iwlagn_prepare_restart - tear the device down ahead of a fw restart
 *
 * Calls iwl_down() while preserving the BT coex state across it, then
 * resets all aggregation queue bookkeeping.  Must be called with
 * priv->mutex held.
 */
void iwlagn_prepare_restart(struct iwl_priv *priv)
{
	bool bt_full_concurrent;
	u8 bt_ci_compliance;
	u8 bt_load;
	u8 bt_status;
	bool bt_is_sco;
	int i;

	lockdep_assert_held(&priv->mutex);

	priv->is_open = 0;

	/*
	 * __iwl_down() will clear the BT status variables,
	 * which is correct, but when we restart we really
	 * want to keep them so restore them afterwards.
	 *
	 * The restart process will later pick them up and
	 * re-configure the hw when we reconfigure the BT
	 * command.
	 */
	bt_full_concurrent = priv->bt_full_concurrent;
	bt_ci_compliance = priv->bt_ci_compliance;
	bt_load = priv->bt_traffic_load;
	bt_status = priv->bt_status;
	bt_is_sco = priv->bt_is_sco;

	iwl_down(priv);

	/* restore BT state saved above */
	priv->bt_full_concurrent = bt_full_concurrent;
	priv->bt_ci_compliance = bt_ci_compliance;
	priv->bt_traffic_load = bt_load;
	priv->bt_status = bt_status;
	priv->bt_is_sco = bt_is_sco;

	/* reset aggregation queues */
	for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++)
		priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
	/* and stop counts */
	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc));
}
985
986static void iwl_bg_restart(struct work_struct *data)
987{
988 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
989
990 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
991 return;
992
993 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
994 mutex_lock(&priv->mutex);
995 iwlagn_prepare_restart(priv);
996 mutex_unlock(&priv->mutex);
997 iwl_cancel_deferred_work(priv);
998 if (priv->mac80211_registered)
999 ieee80211_restart_hw(priv->hw);
1000 else
1001 IWL_ERR(priv,
1002 "Cannot request restart before registrating with mac80211");
1003 } else {
1004 WARN_ON(1);
1005 }
1006}
1007
1008
1009
1010
/*
 * iwlagn_disable_roc - tear down remain-on-channel on the PAN context
 *
 * Reverts the PAN context's staging RXON to P2P device type with no
 * association filter and commits it, then marks the context inactive.
 * No-op when RoC was never set up.  Must be called with priv->mutex
 * held.
 */
void iwlagn_disable_roc(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];

	lockdep_assert_held(&priv->mutex);

	if (!priv->hw_roc_setup)
		return;

	ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;

	priv->hw_roc_channel = NULL;

	memset(ctx->staging.node_addr, 0, ETH_ALEN);

	iwlagn_commit_rxon(priv, ctx);

	ctx->is_active = false;
	priv->hw_roc_setup = false;
}
1032
1033static void iwlagn_disable_roc_work(struct work_struct *work)
1034{
1035 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1036 hw_roc_disable_work.work);
1037
1038 mutex_lock(&priv->mutex);
1039 iwlagn_disable_roc(priv);
1040 mutex_unlock(&priv->mutex);
1041}
1042
1043/*****************************************************************************
1044 *
1045 * driver setup and teardown
1046 *
1047 *****************************************************************************/
1048
/*
 * Create the driver's workqueue and initialize every deferred work
 * item and timer.  Counterpart of iwl_cancel_deferred_work().
 */
static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
	/* NOTE(review): create_singlethread_workqueue() can return NULL;
	 * the result is not checked here or by the caller, so a later
	 * queue_work() would dereference NULL — confirm/handle. */
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	INIT_WORK(&priv->restart, iwl_bg_restart);
	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
	INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
	INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
	INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
	INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
	INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
			  iwlagn_disable_roc_work);

	iwl_setup_scan_deferred_work(priv);

	/* BT coex work items exist only on devices with bt_params. */
	if (priv->cfg->bt_params)
		iwlagn_bt_setup_deferred_work(priv);

	/* Periodic statistics polling timer. */
	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl_bg_statistics_periodic;

	/* uCode trace-buffer collection timer. */
	init_timer(&priv->ucode_trace);
	priv->ucode_trace.data = (unsigned long)priv;
	priv->ucode_trace.function = iwl_bg_ucode_trace;
}
1075
/*
 * Synchronously cancel all work items and timers set up by
 * iwl_setup_deferred_work() (plus the BT-specific ones).  Safe to call
 * for items that were never queued.  Note: the restart and tx_flush
 * works are intentionally not cancelled here (this is called from the
 * restart path itself).
 */
void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
	if (priv->cfg->bt_params)
		iwlagn_bt_cancel_deferred_work(priv);

	cancel_work_sync(&priv->run_time_calib_work);
	cancel_work_sync(&priv->beacon_update);

	iwl_cancel_scan_deferred_work(priv);

	cancel_work_sync(&priv->bt_full_concurrency);
	cancel_work_sync(&priv->bt_runtime_config);
	cancel_delayed_work_sync(&priv->hw_roc_disable_work);

	del_timer_sync(&priv->statistics_periodic);
	del_timer_sync(&priv->ucode_trace);
}
1093
1094static int iwl_init_drv(struct iwl_priv *priv)
1095{
1096 spin_lock_init(&priv->sta_lock);
1097
1098 mutex_init(&priv->mutex);
1099
1100 INIT_LIST_HEAD(&priv->calib_results);
1101
1102 priv->band = IEEE80211_BAND_2GHZ;
1103
1104 priv->plcp_delta_threshold =
1105 priv->cfg->base_params->plcp_delta_threshold;
1106
1107 priv->iw_mode = NL80211_IFTYPE_STATION;
1108 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
1109 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
1110 priv->agg_tids_count = 0;
1111
1112 priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
1113
1114 priv->rx_statistics_jiffies = jiffies;
1115
1116 /* Choose which receivers/antennas to use */
1117 iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
1118
1119 iwl_init_scan_params(priv);
1120
1121 /* init bt coex */
1122 if (priv->cfg->bt_params &&
1123 priv->cfg->bt_params->advanced_bt_coexist) {
1124 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
1125 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
1126 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
1127 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
1128 priv->bt_duration = BT_DURATION_LIMIT_DEF;
1129 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
1130 }
1131
1132 return 0;
1133}
1134
1135static void iwl_uninit_drv(struct iwl_priv *priv)
1136{
1137 kfree(priv->scan_cmd);
1138 kfree(priv->beacon_cmd);
1139 kfree(rcu_dereference_raw(priv->noa_data));
1140 iwl_calib_free_results(priv);
1141#ifdef CONFIG_IWLWIFI_DEBUGFS
1142 kfree(priv->wowlan_sram);
1143#endif
1144}
1145
/*
 * Fill in hw_params: the generic HT-derived setting here, then the
 * per-device-family hook for everything device specific.
 */
static void iwl_set_hw_params(struct iwl_priv *priv)
{
	if (priv->cfg->ht_params)
		priv->hw_params.use_rts_for_aggregation =
			priv->cfg->ht_params->use_rts_for_aggregation;

	/* Device-specific setup */
	priv->lib->set_hw_params(priv);
}
1155
1156
1157
/*
 * Log which compile-time (Kconfig) optional capabilities this build of
 * the driver was compiled with.  Purely informational; printed once at
 * load time from iwl_op_mode_dvm_start().
 */
static void iwl_option_config(struct iwl_priv *priv)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
#else
	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG disabled\n");
#endif

#ifdef CONFIG_IWLWIFI_DEBUGFS
	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS enabled\n");
#else
	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS disabled\n");
#endif

#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING enabled\n");
#else
	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
#endif

#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE enabled\n");
#else
	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE disabled\n");
#endif

#ifdef CONFIG_IWLWIFI_P2P
	IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
#else
	IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n");
#endif
}
1191
1192static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1193{
1194 struct iwl_nvm_data *data = priv->nvm_data;
1195 char *debug_msg;
1196
1197 if (data->sku_cap_11n_enable &&
1198 !priv->cfg->ht_params) {
1199 IWL_ERR(priv, "Invalid 11n configuration\n");
1200 return -EINVAL;
1201 }
1202
1203 if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable &&
1204 !data->sku_cap_band_52GHz_enable) {
1205 IWL_ERR(priv, "Invalid device sku\n");
1206 return -EINVAL;
1207 }
1208
1209 debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
1210 IWL_DEBUG_INFO(priv, debug_msg,
1211 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
1212 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
1213 data->sku_cap_11n_enable ? "" : "NOT", "enabled");
1214
1215 priv->hw_params.tx_chains_num =
1216 num_of_ant(data->valid_tx_ant);
1217 if (priv->cfg->rx_with_siso_diversity)
1218 priv->hw_params.rx_chains_num = 1;
1219 else
1220 priv->hw_params.rx_chains_num =
1221 num_of_ant(data->valid_rx_ant);
1222
1223 IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
1224 data->valid_tx_ant,
1225 data->valid_rx_ant);
1226
1227 return 0;
1228}
1229
/*
 * op_mode .start handler — the DVM driver's "probe".
 *
 * Allocates the mac80211 hw + priv, selects the device-family library,
 * configures the transport, reads and parses the EEPROM, sets up all
 * driver services and finally registers with mac80211 and debugfs.
 *
 * Returns the op_mode on success, NULL on any failure (all partially
 * acquired resources are unwound through the goto ladder at the end,
 * in reverse order of acquisition).
 */
static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
						 const struct iwl_cfg *cfg,
						 const struct iwl_fw *fw,
						 struct dentry *dbgfs_dir)
{
	struct iwl_priv *priv;
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	u16 num_mac;
	u32 ucode_flags;
	struct iwl_trans_config trans_cfg = {};
	/* RX/notification commands whose buffers the transport must not
	 * reclaim on its own. */
	static const u8 no_reclaim_cmds[] = {
		REPLY_RX_PHY_CMD,
		REPLY_RX_MPDU_CMD,
		REPLY_COMPRESSED_BA,
		STATISTICS_NOTIFICATION,
		REPLY_TX,
	};
	int i;

	/************************
	 * 1. Allocating HW data
	 ************************/
	hw = iwl_alloc_all();
	if (!hw) {
		pr_err("%s: Cannot allocate network device\n", cfg->name);
		goto out;
	}

	op_mode = hw->priv;
	op_mode->ops = &iwl_dvm_ops;
	priv = IWL_OP_MODE_GET_DVM(op_mode);
	priv->trans = trans;
	priv->dev = trans->dev;
	priv->cfg = cfg;
	priv->fw = fw;

	/* Select the per-device-family operations library. */
	switch (priv->cfg->device_family) {
	case IWL_DEVICE_FAMILY_1000:
	case IWL_DEVICE_FAMILY_100:
		priv->lib = &iwl1000_lib;
		break;
	case IWL_DEVICE_FAMILY_2000:
	case IWL_DEVICE_FAMILY_105:
		priv->lib = &iwl2000_lib;
		break;
	case IWL_DEVICE_FAMILY_2030:
	case IWL_DEVICE_FAMILY_135:
		priv->lib = &iwl2030_lib;
		break;
	case IWL_DEVICE_FAMILY_5000:
		priv->lib = &iwl5000_lib;
		break;
	case IWL_DEVICE_FAMILY_5150:
		priv->lib = &iwl5150_lib;
		break;
	case IWL_DEVICE_FAMILY_6000:
	case IWL_DEVICE_FAMILY_6005:
	case IWL_DEVICE_FAMILY_6000i:
	case IWL_DEVICE_FAMILY_6050:
	case IWL_DEVICE_FAMILY_6150:
		priv->lib = &iwl6000_lib;
		break;
	case IWL_DEVICE_FAMILY_6030:
		priv->lib = &iwl6030_lib;
		break;
	default:
		break;
	}

	/* Unknown family: priv->lib stays NULL and we bail out. */
	if (WARN_ON(!priv->lib))
		goto out_free_hw;

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
	trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
	if (!iwlwifi_mod_params.wd_disable)
		trans_cfg.queue_watchdog_timeout =
			priv->cfg->base_params->wd_timeout;
	else
		trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
	trans_cfg.command_names = iwl_dvm_cmd_strings;
	trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;

	/* The stop-queue bitmap must be able to hold one bit per queue. */
	WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
		priv->cfg->base_params->num_of_queues);

	ucode_flags = fw->ucode_capa.flags;

#ifndef CONFIG_IWLWIFI_P2P
	ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
#endif

	/* PAN-capable uCode uses a different command queue and allows
	 * more station keys. */
	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
		priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
		trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
	} else {
		priv->sta_key_max_num = STA_KEY_MAX_NUM;
		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
	}

	/* Configure transport layer */
	iwl_trans_configure(priv->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);

	/* At this point both hw and priv are allocated. */

	SET_IEEE80211_DEV(priv->hw, priv->trans->dev);

	iwl_option_config(priv);

	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");

	/* is antenna coupling more than 35dB ? */
	priv->bt_ant_couple_ok =
		(iwlwifi_mod_params.ant_coupling >
			IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
			true : false;

	/* enable/disable bt channel inhibition */
	priv->bt_ch_announce = iwlwifi_mod_params.bt_ch_announce;
	IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
		       (priv->bt_ch_announce) ? "On" : "Off");

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&priv->statistics.lock);

	/***********************
	 * 2. Read REV register
	 ***********************/
	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
		priv->cfg->name, priv->trans->hw_rev);

	if (iwl_trans_start_hw(priv->trans))
		goto out_free_hw;

	/* Read the EEPROM */
	if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
			    &priv->eeprom_blob_size)) {
		IWL_ERR(priv, "Unable to init EEPROM\n");
		goto out_free_hw;
	}

	/* Reset chip to save power until we load uCode during "up". */
	iwl_trans_stop_hw(priv->trans, false);

	priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
					       priv->eeprom_blob,
					       priv->eeprom_blob_size);
	if (!priv->nvm_data)
		goto out_free_eeprom_blob;

	if (iwl_nvm_check_version(priv->nvm_data, priv->trans))
		goto out_free_eeprom;

	if (iwl_eeprom_init_hw_params(priv))
		goto out_free_eeprom;

	/* extract MAC Address */
	memcpy(priv->addresses[0].addr, priv->nvm_data->hw_addr, ETH_ALEN);
	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
	priv->hw->wiphy->addresses = priv->addresses;
	priv->hw->wiphy->n_addresses = 1;
	num_mac = priv->nvm_data->n_hw_addrs;
	if (num_mac > 1) {
		/* second (virtual) MAC: same as the first, last octet + 1 */
		memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
		       ETH_ALEN);
		priv->addresses[1].addr[5]++;
		priv->hw->wiphy->n_addresses++;
	}

	/************************
	 * 4. Setup HW constants
	 ************************/
	iwl_set_hw_params(priv);

	/* EEPROM may veto the PAN capability the uCode advertised. */
	if (!(priv->nvm_data->sku_cap_ipan_enable)) {
		IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
		ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
		/*
		 * if not PAN, then don't support P2P -- might be a uCode
		 * packaging bug or due to the eeprom check above
		 */
		ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
		priv->sta_key_max_num = STA_KEY_MAX_NUM;
		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;

		/* Configure transport layer again*/
		iwl_trans_configure(priv->trans, &trans_cfg);
	}

	/*******************
	 * 5. Setup priv
	 *******************/
	/* Map the non-aggregation, non-command HW queues 1:1 to
	 * mac80211 queues; everything else is invalid for mac80211. */
	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
		priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
		if (i < IWLAGN_FIRST_AMPDU_QUEUE &&
		    i != IWL_DEFAULT_CMD_QUEUE_NUM &&
		    i != IWL_IPAN_CMD_QUEUE_NUM)
			priv->queue_to_mac80211[i] = i;
		atomic_set(&priv->queue_stop_count[i], 0);
	}

	if (iwl_init_drv(priv))
		goto out_free_eeprom;

	/* At this point both hw and priv are initialized. */

	/********************
	 * 6. Setup services
	 ********************/
	iwl_setup_deferred_work(priv);
	iwl_setup_rx_handlers(priv);
	iwl_testmode_init(priv);

	iwl_power_initialize(priv);
	iwl_tt_initialize(priv);

	snprintf(priv->hw->wiphy->fw_version,
		 sizeof(priv->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	priv->new_scan_threshold_behaviour =
		!!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);

	/* The chain-noise calibration commands follow directly after the
	 * standard phy calibration commands. */
	priv->phy_calib_chain_noise_reset_cmd =
		fw->ucode_capa.standard_phy_calibration_size;
	priv->phy_calib_chain_noise_gain_cmd =
		fw->ucode_capa.standard_phy_calibration_size + 1;

	/* initialize all valid contexts */
	iwl_init_context(priv, ucode_flags);

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 7. Setup and register with mac80211 and debugfs
	 **************************************************/
	if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
		goto out_destroy_workqueue;

	if (iwl_dbgfs_register(priv, dbgfs_dir))
		goto out_mac80211_unregister;

	return op_mode;

	/* Error unwind: reverse order of the setup above. */
out_mac80211_unregister:
	iwlagn_mac_unregister(priv);
out_destroy_workqueue:
	iwl_tt_exit(priv);
	iwl_testmode_free(priv);
	iwl_cancel_deferred_work(priv);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_uninit_drv(priv);
out_free_eeprom_blob:
	kfree(priv->eeprom_blob);
out_free_eeprom:
	iwl_free_nvm_data(priv->nvm_data);
out_free_hw:
	ieee80211_free_hw(priv->hw);
out:
	op_mode = NULL;
	return op_mode;
}
1504
/*
 * op_mode .stop handler — tear down everything set up by
 * iwl_op_mode_dvm_start(), in an order constrained by mac80211's
 * use of the workqueue (see comment below).
 */
static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_testmode_free(priv);
	iwlagn_mac_unregister(priv);

	iwl_tt_exit(priv);

	kfree(priv->eeprom_blob);
	iwl_free_nvm_data(priv->nvm_data);

	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	iwl_uninit_drv(priv);

	dev_kfree_skb(priv->beacon_skb);

	iwl_trans_stop_hw(priv->trans, true);
	ieee80211_free_hw(priv->hw);
}
1535
/*
 * Human-readable names for the "legacy" uCode error codes; indexed
 * directly by the error_id from the error event table (see
 * desc_lookup()).
 */
static const char * const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
1566
1567static struct { char *name; u8 num; } advanced_lookup[] = {
1568 { "NMI_INTERRUPT_WDG", 0x34 },
1569 { "SYSASSERT", 0x35 },
1570 { "UCODE_VERSION_MISMATCH", 0x37 },
1571 { "BAD_COMMAND", 0x38 },
1572 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1573 { "FATAL_ERROR", 0x3D },
1574 { "NMI_TRM_HW_ERR", 0x46 },
1575 { "NMI_INTERRUPT_TRM", 0x4C },
1576 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1577 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1578 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1579 { "NMI_INTERRUPT_HOST", 0x66 },
1580 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1581 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1582 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1583 { "ADVANCED_SYSASSERT", 0 },
1584};
1585
1586static const char *desc_lookup(u32 num)
1587{
1588 int i;
1589 int max = ARRAY_SIZE(desc_lookup_text);
1590
1591 if (num < max)
1592 return desc_lookup_text[num];
1593
1594 max = ARRAY_SIZE(advanced_lookup) - 1;
1595 for (i = 0; i < max; i++) {
1596 if (advanced_lookup[i].num == num)
1597 break;
1598 }
1599 return advanced_lookup[i].name;
1600}
1601
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))

/*
 * Read the uCode error event table from device SRAM and dump every
 * field to the kernel log (and the tracing subsystem).  Which image's
 * table is read depends on priv->cur_ucode (Init vs. RT).
 */
static void iwl_dump_nic_error_log(struct iwl_priv *priv)
{
	struct iwl_trans *trans = priv->trans;
	u32 base;
	struct iwl_error_event_table table;

	/* Prefer the pointer the running image reported; fall back to
	 * the per-image pointer from the firmware file. */
	base = priv->device_pointers.error_event_table;
	if (priv->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = priv->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = priv->fw->inst_errlog_ptr;
	}

	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(priv->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	/*TODO: Update dbgfs with ISR error stats obtained below */
	iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));

	/* NOTE(review): table.valid appears to be an entry count; the
	 * banner prints only when it is non-zero — confirm semantics. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			priv->status, table.valid);
	}

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.line,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.ucode_ver,
				      table.hw_ver, table.brd_ver);
	IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
	IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(priv, "0x%08X | data1\n", table.data1);
	IWL_ERR(priv, "0x%08X | data2\n", table.data2);
	IWL_ERR(priv, "0x%08X | line\n", table.line);
	IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
	IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(priv, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(priv, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(priv, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(priv, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(priv, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref);
	IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler);
}
1679
#define EVENT_START_OFFSET  (4 * sizeof(u32))

/**
 * iwl_print_event_log - Dump error event log to syslog
 *
 * Reads @num_events entries starting at @start_idx from the uCode
 * event log in device SRAM.  @mode selects 2-word (no timestamp) or
 * 3-word (timestamped) entries.  If @bufsz is non-zero the output is
 * appended to *@buf at @pos instead of the kernel log.  Returns the
 * updated buffer position.
 */
static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
			       u32 num_events, u32 mode,
			       int pos, char **buf, size_t bufsz)
{
	u32 i;
	u32 base;       /* SRAM byte address of event log header */
	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	struct iwl_trans *trans = priv->trans;

	if (num_events == 0)
		return pos;

	/* Prefer the running image's pointer; fall back to the
	 * per-image pointer from the firmware file. */
	base = priv->device_pointers.log_event_table;
	if (priv->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = priv->fw->init_evtlog_ptr;
	} else {
		if (!base)
			base = priv->fw->inst_evtlog_ptr;
	}

	if (mode == 0)
		event_size = 2 * sizeof(u32);
	else
		event_size = 3 * sizeof(u32);

	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&trans->reg_lock, reg_flags);
	if (unlikely(!iwl_grab_nic_access(trans)))
		goto out_unlock;

	/* Set starting address; reads will auto-increment */
	iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);

	/* "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing. */
	for (i = 0; i < num_events; i++) {
		ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			/* data, ev */
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						 "EVT_LOG:0x%08x:%04u\n",
						 time, ev);
			} else {
				trace_iwlwifi_dev_ucode_event(trans->dev, 0,
							      time, ev);
				IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
					time, ev);
			}
		} else {
			data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						 "EVT_LOGT:%010u:0x%08x:%04u\n",
						 time, data, ev);
			} else {
				IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
					time, data, ev);
				trace_iwlwifi_dev_ucode_event(trans->dev, time,
							      data, ev);
			}
		}
	}

	/* Allow device to power down */
	iwl_release_nic_access(trans);
out_unlock:
	spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
	return pos;
}
1764
1765/**
1766 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1767 */
1768static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1769 u32 num_wraps, u32 next_entry,
1770 u32 size, u32 mode,
1771 int pos, char **buf, size_t bufsz)
1772{
1773 /*
1774 * display the newest DEFAULT_LOG_ENTRIES entries
1775 * i.e the entries just before the next ont that uCode would fill.
1776 */
1777 if (num_wraps) {
1778 if (next_entry < size) {
1779 pos = iwl_print_event_log(priv,
1780 capacity - (size - next_entry),
1781 size - next_entry, mode,
1782 pos, buf, bufsz);
1783 pos = iwl_print_event_log(priv, 0,
1784 next_entry, mode,
1785 pos, buf, bufsz);
1786 } else
1787 pos = iwl_print_event_log(priv, next_entry - size,
1788 size, mode, pos, buf, bufsz);
1789 } else {
1790 if (next_entry < size) {
1791 pos = iwl_print_event_log(priv, 0, next_entry,
1792 mode, pos, buf, bufsz);
1793 } else {
1794 pos = iwl_print_event_log(priv, next_entry - size,
1795 size, mode, pos, buf, bufsz);
1796 }
1797 }
1798 return pos;
1799}
1800
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)

/*
 * Dump the uCode event log.  Reads the 4-word log header from SRAM,
 * sanity-checks it against the firmware-file limits, then prints
 * either the whole log (full_log / IWL_DL_FW_ERRORS) or just the
 * newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries.  With @display (and
 * CONFIG_IWLWIFI_DEBUG) the output is returned in a kmalloc'd *@buf
 * (caller frees).  Returns the number of bytes written to the buffer,
 * 0 when printing to the log, or a negative errno.
 */
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
			    char **buf, bool display)
{
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */
	u32 logsize;
	int pos = 0;
	size_t bufsz = 0;
	struct iwl_trans *trans = priv->trans;

	base = priv->device_pointers.log_event_table;
	if (priv->cur_ucode == IWL_UCODE_INIT) {
		logsize = priv->fw->init_evtlog_size;
		if (!base)
			base = priv->fw->init_evtlog_ptr;
	} else {
		logsize = priv->fw->inst_evtlog_size;
		if (!base)
			base = priv->fw->inst_evtlog_ptr;
	}

	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Invalid event log pointer 0x%08X for %s uCode\n",
			base,
			(priv->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return -EINVAL;
	}

	/* event log header */
	capacity = iwl_read_targ_mem(trans, base);
	mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
	num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
	next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));

	/* Clamp device-reported values — the device may be confused
	 * after a firmware error. */
	if (capacity > logsize) {
		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
			"entries\n", capacity, logsize);
		capacity = logsize;
	}

	if (next_entry > logsize) {
		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
			next_entry, logsize);
		next_entry = logsize;
	}

	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
		return pos;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#endif
	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
		size);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		/* ~48 bytes per formatted entry. */
		if (full_log)
			bufsz = capacity * 48;
		else
			bufsz = size * 48;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
	}
	if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
		/*
		 * if uCode has wrapped back to top of log,
		 * start at the oldest entry,
		 * i.e the next one that uCode would fill.
		 */
		if (num_wraps)
			pos = iwl_print_event_log(priv, next_entry,
						  capacity - next_entry, mode,
						  pos, buf, bufsz);
		/* (then/else) start at top of log */
		pos = iwl_print_event_log(priv, 0,
					  next_entry, mode, pos, buf, bufsz);
	} else
		pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
						next_entry, size, mode,
						pos, buf, bufsz);
#else
	pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#endif
	return pos;
}
1908
/*
 * Common firmware-error handling: mark the firmware dead, abort any
 * pending notification waits and, unless restarts are disabled or the
 * firmware is crash-looping, queue the restart work.
 *
 * @ondemand: true when the caller deliberately triggered the error
 *	(e.g. from debugfs); such errors don't count toward the
 *	crash-loop limit.
 */
static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
	unsigned int reload_msec;
	unsigned long reload_jiffies;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
		iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
#endif

	/* uCode is no longer loaded. */
	priv->ucode_loaded = false;

	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	iwl_abort_notification_waits(&priv->notif_wait);

	/* Keep the restart process from trying to send host
	 * commands by clearing the ready bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!ondemand) {
		/*
		 * If firmware keep reloading, then it indicate something
		 * serious wrong and firmware having problem to recover
		 * from it. Instead of keep trying which will fill the syslog
		 * and hang the system, let's just stop it
		 */
		reload_jiffies = jiffies;
		/* signed subtraction handles jiffies wraparound */
		reload_msec = jiffies_to_msecs((long) reload_jiffies -
					(long) priv->reload_jiffies);
		priv->reload_jiffies = reload_jiffies;
		if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
			priv->reload_count++;
			if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
				IWL_ERR(priv, "BUG_ON, Stop restarting\n");
				return;
			}
		} else
			priv->reload_count = 0;
	}

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		if (iwlwifi_mod_params.restart_fw) {
			IWL_DEBUG_FW_ERRORS(priv,
				  "Restarting adapter due to uCode error.\n");
			queue_work(priv->workqueue, &priv->restart);
		} else
			IWL_DEBUG_FW_ERRORS(priv,
				  "Detected FW error, but not restarting\n");
	}
}
1962
1963static void iwl_nic_error(struct iwl_op_mode *op_mode)
1964{
1965 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1966
1967 IWL_ERR(priv, "Loaded firmware version: %s\n",
1968 priv->fw->fw_version);
1969
1970 iwl_dump_nic_error_log(priv);
1971 iwl_dump_nic_event_log(priv, false, NULL, false);
1972
1973 iwlagn_fw_error(priv, false);
1974}
1975
1976static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
1977{
1978 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1979
1980 if (!iwl_check_for_ct_kill(priv)) {
1981 IWL_ERR(priv, "Restarting adapter queue is full\n");
1982 iwlagn_fw_error(priv, false);
1983 }
1984}
1985
#define EEPROM_RF_CONFIG_TYPE_MAX	0x3

/*
 * op_mode .nic_config callback: program SKU/radio configuration from
 * the parsed NVM data into the HW_IF_CONFIG CSR, apply the early
 * power-off workaround, then let the device-family library do its own
 * configuration.
 */
static void iwl_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);

	/* SKU Control */
	iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
			  CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
			  CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
			  (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
			  (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));

	/* write radio config values to register */
	if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
		u32 reg_val =
			priv->nvm_data->radio_cfg_type <<
				CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
			priv->nvm_data->radio_cfg_step <<
				CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
			priv->nvm_data->radio_cfg_dash <<
				CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

		iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
				  CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				  CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				  CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);

		IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
			 priv->nvm_data->radio_cfg_type,
			 priv->nvm_data->radio_cfg_step,
			 priv->nvm_data->radio_cfg_dash);
	} else {
		/* radio_cfg_type out of range: bad NVM data */
		WARN_ON(1);
	}

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted),
	 * causing ME FW to lose ownership and not being able to obtain it back.
	 */
	iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
			       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);

	if (priv->lib->nic_config)
		priv->lib->nic_config(priv);
}
2040
/*
 * op_mode .wimax_active callback: the RF has been claimed by a
 * co-located WiMAX radio, so mark ourselves not-ready and log it.
 */
static void iwl_wimax_active(struct iwl_op_mode *op_mode)
{
	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);

	clear_bit(STATUS_READY, &priv->status);
	IWL_ERR(priv, "RF is used by WiMAX\n");
}
2048
2049static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
2050{
2051 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2052 int mq = priv->queue_to_mac80211[queue];
2053
2054 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2055 return;
2056
2057 if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
2058 IWL_DEBUG_TX_QUEUES(priv,
2059 "queue %d (mac80211 %d) already stopped\n",
2060 queue, mq);
2061 return;
2062 }
2063
2064 set_bit(mq, &priv->transport_queue_stop);
2065 ieee80211_stop_queue(priv->hw, mq);
2066}
2067
2068static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
2069{
2070 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2071 int mq = priv->queue_to_mac80211[queue];
2072
2073 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2074 return;
2075
2076 if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
2077 IWL_DEBUG_TX_QUEUES(priv,
2078 "queue %d (mac80211 %d) already awake\n",
2079 queue, mq);
2080 return;
2081 }
2082
2083 clear_bit(mq, &priv->transport_queue_stop);
2084
2085 if (!priv->passive_no_rx)
2086 ieee80211_wake_queue(priv->hw, mq);
2087}
2088
2089void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
2090{
2091 int mq;
2092
2093 if (!priv->passive_no_rx)
2094 return;
2095
2096 for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
2097 if (!test_bit(mq, &priv->transport_queue_stop)) {
2098 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq);
2099 ieee80211_wake_queue(priv->hw, mq);
2100 } else {
2101 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq);
2102 }
2103 }
2104
2105 priv->passive_no_rx = false;
2106}
2107
2108static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
2109{
2110 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2111 struct ieee80211_tx_info *info;
2112
2113 info = IEEE80211_SKB_CB(skb);
2114 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
2115 ieee80211_free_txskb(priv->hw, skb);
2116}
2117
/*
 * op_mode .hw_rf_kill callback: mirror the hardware rfkill state into
 * our status bits and forward it to the wiphy rfkill core.
 */
static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);

	if (state)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
}
2129
/* op_mode callbacks the iwlwifi core/transport invokes on this driver. */
static const struct iwl_op_mode_ops iwl_dvm_ops = {
	.start = iwl_op_mode_dvm_start,
	.stop = iwl_op_mode_dvm_stop,
	.rx = iwl_rx_dispatch,
	.queue_full = iwl_stop_sw_queue,
	.queue_not_full = iwl_wake_sw_queue,
	.hw_rf_kill = iwl_set_hw_rfkill_state,
	.free_skb = iwl_free_skb,
	.nic_error = iwl_nic_error,
	.cmd_queue_full = iwl_cmd_queue_full,
	.nic_config = iwl_nic_config,
	.wimax_active = iwl_wimax_active,
};
2143
2144/*****************************************************************************
2145 *
2146 * driver and module entry point
2147 *
2148 *****************************************************************************/
2149static int __init iwl_init(void)
2150{
2151
2152 int ret;
2153
2154 ret = iwlagn_rate_control_register();
2155 if (ret) {
2156 pr_err("Unable to register rate control algorithm: %d\n", ret);
2157 return ret;
2158 }
2159
2160 ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops);
2161 if (ret) {
2162 pr_err("Unable to register op_mode: %d\n", ret);
2163 iwlagn_rate_control_unregister();
2164 }
2165
2166 return ret;
2167}
2168module_init(iwl_init);
2169
/* Module exit: undo iwl_init() registrations, in reverse order. */
static void __exit iwl_exit(void)
{
	iwl_opmode_deregister("iwldvm");
	iwlagn_rate_control_unregister();
}
module_exit(iwl_exit);
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
deleted file mode 100644
index 518cf371580..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ /dev/null
@@ -1,387 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34#include <net/mac80211.h>
35#include "iwl-io.h"
36#include "iwl-debug.h"
37#include "iwl-trans.h"
38#include "iwl-modparams.h"
39#include "dev.h"
40#include "agn.h"
41#include "commands.h"
42#include "power.h"
43
44/*
45 * Setting power level allows the card to go to sleep when not busy.
46 *
47 * We calculate a sleep command based on the required latency, which
48 * we get from mac80211. In order to handle thermal throttling, we can
49 * also use pre-defined power levels.
50 */
51
52/*
53 * This defines the old power levels. They are still used by default
54 * (level 1) and for thermal throttle (levels 3 through 5)
55 */
56
/*
 * One row of a power table: the uCode sleep command for that power
 * level plus how many DTIM periods may be skipped while asleep.
 */
struct iwl_power_vec_entry {
	struct iwl_powertable_cmd cmd;
	u8 no_dtim;	/* number of skip dtim */
};
61
62#define IWL_DTIM_RANGE_0_MAX 2
63#define IWL_DTIM_RANGE_1_MAX 10
64
65#define NOSLP cpu_to_le16(0), 0, 0
66#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
67#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK | \
68 IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \
69 IWL_POWER_ADVANCE_PM_ENA_MSK)
70#define ASLP_TOUT(T) cpu_to_le32(T)
71#define TU_TO_USEC 1024
72#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
73#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
74 cpu_to_le32(X1), \
75 cpu_to_le32(X2), \
76 cpu_to_le32(X3), \
77 cpu_to_le32(X4)}
78/* default power management (not Tx power) table values */
79/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
80/* DTIM 0 - 2 */
81static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
82 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
83 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
84 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
85 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
86 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
87};
88
89
90/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
91/* DTIM 3 - 10 */
92static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
93 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
94 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
95 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
96 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
97 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
98};
99
100/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
101/* DTIM 11 - */
102static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
103 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
104 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
105 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
106 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
107 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
108};
109
110/* advance power management */
111/* DTIM 0 - 2 */
112static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = {
113 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
114 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
115 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
116 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
117 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
118 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
119 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
120 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
121 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
122 SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
123};
124
125
126/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
127/* DTIM 3 - 10 */
128static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = {
129 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
130 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
131 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
132 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
133 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
134 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
135 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
136 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
137 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
138 SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2}
139};
140
141/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
142/* DTIM 11 - */
143static const struct iwl_power_vec_entry apm_range_2[IWL_POWER_NUM] = {
144 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
145 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
146 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
147 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
148 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
149 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
150 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
151 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
152 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
153 SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
154};
155
/*
 * Build a sleep command from the pre-defined power tables.
 *
 * @lvl:    power level (0-4, array index into the tables)
 * @period: DTIM period of the association (0 means "unknown")
 *
 * Selects the table matching the DTIM period (and whether advanced PM
 * is enabled), copies the table entry into @cmd, then clamps the sleep
 * interval vector so no entry exceeds its per-slot maximum and the
 * vector stays monotonically non-decreasing.  Finally applies the
 * shadow-register, BT-SCO and PCI-PM flags from the current config.
 */
static void iwl_static_sleep_cmd(struct iwl_priv *priv,
				 struct iwl_powertable_cmd *cmd,
				 enum iwl_power_level lvl, int period)
{
	const struct iwl_power_vec_entry *table;
	int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
	int i;
	u8 skip;
	u32 slp_itrvl;

	/* choose the table matching the DTIM period range */
	if (priv->cfg->adv_pm) {
		table = apm_range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = apm_range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = apm_range_0;
	} else {
		table = range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = range_0;
	}

	/* out-of-range level: fall back to an all-zero (no-sleep) command */
	if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM))
		memset(cmd, 0, sizeof(*cmd));
	else
		*cmd = table[lvl].cmd;

	if (period == 0) {
		/* DTIM period unknown: no skipping, clamp every slot to 1 */
		skip = 0;
		period = 1;
		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = 1;

	} else {
		skip = table[lvl].no_dtim;
		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
		/* last slot may sleep across (skip + 1) DTIM periods */
		max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
	}

	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	/* figure out the listen interval based on dtim period and skip */
	if (slp_itrvl == 0xFF)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(period * (skip + 1));

	/* round the listen interval down to a multiple of the DTIM period */
	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > period)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32((slp_itrvl / period) * period);

	if (skip)
		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
	else
		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;

	if (priv->cfg->base_params->shadow_reg_enable)
		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
	else
		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;

	if (iwl_advanced_bt_coexist(priv)) {
		if (!priv->cfg->bt_params->bt_sco_disable)
			cmd->flags |= IWL_POWER_BT_SCO_ENA;
		else
			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
	}

	/* never exceed the maximum listen interval we may advertise */
	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);

	/* enforce max sleep interval */
	for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
		if (le32_to_cpu(cmd->sleep_interval[i]) >
		    (max_sleep[i] * period))
			cmd->sleep_interval[i] =
				cpu_to_le32(max_sleep[i] * period);
		/* keep the vector non-decreasing: clamp to the next slot */
		if (i != (IWL_POWER_VEC_SIZE - 1)) {
			if (le32_to_cpu(cmd->sleep_interval[i]) >
			    le32_to_cpu(cmd->sleep_interval[i+1]))
				cmd->sleep_interval[i] =
					cmd->sleep_interval[i+1];
		}
	}

	if (priv->power_data.bus_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;
	else
		cmd->flags &= ~IWL_POWER_PCI_PM_MSK;

	IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
			skip, period);
	/* The power level here is 0-4 (used as array index), but user expects
	   to see 1-5 (according to spec). */
	IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}
257
258static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
259 struct iwl_powertable_cmd *cmd)
260{
261 memset(cmd, 0, sizeof(*cmd));
262
263 if (priv->power_data.bus_pm)
264 cmd->flags |= IWL_POWER_PCI_PM_MSK;
265
266 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
267}
268
269static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
270{
271 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
272 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
273 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
274 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
275 IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
276 le32_to_cpu(cmd->sleep_interval[0]),
277 le32_to_cpu(cmd->sleep_interval[1]),
278 le32_to_cpu(cmd->sleep_interval[2]),
279 le32_to_cpu(cmd->sleep_interval[3]),
280 le32_to_cpu(cmd->sleep_interval[4]));
281
282 return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC,
283 sizeof(struct iwl_powertable_cmd), cmd);
284}
285
/*
 * Select the sleep command matching the current driver state, in
 * priority order: wowlan > idle > thermal throttling > PS disabled
 * (CAM) > debugfs level override > user/module power level.
 */
static void iwl_power_build_cmd(struct iwl_priv *priv,
				struct iwl_powertable_cmd *cmd)
{
	bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
	int dtimper;

	/* a DTIM period of 0 means "unknown"; use 1 as a safe divisor */
	dtimper = priv->hw->conf.ps_dtim_period ?: 1;

	if (priv->wowlan)
		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
	else if (!priv->cfg->base_params->no_idle_support &&
		 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
		/* deepest level with a fixed 20-beacon period when idle */
		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
	else if (iwl_tt_is_low_power_state(priv)) {
		/* in thermal throttling low power state */
		iwl_static_sleep_cmd(priv, cmd,
		    iwl_tt_current_power_mode(priv), dtimper);
	} else if (!enabled)
		iwl_power_sleep_cam_cmd(priv, cmd);
	else if (priv->power_data.debug_sleep_level_override >= 0)
		iwl_static_sleep_cmd(priv, cmd,
				     priv->power_data.debug_sleep_level_override,
				     dtimper);
	else {
		/* Note that the user parameter is 1-5 (according to spec),
		   but we pass 0-4 because it acts as an array index. */
		if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 &&
		    iwlwifi_mod_params.power_level <= IWL_POWER_NUM)
			iwl_static_sleep_cmd(priv, cmd,
				iwlwifi_mod_params.power_level - 1, dtimper);
		else
			iwl_static_sleep_cmd(priv, cmd,
				IWL_POWER_INDEX_1, dtimper);
	}
}
321
/*
 * Send a new power table command to the device unless it is identical
 * to the one already in effect (or @force is set).  Caller must hold
 * priv->mutex.  While a scan is in progress the command is only saved
 * in sleep_cmd_next, to be applied when the scan completes.
 *
 * Returns 0 on success, deferral or no-op; -EIO when the RF is not
 * ready; otherwise the uCode command error.
 */
int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
		       bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&priv->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
			priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;

	/* nothing to do if the command did not change */
	if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete use sleep_power_next, need to be updated */
	memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
		IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
		return 0;
	}

	/* raise PMI before allowing the device to sleep */
	if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		iwl_dvm_set_pmi(priv, true);

	ret = iwl_set_power(priv, cmd);
	if (!ret) {
		if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			iwl_dvm_set_pmi(priv, false);

		if (update_chains)
			iwl_update_chain_flags(priv);
		else
			IWL_DEBUG_POWER(priv,
					"Cannot update the power, chain noise "
					"calibration running: %d\n",
					priv->chain_noise_data.state);

		/* remember what the device is now running */
		memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IWL_ERR(priv, "set power fail, ret = %d", ret);

	return ret;
}
369
370int iwl_power_update_mode(struct iwl_priv *priv, bool force)
371{
372 struct iwl_powertable_cmd cmd;
373
374 iwl_power_build_cmd(priv, &cmd);
375 return iwl_power_set_mode(priv, &cmd, force);
376}
377
378/* initialize to default */
379void iwl_power_initialize(struct iwl_priv *priv)
380{
381 priv->power_data.bus_pm = priv->trans->pm_support;
382
383 priv->power_data.debug_sleep_level_override = -1;
384
385 memset(&priv->power_data.sleep_cmd, 0,
386 sizeof(priv->power_data.sleep_cmd));
387}
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
deleted file mode 100644
index a2cee7f0484..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/power.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_power_setting_h__
29#define __iwl_power_setting_h__
30
31#include "commands.h"
32
/* Driver power-management state, embedded in struct iwl_priv. */
struct iwl_power_mgr {
	struct iwl_powertable_cmd sleep_cmd;	  /* command in effect on the device */
	struct iwl_powertable_cmd sleep_cmd_next; /* deferred command, applied after scan */
	int debug_sleep_level_override;		  /* debugfs override; -1 == none */
	bool bus_pm;				  /* bus (PCIe) power mgmt supported */
};

/* Send @cmd unless identical to the last one sent (or @force). */
int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
		       bool force);
/* Rebuild the power command for the current state and send it. */
int iwl_power_update_mode(struct iwl_priv *priv, bool force);
/* Initialize power_data defaults at driver load. */
void iwl_power_initialize(struct iwl_priv *priv);

extern bool no_sleep_autoadjust;
46
47#endif /* __iwl_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
deleted file mode 100644
index f3dd0da60d8..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ /dev/null
@@ -1,3370 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37
38#include "dev.h"
39#include "agn.h"
40
41#define RS_NAME "iwl-agn-rs"
42
43#define NUM_TRY_BEFORE_ANT_TOGGLE 1
44#define IWL_NUMBER_TRY 1
45#define IWL_HT_NUMBER_TRY 3
46
47#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
48#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
49#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
50
51/* max allowed rate miss before sync LQ cmd */
52#define IWL_MISSED_RATE_MAX 15
53/* max time to accum history 2 seconds */
54#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
55
56static u8 rs_ht_to_legacy[] = {
57 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
58 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
59 IWL_RATE_6M_INDEX,
60 IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
61 IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
62 IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
63 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
64};
65
/*
 * Antenna toggle map, indexed by the current ANT_* bitmask: the next
 * antenna combination to try when toggling antennas.
 */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A    -> */ ANT_B,
	/*ANT_B    -> */ ANT_C,
	/*ANT_AB   -> */ ANT_BC,
	/*ANT_C    -> */ ANT_A,
	/*ANT_AC   -> */ ANT_AB,
	/*ANT_BC   -> */ ANT_AC,
	/*ANT_ABC  -> */ ANT_ABC,
};
76
77#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
78 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
79 IWL_RATE_SISO_##s##M_PLCP, \
80 IWL_RATE_MIMO2_##s##M_PLCP,\
81 IWL_RATE_MIMO3_##s##M_PLCP,\
82 IWL_RATE_##r##M_IEEE, \
83 IWL_RATE_##ip##M_INDEX, \
84 IWL_RATE_##in##M_INDEX, \
85 IWL_RATE_##rp##M_INDEX, \
86 IWL_RATE_##rn##M_INDEX, \
87 IWL_RATE_##pp##M_INDEX, \
88 IWL_RATE_##np##M_INDEX }
89
90/*
91 * Parameter order:
92 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
93 *
94 * If there isn't a valid next or previous rate then INV is used which
95 * maps to IWL_RATE_INVALID
96 *
97 */
98const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
99 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
100 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
101 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
102 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
103 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
104 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
105 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
106 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
107 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
108 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
109 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
110 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
111 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
112 /* FIXME:RS: ^^ should be INV (legacy) */
113};
114
115static inline u8 rs_extract_rate(u32 rate_n_flags)
116{
117 return (u8)(rate_n_flags & RATE_MCS_RATE_MSK);
118}
119
/*
 * Convert a uCode rate_n_flags word into an index into iwl_rates[].
 * Returns -1 when no matching rate is found.
 */
static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = rs_extract_rate(rate_n_flags);

		/* remove the MIMO2/MIMO3 PLCP offset to get the MCS index */
		if (idx >= IWL_RATE_MIMO3_6M_PLCP)
			idx = idx - IWL_RATE_MIMO3_6M_PLCP;
		else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
			if (iwl_rates[idx].plcp ==
					rs_extract_rate(rate_n_flags))
				return idx;
	}

	/* no match found */
	return -1;
}
150
151static void rs_rate_scale_perform(struct iwl_priv *priv,
152 struct sk_buff *skb,
153 struct ieee80211_sta *sta,
154 struct iwl_lq_sta *lq_sta);
155static void rs_fill_link_cmd(struct iwl_priv *priv,
156 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
157static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
158
159
160#ifdef CONFIG_MAC80211_DEBUGFS
161static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
162 u32 *rate_n_flags, int index);
163#else
164static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
165 u32 *rate_n_flags, int index)
166{}
167#endif
168
169/**
170 * The following tables contain the expected throughput metrics for all rates
171 *
172 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
173 *
174 * where invalid entries are zeros.
175 *
176 * CCK rates are only valid in legacy table and will only be used in G
177 * (2.4 GHz) band.
178 */
179
180static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
181 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
182};
183
184static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
185 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
186 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
187 {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
188 {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
189};
190
191static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
192 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
193 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
194 {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
195 {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
196};
197
198static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
199 {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
200 {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
201 {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
202 {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
203};
204
205static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
206 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
207 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
208 {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
209 {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
210};
211
212static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
213 {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
214 {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
215 {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
216 {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
217};
218
219static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
220 {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
221 {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
222 {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
223 {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
224};
225
226/* mbps, mcs */
227static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
228 { "1", "BPSK DSSS"},
229 { "2", "QPSK DSSS"},
230 {"5.5", "BPSK CCK"},
231 { "11", "QPSK CCK"},
232 { "6", "BPSK 1/2"},
233 { "9", "BPSK 1/2"},
234 { "12", "QPSK 1/2"},
235 { "18", "QPSK 3/4"},
236 { "24", "16QAM 1/2"},
237 { "36", "16QAM 3/4"},
238 { "48", "64QAM 2/3"},
239 { "54", "64QAM 3/4"},
240 { "60", "64QAM 5/6"},
241};
242
243#define MCS_INDEX_PER_STREAM (8)
244
245static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
246{
247 window->data = 0;
248 window->success_counter = 0;
249 window->success_ratio = IWL_INVALID_VALUE;
250 window->counter = 0;
251 window->average_tpt = IWL_INVALID_VALUE;
252 window->stamp = 0;
253}
254
255static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
256{
257 return (ant_type & valid_antenna) == ant_type;
258}
259
260/*
261 * removes the old data from the statistics. All data that is older than
262 * TID_MAX_TIME_DIFF, will be deleted.
263 */
/*
 * removes the old data from the statistics. All data that is older than
 * TID_MAX_TIME_DIFF, will be deleted.
 */
static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
{
	/* The oldest age we want to keep */
	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;

	/* pop cells off the head of the ring buffer until the remaining
	 * history is young enough (or the buffer is empty) */
	while (tl->queue_count &&
	       (tl->time_stamp < oldest_time)) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		/* each cell covers TID_QUEUE_CELL_SPACING ms */
		tl->time_stamp += TID_QUEUE_CELL_SPACING;
		tl->queue_count--;
		tl->head++;
		if (tl->head >= TID_QUEUE_MAX_SIZE)
			tl->head = 0;
	}
}
280
281/*
282 * increment traffic load value for tid and also remove
283 * any old values if passed the certain time period
284 */
/*
 * increment traffic load value for tid and also remove
 * any old values if passed the certain time period
 *
 * Returns the frame's TID, or IWL_MAX_TID_COUNT for non-QoS-data
 * frames and out-of-range TIDs (which are not tracked).
 */
static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
			   struct ieee80211_hdr *hdr)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;
	u8 tid;

	/* only QoS data frames carry a TID */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return IWL_MAX_TID_COUNT;

	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return IWL_MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	/* quantize the timestamp to the ring buffer's cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return IWL_MAX_TID_COUNT;
	}

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		rs_tl_rm_old_stats(tl, curr_time);

	/* account this packet in its time cell and in the running total */
	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[index] = tl->packet_count[index] + 1;
	tl->total = tl->total + 1;

	if ((index + 1) > tl->queue_count)
		tl->queue_count = index + 1;

	return tid;
}
334
335#ifdef CONFIG_MAC80211_DEBUGFS
336/**
337 * Program the device to use fixed rate for frame transmit
338 * This is for debugging/testing only
339 * once the device start use fixed rate, we need to reload the module
340 * to being back the normal operation.
341 */
static void rs_program_fix_rate(struct iwl_priv *priv,
				struct iwl_lq_sta *lq_sta)
{
	struct iwl_station_priv *sta_priv =
		container_of(lq_sta, struct iwl_station_priv, lq_sta);
	struct iwl_rxon_context *ctx = sta_priv->ctx;

	/* open up all rate masks so the fixed rate is always selectable */
	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
	lq_sta->active_siso_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
	lq_sta->active_mimo2_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
	lq_sta->active_mimo3_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */

#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
	/* testmode has higher priority to overwrite the fixed rate */
	if (priv->tm_fixed_rate)
		lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
#endif

	IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
		lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);

	/* push a link-quality command using the fixed rate (priv is not
	 * needed by rs_fill_link_cmd, hence NULL) */
	if (lq_sta->dbg_fixed_rate) {
		rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
		iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
				false);
	}
}
369#endif
370
371/*
372 get the traffic load value for tid
373*/
/*
 get the traffic load value for tid

 Returns the total packet count tracked in the TID's sliding time
 window (0 for untracked/out-of-range TIDs or empty history).
*/
static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;

	if (tid >= IWL_MAX_TID_COUNT)
		return 0;

	tl = &(lq_data->load[tid]);

	/* quantize to the ring buffer's cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* no history recorded yet */
	if (!(tl->queue_count))
		return 0;

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		rs_tl_rm_old_stats(tl, curr_time);

	return tl->total;
}
401
/*
 * Try to start a TX BA (aggregation) session for @tid, provided BT
 * traffic is not high and either auto-aggregation is enabled or the
 * TID's traffic load exceeds the threshold.
 *
 * Returns 0 when the session start was accepted, -EAGAIN (or the
 * mac80211 error) otherwise.
 */
static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
				      struct iwl_lq_sta *lq_data, u8 tid,
				      struct ieee80211_sta *sta)
{
	int ret = -EAGAIN;
	u32 load;

	/*
	 * Don't create TX aggregation sessions when in high
	 * BT traffic, as they would just be disrupted by BT.
	 */
	if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
		IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
			priv->bt_traffic_load);
		return ret;
	}

	load = rs_tl_get_load(lq_data, tid);

	if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
		IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
				sta->addr, tid);
		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
		if (ret == -EAGAIN) {
			/*
			 * driver and mac80211 is out of sync
			 * this might be cause by reloading firmware
			 * stop the tx ba session here
			 */
			IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
				tid);
			ieee80211_stop_tx_ba_session(sta, tid);
		}
	} else {
		IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
			"because load = %u\n", tid, load);
	}
	return ret;
}
441
442static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
443 struct iwl_lq_sta *lq_data,
444 struct ieee80211_sta *sta)
445{
446 if (tid < IWL_MAX_TID_COUNT)
447 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
448 else
449 IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
450 tid, IWL_MAX_TID_COUNT);
451}
452
453static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
454{
455 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
456 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
457 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
458}
459
460/*
461 * Static function to get the expected throughput from an iwl_scale_tbl_info
462 * that wraps a NULL pointer check
463 */
464static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
465{
466 if (tbl->expected_tpt)
467 return tbl->expected_tpt[rs_index];
468 return 0;
469}
470
471/**
472 * rs_collect_tx_data - Update the success/failure sliding window
473 *
474 * We keep a sliding window of the last 62 packets transmitted
475 * at this rate. window->data contains the bitmask of successful
476 * packets.
477 */
static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
			      int scale_index, int attempts, int successes)
{
	struct iwl_rate_scale_data *window = NULL;
	/* bit of the oldest frame in the 62-frame history bitmap */
	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
	s32 fail_count, tpt;

	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
		return -EINVAL;

	/* Select window for current tx bit rate */
	window = &(tbl->win[scale_index]);

	/* Get expected throughput */
	tpt = get_expected_tpt(tbl, scale_index);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history window; anything older isn't really relevant any more.
	 * If we have filled up the sliding window, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 */
	while (attempts > 0) {
		if (window->counter >= IWL_RATE_MAX_WINDOW) {

			/* remove earliest */
			window->counter = IWL_RATE_MAX_WINDOW - 1;

			if (window->data & mask) {
				window->data &= ~mask;
				window->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		window->counter++;

		/* Shift bitmap by one frame to throw away oldest history */
		window->data <<= 1;

		/* Mark the most recent #successes attempts as successful */
		if (successes > 0) {
			window->success_counter++;
			window->data |= 0x1;
			successes--;
		}

		attempts--;
	}

	/* Calculate current success ratio, avoid divide-by-0! */
	/* ratio is scaled by 128 for extra precision (i.e. 128 == 100%) */
	if (window->counter > 0)
		window->success_ratio = 128 * (100 * window->success_counter)
					/ window->counter;
	else
		window->success_ratio = IWL_INVALID_VALUE;

	fail_count = window->counter - window->success_counter;

	/* Calculate average throughput, if we have enough history. */
	if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
	    (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
		window->average_tpt = (window->success_ratio * tpt + 64) / 128;
	else
		window->average_tpt = IWL_INVALID_VALUE;

	/* Tag this window as having been updated */
	window->stamp = jiffies;

	return 0;
}
551
/*
 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
 *
 * Combines the PLCP rate value for @index with the modulation, antenna,
 * channel-width, SGI and greenfield flag bits described by @tbl.
 */
/* FIXME:RS:remove this function and put the flags statically in the table */
static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
				 struct iwl_scale_tbl_info *tbl,
				 int index, u8 use_green)
{
	u32 rate_n_flags = 0;

	/* Base PLCP value depends on the table's modulation class */
	if (is_legacy(tbl->lq_type)) {
		rate_n_flags = iwl_rates[index].plcp;
		/* CCK rates carry an extra flag in the uCode format */
		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
			rate_n_flags |= RATE_MCS_CCK_MSK;

	} else if (is_Ht(tbl->lq_type)) {
		/* Clamp out-of-range HT indices to the highest OFDM rate */
		if (index > IWL_LAST_OFDM_RATE) {
			IWL_ERR(priv, "Invalid HT rate index %d\n", index);
			index = IWL_LAST_OFDM_RATE;
		}
		rate_n_flags = RATE_MCS_HT_MSK;

		/* Pick the PLCP variant matching the stream count */
		if (is_siso(tbl->lq_type))
			rate_n_flags |= iwl_rates[index].plcp_siso;
		else if (is_mimo2(tbl->lq_type))
			rate_n_flags |= iwl_rates[index].plcp_mimo2;
		else
			rate_n_flags |= iwl_rates[index].plcp_mimo3;
	} else {
		IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
	}

	/* Encode the antenna selection bits */
	rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
						     RATE_MCS_ANT_ABC_MSK);

	if (is_Ht(tbl->lq_type)) {
		/* HT40 channels use either duplicate or true 40 MHz flag */
		if (tbl->is_ht40) {
			if (tbl->is_dup)
				rate_n_flags |= RATE_MCS_DUP_MSK;
			else
				rate_n_flags |= RATE_MCS_HT40_MSK;
		}
		if (tbl->is_SGI)
			rate_n_flags |= RATE_MCS_SGI_MSK;

		if (use_green) {
			rate_n_flags |= RATE_MCS_GF_MSK;
			/* 11n spec: SGI is not allowed in SISO greenfield */
			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
				rate_n_flags &= ~RATE_MCS_SGI_MSK;
				IWL_ERR(priv, "GF was set with SGI:SISO\n");
			}
		}
	}
	return rate_n_flags;
}
607
/*
 * Interpret uCode API's rate_n_flags format,
 * fill "search" or "active" tx mode table.
 *
 * On success, *rate_idx holds the PLCP rate index and @tbl describes the
 * modulation class, antenna set, channel width and SGI of @rate_n_flags.
 * Returns -EINVAL (and *rate_idx = -1) for an unrecognized rate.
 */
static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
				    enum ieee80211_band band,
				    struct iwl_scale_tbl_info *tbl,
				    int *rate_idx)
{
	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
	u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
	u8 mcs;

	/* Start from a clean table and resolve the rate index first */
	memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
	*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);

	if (*rate_idx == IWL_RATE_INVALID) {
		*rate_idx = -1;
		return -EINVAL;
	}
	tbl->is_SGI = 0;	/* default legacy setup */
	tbl->is_ht40 = 0;
	tbl->is_dup = 0;
	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
	tbl->lq_type = LQ_NONE;
	tbl->max_search = IWL_MAX_SEARCH;

	/* legacy rate format */
	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
		/* only single-antenna legacy rates map to a LQ type */
		if (num_of_ant == 1) {
			if (band == IEEE80211_BAND_5GHZ)
				tbl->lq_type = LQ_A;
			else
				tbl->lq_type = LQ_G;
		}
	/* HT rate format */
	} else {
		if (rate_n_flags & RATE_MCS_SGI_MSK)
			tbl->is_SGI = 1;

		/* HT40 covers both true 40 MHz and duplicate mode */
		if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
		    (rate_n_flags & RATE_MCS_DUP_MSK))
			tbl->is_ht40 = 1;

		if (rate_n_flags & RATE_MCS_DUP_MSK)
			tbl->is_dup = 1;

		mcs = rs_extract_rate(rate_n_flags);

		/* SISO */
		if (mcs <= IWL_RATE_SISO_60M_PLCP) {
			if (num_of_ant == 1)
				tbl->lq_type = LQ_SISO; /*else NONE*/
		/* MIMO2 */
		} else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
			if (num_of_ant == 2)
				tbl->lq_type = LQ_MIMO2;
		/* MIMO3 */
		} else {
			if (num_of_ant == 3) {
				tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
				tbl->lq_type = LQ_MIMO3;
			}
		}
	}
	return 0;
}
675
676/* switch to another antenna/antennas and return 1 */
677/* if no other valid antenna found, return 0 */
678static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
679 struct iwl_scale_tbl_info *tbl)
680{
681 u8 new_ant_type;
682
683 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
684 return 0;
685
686 if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
687 return 0;
688
689 new_ant_type = ant_toggle_lookup[tbl->ant_type];
690
691 while ((new_ant_type != tbl->ant_type) &&
692 !rs_is_valid_ant(valid_ant, new_ant_type))
693 new_ant_type = ant_toggle_lookup[new_ant_type];
694
695 if (new_ant_type == tbl->ant_type)
696 return 0;
697
698 tbl->ant_type = new_ant_type;
699 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
700 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
701 return 1;
702}
703
704/**
705 * Green-field mode is valid if the station supports it and
706 * there are no non-GF stations present in the BSS.
707 */
708static bool rs_use_green(struct ieee80211_sta *sta)
709{
710 /*
711 * There's a bug somewhere in this code that causes the
712 * scaling to get stuck because GF+SGI can't be combined
713 * in SISO rates. Until we find that bug, disable GF, it
714 * has only limited benefit and we still interoperate with
715 * GF APs since we can always receive GF transmissions.
716 */
717 return false;
718}
719
720/**
721 * rs_get_supported_rates - get the available rates
722 *
723 * if management frame or broadcast frame only return
724 * basic available rates.
725 *
726 */
727static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
728 struct ieee80211_hdr *hdr,
729 enum iwl_table_type rate_type)
730{
731 if (is_legacy(rate_type)) {
732 return lq_sta->active_legacy_rate;
733 } else {
734 if (is_siso(rate_type))
735 return lq_sta->active_siso_rate;
736 else if (is_mimo2(rate_type))
737 return lq_sta->active_mimo2_rate;
738 else
739 return lq_sta->active_mimo3_rate;
740 }
741}
742
743static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
744 int rate_type)
745{
746 u8 high = IWL_RATE_INVALID;
747 u8 low = IWL_RATE_INVALID;
748
749 /* 802.11A or ht walks to the next literal adjacent rate in
750 * the rate table */
751 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
752 int i;
753 u32 mask;
754
755 /* Find the previous rate that is in the rate mask */
756 i = index - 1;
757 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
758 if (rate_mask & mask) {
759 low = i;
760 break;
761 }
762 }
763
764 /* Find the next rate that is in the rate mask */
765 i = index + 1;
766 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
767 if (rate_mask & mask) {
768 high = i;
769 break;
770 }
771 }
772
773 return (high << 8) | low;
774 }
775
776 low = index;
777 while (low != IWL_RATE_INVALID) {
778 low = iwl_rates[low].prev_rs;
779 if (low == IWL_RATE_INVALID)
780 break;
781 if (rate_mask & (1 << low))
782 break;
783 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
784 }
785
786 high = index;
787 while (high != IWL_RATE_INVALID) {
788 high = iwl_rates[high].next_rs;
789 if (high == IWL_RATE_INVALID)
790 break;
791 if (rate_mask & (1 << high))
792 break;
793 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
794 }
795
796 return (high << 8) | low;
797}
798
/*
 * Pick a lower rate to fall back to, possibly dropping from HT to legacy.
 *
 * When @ht_possible is 0, or the current rate is already the lowest HT
 * index, the table is rewritten in place as a legacy (LQ_A/LQ_G) single
 * antenna configuration first.  Returns the uCode rate_n_flags encoding
 * of the chosen rate.
 */
static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
			     struct iwl_scale_tbl_info *tbl,
			     u8 scale_index, u8 ht_possible)
{
	s32 low;
	u16 rate_mask;
	u16 high_low;
	u8 switch_to_legacy = 0;
	u8 is_green = lq_sta->is_green;
	struct iwl_priv *priv = lq_sta->drv;

	/* check if we need to switch from HT to legacy rates.
	 * assumption is that mandatory rates (1Mbps or 6Mbps)
	 * are always supported (spec demand) */
	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
		switch_to_legacy = 1;
		scale_index = rs_ht_to_legacy[scale_index];
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			tbl->lq_type = LQ_A;
		else
			tbl->lq_type = LQ_G;

		/* legacy transmits on a single antenna */
		if (num_of_ant(tbl->ant_type) > 1)
			tbl->ant_type =
			    first_antenna(priv->nvm_data->valid_tx_ant);

		tbl->is_ht40 = 0;
		tbl->is_SGI = 0;
		tbl->max_search = IWL_MAX_SEARCH;
	}

	rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);

	/* Mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		/* supp_rates has no CCK bits in A mode */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate_mask = (u16)(rate_mask &
			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
		else
			rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
	}

	/* If we switched from HT to legacy, check current rate */
	if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
		low = scale_index;
		goto out;
	}

	high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
					tbl->lq_type);
	low = high_low & 0xff;

	/* no lower rate available: keep the current one */
	if (low == IWL_RATE_INVALID)
		low = scale_index;

out:
	return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
858
859/*
860 * Simple function to compare two rate scale table types
861 */
862static bool table_type_matches(struct iwl_scale_tbl_info *a,
863 struct iwl_scale_tbl_info *b)
864{
865 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
866 (a->is_SGI == b->is_SGI);
867}
868
869static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
870 struct iwl_lq_sta *lq_sta)
871{
872 struct iwl_scale_tbl_info *tbl;
873 bool full_concurrent = priv->bt_full_concurrent;
874
875 if (priv->bt_ant_couple_ok) {
876 /*
877 * Is there a need to switch between
878 * full concurrency and 3-wire?
879 */
880 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
881 full_concurrent = true;
882 else
883 full_concurrent = false;
884 }
885 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
886 (priv->bt_full_concurrent != full_concurrent)) {
887 priv->bt_full_concurrent = full_concurrent;
888 priv->last_bt_traffic_load = priv->bt_traffic_load;
889
890 /* Update uCode's rate table. */
891 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
892 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
893 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
894
895 queue_work(priv->workqueue, &priv->bt_full_concurrency);
896 }
897}
898
/*
 * mac80211 sends us Tx status
 *
 * Validates that the reported initial rate matches the latest Link
 * Quality command, folds the success/failure outcome into the matching
 * rate-scale window(s), and then runs the rate-scale algorithm to look
 * for a better rate or modulation mode.
 */
static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
			 struct ieee80211_sta *sta, void *priv_sta,
			 struct sk_buff *skb)
{
	int legacy_success;
	int retries;
	int rs_index, mac_index, i;
	struct iwl_lq_sta *lq_sta = priv_sta;
	struct iwl_link_quality_cmd *table;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_op_mode *op_mode = (struct iwl_op_mode *)priv_r;
	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	enum mac80211_rate_control_flags mac_flags;
	u32 tx_rate;
	struct iwl_scale_tbl_info tbl_type;
	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_rxon_context *ctx = sta_priv->ctx;

	IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (!lq_sta) {
		IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
		return;
	} else if (!lq_sta->drv) {
		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
		return;
	}

	/* only ACK-able data frames carry useful rate feedback */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    info->flags & IEEE80211_TX_CTL_NO_ACK)
		return;

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	/*
	 * Ignore this Tx frame response if its initial rate doesn't match
	 * that of latest Link Quality command. There may be stragglers
	 * from a previous Link Quality command, but we're no longer interested
	 * in those; they're either from the "active" mode while we're trying
	 * to check "search" mode, or a prior "search" mode after we've moved
	 * to a new "search" mode (which might become the new "active" mode).
	 */
	table = &lq_sta->lq;
	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
	rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
	if (priv->band == IEEE80211_BAND_5GHZ)
		rs_index -= IWL_FIRST_OFDM_RATE;
	mac_flags = info->status.rates[0].flags;
	mac_index = info->status.rates[0].idx;
	/* For HT packets, map MCS to PLCP */
	if (mac_flags & IEEE80211_TX_RC_MCS) {
		mac_index &= RATE_MCS_CODE_MSK;	/* Remove # of streams */
		/* skip the 9M index, which has no HT equivalent */
		if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
			mac_index++;
		/*
		 * mac80211 HT index is always zero-indexed; we need to move
		 * HT OFDM rates after CCK rates in 2.4 GHz band
		 */
		if (priv->band == IEEE80211_BAND_2GHZ)
			mac_index += IWL_FIRST_OFDM_RATE;
	}
	/* Here we actually compare this rate to the latest LQ command */
	if ((mac_index < 0) ||
	    (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
	    (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
	    (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
	    (tbl_type.ant_type != info->status.antenna) ||
	    (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
	    (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
	    (rs_index != mac_index)) {
		IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate);
		/*
		 * Since rates mis-match, the last LQ command may have failed.
		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
		 * ... driver.
		 */
		lq_sta->missed_rate_counter++;
		if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
			lq_sta->missed_rate_counter = 0;
			iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
		}
		/* Regardless, ignore this status info for outdated rate */
		return;
	} else
		/* Rate did match, so reset the missed_rate_counter */
		lq_sta->missed_rate_counter = 0;

	/* Figure out if rate scale algorithm is in active or search table */
	if (table_type_matches(&tbl_type,
				&(lq_sta->lq_info[lq_sta->active_tbl]))) {
		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
	} else if (table_type_matches(&tbl_type,
				&lq_sta->lq_info[1 - lq_sta->active_tbl])) {
		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	} else {
		IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
		IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
			tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
		/*
		 * no matching table found, let's by-pass the data collection
		 * and continue to perform rate scale to find the rate table
		 */
		rs_stay_in_table(lq_sta, true);
		goto done;
	}

	/*
	 * Updating the frame history depends on whether packets were
	 * aggregated.
	 *
	 * For aggregation, all packets were transmitted at the same rate, the
	 * first index into rate scale table.
	 */
	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
		rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
				&rs_index);
		rs_collect_tx_data(curr_tbl, rs_index,
				   info->status.ampdu_len,
				   info->status.ampdu_ack_len);

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += info->status.ampdu_ack_len;
			lq_sta->total_failed += (info->status.ampdu_len -
					info->status.ampdu_ack_len);
		}
	} else {
	/*
	 * For legacy, update frame history with for each Tx retry.
	 */
		retries = info->status.rates[0].count - 1;
		/* HW doesn't send more than 15 retries */
		retries = min(retries, 15);

		/* The last transmission may have been successful */
		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
		/* Collect data for each rate used during failed TX attempts */
		for (i = 0; i <= retries; ++i) {
			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
			rs_get_tbl_info_from_mcs(tx_rate, priv->band,
					&tbl_type, &rs_index);
			/*
			 * Only collect stats if retried rate is in the same RS
			 * table as active/search.
			 */
			if (table_type_matches(&tbl_type, curr_tbl))
				tmp_tbl = curr_tbl;
			else if (table_type_matches(&tbl_type, other_tbl))
				tmp_tbl = other_tbl;
			else
				continue;
			/* only the final attempt can have succeeded */
			rs_collect_tx_data(tmp_tbl, rs_index, 1,
					   i < retries ? 0 : legacy_success);
		}

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += legacy_success;
			lq_sta->total_failed += retries + (1 - legacy_success);
		}
	}
	/* The last TX rate is cached in lq_sta; it's set in if/else above */
	lq_sta->last_rate_n_flags = tx_rate;
done:
	/* See if there's a better rate or modulation mode to try. */
	if (sta && sta->supp_rates[sband->band])
		rs_rate_scale_perform(priv, skb, sta, lq_sta);

#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
	if ((priv->tm_fixed_rate) &&
	    (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
		rs_program_fix_rate(priv, lq_sta);
#endif
	if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
		rs_bt_update_lq(priv, ctx, lq_sta);
}
1093
1094/*
1095 * Begin a period of staying with a selected modulation mode.
1096 * Set "stay_in_tbl" flag to prevent any mode switches.
1097 * Set frame tx success limits according to legacy vs. high-throughput,
1098 * and reset overall (spanning all rates) tx success history statistics.
1099 * These control how long we stay using same modulation mode before
1100 * searching for a new mode.
1101 */
1102static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1103 struct iwl_lq_sta *lq_sta)
1104{
1105 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1106 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1107 if (is_legacy) {
1108 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1109 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1110 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1111 } else {
1112 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1113 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1114 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1115 }
1116 lq_sta->table_count = 0;
1117 lq_sta->total_failed = 0;
1118 lq_sta->total_success = 0;
1119 lq_sta->flush_timer = jiffies;
1120 lq_sta->action_counter = 0;
1121}
1122
1123/*
1124 * Find correct throughput table for given mode of modulation
1125 */
1126static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1127 struct iwl_scale_tbl_info *tbl)
1128{
1129 /* Used to choose among HT tables */
1130 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1131
1132 /* Check for invalid LQ type */
1133 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1134 tbl->expected_tpt = expected_tpt_legacy;
1135 return;
1136 }
1137
1138 /* Legacy rates have only one table */
1139 if (is_legacy(tbl->lq_type)) {
1140 tbl->expected_tpt = expected_tpt_legacy;
1141 return;
1142 }
1143
1144 /* Choose among many HT tables depending on number of streams
1145 * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
1146 * status */
1147 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1148 ht_tbl_pointer = expected_tpt_siso20MHz;
1149 else if (is_siso(tbl->lq_type))
1150 ht_tbl_pointer = expected_tpt_siso40MHz;
1151 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1152 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1153 else if (is_mimo2(tbl->lq_type))
1154 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1155 else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1156 ht_tbl_pointer = expected_tpt_mimo3_20MHz;
1157 else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
1158 ht_tbl_pointer = expected_tpt_mimo3_40MHz;
1159
1160 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1161 tbl->expected_tpt = ht_tbl_pointer[0];
1162 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1163 tbl->expected_tpt = ht_tbl_pointer[1];
1164 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1165 tbl->expected_tpt = ht_tbl_pointer[2];
1166 else /* AGG+SGI */
1167 tbl->expected_tpt = ht_tbl_pointer[3];
1168}
1169
/*
 * Find starting rate for new "search" high-throughput mode of modulation.
 * Goal is to find lowest expected rate (under perfect conditions) that is
 * above the current measured throughput of "active" mode, to give new mode
 * a fair chance to prove itself without too many challenges.
 *
 * This gets called when transitioning to more aggressive modulation
 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
 * to decrease to match "active" throughput. When moving from MIMO to SISO,
 * bit rate will typically need to increase, but not if performance was bad.
 */
static s32 rs_get_best_rate(struct iwl_priv *priv,
			    struct iwl_lq_sta *lq_sta,
			    struct iwl_scale_tbl_info *tbl,	/* "search" */
			    u16 rate_mask, s8 index)
{
	/* "active" values */
	struct iwl_scale_tbl_info *active_tbl =
	    &(lq_sta->lq_info[lq_sta->active_tbl]);
	s32 active_sr = active_tbl->win[index].success_ratio;
	s32 active_tpt = active_tbl->expected_tpt[index];

	/* expected "search" throughput */
	s32 *tpt_tbl = tbl->expected_tpt;

	s32 new_rate, high, low, start_hi;
	u16 high_low;
	s8 rate = index;

	new_rate = high = low = start_hi = IWL_RATE_INVALID;

	/* Binary-search-like walk: move down while the candidate is "too
	 * fast" for measured performance, up while it is "too slow";
	 * reversing direction terminates the loop. */
	for (; ;) {
		high_low = rs_get_adjacent_rate(priv, rate, rate_mask,
						tbl->lq_type);

		low = high_low & 0xff;
		high = (high_low >> 8) & 0xff;

		/*
		 * Lower the "search" bit rate, to give new "search" mode
		 * approximately the same throughput as "active" if:
		 *
		 * 1) "Active" mode has been working modestly well (but not
		 *    great), and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above the actual
		 *    measured "active" throughput (but less than expected
		 *    "active" throughput under perfect conditions).
		 * OR
		 * 2) "Active" mode has been working perfectly or very well
		 *    and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above expected
		 *    "active" throughput (under perfect conditions).
		 */
		if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
		     ((active_sr > IWL_RATE_DECREASE_TH) &&
		      (active_sr <= IWL_RATE_HIGH_TH) &&
		      (tpt_tbl[rate] <= active_tpt))) ||
		    ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
		     (tpt_tbl[rate] > active_tpt))) {

			/* (2nd or later pass)
			 * If we've already tried to raise the rate, and are
			 * now trying to lower it, use the higher rate. */
			if (start_hi != IWL_RATE_INVALID) {
				new_rate = start_hi;
				break;
			}

			new_rate = rate;

			/* Loop again with lower rate */
			if (low != IWL_RATE_INVALID)
				rate = low;

			/* Lower rate not available, use the original */
			else
				break;

		/* Else try to raise the "search" rate to match "active" */
		} else {
			/* (2nd or later pass)
			 * If we've already tried to lower the rate, and are
			 * now trying to raise it, use the lower rate. */
			if (new_rate != IWL_RATE_INVALID)
				break;

			/* Loop again with higher rate */
			else if (high != IWL_RATE_INVALID) {
				start_hi = high;
				rate = high;

			/* Higher rate not available, use the original */
			} else {
				new_rate = rate;
				break;
			}
		}
	}

	return new_rate;
}
1272
1273/*
1274 * Set up search table for MIMO2
1275 */
1276static int rs_switch_to_mimo2(struct iwl_priv *priv,
1277 struct iwl_lq_sta *lq_sta,
1278 struct ieee80211_conf *conf,
1279 struct ieee80211_sta *sta,
1280 struct iwl_scale_tbl_info *tbl, int index)
1281{
1282 u16 rate_mask;
1283 s32 rate;
1284 s8 is_green = lq_sta->is_green;
1285 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1286 struct iwl_rxon_context *ctx = sta_priv->ctx;
1287
1288 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1289 return -1;
1290
1291 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1292 == WLAN_HT_CAP_SM_PS_STATIC)
1293 return -1;
1294
1295 /* Need both Tx chains/antennas to support MIMO */
1296 if (priv->hw_params.tx_chains_num < 2)
1297 return -1;
1298
1299 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1300
1301 tbl->lq_type = LQ_MIMO2;
1302 tbl->is_dup = lq_sta->is_dup;
1303 tbl->action = 0;
1304 tbl->max_search = IWL_MAX_SEARCH;
1305 rate_mask = lq_sta->active_mimo2_rate;
1306
1307 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1308 tbl->is_ht40 = 1;
1309 else
1310 tbl->is_ht40 = 0;
1311
1312 rs_set_expected_tpt_table(lq_sta, tbl);
1313
1314 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1315
1316 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1317 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1318 IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
1319 rate, rate_mask);
1320 return -1;
1321 }
1322 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1323
1324 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1325 tbl->current_rate, is_green);
1326 return 0;
1327}
1328
1329/*
1330 * Set up search table for MIMO3
1331 */
1332static int rs_switch_to_mimo3(struct iwl_priv *priv,
1333 struct iwl_lq_sta *lq_sta,
1334 struct ieee80211_conf *conf,
1335 struct ieee80211_sta *sta,
1336 struct iwl_scale_tbl_info *tbl, int index)
1337{
1338 u16 rate_mask;
1339 s32 rate;
1340 s8 is_green = lq_sta->is_green;
1341 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1342 struct iwl_rxon_context *ctx = sta_priv->ctx;
1343
1344 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1345 return -1;
1346
1347 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1348 == WLAN_HT_CAP_SM_PS_STATIC)
1349 return -1;
1350
1351 /* Need both Tx chains/antennas to support MIMO */
1352 if (priv->hw_params.tx_chains_num < 3)
1353 return -1;
1354
1355 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
1356
1357 tbl->lq_type = LQ_MIMO3;
1358 tbl->is_dup = lq_sta->is_dup;
1359 tbl->action = 0;
1360 tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
1361 rate_mask = lq_sta->active_mimo3_rate;
1362
1363 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1364 tbl->is_ht40 = 1;
1365 else
1366 tbl->is_ht40 = 0;
1367
1368 rs_set_expected_tpt_table(lq_sta, tbl);
1369
1370 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1371
1372 IWL_DEBUG_RATE(priv, "LQ: MIMO3 best rate %d mask %X\n",
1373 rate, rate_mask);
1374 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1375 IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
1376 rate, rate_mask);
1377 return -1;
1378 }
1379 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1380
1381 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1382 tbl->current_rate, is_green);
1383 return 0;
1384}
1385
1386/*
1387 * Set up search table for SISO
1388 */
1389static int rs_switch_to_siso(struct iwl_priv *priv,
1390 struct iwl_lq_sta *lq_sta,
1391 struct ieee80211_conf *conf,
1392 struct ieee80211_sta *sta,
1393 struct iwl_scale_tbl_info *tbl, int index)
1394{
1395 u16 rate_mask;
1396 u8 is_green = lq_sta->is_green;
1397 s32 rate;
1398 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1399 struct iwl_rxon_context *ctx = sta_priv->ctx;
1400
1401 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1402 return -1;
1403
1404 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1405
1406 tbl->is_dup = lq_sta->is_dup;
1407 tbl->lq_type = LQ_SISO;
1408 tbl->action = 0;
1409 tbl->max_search = IWL_MAX_SEARCH;
1410 rate_mask = lq_sta->active_siso_rate;
1411
1412 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1413 tbl->is_ht40 = 1;
1414 else
1415 tbl->is_ht40 = 0;
1416
1417 if (is_green)
1418 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1419
1420 rs_set_expected_tpt_table(lq_sta, tbl);
1421 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1422
1423 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1424 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1425 IWL_DEBUG_RATE(priv, "can not switch with index %d rate mask %x\n",
1426 rate, rate_mask);
1427 return -1;
1428 }
1429 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1430 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1431 tbl->current_rate, is_green);
1432 return 0;
1433}
1434
1435/*
1436 * Try to switch to new modulation mode from legacy
1437 */
1438static int rs_move_legacy_other(struct iwl_priv *priv,
1439 struct iwl_lq_sta *lq_sta,
1440 struct ieee80211_conf *conf,
1441 struct ieee80211_sta *sta,
1442 int index)
1443{
1444 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1445 struct iwl_scale_tbl_info *search_tbl =
1446 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1447 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1448 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1449 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1450 u8 start_action;
1451 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1452 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1453 int ret = 0;
1454 u8 update_search_tbl_counter = 0;
1455
1456 switch (priv->bt_traffic_load) {
1457 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1458 /* nothing */
1459 break;
1460 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1461 /* avoid antenna B unless MIMO */
1462 if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
1463 tbl->action = IWL_LEGACY_SWITCH_SISO;
1464 break;
1465 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1466 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1467 /* avoid antenna B and MIMO */
1468 valid_tx_ant =
1469 first_antenna(priv->nvm_data->valid_tx_ant);
1470 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1471 tbl->action != IWL_LEGACY_SWITCH_SISO)
1472 tbl->action = IWL_LEGACY_SWITCH_SISO;
1473 break;
1474 default:
1475 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1476 break;
1477 }
1478
1479 if (!iwl_ht_enabled(priv))
1480 /* stay in Legacy */
1481 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1482 else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1483 tbl->action > IWL_LEGACY_SWITCH_SISO)
1484 tbl->action = IWL_LEGACY_SWITCH_SISO;
1485
1486 /* configure as 1x1 if bt full concurrency */
1487 if (priv->bt_full_concurrent) {
1488 if (!iwl_ht_enabled(priv))
1489 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1490 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1491 tbl->action = IWL_LEGACY_SWITCH_SISO;
1492 valid_tx_ant =
1493 first_antenna(priv->nvm_data->valid_tx_ant);
1494 }
1495
1496 start_action = tbl->action;
1497 for (; ;) {
1498 lq_sta->action_counter++;
1499 switch (tbl->action) {
1500 case IWL_LEGACY_SWITCH_ANTENNA1:
1501 case IWL_LEGACY_SWITCH_ANTENNA2:
1502 IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
1503
1504 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1505 tx_chains_num <= 1) ||
1506 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1507 tx_chains_num <= 2))
1508 break;
1509
1510 /* Don't change antenna if success has been great */
1511 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1512 !priv->bt_full_concurrent &&
1513 priv->bt_traffic_load ==
1514 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1515 break;
1516
1517 /* Set up search table to try other antenna */
1518 memcpy(search_tbl, tbl, sz);
1519
1520 if (rs_toggle_antenna(valid_tx_ant,
1521 &search_tbl->current_rate, search_tbl)) {
1522 update_search_tbl_counter = 1;
1523 rs_set_expected_tpt_table(lq_sta, search_tbl);
1524 goto out;
1525 }
1526 break;
1527 case IWL_LEGACY_SWITCH_SISO:
1528 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
1529
1530 /* Set up search table to try SISO */
1531 memcpy(search_tbl, tbl, sz);
1532 search_tbl->is_SGI = 0;
1533 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1534 search_tbl, index);
1535 if (!ret) {
1536 lq_sta->action_counter = 0;
1537 goto out;
1538 }
1539
1540 break;
1541 case IWL_LEGACY_SWITCH_MIMO2_AB:
1542 case IWL_LEGACY_SWITCH_MIMO2_AC:
1543 case IWL_LEGACY_SWITCH_MIMO2_BC:
1544 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
1545
1546 /* Set up search table to try MIMO */
1547 memcpy(search_tbl, tbl, sz);
1548 search_tbl->is_SGI = 0;
1549
1550 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1551 search_tbl->ant_type = ANT_AB;
1552 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1553 search_tbl->ant_type = ANT_AC;
1554 else
1555 search_tbl->ant_type = ANT_BC;
1556
1557 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1558 break;
1559
1560 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1561 search_tbl, index);
1562 if (!ret) {
1563 lq_sta->action_counter = 0;
1564 goto out;
1565 }
1566 break;
1567
1568 case IWL_LEGACY_SWITCH_MIMO3_ABC:
1569 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO3\n");
1570
1571 /* Set up search table to try MIMO3 */
1572 memcpy(search_tbl, tbl, sz);
1573 search_tbl->is_SGI = 0;
1574
1575 search_tbl->ant_type = ANT_ABC;
1576
1577 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1578 break;
1579
1580 ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
1581 search_tbl, index);
1582 if (!ret) {
1583 lq_sta->action_counter = 0;
1584 goto out;
1585 }
1586 break;
1587 }
1588 tbl->action++;
1589 if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
1590 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1591
1592 if (tbl->action == start_action)
1593 break;
1594
1595 }
1596 search_tbl->lq_type = LQ_NONE;
1597 return 0;
1598
1599out:
1600 lq_sta->search_better_tbl = 1;
1601 tbl->action++;
1602 if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
1603 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1604 if (update_search_tbl_counter)
1605 search_tbl->action = tbl->action;
1606 return 0;
1607
1608}
1609
1610/*
1611 * Try to switch to new modulation mode from SISO
1612 */
1613static int rs_move_siso_to_other(struct iwl_priv *priv,
1614 struct iwl_lq_sta *lq_sta,
1615 struct ieee80211_conf *conf,
1616 struct ieee80211_sta *sta, int index)
1617{
1618 u8 is_green = lq_sta->is_green;
1619 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1620 struct iwl_scale_tbl_info *search_tbl =
1621 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1622 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1623 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1624 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1625 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1626 u8 start_action;
1627 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1628 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1629 u8 update_search_tbl_counter = 0;
1630 int ret;
1631
1632 switch (priv->bt_traffic_load) {
1633 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1634 /* nothing */
1635 break;
1636 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1637 /* avoid antenna B unless MIMO */
1638 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
1639 tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
1640 break;
1641 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1642 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1643 /* avoid antenna B and MIMO */
1644 valid_tx_ant =
1645 first_antenna(priv->nvm_data->valid_tx_ant);
1646 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1647 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1648 break;
1649 default:
1650 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1651 break;
1652 }
1653
1654 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1655 tbl->action > IWL_SISO_SWITCH_ANTENNA2) {
1656 /* stay in SISO */
1657 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1658 }
1659
1660 /* configure as 1x1 if bt full concurrency */
1661 if (priv->bt_full_concurrent) {
1662 valid_tx_ant =
1663 first_antenna(priv->nvm_data->valid_tx_ant);
1664 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1665 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1666 }
1667
1668 start_action = tbl->action;
1669 for (;;) {
1670 lq_sta->action_counter++;
1671 switch (tbl->action) {
1672 case IWL_SISO_SWITCH_ANTENNA1:
1673 case IWL_SISO_SWITCH_ANTENNA2:
1674 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1675 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1676 tx_chains_num <= 1) ||
1677 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1678 tx_chains_num <= 2))
1679 break;
1680
1681 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1682 !priv->bt_full_concurrent &&
1683 priv->bt_traffic_load ==
1684 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1685 break;
1686
1687 memcpy(search_tbl, tbl, sz);
1688 if (rs_toggle_antenna(valid_tx_ant,
1689 &search_tbl->current_rate, search_tbl)) {
1690 update_search_tbl_counter = 1;
1691 goto out;
1692 }
1693 break;
1694 case IWL_SISO_SWITCH_MIMO2_AB:
1695 case IWL_SISO_SWITCH_MIMO2_AC:
1696 case IWL_SISO_SWITCH_MIMO2_BC:
1697 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
1698 memcpy(search_tbl, tbl, sz);
1699 search_tbl->is_SGI = 0;
1700
1701 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1702 search_tbl->ant_type = ANT_AB;
1703 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1704 search_tbl->ant_type = ANT_AC;
1705 else
1706 search_tbl->ant_type = ANT_BC;
1707
1708 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1709 break;
1710
1711 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1712 search_tbl, index);
1713 if (!ret)
1714 goto out;
1715 break;
1716 case IWL_SISO_SWITCH_GI:
1717 if (!tbl->is_ht40 && !(ht_cap->cap &
1718 IEEE80211_HT_CAP_SGI_20))
1719 break;
1720 if (tbl->is_ht40 && !(ht_cap->cap &
1721 IEEE80211_HT_CAP_SGI_40))
1722 break;
1723
1724 IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
1725
1726 memcpy(search_tbl, tbl, sz);
1727 if (is_green) {
1728 if (!tbl->is_SGI)
1729 break;
1730 else
1731 IWL_ERR(priv,
1732 "SGI was set in GF+SISO\n");
1733 }
1734 search_tbl->is_SGI = !tbl->is_SGI;
1735 rs_set_expected_tpt_table(lq_sta, search_tbl);
1736 if (tbl->is_SGI) {
1737 s32 tpt = lq_sta->last_tpt / 100;
1738 if (tpt >= search_tbl->expected_tpt[index])
1739 break;
1740 }
1741 search_tbl->current_rate =
1742 rate_n_flags_from_tbl(priv, search_tbl,
1743 index, is_green);
1744 update_search_tbl_counter = 1;
1745 goto out;
1746 case IWL_SISO_SWITCH_MIMO3_ABC:
1747 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO3\n");
1748 memcpy(search_tbl, tbl, sz);
1749 search_tbl->is_SGI = 0;
1750 search_tbl->ant_type = ANT_ABC;
1751
1752 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1753 break;
1754
1755 ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
1756 search_tbl, index);
1757 if (!ret)
1758 goto out;
1759 break;
1760 }
1761 tbl->action++;
1762 if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
1763 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1764
1765 if (tbl->action == start_action)
1766 break;
1767 }
1768 search_tbl->lq_type = LQ_NONE;
1769 return 0;
1770
1771 out:
1772 lq_sta->search_better_tbl = 1;
1773 tbl->action++;
1774 if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
1775 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1776 if (update_search_tbl_counter)
1777 search_tbl->action = tbl->action;
1778
1779 return 0;
1780}
1781
1782/*
1783 * Try to switch to new modulation mode from MIMO2
1784 */
1785static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1786 struct iwl_lq_sta *lq_sta,
1787 struct ieee80211_conf *conf,
1788 struct ieee80211_sta *sta, int index)
1789{
1790 s8 is_green = lq_sta->is_green;
1791 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1792 struct iwl_scale_tbl_info *search_tbl =
1793 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1794 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1795 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1796 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1797 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1798 u8 start_action;
1799 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1800 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1801 u8 update_search_tbl_counter = 0;
1802 int ret;
1803
1804 switch (priv->bt_traffic_load) {
1805 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1806 /* nothing */
1807 break;
1808 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1809 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1810 /* avoid antenna B and MIMO */
1811 if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
1812 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1813 break;
1814 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1815 /* avoid antenna B unless MIMO */
1816 if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
1817 tbl->action == IWL_MIMO2_SWITCH_SISO_C)
1818 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1819 break;
1820 default:
1821 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1822 break;
1823 }
1824
1825 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1826 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1827 tbl->action > IWL_MIMO2_SWITCH_SISO_C)) {
1828 /* switch in SISO */
1829 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1830 }
1831
1832 /* configure as 1x1 if bt full concurrency */
1833 if (priv->bt_full_concurrent &&
1834 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1835 tbl->action > IWL_MIMO2_SWITCH_SISO_C))
1836 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1837
1838 start_action = tbl->action;
1839 for (;;) {
1840 lq_sta->action_counter++;
1841 switch (tbl->action) {
1842 case IWL_MIMO2_SWITCH_ANTENNA1:
1843 case IWL_MIMO2_SWITCH_ANTENNA2:
1844 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
1845
1846 if (tx_chains_num <= 2)
1847 break;
1848
1849 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1850 break;
1851
1852 memcpy(search_tbl, tbl, sz);
1853 if (rs_toggle_antenna(valid_tx_ant,
1854 &search_tbl->current_rate, search_tbl)) {
1855 update_search_tbl_counter = 1;
1856 goto out;
1857 }
1858 break;
1859 case IWL_MIMO2_SWITCH_SISO_A:
1860 case IWL_MIMO2_SWITCH_SISO_B:
1861 case IWL_MIMO2_SWITCH_SISO_C:
1862 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
1863
1864 /* Set up new search table for SISO */
1865 memcpy(search_tbl, tbl, sz);
1866
1867 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1868 search_tbl->ant_type = ANT_A;
1869 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1870 search_tbl->ant_type = ANT_B;
1871 else
1872 search_tbl->ant_type = ANT_C;
1873
1874 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1875 break;
1876
1877 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1878 search_tbl, index);
1879 if (!ret)
1880 goto out;
1881
1882 break;
1883
1884 case IWL_MIMO2_SWITCH_GI:
1885 if (!tbl->is_ht40 && !(ht_cap->cap &
1886 IEEE80211_HT_CAP_SGI_20))
1887 break;
1888 if (tbl->is_ht40 && !(ht_cap->cap &
1889 IEEE80211_HT_CAP_SGI_40))
1890 break;
1891
1892 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
1893
1894 /* Set up new search table for MIMO2 */
1895 memcpy(search_tbl, tbl, sz);
1896 search_tbl->is_SGI = !tbl->is_SGI;
1897 rs_set_expected_tpt_table(lq_sta, search_tbl);
1898 /*
1899 * If active table already uses the fastest possible
1900 * modulation (dual stream with short guard interval),
1901 * and it's working well, there's no need to look
1902 * for a better type of modulation!
1903 */
1904 if (tbl->is_SGI) {
1905 s32 tpt = lq_sta->last_tpt / 100;
1906 if (tpt >= search_tbl->expected_tpt[index])
1907 break;
1908 }
1909 search_tbl->current_rate =
1910 rate_n_flags_from_tbl(priv, search_tbl,
1911 index, is_green);
1912 update_search_tbl_counter = 1;
1913 goto out;
1914
1915 case IWL_MIMO2_SWITCH_MIMO3_ABC:
1916 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to MIMO3\n");
1917 memcpy(search_tbl, tbl, sz);
1918 search_tbl->is_SGI = 0;
1919 search_tbl->ant_type = ANT_ABC;
1920
1921 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1922 break;
1923
1924 ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
1925 search_tbl, index);
1926 if (!ret)
1927 goto out;
1928
1929 break;
1930 }
1931 tbl->action++;
1932 if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
1933 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1934
1935 if (tbl->action == start_action)
1936 break;
1937 }
1938 search_tbl->lq_type = LQ_NONE;
1939 return 0;
1940 out:
1941 lq_sta->search_better_tbl = 1;
1942 tbl->action++;
1943 if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
1944 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1945 if (update_search_tbl_counter)
1946 search_tbl->action = tbl->action;
1947
1948 return 0;
1949
1950}
1951
1952/*
1953 * Try to switch to new modulation mode from MIMO3
1954 */
1955static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1956 struct iwl_lq_sta *lq_sta,
1957 struct ieee80211_conf *conf,
1958 struct ieee80211_sta *sta, int index)
1959{
1960 s8 is_green = lq_sta->is_green;
1961 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1962 struct iwl_scale_tbl_info *search_tbl =
1963 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1964 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1965 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1966 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1967 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1968 u8 start_action;
1969 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1970 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1971 int ret;
1972 u8 update_search_tbl_counter = 0;
1973
1974 switch (priv->bt_traffic_load) {
1975 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1976 /* nothing */
1977 break;
1978 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1979 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1980 /* avoid antenna B and MIMO */
1981 if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
1982 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1983 break;
1984 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1985 /* avoid antenna B unless MIMO */
1986 if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
1987 tbl->action == IWL_MIMO3_SWITCH_SISO_C)
1988 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1989 break;
1990 default:
1991 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1992 break;
1993 }
1994
1995 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1996 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
1997 tbl->action > IWL_MIMO3_SWITCH_SISO_C)) {
1998 /* switch in SISO */
1999 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
2000 }
2001
2002 /* configure as 1x1 if bt full concurrency */
2003 if (priv->bt_full_concurrent &&
2004 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
2005 tbl->action > IWL_MIMO3_SWITCH_SISO_C))
2006 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
2007
2008 start_action = tbl->action;
2009 for (;;) {
2010 lq_sta->action_counter++;
2011 switch (tbl->action) {
2012 case IWL_MIMO3_SWITCH_ANTENNA1:
2013 case IWL_MIMO3_SWITCH_ANTENNA2:
2014 IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle Antennas\n");
2015
2016 if (tx_chains_num <= 3)
2017 break;
2018
2019 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
2020 break;
2021
2022 memcpy(search_tbl, tbl, sz);
2023 if (rs_toggle_antenna(valid_tx_ant,
2024 &search_tbl->current_rate, search_tbl))
2025 goto out;
2026 break;
2027 case IWL_MIMO3_SWITCH_SISO_A:
2028 case IWL_MIMO3_SWITCH_SISO_B:
2029 case IWL_MIMO3_SWITCH_SISO_C:
2030 IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to SISO\n");
2031
2032 /* Set up new search table for SISO */
2033 memcpy(search_tbl, tbl, sz);
2034
2035 if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
2036 search_tbl->ant_type = ANT_A;
2037 else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
2038 search_tbl->ant_type = ANT_B;
2039 else
2040 search_tbl->ant_type = ANT_C;
2041
2042 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
2043 break;
2044
2045 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
2046 search_tbl, index);
2047 if (!ret)
2048 goto out;
2049
2050 break;
2051
2052 case IWL_MIMO3_SWITCH_MIMO2_AB:
2053 case IWL_MIMO3_SWITCH_MIMO2_AC:
2054 case IWL_MIMO3_SWITCH_MIMO2_BC:
2055 IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to MIMO2\n");
2056
2057 memcpy(search_tbl, tbl, sz);
2058 search_tbl->is_SGI = 0;
2059 if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
2060 search_tbl->ant_type = ANT_AB;
2061 else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
2062 search_tbl->ant_type = ANT_AC;
2063 else
2064 search_tbl->ant_type = ANT_BC;
2065
2066 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
2067 break;
2068
2069 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
2070 search_tbl, index);
2071 if (!ret)
2072 goto out;
2073
2074 break;
2075
2076 case IWL_MIMO3_SWITCH_GI:
2077 if (!tbl->is_ht40 && !(ht_cap->cap &
2078 IEEE80211_HT_CAP_SGI_20))
2079 break;
2080 if (tbl->is_ht40 && !(ht_cap->cap &
2081 IEEE80211_HT_CAP_SGI_40))
2082 break;
2083
2084 IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle SGI/NGI\n");
2085
2086 /* Set up new search table for MIMO */
2087 memcpy(search_tbl, tbl, sz);
2088 search_tbl->is_SGI = !tbl->is_SGI;
2089 rs_set_expected_tpt_table(lq_sta, search_tbl);
2090 /*
2091 * If active table already uses the fastest possible
2092 * modulation (dual stream with short guard interval),
2093 * and it's working well, there's no need to look
2094 * for a better type of modulation!
2095 */
2096 if (tbl->is_SGI) {
2097 s32 tpt = lq_sta->last_tpt / 100;
2098 if (tpt >= search_tbl->expected_tpt[index])
2099 break;
2100 }
2101 search_tbl->current_rate =
2102 rate_n_flags_from_tbl(priv, search_tbl,
2103 index, is_green);
2104 update_search_tbl_counter = 1;
2105 goto out;
2106 }
2107 tbl->action++;
2108 if (tbl->action > IWL_MIMO3_SWITCH_GI)
2109 tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
2110
2111 if (tbl->action == start_action)
2112 break;
2113 }
2114 search_tbl->lq_type = LQ_NONE;
2115 return 0;
2116 out:
2117 lq_sta->search_better_tbl = 1;
2118 tbl->action++;
2119 if (tbl->action > IWL_MIMO3_SWITCH_GI)
2120 tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
2121 if (update_search_tbl_counter)
2122 search_tbl->action = tbl->action;
2123
2124 return 0;
2125
2126}
2127
2128/*
2129 * Check whether we should continue using same modulation mode, or
2130 * begin search for a new mode, based on:
2131 * 1) # tx successes or failures while using this mode
2132 * 2) # times calling this function
2133 * 3) elapsed time in this mode (not used, for now)
2134 */
2135static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
2136{
2137 struct iwl_scale_tbl_info *tbl;
2138 int i;
2139 int active_tbl;
2140 int flush_interval_passed = 0;
2141 struct iwl_priv *priv;
2142
2143 priv = lq_sta->drv;
2144 active_tbl = lq_sta->active_tbl;
2145
2146 tbl = &(lq_sta->lq_info[active_tbl]);
2147
2148 /* If we've been disallowing search, see if we should now allow it */
2149 if (lq_sta->stay_in_tbl) {
2150
2151 /* Elapsed time using current modulation mode */
2152 if (lq_sta->flush_timer)
2153 flush_interval_passed =
2154 time_after(jiffies,
2155 (unsigned long)(lq_sta->flush_timer +
2156 IWL_RATE_SCALE_FLUSH_INTVL));
2157
2158 /*
2159 * Check if we should allow search for new modulation mode.
2160 * If many frames have failed or succeeded, or we've used
2161 * this same modulation for a long time, allow search, and
2162 * reset history stats that keep track of whether we should
2163 * allow a new search. Also (below) reset all bitmaps and
2164 * stats in active history.
2165 */
2166 if (force_search ||
2167 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
2168 (lq_sta->total_success > lq_sta->max_success_limit) ||
2169 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
2170 && (flush_interval_passed))) {
2171 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
2172 lq_sta->total_failed,
2173 lq_sta->total_success,
2174 flush_interval_passed);
2175
2176 /* Allow search for new mode */
2177 lq_sta->stay_in_tbl = 0; /* only place reset */
2178 lq_sta->total_failed = 0;
2179 lq_sta->total_success = 0;
2180 lq_sta->flush_timer = 0;
2181
2182 /*
2183 * Else if we've used this modulation mode enough repetitions
2184 * (regardless of elapsed time or success/failure), reset
2185 * history bitmaps and rate-specific stats for all rates in
2186 * active table.
2187 */
2188 } else {
2189 lq_sta->table_count++;
2190 if (lq_sta->table_count >=
2191 lq_sta->table_count_limit) {
2192 lq_sta->table_count = 0;
2193
2194 IWL_DEBUG_RATE(priv, "LQ: stay in table clear win\n");
2195 for (i = 0; i < IWL_RATE_COUNT; i++)
2196 rs_rate_scale_clear_window(
2197 &(tbl->win[i]));
2198 }
2199 }
2200
2201 /* If transitioning to allow "search", reset all history
2202 * bitmaps and stats in active table (this will become the new
2203 * "search" table). */
2204 if (!lq_sta->stay_in_tbl) {
2205 for (i = 0; i < IWL_RATE_COUNT; i++)
2206 rs_rate_scale_clear_window(&(tbl->win[i]));
2207 }
2208 }
2209}
2210
2211/*
2212 * setup rate table in uCode
2213 */
2214static void rs_update_rate_tbl(struct iwl_priv *priv,
2215 struct iwl_rxon_context *ctx,
2216 struct iwl_lq_sta *lq_sta,
2217 struct iwl_scale_tbl_info *tbl,
2218 int index, u8 is_green)
2219{
2220 u32 rate;
2221
2222 /* Update uCode's rate table. */
2223 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
2224 rs_fill_link_cmd(priv, lq_sta, rate);
2225 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2226}
2227
2228/*
2229 * Do rate scaling and search for new modulation mode.
2230 */
2231static void rs_rate_scale_perform(struct iwl_priv *priv,
2232 struct sk_buff *skb,
2233 struct ieee80211_sta *sta,
2234 struct iwl_lq_sta *lq_sta)
2235{
2236 struct ieee80211_hw *hw = priv->hw;
2237 struct ieee80211_conf *conf = &hw->conf;
2238 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2239 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2240 int low = IWL_RATE_INVALID;
2241 int high = IWL_RATE_INVALID;
2242 int index;
2243 int i;
2244 struct iwl_rate_scale_data *window = NULL;
2245 int current_tpt = IWL_INVALID_VALUE;
2246 int low_tpt = IWL_INVALID_VALUE;
2247 int high_tpt = IWL_INVALID_VALUE;
2248 u32 fail_count;
2249 s8 scale_action = 0;
2250 u16 rate_mask;
2251 u8 update_lq = 0;
2252 struct iwl_scale_tbl_info *tbl, *tbl1;
2253 u16 rate_scale_index_msk = 0;
2254 u8 is_green = 0;
2255 u8 active_tbl = 0;
2256 u8 done_search = 0;
2257 u16 high_low;
2258 s32 sr;
2259 u8 tid = IWL_MAX_TID_COUNT;
2260 struct iwl_tid_data *tid_data;
2261 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2262 struct iwl_rxon_context *ctx = sta_priv->ctx;
2263
2264 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
2265
2266 /* Send management frames and NO_ACK data using lowest rate. */
2267 /* TODO: this could probably be improved.. */
2268 if (!ieee80211_is_data(hdr->frame_control) ||
2269 info->flags & IEEE80211_TX_CTL_NO_ACK)
2270 return;
2271
2272 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
2273
2274 tid = rs_tl_add_packet(lq_sta, hdr);
2275 if ((tid != IWL_MAX_TID_COUNT) &&
2276 (lq_sta->tx_agg_tid_en & (1 << tid))) {
2277 tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid];
2278 if (tid_data->agg.state == IWL_AGG_OFF)
2279 lq_sta->is_agg = 0;
2280 else
2281 lq_sta->is_agg = 1;
2282 } else
2283 lq_sta->is_agg = 0;
2284
2285 /*
2286 * Select rate-scale / modulation-mode table to work with in
2287 * the rest of this function: "search" if searching for better
2288 * modulation mode, or "active" if doing rate scaling within a mode.
2289 */
2290 if (!lq_sta->search_better_tbl)
2291 active_tbl = lq_sta->active_tbl;
2292 else
2293 active_tbl = 1 - lq_sta->active_tbl;
2294
2295 tbl = &(lq_sta->lq_info[active_tbl]);
2296 if (is_legacy(tbl->lq_type))
2297 lq_sta->is_green = 0;
2298 else
2299 lq_sta->is_green = rs_use_green(sta);
2300 is_green = lq_sta->is_green;
2301
2302 /* current tx rate */
2303 index = lq_sta->last_txrate_idx;
2304
2305 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
2306 tbl->lq_type);
2307
2308 /* rates available for this association, and for modulation mode */
2309 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
2310
2311 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
2312
2313 /* mask with station rate restriction */
2314 if (is_legacy(tbl->lq_type)) {
2315 if (lq_sta->band == IEEE80211_BAND_5GHZ)
2316 /* supp_rates has no CCK bits in A mode */
2317 rate_scale_index_msk = (u16) (rate_mask &
2318 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
2319 else
2320 rate_scale_index_msk = (u16) (rate_mask &
2321 lq_sta->supp_rates);
2322
2323 } else
2324 rate_scale_index_msk = rate_mask;
2325
2326 if (!rate_scale_index_msk)
2327 rate_scale_index_msk = rate_mask;
2328
2329 if (!((1 << index) & rate_scale_index_msk)) {
2330 IWL_ERR(priv, "Current Rate is not valid\n");
2331 if (lq_sta->search_better_tbl) {
2332 /* revert to active table if search table is not valid*/
2333 tbl->lq_type = LQ_NONE;
2334 lq_sta->search_better_tbl = 0;
2335 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2336 /* get "active" rate info */
2337 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2338 rs_update_rate_tbl(priv, ctx, lq_sta, tbl,
2339 index, is_green);
2340 }
2341 return;
2342 }
2343
2344 /* Get expected throughput table and history window for current rate */
2345 if (!tbl->expected_tpt) {
2346 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
2347 return;
2348 }
2349
2350 /* force user max rate if set by user */
2351 if ((lq_sta->max_rate_idx != -1) &&
2352 (lq_sta->max_rate_idx < index)) {
2353 index = lq_sta->max_rate_idx;
2354 update_lq = 1;
2355 window = &(tbl->win[index]);
2356 goto lq_update;
2357 }
2358
2359 window = &(tbl->win[index]);
2360
2361 /*
2362 * If there is not enough history to calculate actual average
2363 * throughput, keep analyzing results of more tx frames, without
2364 * changing rate or mode (bypass most of the rest of this function).
2365 * Set up new rate table in uCode only if old rate is not supported
2366 * in current association (use new rate found above).
2367 */
2368 fail_count = window->counter - window->success_counter;
2369 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
2370 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
2371 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
2372 "for index %d\n",
2373 window->success_counter, window->counter, index);
2374
2375 /* Can't calculate this yet; not enough history */
2376 window->average_tpt = IWL_INVALID_VALUE;
2377
2378 /* Should we stay with this modulation mode,
2379 * or search for a new one? */
2380 rs_stay_in_table(lq_sta, false);
2381
2382 goto out;
2383 }
2384 /* Else we have enough samples; calculate estimate of
2385 * actual average throughput */
2386 if (window->average_tpt != ((window->success_ratio *
2387 tbl->expected_tpt[index] + 64) / 128)) {
2388 IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
2389 window->average_tpt = ((window->success_ratio *
2390 tbl->expected_tpt[index] + 64) / 128);
2391 }
2392
2393 /* If we are searching for better modulation mode, check success. */
2394 if (lq_sta->search_better_tbl &&
2395 (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI)) {
2396 /* If good success, continue using the "search" mode;
2397 * no need to send new link quality command, since we're
2398 * continuing to use the setup that we've been trying. */
2399 if (window->average_tpt > lq_sta->last_tpt) {
2400
2401 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
2402 "suc=%d cur-tpt=%d old-tpt=%d\n",
2403 window->success_ratio,
2404 window->average_tpt,
2405 lq_sta->last_tpt);
2406
2407 if (!is_legacy(tbl->lq_type))
2408 lq_sta->enable_counter = 1;
2409
2410 /* Swap tables; "search" becomes "active" */
2411 lq_sta->active_tbl = active_tbl;
2412 current_tpt = window->average_tpt;
2413
2414 /* Else poor success; go back to mode in "active" table */
2415 } else {
2416
2417 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
2418 "suc=%d cur-tpt=%d old-tpt=%d\n",
2419 window->success_ratio,
2420 window->average_tpt,
2421 lq_sta->last_tpt);
2422
2423 /* Nullify "search" table */
2424 tbl->lq_type = LQ_NONE;
2425
2426 /* Revert to "active" table */
2427 active_tbl = lq_sta->active_tbl;
2428 tbl = &(lq_sta->lq_info[active_tbl]);
2429
2430 /* Revert to "active" rate and throughput info */
2431 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2432 current_tpt = lq_sta->last_tpt;
2433
2434 /* Need to set up a new rate table in uCode */
2435 update_lq = 1;
2436 }
2437
2438 /* Either way, we've made a decision; modulation mode
2439 * search is done, allow rate adjustment next time. */
2440 lq_sta->search_better_tbl = 0;
2441 done_search = 1; /* Don't switch modes below! */
2442 goto lq_update;
2443 }
2444
2445 /* (Else) not in search of better modulation mode, try for better
2446 * starting rate, while staying in this mode. */
2447 high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk,
2448 tbl->lq_type);
2449 low = high_low & 0xff;
2450 high = (high_low >> 8) & 0xff;
2451
2452 /* If user set max rate, dont allow higher than user constrain */
2453 if ((lq_sta->max_rate_idx != -1) &&
2454 (lq_sta->max_rate_idx < high))
2455 high = IWL_RATE_INVALID;
2456
2457 sr = window->success_ratio;
2458
2459 /* Collect measured throughputs for current and adjacent rates */
2460 current_tpt = window->average_tpt;
2461 if (low != IWL_RATE_INVALID)
2462 low_tpt = tbl->win[low].average_tpt;
2463 if (high != IWL_RATE_INVALID)
2464 high_tpt = tbl->win[high].average_tpt;
2465
2466 scale_action = 0;
2467
2468 /* Too many failures, decrease rate */
2469 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
2470 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
2471 scale_action = -1;
2472
2473 /* No throughput measured yet for adjacent rates; try increase. */
2474 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2475 (high_tpt == IWL_INVALID_VALUE)) {
2476
2477 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2478 scale_action = 1;
2479 else if (low != IWL_RATE_INVALID)
2480 scale_action = 0;
2481 }
2482
2483 /* Both adjacent throughputs are measured, but neither one has better
2484 * throughput; we're using the best rate, don't change it! */
2485 else if ((low_tpt != IWL_INVALID_VALUE) &&
2486 (high_tpt != IWL_INVALID_VALUE) &&
2487 (low_tpt < current_tpt) &&
2488 (high_tpt < current_tpt))
2489 scale_action = 0;
2490
2491 /* At least one adjacent rate's throughput is measured,
2492 * and may have better performance. */
2493 else {
2494 /* Higher adjacent rate's throughput is measured */
2495 if (high_tpt != IWL_INVALID_VALUE) {
2496 /* Higher rate has better throughput */
2497 if (high_tpt > current_tpt &&
2498 sr >= IWL_RATE_INCREASE_TH) {
2499 scale_action = 1;
2500 } else {
2501 scale_action = 0;
2502 }
2503
2504 /* Lower adjacent rate's throughput is measured */
2505 } else if (low_tpt != IWL_INVALID_VALUE) {
2506 /* Lower rate has better throughput */
2507 if (low_tpt > current_tpt) {
2508 IWL_DEBUG_RATE(priv,
2509 "decrease rate because of low tpt\n");
2510 scale_action = -1;
2511 } else if (sr >= IWL_RATE_INCREASE_TH) {
2512 scale_action = 1;
2513 }
2514 }
2515 }
2516
2517 /* Sanity check; asked for decrease, but success rate or throughput
2518 * has been good at old rate. Don't change it. */
2519 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2520 ((sr > IWL_RATE_HIGH_TH) ||
2521 (current_tpt > (100 * tbl->expected_tpt[low]))))
2522 scale_action = 0;
2523 if (!iwl_ht_enabled(priv) && !is_legacy(tbl->lq_type))
2524 scale_action = -1;
2525 if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI &&
2526 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type)))
2527 scale_action = -1;
2528
2529 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2530 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2531 if (lq_sta->last_bt_traffic > priv->bt_traffic_load) {
2532 /*
2533 * don't set scale_action, don't want to scale up if
2534 * the rate scale doesn't otherwise think that is a
2535 * good idea.
2536 */
2537 } else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) {
2538 scale_action = -1;
2539 }
2540 }
2541 lq_sta->last_bt_traffic = priv->bt_traffic_load;
2542
2543 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2544 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2545 /* search for a new modulation */
2546 rs_stay_in_table(lq_sta, true);
2547 goto lq_update;
2548 }
2549
2550 switch (scale_action) {
2551 case -1:
2552 /* Decrease starting rate, update uCode's rate table */
2553 if (low != IWL_RATE_INVALID) {
2554 update_lq = 1;
2555 index = low;
2556 }
2557
2558 break;
2559 case 1:
2560 /* Increase starting rate, update uCode's rate table */
2561 if (high != IWL_RATE_INVALID) {
2562 update_lq = 1;
2563 index = high;
2564 }
2565
2566 break;
2567 case 0:
2568 /* No change */
2569 default:
2570 break;
2571 }
2572
2573 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2574 "high %d type %d\n",
2575 index, scale_action, low, high, tbl->lq_type);
2576
2577lq_update:
2578 /* Replace uCode's rate table for the destination station. */
2579 if (update_lq)
2580 rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green);
2581
2582 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
2583 /* Should we stay with this modulation mode,
2584 * or search for a new one? */
2585 rs_stay_in_table(lq_sta, false);
2586 }
2587 /*
2588 * Search for new modulation mode if we're:
2589 * 1) Not changing rates right now
2590 * 2) Not just finishing up a search
2591 * 3) Allowing a new search
2592 */
2593 if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) {
2594 /* Save current throughput to compare with "search" throughput*/
2595 lq_sta->last_tpt = current_tpt;
2596
2597 /* Select a new "search" modulation mode to try.
2598 * If one is found, set up the new "search" table. */
2599 if (is_legacy(tbl->lq_type))
2600 rs_move_legacy_other(priv, lq_sta, conf, sta, index);
2601 else if (is_siso(tbl->lq_type))
2602 rs_move_siso_to_other(priv, lq_sta, conf, sta, index);
2603 else if (is_mimo2(tbl->lq_type))
2604 rs_move_mimo2_to_other(priv, lq_sta, conf, sta, index);
2605 else
2606 rs_move_mimo3_to_other(priv, lq_sta, conf, sta, index);
2607
2608 /* If new "search" mode was selected, set up in uCode table */
2609 if (lq_sta->search_better_tbl) {
2610 /* Access the "search" table, clear its history. */
2611 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2612 for (i = 0; i < IWL_RATE_COUNT; i++)
2613 rs_rate_scale_clear_window(&(tbl->win[i]));
2614
2615 /* Use new "search" start rate */
2616 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2617
2618 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
2619 tbl->current_rate, index);
2620 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2621 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2622 } else
2623 done_search = 1;
2624 }
2625
2626 if (done_search && !lq_sta->stay_in_tbl) {
2627 /* If the "active" (non-search) mode was legacy,
2628 * and we've tried switching antennas,
2629 * but we haven't been able to try HT modes (not available),
2630 * stay with best antenna legacy modulation for a while
2631 * before next round of mode comparisons. */
2632 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2633 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2634 lq_sta->action_counter > tbl1->max_search) {
2635 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2636 rs_set_stay_in_table(priv, 1, lq_sta);
2637 }
2638
2639 /* If we're in an HT mode, and all 3 mode switch actions
2640 * have been tried and compared, stay in this best modulation
2641 * mode for a while before next round of mode comparisons. */
2642 if (lq_sta->enable_counter &&
2643 (lq_sta->action_counter >= tbl1->max_search) &&
2644 iwl_ht_enabled(priv)) {
2645 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2646 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2647 (tid != IWL_MAX_TID_COUNT)) {
2648 u8 sta_id = lq_sta->lq.sta_id;
2649 tid_data = &priv->tid_data[sta_id][tid];
2650 if (tid_data->agg.state == IWL_AGG_OFF) {
2651 IWL_DEBUG_RATE(priv,
2652 "try to aggregate tid %d\n",
2653 tid);
2654 rs_tl_turn_on_agg(priv, tid,
2655 lq_sta, sta);
2656 }
2657 }
2658 rs_set_stay_in_table(priv, 0, lq_sta);
2659 }
2660 }
2661
2662out:
2663 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
2664 lq_sta->last_txrate_idx = index;
2665}
2666
2667/**
2668 * rs_initialize_lq - Initialize a station's hardware rate table
2669 *
2670 * The uCode's station table contains a table of fallback rates
2671 * for automatic fallback during transmission.
2672 *
2673 * NOTE: This sets up a default set of values. These will be replaced later
2674 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
2675 * rc80211_simple.
2676 *
2677 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2678 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2679 * which requires station table entry to exist).
2680 */
2681static void rs_initialize_lq(struct iwl_priv *priv,
2682 struct ieee80211_sta *sta,
2683 struct iwl_lq_sta *lq_sta)
2684{
2685 struct iwl_scale_tbl_info *tbl;
2686 int rate_idx;
2687 int i;
2688 u32 rate;
2689 u8 use_green = rs_use_green(sta);
2690 u8 active_tbl = 0;
2691 u8 valid_tx_ant;
2692 struct iwl_station_priv *sta_priv;
2693 struct iwl_rxon_context *ctx;
2694
2695 if (!sta || !lq_sta)
2696 return;
2697
2698 sta_priv = (void *)sta->drv_priv;
2699 ctx = sta_priv->ctx;
2700
2701 i = lq_sta->last_txrate_idx;
2702
2703 valid_tx_ant = priv->nvm_data->valid_tx_ant;
2704
2705 if (!lq_sta->search_better_tbl)
2706 active_tbl = lq_sta->active_tbl;
2707 else
2708 active_tbl = 1 - lq_sta->active_tbl;
2709
2710 tbl = &(lq_sta->lq_info[active_tbl]);
2711
2712 if ((i < 0) || (i >= IWL_RATE_COUNT))
2713 i = 0;
2714
2715 rate = iwl_rates[i].plcp;
2716 tbl->ant_type = first_antenna(valid_tx_ant);
2717 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2718
2719 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2720 rate |= RATE_MCS_CCK_MSK;
2721
2722 rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2723 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2724 rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2725
2726 rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2727 tbl->current_rate = rate;
2728 rs_set_expected_tpt_table(lq_sta, tbl);
2729 rs_fill_link_cmd(NULL, lq_sta, rate);
2730 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2731 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2732}
2733
/*
 * rs_get_rate - mac80211 rate-control hook: choose the tx rate for one skb.
 *
 * Translates the rate most recently selected by the scaling algorithm
 * (lq_sta->last_txrate_idx / last_rate_n_flags) into mac80211's
 * info->control.rates[0] index + flags format for this frame.
 */
static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
			struct ieee80211_tx_rate_control *txrc)
{

	struct sk_buff *skb = txrc->skb;
	struct ieee80211_supported_band *sband = txrc->sband;
	struct iwl_op_mode *op_mode __maybe_unused =
			(struct iwl_op_mode *)priv_r;
	struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_lq_sta *lq_sta = priv_sta;
	int rate_idx;

	IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");

	/* Get max rate if user set max rate */
	if (lq_sta) {
		lq_sta->max_rate_idx = txrc->max_rate_idx;
		/* On 5 GHz the mac80211 index space starts at the first OFDM
		 * rate; shift the user limit into the driver's index space. */
		if ((sband->band == IEEE80211_BAND_5GHZ) &&
		    (lq_sta->max_rate_idx != -1))
			lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
		/* -1 means "no user limit" */
		if ((lq_sta->max_rate_idx < 0) ||
		    (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
			lq_sta->max_rate_idx = -1;
	}

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (lq_sta && !lq_sta->drv) {
		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
		priv_sta = NULL;
	}

	/* Send management frames and NO_ACK data using lowest rate. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	rate_idx = lq_sta->last_txrate_idx;

	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
		/* Convert the driver's rate index into a mac80211 MCS index:
		 * drop the legacy-OFDM offset, then account for the shared
		 * 6M/9M slot and the per-stream MCS block for MIMO2/MIMO3. */
		rate_idx -= IWL_FIRST_OFDM_RATE;
		/* 6M and 9M shared same MCS index */
		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
		if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
		    IWL_RATE_MIMO3_6M_PLCP)
			rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
		else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
			 IWL_RATE_MIMO2_6M_PLCP)
			rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
		/* Mirror the firmware rate flags into mac80211 tx-rate flags. */
		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
			info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
			info->control.rates[0].flags |= IEEE80211_TX_RC_DUP_DATA;
		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
			info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
			info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
	} else {
		/* Check for invalid rates */
		if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
				((sband->band == IEEE80211_BAND_5GHZ) &&
				 (rate_idx < IWL_FIRST_OFDM_RATE)))
			rate_idx = rate_lowest_index(sband, sta);
		/* On valid 5 GHz rate, adjust index */
		else if (sband->band == IEEE80211_BAND_5GHZ)
			rate_idx -= IWL_FIRST_OFDM_RATE;
		info->control.rates[0].flags = 0;
	}
	info->control.rates[0].idx = rate_idx;

}
2805
2806static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2807 gfp_t gfp)
2808{
2809 struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2810 struct iwl_op_mode *op_mode __maybe_unused =
2811 (struct iwl_op_mode *)priv_rate;
2812 struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
2813
2814 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2815
2816 return &sta_priv->lq_sta;
2817}
2818
2819/*
2820 * Called after adding a new station to initialize rate scaling
2821 */
2822void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
2823{
2824 int i, j;
2825 struct ieee80211_hw *hw = priv->hw;
2826 struct ieee80211_conf *conf = &priv->hw->conf;
2827 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2828 struct iwl_station_priv *sta_priv;
2829 struct iwl_lq_sta *lq_sta;
2830 struct ieee80211_supported_band *sband;
2831 unsigned long supp; /* must be unsigned long for for_each_set_bit */
2832
2833 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2834 lq_sta = &sta_priv->lq_sta;
2835 sband = hw->wiphy->bands[conf->channel->band];
2836
2837
2838 lq_sta->lq.sta_id = sta_id;
2839
2840 for (j = 0; j < LQ_SIZE; j++)
2841 for (i = 0; i < IWL_RATE_COUNT; i++)
2842 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2843
2844 lq_sta->flush_timer = 0;
2845 lq_sta->supp_rates = sta->supp_rates[sband->band];
2846 for (j = 0; j < LQ_SIZE; j++)
2847 for (i = 0; i < IWL_RATE_COUNT; i++)
2848 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2849
2850 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
2851 sta_id);
2852 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2853 * the lowest or the highest rate.. Could consider using RSSI from
2854 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2855 * after assoc.. */
2856
2857 lq_sta->is_dup = 0;
2858 lq_sta->max_rate_idx = -1;
2859 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2860 lq_sta->is_green = rs_use_green(sta);
2861 lq_sta->band = sband->band;
2862 /*
2863 * active legacy rates as per supported rates bitmap
2864 */
2865 supp = sta->supp_rates[sband->band];
2866 lq_sta->active_legacy_rate = 0;
2867 for_each_set_bit(i, &supp, BITS_PER_LONG)
2868 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
2869
2870 /*
2871 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2872 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2873 */
2874 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2875 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2876 lq_sta->active_siso_rate &= ~((u16)0x2);
2877 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2878
2879 /* Same here */
2880 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2881 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2882 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2883 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2884
2885 lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
2886 lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
2887 lq_sta->active_mimo3_rate &= ~((u16)0x2);
2888 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
2889
2890 IWL_DEBUG_RATE(priv, "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
2891 lq_sta->active_siso_rate,
2892 lq_sta->active_mimo2_rate,
2893 lq_sta->active_mimo3_rate);
2894
2895 /* These values will be overridden later */
2896 lq_sta->lq.general_params.single_stream_ant_msk =
2897 first_antenna(priv->nvm_data->valid_tx_ant);
2898 lq_sta->lq.general_params.dual_stream_ant_msk =
2899 priv->nvm_data->valid_tx_ant &
2900 ~first_antenna(priv->nvm_data->valid_tx_ant);
2901 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2902 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2903 } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
2904 lq_sta->lq.general_params.dual_stream_ant_msk =
2905 priv->nvm_data->valid_tx_ant;
2906 }
2907
2908 /* as default allow aggregation for all tids */
2909 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2910 lq_sta->drv = priv;
2911
2912 /* Set last_txrate_idx to lowest rate */
2913 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2914 if (sband->band == IEEE80211_BAND_5GHZ)
2915 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2916 lq_sta->is_agg = 0;
2917#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
2918 priv->tm_fixed_rate = 0;
2919#endif
2920#ifdef CONFIG_MAC80211_DEBUGFS
2921 lq_sta->dbg_fixed_rate = 0;
2922#endif
2923
2924 rs_initialize_lq(priv, sta, lq_sta);
2925}
2926
/*
 * rs_fill_link_cmd - build the REPLY_TX_LINK_QUALITY_CMD retry-rate table.
 *
 * Starting from new_rate (a rate_n_flags value), fills all
 * LINK_QUAL_MAX_RETRY_NUM entries of lq_sta->lq.rs_table: each rate is
 * repeated a number of times (legacy vs HT), then the next lower rate is
 * chosen via rs_get_lower_rate(), with HT disallowed after the first
 * step-down.  Also fills the aggregation parameters.
 *
 * @priv may be NULL (rs_initialize_lq() calls this with NULL before the
 * device-specific state is consulted); all priv dereferences are guarded.
 */
static void rs_fill_link_cmd(struct iwl_priv *priv,
			     struct iwl_lq_sta *lq_sta, u32 new_rate)
{
	struct iwl_scale_tbl_info tbl_type;
	int index = 0;
	int rate_idx;
	int repeat_rate = 0;
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;
	u8 valid_tx_ant = 0;
	struct iwl_station_priv *sta_priv =
		container_of(lq_sta, struct iwl_station_priv, lq_sta);
	struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (index 0) if needed for debug purposes */
	rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

	/* Interpret new_rate (rate_n_flags) */
	rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
				 &tbl_type, &rate_idx);

	if (priv && priv->bt_full_concurrent) {
		/* 1x1 only */
		tbl_type.ant_type =
			first_antenna(priv->nvm_data->valid_tx_ant);
	}

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IWL_NUMBER_TRY;
	} else {
		repeat_rate = min(IWL_HT_NUMBER_TRY,
				  LINK_QUAL_AGG_DISABLE_START_DEF - 1);
	}

	lq_cmd->general_params.mimo_delimiter =
			is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (index 0) */
	lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

	if (num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
						tbl_type.ant_type;
	} else if (num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk =
						tbl_type.ant_type;
	} /* otherwise we don't modify the existing value */

	index++;
	repeat_rate--;
	/* BT full-concurrency restricts tx to antenna A only. */
	if (priv) {
		if (priv->bt_full_concurrent)
			valid_tx_ant = ANT_A;
		else
			valid_tx_ant = priv->nvm_data->valid_tx_ant;
	}

	/* Fill rest of rate table */
	while (index < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
			if (is_legacy(tbl_type.lq_type)) {
				/* Toggle antennas periodically on legacy
				 * rates to spread retries across chains. */
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (priv &&
					 rs_toggle_antenna(valid_tx_ant,
							&new_rate, &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

			/* Fill next table entry */
			lq_cmd->rs_table[index].rate_n_flags =
					cpu_to_le32(new_rate);
			repeat_rate--;
			index++;
		}

		rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
					 &rate_idx);

		if (priv && priv->bt_full_concurrent) {
			/* 1x1 only */
			tbl_type.ant_type =
			    first_antenna(priv->nvm_data->valid_tx_ant);
		}

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = index;

		/* Get next rate */
		new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
					     use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (priv &&
				 rs_toggle_antenna(valid_tx_ant,
						   &new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IWL_NUMBER_TRY;
		} else {
			repeat_rate = IWL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

		/* Fill next table entry */
		lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

		index++;
		repeat_rate--;
	}

	/* 0 means "no bufsize negotiated yet": fall back to the default. */
	lq_cmd->agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
	/*
	 * overwrite if needed, pass aggregation time limit
	 * to uCode in uSec
	 */
	if (priv && priv->cfg->bt_params &&
	    priv->cfg->bt_params->agg_time_limit &&
	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
		lq_cmd->agg_params.agg_time_limit =
			cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
}
3074
3075static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
3076{
3077 return hw->priv;
3078}
3079/* rate scale requires free function to be implemented */
static void rs_free(void *priv_rate)
{
	/* Nothing to release: rs_alloc() hands out a borrowed pointer. */
}
3084
/*
 * mac80211 rate-control hook: per-station teardown.  The iwl_lq_sta is
 * embedded in the station's drv_priv (see rs_alloc_sta()), so there is
 * nothing to free here; only trace entry/exit for debugging.
 */
static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
			void *priv_sta)
{
	struct iwl_op_mode *op_mode __maybe_unused = priv_r;
	struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);

	IWL_DEBUG_RATE(priv, "enter\n");
	IWL_DEBUG_RATE(priv, "leave\n");
}
3094
3095#ifdef CONFIG_MAC80211_DEBUGFS
3096static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3097 u32 *rate_n_flags, int index)
3098{
3099 struct iwl_priv *priv;
3100 u8 valid_tx_ant;
3101 u8 ant_sel_tx;
3102
3103 priv = lq_sta->drv;
3104 valid_tx_ant = priv->nvm_data->valid_tx_ant;
3105 if (lq_sta->dbg_fixed_rate) {
3106 ant_sel_tx =
3107 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
3108 >> RATE_MCS_ANT_POS);
3109 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
3110 *rate_n_flags = lq_sta->dbg_fixed_rate;
3111 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
3112 } else {
3113 lq_sta->dbg_fixed_rate = 0;
3114 IWL_ERR(priv,
3115 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
3116 ant_sel_tx, valid_tx_ant);
3117 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
3118 }
3119 } else {
3120 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
3121 }
3122}
3123
3124static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
3125 const char __user *user_buf, size_t count, loff_t *ppos)
3126{
3127 struct iwl_lq_sta *lq_sta = file->private_data;
3128 struct iwl_priv *priv;
3129 char buf[64];
3130 size_t buf_size;
3131 u32 parsed_rate;
3132
3133
3134 priv = lq_sta->drv;
3135 memset(buf, 0, sizeof(buf));
3136 buf_size = min(count, sizeof(buf) - 1);
3137 if (copy_from_user(buf, user_buf, buf_size))
3138 return -EFAULT;
3139
3140 if (sscanf(buf, "%x", &parsed_rate) == 1)
3141 lq_sta->dbg_fixed_rate = parsed_rate;
3142 else
3143 lq_sta->dbg_fixed_rate = 0;
3144
3145 rs_program_fix_rate(priv, lq_sta);
3146
3147 return count;
3148}
3149
3150static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3151 char __user *user_buf, size_t count, loff_t *ppos)
3152{
3153 char *buff;
3154 int desc = 0;
3155 int i = 0;
3156 int index = 0;
3157 ssize_t ret;
3158
3159 struct iwl_lq_sta *lq_sta = file->private_data;
3160 struct iwl_priv *priv;
3161 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
3162
3163 priv = lq_sta->drv;
3164 buff = kmalloc(1024, GFP_KERNEL);
3165 if (!buff)
3166 return -ENOMEM;
3167
3168 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
3169 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
3170 lq_sta->total_failed, lq_sta->total_success,
3171 lq_sta->active_legacy_rate);
3172 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3173 lq_sta->dbg_fixed_rate);
3174 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3175 (priv->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
3176 (priv->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
3177 (priv->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
3178 desc += sprintf(buff+desc, "lq type %s\n",
3179 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
3180 if (is_Ht(tbl->lq_type)) {
3181 desc += sprintf(buff+desc, " %s",
3182 (is_siso(tbl->lq_type)) ? "SISO" :
3183 ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
3184 desc += sprintf(buff+desc, " %s",
3185 (tbl->is_ht40) ? "40MHz" : "20MHz");
3186 desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
3187 (lq_sta->is_green) ? "GF enabled" : "",
3188 (lq_sta->is_agg) ? "AGG on" : "");
3189 }
3190 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
3191 lq_sta->last_rate_n_flags);
3192 desc += sprintf(buff+desc, "general:"
3193 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
3194 lq_sta->lq.general_params.flags,
3195 lq_sta->lq.general_params.mimo_delimiter,
3196 lq_sta->lq.general_params.single_stream_ant_msk,
3197 lq_sta->lq.general_params.dual_stream_ant_msk);
3198
3199 desc += sprintf(buff+desc, "agg:"
3200 "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
3201 le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
3202 lq_sta->lq.agg_params.agg_dis_start_th,
3203 lq_sta->lq.agg_params.agg_frame_cnt_limit);
3204
3205 desc += sprintf(buff+desc,
3206 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
3207 lq_sta->lq.general_params.start_rate_index[0],
3208 lq_sta->lq.general_params.start_rate_index[1],
3209 lq_sta->lq.general_params.start_rate_index[2],
3210 lq_sta->lq.general_params.start_rate_index[3]);
3211
3212 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3213 index = iwl_hwrate_to_plcp_idx(
3214 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
3215 if (is_legacy(tbl->lq_type)) {
3216 desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
3217 i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
3218 iwl_rate_mcs[index].mbps);
3219 } else {
3220 desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps (%s)\n",
3221 i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
3222 iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
3223 }
3224 }
3225
3226 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3227 kfree(buff);
3228 return ret;
3229}
3230
/* debugfs "rate_scale_table": read current state, write a fixed debug rate. */
static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
	.write = rs_sta_dbgfs_scale_table_write,
	.read = rs_sta_dbgfs_scale_table_read,
	.open = simple_open,
	.llseek = default_llseek,
};
3237static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
3238 char __user *user_buf, size_t count, loff_t *ppos)
3239{
3240 char *buff;
3241 int desc = 0;
3242 int i, j;
3243 ssize_t ret;
3244
3245 struct iwl_lq_sta *lq_sta = file->private_data;
3246
3247 buff = kmalloc(1024, GFP_KERNEL);
3248 if (!buff)
3249 return -ENOMEM;
3250
3251 for (i = 0; i < LQ_SIZE; i++) {
3252 desc += sprintf(buff+desc,
3253 "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
3254 "rate=0x%X\n",
3255 lq_sta->active_tbl == i ? "*" : "x",
3256 lq_sta->lq_info[i].lq_type,
3257 lq_sta->lq_info[i].is_SGI,
3258 lq_sta->lq_info[i].is_ht40,
3259 lq_sta->lq_info[i].is_dup,
3260 lq_sta->is_green,
3261 lq_sta->lq_info[i].current_rate);
3262 for (j = 0; j < IWL_RATE_COUNT; j++) {
3263 desc += sprintf(buff+desc,
3264 "counter=%d success=%d %%=%d\n",
3265 lq_sta->lq_info[i].win[j].counter,
3266 lq_sta->lq_info[i].win[j].success_counter,
3267 lq_sta->lq_info[i].win[j].success_ratio);
3268 }
3269 }
3270 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3271 kfree(buff);
3272 return ret;
3273}
3274
/* debugfs "rate_stats_table": read-only dump of both rate-scale tables. */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = rs_sta_dbgfs_stats_table_read,
	.open = simple_open,
	.llseek = default_llseek,
};
3280
3281static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
3282 char __user *user_buf, size_t count, loff_t *ppos)
3283{
3284 struct iwl_lq_sta *lq_sta = file->private_data;
3285 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
3286 char buff[120];
3287 int desc = 0;
3288
3289 if (is_Ht(tbl->lq_type))
3290 desc += sprintf(buff+desc,
3291 "Bit Rate= %d Mb/s\n",
3292 tbl->expected_tpt[lq_sta->last_txrate_idx]);
3293 else
3294 desc += sprintf(buff+desc,
3295 "Bit Rate= %d Mb/s\n",
3296 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
3297
3298 return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3299}
3300
/* debugfs "rate_scale_data": read-only current bit-rate report. */
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
	.read = rs_sta_dbgfs_rate_scale_data_read,
	.open = simple_open,
	.llseek = default_llseek,
};
3306
/*
 * Create the per-station rate-scaling debugfs entries under @dir:
 * rate_scale_table (rw), rate_stats_table (ro), rate_scale_data (ro)
 * and the tx_agg_tid_enable bitmask (rw u8).
 */
static void rs_add_debugfs(void *priv, void *priv_sta,
					struct dentry *dir)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	lq_sta->rs_sta_dbgfs_scale_table_file =
		debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
				lq_sta, &rs_sta_dbgfs_scale_table_ops);
	lq_sta->rs_sta_dbgfs_stats_table_file =
		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_stats_table_ops);
	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
		debugfs_create_file("rate_scale_data", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
		&lq_sta->tx_agg_tid_en);

}
3325
/* Tear down the per-station debugfs entries created by rs_add_debugfs(). */
static void rs_remove_debugfs(void *priv, void *priv_sta)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
3334#endif
3335
3336/*
3337 * Initialization of rate scaling information is done by driver after
3338 * the station is added. Since mac80211 calls this function before a
3339 * station is added we ignore it.
3340 */
static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
			 struct ieee80211_sta *sta, void *priv_sta)
{
	/* Intentionally empty: mac80211 calls this before the station is
	 * added to the device; the real initialization is performed by
	 * iwl_rs_rate_init() afterwards. */
}
/*
 * mac80211 rate-control algorithm descriptor for this driver.
 * rate_init is a stub; rate scaling is actually initialized from
 * iwl_rs_rate_init() once the station has been added to the device.
 */
static struct rate_control_ops rs_ops = {
	.module = NULL,
	.name = RS_NAME,
	.tx_status = rs_tx_status,
	.get_rate = rs_get_rate,
	.rate_init = rs_rate_init_stub,
	.alloc = rs_alloc,
	.free = rs_free,
	.alloc_sta = rs_alloc_sta,
	.free_sta = rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = rs_add_debugfs,
	.remove_sta_debugfs = rs_remove_debugfs,
#endif
};
3360
3361int iwlagn_rate_control_register(void)
3362{
3363 return ieee80211_rate_control_register(&rs_ops);
3364}
3365
/* Unregister this rate-scaling algorithm from mac80211's rate-control core. */
void iwlagn_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_ops);
}
3370
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
deleted file mode 100644
index ad3aea8f626..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ /dev/null
@@ -1,433 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_agn_rs_h__
28#define __iwl_agn_rs_h__
29
30#include <net/mac80211.h>
31
32#include "iwl-config.h"
33
34#include "commands.h"
35
36struct iwl_rate_info {
37 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
38 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
39 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
40 u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
41 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
42 u8 prev_ieee; /* previous rate in IEEE speeds */
43 u8 next_ieee; /* next rate in IEEE speeds */
44 u8 prev_rs; /* previous rate used in rs algo */
45 u8 next_rs; /* next rate used in rs algo */
46 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
47 u8 next_rs_tgg; /* next rate used in TGG rs algo */
48};
49
50/*
51 * These serve as indexes into
52 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
53 */
54enum {
55 IWL_RATE_1M_INDEX = 0,
56 IWL_RATE_2M_INDEX,
57 IWL_RATE_5M_INDEX,
58 IWL_RATE_11M_INDEX,
59 IWL_RATE_6M_INDEX,
60 IWL_RATE_9M_INDEX,
61 IWL_RATE_12M_INDEX,
62 IWL_RATE_18M_INDEX,
63 IWL_RATE_24M_INDEX,
64 IWL_RATE_36M_INDEX,
65 IWL_RATE_48M_INDEX,
66 IWL_RATE_54M_INDEX,
67 IWL_RATE_60M_INDEX,
68 IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/
69 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
70 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
71 IWL_RATE_INVALID = IWL_RATE_COUNT,
72};
73
74enum {
75 IWL_RATE_6M_INDEX_TABLE = 0,
76 IWL_RATE_9M_INDEX_TABLE,
77 IWL_RATE_12M_INDEX_TABLE,
78 IWL_RATE_18M_INDEX_TABLE,
79 IWL_RATE_24M_INDEX_TABLE,
80 IWL_RATE_36M_INDEX_TABLE,
81 IWL_RATE_48M_INDEX_TABLE,
82 IWL_RATE_54M_INDEX_TABLE,
83 IWL_RATE_1M_INDEX_TABLE,
84 IWL_RATE_2M_INDEX_TABLE,
85 IWL_RATE_5M_INDEX_TABLE,
86 IWL_RATE_11M_INDEX_TABLE,
87 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
88};
89
90enum {
91 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
92 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
93 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
94 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
95};
96
97/* #define vs. enum to keep from defaulting to 'large integer' */
98#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
99#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
100#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
101#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
102#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
103#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
104#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
105#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
106#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
107#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
108#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
109#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
110#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
111
112/* uCode API values for legacy bit rates, both OFDM and CCK */
113enum {
114 IWL_RATE_6M_PLCP = 13,
115 IWL_RATE_9M_PLCP = 15,
116 IWL_RATE_12M_PLCP = 5,
117 IWL_RATE_18M_PLCP = 7,
118 IWL_RATE_24M_PLCP = 9,
119 IWL_RATE_36M_PLCP = 11,
120 IWL_RATE_48M_PLCP = 1,
121 IWL_RATE_54M_PLCP = 3,
122 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
123 IWL_RATE_1M_PLCP = 10,
124 IWL_RATE_2M_PLCP = 20,
125 IWL_RATE_5M_PLCP = 55,
126 IWL_RATE_11M_PLCP = 110,
127 /*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */
128 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
129};
130
131/* uCode API values for OFDM high-throughput (HT) bit rates */
132enum {
133 IWL_RATE_SISO_6M_PLCP = 0,
134 IWL_RATE_SISO_12M_PLCP = 1,
135 IWL_RATE_SISO_18M_PLCP = 2,
136 IWL_RATE_SISO_24M_PLCP = 3,
137 IWL_RATE_SISO_36M_PLCP = 4,
138 IWL_RATE_SISO_48M_PLCP = 5,
139 IWL_RATE_SISO_54M_PLCP = 6,
140 IWL_RATE_SISO_60M_PLCP = 7,
141 IWL_RATE_MIMO2_6M_PLCP = 0x8,
142 IWL_RATE_MIMO2_12M_PLCP = 0x9,
143 IWL_RATE_MIMO2_18M_PLCP = 0xa,
144 IWL_RATE_MIMO2_24M_PLCP = 0xb,
145 IWL_RATE_MIMO2_36M_PLCP = 0xc,
146 IWL_RATE_MIMO2_48M_PLCP = 0xd,
147 IWL_RATE_MIMO2_54M_PLCP = 0xe,
148 IWL_RATE_MIMO2_60M_PLCP = 0xf,
149 IWL_RATE_MIMO3_6M_PLCP = 0x10,
150 IWL_RATE_MIMO3_12M_PLCP = 0x11,
151 IWL_RATE_MIMO3_18M_PLCP = 0x12,
152 IWL_RATE_MIMO3_24M_PLCP = 0x13,
153 IWL_RATE_MIMO3_36M_PLCP = 0x14,
154 IWL_RATE_MIMO3_48M_PLCP = 0x15,
155 IWL_RATE_MIMO3_54M_PLCP = 0x16,
156 IWL_RATE_MIMO3_60M_PLCP = 0x17,
157 IWL_RATE_SISO_INVM_PLCP,
158 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
159 IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
160};
161
162/* MAC header values for bit rates */
163enum {
164 IWL_RATE_6M_IEEE = 12,
165 IWL_RATE_9M_IEEE = 18,
166 IWL_RATE_12M_IEEE = 24,
167 IWL_RATE_18M_IEEE = 36,
168 IWL_RATE_24M_IEEE = 48,
169 IWL_RATE_36M_IEEE = 72,
170 IWL_RATE_48M_IEEE = 96,
171 IWL_RATE_54M_IEEE = 108,
172 IWL_RATE_60M_IEEE = 120,
173 IWL_RATE_1M_IEEE = 2,
174 IWL_RATE_2M_IEEE = 4,
175 IWL_RATE_5M_IEEE = 11,
176 IWL_RATE_11M_IEEE = 22,
177};
178
179#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
180
181#define IWL_INVALID_VALUE -1
182
183#define IWL_MIN_RSSI_VAL -100
184#define IWL_MAX_RSSI_VAL 0
185
186/* These values specify how many Tx frame attempts before
187 * searching for a new modulation mode */
188#define IWL_LEGACY_FAILURE_LIMIT 160
189#define IWL_LEGACY_SUCCESS_LIMIT 480
190#define IWL_LEGACY_TABLE_COUNT 160
191
192#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
193#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
194#define IWL_NONE_LEGACY_TABLE_COUNT 1500
195
196/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
197#define IWL_RS_GOOD_RATIO 12800 /* 100% */
198#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
199#define IWL_RATE_HIGH_TH 10880 /* 85% */
200#define IWL_RATE_INCREASE_TH 6400 /* 50% */
201#define IWL_RATE_DECREASE_TH 1920 /* 15% */
202
203/* possible actions when in legacy mode */
204#define IWL_LEGACY_SWITCH_ANTENNA1 0
205#define IWL_LEGACY_SWITCH_ANTENNA2 1
206#define IWL_LEGACY_SWITCH_SISO 2
207#define IWL_LEGACY_SWITCH_MIMO2_AB 3
208#define IWL_LEGACY_SWITCH_MIMO2_AC 4
209#define IWL_LEGACY_SWITCH_MIMO2_BC 5
210#define IWL_LEGACY_SWITCH_MIMO3_ABC 6
211
212/* possible actions when in siso mode */
213#define IWL_SISO_SWITCH_ANTENNA1 0
214#define IWL_SISO_SWITCH_ANTENNA2 1
215#define IWL_SISO_SWITCH_MIMO2_AB 2
216#define IWL_SISO_SWITCH_MIMO2_AC 3
217#define IWL_SISO_SWITCH_MIMO2_BC 4
218#define IWL_SISO_SWITCH_GI 5
219#define IWL_SISO_SWITCH_MIMO3_ABC 6
220
221
222/* possible actions when in mimo mode */
223#define IWL_MIMO2_SWITCH_ANTENNA1 0
224#define IWL_MIMO2_SWITCH_ANTENNA2 1
225#define IWL_MIMO2_SWITCH_SISO_A 2
226#define IWL_MIMO2_SWITCH_SISO_B 3
227#define IWL_MIMO2_SWITCH_SISO_C 4
228#define IWL_MIMO2_SWITCH_GI 5
229#define IWL_MIMO2_SWITCH_MIMO3_ABC 6
230
231
232/* possible actions when in mimo3 mode */
233#define IWL_MIMO3_SWITCH_ANTENNA1 0
234#define IWL_MIMO3_SWITCH_ANTENNA2 1
235#define IWL_MIMO3_SWITCH_SISO_A 2
236#define IWL_MIMO3_SWITCH_SISO_B 3
237#define IWL_MIMO3_SWITCH_SISO_C 4
238#define IWL_MIMO3_SWITCH_MIMO2_AB 5
239#define IWL_MIMO3_SWITCH_MIMO2_AC 6
240#define IWL_MIMO3_SWITCH_MIMO2_BC 7
241#define IWL_MIMO3_SWITCH_GI 8
242
243
244#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
245#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
246
247/*FIXME:RS:add possible actions for MIMO3*/
248
249#define IWL_ACTION_LIMIT 3 /* # possible actions */
250
251#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
252
253/* load per tid defines for A-MPDU activation */
254#define IWL_AGG_TPT_THREHOLD 0
255#define IWL_AGG_LOAD_THRESHOLD 10
256#define IWL_AGG_ALL_TID 0xff
257#define TID_QUEUE_CELL_SPACING 50 /*mS */
258#define TID_QUEUE_MAX_SIZE 20
259#define TID_ROUND_VALUE 5 /* mS */
260
261#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
262#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
263
264extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
265
266enum iwl_table_type {
267 LQ_NONE,
268 LQ_G, /* legacy types */
269 LQ_A,
270 LQ_SISO, /* high-throughput types */
271 LQ_MIMO2,
272 LQ_MIMO3,
273 LQ_MAX,
274};
275
276#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
277#define is_siso(tbl) ((tbl) == LQ_SISO)
278#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
279#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
280#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
281#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
282#define is_a_band(tbl) ((tbl) == LQ_A)
283#define is_g_and(tbl) ((tbl) == LQ_G)
284
285#define IWL_MAX_MCS_DISPLAY_SIZE 12
286
287struct iwl_rate_mcs_info {
288 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
289 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
290};
291
292/**
293 * struct iwl_rate_scale_data -- tx success history for one rate
294 */
295struct iwl_rate_scale_data {
296 u64 data; /* bitmap of successful frames */
297 s32 success_counter; /* number of frames successful */
298 s32 success_ratio; /* per-cent * 128 */
299 s32 counter; /* number of frames attempted */
300 s32 average_tpt; /* success ratio * expected throughput */
301 unsigned long stamp;
302};
303
304/**
305 * struct iwl_scale_tbl_info -- tx params and success history for all rates
306 *
307 * There are two of these in struct iwl_lq_sta,
308 * one for "active", and one for "search".
309 */
310struct iwl_scale_tbl_info {
311 enum iwl_table_type lq_type;
312 u8 ant_type;
313 u8 is_SGI; /* 1 = short guard interval */
314 u8 is_ht40; /* 1 = 40 MHz channel width */
315 u8 is_dup; /* 1 = duplicated data streams */
316 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
	u8 max_search;	/* maximum number of tables we can search */
318 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
319 u32 current_rate; /* rate_n_flags, uCode API format */
320 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
321};
322
323struct iwl_traffic_load {
324 unsigned long time_stamp; /* age of the oldest statistics */
325 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
326 * slice */
327 u32 total; /* total num of packets during the
328 * last TID_MAX_TIME_DIFF */
329 u8 queue_count; /* number of queues that has
330 * been used since the last cleanup */
331 u8 head; /* start of the circular buffer */
332};
333
334/**
335 * struct iwl_lq_sta -- driver's rate scaling private structure
336 *
337 * Pointer to this gets passed back and forth between driver and mac80211.
338 */
339struct iwl_lq_sta {
340 u8 active_tbl; /* index of active table, range 0-1 */
341 u8 enable_counter; /* indicates HT mode */
342 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
343 u8 search_better_tbl; /* 1: currently trying alternate mode */
344 s32 last_tpt;
345
346 /* The following determine when to search for a new mode */
347 u32 table_count_limit;
348 u32 max_failure_limit; /* # failed frames before new search */
349 u32 max_success_limit; /* # successful frames before new search */
350 u32 table_count;
351 u32 total_failed; /* total failed frames, any/all rates */
352 u32 total_success; /* total successful frames, any/all rates */
353 u64 flush_timer; /* time staying in mode before new search */
354
355 u8 action_counter; /* # mode-switch actions tried */
356 u8 is_green;
357 u8 is_dup;
358 enum ieee80211_band band;
359
360 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
361 u32 supp_rates;
362 u16 active_legacy_rate;
363 u16 active_siso_rate;
364 u16 active_mimo2_rate;
365 u16 active_mimo3_rate;
366 s8 max_rate_idx; /* Max rate set by user */
367 u8 missed_rate_counter;
368
369 struct iwl_link_quality_cmd lq;
370 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
371 struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
372 u8 tx_agg_tid_en;
373#ifdef CONFIG_MAC80211_DEBUGFS
374 struct dentry *rs_sta_dbgfs_scale_table_file;
375 struct dentry *rs_sta_dbgfs_stats_table_file;
376 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
377 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
378 u32 dbg_fixed_rate;
379#endif
380 struct iwl_priv *drv;
381
382 /* used to be in sta_info */
383 int last_txrate_idx;
384 /* last tx rate_n_flags */
385 u32 last_rate_n_flags;
386 /* packets destined for this STA are aggregated */
387 u8 is_agg;
388 /* BT traffic this sta was last updated in */
389 u8 last_bt_traffic;
390};
391
392static inline u8 num_of_ant(u8 mask)
393{
394 return !!((mask) & ANT_A) +
395 !!((mask) & ANT_B) +
396 !!((mask) & ANT_C);
397}
398
399static inline u8 first_antenna(u8 mask)
400{
401 if (mask & ANT_A)
402 return ANT_A;
403 if (mask & ANT_B)
404 return ANT_B;
405 return ANT_C;
406}
407
408
409/* Initialize station's rate scaling information after adding station */
410extern void iwl_rs_rate_init(struct iwl_priv *priv,
411 struct ieee80211_sta *sta, u8 sta_id);
412
413/**
414 * iwl_rate_control_register - Register the rate control algorithm callbacks
415 *
416 * Since the rate control algorithm is hardware specific, there is no need
417 * or reason to place it as a stand alone module. The driver can call
418 * iwl_rate_control_register in order to register the rate control callbacks
419 * with the mac80211 subsystem. This should be performed prior to calling
420 * ieee80211_register_hw
421 *
422 */
423extern int iwlagn_rate_control_register(void);
424
425/**
426 * iwl_rate_control_unregister - Unregister the rate control callbacks
427 *
428 * This should be called after calling ieee80211_unregister_hw, but before
429 * the driver is unloaded.
430 */
431extern void iwlagn_rate_control_unregister(void);
432
#endif /* __iwl_agn_rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
deleted file mode 100644
index cac4f37cc42..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ /dev/null
@@ -1,1151 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <linux/sched.h>
33#include <net/mac80211.h>
34#include <asm/unaligned.h>
35#include "iwl-io.h"
36#include "dev.h"
37#include "calib.h"
38#include "agn.h"
39
#define IWL_CMD_ENTRY(x) [x] = #x

/*
 * Command-id -> name lookup table for debug/trace output, indexed by
 * the uCode command/notification id.  IWL_CMD_ENTRY stringifies each
 * id so the table stays in sync with the enum values automatically.
 */
const char *iwl_dvm_cmd_strings[REPLY_MAX] = {
	IWL_CMD_ENTRY(REPLY_ALIVE),
	IWL_CMD_ENTRY(REPLY_ERROR),
	IWL_CMD_ENTRY(REPLY_ECHO),
	IWL_CMD_ENTRY(REPLY_RXON),
	IWL_CMD_ENTRY(REPLY_RXON_ASSOC),
	IWL_CMD_ENTRY(REPLY_QOS_PARAM),
	IWL_CMD_ENTRY(REPLY_RXON_TIMING),
	IWL_CMD_ENTRY(REPLY_ADD_STA),
	IWL_CMD_ENTRY(REPLY_REMOVE_STA),
	IWL_CMD_ENTRY(REPLY_REMOVE_ALL_STA),
	IWL_CMD_ENTRY(REPLY_TXFIFO_FLUSH),
	IWL_CMD_ENTRY(REPLY_WEPKEY),
	IWL_CMD_ENTRY(REPLY_TX),
	IWL_CMD_ENTRY(REPLY_LEDS_CMD),
	IWL_CMD_ENTRY(REPLY_TX_LINK_QUALITY_CMD),
	IWL_CMD_ENTRY(COEX_PRIORITY_TABLE_CMD),
	IWL_CMD_ENTRY(COEX_MEDIUM_NOTIFICATION),
	IWL_CMD_ENTRY(COEX_EVENT_CMD),
	IWL_CMD_ENTRY(REPLY_QUIET_CMD),
	IWL_CMD_ENTRY(REPLY_CHANNEL_SWITCH),
	IWL_CMD_ENTRY(CHANNEL_SWITCH_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_SPECTRUM_MEASUREMENT_CMD),
	IWL_CMD_ENTRY(SPECTRUM_MEASURE_NOTIFICATION),
	IWL_CMD_ENTRY(POWER_TABLE_CMD),
	IWL_CMD_ENTRY(PM_SLEEP_NOTIFICATION),
	IWL_CMD_ENTRY(PM_DEBUG_STATISTIC_NOTIFIC),
	IWL_CMD_ENTRY(REPLY_SCAN_CMD),
	IWL_CMD_ENTRY(REPLY_SCAN_ABORT_CMD),
	IWL_CMD_ENTRY(SCAN_START_NOTIFICATION),
	IWL_CMD_ENTRY(SCAN_RESULTS_NOTIFICATION),
	IWL_CMD_ENTRY(SCAN_COMPLETE_NOTIFICATION),
	IWL_CMD_ENTRY(BEACON_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_TX_BEACON),
	IWL_CMD_ENTRY(WHO_IS_AWAKE_NOTIFICATION),
	IWL_CMD_ENTRY(QUIET_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_TX_PWR_TABLE_CMD),
	IWL_CMD_ENTRY(MEASURE_ABORT_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_BT_CONFIG),
	IWL_CMD_ENTRY(REPLY_STATISTICS_CMD),
	IWL_CMD_ENTRY(STATISTICS_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_CARD_STATE_CMD),
	IWL_CMD_ENTRY(CARD_STATE_NOTIFICATION),
	IWL_CMD_ENTRY(MISSED_BEACONS_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_CT_KILL_CONFIG_CMD),
	IWL_CMD_ENTRY(SENSITIVITY_CMD),
	IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD),
	IWL_CMD_ENTRY(REPLY_RX_PHY_CMD),
	IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD),
	IWL_CMD_ENTRY(REPLY_COMPRESSED_BA),
	IWL_CMD_ENTRY(CALIBRATION_CFG_CMD),
	IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION),
	IWL_CMD_ENTRY(CALIBRATION_COMPLETE_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_TX_POWER_DBM_CMD),
	IWL_CMD_ENTRY(TEMPERATURE_NOTIFICATION),
	IWL_CMD_ENTRY(TX_ANT_CONFIGURATION_CMD),
	IWL_CMD_ENTRY(REPLY_BT_COEX_PROFILE_NOTIF),
	IWL_CMD_ENTRY(REPLY_BT_COEX_PRIO_TABLE),
	IWL_CMD_ENTRY(REPLY_BT_COEX_PROT_ENV),
	IWL_CMD_ENTRY(REPLY_WIPAN_PARAMS),
	IWL_CMD_ENTRY(REPLY_WIPAN_RXON),
	IWL_CMD_ENTRY(REPLY_WIPAN_RXON_TIMING),
	IWL_CMD_ENTRY(REPLY_WIPAN_RXON_ASSOC),
	IWL_CMD_ENTRY(REPLY_WIPAN_QOS_PARAM),
	IWL_CMD_ENTRY(REPLY_WIPAN_WEPKEY),
	IWL_CMD_ENTRY(REPLY_WIPAN_P2P_CHANNEL_SWITCH),
	IWL_CMD_ENTRY(REPLY_WIPAN_NOA_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_WIPAN_DEACTIVATION_COMPLETE),
	IWL_CMD_ENTRY(REPLY_WOWLAN_PATTERNS),
	IWL_CMD_ENTRY(REPLY_WOWLAN_WAKEUP_FILTER),
	IWL_CMD_ENTRY(REPLY_WOWLAN_TSC_RSC_PARAMS),
	IWL_CMD_ENTRY(REPLY_WOWLAN_TKIP_PARAMS),
	IWL_CMD_ENTRY(REPLY_WOWLAN_KEK_KCK_MATERIAL),
	IWL_CMD_ENTRY(REPLY_WOWLAN_GET_STATUS),
	IWL_CMD_ENTRY(REPLY_D3_CONFIG),
};
#undef IWL_CMD_ENTRY
119
120/******************************************************************************
121 *
122 * Generic RX handler implementations
123 *
124 ******************************************************************************/
125
126static int iwlagn_rx_reply_error(struct iwl_priv *priv,
127 struct iwl_rx_cmd_buffer *rxb,
128 struct iwl_device_cmd *cmd)
129{
130 struct iwl_rx_packet *pkt = rxb_addr(rxb);
131 struct iwl_error_resp *err_resp = (void *)pkt->data;
132
133 IWL_ERR(priv, "Error Reply type 0x%08X cmd REPLY_ERROR (0x%02X) "
134 "seq 0x%04X ser 0x%08X\n",
135 le32_to_cpu(err_resp->error_type),
136 err_resp->cmd_id,
137 le16_to_cpu(err_resp->bad_cmd_seq_num),
138 le32_to_cpu(err_resp->error_info));
139 return 0;
140}
141
/*
 * Handle a channel-switch-announcement notification from the uCode:
 * on success commit the new channel to the active/staging RXON state
 * and tell the driver core whether the switch completed.
 */
static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
			 struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = (void *)pkt->data;
	/*
	 * MULTI-FIXME
	 * See iwlagn_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;

	/* Ignore stray notifications when no switch is in flight. */
	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		return 0;

	/*
	 * status == 0 means the uCode switched successfully.  The channel
	 * is compared and assigned without byte-order conversion, so
	 * priv->switch_channel is presumably stored in the same raw
	 * little-endian uCode format — TODO confirm against the setter.
	 */
	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
		rxon->channel = csa->channel;
		ctx->staging.channel = csa->channel;
		IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
			      le16_to_cpu(csa->channel));
		iwl_chswitch_done(priv, true);
	} else {
		IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
			le16_to_cpu(csa->channel));
		iwl_chswitch_done(priv, false);
	}
	return 0;
}
170
171
172static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
173 struct iwl_rx_cmd_buffer *rxb,
174 struct iwl_device_cmd *cmd)
175{
176 struct iwl_rx_packet *pkt = rxb_addr(rxb);
177 struct iwl_spectrum_notification *report = (void *)pkt->data;
178
179 if (!report->state) {
180 IWL_DEBUG_11H(priv,
181 "Spectrum Measure Notification: Start\n");
182 return 0;
183 }
184
185 memcpy(&priv->measure_report, report, sizeof(*report));
186 priv->measurement_status |= MEASUREMENT_READY;
187 return 0;
188}
189
/* Debug-only trace of uCode sleep notifications; a no-op otherwise. */
static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
				    struct iwl_rx_cmd_buffer *rxb,
				    struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *notif = (void *)pkt->data;

	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     notif->pm_sleep_mode, notif->pm_wakeup_src);
#endif
	return 0;
}
202
203static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
204 struct iwl_rx_cmd_buffer *rxb,
205 struct iwl_device_cmd *cmd)
206{
207 struct iwl_rx_packet *pkt = rxb_addr(rxb);
208 u32 __maybe_unused len =
209 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
210 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
211 "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
212 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
213 return 0;
214}
215
216static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
217 struct iwl_rx_cmd_buffer *rxb,
218 struct iwl_device_cmd *cmd)
219{
220 struct iwl_rx_packet *pkt = rxb_addr(rxb);
221 struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
222#ifdef CONFIG_IWLWIFI_DEBUG
223 u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
224 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
225
226 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
227 "tsf:0x%.8x%.8x rate:%d\n",
228 status & TX_STATUS_MSK,
229 beacon->beacon_notify_hdr.failure_frame,
230 le32_to_cpu(beacon->ibss_mgr_status),
231 le32_to_cpu(beacon->high_tsf),
232 le32_to_cpu(beacon->low_tsf), rate);
233#endif
234
235 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
236
237 return 0;
238}
239
/**
 * iwlagn_good_plcp_health - checks for plcp error.
 *
 * When the plcp error is exceeding the thresholds, reset the radio
 * to improve the throughput.
 *
 * Returns true when PLCP health is acceptable (or checking is
 * disabled), false when the error rate exceeds the threshold.
 */
static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
				    struct statistics_rx_phy *cur_ofdm,
				    struct statistics_rx_ht_phy *cur_ofdm_ht,
				    unsigned int msecs)
{
	int delta;
	int threshold = priv->plcp_delta_threshold;

	if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
		return true;
	}

	/* combined OFDM + OFDM-HT PLCP error delta since the last sample */
	delta = le32_to_cpu(cur_ofdm->plcp_err) -
		le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
		le32_to_cpu(cur_ofdm_ht->plcp_err) -
		le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);

	/* Can be negative if firmware reset statistics */
	if (delta <= 0)
		return true;

	/* threshold is expressed in errors per 100 ms */
	if ((delta * 100 / msecs) > threshold) {
		IWL_DEBUG_RADIO(priv,
			"plcp health threshold %u delta %d msecs %u\n",
			threshold, delta, msecs);
		return false;
	}

	return true;
}
277
/*
 * Force an RF reset by kicking off an internal single-channel scan.
 * @external: true when the reset was requested from outside the
 *            driver's recovery logic; such requests bypass rate
 *            limiting.  Returns 0 on success, -EAGAIN when rejected
 *            (shutting down or rate-limited), -ENOLINK when not
 *            associated.
 */
int iwl_force_rf_reset(struct iwl_priv *priv, bool external)
{
	struct iwl_rf_reset *rf_reset;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EAGAIN;

	if (!iwl_is_any_associated(priv)) {
		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
		return -ENOLINK;
	}

	rf_reset = &priv->rf_reset;
	rf_reset->reset_request_count++;
	/* Rate-limit internally requested resets; external ones always run */
	if (!external && rf_reset->last_reset_jiffies &&
	    time_after(rf_reset->last_reset_jiffies +
		       IWL_DELAY_NEXT_FORCE_RF_RESET, jiffies)) {
		IWL_DEBUG_INFO(priv, "RF reset rejected\n");
		rf_reset->reset_reject_count++;
		return -EAGAIN;
	}
	rf_reset->reset_success_count++;
	rf_reset->last_reset_jiffies = jiffies;

	/*
	 * There is no easy and better way to force reset the radio,
	 * the only known method is switching channel which will force to
	 * reset and tune the radio.
	 * Use an internal short scan (single channel) operation to
	 * achieve this objective.
	 * Driver should reset the radio when number of consecutive missed
	 * beacon, or any other uCode error condition detected.
	 */
	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
	iwl_internal_short_hw_scan(priv);
	return 0;
}
315
316
/*
 * Inspect the latest statistics sample and trigger an RF reset when
 * the PLCP error rate indicates the radio needs recovery.
 */
static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
					   struct statistics_rx_phy *cur_ofdm,
					   struct statistics_rx_ht_phy *cur_ofdm_ht,
					   struct statistics_tx *tx,
					   unsigned long stamp)
{
	unsigned int msecs;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* age of the sample relative to the previous one */
	msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);

	/* Health checks are only meaningful while associated */
	if (!iwl_is_any_associated(priv))
		return;

	/* Do not check/recover when do not have enough statistics data */
	if (msecs < 99)
		return;

	if (iwlwifi_mod_params.plcp_check &&
	    !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
		iwl_force_rf_reset(priv, false);
}
342
343/* Calculate noise level, based on measurements during network silence just
344 * before arriving beacon. This measurement can be done only if we know
345 * exactly when to expect beacons, therefore only when we're associated. */
346static void iwlagn_rx_calc_noise(struct iwl_priv *priv)
347{
348 struct statistics_rx_non_phy *rx_info;
349 int num_active_rx = 0;
350 int total_silence = 0;
351 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
352 int last_rx_noise;
353
354 rx_info = &priv->statistics.rx_non_phy;
355
356 bcn_silence_a =
357 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
358 bcn_silence_b =
359 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
360 bcn_silence_c =
361 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
362
363 if (bcn_silence_a) {
364 total_silence += bcn_silence_a;
365 num_active_rx++;
366 }
367 if (bcn_silence_b) {
368 total_silence += bcn_silence_b;
369 num_active_rx++;
370 }
371 if (bcn_silence_c) {
372 total_silence += bcn_silence_c;
373 num_active_rx++;
374 }
375
376 /* Average among active antennas */
377 if (num_active_rx)
378 last_rx_noise = (total_silence / num_active_rx) - 107;
379 else
380 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
381
382 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
383 bcn_silence_a, bcn_silence_b, bcn_silence_c,
384 last_rx_noise);
385}
386
387#ifdef CONFIG_IWLWIFI_DEBUGFS
388/*
389 * based on the assumption of all statistics counter are in DWORD
390 * FIXME: This function is for debugging, do not deal with
391 * the case of counters roll-over.
392 */
393static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
394 __le32 *max_delta, __le32 *accum, int size)
395{
396 int i;
397
398 for (i = 0;
399 i < size / sizeof(__le32);
400 i++, prev++, cur++, delta++, max_delta++, accum++) {
401 if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
402 *delta = cpu_to_le32(
403 le32_to_cpu(*cur) - le32_to_cpu(*prev));
404 le32_add_cpu(accum, le32_to_cpu(*delta));
405 if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
406 *max_delta = *delta;
407 }
408 }
409}
410
/*
 * Fold one statistics notification into the debugfs accumulators
 * (delta, max-delta and running totals) kept in priv.
 */
static void
iwlagn_accumulative_statistics(struct iwl_priv *priv,
			       struct statistics_general_common *common,
			       struct statistics_rx_non_phy *rx_non_phy,
			       struct statistics_rx_phy *rx_ofdm,
			       struct statistics_rx_ht_phy *rx_ofdm_ht,
			       struct statistics_rx_phy *rx_cck,
			       struct statistics_tx *tx,
			       struct statistics_bt_activity *bt_activity)
{
/*
 * ACCUM(_name) relies on each parameter being named exactly like the
 * matching member of priv->statistics / delta_stats / max_delta_stats /
 * accum_stats, so the same token serves as both struct member and
 * local pointer.  Keep parameter names in sync with those members.
 */
#define ACCUM(_name) \
	accum_stats((__le32 *)&priv->statistics._name, \
		    (__le32 *)_name, \
		    (__le32 *)&priv->delta_stats._name, \
		    (__le32 *)&priv->max_delta_stats._name, \
		    (__le32 *)&priv->accum_stats._name, \
		    sizeof(*_name));

	ACCUM(common);
	ACCUM(rx_non_phy);
	ACCUM(rx_ofdm);
	ACCUM(rx_ofdm_ht);
	ACCUM(rx_cck);
	ACCUM(tx);
	/* BT activity is only present in the BT flavor of the notification */
	if (bt_activity)
		ACCUM(bt_activity);
#undef ACCUM
}
439#else
/* No-op stub when debugfs statistics accumulation is compiled out. */
static inline void
iwlagn_accumulative_statistics(struct iwl_priv *priv,
			       struct statistics_general_common *common,
			       struct statistics_rx_non_phy *rx_non_phy,
			       struct statistics_rx_phy *rx_ofdm,
			       struct statistics_rx_ht_phy *rx_ofdm_ht,
			       struct statistics_rx_phy *rx_cck,
			       struct statistics_tx *tx,
			       struct statistics_bt_activity *bt_activity)
{
}
451#endif
452
/*
 * Handle a statistics notification from the uCode: dispatch on the
 * payload length (BT vs. non-BT layout), accumulate debugfs counters,
 * run PLCP-health recovery, latch the new snapshot into priv, and
 * kick off noise calibration / temperature handling as needed.
 */
static int iwlagn_rx_statistics(struct iwl_priv *priv,
				struct iwl_rx_cmd_buffer *rxb,
				struct iwl_device_cmd *cmd)
{
	unsigned long stamp = jiffies;
	const int reg_recalib_period = 60;	/* seconds */
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	__le32 *flag;
	struct statistics_general_common *common;
	struct statistics_rx_non_phy *rx_non_phy;
	struct statistics_rx_phy *rx_ofdm;
	struct statistics_rx_ht_phy *rx_ofdm_ht;
	struct statistics_rx_phy *rx_cck;
	struct statistics_tx *tx;
	struct statistics_bt_activity *bt_activity;

	len -= sizeof(struct iwl_cmd_header); /* skip header */

	IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
		     len);

	spin_lock(&priv->statistics.lock);

	/* The payload length tells BT-coex and plain layouts apart */
	if (len == sizeof(struct iwl_bt_notif_statistics)) {
		struct iwl_bt_notif_statistics *stats;
		stats = (void *)&pkt->data;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general.common;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = &stats->general.activity;

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* handle this exception directly */
		priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
		le32_add_cpu(&priv->statistics.accum_num_bt_kills,
			     le32_to_cpu(stats->rx.general.num_bt_kills));
#endif
	} else if (len == sizeof(struct iwl_notif_statistics)) {
		struct iwl_notif_statistics *stats;
		stats = (void *)&pkt->data;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = NULL;
	} else {
		WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
			  len, sizeof(struct iwl_bt_notif_statistics),
			  sizeof(struct iwl_notif_statistics));
		spin_unlock(&priv->statistics.lock);
		return 0;
	}

	/* detect temperature or HT40-mode changes before overwriting */
	change = common->temperature != priv->statistics.common.temperature ||
		 (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);

	iwlagn_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
				       rx_ofdm_ht, rx_cck, tx, bt_activity);

	/* must run before priv->statistics is updated with the new sample */
	iwlagn_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);

	priv->statistics.flag = *flag;
	memcpy(&priv->statistics.common, common, sizeof(*common));
	memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
	memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
	memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
	memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
	memcpy(&priv->statistics.tx, tx, sizeof(*tx));
#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (bt_activity)
		memcpy(&priv->statistics.bt_activity, bt_activity,
		       sizeof(*bt_activity));
#endif

	priv->rx_statistics_jiffies = stamp;

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * reg_recalib_period seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(reg_recalib_period * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwlagn_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->lib->temperature && change)
		priv->lib->temperature(priv);

	spin_unlock(&priv->statistics.lock);

	return 0;
}
560
561static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
562 struct iwl_rx_cmd_buffer *rxb,
563 struct iwl_device_cmd *cmd)
564{
565 struct iwl_rx_packet *pkt = rxb_addr(rxb);
566 struct iwl_notif_statistics *stats = (void *)pkt->data;
567
568 if (le32_to_cpu(stats->flag) & UCODE_STATISTICS_CLEAR_MSK) {
569#ifdef CONFIG_IWLWIFI_DEBUGFS
570 memset(&priv->accum_stats, 0,
571 sizeof(priv->accum_stats));
572 memset(&priv->delta_stats, 0,
573 sizeof(priv->delta_stats));
574 memset(&priv->max_delta_stats, 0,
575 sizeof(priv->max_delta_stats));
576#endif
577 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
578 }
579 iwlagn_rx_statistics(priv, rxb, cmd);
580 return 0;
581}
582
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
				      struct iwl_rx_cmd_buffer *rxb,
				      struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);
	/* snapshot of the status bits *before* this notification is
	 * applied, used at the end to detect an HW rfkill transition */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* any disable reason: block further uCode commands */
		iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
				   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			/* RXON not disabled: unblock commands again.
			 * NOTE(review): the mailbox write below writes the
			 * same "blocked" bit as the blocking path above
			 * rather than a clear variant — this asymmetry is
			 * as in the original code; confirm against the
			 * HBUS_TARG_MBX_C register documentation. */
			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
					   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		/* critical temperature: enter thermal-throttling CT-kill */
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	/* mirror the HW rfkill bit into the driver status word */
	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	/* report to mac80211 only when the HW rfkill state changed */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	return 0;
}
636
637static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
638 struct iwl_rx_cmd_buffer *rxb,
639 struct iwl_device_cmd *cmd)
640
641{
642 struct iwl_rx_packet *pkt = rxb_addr(rxb);
643 struct iwl_missed_beacon_notif *missed_beacon = (void *)pkt->data;
644
645 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
646 priv->missed_beacon_threshold) {
647 IWL_DEBUG_CALIB(priv,
648 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
649 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
650 le32_to_cpu(missed_beacon->total_missed_becons),
651 le32_to_cpu(missed_beacon->num_recvd_beacons),
652 le32_to_cpu(missed_beacon->num_expected_beacons));
653 if (!test_bit(STATUS_SCANNING, &priv->status))
654 iwl_init_sensitivity(priv);
655 }
656 return 0;
657}
658
659/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
660 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
661static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
662 struct iwl_rx_cmd_buffer *rxb,
663 struct iwl_device_cmd *cmd)
664{
665 struct iwl_rx_packet *pkt = rxb_addr(rxb);
666
667 priv->last_phy_res_valid = true;
668 priv->ampdu_ref++;
669 memcpy(&priv->last_phy_res, pkt->data,
670 sizeof(struct iwl_rx_phy_res));
671 return 0;
672}
673
/*
 * returns non-zero if packet should be dropped
 *
 * Inspect the uCode decryption result for a protected frame and either
 * mark it RX_FLAG_DECRYPTED for mac80211, leave it for SW decryption,
 * or request a drop (bad ICV/MIC destroys the in-place-decrypted frame).
 */
static int iwlagn_set_decrypted_flag(struct iwl_priv *priv,
				     struct ieee80211_hdr *hdr,
				     u32 decrypt_res,
				     struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
	    RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* unprotected frames carry no decryption status to translate */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through: TKIP with a good TTAK shares the
		 * ICV/MIC handling of the WEP case below */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
725
/*
 * Build an skb around the received frame and hand it to mac80211.
 *
 * The 802.11 header (or, for small frames, the whole frame) is copied
 * into the skb head; any remainder is attached as a page fragment by
 * stealing the RX buffer page, avoiding a full copy of large frames.
 */
static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
					   struct ieee80211_hdr *hdr,
					   u16 len,
					   u32 ampdu_status,
					   struct iwl_rx_cmd_buffer *rxb,
					   struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	struct iwl_rxon_context *ctx;
	unsigned int hdrlen, fraglen;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!iwlwifi_mod_params.sw_crypto &&
	    iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	/* Dont use dev_alloc_skb(), we'll have enough headroom once
	 * ieee80211_hdr pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(priv, "alloc_skb failed\n");
		return;
	}
	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
	 * are more efficient.
	 */
	hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);

	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
	fraglen = len - hdrlen;

	if (fraglen) {
		/* offset of the un-copied remainder within the RX page:
		 * (hdr - rxb_addr) is the frame's offset inside the
		 * buffer, plus the buffer's own offset into the page */
		int offset = (void *)hdr + hdrlen -
			     rxb_addr(rxb) + rxb_offset(rxb);

		/* take ownership of the page; it is freed with the skb */
		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	/*
	 * Wake any queues that were stopped due to a passive channel tx
	 * failure. This can happen because the regulatory enforcement in
	 * the device waits for a beacon before allowing transmission,
	 * sometimes even after already having transmitted frames for the
	 * association because the new RXON may reset the information.
	 */
	if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) {
		for_each_context(priv, ctx) {
			if (!ether_addr_equal(hdr->addr3,
					      ctx->active.bssid_addr))
				continue;
			iwlagn_lift_passive_no_rx(priv);
		}
	}

	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
}
795
796static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
797{
798 u32 decrypt_out = 0;
799
800 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
801 RX_RES_STATUS_STATION_FOUND)
802 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
803 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
804
805 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
806
807 /* packet was not encrypted */
808 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
809 RX_RES_STATUS_SEC_TYPE_NONE)
810 return decrypt_out;
811
812 /* packet was encrypted with unknown alg */
813 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
814 RX_RES_STATUS_SEC_TYPE_ERR)
815 return decrypt_out;
816
817 /* decryption was not done in HW */
818 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
819 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
820 return decrypt_out;
821
822 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
823
824 case RX_RES_STATUS_SEC_TYPE_CCMP:
825 /* alg is CCM: check MIC only */
826 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
827 /* Bad MIC */
828 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
829 else
830 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
831
832 break;
833
834 case RX_RES_STATUS_SEC_TYPE_TKIP:
835 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
836 /* Bad TTAK */
837 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
838 break;
839 }
840 /* fall through if TTAK OK */
841 default:
842 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
843 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
844 else
845 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
846 break;
847 }
848
849 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
850 decrypt_in, decrypt_out);
851
852 return decrypt_out;
853}
854
855/* Calc max signal level (dBm) among 3 possible receivers */
856static int iwlagn_calc_rssi(struct iwl_priv *priv,
857 struct iwl_rx_phy_res *rx_resp)
858{
859 /* data from PHY/DSP regarding signal strength, etc.,
860 * contents are always there, not configurable by host
861 */
862 struct iwlagn_non_cfg_phy *ncphy =
863 (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
864 u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
865 u8 agc;
866
867 val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
868 agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
869
870 /* Find max rssi among 3 possible receivers.
871 * These values are measured by the digital signal processor (DSP).
872 * They should stay fairly constant even as the signal strength varies,
873 * if the radio's automatic gain control (AGC) is working right.
874 * AGC value (see below) will provide the "interesting" info.
875 */
876 val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
877 rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
878 IWLAGN_OFDM_RSSI_A_BIT_POS;
879 rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
880 IWLAGN_OFDM_RSSI_B_BIT_POS;
881 val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
882 rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
883 IWLAGN_OFDM_RSSI_C_BIT_POS;
884
885 max_rssi = max_t(u32, rssi_a, rssi_b);
886 max_rssi = max_t(u32, max_rssi, rssi_c);
887
888 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
889 rssi_a, rssi_b, rssi_c, max_rssi, agc);
890
891 /* dBm = max_rssi dB - agc dB - constant.
892 * Higher AGC (higher radio gain) means lower signal. */
893 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
894}
895
/* Called for REPLY_RX_MPDU_CMD */
/*
 * Combine the MPDU payload with the PHY data cached by the preceding
 * REPLY_RX_PHY_CMD (iwlagn_rx_reply_rx_phy), fill in an
 * ieee80211_rx_status, and pass the frame on to mac80211.
 */
static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_device_cmd *cmd)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status = {};
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct iwl_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/* an MPDU is only meaningful together with its PHY response */
	if (!priv->last_phy_res_valid) {
		IWL_ERR(priv, "MPDU frame without cached PHY data\n");
		return 0;
	}
	phy_res = &priv->last_phy_res;
	amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
	header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
	len = le16_to_cpu(amsdu->byte_count);
	/* the 32-bit RX status word follows the frame payload */
	rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
	ampdu_status = iwlagn_translate_rx_status(priv,
						  le32_to_cpu(rx_pkt_status));

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
			       phy_res->cfg_phy_cnt);
		return 0;
	}

	/* silently drop frames the HW flagged as corrupt */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
			     le32_to_cpu(rx_pkt_status));
		return 0;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					       rx_status.band);
	rx_status.rate_idx =
		iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_START;*/

	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = iwlagn_calc_rssi(priv, phy_res);

	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
		rx_status.signal, (unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
		>> RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
		/*
		 * We know which subframes of an A-MPDU belong
		 * together since we get a single PHY response
		 * from the firmware for all of them
		 */
		rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status.ampdu_reference = priv->ampdu_ref;
	}

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		rx_status.flag |= RX_FLAG_HT_GF;

	iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
				       rxb, &rx_status);
	return 0;
}
1007
/*
 * P2P Notice-of-Absence notification: rebuild the vendor-specific NoA
 * information element from the firmware's attribute and publish it via
 * RCU in priv->noa_data (freed with kfree_rcu once readers are done).
 */
static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
				      struct iwl_rx_cmd_buffer *rxb,
				      struct iwl_device_cmd *cmd)
{
	struct iwl_wipan_noa_data *new_data, *old_data;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->data;

	/* no condition -- we're in softirq */
	old_data = rcu_dereference_protected(priv->noa_data, true);

	if (noa_notif->noa_active) {
		/* payload length of the NoA attribute reported by uCode */
		u32 len = le16_to_cpu(noa_notif->noa_attribute.length);
		u32 copylen = len;

		/* EID, len, OUI, subtype */
		len += 1 + 1 + 3 + 1;
		/* P2P id, P2P length */
		len += 1 + 2;
		copylen += 1 + 2;

		new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
		if (new_data) {
			/* build the vendor-specific IE header by hand,
			 * then copy the attribute (incl. its own id and
			 * 2-byte length header, hence copylen = len + 3) */
			new_data->length = len;
			new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
			new_data->data[1] = len - 2; /* not counting EID, len */
			new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
			new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
			new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
			new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P;
			memcpy(&new_data->data[6], &noa_notif->noa_attribute,
			       copylen);
		}
		/* NOTE: allocation failure silently drops the update */
	} else
		new_data = NULL;

	rcu_assign_pointer(priv->noa_data, new_data);

	if (old_data)
		kfree_rcu(old_data, rcu_head);

	return 0;
}
1051
1052/**
1053 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
1054 *
1055 * Setup the RX handlers for each of the reply types sent from the uCode
1056 * to the host.
1057 */
1058void iwl_setup_rx_handlers(struct iwl_priv *priv)
1059{
1060 int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1061 struct iwl_device_cmd *cmd);
1062
1063 handlers = priv->rx_handlers;
1064
1065 handlers[REPLY_ERROR] = iwlagn_rx_reply_error;
1066 handlers[CHANNEL_SWITCH_NOTIFICATION] = iwlagn_rx_csa;
1067 handlers[SPECTRUM_MEASURE_NOTIFICATION] =
1068 iwlagn_rx_spectrum_measure_notif;
1069 handlers[PM_SLEEP_NOTIFICATION] = iwlagn_rx_pm_sleep_notif;
1070 handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
1071 iwlagn_rx_pm_debug_statistics_notif;
1072 handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
1073 handlers[REPLY_ADD_STA] = iwl_add_sta_callback;
1074
1075 handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification;
1076
1077 /*
1078 * The same handler is used for both the REPLY to a discrete
1079 * statistics request from the host as well as for the periodic
1080 * statistics notifications (after received beacons) from the uCode.
1081 */
1082 handlers[REPLY_STATISTICS_CMD] = iwlagn_rx_reply_statistics;
1083 handlers[STATISTICS_NOTIFICATION] = iwlagn_rx_statistics;
1084
1085 iwl_setup_rx_scan_handlers(priv);
1086
1087 handlers[CARD_STATE_NOTIFICATION] = iwlagn_rx_card_state_notif;
1088 handlers[MISSED_BEACONS_NOTIFICATION] =
1089 iwlagn_rx_missed_beacon_notif;
1090
1091 /* Rx handlers */
1092 handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
1093 handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
1094
1095 /* block ack */
1096 handlers[REPLY_COMPRESSED_BA] =
1097 iwlagn_rx_reply_compressed_ba;
1098
1099 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
1100
1101 /* set up notification wait support */
1102 iwl_notification_wait_init(&priv->notif_wait);
1103
1104 /* Set up BT Rx handlers */
1105 if (priv->cfg->bt_params)
1106 iwlagn_bt_rx_handler_setup(priv);
1107}
1108
1109int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1110 struct iwl_device_cmd *cmd)
1111{
1112 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1113 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1114 int err = 0;
1115
1116 /*
1117 * Do the notification wait before RX handlers so
1118 * even if the RX handler consumes the RXB we have
1119 * access to it in the notification wait entry.
1120 */
1121 iwl_notification_wait_notify(&priv->notif_wait, pkt);
1122
1123#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1124 /*
1125 * RX data may be forwarded to userspace in one
1126 * of two cases: the user owns the fw through testmode or when
1127 * the user requested to monitor the rx w/o affecting the regular flow.
1128 * In these cases the iwl_test object will handle forwarding the rx
1129 * data to user space.
1130 * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
1131 * continues.
1132 */
1133 iwl_test_rx(&priv->tst, rxb);
1134#endif
1135
1136 if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
1137 /* Based on type of command response or notification,
1138 * handle those that need handling via function in
1139 * rx_handlers table. See iwl_setup_rx_handlers() */
1140 if (priv->rx_handlers[pkt->hdr.cmd]) {
1141 priv->rx_handlers_stats[pkt->hdr.cmd]++;
1142 err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
1143 } else {
1144 /* No handling needed */
1145 IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
1146 iwl_dvm_get_cmd_string(pkt->hdr.cmd),
1147 pkt->hdr.cmd);
1148 }
1149 }
1150 return err;
1151}
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
deleted file mode 100644
index 9a891e6e60e..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ /dev/null
@@ -1,1577 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/etherdevice.h>
28#include "iwl-trans.h"
29#include "iwl-modparams.h"
30#include "dev.h"
31#include "agn.h"
32#include "calib.h"
33
/*
 * initialize rxon structure with default values from eeprom
 *
 * Resets ctx->staging and fills in the device type, filter flags and
 * channel/band defaults appropriate for the context's interface type
 * (or the context's "unused" device type when no vif is attached).
 */
void iwl_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	memset(&ctx->staging, 0, sizeof(ctx->staging));

	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
	switch (ctx->vif->type) {
	case NL80211_IFTYPE_AP:
		ctx->staging.dev_type = ctx->ap_devtype;
		break;

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_MONITOR:
		ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* default to the current mac80211 channel/band configuration */
	ctx->staging.channel = cpu_to_le16(priv->hw->conf.channel->hw_value);
	priv->band = priv->hw->conf.channel->band;

	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
				RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	/* accept all HT basic rates by default */
	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}
96
97static int iwlagn_disable_bss(struct iwl_priv *priv,
98 struct iwl_rxon_context *ctx,
99 struct iwl_rxon_cmd *send)
100{
101 __le32 old_filter = send->filter_flags;
102 int ret;
103
104 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
105 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
106 CMD_SYNC, sizeof(*send), send);
107
108 send->filter_flags = old_filter;
109
110 if (ret)
111 IWL_DEBUG_QUIET_RFKILL(priv,
112 "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
113
114 return ret;
115}
116
/*
 * Disable the PAN context: send its RXON with ASSOC cleared and the
 * device type forced to P2P, then wait (up to 1s) for the uCode's
 * REPLY_WIPAN_DEACTIVATION_COMPLETE notification.  The caller's command
 * buffer is restored before returning.
 */
static int iwlagn_disable_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	struct iwl_notification_wait disable_wait;
	__le32 old_filter = send->filter_flags;
	u8 old_dev_type = send->dev_type;
	int ret;
	static const u8 deactivate_cmd[] = {
		REPLY_WIPAN_DEACTIVATION_COMPLETE
	};

	/* register the wait *before* sending so the completion can't race */
	iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
				   deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
				   NULL, NULL);

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	send->dev_type = RXON_DEV_TYPE_P2P;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
				   CMD_SYNC, sizeof(*send), send);

	/* restore the caller's command contents */
	send->filter_flags = old_filter;
	send->dev_type = old_dev_type;

	if (ret) {
		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
		/* command failed: the wait entry must be removed by hand */
		iwl_remove_notification(&priv->notif_wait, &disable_wait);
	} else {
		ret = iwl_wait_notification(&priv->notif_wait,
					    &disable_wait, HZ);
		if (ret)
			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
	}

	return ret;
}
153
154static int iwlagn_disconn_pan(struct iwl_priv *priv,
155 struct iwl_rxon_context *ctx,
156 struct iwl_rxon_cmd *send)
157{
158 __le32 old_filter = send->filter_flags;
159 int ret;
160
161 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
162 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
163 sizeof(*send), send);
164
165 send->filter_flags = old_filter;
166
167 return ret;
168}
169
170static void iwlagn_update_qos(struct iwl_priv *priv,
171 struct iwl_rxon_context *ctx)
172{
173 int ret;
174
175 if (!ctx->is_active)
176 return;
177
178 ctx->qos_data.def_qos_parm.qos_flags = 0;
179
180 if (ctx->qos_data.qos_active)
181 ctx->qos_data.def_qos_parm.qos_flags |=
182 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
183
184 if (ctx->ht.enabled)
185 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
186
187 IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
188 ctx->qos_data.qos_active,
189 ctx->qos_data.def_qos_parm.qos_flags);
190
191 ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC,
192 sizeof(struct iwl_qosparam_cmd),
193 &ctx->qos_data.def_qos_parm);
194 if (ret)
195 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
196}
197
198static int iwlagn_update_beacon(struct iwl_priv *priv,
199 struct ieee80211_vif *vif)
200{
201 lockdep_assert_held(&priv->mutex);
202
203 dev_kfree_skb(priv->beacon_skb);
204 priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
205 if (!priv->beacon_skb)
206 return -ENOMEM;
207 return iwlagn_send_beacon_cmd(priv);
208}
209
210static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
211 struct iwl_rxon_context *ctx)
212{
213 int ret = 0;
214 struct iwl_rxon_assoc_cmd rxon_assoc;
215 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
216 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
217
218 if ((rxon1->flags == rxon2->flags) &&
219 (rxon1->filter_flags == rxon2->filter_flags) &&
220 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
221 (rxon1->ofdm_ht_single_stream_basic_rates ==
222 rxon2->ofdm_ht_single_stream_basic_rates) &&
223 (rxon1->ofdm_ht_dual_stream_basic_rates ==
224 rxon2->ofdm_ht_dual_stream_basic_rates) &&
225 (rxon1->ofdm_ht_triple_stream_basic_rates ==
226 rxon2->ofdm_ht_triple_stream_basic_rates) &&
227 (rxon1->acquisition_data == rxon2->acquisition_data) &&
228 (rxon1->rx_chain == rxon2->rx_chain) &&
229 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
230 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
231 return 0;
232 }
233
234 rxon_assoc.flags = ctx->staging.flags;
235 rxon_assoc.filter_flags = ctx->staging.filter_flags;
236 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
237 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
238 rxon_assoc.reserved1 = 0;
239 rxon_assoc.reserved2 = 0;
240 rxon_assoc.reserved3 = 0;
241 rxon_assoc.ofdm_ht_single_stream_basic_rates =
242 ctx->staging.ofdm_ht_single_stream_basic_rates;
243 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
244 ctx->staging.ofdm_ht_dual_stream_basic_rates;
245 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
246 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
247 ctx->staging.ofdm_ht_triple_stream_basic_rates;
248 rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
249
250 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
251 CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
252 return ret;
253}
254
255static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
256{
257 u16 new_val;
258 u16 beacon_factor;
259
260 /*
261 * If mac80211 hasn't given us a beacon interval, program
262 * the default into the device (not checking this here
263 * would cause the adjustment below to return the maximum
264 * value, which may break PAN.)
265 */
266 if (!beacon_val)
267 return DEFAULT_BEACON_INTERVAL;
268
269 /*
270 * If the beacon interval we obtained from the peer
271 * is too large, we'll have to wake up more often
272 * (and in IBSS case, we'll beacon too much)
273 *
274 * For example, if max_beacon_val is 4096, and the
275 * requested beacon interval is 7000, we'll have to
276 * use 3500 to be able to wake up on the beacons.
277 *
278 * This could badly influence beacon detection stats.
279 */
280
281 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
282 new_val = beacon_val / beacon_factor;
283
284 if (!new_val)
285 new_val = max_beacon_val;
286
287 return new_val;
288}
289
/*
 * Build and send the RXON timing command for a context.
 *
 * The beacon interval is normally taken from the vif's bss_conf, but
 * when both BSS and PAN contexts are in use the later-started context
 * inherits the other's interval so both can share the device's timing.
 * The beacon_init_val is derived from the current TSF so the device
 * timer lines up with the next expected beacon.
 */
static int iwl_send_rxon_timing(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &priv->hw->conf;

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	/* PAN (non-station) context while BSS is associated: reuse the
	 * BSS context's beacon interval */
	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	/* BSS context without its own interval while PAN is associated:
	 * reuse the PAN context's beacon interval */
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		/* otherwise clamp the vif's interval to the device limit */
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	ctx->beacon_int = beacon_int;

	tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	/* time until the next beacon boundary, in device time units */
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* DTIM period of 0 from mac80211 is mapped to 1 */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				    CMD_SYNC, sizeof(ctx->timing),
				    &ctx->timing);
}
357
/*
 * iwlagn_rxon_disconn - send an unassociated RXON for this context
 *
 * Sends the staging RXON with the association bit cleared.  For the
 * PAN context this is a multi-step sequence (disable, re-send timing,
 * disconnect).  Because an un-assoc RXON wipes the uCode station table
 * and WEP keys, those are restored afterwards, and on success the
 * active RXON is updated from staging.
 */
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
	} else {
		ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
		if (ret)
			return ret;
		if (ctx->vif) {
			/* timing must be refreshed before the PAN disconn */
			ret = iwl_send_rxon_timing(priv, ctx);
			if (ret) {
				IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
				return ret;
			}
			ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
		}
	}
	if (ret)
		return ret;

	/*
	 * Un-assoc RXON clears the station table and WEP
	 * keys, so we have to restore those afterwards.
	 */
	iwl_clear_ucode_stations(priv, ctx);
	/* update -- might need P2P now */
	iwl_update_bcast_station(priv, ctx);
	iwl_restore_stations(priv, ctx);
	ret = iwl_restore_default_wep_keys(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
		return ret;
	}

	/* the device is now running what was staged */
	memcpy(active, &ctx->staging, sizeof(*active));
	return 0;
}
399
400static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
401{
402 int ret;
403 s8 prev_tx_power;
404 bool defer;
405 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
406
407 if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
408 return 0;
409
410 lockdep_assert_held(&priv->mutex);
411
412 if (priv->tx_power_user_lmt == tx_power && !force)
413 return 0;
414
415 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
416 IWL_WARN(priv,
417 "Requested user TXPOWER %d below lower limit %d.\n",
418 tx_power,
419 IWLAGN_TX_POWER_TARGET_POWER_MIN);
420 return -EINVAL;
421 }
422
423 if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) {
424 IWL_WARN(priv,
425 "Requested user TXPOWER %d above upper limit %d.\n",
426 tx_power, priv->nvm_data->max_tx_pwr_half_dbm);
427 return -EINVAL;
428 }
429
430 if (!iwl_is_ready_rf(priv))
431 return -EIO;
432
433 /* scan complete and commit_rxon use tx_power_next value,
434 * it always need to be updated for newest request */
435 priv->tx_power_next = tx_power;
436
437 /* do not set tx power when scanning or channel changing */
438 defer = test_bit(STATUS_SCANNING, &priv->status) ||
439 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
440 if (defer && !force) {
441 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
442 return 0;
443 }
444
445 prev_tx_power = priv->tx_power_user_lmt;
446 priv->tx_power_user_lmt = tx_power;
447
448 ret = iwlagn_send_tx_power(priv);
449
450 /* if fail to set tx_power, restore the orig. tx power */
451 if (ret) {
452 priv->tx_power_user_lmt = prev_tx_power;
453 priv->tx_power_next = prev_tx_power;
454 }
455 return ret;
456}
457
/*
 * iwlagn_rxon_connect - commit an associated RXON and follow-up config
 *
 * Sends RXON timing (BSS context only here; the PAN context sends its
 * timing during the disconnect path), refreshes QoS, pushes the AP
 * beacon when beaconing, then commits the full staging RXON to the
 * device.  After the commit it sends the IBSS beacon if applicable,
 * re-initializes sensitivity, re-sends TX power (required after a
 * tune) and requests the configured SM PS mode on station interfaces.
 */
static int iwlagn_rxon_connect(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	/* RXON timing must be before associated RXON */
	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwl_send_rxon_timing(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
			return ret;
		}
	}
	/* QoS info may be cleared by previous un-assoc RXON */
	iwlagn_update_qos(priv, ctx);

	/*
	 * We'll run into this code path when beaconing is
	 * enabled, but then we also need to send the beacon
	 * to the device.
	 */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
		ret = iwlagn_update_beacon(priv, ctx->vif);
		if (ret) {
			IWL_ERR(priv,
				"Error sending required beacon (%d)!\n",
				ret);
			return ret;
		}
	}

	priv->start_calib = 0;
	/*
	 * Apply the new configuration.
	 *
	 * Associated RXON doesn't clear the station table in uCode,
	 * so we don't need to restore stations etc. after this.
	 */
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
		      sizeof(struct iwl_rxon_cmd), &ctx->staging);
	if (ret) {
		IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
		return ret;
	}
	/* the device now runs the staged configuration */
	memcpy(active, &ctx->staging, sizeof(*active));

	/* IBSS beacon needs to be sent after setting assoc */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
		if (iwlagn_update_beacon(priv, ctx->vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");
	iwl_init_sensitivity(priv);

	/*
	 * If we issue a new RXON command which required a tune then
	 * we must send a new TXPOWER command or we won't be able to
	 * Tx any frames.
	 *
	 * It's expected we set power here if channel is changing.
	 */
	ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	/* honor any SM PS mode the device config prescribes */
	if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
	    priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
		ieee80211_request_smps(ctx->vif,
				       priv->cfg->ht_params->smps_mode);

	return 0;
}
531
/*
 * iwlagn_set_pan_params - program the BSS/PAN time-slot split
 *
 * When both contexts exist, the device alternates between the BSS and
 * PAN contexts in two slots.  This computes the two slot widths from
 * the contexts' beacon intervals, the PAN DTIM period, and the scan /
 * association / remain-on-channel state, then sends REPLY_WIPAN_PARAMS.
 * It is a no-op when only the BSS context is valid or the PAN context
 * is inactive.  Caller must hold priv->mutex.
 */
int iwlagn_set_pan_params(struct iwl_priv *priv)
{
	struct iwl_wipan_params_cmd cmd;
	struct iwl_rxon_context *ctx_bss, *ctx_pan;
	int slot0 = 300, slot1 = 0;
	int ret;

	if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
		return 0;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	lockdep_assert_held(&priv->mutex);

	ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
	ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];

	/*
	 * If the PAN context is inactive, then we don't need
	 * to update the PAN parameters, the last thing we'll
	 * have done before it goes inactive is making the PAN
	 * parameters be WLAN-only.
	 */
	if (!ctx_pan->is_active)
		return 0;

	memset(&cmd, 0, sizeof(cmd));

	/* only 2 slots are currently allowed */
	cmd.num_slots = 2;

	cmd.slots[0].type = 0; /* BSS */
	cmd.slots[1].type = 1; /* PAN */

	if (priv->hw_roc_setup) {
		/* both contexts must be used for this to happen */
		slot1 = IWL_MIN_SLOT_TIME;
		slot0 = 3000;
	} else if (ctx_bss->vif && ctx_pan->vif) {
		int bcnint = ctx_pan->beacon_int;
		int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;

		/* should be set, but seems unused?? */
		cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);

		/*
		 * A PAN AP cannot follow a BSS interval that differs from
		 * its own; otherwise use the larger of the two intervals.
		 */
		if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
		    bcnint &&
		    bcnint != ctx_bss->beacon_int) {
			IWL_ERR(priv,
				"beacon intervals don't match (%d, %d)\n",
				ctx_bss->beacon_int, ctx_pan->beacon_int);
		} else
			bcnint = max_t(int, bcnint,
				       ctx_bss->beacon_int);
		if (!bcnint)
			bcnint = DEFAULT_BEACON_INTERVAL;
		/* default: split the interval evenly between the two */
		slot0 = bcnint / 2;
		slot1 = bcnint - slot0;

		/* skew the split toward whichever side is busy right now */
		if (test_bit(STATUS_SCAN_HW, &priv->status) ||
		    (!ctx_bss->vif->bss_conf.idle &&
		     !ctx_bss->vif->bss_conf.assoc)) {
			slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		} else if (!ctx_pan->vif->bss_conf.idle &&
			   !ctx_pan->vif->bss_conf.assoc) {
			slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot0 = IWL_MIN_SLOT_TIME;
		}
	} else if (ctx_pan->vif) {
		/* PAN only: give it (almost) all the time */
		slot0 = 0;
		slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
					ctx_pan->beacon_int;
		slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);

		if (test_bit(STATUS_SCAN_HW, &priv->status)) {
			slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		}
	}

	cmd.slots[0].width = cpu_to_le16(slot0);
	cmd.slots[1].width = cpu_to_le16(slot1);

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC,
			sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);

	return ret;
}
623
624static void _iwl_set_rxon_ht(struct iwl_priv *priv,
625 struct iwl_ht_config *ht_conf,
626 struct iwl_rxon_context *ctx)
627{
628 struct iwl_rxon_cmd *rxon = &ctx->staging;
629
630 if (!ctx->ht.enabled) {
631 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
632 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
633 RXON_FLG_HT40_PROT_MSK |
634 RXON_FLG_HT_PROT_MSK);
635 return;
636 }
637
638 /* FIXME: if the definition of ht.protection changed, the "translation"
639 * will be needed for rxon->flags
640 */
641 rxon->flags |= cpu_to_le32(ctx->ht.protection <<
642 RXON_FLG_HT_OPERATING_MODE_POS);
643
644 /* Set up channel bandwidth:
645 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
646 /* clear the HT channel mode before set the mode */
647 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
648 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
649 if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
650 /* pure ht40 */
651 if (ctx->ht.protection ==
652 IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
653 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
654 /*
655 * Note: control channel is opposite of extension
656 * channel
657 */
658 switch (ctx->ht.extension_chan_offset) {
659 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
660 rxon->flags &=
661 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
662 break;
663 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
664 rxon->flags |=
665 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
666 break;
667 }
668 } else {
669 /*
670 * Note: control channel is opposite of extension
671 * channel
672 */
673 switch (ctx->ht.extension_chan_offset) {
674 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
675 rxon->flags &=
676 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
677 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
678 break;
679 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
680 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
681 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
682 break;
683 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
684 default:
685 /*
686 * channel location only valid if in Mixed
687 * mode
688 */
689 IWL_ERR(priv,
690 "invalid extension channel offset\n");
691 break;
692 }
693 }
694 } else {
695 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
696 }
697
698 iwlagn_set_rxon_chain(priv, ctx);
699
700 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
701 "extension channel offset 0x%x\n",
702 le32_to_cpu(rxon->flags), ctx->ht.protection,
703 ctx->ht.extension_chan_offset);
704}
705
706void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
707{
708 struct iwl_rxon_context *ctx;
709
710 for_each_context(priv, ctx)
711 _iwl_set_rxon_ht(priv, ht_conf, ctx);
712}
713
714/**
715 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
716 * @ch: requested channel as a pointer to struct ieee80211_channel
717
718 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
719 * in the staging RXON flag structure based on the ch->band
720 */
721void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
722 struct iwl_rxon_context *ctx)
723{
724 enum ieee80211_band band = ch->band;
725 u16 channel = ch->hw_value;
726
727 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
728 (priv->band == band))
729 return;
730
731 ctx->staging.channel = cpu_to_le16(channel);
732 if (band == IEEE80211_BAND_5GHZ)
733 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
734 else
735 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
736
737 priv->band = band;
738
739 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
740
741}
742
743void iwl_set_flags_for_band(struct iwl_priv *priv,
744 struct iwl_rxon_context *ctx,
745 enum ieee80211_band band,
746 struct ieee80211_vif *vif)
747{
748 if (band == IEEE80211_BAND_5GHZ) {
749 ctx->staging.flags &=
750 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
751 | RXON_FLG_CCK_MSK);
752 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
753 } else {
754 /* Copied from iwl_post_associate() */
755 if (vif && vif->bss_conf.use_short_slot)
756 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
757 else
758 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
759
760 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
761 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
762 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
763 }
764}
765
766static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
767 struct iwl_rxon_context *ctx, int hw_decrypt)
768{
769 struct iwl_rxon_cmd *rxon = &ctx->staging;
770
771 if (hw_decrypt)
772 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
773 else
774 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
775
776}
777
778/* validate RXON structure is valid */
779static int iwl_check_rxon_cmd(struct iwl_priv *priv,
780 struct iwl_rxon_context *ctx)
781{
782 struct iwl_rxon_cmd *rxon = &ctx->staging;
783 u32 errors = 0;
784
785 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
786 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
787 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
788 errors |= BIT(0);
789 }
790 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
791 IWL_WARN(priv, "check 2.4G: wrong radar\n");
792 errors |= BIT(1);
793 }
794 } else {
795 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
796 IWL_WARN(priv, "check 5.2G: not short slot!\n");
797 errors |= BIT(2);
798 }
799 if (rxon->flags & RXON_FLG_CCK_MSK) {
800 IWL_WARN(priv, "check 5.2G: CCK!\n");
801 errors |= BIT(3);
802 }
803 }
804 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
805 IWL_WARN(priv, "mac/bssid mcast!\n");
806 errors |= BIT(4);
807 }
808
809 /* make sure basic rates 6Mbps and 1Mbps are supported */
810 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
811 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
812 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
813 errors |= BIT(5);
814 }
815
816 if (le16_to_cpu(rxon->assoc_id) > 2007) {
817 IWL_WARN(priv, "aid > 2007\n");
818 errors |= BIT(6);
819 }
820
821 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
822 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
823 IWL_WARN(priv, "CCK and short slot\n");
824 errors |= BIT(7);
825 }
826
827 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
828 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
829 IWL_WARN(priv, "CCK and auto detect");
830 errors |= BIT(8);
831 }
832
833 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
834 RXON_FLG_TGG_PROTECT_MSK)) ==
835 RXON_FLG_TGG_PROTECT_MSK) {
836 IWL_WARN(priv, "TGg but no auto-detect\n");
837 errors |= BIT(9);
838 }
839
840 if (rxon->channel == 0) {
841 IWL_WARN(priv, "zero channel is invalid\n");
842 errors |= BIT(10);
843 }
844
845 WARN(errors, "Invalid RXON (%#x), channel %d",
846 errors, le16_to_cpu(rxon->channel));
847
848 return errors ? -EINVAL : 0;
849}
850
851/**
852 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
853 * @priv: staging_rxon is compared to active_rxon
854 *
855 * If the RXON structure is changing enough to require a new tune,
856 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
857 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
858 */
859static int iwl_full_rxon_required(struct iwl_priv *priv,
860 struct iwl_rxon_context *ctx)
861{
862 const struct iwl_rxon_cmd *staging = &ctx->staging;
863 const struct iwl_rxon_cmd *active = &ctx->active;
864
865#define CHK(cond) \
866 if ((cond)) { \
867 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
868 return 1; \
869 }
870
871#define CHK_NEQ(c1, c2) \
872 if ((c1) != (c2)) { \
873 IWL_DEBUG_INFO(priv, "need full RXON - " \
874 #c1 " != " #c2 " - %d != %d\n", \
875 (c1), (c2)); \
876 return 1; \
877 }
878
879 /* These items are only settable from the full RXON command */
880 CHK(!iwl_is_associated_ctx(ctx));
881 CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
882 CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
883 CHK(!ether_addr_equal(staging->wlap_bssid_addr,
884 active->wlap_bssid_addr));
885 CHK_NEQ(staging->dev_type, active->dev_type);
886 CHK_NEQ(staging->channel, active->channel);
887 CHK_NEQ(staging->air_propagation, active->air_propagation);
888 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
889 active->ofdm_ht_single_stream_basic_rates);
890 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
891 active->ofdm_ht_dual_stream_basic_rates);
892 CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
893 active->ofdm_ht_triple_stream_basic_rates);
894 CHK_NEQ(staging->assoc_id, active->assoc_id);
895
896 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
897 * be updated with the RXON_ASSOC command -- however only some
898 * flag transitions are allowed using RXON_ASSOC */
899
900 /* Check if we are not switching bands */
901 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
902 active->flags & RXON_FLG_BAND_24G_MSK);
903
904 /* Check if we are switching association toggle */
905 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
906 active->filter_flags & RXON_FILTER_ASSOC_MSK);
907
908#undef CHK
909#undef CHK_NEQ
910
911 return 0;
912}
913
#ifdef CONFIG_IWLWIFI_DEBUG
/* Dump the context's staging RXON command to the debug log (debug builds only). */
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     enum iwl_rxon_context_id ctxid)
{
	struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
			le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
			le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
			rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
			le16_to_cpu(rxon->assoc_id));
}
#endif
940
941static void iwl_calc_basic_rates(struct iwl_priv *priv,
942 struct iwl_rxon_context *ctx)
943{
944 int lowest_present_ofdm = 100;
945 int lowest_present_cck = 100;
946 u8 cck = 0;
947 u8 ofdm = 0;
948
949 if (ctx->vif) {
950 struct ieee80211_supported_band *sband;
951 unsigned long basic = ctx->vif->bss_conf.basic_rates;
952 int i;
953
954 sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band];
955
956 for_each_set_bit(i, &basic, BITS_PER_LONG) {
957 int hw = sband->bitrates[i].hw_value;
958 if (hw >= IWL_FIRST_OFDM_RATE) {
959 ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
960 if (lowest_present_ofdm > hw)
961 lowest_present_ofdm = hw;
962 } else {
963 BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
964
965 cck |= BIT(hw);
966 if (lowest_present_cck > hw)
967 lowest_present_cck = hw;
968 }
969 }
970 }
971
972 /*
973 * Now we've got the basic rates as bitmaps in the ofdm and cck
974 * variables. This isn't sufficient though, as there might not
975 * be all the right rates in the bitmap. E.g. if the only basic
976 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
977 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
978 *
979 * [...] a STA responding to a received frame shall transmit
980 * its Control Response frame [...] at the highest rate in the
981 * BSSBasicRateSet parameter that is less than or equal to the
982 * rate of the immediately previous frame in the frame exchange
983 * sequence ([...]) and that is of the same modulation class
984 * ([...]) as the received frame. If no rate contained in the
985 * BSSBasicRateSet parameter meets these conditions, then the
986 * control frame sent in response to a received frame shall be
987 * transmitted at the highest mandatory rate of the PHY that is
988 * less than or equal to the rate of the received frame, and
989 * that is of the same modulation class as the received frame.
990 *
991 * As a consequence, we need to add all mandatory rates that are
992 * lower than all of the basic rates to these bitmaps.
993 */
994
995 if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
996 ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
997 if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
998 ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;
999 /* 6M already there or needed so always add */
1000 ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;
1001
1002 /*
1003 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
1004 * Note, however:
1005 * - if no CCK rates are basic, it must be ERP since there must
1006 * be some basic rates at all, so they're OFDM => ERP PHY
1007 * (or we're in 5 GHz, and the cck bitmap will never be used)
1008 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
1009 * - if 5.5M is basic, 1M and 2M are mandatory
1010 * - if 2M is basic, 1M is mandatory
1011 * - if 1M is basic, that's the only valid ACK rate.
1012 * As a consequence, it's not as complicated as it sounds, just add
1013 * any lower rates to the ACK rate bitmap.
1014 */
1015 if (IWL_RATE_11M_INDEX < lowest_present_cck)
1016 cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
1017 if (IWL_RATE_5M_INDEX < lowest_present_cck)
1018 cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
1019 if (IWL_RATE_2M_INDEX < lowest_present_cck)
1020 cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
1021 /* 1M already there or needed so always add */
1022 cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
1023
1024 IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
1025 cck, ofdm);
1026
1027 /* "basic_rates" is a misnomer here -- should be called ACK rates */
1028 ctx->staging.cck_basic_rates = cck;
1029 ctx->staging.ofdm_basic_rates = ofdm;
1030}
1031
1032/**
1033 * iwlagn_commit_rxon - commit staging_rxon to hardware
1034 *
1035 * The RXON command in staging_rxon is committed to the hardware and
1036 * the active_rxon structure is updated with the new data. This
1037 * function correctly transitions out of the RXON_ASSOC_MSK state if
1038 * a HW tune is required based on the RXON structure changes.
1039 *
1040 * The connect/disconnect flow should be as the following:
1041 *
1042 * 1. make sure send RXON command with association bit unset if not connect
1043 * this should include the channel and the band for the candidate
1044 * to be connected to
1045 * 2. Add Station before RXON association with the AP
1046 * 3. RXON_timing has to send before RXON for connection
1047 * 4. full RXON command - associated bit set
1048 * 5. use RXON_ASSOC command to update any flags changes
1049 */
1050int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1051{
1052 /* cast away the const for active_rxon in this function */
1053 struct iwl_rxon_cmd *active = (void *)&ctx->active;
1054 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1055 int ret;
1056
1057 lockdep_assert_held(&priv->mutex);
1058
1059 if (!iwl_is_alive(priv))
1060 return -EBUSY;
1061
1062 /* This function hardcodes a bunch of dual-mode assumptions */
1063 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
1064
1065 if (!ctx->is_active)
1066 return 0;
1067
1068 /* always get timestamp with Rx frame */
1069 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1070
1071 /* recalculate basic rates */
1072 iwl_calc_basic_rates(priv, ctx);
1073
1074 /*
1075 * force CTS-to-self frames protection if RTS-CTS is not preferred
1076 * one aggregation protection method
1077 */
1078 if (!priv->hw_params.use_rts_for_aggregation)
1079 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1080
1081 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
1082 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
1083 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
1084 else
1085 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1086
1087 iwl_print_rx_config_cmd(priv, ctx->ctxid);
1088 ret = iwl_check_rxon_cmd(priv, ctx);
1089 if (ret) {
1090 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1091 return -EINVAL;
1092 }
1093
1094 /*
1095 * receive commit_rxon request
1096 * abort any previous channel switch if still in process
1097 */
1098 if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
1099 (priv->switch_channel != ctx->staging.channel)) {
1100 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1101 le16_to_cpu(priv->switch_channel));
1102 iwl_chswitch_done(priv, false);
1103 }
1104
1105 /*
1106 * If we don't need to send a full RXON, we can use
1107 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1108 * and other flags for the current radio configuration.
1109 */
1110 if (!iwl_full_rxon_required(priv, ctx)) {
1111 ret = iwlagn_send_rxon_assoc(priv, ctx);
1112 if (ret) {
1113 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1114 return ret;
1115 }
1116
1117 memcpy(active, &ctx->staging, sizeof(*active));
1118 /*
1119 * We do not commit tx power settings while channel changing,
1120 * do it now if after settings changed.
1121 */
1122 iwl_set_tx_power(priv, priv->tx_power_next, false);
1123
1124 /* make sure we are in the right PS state */
1125 iwl_power_update_mode(priv, true);
1126
1127 return 0;
1128 }
1129
1130 iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto);
1131
1132 IWL_DEBUG_INFO(priv,
1133 "Going to commit RXON\n"
1134 " * with%s RXON_FILTER_ASSOC_MSK\n"
1135 " * channel = %d\n"
1136 " * bssid = %pM\n",
1137 (new_assoc ? "" : "out"),
1138 le16_to_cpu(ctx->staging.channel),
1139 ctx->staging.bssid_addr);
1140
1141 /*
1142 * Always clear associated first, but with the correct config.
1143 * This is required as for example station addition for the
1144 * AP station must be done after the BSSID is set to correctly
1145 * set up filters in the device.
1146 */
1147 ret = iwlagn_rxon_disconn(priv, ctx);
1148 if (ret)
1149 return ret;
1150
1151 ret = iwlagn_set_pan_params(priv);
1152 if (ret)
1153 return ret;
1154
1155 if (new_assoc)
1156 return iwlagn_rxon_connect(priv, ctx);
1157
1158 return 0;
1159}
1160
1161void iwlagn_config_ht40(struct ieee80211_conf *conf,
1162 struct iwl_rxon_context *ctx)
1163{
1164 if (conf_is_ht40_minus(conf)) {
1165 ctx->ht.extension_chan_offset =
1166 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1167 ctx->ht.is_40mhz = true;
1168 } else if (conf_is_ht40_plus(conf)) {
1169 ctx->ht.extension_chan_offset =
1170 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1171 ctx->ht.is_40mhz = true;
1172 } else {
1173 ctx->ht.extension_chan_offset =
1174 IEEE80211_HT_PARAM_CHA_SEC_NONE;
1175 ctx->ht.is_40mhz = false;
1176 }
1177}
1178
/*
 * iwlagn_mac_config - mac80211 .config callback
 *
 * Handles SMPS/channel changes (recomputes the RXON chain settings and
 * per-context channel/HT40/band flags), PS and idle changes (power
 * mode), and TX power changes, then commits every context whose
 * staging RXON now differs from the active one.  Config changes are
 * ignored while scanning or when the device is not ready.
 */
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	int ret = 0;

	IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);

	mutex_lock(&priv->mutex);

	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
		goto out;
	}

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		priv->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		for_each_context(priv, ctx)
			iwlagn_set_rxon_chain(priv, ctx);
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		for_each_context(priv, ctx) {
			/* Configure HT40 channels */
			if (ctx->ht.enabled != conf_is_ht(conf))
				ctx->ht.enabled = conf_is_ht(conf);

			if (ctx->ht.enabled) {
				/* if HT40 is used, it should not change
				 * after associated except channel switch */
				if (!ctx->ht.is_40mhz ||
						!iwl_is_associated_ctx(ctx))
					iwlagn_config_ht40(conf, ctx);
			} else
				ctx->ht.is_40mhz = false;

			/*
			 * Default to no protection. Protection mode will
			 * later be set from BSS config in iwl_ht_conf
			 */
			ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if (le16_to_cpu(ctx->staging.channel) !=
			    channel->hw_value)
				ctx->staging.flags = 0;

			iwl_set_rxon_channel(priv, channel, ctx);
			iwl_set_rxon_ht(priv, &priv->current_ht_config);

			iwl_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
		}

		iwl_update_bcast_stations(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
			IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
			priv->tx_power_user_lmt, conf->power_level);

		iwl_set_tx_power(priv, conf->power_level, false);
	}

	/* commit every context whose staging RXON changed above */
	for_each_context(priv, ctx) {
		if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			continue;
		iwlagn_commit_rxon(priv, ctx);
	}
 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
1280
/*
 * iwlagn_check_needed_chains - decide whether multiple chains are needed
 *
 * For a station interface, inspects the peer's HT MCS capabilities to
 * determine whether it can transmit 2- or 3-stream rates; IBSS
 * currently never needs multiple chains and AP always does.  The
 * per-context result is stored in ctx->ht_need_multiple_chains, and
 * combined across all contexts into
 * priv->current_ht_config.single_chain_sufficient.
 */
static void iwlagn_check_needed_chains(struct iwl_priv *priv,
				       struct iwl_rxon_context *ctx,
				       struct ieee80211_bss_conf *bss_conf)
{
	struct ieee80211_vif *vif = ctx->vif;
	struct iwl_rxon_context *tmp;
	struct ieee80211_sta *sta;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta_ht_cap *ht_cap;
	bool need_multiple;

	lockdep_assert_held(&priv->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		/* sta lookup must happen under RCU */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (!sta) {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			need_multiple = false;
			rcu_read_unlock();
			break;
		}

		ht_cap = &sta->ht_cap;

		need_multiple = true;

		/*
		 * If the peer advertises no support for receiving 2 and 3
		 * stream MCS rates, it can't be transmitting them either.
		 */
		if (ht_cap->mcs.rx_mask[1] == 0 &&
		    ht_cap->mcs.rx_mask[2] == 0) {
			need_multiple = false;
		} else if (!(ht_cap->mcs.tx_params &
						IEEE80211_HT_MCS_TX_DEFINED)) {
			/* If it can't TX MCS at all ... */
			need_multiple = false;
		} else if (ht_cap->mcs.tx_params &
						IEEE80211_HT_MCS_TX_RX_DIFF) {
			int maxstreams;

			/*
			 * But if it can receive them, it might still not
			 * be able to transmit them, which is what we need
			 * to check here -- so check the number of streams
			 * it advertises for TX (if different from RX).
			 */

			maxstreams = (ht_cap->mcs.tx_params &
				 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
			maxstreams >>=
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if (maxstreams <= 1)
				need_multiple = false;
		}

		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		/* currently */
		need_multiple = false;
		break;
	default:
		/* only AP really */
		need_multiple = true;
		break;
	}

	ctx->ht_need_multiple_chains = need_multiple;

	if (!need_multiple) {
		/* check all contexts */
		for_each_context(priv, tmp) {
			if (!tmp->vif)
				continue;
			if (tmp->ht_need_multiple_chains) {
				need_multiple = true;
				break;
			}
		}
	}

	/* single chain suffices only if no context needs multiple */
	ht_conf->single_chain_sufficient = !need_multiple;
}
1374
1375static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1376{
1377 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1378 int ret;
1379
1380 if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED))
1381 return;
1382
1383 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
1384 iwl_is_any_associated(priv)) {
1385 struct iwl_calib_chain_noise_reset_cmd cmd;
1386
1387 /* clear data for chain noise calibration algorithm */
1388 data->chain_noise_a = 0;
1389 data->chain_noise_b = 0;
1390 data->chain_noise_c = 0;
1391 data->chain_signal_a = 0;
1392 data->chain_signal_b = 0;
1393 data->chain_signal_c = 0;
1394 data->beacon_count = 0;
1395
1396 memset(&cmd, 0, sizeof(cmd));
1397 iwl_set_calib_hdr(&cmd.hdr,
1398 priv->phy_calib_chain_noise_reset_cmd);
1399 ret = iwl_dvm_send_cmd_pdu(priv,
1400 REPLY_PHY_CALIBRATION_CMD,
1401 CMD_SYNC, sizeof(cmd), &cmd);
1402 if (ret)
1403 IWL_ERR(priv,
1404 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
1405 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1406 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
1407 }
1408}
1409
/*
 * mac80211 bss_info_changed handler: fold BSS configuration changes
 * (association, QoS, HT operation mode, CTS protection, beaconing,
 * IBSS membership, ...) into the context's staging RXON and commit it
 * to the firmware when it differs from the active configuration.
 */
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf,
			     u32 changes)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int ret;
	/* force: commit RXON even when staging equals active */
	bool force = false;

	mutex_lock(&priv->mutex);

	/* Nothing can be done before the device is ready. */
	if (unlikely(!iwl_is_ready(priv))) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (unlikely(!ctx->vif)) {
		IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/* A beacon interval change always requires a fresh RXON commit. */
	if (changes & BSS_CHANGED_BEACON_INT)
		force = true;

	if (changes & BSS_CHANGED_QOS) {
		ctx->qos_data.qos_active = bss_conf->qos;
		iwlagn_update_qos(priv, ctx);
	}

	/* Mirror AID and short-preamble setting into staging RXON. */
	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (changes & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->sync_tsf;
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		} else {
			/*
			 * If we disassociate while there are pending
			 * frames, just wake up the queues and let the
			 * frames "escape" ... This shouldn't really
			 * be happening to start with, but we should
			 * not get stuck in this case either since it
			 * can happen if userspace gets confused.
			 */
			iwlagn_lift_passive_no_rx(priv);

			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;

			if (ctx->ctxid == IWL_RXON_CTX_BSS)
				priv->have_rekey_data = false;
		}

		iwlagn_bt_coex_rssi_monitor(priv);
	}

	if (ctx->ht.enabled) {
		ctx->ht.protection = bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_PROTECTION;
		ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
		iwlagn_check_needed_chains(priv, ctx, bss_conf);
		iwl_set_rxon_ht(priv, &priv->current_ht_config);
	}

	iwlagn_set_rxon_chain(priv, ctx);

	/* TGG (11g) protection is only meaningful outside the 5 GHz band. */
	if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
		ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;

	if (bss_conf->use_cts_prot)
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
	else
		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;

	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

	/* Track which context is beaconing (AP/IBSS only). */
	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		if (vif->bss_conf.enable_beacon) {
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = ctx;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = NULL;
		}
	}

	/*
	 * If the ucode decides to do beacon filtering before
	 * association, it will lose beacons that are needed
	 * before sending frames out on passive channels. This
	 * causes association failures on those channels. Enable
	 * receiving beacons in such cases.
	 */

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!bss_conf->assoc)
			ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
		else
			ctx->staging.filter_flags &=
				~RXON_FILTER_BCON_AWARE_MSK;
	}

	/* Push the staging RXON to the device if anything changed. */
	if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
		iwlagn_commit_rxon(priv, ctx);

	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
		/*
		 * The chain noise calibration will enable PM upon
		 * completion. If calibration has already been run
		 * then we need to enable power management here.
		 */
		if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
			iwl_power_update_mode(priv, false);

		/* Enable RX differential gain and sensitivity calibrations */
		iwlagn_chain_noise_reset(priv);
		priv->start_calib = 1;
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = iwlagn_manage_ibss_station(priv, vif,
						 bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
	    priv->beacon_ctx) {
		if (iwlagn_update_beacon(priv, vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");
	}

	mutex_unlock(&priv->mutex);
}
1556
1557void iwlagn_post_scan(struct iwl_priv *priv)
1558{
1559 struct iwl_rxon_context *ctx;
1560
1561 /*
1562 * We do not commit power settings while scan is pending,
1563 * do it now if the settings changed.
1564 */
1565 iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
1566 iwl_set_tx_power(priv, priv->tx_power_next, false);
1567
1568 /*
1569 * Since setting the RXON may have been deferred while
1570 * performing the scan, fire one off if needed
1571 */
1572 for_each_context(priv, ctx)
1573 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1574 iwlagn_commit_rxon(priv, ctx);
1575
1576 iwlagn_set_pan_params(priv);
1577}
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
deleted file mode 100644
index 610ed2204e1..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ /dev/null
@@ -1,1188 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "dev.h"
34#include "agn.h"
35
36/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
37 * sending probe req. This should be set long enough to hear probe responses
38 * from more than one AP. */
39#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
40#define IWL_ACTIVE_DWELL_TIME_52 (20)
41
42#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
43#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
44
45/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
46 * Must be set longer than active dwell time.
47 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
48#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
49#define IWL_PASSIVE_DWELL_TIME_52 (10)
50#define IWL_PASSIVE_DWELL_BASE (100)
51#define IWL_CHANNEL_TUNE_TIME 5
52#define MAX_SCAN_CHANNEL 50
53
54/* For reset radio, need minimal dwell time only */
55#define IWL_RADIO_RESET_DWELL_TIME 5
56
57static int iwl_send_scan_abort(struct iwl_priv *priv)
58{
59 int ret;
60 struct iwl_host_cmd cmd = {
61 .id = REPLY_SCAN_ABORT_CMD,
62 .flags = CMD_SYNC | CMD_WANT_SKB,
63 };
64 __le32 *status;
65
66 /* Exit instantly with error when device is not ready
67 * to receive scan abort command or it does not perform
68 * hardware scan currently */
69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_SCAN_HW, &priv->status) ||
71 test_bit(STATUS_FW_ERROR, &priv->status))
72 return -EIO;
73
74 ret = iwl_dvm_send_cmd(priv, &cmd);
75 if (ret)
76 return ret;
77
78 status = (void *)cmd.resp_pkt->data;
79 if (*status != CAN_ABORT_STATUS) {
80 /* The scan abort will return 1 for success or
81 * 2 for "failure". A failure condition can be
82 * due to simply not being in an active scan which
83 * can occur if we send the scan abort before we
84 * the microcode has notified us that a scan is
85 * completed. */
86 IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n",
87 le32_to_cpu(*status));
88 ret = -EIO;
89 }
90
91 iwl_free_resp(&cmd);
92 return ret;
93}
94
95static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
96{
97 /* check if scan was requested from mac80211 */
98 if (priv->scan_request) {
99 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
100 ieee80211_scan_completed(priv->hw, aborted);
101 }
102
103 if (priv->scan_type == IWL_SCAN_ROC)
104 iwl_scan_roc_expired(priv);
105
106 priv->scan_type = IWL_SCAN_NORMAL;
107 priv->scan_vif = NULL;
108 priv->scan_request = NULL;
109}
110
/*
 * Finish a scan.  Runs from the scan_completed work, or inline from
 * iwl_scan_cancel_timeout().  STATUS_SCAN_COMPLETE acts as a run-once
 * latch: when the completion was already processed inline, the later
 * background work returns immediately here.
 *
 * Must be called with priv->mutex held.
 */
static void iwl_process_scan_complete(struct iwl_priv *priv)
{
	bool aborted;

	lockdep_assert_held(&priv->mutex);

	if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->status))
		return;

	IWL_DEBUG_SCAN(priv, "Completed scan.\n");

	/* The stuck-scan watchdog is no longer needed. */
	cancel_delayed_work(&priv->scan_check);

	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	if (aborted)
		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");

	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
		goto out_settings;
	}

	if (priv->scan_type == IWL_SCAN_ROC)
		iwl_scan_roc_expired(priv);

	/*
	 * An internal (radio reset / ROC) scan that finished normally may
	 * have preempted a mac80211 request; start that one now.
	 */
	if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
		int err;

		/* Check if mac80211 requested scan during our internal scan */
		if (priv->scan_request == NULL)
			goto out_complete;

		/* If so request a new scan */
		err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
					priv->scan_request->channels[0]->band);
		if (err) {
			IWL_DEBUG_SCAN(priv,
				"failed to initiate pending scan: %d\n", err);
			aborted = true;
			goto out_complete;
		}

		/* New scan started; don't complete the old one yet. */
		return;
	}

out_complete:
	iwl_complete_scan(priv, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!iwl_is_ready_rf(priv))
		return;

	/* Flush settings that were deferred while scanning. */
	iwlagn_post_scan(priv);
}
166
167void iwl_force_scan_end(struct iwl_priv *priv)
168{
169 lockdep_assert_held(&priv->mutex);
170
171 if (!test_bit(STATUS_SCANNING, &priv->status)) {
172 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
173 return;
174 }
175
176 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
177 clear_bit(STATUS_SCANNING, &priv->status);
178 clear_bit(STATUS_SCAN_HW, &priv->status);
179 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
180 clear_bit(STATUS_SCAN_COMPLETE, &priv->status);
181 iwl_complete_scan(priv, true);
182}
183
184static void iwl_do_scan_abort(struct iwl_priv *priv)
185{
186 int ret;
187
188 lockdep_assert_held(&priv->mutex);
189
190 if (!test_bit(STATUS_SCANNING, &priv->status)) {
191 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
192 return;
193 }
194
195 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
196 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
197 return;
198 }
199
200 ret = iwl_send_scan_abort(priv);
201 if (ret) {
202 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
203 iwl_force_scan_end(priv);
204 } else
205 IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
206}
207
208/**
209 * iwl_scan_cancel - Cancel any currently executing HW scan
210 */
211int iwl_scan_cancel(struct iwl_priv *priv)
212{
213 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
214 queue_work(priv->workqueue, &priv->abort_scan);
215 return 0;
216}
217
218/**
219 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
220 * @ms: amount of time to wait (in milliseconds) for scan to abort
221 *
222 */
223void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
224{
225 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
226
227 lockdep_assert_held(&priv->mutex);
228
229 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
230
231 iwl_do_scan_abort(priv);
232
233 while (time_before_eq(jiffies, timeout)) {
234 if (!test_bit(STATUS_SCAN_HW, &priv->status))
235 goto finished;
236 msleep(20);
237 }
238
239 return;
240
241 finished:
242 /*
243 * Now STATUS_SCAN_HW is clear. This means that the
244 * device finished, but the background work is going
245 * to execute at best as soon as we release the mutex.
246 * Since we need to be able to issue a new scan right
247 * after this function returns, run the complete here.
248 * The STATUS_SCAN_COMPLETE bit will then be cleared
249 * and prevent the background work from "completing"
250 * a possible new scan.
251 */
252 iwl_process_scan_complete(priv);
253}
254
/* Service response to REPLY_SCAN_CMD (0x80) */
static int iwl_rx_reply_scan(struct iwl_priv *priv,
			     struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanreq_notification *req = (void *)pkt->data;

	/* Nothing to do here besides logging the firmware's status. */
	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", req->status);
#endif
	return 0;
}
268
269/* Service SCAN_START_NOTIFICATION (0x82) */
270static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
271 struct iwl_rx_cmd_buffer *rxb,
272 struct iwl_device_cmd *cmd)
273{
274 struct iwl_rx_packet *pkt = rxb_addr(rxb);
275 struct iwl_scanstart_notification *notif = (void *)pkt->data;
276
277 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
278 IWL_DEBUG_SCAN(priv, "Scan start: "
279 "%d [802.11%s] "
280 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
281 notif->channel,
282 notif->band ? "bg" : "a",
283 le32_to_cpu(notif->tsf_high),
284 le32_to_cpu(notif->tsf_low),
285 notif->status, notif->beacon_timer);
286
287 if (priv->scan_type == IWL_SCAN_ROC &&
288 !priv->hw_roc_start_notified) {
289 ieee80211_ready_on_channel(priv->hw);
290 priv->hw_roc_start_notified = true;
291 }
292
293 return 0;
294}
295
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
				     struct iwl_rx_cmd_buffer *rxb,
				     struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanresults_notification *res = (void *)pkt->data;

	/* Debug-only: log per-channel scan results from the firmware. */
	IWL_DEBUG_SCAN(priv, "Scan ch.res: "
		       "%d [802.11%s] "
		       "probe status: %u:%u "
		       "(TSF: 0x%08X:%08X) - %d "
		       "elapsed=%lu usec\n",
		       res->channel,
		       res->band ? "bg" : "a",
		       res->probe_status, res->num_probe_not_sent,
		       le32_to_cpu(res->tsf_high),
		       le32_to_cpu(res->tsf_low),
		       le32_to_cpu(res->statistics[0]),
		       le32_to_cpu(res->tsf_low) - priv->scan_start_tsf);
#endif
	return 0;
}
320
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
				      struct iwl_rx_cmd_buffer *rxb,
				      struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;

	IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
		       scan_notif->scanned_channels,
		       scan_notif->tsf_low,
		       scan_notif->tsf_high, scan_notif->status);

	IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
		       (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
		       jiffies_to_msecs(jiffies - priv->scan_start));

	/*
	 * When aborting, we run the scan completed background work inline
	 * and the background work must then do nothing. The SCAN_COMPLETE
	 * bit helps implement that logic and thus needs to be set before
	 * queueing the work. Also, since the scan abort waits for SCAN_HW
	 * to clear, we need to set SCAN_COMPLETE before clearing SCAN_HW
	 * to avoid a race there.
	 */
	set_bit(STATUS_SCAN_COMPLETE, &priv->status);
	clear_bit(STATUS_SCAN_HW, &priv->status);
	queue_work(priv->workqueue, &priv->scan_completed);

	/*
	 * The notification also reports the firmware's view of BT state;
	 * when it changed, update the BT traffic-load estimate and kick
	 * the BT traffic-change work.
	 */
	if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
	    iwl_advanced_bt_coexist(priv) &&
	    priv->bt_status != scan_notif->bt_status) {
		if (scan_notif->bt_status) {
			/* BT on */
			if (!priv->bt_ch_announce)
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
			/*
			 * otherwise, no traffic load information provided
			 * no changes made
			 */
		} else {
			/* BT off */
			priv->bt_traffic_load =
				IWL_BT_COEX_TRAFFIC_LOAD_NONE;
		}
		priv->bt_status = scan_notif->bt_status;
		queue_work(priv->workqueue,
			   &priv->bt_traffic_change_work);
	}
	return 0;
}
373
374void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
375{
376 /* scan handlers */
377 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
378 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
379 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
380 iwl_rx_scan_results_notif;
381 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
382 iwl_rx_scan_complete_notif;
383}
384
385static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
386 enum ieee80211_band band, u8 n_probes)
387{
388 if (band == IEEE80211_BAND_5GHZ)
389 return IWL_ACTIVE_DWELL_TIME_52 +
390 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
391 else
392 return IWL_ACTIVE_DWELL_TIME_24 +
393 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
394}
395
/*
 * Clamp a requested dwell time against the beacon interval(s) of the
 * currently active context(s), so that scanning does not starve the
 * association(s).  Returns the (possibly reduced) dwell time.
 */
static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
{
	struct iwl_rxon_context *ctx;
	int limits[NUM_IWL_RXON_CTX] = {};
	int n_active = 0;
	u16 limit;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	/*
	 * If we're associated, we clamp the dwell time 98%
	 * of the beacon interval (minus 2 * channel tune time)
	 * If both contexts are active, we have to restrict to
	 * 1/2 of the minimum of them, because they might be in
	 * lock-step with the time inbetween only half of what
	 * time we'd have in each of them.
	 */
	for_each_context(priv, ctx) {
		switch (ctx->staging.dev_type) {
		case RXON_DEV_TYPE_P2P:
			/* no timing constraints */
			continue;
		case RXON_DEV_TYPE_ESS:
		default:
			/* timing constraints if associated */
			if (!iwl_is_associated_ctx(ctx))
				continue;
			break;
		case RXON_DEV_TYPE_CP:
		case RXON_DEV_TYPE_2STA:
			/*
			 * These seem to always have timers for TBTT
			 * active in uCode even when not associated yet.
			 */
			break;
		}

		/* A zero beacon interval falls back to the passive base. */
		limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE;
	}

	switch (n_active) {
	case 0:
		/* No constrained context: keep the requested dwell. */
		return dwell_time;
	case 2:
		limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
		limit /= 2;
		dwell_time = min(limit, dwell_time);
		/* fall through to limit further */
	case 1:
		limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
		limit /= n_active;
		return min(limit, dwell_time);
	default:
		WARN_ON_ONCE(1);
		return dwell_time;
	}
}
453
454static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
455 enum ieee80211_band band)
456{
457 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
458 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
459 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
460
461 return iwl_limit_dwell(priv, passive);
462}
463
464/* Return valid, unused, channel for a passive scan to reset the RF */
465static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
466 enum ieee80211_band band)
467{
468 struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
469 struct iwl_rxon_context *ctx;
470 int i;
471
472 for (i = 0; i < sband->n_channels; i++) {
473 bool busy = false;
474
475 for_each_context(priv, ctx) {
476 busy = sband->channels[i].hw_value ==
477 le16_to_cpu(ctx->staging.channel);
478 if (busy)
479 break;
480 }
481
482 if (busy)
483 continue;
484
485 if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED))
486 return sband->channels[i].hw_value;
487 }
488
489 return 0;
490}
491
492static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
493 struct ieee80211_vif *vif,
494 enum ieee80211_band band,
495 struct iwl_scan_channel *scan_ch)
496{
497 const struct ieee80211_supported_band *sband;
498 u16 channel;
499
500 sband = iwl_get_hw_mode(priv, band);
501 if (!sband) {
502 IWL_ERR(priv, "invalid band\n");
503 return 0;
504 }
505
506 channel = iwl_get_single_channel_number(priv, band);
507 if (channel) {
508 scan_ch->channel = cpu_to_le16(channel);
509 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
510 scan_ch->active_dwell =
511 cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
512 scan_ch->passive_dwell =
513 cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
514 /* Set txpower levels to defaults */
515 scan_ch->dsp_atten = 110;
516 if (band == IEEE80211_BAND_5GHZ)
517 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
518 else
519 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
520 return 1;
521 }
522
523 IWL_ERR(priv, "no valid channel found\n");
524 return 0;
525}
526
/*
 * Build the channel list for a mac80211-requested scan from
 * priv->scan_request, restricted to @band.  Fills @scan_ch (one entry
 * per channel) and returns the number of channels added.
 */
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_get_passive_dwell_time(priv, band);

	/* Passive dwell must always exceed active dwell. */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		/* Only add channels belonging to the requested band. */
		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		/* Passive-only channels never get active scanning. */
		if (!is_active || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
			       channel, le32_to_cpu(scan_ch->type),
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				"ACTIVE" : "PASSIVE",
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
596
597/**
598 * iwl_fill_probe_req - fill in all required fields and IE for probe request
599 */
600
601static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
602 const u8 *ies, int ie_len, const u8 *ssid,
603 u8 ssid_len, int left)
604{
605 int len = 0;
606 u8 *pos = NULL;
607
608 /* Make sure there is enough space for the probe request,
609 * two mandatory IEs and the data */
610 left -= 24;
611 if (left < 0)
612 return 0;
613
614 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
615 eth_broadcast_addr(frame->da);
616 memcpy(frame->sa, ta, ETH_ALEN);
617 eth_broadcast_addr(frame->bssid);
618 frame->seq_ctrl = 0;
619
620 len += 24;
621
622 /* ...next IE... */
623 pos = &frame->u.probe_req.variable[0];
624
625 /* fill in our SSID IE */
626 left -= ssid_len + 2;
627 if (left < 0)
628 return 0;
629 *pos++ = WLAN_EID_SSID;
630 *pos++ = ssid_len;
631 if (ssid && ssid_len) {
632 memcpy(pos, ssid, ssid_len);
633 pos += ssid_len;
634 }
635
636 len += ssid_len + 2;
637
638 if (WARN_ON(left < ie_len))
639 return len;
640
641 if (ies && ie_len) {
642 memcpy(pos, ies, ie_len);
643 len += ie_len;
644 }
645
646 return (u16)len;
647}
648
649static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
650{
651 struct iwl_host_cmd cmd = {
652 .id = REPLY_SCAN_CMD,
653 .len = { sizeof(struct iwl_scan_cmd), },
654 .flags = CMD_SYNC,
655 };
656 struct iwl_scan_cmd *scan;
657 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
658 u32 rate_flags = 0;
659 u16 cmd_len = 0;
660 u16 rx_chain = 0;
661 enum ieee80211_band band;
662 u8 n_probes = 0;
663 u8 rx_ant = priv->nvm_data->valid_rx_ant;
664 u8 rate;
665 bool is_active = false;
666 int chan_mod;
667 u8 active_chains;
668 u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant;
669 int ret;
670 int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
671 MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
672 priv->fw->ucode_capa.max_probe_length;
673 const u8 *ssid = NULL;
674 u8 ssid_len = 0;
675
676 if (WARN_ON(priv->scan_type == IWL_SCAN_NORMAL &&
677 (!priv->scan_request ||
678 priv->scan_request->n_channels > MAX_SCAN_CHANNEL)))
679 return -EINVAL;
680
681 lockdep_assert_held(&priv->mutex);
682
683 if (vif)
684 ctx = iwl_rxon_ctx_from_vif(vif);
685
686 if (!priv->scan_cmd) {
687 priv->scan_cmd = kmalloc(scan_cmd_size, GFP_KERNEL);
688 if (!priv->scan_cmd) {
689 IWL_DEBUG_SCAN(priv,
690 "fail to allocate memory for scan\n");
691 return -ENOMEM;
692 }
693 }
694 scan = priv->scan_cmd;
695 memset(scan, 0, scan_cmd_size);
696
697 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
698 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
699
700 if (priv->scan_type != IWL_SCAN_ROC &&
701 iwl_is_any_associated(priv)) {
702 u16 interval = 0;
703 u32 extra;
704 u32 suspend_time = 100;
705 u32 scan_suspend_time = 100;
706
707 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
708 switch (priv->scan_type) {
709 case IWL_SCAN_ROC:
710 WARN_ON(1);
711 break;
712 case IWL_SCAN_RADIO_RESET:
713 interval = 0;
714 break;
715 case IWL_SCAN_NORMAL:
716 interval = vif->bss_conf.beacon_int;
717 break;
718 }
719
720 scan->suspend_time = 0;
721 scan->max_out_time = cpu_to_le32(200 * 1024);
722 if (!interval)
723 interval = suspend_time;
724
725 extra = (suspend_time / interval) << 22;
726 scan_suspend_time = (extra |
727 ((suspend_time % interval) * 1024));
728 scan->suspend_time = cpu_to_le32(scan_suspend_time);
729 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
730 scan_suspend_time, interval);
731 } else if (priv->scan_type == IWL_SCAN_ROC) {
732 scan->suspend_time = 0;
733 scan->max_out_time = 0;
734 scan->quiet_time = 0;
735 scan->quiet_plcp_th = 0;
736 }
737
738 switch (priv->scan_type) {
739 case IWL_SCAN_RADIO_RESET:
740 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
741 /*
742 * Override quiet time as firmware checks that active
743 * dwell is >= quiet; since we use passive scan it'll
744 * not actually be used.
745 */
746 scan->quiet_time = cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
747 break;
748 case IWL_SCAN_NORMAL:
749 if (priv->scan_request->n_ssids) {
750 int i, p = 0;
751 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
752 /*
753 * The highest priority SSID is inserted to the
754 * probe request template.
755 */
756 ssid_len = priv->scan_request->ssids[0].ssid_len;
757 ssid = priv->scan_request->ssids[0].ssid;
758
759 /*
760 * Invert the order of ssids, the firmware will invert
761 * it back.
762 */
763 for (i = priv->scan_request->n_ssids - 1; i >= 1; i--) {
764 scan->direct_scan[p].id = WLAN_EID_SSID;
765 scan->direct_scan[p].len =
766 priv->scan_request->ssids[i].ssid_len;
767 memcpy(scan->direct_scan[p].ssid,
768 priv->scan_request->ssids[i].ssid,
769 priv->scan_request->ssids[i].ssid_len);
770 n_probes++;
771 p++;
772 }
773 is_active = true;
774 } else
775 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
776 break;
777 case IWL_SCAN_ROC:
778 IWL_DEBUG_SCAN(priv, "Start ROC scan.\n");
779 break;
780 }
781
782 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
783 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
784 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
785
786 switch (priv->scan_band) {
787 case IEEE80211_BAND_2GHZ:
788 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
789 chan_mod = le32_to_cpu(
790 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
791 RXON_FLG_CHANNEL_MODE_MSK)
792 >> RXON_FLG_CHANNEL_MODE_POS;
793 if ((priv->scan_request && priv->scan_request->no_cck) ||
794 chan_mod == CHANNEL_MODE_PURE_40) {
795 rate = IWL_RATE_6M_PLCP;
796 } else {
797 rate = IWL_RATE_1M_PLCP;
798 rate_flags = RATE_MCS_CCK_MSK;
799 }
800 /*
801 * Internal scans are passive, so we can indiscriminately set
802 * the BT ignore flag on 2.4 GHz since it applies to TX only.
803 */
804 if (priv->cfg->bt_params &&
805 priv->cfg->bt_params->advanced_bt_coexist)
806 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
807 break;
808 case IEEE80211_BAND_5GHZ:
809 rate = IWL_RATE_6M_PLCP;
810 break;
811 default:
812 IWL_WARN(priv, "Invalid scan band\n");
813 return -EIO;
814 }
815
816 /*
817 * If active scanning is requested but a certain channel is
818 * marked passive, we can do active scanning if we detect
819 * transmissions.
820 *
821 * There is an issue with some firmware versions that triggers
822 * a sysassert on a "good CRC threshold" of zero (== disabled),
823 * on a radar channel even though this means that we should NOT
824 * send probes.
825 *
826 * The "good CRC threshold" is the number of frames that we
827 * need to receive during our dwell time on a channel before
828 * sending out probes -- setting this to a huge value will
829 * mean we never reach it, but at the same time work around
830 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
831 * here instead of IWL_GOOD_CRC_TH_DISABLED.
832 *
833 * This was fixed in later versions along with some other
834 * scan changes, and the threshold behaves as a flag in those
835 * versions.
836 */
837 if (priv->new_scan_threshold_behaviour)
838 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
839 IWL_GOOD_CRC_TH_DISABLED;
840 else
841 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
842 IWL_GOOD_CRC_TH_NEVER;
843
844 band = priv->scan_band;
845
846 if (band == IEEE80211_BAND_2GHZ &&
847 priv->cfg->bt_params &&
848 priv->cfg->bt_params->advanced_bt_coexist) {
849 /* transmit 2.4 GHz probes only on first antenna */
850 scan_tx_antennas = first_antenna(scan_tx_antennas);
851 }
852
853 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv,
854 priv->scan_tx_ant[band],
855 scan_tx_antennas);
856 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
857 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
858
859 /*
860 * In power save mode while associated use one chain,
861 * otherwise use all chains
862 */
863 if (test_bit(STATUS_POWER_PMI, &priv->status) &&
864 !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) {
865 /* rx_ant has been set to all valid chains previously */
866 active_chains = rx_ant &
867 ((u8)(priv->chain_noise_data.active_chains));
868 if (!active_chains)
869 active_chains = rx_ant;
870
871 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
872 priv->chain_noise_data.active_chains);
873
874 rx_ant = first_antenna(active_chains);
875 }
876 if (priv->cfg->bt_params &&
877 priv->cfg->bt_params->advanced_bt_coexist &&
878 priv->bt_full_concurrent) {
879 /* operated as 1x1 in full concurrency mode */
880 rx_ant = first_antenna(rx_ant);
881 }
882
883 /* MIMO is not used here, but value is required */
884 rx_chain |=
885 priv->nvm_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
886 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
887 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
888 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
889 scan->rx_chain = cpu_to_le16(rx_chain);
890 switch (priv->scan_type) {
891 case IWL_SCAN_NORMAL:
892 cmd_len = iwl_fill_probe_req(
893 (struct ieee80211_mgmt *)scan->data,
894 vif->addr,
895 priv->scan_request->ie,
896 priv->scan_request->ie_len,
897 ssid, ssid_len,
898 scan_cmd_size - sizeof(*scan));
899 break;
900 case IWL_SCAN_RADIO_RESET:
901 case IWL_SCAN_ROC:
902 /* use bcast addr, will not be transmitted but must be valid */
903 cmd_len = iwl_fill_probe_req(
904 (struct ieee80211_mgmt *)scan->data,
905 iwl_bcast_addr, NULL, 0,
906 NULL, 0,
907 scan_cmd_size - sizeof(*scan));
908 break;
909 default:
910 BUG();
911 }
912 scan->tx_cmd.len = cpu_to_le16(cmd_len);
913
914 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
915 RXON_FILTER_BCON_AWARE_MSK);
916
917 switch (priv->scan_type) {
918 case IWL_SCAN_RADIO_RESET:
919 scan->channel_count =
920 iwl_get_channel_for_reset_scan(priv, vif, band,
921 (void *)&scan->data[cmd_len]);
922 break;
923 case IWL_SCAN_NORMAL:
924 scan->channel_count =
925 iwl_get_channels_for_scan(priv, vif, band,
926 is_active, n_probes,
927 (void *)&scan->data[cmd_len]);
928 break;
929 case IWL_SCAN_ROC: {
930 struct iwl_scan_channel *scan_ch;
931 int n_chan, i;
932 u16 dwell;
933
934 dwell = iwl_limit_dwell(priv, priv->hw_roc_duration);
935 n_chan = DIV_ROUND_UP(priv->hw_roc_duration, dwell);
936
937 scan->channel_count = n_chan;
938
939 scan_ch = (void *)&scan->data[cmd_len];
940
941 for (i = 0; i < n_chan; i++) {
942 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
943 scan_ch->channel =
944 cpu_to_le16(priv->hw_roc_channel->hw_value);
945
946 if (i == n_chan - 1)
947 dwell = priv->hw_roc_duration - i * dwell;
948
949 scan_ch->active_dwell =
950 scan_ch->passive_dwell = cpu_to_le16(dwell);
951
952 /* Set txpower levels to defaults */
953 scan_ch->dsp_atten = 110;
954
955 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
956 * power level:
957 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
958 */
959 if (priv->hw_roc_channel->band == IEEE80211_BAND_5GHZ)
960 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
961 else
962 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
963
964 scan_ch++;
965 }
966 }
967
968 break;
969 }
970
971 if (scan->channel_count == 0) {
972 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
973 return -EIO;
974 }
975
976 cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
977 scan->channel_count * sizeof(struct iwl_scan_channel);
978 cmd.data[0] = scan;
979 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
980 scan->len = cpu_to_le16(cmd.len[0]);
981
982 /* set scan bit here for PAN params */
983 set_bit(STATUS_SCAN_HW, &priv->status);
984
985 ret = iwlagn_set_pan_params(priv);
986 if (ret) {
987 clear_bit(STATUS_SCAN_HW, &priv->status);
988 return ret;
989 }
990
991 ret = iwl_dvm_send_cmd(priv, &cmd);
992 if (ret) {
993 clear_bit(STATUS_SCAN_HW, &priv->status);
994 iwlagn_set_pan_params(priv);
995 }
996
997 return ret;
998}
999
1000void iwl_init_scan_params(struct iwl_priv *priv)
1001{
1002 u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
1003 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
1004 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1005 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
1006 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
1007}
1008
/**
 * iwl_scan_initiate - start a scan of the given type
 * @priv: driver private data
 * @vif: interface the scan runs on (NULL for internal radio-reset scans)
 * @scan_type: normal / radio-reset / remain-on-channel
 * @band: band to scan
 *
 * Must be called with priv->mutex held.  Refuses to start while the
 * device is not ready, a HW scan is already running, or an abort is
 * pending.  On success the STATUS_SCANNING bit is set and a watchdog
 * (scan_check) is queued; on failure the scan state is rolled back.
 */
int __must_check iwl_scan_initiate(struct iwl_priv *priv,
				   struct ieee80211_vif *vif,
				   enum iwl_scan_type scan_type,
				   enum ieee80211_band band)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	/* a new scan supersedes any pending watchdog for the old one */
	cancel_delayed_work(&priv->scan_check);

	if (!iwl_is_ready_rf(priv)) {
		IWL_WARN(priv, "Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		IWL_DEBUG_SCAN(priv,
			"Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
		return -EBUSY;
	}

	IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
			scan_type == IWL_SCAN_NORMAL ? "" :
			scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
			"internal short ");

	/* record the scan state before asking the firmware to start */
	set_bit(STATUS_SCANNING, &priv->status);
	priv->scan_type = scan_type;
	priv->scan_start = jiffies;
	priv->scan_band = band;

	ret = iwlagn_request_scan(priv, vif);
	if (ret) {
		/* roll back the state set above */
		clear_bit(STATUS_SCANNING, &priv->status);
		priv->scan_type = IWL_SCAN_NORMAL;
		return ret;
	}

	/* watchdog: force scan end if firmware never reports completion */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);

	return 0;
}
1058
1059
/*
 * Internal short scan; this function should only be called while associated.
 * It will reset and tune the radio to prevent a possible RF related problem.
 */
void iwl_internal_short_hw_scan(struct iwl_priv *priv)
{
	/* defer to the workqueue; the worker takes priv->mutex itself */
	queue_work(priv->workqueue, &priv->start_internal_scan);
}
1068
/* Worker for priv->start_internal_scan: launch a radio-reset scan
 * unless one is already running or any other scan is in flight. */
static void iwl_bg_start_internal_scan(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, start_internal_scan);

	IWL_DEBUG_SCAN(priv, "Start internal scan\n");

	mutex_lock(&priv->mutex);

	/* a radio-reset scan is already the current scan type */
	if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
		IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
		goto unlock;
	}

	/* never preempt a normal or ROC scan in progress */
	if (test_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
		goto unlock;
	}

	if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
		IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
 unlock:
	mutex_unlock(&priv->mutex);
}
1093
/* Watchdog worker (priv->scan_check): runs only when the firmware did
 * not report scan completion within IWL_SCAN_CHECK_WATCHDOG. */
static void iwl_bg_scan_check(struct work_struct *data)
{
	struct iwl_priv *priv =
		container_of(data, struct iwl_priv, scan_check.work);

	IWL_DEBUG_SCAN(priv, "Scan check work\n");

	/* Since we are here firmware does not finish scan and
	 * most likely is in bad shape, so we don't bother to
	 * send abort command, just force scan complete to mac80211 */
	mutex_lock(&priv->mutex);
	iwl_force_scan_end(priv);
	mutex_unlock(&priv->mutex);
}
1108
/* Worker for priv->abort_scan: ask the firmware to cancel the current
 * scan, waiting up to 200ms for the abort to take effect. */
static void iwl_bg_abort_scan(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);

	IWL_DEBUG_SCAN(priv, "Abort scan work\n");

	/* We keep scan_check work queued in case when firmware will not
	 * report back scan completed notification */
	mutex_lock(&priv->mutex);
	iwl_scan_cancel_timeout(priv, 200);
	mutex_unlock(&priv->mutex);
}
1121
/* Worker for priv->scan_completed: finish scan bookkeeping and notify
 * mac80211, under priv->mutex. */
static void iwl_bg_scan_completed(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, scan_completed);

	mutex_lock(&priv->mutex);
	iwl_process_scan_complete(priv);
	mutex_unlock(&priv->mutex);
}
1131
1132void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
1133{
1134 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
1135 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
1136 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
1137 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
1138}
1139
/*
 * iwl_cancel_scan_deferred_work - synchronously cancel all scan work
 *
 * Cancels the scan workers first, then the watchdog; if the watchdog
 * was still pending, the firmware never reported completion, so force
 * the scan to end ourselves.
 */
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->start_internal_scan);
	cancel_work_sync(&priv->abort_scan);
	cancel_work_sync(&priv->scan_completed);

	if (cancel_delayed_work_sync(&priv->scan_check)) {
		mutex_lock(&priv->mutex);
		iwl_force_scan_end(priv);
		mutex_unlock(&priv->mutex);
	}
}
1152
/*
 * iwl_scan_roc_expired - end a remain-on-channel period
 *
 * If aux-queue frames are still in flight, only the ROC_EXPIRED status
 * bit is set and the actual teardown is deferred to
 * iwl_scan_offchannel_skb_status() when the count drains to zero.
 */
void iwl_scan_roc_expired(struct iwl_priv *priv)
{
	/*
	 * The status bit should be set here, to prevent a race
	 * where the atomic_read returns 1, but before the execution continues
	 * iwl_scan_offchannel_skb_status() checks if the status bit is set
	 */
	set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);

	if (atomic_read(&priv->num_aux_in_flight) == 0) {
		/* no frames pending: notify mac80211 and tear down now */
		ieee80211_remain_on_channel_expired(priv->hw);
		priv->hw_roc_channel = NULL;
		schedule_delayed_work(&priv->hw_roc_disable_work,
				      10 * HZ);

		clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
	} else {
		IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
			       atomic_read(&priv->num_aux_in_flight));
	}
}
1174
/* Account one off-channel (aux queue) frame in flight; ROC teardown is
 * deferred until this counter drains back to zero. */
void iwl_scan_offchannel_skb(struct iwl_priv *priv)
{
	WARN_ON(!priv->hw_roc_start_notified);
	atomic_inc(&priv->num_aux_in_flight);
}
1180
/* TX-status counterpart of iwl_scan_offchannel_skb(): when the last
 * aux frame completes and the ROC period already expired, finish the
 * deferred ROC teardown. */
void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
{
	if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
	    test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
		iwl_scan_roc_expired(priv);
	}
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
deleted file mode 100644
index bdba9543c35..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ /dev/null
@@ -1,1486 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <net/mac80211.h>
31#include "iwl-trans.h"
32#include "dev.h"
33#include "agn.h"
34
/* All-ones MAC address used for the broadcast station entry */
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
36
37static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
38{
39 lockdep_assert_held(&priv->sta_lock);
40
41 if (sta_id >= IWLAGN_STATION_COUNT) {
42 IWL_ERR(priv, "invalid sta_id %u", sta_id);
43 return -EINVAL;
44 }
45 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
46 IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u "
47 "addr %pM\n",
48 sta_id, priv->stations[sta_id].sta.sta.addr);
49
50 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
51 IWL_DEBUG_ASSOC(priv,
52 "STA id %u addr %pM already present in uCode "
53 "(according to driver)\n",
54 sta_id, priv->stations[sta_id].sta.sta.addr);
55 } else {
56 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
57 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
58 sta_id, priv->stations[sta_id].sta.sta.addr);
59 }
60 return 0;
61}
62
/*
 * iwl_process_add_sta_resp - handle the uCode response to REPLY_ADD_STA
 *
 * @addsta: the command that was sent (identifies the station)
 * @pkt:    the response packet from the firmware
 *
 * On ADD_STA_SUCCESS_MSK the station is marked uCode-active in the
 * driver's table; all other status codes are logged and -EIO returned.
 */
static int iwl_process_add_sta_resp(struct iwl_priv *priv,
				    struct iwl_addsta_cmd *addsta,
				    struct iwl_rx_packet *pkt)
{
	struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
	u8 sta_id = addsta->sta.sta_id;
	int ret = -EIO;

	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
			pkt->hdr.flags);
		return ret;
	}

	IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
		       sta_id);

	spin_lock(&priv->sta_lock);

	switch (add_sta_resp->status) {
	case ADD_STA_SUCCESS_MSK:
		IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
		ret = iwl_sta_ucode_activate(priv, sta_id);
		break;
	case ADD_STA_NO_ROOM_IN_TABLE:
		IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
			sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IWL_ERR(priv, "Adding station %d failed, no block ack "
			"resource.\n", sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
			sta_id);
		break;
	default:
		IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
				add_sta_resp->status);
		break;
	}

	IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
		       priv->stations[sta_id].sta.mode ==
		       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
		       sta_id, priv->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
		       priv->stations[sta_id].sta.mode ==
		       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
		       addsta->sta.addr);
	spin_unlock(&priv->sta_lock);

	return ret;
}
126
127int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
128 struct iwl_device_cmd *cmd)
129{
130 struct iwl_rx_packet *pkt = rxb_addr(rxb);
131
132 if (!cmd)
133 return 0;
134
135 return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
136}
137
/*
 * iwl_send_add_sta - send REPLY_ADD_STA to the firmware
 *
 * @sta:   prepared ADD_STA command
 * @flags: host-command flags; with CMD_ASYNC the send result is
 *         returned directly, otherwise the response is awaited and the
 *         handler status (from iwl_add_sta_callback) is returned.
 */
int iwl_send_add_sta(struct iwl_priv *priv,
		     struct iwl_addsta_cmd *sta, u8 flags)
{
	int ret = 0;
	struct iwl_host_cmd cmd = {
		.id = REPLY_ADD_STA,
		.flags = flags,
		.data = { sta, },
		.len = { sizeof(*sta), },
	};
	u8 sta_id __maybe_unused = sta->sta.sta_id;

	IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
		       sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");

	if (!(flags & CMD_ASYNC)) {
		/* synchronous: keep the response and command for the
		 * callback to process; sending will block */
		cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
		might_sleep();
	}

	ret = iwl_dvm_send_cmd(priv, &cmd);

	if (ret || (flags & CMD_ASYNC))
		return ret;
	/*else the command was successfully sent in SYNC mode, need to free
	 * the reply page */

	iwl_free_resp(&cmd);

	if (cmd.handler_status)
		IWL_ERR(priv, "%s - error in the CMD response %d", __func__,
			cmd.handler_status);

	return cmd.handler_status;
}
173
174bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
175 struct iwl_rxon_context *ctx,
176 struct ieee80211_sta_ht_cap *ht_cap)
177{
178 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
179 return false;
180
181#ifdef CONFIG_IWLWIFI_DEBUGFS
182 if (priv->disable_ht40)
183 return false;
184#endif
185
186 /*
187 * Remainder of this function checks ht_cap, but if it's
188 * NULL then we can do HT40 (special case for RXON)
189 */
190 if (!ht_cap)
191 return true;
192
193 if (!ht_cap->ht_supported)
194 return false;
195
196 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
197 return false;
198
199 return true;
200}
201
/*
 * iwl_sta_calc_ht_flags - derive ADD_STA HT station flags for @sta
 *
 * Fills @mask with all HT-related flag bits and @flags with the values
 * to set within that mask, based on the peer's HT capabilities (SM PS
 * mode, A-MPDU factor/density) and whether HT40 TX is allowed.  For a
 * NULL or non-HT @sta, @flags stays zero (HT bits cleared via @mask).
 */
static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
				  struct ieee80211_sta *sta,
				  struct iwl_rxon_context *ctx,
				  __le32 *flags, __le32 *mask)
{
	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
	u8 mimo_ps_mode;

	*mask = STA_FLG_RTS_MIMO_PROT_MSK |
		STA_FLG_MIMO_DIS_MSK |
		STA_FLG_HT40_EN_MSK |
		STA_FLG_MAX_AGG_SIZE_MSK |
		STA_FLG_AGG_MPDU_DENSITY_MSK;
	*flags = 0;

	if (!sta || !sta_ht_inf->ht_supported)
		return;

	mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;

	IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
			sta->addr,
			(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
			"static" :
			(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
			"dynamic" : "disabled");

	switch (mimo_ps_mode) {
	case WLAN_HT_CAP_SM_PS_STATIC:
		/* peer cannot receive MIMO: disable it entirely */
		*flags |= STA_FLG_MIMO_DIS_MSK;
		break;
	case WLAN_HT_CAP_SM_PS_DYNAMIC:
		/* peer needs RTS before MIMO frames */
		*flags |= STA_FLG_RTS_MIMO_PROT_MSK;
		break;
	case WLAN_HT_CAP_SM_PS_DISABLED:
		break;
	default:
		IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
		break;
	}

	*flags |= cpu_to_le32(
		(u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);

	*flags |= cpu_to_le32(
		(u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);

	if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
		*flags |= STA_FLG_HT40_EN_MSK;
}
252
253int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
254 struct ieee80211_sta *sta)
255{
256 u8 sta_id = iwl_sta_id(sta);
257 __le32 flags, mask;
258 struct iwl_addsta_cmd cmd;
259
260 if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
261 return -EINVAL;
262
263 iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
264
265 spin_lock_bh(&priv->sta_lock);
266 priv->stations[sta_id].sta.station_flags &= ~mask;
267 priv->stations[sta_id].sta.station_flags |= flags;
268 spin_unlock_bh(&priv->sta_lock);
269
270 memset(&cmd, 0, sizeof(cmd));
271 cmd.mode = STA_CONTROL_MODIFY_MSK;
272 cmd.station_flags_msk = mask;
273 cmd.station_flags = flags;
274 cmd.sta.sta_id = sta_id;
275
276 return iwl_send_add_sta(priv, &cmd, CMD_SYNC);
277}
278
279static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
280 struct ieee80211_sta *sta,
281 struct iwl_rxon_context *ctx)
282{
283 __le32 flags, mask;
284
285 iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
286
287 lockdep_assert_held(&priv->sta_lock);
288 priv->stations[index].sta.station_flags &= ~mask;
289 priv->stations[index].sta.station_flags |= flags;
290}
291
/**
 * iwl_prep_station - Prepare station information for addition
 *
 * Picks (or reuses) a station table slot for @addr, fills in its
 * ADD_STA shadow command and marks it driver-active.  Returns the
 * station id, or IWL_INVALID_STATION if the table is full.
 *
 * should be called with sta_lock held
 */
u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
		    const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
{
	struct iwl_station_entry *station;
	int i;
	u8 sta_id = IWL_INVALID_STATION;

	/* AP and broadcast get fixed context slots; anyone else gets the
	 * existing slot for this addr, or the first free one found */
	if (is_ap)
		sta_id = ctx->ap_sta_id;
	else if (is_broadcast_ether_addr(addr))
		sta_id = ctx->bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) {
			if (ether_addr_equal(priv->stations[i].sta.sta.addr,
					     addr)) {
				sta_id = i;
				break;
			}

			if (!priv->stations[i].used &&
			    sta_id == IWL_INVALID_STATION)
				sta_id = i;
		}

	/* no matching or free slot found: table is full */
	if (unlikely(sta_id == IWL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
		IWL_DEBUG_INFO(priv, "STA %d already in process of being "
			       "added.\n", sta_id);
		return sta_id;
	}

	/* already fully added with the same address: nothing to prepare */
	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
	    ether_addr_equal(priv->stations[sta_id].sta.sta.addr, addr)) {
		IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
				"adding again.\n", sta_id, addr);
		return sta_id;
	}

	station = &priv->stations[sta_id];
	station->used = IWL_STA_DRIVER_ACTIVE;
	IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
			sta_id, addr);
	priv->num_stations++;

	/* Set up the REPLY_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = ctx->station_flags;
	station->ctxid = ctx->ctxid;

	if (sta) {
		struct iwl_station_priv *sta_priv;

		sta_priv = (void *)sta->drv_priv;
		sta_priv->ctx = ctx;
	}

	/*
	 * OK to call unconditionally, since local stations (IBSS BSSID
	 * STA and broadcast STA) pass in a NULL sta, and mac80211
	 * doesn't allow HT IBSS.
	 */
	iwl_set_ht_add_station(priv, sta_id, sta, ctx);

	return sta_id;

}
378
#define STA_WAIT_TIMEOUT (HZ/2)

/**
 * iwl_add_station_common - prepare a station and add it to the device
 *
 * Prepares a slot for @addr via iwl_prep_station() and sends the
 * ADD_STA command synchronously; the resulting station id is stored in
 * @sta_id_r.  Returns -EINVAL if no slot could be prepared, -EEXIST if
 * an add is already in progress or completed, or the send result.
 */
int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			   const u8 *addr, bool is_ap,
			   struct ieee80211_sta *sta, u8 *sta_id_r)
{
	int ret = 0;
	u8 sta_id;
	struct iwl_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_bh(&priv->sta_lock);
	sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
			addr);
		spin_unlock_bh(&priv->sta_lock);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
		IWL_DEBUG_INFO(priv, "STA %d already in process of being "
			       "added.\n", sta_id);
		spin_unlock_bh(&priv->sta_lock);
		return -EEXIST;
	}

	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
		IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
				"adding again.\n", sta_id, addr);
		spin_unlock_bh(&priv->sta_lock);
		return -EEXIST;
	}

	priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
	/* copy the shadow command so we can drop the lock for the
	 * (sleeping) synchronous send below */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
	       sizeof(struct iwl_addsta_cmd));
	spin_unlock_bh(&priv->sta_lock);

	/* Add station to device's station table */
	ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
	if (ret) {
		spin_lock_bh(&priv->sta_lock);
		IWL_ERR(priv, "Adding station %pM failed.\n",
			priv->stations[sta_id].sta.sta.addr);
		priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
		priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
		spin_unlock_bh(&priv->sta_lock);
	}
	*sta_id_r = sta_id;
	return ret;
}
440
441/**
442 * iwl_sta_ucode_deactivate - deactivate ucode status for a station
443 */
444static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
445{
446 lockdep_assert_held(&priv->sta_lock);
447
448 /* Ucode must be active and driver must be non active */
449 if ((priv->stations[sta_id].used &
450 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
451 IWL_STA_UCODE_ACTIVE)
452 IWL_ERR(priv, "removed non active STA %u\n", sta_id);
453
454 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
455
456 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
457 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
458}
459
/*
 * iwl_send_remove_station - send REPLY_REMOVE_STA to the firmware
 *
 * Synchronously removes @addr from the device station table.  Unless
 * @temporary, a successful removal also clears the driver's
 * uCode-active bookkeeping for @sta_id.  Returns 0 or a negative errno.
 */
static int iwl_send_remove_station(struct iwl_priv *priv,
				   const u8 *addr, int sta_id,
				   bool temporary)
{
	struct iwl_rx_packet *pkt;
	int ret;
	struct iwl_rem_sta_cmd rm_sta_cmd;

	struct iwl_host_cmd cmd = {
		.id = REPLY_REMOVE_STA,
		.len = { sizeof(struct iwl_rem_sta_cmd), },
		.flags = CMD_SYNC,
		.data = { &rm_sta_cmd, },
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	/* need the response packet to check the removal status */
	cmd.flags |= CMD_WANT_SKB;

	ret = iwl_dvm_send_cmd(priv, &cmd);

	if (ret)
		return ret;

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
			pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
		switch (rem_sta_resp->status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_bh(&priv->sta_lock);
				iwl_sta_ucode_deactivate(priv, sta_id);
				spin_unlock_bh(&priv->sta_lock);
			}
			IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
			break;
		}
	}
	/* CMD_WANT_SKB: response page must be released by us */
	iwl_free_resp(&cmd);

	return ret;
}
514
/**
 * iwl_remove_station - Remove driver's knowledge of station.
 *
 * Drops the driver-side state for @sta_id/@addr (rate-scale data for
 * local stations, per-TID data, driver-active flag) and then removes
 * the station from the device.  Returns 0 if the device is not ready
 * (it is going down anyway), -EINVAL on inconsistent state, or the
 * result of the firmware removal.
 */
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
		       const u8 *addr)
{
	u8 tid;

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_INFO(priv,
			"Unable to remove station %pM, device not ready.\n",
			addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down. Return success since device will be down
		 * soon anyway
		 */
		return 0;
	}

	IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d  %pM\n",
			sta_id, addr);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return -EINVAL;

	spin_lock_bh(&priv->sta_lock);

	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
		IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
				addr);
		goto out_err;
	}

	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
		IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
				addr);
		goto out_err;
	}

	/* local stations own their link-quality command: free it */
	if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
		kfree(priv->stations[sta_id].lq);
		priv->stations[sta_id].lq = NULL;
	}

	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
		memset(&priv->tid_data[sta_id][tid], 0,
			sizeof(priv->tid_data[sta_id][tid]));

	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;

	priv->num_stations--;

	if (WARN_ON(priv->num_stations < 0))
		priv->num_stations = 0;

	spin_unlock_bh(&priv->sta_lock);

	return iwl_send_remove_station(priv, addr, sta_id, false);
out_err:
	spin_unlock_bh(&priv->sta_lock);
	return -EINVAL;
}
578
/*
 * iwl_deactivate_station - drop driver-side state for a station
 *
 * Like iwl_remove_station() but purely local: clears per-TID data and
 * the driver-active flag without sending REPLY_REMOVE_STA (the uCode
 * side is expected to be cleaned up elsewhere, e.g. by RXON).
 */
void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
			    const u8 *addr)
{
	u8 tid;

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_INFO(priv,
			"Unable to remove station %pM, device not ready.\n",
			addr);
		return;
	}

	IWL_DEBUG_ASSOC(priv, "Deactivating STA: %pM (%d)\n", addr, sta_id);

	if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
		return;

	spin_lock_bh(&priv->sta_lock);

	WARN_ON_ONCE(!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE));

	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
		memset(&priv->tid_data[sta_id][tid], 0,
			sizeof(priv->tid_data[sta_id][tid]));

	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;

	priv->num_stations--;

	if (WARN_ON_ONCE(priv->num_stations < 0))
		priv->num_stations = 0;

	spin_unlock_bh(&priv->sta_lock);
}
613
/*
 * iwl_sta_fill_lq - build a default link-quality command for @sta_id
 *
 * Fills @link_cmd with a conservative rate table: a single start rate
 * (6M on 5 GHz or P2P, otherwise 1M CCK) repeated for every retry
 * slot, plus single/dual-stream antenna masks derived from the NVM
 * valid-TX-antenna set.  Caller must hold priv->mutex.
 */
static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			    u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
{
	int i, r;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	lockdep_assert_held(&priv->mutex);

	memset(link_cmd, 0, sizeof(*link_cmd));

	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (priv->band == IEEE80211_BAND_5GHZ)
		r = IWL_RATE_6M_INDEX;
	else if (ctx && ctx->vif && ctx->vif->p2p)
		r = IWL_RATE_6M_INDEX;
	else
		r = IWL_RATE_1M_INDEX;

	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |= first_antenna(priv->nvm_data->valid_tx_ant) <<
				RATE_MCS_ANT_POS;
	rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
	/* same rate for every retry slot */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
				first_antenna(priv->nvm_data->valid_tx_ant);

	link_cmd->general_params.dual_stream_ant_msk =
		priv->nvm_data->valid_tx_ant &
		~first_antenna(priv->nvm_data->valid_tx_ant);
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
			priv->nvm_data->valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th =
		LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;
}
663
664/**
665 * iwl_clear_ucode_stations - clear ucode station table bits
666 *
667 * This function clears all the bits in the driver indicating
668 * which stations are active in the ucode. Call when something
669 * other than explicit station management would cause this in
670 * the ucode, e.g. unassociated RXON.
671 */
672void iwl_clear_ucode_stations(struct iwl_priv *priv,
673 struct iwl_rxon_context *ctx)
674{
675 int i;
676 bool cleared = false;
677
678 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
679
680 spin_lock_bh(&priv->sta_lock);
681 for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
682 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
683 continue;
684
685 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
686 IWL_DEBUG_INFO(priv,
687 "Clearing ucode active for station %d\n", i);
688 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
689 cleared = true;
690 }
691 }
692 spin_unlock_bh(&priv->sta_lock);
693
694 if (!cleared)
695 IWL_DEBUG_INFO(priv,
696 "No active stations found to be cleared\n");
697}
698
699/**
700 * iwl_restore_stations() - Restore driver known stations to device
701 *
702 * All stations considered active by driver, but not present in ucode, is
703 * restored.
704 *
705 * Function sleeps.
706 */
707void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
708{
709 struct iwl_addsta_cmd sta_cmd;
710 struct iwl_link_quality_cmd lq;
711 int i;
712 bool found = false;
713 int ret;
714 bool send_lq;
715
716 if (!iwl_is_ready(priv)) {
717 IWL_DEBUG_INFO(priv,
718 "Not ready yet, not restoring any stations.\n");
719 return;
720 }
721
722 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
723 spin_lock_bh(&priv->sta_lock);
724 for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
725 if (ctx->ctxid != priv->stations[i].ctxid)
726 continue;
727 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
728 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
729 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
730 priv->stations[i].sta.sta.addr);
731 priv->stations[i].sta.mode = 0;
732 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
733 found = true;
734 }
735 }
736
737 for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
738 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
739 memcpy(&sta_cmd, &priv->stations[i].sta,
740 sizeof(struct iwl_addsta_cmd));
741 send_lq = false;
742 if (priv->stations[i].lq) {
743 if (priv->wowlan)
744 iwl_sta_fill_lq(priv, ctx, i, &lq);
745 else
746 memcpy(&lq, priv->stations[i].lq,
747 sizeof(struct iwl_link_quality_cmd));
748 send_lq = true;
749 }
750 spin_unlock_bh(&priv->sta_lock);
751 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
752 if (ret) {
753 spin_lock_bh(&priv->sta_lock);
754 IWL_ERR(priv, "Adding station %pM failed.\n",
755 priv->stations[i].sta.sta.addr);
756 priv->stations[i].used &=
757 ~IWL_STA_DRIVER_ACTIVE;
758 priv->stations[i].used &=
759 ~IWL_STA_UCODE_INPROGRESS;
760 continue;
761 }
762 /*
763 * Rate scaling has already been initialized, send
764 * current LQ command
765 */
766 if (send_lq)
767 iwl_send_lq_cmd(priv, ctx, &lq,
768 CMD_SYNC, true);
769 spin_lock_bh(&priv->sta_lock);
770 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
771 }
772 }
773
774 spin_unlock_bh(&priv->sta_lock);
775 if (!found)
776 IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
777 "no stations to be restored.\n");
778 else
779 IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
780 "complete.\n");
781}
782
783int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
784{
785 int i;
786
787 for (i = 0; i < priv->sta_key_max_num; i++)
788 if (!test_and_set_bit(i, &priv->ucode_key_table))
789 return i;
790
791 return WEP_INVALID_OFFSET;
792}
793
794void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
795{
796 int i;
797
798 spin_lock_bh(&priv->sta_lock);
799 for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
800 if (!(priv->stations[i].used & IWL_STA_BCAST))
801 continue;
802
803 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
804 priv->num_stations--;
805 if (WARN_ON(priv->num_stations < 0))
806 priv->num_stations = 0;
807 kfree(priv->stations[i].lq);
808 priv->stations[i].lq = NULL;
809 }
810 spin_unlock_bh(&priv->sta_lock);
811}
812
#ifdef CONFIG_IWLWIFI_DEBUG
/* Dump an LQ command's station id, antenna masks and full retry rate
 * table to the debug log (debug builds only). */
static void iwl_dump_lq_cmd(struct iwl_priv *priv,
			    struct iwl_link_quality_cmd *lq)
{
	int i;
	IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
	IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
		       lq->general_params.single_stream_ant_msk,
		       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
			       i, lq->rs_table[i].rate_n_flags);
}
#else
/* No-op stub when debugging support is compiled out. */
static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
				   struct iwl_link_quality_cmd *lq)
{
}
#endif
833
834/**
835 * is_lq_table_valid() - Test one aspect of LQ cmd for validity
836 *
837 * It sometimes happens when a HT rate has been in use and we
838 * loose connectivity with AP then mac80211 will first tell us that the
839 * current channel is not HT anymore before removing the station. In such a
840 * scenario the RXON flags will be updated to indicate we are not
841 * communicating HT anymore, but the LQ command may still contain HT rates.
842 * Test for this to prevent driver from sending LQ command between the time
843 * RXON flags are updated and when LQ command is updated.
844 */
845static bool is_lq_table_valid(struct iwl_priv *priv,
846 struct iwl_rxon_context *ctx,
847 struct iwl_link_quality_cmd *lq)
848{
849 int i;
850
851 if (ctx->ht.enabled)
852 return true;
853
854 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
855 ctx->active.channel);
856 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
857 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
858 RATE_MCS_HT_MSK) {
859 IWL_DEBUG_INFO(priv,
860 "index %d of LQ expects HT channel\n",
861 i);
862 return false;
863 }
864 }
865 return true;
866}
867
868/**
869 * iwl_send_lq_cmd() - Send link quality command
870 * @init: This command is sent as part of station initialization right
871 * after station has been added.
872 *
873 * The link quality command is sent as the last step of station creation.
874 * This is the special case in which init is set and we call a callback in
875 * this case to clear the state indicating that station creation is in
876 * progress.
877 */
878int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
879 struct iwl_link_quality_cmd *lq, u8 flags, bool init)
880{
881 int ret = 0;
882 struct iwl_host_cmd cmd = {
883 .id = REPLY_TX_LINK_QUALITY_CMD,
884 .len = { sizeof(struct iwl_link_quality_cmd), },
885 .flags = flags,
886 .data = { lq, },
887 };
888
889 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
890 return -EINVAL;
891
892
893 spin_lock_bh(&priv->sta_lock);
894 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
895 spin_unlock_bh(&priv->sta_lock);
896 return -EINVAL;
897 }
898 spin_unlock_bh(&priv->sta_lock);
899
900 iwl_dump_lq_cmd(priv, lq);
901 if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
902 return -EINVAL;
903
904 if (is_lq_table_valid(priv, ctx, lq))
905 ret = iwl_dvm_send_cmd(priv, &cmd);
906 else
907 ret = -EINVAL;
908
909 if (cmd.flags & CMD_ASYNC)
910 return ret;
911
912 if (init) {
913 IWL_DEBUG_INFO(priv, "init LQ command complete, "
914 "clearing sta addition status for sta %d\n",
915 lq->sta_id);
916 spin_lock_bh(&priv->sta_lock);
917 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
918 spin_unlock_bh(&priv->sta_lock);
919 }
920 return ret;
921}
922
923
924static struct iwl_link_quality_cmd *
925iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
926 u8 sta_id)
927{
928 struct iwl_link_quality_cmd *link_cmd;
929
930 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
931 if (!link_cmd) {
932 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
933 return NULL;
934 }
935
936 iwl_sta_fill_lq(priv, ctx, sta_id, link_cmd);
937
938 return link_cmd;
939}
940
941/*
942 * iwlagn_add_bssid_station - Add the special IBSS BSSID station
943 *
944 * Function sleeps.
945 */
946int iwlagn_add_bssid_station(struct iwl_priv *priv,
947 struct iwl_rxon_context *ctx,
948 const u8 *addr, u8 *sta_id_r)
949{
950 int ret;
951 u8 sta_id;
952 struct iwl_link_quality_cmd *link_cmd;
953
954 if (sta_id_r)
955 *sta_id_r = IWL_INVALID_STATION;
956
957 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
958 if (ret) {
959 IWL_ERR(priv, "Unable to add station %pM\n", addr);
960 return ret;
961 }
962
963 if (sta_id_r)
964 *sta_id_r = sta_id;
965
966 spin_lock_bh(&priv->sta_lock);
967 priv->stations[sta_id].used |= IWL_STA_LOCAL;
968 spin_unlock_bh(&priv->sta_lock);
969
970 /* Set up default rate scaling table in device's station table */
971 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
972 if (!link_cmd) {
973 IWL_ERR(priv,
974 "Unable to initialize rate scaling for station %pM.\n",
975 addr);
976 return -ENOMEM;
977 }
978
979 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
980 if (ret)
981 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
982
983 spin_lock_bh(&priv->sta_lock);
984 priv->stations[sta_id].lq = link_cmd;
985 spin_unlock_bh(&priv->sta_lock);
986
987 return 0;
988}
989
990/*
991 * static WEP keys
992 *
993 * For each context, the device has a table of 4 static WEP keys
994 * (one for each key index) that is updated with the following
995 * commands.
996 */
997
/*
 * Upload the complete static WEP key table for @ctx to the device.
 *
 * All WEP_KEYS_MAX slots are always sent; unconfigured slots get
 * WEP_INVALID_OFFSET. When no key is configured the command is only
 * sent if @send_if_empty is set (used to clear keys on removal).
 */
static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
				      struct iwl_rxon_context *ctx,
				      bool send_if_empty)
{
	int i, not_empty = 0;
	/* command header plus one iwl_wep_key entry per slot */
	u8 buff[sizeof(struct iwl_wep_cmd) +
		sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
	struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct iwl_wep_cmd);
	struct iwl_host_cmd cmd = {
		.id = ctx->wep_key_cmd,
		.data = { wep_cmd, },
		.flags = CMD_SYNC,
	};

	might_sleep();

	/* cmd_size still equals sizeof(struct iwl_wep_cmd) here, so this
	 * clears the whole buff[] */
	memset(wep_cmd, 0, cmd_size +
			(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX ; i++) {
		wep_cmd->key[i].key_index = i;
		if (ctx->wep_keys[i].key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = 1;
		} else {
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
		}

		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
		/* key material starts at byte 3 of the key field —
		 * presumably to match the device's key layout; confirm
		 * against the REPLY_WEPKEY command format */
		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
		       ctx->wep_keys[i].key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;

	cmd.len[0] = cmd_size;

	if (not_empty || send_if_empty)
		return iwl_dvm_send_cmd(priv, &cmd);
	else
		return 0;
}
1044
/*
 * Re-upload the static WEP keys for @ctx to the device; a no-op when
 * no key is configured (send_if_empty == false).
 * Caller must hold priv->mutex.
 */
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
				 struct iwl_rxon_context *ctx)
{
	lockdep_assert_held(&priv->mutex);

	return iwl_send_static_wepkey_cmd(priv, ctx, false);
}
1052
1053int iwl_remove_default_wep_key(struct iwl_priv *priv,
1054 struct iwl_rxon_context *ctx,
1055 struct ieee80211_key_conf *keyconf)
1056{
1057 int ret;
1058
1059 lockdep_assert_held(&priv->mutex);
1060
1061 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
1062 keyconf->keyidx);
1063
1064 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
1065 if (iwl_is_rfkill(priv)) {
1066 IWL_DEBUG_WEP(priv,
1067 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
1068 /* but keys in device are clear anyway so return success */
1069 return 0;
1070 }
1071 ret = iwl_send_static_wepkey_cmd(priv, ctx, 1);
1072 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
1073 keyconf->keyidx, ret);
1074
1075 return ret;
1076}
1077
1078int iwl_set_default_wep_key(struct iwl_priv *priv,
1079 struct iwl_rxon_context *ctx,
1080 struct ieee80211_key_conf *keyconf)
1081{
1082 int ret;
1083
1084 lockdep_assert_held(&priv->mutex);
1085
1086 if (keyconf->keylen != WEP_KEY_LEN_128 &&
1087 keyconf->keylen != WEP_KEY_LEN_64) {
1088 IWL_DEBUG_WEP(priv,
1089 "Bad WEP key length %d\n", keyconf->keylen);
1090 return -EINVAL;
1091 }
1092
1093 keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT;
1094
1095 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
1096 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
1097 keyconf->keylen);
1098
1099 ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
1100 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
1101 keyconf->keylen, keyconf->keyidx, ret);
1102
1103 return ret;
1104}
1105
1106/*
1107 * dynamic (per-station) keys
1108 *
1109 * The dynamic keys are a little more complicated. The device has
1110 * a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys.
1111 * These are linked to stations by a table that contains an index
1112 * into the key table for each station/key index/{mcast,unicast},
1113 * i.e. it's basically an array of pointers like this:
1114 * key_offset_t key_mapping[NUM_STATIONS][4][2];
1115 * (it really works differently, but you can think of it as such)
1116 *
1117 * The key uploading and linking happens in the same command, the
1118 * add station command with STA_MODIFY_KEY_MASK.
1119 */
1120
1121static u8 iwlagn_key_sta_id(struct iwl_priv *priv,
1122 struct ieee80211_vif *vif,
1123 struct ieee80211_sta *sta)
1124{
1125 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1126
1127 if (sta)
1128 return iwl_sta_id(sta);
1129
1130 /*
1131 * The device expects GTKs for station interfaces to be
1132 * installed as GTKs for the AP station. If we have no
1133 * station ID, then use the ap_sta_id in that case.
1134 */
1135 if (vif->type == NL80211_IFTYPE_STATION && vif_priv->ctx)
1136 return vif_priv->ctx->ap_sta_id;
1137
1138 return IWL_INVALID_STATION;
1139}
1140
/*
 * Build and send an ADD_STA command installing (or updating) the
 * dynamic key in @keyconf for station @sta_id.
 *
 * For TKIP the pre-computed phase-1 key (@tkip_p1k) and receive IV32
 * (@tkip_iv32) are loaded into the device's key cache as well.
 */
static int iwlagn_send_sta_key(struct iwl_priv *priv,
			       struct ieee80211_key_conf *keyconf,
			       u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
			       u32 cmd_flags)
{
	__le16 key_flags;
	struct iwl_addsta_cmd sta_cmd;
	int i;

	/* snapshot the station's current ADD_STA command under the lock */
	spin_lock_bh(&priv->sta_lock);
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
	spin_unlock_bh(&priv->sta_lock);

	key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags |= STA_KEY_FLG_MAP_KEY_MSK;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= STA_KEY_FLG_CCMP;
		memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= STA_KEY_FLG_TKIP;
		/* NOTE(review): assigns the full u32 IV32 to
		 * tkip_rx_tsc_byte2 — presumably only the relevant part
		 * is kept; confirm against the ADD_STA key layout */
		sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= STA_KEY_FLG_WEP;
		/* WEP key material is stored at offset 3 of the key field */
		memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	/* group keys are flagged multicast */
	if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* key pointer (offset) */
	sta_cmd.key.key_offset = keyconf->hw_key_idx;

	sta_cmd.key.key_flags = key_flags;
	sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
	sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;

	return iwl_send_add_sta(priv, &sta_cmd, cmd_flags);
}
1193
1194void iwl_update_tkip_key(struct iwl_priv *priv,
1195 struct ieee80211_vif *vif,
1196 struct ieee80211_key_conf *keyconf,
1197 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
1198{
1199 u8 sta_id = iwlagn_key_sta_id(priv, vif, sta);
1200
1201 if (sta_id == IWL_INVALID_STATION)
1202 return;
1203
1204 if (iwl_scan_cancel(priv)) {
1205 /* cancel scan failed, just live w/ bad key and rely
1206 briefly on SW decryption */
1207 return;
1208 }
1209
1210 iwlagn_send_sta_key(priv, keyconf, sta_id,
1211 iv32, phase1key, CMD_ASYNC);
1212}
1213
/*
 * Remove a dynamic (per-station) key from the device.
 *
 * Invalidates the key slot via an ADD_STA modify command, releases the
 * slot in the driver's ucode key table and decrements the context's
 * mapping key count. Caller must hold priv->mutex.
 */
int iwl_remove_dynamic_key(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta)
{
	struct iwl_addsta_cmd sta_cmd;
	u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
	__le16 key_flags;

	/* if station isn't there, neither is the key */
	if (sta_id == IWL_INVALID_STATION)
		return -ENOENT;

	spin_lock_bh(&priv->sta_lock);
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
	/* reuse sta_id as a flag: invalid means "not active in ucode" */
	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
		sta_id = IWL_INVALID_STATION;
	spin_unlock_bh(&priv->sta_lock);

	/* station not known to the ucode: nothing to remove there */
	if (sta_id == IWL_INVALID_STATION)
		return 0;

	lockdep_assert_held(&priv->mutex);

	ctx->key_mapping_keys--;

	IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table))
		IWL_ERR(priv, "offset %d not used in uCode key table.\n",
			keyconf->hw_key_idx);

	/* mark the slot invalid and unencrypted in the device */
	key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC |
		     STA_KEY_FLG_INVALID;

	if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		key_flags |= STA_KEY_MULTICAST_MSK;

	sta_cmd.key.key_flags = key_flags;
	sta_cmd.key.key_offset = keyconf->hw_key_idx;
	sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
	sta_cmd.mode = STA_CONTROL_MODIFY_MSK;

	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
1261
/*
 * Install a dynamic (per-station) key in the device.
 *
 * Reserves a slot in the ucode key table, then uploads the key via
 * ADD_STA. For TKIP the phase-1 key for the current RX sequence is
 * pre-computed and uploaded too. On failure the slot reservation and
 * the mapping key count are rolled back.
 * Caller must hold priv->mutex.
 */
int iwl_set_dynamic_key(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			struct ieee80211_key_conf *keyconf,
			struct ieee80211_sta *sta)
{
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	int ret;
	u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
	const u8 *addr;

	if (sta_id == IWL_INVALID_STATION)
		return -EINVAL;

	lockdep_assert_held(&priv->mutex);

	keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
	if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
		return -ENOSPC;

	ctx->key_mapping_keys++;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta)
			addr = sta->addr;
		else /* station mode case only */
			addr = ctx->active.bssid_addr;

		/* pre-fill phase 1 key into device cache */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
					  seq.tkip.iv32, p1k, CMD_SYNC);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
					  0, NULL, CMD_SYNC);
		break;
	default:
		IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
		ret = -EINVAL;
	}

	if (ret) {
		/* roll back the slot reservation and key count */
		ctx->key_mapping_keys--;
		clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table);
	}

	IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : NULL, ret);

	return ret;
}
1319
1320/**
1321 * iwlagn_alloc_bcast_station - add broadcast station into driver's station table.
1322 *
1323 * This adds the broadcast station into the driver's station table
1324 * and marks it driver active, so that it will be restored to the
1325 * device at the next best time.
1326 */
1327int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
1328 struct iwl_rxon_context *ctx)
1329{
1330 struct iwl_link_quality_cmd *link_cmd;
1331 u8 sta_id;
1332
1333 spin_lock_bh(&priv->sta_lock);
1334 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
1335 if (sta_id == IWL_INVALID_STATION) {
1336 IWL_ERR(priv, "Unable to prepare broadcast station\n");
1337 spin_unlock_bh(&priv->sta_lock);
1338
1339 return -EINVAL;
1340 }
1341
1342 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
1343 priv->stations[sta_id].used |= IWL_STA_BCAST;
1344 spin_unlock_bh(&priv->sta_lock);
1345
1346 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
1347 if (!link_cmd) {
1348 IWL_ERR(priv,
1349 "Unable to initialize rate scaling for bcast station.\n");
1350 return -ENOMEM;
1351 }
1352
1353 spin_lock_bh(&priv->sta_lock);
1354 priv->stations[sta_id].lq = link_cmd;
1355 spin_unlock_bh(&priv->sta_lock);
1356
1357 return 0;
1358}
1359
1360/**
1361 * iwl_update_bcast_station - update broadcast station's LQ command
1362 *
1363 * Only used by iwlagn. Placed here to have all bcast station management
1364 * code together.
1365 */
1366int iwl_update_bcast_station(struct iwl_priv *priv,
1367 struct iwl_rxon_context *ctx)
1368{
1369 struct iwl_link_quality_cmd *link_cmd;
1370 u8 sta_id = ctx->bcast_sta_id;
1371
1372 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
1373 if (!link_cmd) {
1374 IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
1375 return -ENOMEM;
1376 }
1377
1378 spin_lock_bh(&priv->sta_lock);
1379 if (priv->stations[sta_id].lq)
1380 kfree(priv->stations[sta_id].lq);
1381 else
1382 IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
1383 priv->stations[sta_id].lq = link_cmd;
1384 spin_unlock_bh(&priv->sta_lock);
1385
1386 return 0;
1387}
1388
1389int iwl_update_bcast_stations(struct iwl_priv *priv)
1390{
1391 struct iwl_rxon_context *ctx;
1392 int ret = 0;
1393
1394 for_each_context(priv, ctx) {
1395 ret = iwl_update_bcast_station(priv, ctx);
1396 if (ret)
1397 break;
1398 }
1399
1400 return ret;
1401}
1402
1403/**
1404 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
1405 */
1406int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
1407{
1408 struct iwl_addsta_cmd sta_cmd;
1409
1410 lockdep_assert_held(&priv->mutex);
1411
1412 /* Remove "disable" flag, to enable Tx for this TID */
1413 spin_lock_bh(&priv->sta_lock);
1414 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1415 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
1416 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1417 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1418 spin_unlock_bh(&priv->sta_lock);
1419
1420 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1421}
1422
1423int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
1424 int tid, u16 ssn)
1425{
1426 int sta_id;
1427 struct iwl_addsta_cmd sta_cmd;
1428
1429 lockdep_assert_held(&priv->mutex);
1430
1431 sta_id = iwl_sta_id(sta);
1432 if (sta_id == IWL_INVALID_STATION)
1433 return -ENXIO;
1434
1435 spin_lock_bh(&priv->sta_lock);
1436 priv->stations[sta_id].sta.station_flags_msk = 0;
1437 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
1438 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
1439 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
1440 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1441 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1442 spin_unlock_bh(&priv->sta_lock);
1443
1444 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1445}
1446
1447int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
1448 int tid)
1449{
1450 int sta_id;
1451 struct iwl_addsta_cmd sta_cmd;
1452
1453 lockdep_assert_held(&priv->mutex);
1454
1455 sta_id = iwl_sta_id(sta);
1456 if (sta_id == IWL_INVALID_STATION) {
1457 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1458 return -ENXIO;
1459 }
1460
1461 spin_lock_bh(&priv->sta_lock);
1462 priv->stations[sta_id].sta.station_flags_msk = 0;
1463 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
1464 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
1465 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1466 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1467 spin_unlock_bh(&priv->sta_lock);
1468
1469 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1470}
1471
1472
1473
/*
 * Mark the station as in power-save in the ucode and set the number
 * of frames it may be sent while asleep (presumably for PS-Poll /
 * service-period handling — confirm against callers).
 * Sent asynchronously; the result is not checked.
 */
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
	struct iwl_addsta_cmd cmd = {
		.mode = STA_CONTROL_MODIFY_MSK,
		.station_flags = STA_FLG_PWR_SAVE_MSK,
		.station_flags_msk = STA_FLG_PWR_SAVE_MSK,
		.sta.sta_id = sta_id,
		.sta.modify_mask = STA_MODIFY_SLEEP_TX_COUNT_MSK,
		.sleep_tx_count = cpu_to_le16(cnt),
	};

	iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
deleted file mode 100644
index 57b918ce3b5..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ /dev/null
@@ -1,471 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/init.h>
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/dma-mapping.h>
68#include <net/net_namespace.h>
69#include <linux/netdevice.h>
70#include <net/cfg80211.h>
71#include <net/mac80211.h>
72#include <net/netlink.h>
73
74#include "iwl-debug.h"
75#include "iwl-trans.h"
76#include "dev.h"
77#include "agn.h"
78#include "iwl-test.h"
79#include "iwl-testmode.h"
80
/* Glue: route test framework host commands through the DVM op mode. */
static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
				 struct iwl_host_cmd *cmd)
{
	return iwl_dvm_send_cmd(IWL_OP_MODE_GET_DVM(op_mode), cmd);
}
87
88static bool iwl_testmode_valid_hw_addr(u32 addr)
89{
90 if (iwlagn_hw_valid_rtc_data_addr(addr))
91 return true;
92
93 if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
94 addr < IWLAGN_RTC_INST_UPPER_BOUND)
95 return true;
96
97 return false;
98}
99
100static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
101{
102 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
103 return priv->fw->ucode_ver;
104}
105
106static struct sk_buff*
107iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
108{
109 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
110 return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
111}
112
/* Send a previously allocated testmode reply skb back to userspace. */
static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	return cfg80211_testmode_reply(skb);
}
117
118static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
119 int len)
120{
121 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
122 return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
123 GFP_ATOMIC);
124}
125
126static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
127{
128 return cfg80211_testmode_event(skb, GFP_ATOMIC);
129}
130
/* Callback table wiring the shared iwl-test framework to this op mode. */
static struct iwl_test_ops tst_ops = {
	.send_cmd = iwl_testmode_send_cmd,
	.valid_hw_addr = iwl_testmode_valid_hw_addr,
	.get_fw_ver = iwl_testmode_get_fw_ver,
	.alloc_reply = iwl_testmode_alloc_reply,
	.reply = iwl_testmode_reply,
	.alloc_event = iwl_testmode_alloc_event,
	.event = iwl_testmode_event,
};
140
/* Hook this op mode into the shared iwl-test framework. */
void iwl_testmode_init(struct iwl_priv *priv)
{
	iwl_test_init(&priv->tst, priv->trans, &tst_ops);
}
145
/* Release the iwl-test framework state for this op mode. */
void iwl_testmode_free(struct iwl_priv *priv)
{
	iwl_test_free(&priv->tst);
}
150
/*
 * Run the init calibration sequence and wait (up to 2s) for the
 * CALIBRATION_COMPLETE_NOTIFICATION from the firmware.
 * Returns 0 on success or a negative error code.
 */
static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
{
	struct iwl_notification_wait calib_wait;
	static const u8 calib_complete[] = {
		CALIBRATION_COMPLETE_NOTIFICATION
	};
	int ret;

	/* register for the notification before kicking off calibration */
	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
				   calib_complete, ARRAY_SIZE(calib_complete),
				   NULL, NULL);
	ret = iwl_init_alive_start(priv);
	if (ret) {
		IWL_ERR(priv, "Fail init calibration: %d\n", ret);
		goto cfg_init_calib_error;
	}

	/* presumably iwl_wait_notification() consumes the wait entry —
	 * only the error path removes it explicitly; confirm */
	ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
	if (ret)
		IWL_ERR(priv, "Error detecting"
			" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
	return ret;

cfg_init_calib_error:
	/* error path: unregister the still-pending wait entry */
	iwl_remove_notification(&priv->notif_wait, &calib_wait);
	return ret;
}
178
179/*
180 * This function handles the user application commands for driver.
181 *
182 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
183 * handlers respectively.
184 *
 * If it's an unknown command ID, -ENOSYS is replied; otherwise, the returned
186 * value of the actual command execution is replied to the user application.
187 *
 * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
 * is used to carry the message, while IWL_TM_ATTR_COMMAND must be set to
 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
191 *
192 * @hw: ieee80211_hw object that represents the device
193 * @tb: gnl message fields from the user space
194 */
195static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
196{
197 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
198 struct iwl_trans *trans = priv->trans;
199 struct sk_buff *skb;
200 unsigned char *rsp_data_ptr = NULL;
201 int status = 0, rsp_data_len = 0;
202 u32 inst_size = 0, data_size = 0;
203 const struct fw_img *img;
204
205 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
206 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
207 rsp_data_ptr = (unsigned char *)priv->cfg->name;
208 rsp_data_len = strlen(priv->cfg->name);
209 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
210 rsp_data_len + 20);
211 if (!skb) {
212 IWL_ERR(priv, "Memory allocation fail\n");
213 return -ENOMEM;
214 }
215 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
216 IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
217 nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
218 rsp_data_len, rsp_data_ptr))
219 goto nla_put_failure;
220 status = cfg80211_testmode_reply(skb);
221 if (status < 0)
222 IWL_ERR(priv, "Error sending msg : %d\n", status);
223 break;
224
225 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
226 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
227 if (status)
228 IWL_ERR(priv, "Error loading init ucode: %d\n", status);
229 break;
230
231 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
232 iwl_testmode_cfg_init_calib(priv);
233 priv->ucode_loaded = false;
234 iwl_trans_stop_device(trans);
235 break;
236
237 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
238 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
239 if (status) {
240 IWL_ERR(priv,
241 "Error loading runtime ucode: %d\n", status);
242 break;
243 }
244 status = iwl_alive_start(priv);
245 if (status)
246 IWL_ERR(priv,
247 "Error starting the device: %d\n", status);
248 break;
249
250 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
251 iwl_scan_cancel_timeout(priv, 200);
252 priv->ucode_loaded = false;
253 iwl_trans_stop_device(trans);
254 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
255 if (status) {
256 IWL_ERR(priv,
257 "Error loading WOWLAN ucode: %d\n", status);
258 break;
259 }
260 status = iwl_alive_start(priv);
261 if (status)
262 IWL_ERR(priv,
263 "Error starting the device: %d\n", status);
264 break;
265
266 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
267 if (priv->eeprom_blob) {
268 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
269 priv->eeprom_blob_size + 20);
270 if (!skb) {
271 IWL_ERR(priv, "Memory allocation fail\n");
272 return -ENOMEM;
273 }
274 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
275 IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
276 nla_put(skb, IWL_TM_ATTR_EEPROM,
277 priv->eeprom_blob_size,
278 priv->eeprom_blob))
279 goto nla_put_failure;
280 status = cfg80211_testmode_reply(skb);
281 if (status < 0)
282 IWL_ERR(priv, "Error sending msg : %d\n",
283 status);
284 } else
285 return -ENODATA;
286 break;
287
288 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
289 if (!tb[IWL_TM_ATTR_FIXRATE]) {
290 IWL_ERR(priv, "Missing fixrate setting\n");
291 return -ENOMSG;
292 }
293 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
294 break;
295
296 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
297 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
298 if (!skb) {
299 IWL_ERR(priv, "Memory allocation fail\n");
300 return -ENOMEM;
301 }
302 if (!priv->ucode_loaded) {
303 IWL_ERR(priv, "No uCode has not been loaded\n");
304 return -EINVAL;
305 } else {
306 img = &priv->fw->img[priv->cur_ucode];
307 inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
308 data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
309 }
310 if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
311 nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
312 nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
313 goto nla_put_failure;
314 status = cfg80211_testmode_reply(skb);
315 if (status < 0)
316 IWL_ERR(priv, "Error sending msg : %d\n", status);
317 break;
318
319 default:
320 IWL_ERR(priv, "Unknown testmode driver command ID\n");
321 return -ENOSYS;
322 }
323 return status;
324
325nla_put_failure:
326 kfree_skb(skb);
327 return -EMSGSIZE;
328}
329
/*
 * This function handles the user application's request to switch uCode
 * ownership.
 *
 * It retrieves the mandatory field IWL_TM_ATTR_UCODE_OWNER and
 * decides who the current owner of the uCode is.
 *
 * If the current owner is OWNERSHIP_TM, then the only host commands
 * that can be delivered to the uCode are those from testmode; all
 * other host commands will be dropped.
 *
 * By default, the driver is the owner of the uCode in normal
 * operational mode.
 *
 * @hw: ieee80211_hw object that represents the device
 * @tb: gnl message fields from the user space
 */
345static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
346{
347 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
348 u8 owner;
349
350 if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
351 IWL_ERR(priv, "Missing ucode owner\n");
352 return -ENOMSG;
353 }
354
355 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
356 if (owner == IWL_OWNERSHIP_DRIVER) {
357 priv->ucode_owner = owner;
358 iwl_test_enable_notifications(&priv->tst, false);
359 } else if (owner == IWL_OWNERSHIP_TM) {
360 priv->ucode_owner = owner;
361 iwl_test_enable_notifications(&priv->tst, true);
362 } else {
363 IWL_ERR(priv, "Invalid owner\n");
364 return -EINVAL;
365 }
366 return 0;
367}
368
369/* The testmode gnl message handler that takes the gnl message from the
370 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
371 * invoke the corresponding handlers.
372 *
373 * This function is invoked when there is user space application sending
374 * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
375 * by nl80211.
376 *
377 * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
378 * dispatching it to the corresponding handler.
379 *
380 * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
381 * -ENOSYS is replied to the user application if the command is unknown;
382 * Otherwise, the command is dispatched to the respective handler.
383 *
384 * @hw: ieee80211_hw object that represents the device
385 * @data: pointer to user space message
386 * @len: length in byte of @data
387 */
388int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
389{
390 struct nlattr *tb[IWL_TM_ATTR_MAX];
391 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
392 int result;
393
394 result = iwl_test_parse(&priv->tst, tb, data, len);
395 if (result)
396 return result;
397
398 /* in case multiple accesses to the device happens */
399 mutex_lock(&priv->mutex);
400 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
401 case IWL_TM_CMD_APP2DEV_UCODE:
402 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
403 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
404 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
405 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
406 case IWL_TM_CMD_APP2DEV_END_TRACE:
407 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
408 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
409 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
410 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
411 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
412 result = iwl_test_handle_cmd(&priv->tst, tb);
413 break;
414
415 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
416 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
417 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
418 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
419 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
420 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
421 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
422 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
423 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
424 result = iwl_testmode_driver(hw, tb);
425 break;
426
427 case IWL_TM_CMD_APP2DEV_OWNERSHIP:
428 IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
429 result = iwl_testmode_ownership(hw, tb);
430 break;
431
432 default:
433 IWL_ERR(priv, "Unknown testmode command\n");
434 result = -ENOSYS;
435 break;
436 }
437 mutex_unlock(&priv->mutex);
438
439 if (result)
440 IWL_ERR(priv, "Test cmd failed result=%d\n", result);
441 return result;
442}
443
444int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
445 struct netlink_callback *cb,
446 void *data, int len)
447{
448 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
449 int result;
450 u32 cmd;
451
452 if (cb->args[3]) {
453 /* offset by 1 since commands start at 0 */
454 cmd = cb->args[3] - 1;
455 } else {
456 struct nlattr *tb[IWL_TM_ATTR_MAX];
457
458 result = iwl_test_parse(&priv->tst, tb, data, len);
459 if (result)
460 return result;
461
462 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
463 cb->args[3] = cmd + 1;
464 }
465
466 /* in case multiple accesses to the device happens */
467 mutex_lock(&priv->mutex);
468 result = iwl_test_dump(&priv->tst, cmd, skb, cb);
469 mutex_unlock(&priv->mutex);
470 return result;
471}
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
deleted file mode 100644
index eb864433e59..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ /dev/null
@@ -1,693 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34#include <net/mac80211.h>
35#include "iwl-io.h"
36#include "iwl-modparams.h"
37#include "iwl-debug.h"
38#include "agn.h"
39#include "dev.h"
40#include "commands.h"
41#include "tt.h"
42
43/* default Thermal Throttling transaction table
44 * Current state | Throttling Down | Throttling Up
45 *=============================================================================
46 * Condition Nxt State Condition Nxt State Condition Nxt State
47 *-----------------------------------------------------------------------------
48 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
49 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
50 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
51 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
52 *=============================================================================
53 */
54static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
55 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
56 {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
57 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
58};
59static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
60 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
61 {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
62 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
63};
64static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
65 {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
66 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
67 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
68};
69static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
70 {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
71 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
72 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
73};
74
/* Advance Thermal Throttling default restriction table */
/* One row per thermal state (IWL_TI_0 .. IWL_TI_CT_KILL). Each row holds
 * two antenna restrictions and an HT-allowed flag. NOTE(review): the struct
 * declaration is not visible in this chunk; based on the accessors in this
 * file, the columns are presumably {tx_stream, rx_stream, is_ht} in this
 * order -- confirm against struct iwl_tt_restriction. */
static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
	{IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
	{IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
	{IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
	{IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
};
82
83bool iwl_tt_is_low_power_state(struct iwl_priv *priv)
84{
85 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
86
87 if (tt->state >= IWL_TI_1)
88 return true;
89 return false;
90}
91
92u8 iwl_tt_current_power_mode(struct iwl_priv *priv)
93{
94 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
95
96 return tt->tt_power_mode;
97}
98
99bool iwl_ht_enabled(struct iwl_priv *priv)
100{
101 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
102 struct iwl_tt_restriction *restriction;
103
104 if (!priv->thermal_throttle.advanced_tt)
105 return true;
106 restriction = tt->restriction + tt->state;
107 return restriction->is_ht;
108}
109
110static bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
111{
112 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
113 bool within_margin = false;
114
115 if (!priv->thermal_throttle.advanced_tt)
116 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
117 CT_KILL_THRESHOLD_LEGACY) ? true : false;
118 else
119 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
120 CT_KILL_THRESHOLD) ? true : false;
121 return within_margin;
122}
123
124bool iwl_check_for_ct_kill(struct iwl_priv *priv)
125{
126 bool is_ct_kill = false;
127
128 if (iwl_within_ct_kill_margin(priv)) {
129 iwl_tt_enter_ct_kill(priv);
130 is_ct_kill = true;
131 }
132 return is_ct_kill;
133}
134
135enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
136{
137 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
138 struct iwl_tt_restriction *restriction;
139
140 if (!priv->thermal_throttle.advanced_tt)
141 return IWL_ANT_OK_MULTI;
142 restriction = tt->restriction + tt->state;
143 return restriction->tx_stream;
144}
145
146enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
147{
148 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
149 struct iwl_tt_restriction *restriction;
150
151 if (!priv->thermal_throttle.advanced_tt)
152 return IWL_ANT_OK_MULTI;
153 restriction = tt->restriction + tt->state;
154 return restriction->rx_stream;
155}
156
#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */

/*
 * toggle the bit to wake up uCode and check the temperature
 * if the temperature is below CT, uCode will stay awake and send card
 * state notification with CT_KILL bit clear to inform Thermal Throttling
 * Management to change state. Otherwise, uCode will go back to sleep
 * without doing anything, driver should continue the 5 seconds timer
 * to wake up uCode for temperature check until temperature drop below CT
 *
 * Runs as the ct_kill_exit_tm timer callback; @data is the iwl_priv
 * pointer stashed at timer setup.
 */
static void iwl_tt_check_exit_ct_kill(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	unsigned long flags;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (tt->state == IWL_TI_CT_KILL) {
		/* alternate set/clear of the CT_KILL_EXIT bit in GP1;
		 * the toggle itself is what pokes the uCode awake */
		if (priv->thermal_throttle.ct_kill_toggle) {
			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
			priv->thermal_throttle.ct_kill_toggle = false;
		} else {
			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
			priv->thermal_throttle.ct_kill_toggle = true;
		}
		/* read back GP1 -- presumably to flush the writes to the
		 * device before the NIC-access pulse below; confirm against
		 * the transport layer's ordering rules */
		iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
		spin_lock_irqsave(&priv->trans->reg_lock, flags);
		/* NOTE(review): this grab/release pair only pulses NIC
		 * access and assumes iwl_grab_nic_access() returns nonzero
		 * on success -- confirm against the transport API */
		if (likely(iwl_grab_nic_access(priv->trans)))
			iwl_release_nic_access(priv->trans);
		spin_unlock_irqrestore(&priv->trans->reg_lock, flags);

		/* Reschedule the ct_kill timer to occur in
		 * CT_KILL_EXIT_DURATION seconds to ensure we get a
		 * thermal update */
		IWL_DEBUG_TEMP(priv, "schedule ct_kill exit timer\n");
		mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
			  jiffies + CT_KILL_EXIT_DURATION * HZ);
	}
}
201
202static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
203 bool stop)
204{
205 if (stop) {
206 IWL_DEBUG_TEMP(priv, "Stop all queues\n");
207 if (priv->mac80211_registered)
208 ieee80211_stop_queues(priv->hw);
209 IWL_DEBUG_TEMP(priv,
210 "Schedule 5 seconds CT_KILL Timer\n");
211 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
212 jiffies + CT_KILL_EXIT_DURATION * HZ);
213 } else {
214 IWL_DEBUG_TEMP(priv, "Wake all queues\n");
215 if (priv->mac80211_registered)
216 ieee80211_wake_queues(priv->hw);
217 }
218}
219
220static void iwl_tt_ready_for_ct_kill(unsigned long data)
221{
222 struct iwl_priv *priv = (struct iwl_priv *)data;
223 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
224
225 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
226 return;
227
228 /* temperature timer expired, ready to go into CT_KILL state */
229 if (tt->state != IWL_TI_CT_KILL) {
230 IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
231 "temperature timer expired\n");
232 tt->state = IWL_TI_CT_KILL;
233 set_bit(STATUS_CT_KILL, &priv->status);
234 iwl_perform_ct_kill_task(priv, true);
235 }
236}
237
/* Defer CT-kill entry briefly: request fresh statistics (a temperature
 * reading) from the uCode and arm ct_kill_waiting_tm; if the temperature
 * has not dropped by the time it fires, iwl_tt_ready_for_ct_kill() commits
 * the transition. */
static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
{
	IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
	/* make request to retrieve statistics information */
	iwl_send_statistics_request(priv, CMD_SYNC, false);
	/* Reschedule the ct_kill wait timer */
	mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
		 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
}
247
/* Legacy throttling thresholds, degrees Celsius */
#define IWL_MINIMAL_POWER_THRESHOLD		(CT_KILL_THRESHOLD_LEGACY)
#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2	(100)
#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1	(90)

/*
 * Legacy thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *	Chip will identify dangerously high temperatures that can
 *	harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *	Throttle early enough to lower the power consumption before
 *	drastic steps are needed
 *
 * @temp: current temperature in degrees Celsius
 * @force: when true and the new state is CT_KILL, enter it immediately
 *	   instead of going through the grace-period wait
 */
static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	enum iwl_tt_state old_state;

#ifdef CONFIG_IWLWIFI_DEBUG
	/* log notable jumps (> IWL_TT_INCREASE_MARGIN) between readings */
	if ((tt->tt_previous_temp) &&
	    (temp > tt->tt_previous_temp) &&
	    ((temp - tt->tt_previous_temp) >
	    IWL_TT_INCREASE_MARGIN)) {
		IWL_DEBUG_TEMP(priv,
			"Temperature increase %d degree Celsius\n",
			(temp - tt->tt_previous_temp));
	}
#endif
	old_state = tt->state;
	/* in Celsius */
	if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
		tt->state = IWL_TI_CT_KILL;
	else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
		tt->state = IWL_TI_2;
	else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
		tt->state = IWL_TI_1;
	else
		tt->state = IWL_TI_0;

#ifdef CONFIG_IWLWIFI_DEBUG
	tt->tt_previous_temp = temp;
#endif
	/* stop ct_kill_waiting_tm timer */
	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
	if (tt->state != old_state) {
		/* pick the power index the new state imposes */
		switch (tt->state) {
		case IWL_TI_0:
			/*
			 * When the system is ready to go back to IWL_TI_0
			 * we only have to call iwl_power_update_mode() to
			 * do so.
			 */
			break;
		case IWL_TI_1:
			tt->tt_power_mode = IWL_POWER_INDEX_3;
			break;
		case IWL_TI_2:
			tt->tt_power_mode = IWL_POWER_INDEX_4;
			break;
		default:
			tt->tt_power_mode = IWL_POWER_INDEX_5;
			break;
		}
		mutex_lock(&priv->mutex);
		/* tentatively leave CT_KILL; restored below on failure */
		if (old_state == IWL_TI_CT_KILL)
			clear_bit(STATUS_CT_KILL, &priv->status);
		if (tt->state != IWL_TI_CT_KILL &&
		    iwl_power_update_mode(priv, true)) {
			/* TT state not updated
			 * try again during next temperature read
			 */
			if (old_state == IWL_TI_CT_KILL)
				set_bit(STATUS_CT_KILL, &priv->status);
			tt->state = old_state;
			IWL_ERR(priv, "Cannot update power mode, "
				"TT state not updated\n");
		} else {
			if (tt->state == IWL_TI_CT_KILL) {
				if (force) {
					set_bit(STATUS_CT_KILL, &priv->status);
					iwl_perform_ct_kill_task(priv, true);
				} else {
					/* wait for a fresh temperature read
					 * before committing to CT_KILL */
					iwl_prepare_ct_kill_task(priv);
					tt->state = old_state;
				}
			} else if (old_state == IWL_TI_CT_KILL &&
				 tt->state != IWL_TI_CT_KILL)
				iwl_perform_ct_kill_task(priv, false);
			IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n",
					tt->state);
			IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
					tt->tt_power_mode);
		}
		mutex_unlock(&priv->mutex);
	}
}
344
/*
 * Advance thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *	Chip will identify dangerously high temperatures that can
 *	harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *	Throttle early enough to lower the power consumption before
 *	drastic steps are needed
 *	Actions include relaxing the power down sleep thresholds and
 *	decreasing the number of TX streams
 * 3) Avoid throughput performance impact as much as possible
 *
 *=============================================================================
 *  Condition Nxt State  Condition Nxt State Condition Nxt State
 *-----------------------------------------------------------------------------
 * IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
 * IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
 * IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
 * IWL_CT_KILL      N/A       N/A       N/A        N/A    T<=95     TI_0
 *=============================================================================
 *
 * @temp: current temperature in degrees Celsius
 * @force: when true and the next state is CT_KILL, enter it immediately
 *	   instead of going through the grace-period wait
 */
static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	int i;
	bool changed = false;
	enum iwl_tt_state old_state;
	struct iwl_tt_trans *transaction;

	old_state = tt->state;
	for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
		/* based on the current TT state,
		 * find the curresponding transaction table
		 * each table has (IWL_TI_STATE_MAX - 1) entries
		 * tt->transaction + ((old_state * (IWL_TI_STATE_MAX - 1))
		 * will advance to the correct table.
		 * then based on the current temperature
		 * find the next state need to transaction to
		 * go through all the possible (IWL_TI_STATE_MAX - 1) entries
		 * in the current table to see if transaction is needed
		 */
		transaction = tt->transaction +
			((old_state * (IWL_TI_STATE_MAX - 1)) + i);
		if (temp >= transaction->tt_low &&
		    temp <= transaction->tt_high) {
#ifdef CONFIG_IWLWIFI_DEBUG
			/* log notable jumps between readings */
			if ((tt->tt_previous_temp) &&
			    (temp > tt->tt_previous_temp) &&
			    ((temp - tt->tt_previous_temp) >
			    IWL_TT_INCREASE_MARGIN)) {
				IWL_DEBUG_TEMP(priv,
					"Temperature increase %d "
					"degree Celsius\n",
					(temp - tt->tt_previous_temp));
			}
			tt->tt_previous_temp = temp;
#endif
			if (old_state !=
			    transaction->next_state) {
				changed = true;
				tt->state =
					transaction->next_state;
			}
			/* first matching range wins */
			break;
		}
	}
	/* stop ct_kill_waiting_tm timer */
	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
	if (changed) {
		if (tt->state >= IWL_TI_1) {
			/* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
			tt->tt_power_mode = IWL_POWER_INDEX_5;

			if (!iwl_ht_enabled(priv)) {
				struct iwl_rxon_context *ctx;

				for_each_context(priv, ctx) {
					struct iwl_rxon_cmd *rxon;

					rxon = &ctx->staging;

					/* disable HT */
					rxon->flags &= ~(
						RXON_FLG_CHANNEL_MODE_MSK |
						RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
						RXON_FLG_HT40_PROT_MSK |
						RXON_FLG_HT_PROT_MSK);
				}
			} else {
				/* check HT capability and set
				 * according to the system HT capability
				 * in case get disabled before */
				iwl_set_rxon_ht(priv, &priv->current_ht_config);
			}

		} else {
			/*
			 * restore system power setting -- it will be
			 * recalculated automatically.
			 */

			/* check HT capability and set
			 * according to the system HT capability
			 * in case get disabled before */
			iwl_set_rxon_ht(priv, &priv->current_ht_config);
		}
		mutex_lock(&priv->mutex);
		/* tentatively leave CT_KILL; restored below on failure */
		if (old_state == IWL_TI_CT_KILL)
			clear_bit(STATUS_CT_KILL, &priv->status);
		if (tt->state != IWL_TI_CT_KILL &&
		    iwl_power_update_mode(priv, true)) {
			/* TT state not updated
			 * try again during next temperature read
			 */
			IWL_ERR(priv, "Cannot update power mode, "
				"TT state not updated\n");
			if (old_state == IWL_TI_CT_KILL)
				set_bit(STATUS_CT_KILL, &priv->status);
			tt->state = old_state;
		} else {
			IWL_DEBUG_TEMP(priv,
					"Thermal Throttling to new state: %u\n",
					tt->state);
			if (old_state != IWL_TI_CT_KILL &&
			    tt->state == IWL_TI_CT_KILL) {
				if (force) {
					IWL_DEBUG_TEMP(priv,
						"Enter IWL_TI_CT_KILL\n");
					set_bit(STATUS_CT_KILL, &priv->status);
					iwl_perform_ct_kill_task(priv, true);
				} else {
					/* wait for a fresh temperature read
					 * before committing to CT_KILL */
					iwl_prepare_ct_kill_task(priv);
					tt->state = old_state;
				}
			} else if (old_state == IWL_TI_CT_KILL &&
				  tt->state != IWL_TI_CT_KILL) {
				IWL_DEBUG_TEMP(priv, "Exit IWL_TI_CT_KILL\n");
				iwl_perform_ct_kill_task(priv, false);
			}
		}
		mutex_unlock(&priv->mutex);
	}
}
488
489/* Card State Notification indicated reach critical temperature
490 * if PSP not enable, no Thermal Throttling function will be performed
491 * just set the GP1 bit to acknowledge the event
492 * otherwise, go into IWL_TI_CT_KILL state
493 * since Card State Notification will not provide any temperature reading
494 * for Legacy mode
495 * so just pass the CT_KILL temperature to iwl_legacy_tt_handler()
496 * for advance mode
497 * pass CT_KILL_THRESHOLD+1 to make sure move into IWL_TI_CT_KILL state
498 */
499static void iwl_bg_ct_enter(struct work_struct *work)
500{
501 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
502 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
503
504 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
505 return;
506
507 if (!iwl_is_ready(priv))
508 return;
509
510 if (tt->state != IWL_TI_CT_KILL) {
511 IWL_ERR(priv, "Device reached critical temperature "
512 "- ucode going to sleep!\n");
513 if (!priv->thermal_throttle.advanced_tt)
514 iwl_legacy_tt_handler(priv,
515 IWL_MINIMAL_POWER_THRESHOLD,
516 true);
517 else
518 iwl_advance_tt_handler(priv,
519 CT_KILL_THRESHOLD + 1, true);
520 }
521}
522
523/* Card State Notification indicated out of critical temperature
524 * since Card State Notification will not provide any temperature reading
525 * so pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2 temperature
526 * to iwl_legacy_tt_handler() to get out of IWL_CT_KILL state
527 */
528static void iwl_bg_ct_exit(struct work_struct *work)
529{
530 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
531 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
532
533 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
534 return;
535
536 if (!iwl_is_ready(priv))
537 return;
538
539 /* stop ct_kill_exit_tm timer */
540 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
541
542 if (tt->state == IWL_TI_CT_KILL) {
543 IWL_ERR(priv,
544 "Device temperature below critical"
545 "- ucode awake!\n");
546 /*
547 * exit from CT_KILL state
548 * reset the current temperature reading
549 */
550 priv->temperature = 0;
551 if (!priv->thermal_throttle.advanced_tt)
552 iwl_legacy_tt_handler(priv,
553 IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
554 true);
555 else
556 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
557 true);
558 }
559}
560
561void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
562{
563 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
564 return;
565
566 IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
567 queue_work(priv->workqueue, &priv->ct_enter);
568}
569
570void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
571{
572 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
573 return;
574
575 IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
576 queue_work(priv->workqueue, &priv->ct_exit);
577}
578
579static void iwl_bg_tt_work(struct work_struct *work)
580{
581 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
582 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
583
584 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
585 return;
586
587 if (!priv->thermal_throttle.advanced_tt)
588 iwl_legacy_tt_handler(priv, temp, false);
589 else
590 iwl_advance_tt_handler(priv, temp, false);
591}
592
593void iwl_tt_handler(struct iwl_priv *priv)
594{
595 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
596 return;
597
598 IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
599 queue_work(priv->workqueue, &priv->tt_work);
600}
601
602/* Thermal throttling initialization
603 * For advance thermal throttling:
604 * Initialize Thermal Index and temperature threshold table
605 * Initialize thermal throttling restriction table
606 */
607void iwl_tt_initialize(struct iwl_priv *priv)
608{
609 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
610 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
611 struct iwl_tt_trans *transaction;
612
613 IWL_DEBUG_TEMP(priv, "Initialize Thermal Throttling\n");
614
615 memset(tt, 0, sizeof(struct iwl_tt_mgmt));
616
617 tt->state = IWL_TI_0;
618 init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
619 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
620 priv->thermal_throttle.ct_kill_exit_tm.function =
621 iwl_tt_check_exit_ct_kill;
622 init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
623 priv->thermal_throttle.ct_kill_waiting_tm.data =
624 (unsigned long)priv;
625 priv->thermal_throttle.ct_kill_waiting_tm.function =
626 iwl_tt_ready_for_ct_kill;
627 /* setup deferred ct kill work */
628 INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
629 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
630 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
631
632 if (priv->cfg->base_params->adv_thermal_throttle) {
633 IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
634 tt->restriction = kcalloc(IWL_TI_STATE_MAX,
635 sizeof(struct iwl_tt_restriction),
636 GFP_KERNEL);
637 tt->transaction = kcalloc(IWL_TI_STATE_MAX *
638 (IWL_TI_STATE_MAX - 1),
639 sizeof(struct iwl_tt_trans),
640 GFP_KERNEL);
641 if (!tt->restriction || !tt->transaction) {
642 IWL_ERR(priv, "Fallback to Legacy Throttling\n");
643 priv->thermal_throttle.advanced_tt = false;
644 kfree(tt->restriction);
645 tt->restriction = NULL;
646 kfree(tt->transaction);
647 tt->transaction = NULL;
648 } else {
649 transaction = tt->transaction +
650 (IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
651 memcpy(transaction, &tt_range_0[0], size);
652 transaction = tt->transaction +
653 (IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
654 memcpy(transaction, &tt_range_1[0], size);
655 transaction = tt->transaction +
656 (IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
657 memcpy(transaction, &tt_range_2[0], size);
658 transaction = tt->transaction +
659 (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
660 memcpy(transaction, &tt_range_3[0], size);
661 size = sizeof(struct iwl_tt_restriction) *
662 IWL_TI_STATE_MAX;
663 memcpy(tt->restriction,
664 &restriction_range[0], size);
665 priv->thermal_throttle.advanced_tt = true;
666 }
667 } else {
668 IWL_DEBUG_TEMP(priv, "Legacy Thermal Throttling\n");
669 priv->thermal_throttle.advanced_tt = false;
670 }
671}
672
673/* cleanup thermal throttling management related memory and timer */
674void iwl_tt_exit(struct iwl_priv *priv)
675{
676 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
677
678 /* stop ct_kill_exit_tm timer if activated */
679 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
680 /* stop ct_kill_waiting_tm timer if activated */
681 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
682 cancel_work_sync(&priv->tt_work);
683 cancel_work_sync(&priv->ct_enter);
684 cancel_work_sync(&priv->ct_exit);
685
686 if (priv->thermal_throttle.advanced_tt) {
687 /* free advance thermal throttling memory */
688 kfree(tt->restriction);
689 tt->restriction = NULL;
690 kfree(tt->transaction);
691 tt->transaction = NULL;
692 }
693}
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
deleted file mode 100644
index 44c7c8f30a2..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/tt.h
+++ /dev/null
@@ -1,128 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_tt_setting_h__
29#define __iwl_tt_setting_h__
30
31#include "commands.h"
32
/*
 * Temperature sentinels and hysteresis margins used by the thermal
 * throttling state machine (units match the thresholds in the tt.c
 * transition tables — presumably degrees; confirm against tt.c).
 */
33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
35#define IWL_TT_INCREASE_MARGIN 5
36#define IWL_TT_CT_KILL_MARGIN 3
37
/* How many TX/RX chains the current thermal state allows to be used */
38enum iwl_antenna_ok {
39	IWL_ANT_OK_NONE,
40	IWL_ANT_OK_SINGLE,
41	IWL_ANT_OK_MULTI,
42};
43
44/* Thermal Throttling State Machine states */
45enum iwl_tt_state {
46	IWL_TI_0,	/* normal temperature, system power state */
47	IWL_TI_1,	/* high temperature detect, low power state */
48	IWL_TI_2,	/* higher temperature detected, lower power state */
49	IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */
50	IWL_TI_STATE_MAX
51};
52
53/**
54 * struct iwl_tt_restriction - Thermal Throttling restriction table
55 * @tx_stream: number of tx stream allowed
56 * @is_ht: ht enable/disable
57 * @rx_stream: number of rx stream allowed
58 *
59 * This table is used by advance thermal throttling management
60 * based on the current thermal throttling state, and determines
61 * the number of tx/rx streams and the status of HT operation.
 *
 * The table is indexed by the current enum iwl_tt_state (one entry
 * per state; see the IWL_TI_STATE_MAX-sized copy in tt.c).
62 */
63struct iwl_tt_restriction {
64	enum iwl_antenna_ok tx_stream;
65	enum iwl_antenna_ok rx_stream;
66	bool is_ht;
67};
68
69/**
70 * struct iwl_tt_trans - Thermal Throttling transaction table
71 * @next_state: next thermal throttling mode
72 * @tt_low: low temperature threshold to change state
73 * @tt_high: high temperature threshold to change state
74 *
75 * This is used by the advanced thermal throttling algorithm
76 * to determine the next thermal state to go based on the
77 * current temperature.
 *
 * Note: "transaction" here really means state *transition*; tt.c
 * stores (IWL_TI_STATE_MAX - 1) of these entries per source state.
78 */
79struct iwl_tt_trans {
80	enum iwl_tt_state next_state;
81	u32 tt_low;
82	u32 tt_high;
83};
84
/**
 * struct iwl_tt_mgmt - Thermal Throttling Management structure
 * @state: current Thermal Throttling state
 * @advanced_tt: true when the advanced (table driven) algorithm is in
 *	use; false selects legacy throttling
 * @tt_power_mode: Thermal Throttling power mode index used to set the
 *	power level when state != IWL_TI_0; set to a different power
 *	mode based on the current tt state
 * @ct_kill_toggle: used to toggle the CSR bit when checking uCode
 *	temperature
 * @tt_previous_temp: last measured temperature (debug builds only)
 * @restriction: ptr to restriction table, used by advanced thermal
 *	throttling to determine how many tx/rx streams should be used
 *	in the current state and whether HT may be enabled
 * @transaction: ptr to the advanced state-transition table, used to
 *	pick the next state from the current temperature
 * @ct_kill_exit_tm: timer to exit thermal kill
 * @ct_kill_waiting_tm: second CT-kill timer — presumably a wait/grace
 *	timer during CT-kill handling; confirm against tt.c arming sites
 *
 * (Fixed here: the original kernel-doc named the struct "iwl_tt_mgnt"
 * and omitted @ct_kill_waiting_tm and @tt_previous_temp.)
 */
103struct iwl_tt_mgmt {
104	enum iwl_tt_state state;
105	bool advanced_tt;
106	u8 tt_power_mode;
107	bool ct_kill_toggle;
108#ifdef CONFIG_IWLWIFI_DEBUG
109	s32 tt_previous_temp;
110#endif
111	struct iwl_tt_restriction *restriction;
112	struct iwl_tt_trans *transaction;
113	struct timer_list ct_kill_exit_tm;
114	struct timer_list ct_kill_waiting_tm;
115};
116
/* Public thermal-throttling API, implemented in tt.c */
117u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
118bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
119bool iwl_ht_enabled(struct iwl_priv *priv);
120enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
121enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
122void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
123void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
124void iwl_tt_handler(struct iwl_priv *priv);
125void iwl_tt_initialize(struct iwl_priv *priv);
/* iwl_tt_exit: stops timers/work and frees the advanced-TT tables */
126void iwl_tt_exit(struct iwl_priv *priv);
127
128#endif /* __iwl_tt_setting_h__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
deleted file mode 100644
index a790599fe2c..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ /dev/null
@@ -1,1384 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34#include <linux/ieee80211.h>
35#include "iwl-io.h"
36#include "iwl-trans.h"
37#include "iwl-agn-hw.h"
38#include "dev.h"
39#include "agn.h"
40
/*
 * Map IEEE 802.11 QoS TID (array index, 0..7) to mac80211 access
 * category: TIDs 0/3 -> BE, 1/2 -> BK, 4/5 -> VI, 6/7 -> VO.
 */
41static const u8 tid_to_ac[] = {
42	IEEE80211_AC_BE,
43	IEEE80211_AC_BK,
44	IEEE80211_AC_BK,
45	IEEE80211_AC_BE,
46	IEEE80211_AC_VI,
47	IEEE80211_AC_VI,
48	IEEE80211_AC_VO,
49	IEEE80211_AC_VO,
50};
51
52static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
53 struct ieee80211_tx_info *info,
54 __le16 fc, __le32 *tx_flags)
55{
56 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
57 info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
58 info->flags & IEEE80211_TX_CTL_AMPDU)
59 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
60}
61
62/*
63 * handle build REPLY_TX command notification.
 *
 * Fills the rate-independent part of the REPLY_TX command: ACK policy,
 * TSF/immediate-BA flags, the BT-coexist "ignore" override for
 * auth/assoc/EAPOL frames on 2.4 GHz, QoS TID vs. sequence-control
 * handling, protection flags and the PM frame timeout.  tx_flags is
 * accumulated in a local and written back once at the end.
64 */
65static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
66				      struct sk_buff *skb,
67				      struct iwl_tx_cmd *tx_cmd,
68				      struct ieee80211_tx_info *info,
69				      struct ieee80211_hdr *hdr, u8 sta_id)
70{
71	__le16 fc = hdr->frame_control;
72	__le32 tx_flags = tx_cmd->tx_flags;
73
74	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
75
76	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
77		tx_flags |= TX_CMD_FLG_ACK_MSK;
78	else
79		tx_flags &= ~TX_CMD_FLG_ACK_MSK;
80
81	if (ieee80211_is_probe_resp(fc))
82		tx_flags |= TX_CMD_FLG_TSF_MSK;
83	else if (ieee80211_is_back_req(fc))
84		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
85	else if (info->band == IEEE80211_BAND_2GHZ &&
86		 priv->cfg->bt_params &&
87		 priv->cfg->bt_params->advanced_bt_coexist &&
88		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
89		 ieee80211_is_reassoc_req(fc) ||
90		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
		/* connection-critical frames must not yield to BT traffic */
91		tx_flags |= TX_CMD_FLG_IGNORE_BT;
92
93
94	tx_cmd->sta_id = sta_id;
95	if (ieee80211_has_morefrags(fc))
96		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
97
98	if (ieee80211_is_data_qos(fc)) {
99		u8 *qc = ieee80211_get_qos_ctl(hdr);
100		tx_cmd->tid_tspec = qc[0] & 0xf;
		/* QoS data: per-TID sequence numbers handled by driver/uCode */
101		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
102	} else {
103		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
104		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
105			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
106		else
107			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
108	}
109
110	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
111
112	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
113	if (ieee80211_is_mgmt(fc)) {
		/* (re)assoc frames get a slightly longer PM frame timeout */
114		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
115			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
116		else
117			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
118	} else {
119		tx_cmd->timeout.pm_frame_timeout = 0;
120	}
121
122	tx_cmd->driver_txop = 0;
123	tx_cmd->tx_flags = tx_flags;
124	tx_cmd->next_frame_len = 0;
125}
126
/*
 * Fill the rate/retry part of the REPLY_TX command: retry limits
 * (reduced under WoWLAN), station-table rate selection for data/BAR
 * frames, and an explicit legacy rate + antenna toggle for everything
 * else.
 */
127static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
128				     struct iwl_tx_cmd *tx_cmd,
129				     struct ieee80211_tx_info *info,
130				     struct ieee80211_sta *sta,
131				     __le16 fc)
132{
133	u32 rate_flags;
134	int rate_idx;
135	u8 rts_retry_limit;
136	u8 data_retry_limit;
137	u8 rate_plcp;
138
139	if (priv->wowlan) {
140		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
141		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
142	} else {
143		/* Set retry limit on RTS packets */
144		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;
145
146		/* Set retry limit on DATA packets and Probe Responses*/
147		if (ieee80211_is_probe_resp(fc)) {
148			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
149			rts_retry_limit =
150				min(data_retry_limit, rts_retry_limit);
151		} else if (ieee80211_is_back_req(fc))
152			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
153		else
154			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
155	}
156
157	tx_cmd->data_retry_limit = data_retry_limit;
158	tx_cmd->rts_retry_limit = rts_retry_limit;
159
160	/* DATA packets will use the uCode station table for rate/antenna
161	 * selection */
162	if (ieee80211_is_data(fc)) {
163		tx_cmd->initial_rate_index = 0;
164		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
165#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
166		if (priv->tm_fixed_rate) {
167			/*
168			 * rate overwrite by testmode
169			 * we not only send lq command to change rate
170			 * we also re-enforce per data pkt base.
171			 */
172			tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
173			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
174			       sizeof(tx_cmd->rate_n_flags));
175		}
176#endif
177		return;
178	} else if (ieee80211_is_back_req(fc))
179		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
180
181	/**
182	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
183	 * not really a TX rate. Thus, we use the lowest supported rate for
184	 * this band. Also use the lowest supported rate if the stored rate
185	 * index is invalid.
186	 */
187	rate_idx = info->control.rates[0].idx;
	/*
	 * NOTE(review): valid legacy indices should be
	 * 0..IWL_RATE_COUNT_LEGACY-1; the `>` comparison below lets
	 * rate_idx == IWL_RATE_COUNT_LEGACY through — looks like an
	 * off-by-one, confirm against the iwl_rates[] table size.
	 */
188	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
189			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
190		rate_idx = rate_lowest_index(
191				&priv->nvm_data->bands[info->band], sta);
192	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
193	if (info->band == IEEE80211_BAND_5GHZ)
194		rate_idx += IWL_FIRST_OFDM_RATE;
195	/* Get PLCP rate for tx_cmd->rate_n_flags */
196	rate_plcp = iwl_rates[rate_idx].plcp;
197	/* Zero out flags for this packet */
198	rate_flags = 0;
199
200	/* Set CCK flag as needed */
201	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
202		rate_flags |= RATE_MCS_CCK_MSK;
203
204	/* Set up antennas */
205	if (priv->cfg->bt_params &&
206	    priv->cfg->bt_params->advanced_bt_coexist &&
207	    priv->bt_full_concurrent) {
208		/* operated as 1x1 in full concurrency mode */
209		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
210				first_antenna(priv->nvm_data->valid_tx_ant));
211	} else
212		priv->mgmt_tx_ant = iwl_toggle_tx_ant(
213					priv, priv->mgmt_tx_ant,
214					priv->nvm_data->valid_tx_ant);
215	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
216
217	/* Set the rate in the TX cmd */
218	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
219}
220
/*
 * Program the TX command's hardware-crypto fields from the mac80211
 * key configuration: cipher selector bits in sec_ctl and the key
 * material itself (per-packet TKIP phase-2 key is derived here).
 */
221static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
222				      struct ieee80211_tx_info *info,
223				      struct iwl_tx_cmd *tx_cmd,
224				      struct sk_buff *skb_frag)
225{
226	struct ieee80211_key_conf *keyconf = info->control.hw_key;
227
228	switch (keyconf->cipher) {
229	case WLAN_CIPHER_SUITE_CCMP:
230		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
231		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
232		if (info->flags & IEEE80211_TX_CTL_AMPDU)
233			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
234		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
235		break;
236
237	case WLAN_CIPHER_SUITE_TKIP:
238		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
239		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
240		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
241		break;
242
243	case WLAN_CIPHER_SUITE_WEP104:
244		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
245		/* fall through */
246	case WLAN_CIPHER_SUITE_WEP40:
		/* WEP key starts at offset 3 in the command's key field */
247		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
248			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
249
250		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
251
252		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
253			     "with key %d\n", keyconf->keyidx);
254		break;
255
256	default:
257		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
258		break;
259	}
260}
261
262/**
263 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
264 * @context: the current context
265 * @sta: mac80211 station
266 *
267 * In certain circumstances mac80211 passes a station pointer
268 * that may be %NULL, for example during TX or key setup. In
269 * that case, we need to use the broadcast station, so this
270 * inline wraps that pattern.
271 */
272static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
273 struct ieee80211_sta *sta)
274{
275 int sta_id;
276
277 if (!sta)
278 return context->bcast_sta_id;
279
280 sta_id = iwl_sta_id(sta);
281
282 /*
283 * mac80211 should not be passing a partially
284 * initialised station!
285 */
286 WARN_ON(sta_id == IWL_INVALID_STATION);
287
288 return sta_id;
289}
290
291/*
292 * start REPLY_TX command process
 *
 * mac80211 TX entry point.  Builds the REPLY_TX command for @skb
 * (basic flags, rate, hw-crypto), assigns the station id, sequence
 * number and destination hw queue (aggregation queue, multicast
 * after-DTIM queue, off-channel aux queue, or the AC queue), then
 * hands the frame to the transport.  Returns 0 on success, -1 when
 * the frame is dropped.
293 */
294int iwlagn_tx_skb(struct iwl_priv *priv,
295		  struct ieee80211_sta *sta,
296		  struct sk_buff *skb)
297{
298	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
299	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
300	struct iwl_station_priv *sta_priv = NULL;
301	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
302	struct iwl_device_cmd *dev_cmd;
303	struct iwl_tx_cmd *tx_cmd;
304	__le16 fc;
305	u8 hdr_len;
306	u16 len, seq_number = 0;
307	u8 sta_id, tid = IWL_MAX_TID_COUNT;
308	bool is_agg = false, is_data_qos = false;
309	int txq_id;
310
311	if (info->control.vif)
312		ctx = iwl_rxon_ctx_from_vif(info->control.vif);
313
314	if (iwl_is_rfkill(priv)) {
315		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
316		goto drop_unlock_priv;
317	}
318
319	fc = hdr->frame_control;
320
321#ifdef CONFIG_IWLWIFI_DEBUG
322	if (ieee80211_is_auth(fc))
323		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
324	else if (ieee80211_is_assoc_req(fc))
325		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
326	else if (ieee80211_is_reassoc_req(fc))
327		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
328#endif
329
	/*
	 * P2P: append the Notice-of-Absence attribute to probe responses.
	 * NOTE(review): rcu_dereference() with no rcu_read_lock() visible
	 * in this function — presumably the caller holds it; confirm.
	 */
330	if (unlikely(ieee80211_is_probe_resp(fc))) {
331		struct iwl_wipan_noa_data *noa_data =
332			rcu_dereference(priv->noa_data);
333
334		if (noa_data &&
335		    pskb_expand_head(skb, 0, noa_data->length,
336				     GFP_ATOMIC) == 0) {
337			memcpy(skb_put(skb, noa_data->length),
338			       noa_data->data, noa_data->length);
339			hdr = (struct ieee80211_hdr *)skb->data;
340		}
341	}
342
343	hdr_len = ieee80211_hdrlen(fc);
344
345	/* For management frames use broadcast id to do not break aggregation */
346	if (!ieee80211_is_data(fc))
347		sta_id = ctx->bcast_sta_id;
348	else {
349		/* Find index into station table for destination station */
350		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
351		if (sta_id == IWL_INVALID_STATION) {
352			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
353				       hdr->addr1);
354			goto drop_unlock_priv;
355		}
356	}
357
358	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
359
360	if (sta)
361		sta_priv = (void *)sta->drv_priv;
362
363	if (sta_priv && sta_priv->asleep &&
364	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
365		/*
366		 * This sends an asynchronous command to the device,
367		 * but we can rely on it being processed before the
368		 * next frame is processed -- and the next frame to
369		 * this station is the one that will consume this
370		 * counter.
371		 * For now set the counter to just 1 since we do not
372		 * support uAPSD yet.
373		 *
374		 * FIXME: If we get two non-bufferable frames one
375		 * after the other, we might only send out one of
376		 * them because this is racy.
377		 */
378		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
379	}
380
381	dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
382
383	if (unlikely(!dev_cmd))
384		goto drop_unlock_priv;
385
386	memset(dev_cmd, 0, sizeof(*dev_cmd));
387	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
388
389	/* Total # bytes to be transmitted */
390	len = (u16)skb->len;
391	tx_cmd->len = cpu_to_le16(len);
392
393	if (info->control.hw_key)
394		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);
395
396	/* TODO need this for burst mode later on */
397	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
398
399	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
400
401	memset(&info->status, 0, sizeof(info->status));
402
403	info->driver_data[0] = ctx;
404	info->driver_data[1] = dev_cmd;
405	/* From now on, we cannot access info->control */
406
407	spin_lock(&priv->sta_lock);
408
	/* QoS data frames: validate TID/agg state and assign the seq number */
409	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
410		u8 *qc = NULL;
411		struct iwl_tid_data *tid_data;
412		qc = ieee80211_get_qos_ctl(hdr);
413		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
414		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
415			goto drop_unlock_sta;
416		tid_data = &priv->tid_data[sta_id][tid];
417
418		/* aggregation is on for this <sta,tid> */
419		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
420		    tid_data->agg.state != IWL_AGG_ON) {
421			IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
422				" Tx flags = 0x%08x, agg.state = %d",
423				info->flags, tid_data->agg.state);
424			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
425				sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
426			goto drop_unlock_sta;
427		}
428
429		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
430		 * only. Check this here.
431		 */
432		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
433			      tid_data->agg.state != IWL_AGG_OFF,
434			      "Tx while agg.state = %d", tid_data->agg.state))
435			goto drop_unlock_sta;
436
437		seq_number = tid_data->seq_number;
438		seq_number &= IEEE80211_SCTL_SEQ;
439		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
440		hdr->seq_ctrl |= cpu_to_le16(seq_number);
441		seq_number += 0x10;
442
443		if (info->flags & IEEE80211_TX_CTL_AMPDU)
444			is_agg = true;
445		is_data_qos = true;
446	}
447
448	/* Copy MAC header from skb into command buffer */
449	memcpy(tx_cmd->hdr, hdr, hdr_len);
450
451	if (is_agg)
452		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
453	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
454		/*
455		 * Send this frame after DTIM -- there's a special queue
456		 * reserved for this for contexts that support AP mode.
457		 */
458		txq_id = ctx->mcast_queue;
459
460		/*
461		 * The microcode will clear the more data
462		 * bit in the last frame it transmits.
463		 */
464		hdr->frame_control |=
465			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
466	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
467		txq_id = IWL_AUX_QUEUE;
468	else
469		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
470
471	WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
472	WARN_ON_ONCE(is_agg &&
473		     priv->queue_to_mac80211[txq_id] != info->hw_queue);
474
475	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
476		goto drop_unlock_sta;
477
	/* only advance the stored seq number once the frame is queued */
478	if (is_data_qos && !ieee80211_has_morefrags(fc))
479		priv->tid_data[sta_id][tid].seq_number = seq_number;
480
481	spin_unlock(&priv->sta_lock);
482
483	/*
484	 * Avoid atomic ops if it isn't an associated client.
485	 * Also, if this is a packet for aggregation, don't
486	 * increase the counter because the ucode will stop
487	 * aggregation queues when their respective station
488	 * goes to sleep.
489	 */
490	if (sta_priv && sta_priv->client && !is_agg)
491		atomic_inc(&sta_priv->pending_frames);
492
493	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
494		iwl_scan_offchannel_skb(priv);
495
496	return 0;
497
498drop_unlock_sta:
	/* dev_cmd is always non-NULL on this path (alloc checked above);
	 * the NULL test is redundant but harmless */
499	if (dev_cmd)
500		iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
501	spin_unlock(&priv->sta_lock);
502drop_unlock_priv:
	/* label name is historical — no priv lock is held here */
503	return -1;
504}
505
506static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
507{
508 int q;
509
510 for (q = IWLAGN_FIRST_AMPDU_QUEUE;
511 q < priv->cfg->base_params->num_of_queues; q++) {
512 if (!test_and_set_bit(q, priv->agg_q_alloc)) {
513 priv->queue_to_mac80211[q] = mq;
514 return q;
515 }
516 }
517
518 return -ENOSPC;
519}
520
521static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
522{
523 clear_bit(q, priv->agg_q_alloc);
524 priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
525}
526
/*
 * Stop a TX aggregation (DELBA) session for <sta, tid>.  Depending on
 * the current agg state this either turns the session off immediately
 * (queue never fully started, or already drained) or parks it in
 * IWL_EMPTYING_HW_QUEUE_DELBA until outstanding frames are reclaimed.
 */
527int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
528		       struct ieee80211_sta *sta, u16 tid)
529{
530	struct iwl_tid_data *tid_data;
531	int sta_id, txq_id;
532	enum iwl_agg_state agg_state;
533
534	sta_id = iwl_sta_id(sta);
535
536	if (sta_id == IWL_INVALID_STATION) {
537		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
538		return -ENXIO;
539	}
540
541	spin_lock_bh(&priv->sta_lock);
542
543	tid_data = &priv->tid_data[sta_id][tid];
544	txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
545
546	switch (priv->tid_data[sta_id][tid].agg.state) {
547	case IWL_EMPTYING_HW_QUEUE_ADDBA:
548		/*
549		 * This can happen if the peer stops aggregation
550		 * again before we've had a chance to drain the
551		 * queue we selected previously, i.e. before the
552		 * session was really started completely.
553		 */
554		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
555		goto turn_off;
556	case IWL_AGG_STARTING:
557		/*
558		 * This can happen when the session is stopped before
559		 * we receive ADDBA response
560		 */
561		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
562		goto turn_off;
563	case IWL_AGG_ON:
564		break;
565	default:
566		IWL_WARN(priv, "Stopping AGG while state not ON "
567			 "or starting for %d on %d (%d)\n", sta_id, tid,
568			 priv->tid_data[sta_id][tid].agg.state);
569		spin_unlock_bh(&priv->sta_lock);
570		return 0;
571	}
572
573	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
574
575	/* There are still packets for this RA / TID in the HW */
576	if (!test_bit(txq_id, priv->agg_q_alloc)) {
577		IWL_DEBUG_TX_QUEUES(priv,
578			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
579			sta_id, tid, txq_id);
580	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		/* not drained yet: defer teardown to iwlagn_check_ratid_empty() */
581		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
582				    "next_recl = %d\n",
583				    tid_data->agg.ssn,
584				    tid_data->next_reclaimed);
585		priv->tid_data[sta_id][tid].agg.state =
586			IWL_EMPTYING_HW_QUEUE_DELBA;
587		spin_unlock_bh(&priv->sta_lock);
588		return 0;
589	}
590
591	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
592			    tid_data->agg.ssn);
593turn_off:
594	agg_state = priv->tid_data[sta_id][tid].agg.state;
595	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
596
597	spin_unlock_bh(&priv->sta_lock);
598
599	if (test_bit(txq_id, priv->agg_q_alloc)) {
600		/*
601		 * If the transport didn't know that we wanted to start
602		 * agreggation, don't tell it that we want to stop them.
603		 * This can happen when we don't get the addBA response on
604		 * time, or we hadn't time to drain the AC queues.
605		 */
606		if (agg_state == IWL_AGG_ON)
607			iwl_trans_txq_disable(priv->trans, txq_id);
608		else
609			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
610					    agg_state);
611		iwlagn_dealloc_agg_txq(priv, txq_id);
612	}
613
614	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
615
616	return 0;
617}
618
619int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
620 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
621{
622 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
623 struct iwl_tid_data *tid_data;
624 int sta_id, txq_id, ret;
625
626 IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
627 sta->addr, tid);
628
629 sta_id = iwl_sta_id(sta);
630 if (sta_id == IWL_INVALID_STATION) {
631 IWL_ERR(priv, "Start AGG on invalid station\n");
632 return -ENXIO;
633 }
634 if (unlikely(tid >= IWL_MAX_TID_COUNT))
635 return -EINVAL;
636
637 if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
638 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
639 return -ENXIO;
640 }
641
642 txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
643 if (txq_id < 0) {
644 IWL_DEBUG_TX_QUEUES(priv,
645 "No free aggregation queue for %pM/%d\n",
646 sta->addr, tid);
647 return txq_id;
648 }
649
650 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
651 if (ret)
652 return ret;
653
654 spin_lock_bh(&priv->sta_lock);
655 tid_data = &priv->tid_data[sta_id][tid];
656 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
657 tid_data->agg.txq_id = txq_id;
658
659 *ssn = tid_data->agg.ssn;
660
661 if (*ssn == tid_data->next_reclaimed) {
662 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
663 tid_data->agg.ssn);
664 tid_data->agg.state = IWL_AGG_STARTING;
665 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
666 } else {
667 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
668 "next_reclaimed = %d\n",
669 tid_data->agg.ssn,
670 tid_data->next_reclaimed);
671 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
672 }
673 spin_unlock_bh(&priv->sta_lock);
674
675 return ret;
676}
677
/*
 * ADDBA response received: make the aggregation session operational.
 * Marks the <sta, tid> state IWL_AGG_ON, enables the hw queue at the
 * session's ssn, clamps the peer's reorder-buffer size to our uCode's
 * per-station limit and pushes an updated link-quality command.
 */
678int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
679			struct ieee80211_sta *sta, u16 tid, u8 buf_size)
680{
681	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
682	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
683	int q, fifo;
684	u16 ssn;
685
686	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
687
688	spin_lock_bh(&priv->sta_lock);
689	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
690	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
691	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
692	spin_unlock_bh(&priv->sta_lock);
693
694	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
695
696	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
697			     buf_size, ssn);
698
699	/*
700	 * If the limit is 0, then it wasn't initialised yet,
701	 * use the default. We can do that since we take the
702	 * minimum below, and we don't want to go above our
703	 * default due to hardware restrictions.
704	 */
705	if (sta_priv->max_agg_bufsize == 0)
706		sta_priv->max_agg_bufsize =
707			LINK_QUAL_AGG_FRAME_LIMIT_DEF;
708
709	/*
710	 * Even though in theory the peer could have different
711	 * aggregation reorder buffer sizes for different sessions,
712	 * our ucode doesn't allow for that and has a global limit
713	 * for each station. Therefore, use the minimum of all the
714	 * aggregation sessions and our default value.
715	 */
716	sta_priv->max_agg_bufsize =
717		min(sta_priv->max_agg_bufsize, buf_size);
718
719	if (priv->hw_params.use_rts_for_aggregation) {
720		/*
721		 * switch to RTS/CTS if it is the prefer protection
722		 * method for HT traffic
723		 */
724
725		sta_priv->lq_sta.lq.general_params.flags |=
726			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
727	}
728	priv->agg_tids_count++;
729	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
730		     priv->agg_tids_count);
731
732	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
733		sta_priv->max_agg_bufsize;
734
735	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
736		 sta->addr, tid);
737
738	return iwl_send_lq_cmd(priv, ctx,
739			&sta_priv->lq_sta.lq, CMD_ASYNC, false);
740}
741
/*
 * Called (with sta_lock held) after frames were reclaimed for
 * <sta_id, tid>: if the hw queue has drained while a DELBA or ADDBA
 * flow was pending, complete that flow — tear the queue down and
 * notify mac80211 (DELBA), or let the new session start (ADDBA).
 */
742static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
743{
744	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
745	enum iwl_rxon_context_id ctx;
746	struct ieee80211_vif *vif;
747	u8 *addr;
748
749	lockdep_assert_held(&priv->sta_lock);
750
751	addr = priv->stations[sta_id].sta.sta.addr;
752	ctx = priv->stations[sta_id].ctxid;
753	vif = priv->contexts[ctx].vif;
754
755	switch (priv->tid_data[sta_id][tid].agg.state) {
756	case IWL_EMPTYING_HW_QUEUE_DELBA:
757		/* There are no packets for this RA / TID in the HW any more */
758		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
759			IWL_DEBUG_TX_QUEUES(priv,
760				"Can continue DELBA flow ssn = next_recl ="
761				" %d", tid_data->next_reclaimed);
762			iwl_trans_txq_disable(priv->trans,
763					      tid_data->agg.txq_id);
764			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
765			tid_data->agg.state = IWL_AGG_OFF;
766			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
767		}
768		break;
769	case IWL_EMPTYING_HW_QUEUE_ADDBA:
770		/* There are no packets for this RA / TID in the HW any more */
771		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
772			IWL_DEBUG_TX_QUEUES(priv,
773				"Can continue ADDBA flow ssn = next_recl ="
774				" %d", tid_data->next_reclaimed);
775			tid_data->agg.state = IWL_AGG_STARTING;
776			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
777		}
778		break;
779	default:
780		break;
781	}
782}
783
784static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
785 struct iwl_rxon_context *ctx,
786 const u8 *addr1)
787{
788 struct ieee80211_sta *sta;
789 struct iwl_station_priv *sta_priv;
790
791 rcu_read_lock();
792 sta = ieee80211_find_sta(ctx->vif, addr1);
793 if (sta) {
794 sta_priv = (void *)sta->drv_priv;
795 /* avoid atomic ops if this isn't a client */
796 if (sta_priv->client &&
797 atomic_dec_return(&sta_priv->pending_frames) == 0)
798 ieee80211_sta_block_awake(priv->hw, sta, false);
799 }
800 rcu_read_unlock();
801}
802
803/**
804 * translate ucode response to mac80211 tx status control values
805 */
806static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
807 struct ieee80211_tx_info *info)
808{
809 struct ieee80211_tx_rate *r = &info->status.rates[0];
810
811 info->status.antenna =
812 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
813 if (rate_n_flags & RATE_MCS_HT_MSK)
814 r->flags |= IEEE80211_TX_RC_MCS;
815 if (rate_n_flags & RATE_MCS_GF_MSK)
816 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
817 if (rate_n_flags & RATE_MCS_HT40_MSK)
818 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
819 if (rate_n_flags & RATE_MCS_DUP_MSK)
820 r->flags |= IEEE80211_TX_RC_DUP_DATA;
821 if (rate_n_flags & RATE_MCS_SGI_MSK)
822 r->flags |= IEEE80211_TX_RC_SHORT_GI;
823 r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
824}
825
826#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * Debug helper: translate a uCode TX status word into a short
 * human-readable reason string (masked with TX_STATUS_MSK).
 */
827const char *iwl_get_tx_fail_reason(u32 status)
828{
829#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
830#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
831
832	switch (status & TX_STATUS_MSK) {
833	case TX_STATUS_SUCCESS:
834		return "SUCCESS";
835	TX_STATUS_POSTPONE(DELAY);
836	TX_STATUS_POSTPONE(FEW_BYTES);
837	TX_STATUS_POSTPONE(BT_PRIO);
838	TX_STATUS_POSTPONE(QUIET_PERIOD);
839	TX_STATUS_POSTPONE(CALC_TTAK);
840	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
841	TX_STATUS_FAIL(SHORT_LIMIT);
842	TX_STATUS_FAIL(LONG_LIMIT);
843	TX_STATUS_FAIL(FIFO_UNDERRUN);
844	TX_STATUS_FAIL(DRAIN_FLOW);
845	TX_STATUS_FAIL(RFKILL_FLUSH);
846	TX_STATUS_FAIL(LIFE_EXPIRE);
847	TX_STATUS_FAIL(DEST_PS);
848	TX_STATUS_FAIL(HOST_ABORTED);
849	TX_STATUS_FAIL(BT_RETRY);
850	TX_STATUS_FAIL(STA_INVALID);
851	TX_STATUS_FAIL(FRAG_DROPPED);
852	TX_STATUS_FAIL(TID_DISABLE);
853	TX_STATUS_FAIL(FIFO_FLUSHED);
854	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
855	TX_STATUS_FAIL(PASSIVE_NO_RX);
856	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
857	}
858
859	return "UNKNOWN";
860
861#undef TX_STATUS_FAIL
862#undef TX_STATUS_POSTPONE
863}
864#endif /* CONFIG_IWLWIFI_DEBUG */
865
866static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
867{
868 status &= AGG_TX_STATUS_MSK;
869
870 switch (status) {
871 case AGG_TX_STATE_UNDERRUN_MSK:
872 priv->reply_agg_tx_stats.underrun++;
873 break;
874 case AGG_TX_STATE_BT_PRIO_MSK:
875 priv->reply_agg_tx_stats.bt_prio++;
876 break;
877 case AGG_TX_STATE_FEW_BYTES_MSK:
878 priv->reply_agg_tx_stats.few_bytes++;
879 break;
880 case AGG_TX_STATE_ABORT_MSK:
881 priv->reply_agg_tx_stats.abort++;
882 break;
883 case AGG_TX_STATE_LAST_SENT_TTL_MSK:
884 priv->reply_agg_tx_stats.last_sent_ttl++;
885 break;
886 case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
887 priv->reply_agg_tx_stats.last_sent_try++;
888 break;
889 case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
890 priv->reply_agg_tx_stats.last_sent_bt_kill++;
891 break;
892 case AGG_TX_STATE_SCD_QUERY_MSK:
893 priv->reply_agg_tx_stats.scd_query++;
894 break;
895 case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
896 priv->reply_agg_tx_stats.bad_crc32++;
897 break;
898 case AGG_TX_STATE_RESPONSE_MSK:
899 priv->reply_agg_tx_stats.response++;
900 break;
901 case AGG_TX_STATE_DUMP_TX_MSK:
902 priv->reply_agg_tx_stats.dump_tx++;
903 break;
904 case AGG_TX_STATE_DELAY_TX_MSK:
905 priv->reply_agg_tx_stats.delay_tx++;
906 break;
907 default:
908 priv->reply_agg_tx_stats.unknown++;
909 break;
910 }
911}
912
913static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
914 struct iwlagn_tx_resp *tx_resp)
915{
916 struct agg_tx_status *frame_status = &tx_resp->status;
917 int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
918 IWLAGN_TX_RES_TID_POS;
919 int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
920 IWLAGN_TX_RES_RA_POS;
921 struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
922 u32 status = le16_to_cpu(tx_resp->status.status);
923 int i;
924
925 WARN_ON(tid == IWL_TID_NON_QOS);
926
927 if (agg->wait_for_ba)
928 IWL_DEBUG_TX_REPLY(priv,
929 "got tx response w/o block-ack\n");
930
931 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
932 agg->wait_for_ba = (tx_resp->frame_count > 1);
933
934 /*
935 * If the BT kill count is non-zero, we'll get this
936 * notification again.
937 */
938 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
939 priv->cfg->bt_params &&
940 priv->cfg->bt_params->advanced_bt_coexist) {
941 IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
942 }
943
944 if (tx_resp->frame_count == 1)
945 return;
946
947 /* Construct bit-map of pending frames within Tx window */
948 for (i = 0; i < tx_resp->frame_count; i++) {
949 u16 fstatus = le16_to_cpu(frame_status[i].status);
950
951 if (status & AGG_TX_STATUS_MSK)
952 iwlagn_count_agg_tx_err_status(priv, fstatus);
953
954 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
955 AGG_TX_STATE_ABORT_MSK))
956 continue;
957
958 IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
959 "try-count (0x%08x)\n",
960 iwl_get_agg_tx_fail_reason(fstatus),
961 fstatus & AGG_TX_STATUS_MSK,
962 fstatus & AGG_TX_TRY_MSK);
963 }
964}
965
#ifdef CONFIG_IWLWIFI_DEBUG
/* Expand one switch case per AGG_TX_STATE_* code, returning its name. */
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

/*
 * iwl_get_agg_tx_fail_reason - map an aggregated-Tx frame status to a
 * printable name.  Only the AGG_TX_STATUS_MSK bits are considered;
 * unlisted codes yield "UNKNOWN".  Debug builds only.
 */
const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
	AGG_TX_STATE_FAIL(UNDERRUN_MSK);
	AGG_TX_STATE_FAIL(BT_PRIO_MSK);
	AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
	AGG_TX_STATE_FAIL(ABORT_MSK);
	AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
	AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
	AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
	AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
	AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
	AGG_TX_STATE_FAIL(RESPONSE_MSK);
	AGG_TX_STATE_FAIL(DUMP_TX_MSK);
	AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */
992
/*
 * iwlagn_get_scd_ssn - extract the scheduler SSN from a Tx response
 *
 * The uCode places the scheduler's sequence number right after the
 * frame_count per-frame status entries (each entry read here as one
 * __le32 — TODO confirm agg_tx_status is 32 bits wide), masked down
 * to the sequence-number range with MAX_SN.
 */
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}
998
999static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
1000{
1001 status &= TX_STATUS_MSK;
1002
1003 switch (status) {
1004 case TX_STATUS_POSTPONE_DELAY:
1005 priv->reply_tx_stats.pp_delay++;
1006 break;
1007 case TX_STATUS_POSTPONE_FEW_BYTES:
1008 priv->reply_tx_stats.pp_few_bytes++;
1009 break;
1010 case TX_STATUS_POSTPONE_BT_PRIO:
1011 priv->reply_tx_stats.pp_bt_prio++;
1012 break;
1013 case TX_STATUS_POSTPONE_QUIET_PERIOD:
1014 priv->reply_tx_stats.pp_quiet_period++;
1015 break;
1016 case TX_STATUS_POSTPONE_CALC_TTAK:
1017 priv->reply_tx_stats.pp_calc_ttak++;
1018 break;
1019 case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
1020 priv->reply_tx_stats.int_crossed_retry++;
1021 break;
1022 case TX_STATUS_FAIL_SHORT_LIMIT:
1023 priv->reply_tx_stats.short_limit++;
1024 break;
1025 case TX_STATUS_FAIL_LONG_LIMIT:
1026 priv->reply_tx_stats.long_limit++;
1027 break;
1028 case TX_STATUS_FAIL_FIFO_UNDERRUN:
1029 priv->reply_tx_stats.fifo_underrun++;
1030 break;
1031 case TX_STATUS_FAIL_DRAIN_FLOW:
1032 priv->reply_tx_stats.drain_flow++;
1033 break;
1034 case TX_STATUS_FAIL_RFKILL_FLUSH:
1035 priv->reply_tx_stats.rfkill_flush++;
1036 break;
1037 case TX_STATUS_FAIL_LIFE_EXPIRE:
1038 priv->reply_tx_stats.life_expire++;
1039 break;
1040 case TX_STATUS_FAIL_DEST_PS:
1041 priv->reply_tx_stats.dest_ps++;
1042 break;
1043 case TX_STATUS_FAIL_HOST_ABORTED:
1044 priv->reply_tx_stats.host_abort++;
1045 break;
1046 case TX_STATUS_FAIL_BT_RETRY:
1047 priv->reply_tx_stats.bt_retry++;
1048 break;
1049 case TX_STATUS_FAIL_STA_INVALID:
1050 priv->reply_tx_stats.sta_invalid++;
1051 break;
1052 case TX_STATUS_FAIL_FRAG_DROPPED:
1053 priv->reply_tx_stats.frag_drop++;
1054 break;
1055 case TX_STATUS_FAIL_TID_DISABLE:
1056 priv->reply_tx_stats.tid_disable++;
1057 break;
1058 case TX_STATUS_FAIL_FIFO_FLUSHED:
1059 priv->reply_tx_stats.fifo_flush++;
1060 break;
1061 case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
1062 priv->reply_tx_stats.insuff_cf_poll++;
1063 break;
1064 case TX_STATUS_FAIL_PASSIVE_NO_RX:
1065 priv->reply_tx_stats.fail_hw_drop++;
1066 break;
1067 case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
1068 priv->reply_tx_stats.sta_color_mismatch++;
1069 break;
1070 default:
1071 priv->reply_tx_stats.unknown++;
1072 break;
1073 }
1074}
1075
1076static void iwlagn_set_tx_status(struct iwl_priv *priv,
1077 struct ieee80211_tx_info *info,
1078 struct iwlagn_tx_resp *tx_resp)
1079{
1080 u16 status = le16_to_cpu(tx_resp->status.status);
1081
1082 info->status.rates[0].count = tx_resp->failure_frame + 1;
1083 info->flags |= iwl_tx_status_to_mac80211(status);
1084 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
1085 info);
1086 if (!iwl_is_tx_success(status))
1087 iwlagn_count_tx_err_status(priv, status);
1088}
1089
1090static void iwl_check_abort_status(struct iwl_priv *priv,
1091 u8 frame_count, u32 status)
1092{
1093 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
1094 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
1095 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
1096 queue_work(priv->workqueue, &priv->tx_flush);
1097 }
1098}
1099
/*
 * iwlagn_rx_reply_tx - handler for REPLY_TX (Tx response notification)
 *
 * Reclaims the completed skbs from the transport, updates per-TID
 * reclaim bookkeeping and hands each frame back to mac80211 with its
 * status.  Takes priv->sta_lock; the skbs are returned to mac80211
 * only after the lock is dropped.  Always returns 0.
 */
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
		       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	/* queues at/above IWLAGN_FIRST_AMPDU_QUEUE carry aggregated traffic */
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
	bool is_offchannel_skb;

	/* ra_tid packs the station index and the TID of this response */
	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock(&priv->sta_lock);

	if (is_agg)
		iwl_rx_reply_tx_agg(priv, tx_resp);

	__skb_queue_head_init(&skbs);

	is_offchannel_skb = false;

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		/* advance past this frame: 0x10 is one step in the
		 * sequence-number field of seq_ctl */
		next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		/* pull all frames up to (excluding) ssn out of the TFD ring */
		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

		iwlagn_check_ratid_empty(priv, sta_id, tid);
		freed = 0;

		/* process frames */
		skb_queue_walk(&skbs, skb) {
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			/* driver_data[0] holds the rxon context, [1] the
			 * device command attached at Tx time */
			ctx = info->driver_data[0];
			iwl_trans_free_tx_cmd(priv->trans,
					      info->driver_data[1]);

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    iwl_is_associated_ctx(ctx) && ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				/* block and stop all queues */
				priv->passive_no_rx = true;
				IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
						    "passive channel");
				ieee80211_stop_queues(priv->hw);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
				     tx_resp);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			/*
			 * W/A for FW bug - the seq_ctl isn't updated when the
			 * queues are flushed. Fetch it from the packet itself
			 */
			if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
				next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
				next_reclaimed =
					SEQ_TO_SN(next_reclaimed + 0x10);
			}

			is_offchannel_skb =
				(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
			freed++;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		WARN_ON(!is_agg && freed != 1);

		/*
		 * An offchannel frame can be sent only on the AUX queue,
		 * where there is no aggregation (and reordering), so only
		 * a single skb is expected to be processed.
		 */
		WARN_ON(is_offchannel_skb && freed != 1);
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock(&priv->sta_lock);

	/* hand the frames back to mac80211 outside the lock */
	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status(priv->hw, skb);
	}

	if (is_offchannel_skb)
		iwl_scan_offchannel_skb_status(priv);

	return 0;
}
1249
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.  Reclaims all frames up to the BA
 * window start from the transport and returns them to mac80211; the
 * first QoS-data frame of the batch carries the AMPDU rate-scaling
 * information.  Always returns 0.
 */
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				  struct iwl_rx_cmd_buffer *rxb,
				  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return 0;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->tid_data[sta_id][tid].agg;

	spin_lock(&priv->sta_lock);

	/* uCode sent a BA we did not ask for - nothing to reclaim */
	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock(&priv->sta_lock);
		return 0;
	}

	if (unlikely(scd_flow != agg->txq_id)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now.
		 * Since it is can possibly happen very often and in order
		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
		 */
		IWL_DEBUG_TX_QUEUES(priv,
				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
				    scd_flow, sta_id, tid, agg->txq_id);
		spin_unlock(&priv->sta_lock);
		return 0;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
			  &reclaimed_skbs);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn, ba_resp->txed,
			   ba_resp->txed_2_done);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * Force txed = txed_2_done (acked > sent is impossible),
		 * so the bogus report won't skew rate scaling.
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}

	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

	iwlagn_check_ratid_empty(priv, sta_id, tid);
	freed = 0;

	skb_queue_walk(&reclaimed_skbs, skb) {
		hdr = (struct ieee80211_hdr *)skb->data;

		/* only QoS data frames are expected on an agg queue */
		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		info = IEEE80211_SKB_CB(skb);
		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}
	}

	spin_unlock(&priv->sta_lock);

	/* hand the frames back to mac80211 outside the lock */
	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(priv->hw, skb);
	}

	return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
deleted file mode 100644
index c6467e5554f..00000000000
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ /dev/null
@@ -1,557 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/init.h>
32
33#include "iwl-io.h"
34#include "iwl-agn-hw.h"
35#include "iwl-trans.h"
36#include "iwl-fh.h"
37#include "iwl-op-mode.h"
38
39#include "dev.h"
40#include "agn.h"
41#include "calib.h"
42
43/******************************************************************************
44 *
45 * uCode download functions
46 *
47 ******************************************************************************/
48
49static inline const struct fw_img *
50iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
51{
52 if (ucode_type >= IWL_UCODE_TYPE_MAX)
53 return NULL;
54
55 return &priv->fw->img[ucode_type];
56}
57
58/*
59 * Calibration
60 */
61static int iwl_set_Xtal_calib(struct iwl_priv *priv)
62{
63 struct iwl_calib_xtal_freq_cmd cmd;
64 __le16 *xtal_calib = priv->nvm_data->xtal_calib;
65
66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
68 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
69 return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
70}
71
72static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
73{
74 struct iwl_calib_temperature_offset_cmd cmd;
75
76 memset(&cmd, 0, sizeof(cmd));
77 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
78 cmd.radio_sensor_offset = priv->nvm_data->raw_temperature;
79 if (!(cmd.radio_sensor_offset))
80 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
81
82 IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
83 le16_to_cpu(cmd.radio_sensor_offset));
84 return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
85}
86
87static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
88{
89 struct iwl_calib_temperature_offset_v2_cmd cmd;
90
91 memset(&cmd, 0, sizeof(cmd));
92 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
93 cmd.radio_sensor_offset_high = priv->nvm_data->kelvin_temperature;
94 cmd.radio_sensor_offset_low = priv->nvm_data->raw_temperature;
95 if (!cmd.radio_sensor_offset_low) {
96 IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
97 cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
98 cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
99 }
100 cmd.burntVoltageRef = priv->nvm_data->calib_voltage;
101
102 IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
103 le16_to_cpu(cmd.radio_sensor_offset_high));
104 IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
105 le16_to_cpu(cmd.radio_sensor_offset_low));
106 IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
107 le16_to_cpu(cmd.burntVoltageRef));
108
109 return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
110}
111
112static int iwl_send_calib_cfg(struct iwl_priv *priv)
113{
114 struct iwl_calib_cfg_cmd calib_cfg_cmd;
115 struct iwl_host_cmd cmd = {
116 .id = CALIBRATION_CFG_CMD,
117 .len = { sizeof(struct iwl_calib_cfg_cmd), },
118 .data = { &calib_cfg_cmd, },
119 };
120
121 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
122 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
123 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
124 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
125 calib_cfg_cmd.ucd_calib_cfg.flags =
126 IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
127
128 return iwl_dvm_send_cmd(priv, &cmd);
129}
130
/*
 * iwl_init_alive_start - kick off calibration after the init uCode is alive
 *
 * Opens the BT coexistence calibration envelope when advanced coexist
 * is configured, requests all calibrations, and prepares the
 * temperature-offset calibration for the runtime uCode when needed.
 * Returns 0 or the first error from a command.
 */
int iwl_init_alive_start(struct iwl_priv *priv)
{
	int ret;

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/*
		 * Tell uCode we are ready to perform calibration;
		 * need to perform this before any calibration.
		 * No need to close the envelope since we are going
		 * to load the runtime uCode later.
		 */
		ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
				      BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
		if (ret)
			return ret;

	}

	ret = iwl_send_calib_cfg(priv);
	if (ret)
		return ret;

	/*
	 * temperature offset calibration is only needed for runtime ucode,
	 * so prepare the value now.
	 */
	if (priv->cfg->need_temp_offset_calib) {
		if (priv->cfg->temp_offset_v2)
			return iwl_set_temperature_offset_calib_v2(priv);
		else
			return iwl_set_temperature_offset_calib(priv);
	}

	return 0;
}
167
168static int iwl_send_wimax_coex(struct iwl_priv *priv)
169{
170 struct iwl_wimax_coex_cmd coex_cmd;
171
172 /* coexistence is disabled */
173 memset(&coex_cmd, 0, sizeof(coex_cmd));
174
175 return iwl_dvm_send_cmd_pdu(priv,
176 COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
177 sizeof(coex_cmd), &coex_cmd);
178}
179
/*
 * BT coexistence priority table, indexed by BT_COEX_PRIO_TBL_EVT_*
 * (presumably calib/scan events in enum order — verify against
 * commands.h).  Each entry packs a priority level with a
 * shared-antenna flag; the trailing zeros are unused event slots.
 */
static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	0, 0, 0, 0, 0, 0, 0
};
201
202void iwl_send_prio_tbl(struct iwl_priv *priv)
203{
204 struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
205
206 memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
207 sizeof(iwl_bt_prio_tbl));
208 if (iwl_dvm_send_cmd_pdu(priv,
209 REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
210 sizeof(prio_tbl_cmd), &prio_tbl_cmd))
211 IWL_ERR(priv, "failed to send BT prio tbl command\n");
212}
213
214int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
215{
216 struct iwl_bt_coex_prot_env_cmd env_cmd;
217 int ret;
218
219 env_cmd.action = action;
220 env_cmd.type = type;
221 ret = iwl_dvm_send_cmd_pdu(priv,
222 REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
223 sizeof(env_cmd), &env_cmd);
224 if (ret)
225 IWL_ERR(priv, "failed to send BT env command\n");
226 return ret;
227}
228
/* Default mapping of the four AC Tx queues to hardware FIFOs. */
static const u8 iwlagn_default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
235
/*
 * Queue-to-FIFO mapping used when PAN (IPAN) is supported: the four
 * AC queues, their IPAN counterparts, one unused slot and the AUX
 * FIFO.  Slots marked IWL_TX_FIFO_UNUSED are skipped at enable time.
 */
static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL_TX_FIFO_BK_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_AUX,
};
249
/*
 * iwl_alive_notify - finish device bring-up after the ALIVE notification
 *
 * Informs the transport the firmware is alive, enables the AC Tx
 * queues (using the IPAN mapping when both uCode and NVM support PAN),
 * resets the passive-channel/queue-stop state, then sends the WiMAX
 * coexistence command, the crystal calibration (unless disabled by
 * config) and the stored calibration results.  Returns 0 or the first
 * command error.
 */
static int iwl_alive_notify(struct iwl_priv *priv)
{
	const u8 *queue_to_txf;
	u8 n_queues;
	int ret;
	int i;

	iwl_trans_fw_alive(priv->trans, 0);

	if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
	    priv->nvm_data->sku_cap_ipan_enable) {
		n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
		queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
	} else {
		n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
		queue_to_txf = iwlagn_default_queue_to_tx_fifo;
	}

	for (i = 0; i < n_queues; i++)
		if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
			iwl_trans_ac_txq_enable(priv->trans, i,
						queue_to_txf[i]);

	priv->passive_no_rx = false;
	priv->transport_queue_stop = 0;

	ret = iwl_send_wimax_coex(priv);
	if (ret)
		return ret;

	if (!priv->cfg->no_xtal_calib) {
		ret = iwl_set_Xtal_calib(priv);
		if (ret)
			return ret;
	}

	return iwl_send_calib_results(priv);
}
288
289
/**
 * iwl_verify_sec_sparse - verify a uCode section in card vs. host,
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 * Returns 0 when all sampled words match, -EIO on the first mismatch.
 */
static int iwl_verify_sec_sparse(struct iwl_priv *priv,
				 const struct fw_desc *fw_desc)
{
	__le32 *image = (__le32 *)fw_desc->data;
	u32 len = fw_desc->len;
	u32 val;
	u32 i;

	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
				   i + fw_desc->offset);
		val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image))
			return -EIO;
	}

	return 0;
}
318
/*
 * iwl_print_mismatch_sec - dump the first mismatching words of a
 * uCode section (device vs. host copy), stopping after 20 errors to
 * keep the log bounded.  Used after a sparse verify failure.
 */
static void iwl_print_mismatch_sec(struct iwl_priv *priv,
				   const struct fw_desc *fw_desc)
{
	__le32 *image = (__le32 *)fw_desc->data;
	u32 len = fw_desc->len;
	u32 val;
	u32 offs;
	int errors = 0;

	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);

	/* set the read address once; reads auto-increment from here */
	iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
			   fw_desc->offset);

	for (offs = 0;
	     offs < len && errors < 20;
	     offs += sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section at "
				"offset 0x%x, is 0x%x, s/b 0x%x\n",
				offs, val, le32_to_cpu(*image));
			errors++;
		}
	}
}
346
/**
 * iwl_verify_ucode - determine which instruction image is in SRAM,
 * and verify its contents.
 * Returns 0 when the sparse check passes, -EINVAL for an unknown
 * image type, -EIO (after dumping the mismatches) when it fails.
 */
static int iwl_verify_ucode(struct iwl_priv *priv,
			    enum iwl_ucode_type ucode_type)
{
	const struct fw_img *img = iwl_get_ucode_image(priv, ucode_type);

	if (!img) {
		IWL_ERR(priv, "Invalid ucode requested (%d)\n", ucode_type);
		return -EINVAL;
	}

	if (!iwl_verify_sec_sparse(priv, &img->sec[IWL_UCODE_SECTION_INST])) {
		IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");

	/* log the details of the failure before giving up */
	iwl_print_mismatch_sec(priv, &img->sec[IWL_UCODE_SECTION_INST]);
	return -EIO;
}
371
/* Result of the ALIVE handshake, filled in by iwl_alive_fn(). */
struct iwl_alive_data {
	bool valid;	/* uCode reported UCODE_VALID_OK */
	u8 subtype;	/* image subtype from the ALIVE response */
};
376
/*
 * iwl_alive_fn - notification-wait callback for REPLY_ALIVE
 *
 * Captures the error/event log pointers from the ALIVE response into
 * priv->device_pointers and records validity/subtype in the caller's
 * iwl_alive_data.  Returning true ends the notification wait.
 */
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_priv *priv =
		container_of(notif_wait, struct iwl_priv, notif_wait);
	struct iwl_alive_data *alive_data = data;
	struct iwl_alive_resp *palive;

	palive = (void *)pkt->data;

	IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
		     "0x%01X 0x%01X\n",
		     palive->is_valid, palive->ver_type,
		     palive->ver_subtype);

	priv->device_pointers.error_event_table =
		le32_to_cpu(palive->error_event_table_ptr);
	priv->device_pointers.log_event_table =
		le32_to_cpu(palive->log_event_table_ptr);

	alive_data->subtype = palive->ver_subtype;
	alive_data->valid = palive->is_valid == UCODE_VALID_OK;

	return true;
}
402
403#define UCODE_ALIVE_TIMEOUT HZ
404#define UCODE_CALIB_TIMEOUT (2*HZ)
405
/*
 * iwl_load_ucode_wait_alive - load a uCode image and wait for ALIVE
 *
 * Starts the given firmware image on the transport, waits for the
 * ALIVE notification, optionally verifies the instruction SRAM (skipped
 * for WoWLAN to keep suspend fast), then completes the alive sequence.
 * On any failure priv->cur_ucode is restored to the previous image
 * type.  Returns 0 on success or a negative error.
 */
int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
			      enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_alive_data alive_data;
	const struct fw_img *fw;
	int ret;
	enum iwl_ucode_type old_type;
	static const u8 alive_cmd[] = { REPLY_ALIVE };

	old_type = priv->cur_ucode;
	priv->cur_ucode = ucode_type;
	fw = iwl_get_ucode_image(priv, ucode_type);

	priv->ucode_loaded = false;

	if (!fw)
		return -EINVAL;

	/* register the waiter before starting the firmware so the
	 * ALIVE notification cannot be missed */
	iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(priv->trans, fw);
	if (ret) {
		priv->cur_ucode = old_type;
		iwl_remove_notification(&priv->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&priv->notif_wait, &alive_wait,
				    UCODE_ALIVE_TIMEOUT);
	if (ret) {
		priv->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(priv, "Loaded ucode is not valid!\n");
		priv->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * This step takes a long time (60-80ms!!) and
	 * WoWLAN image should be loaded quickly, so
	 * skip it for WoWLAN.
	 */
	if (ucode_type != IWL_UCODE_WOWLAN) {
		ret = iwl_verify_ucode(priv, ucode_type);
		if (ret) {
			priv->cur_ucode = old_type;
			return ret;
		}

		/* delay a bit to give rfkill time to run */
		msleep(5);
	}

	ret = iwl_alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition: %d\n", ret);
		priv->cur_ucode = old_type;
		return ret;
	}

	priv->ucode_loaded = true;

	return 0;
}
481
/*
 * iwlagn_wait_calib - notification-wait callback for calibration results
 *
 * Records each CALIBRATION_RES_NOTIFICATION payload via iwl_calib_set()
 * and keeps waiting (returns false); returns true — ending the wait —
 * on CALIBRATION_COMPLETE_NOTIFICATION (any other command here trips
 * the WARN_ON, since only these two are registered for).
 */
static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
			      struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_priv *priv = data;
	struct iwl_calib_hdr *hdr;
	int len;

	if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
		WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
		return true;
	}

	hdr = (struct iwl_calib_hdr *)pkt->data;
	len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* reduce the size by the length field itself */
	len -= sizeof(__le32);

	if (iwl_calib_set(priv, hdr, len))
		IWL_ERR(priv, "Failed to record calibration data %d\n",
			hdr->op_code);

	return false;
}
506
/*
 * iwl_run_init_ucode - run the init uCode once to collect calibrations
 *
 * Loads the init image, starts all calibrations and waits for the
 * calibration-complete notification (results are recorded as they
 * arrive by iwlagn_wait_calib).  A no-op if there is no init image or
 * it has already run.  The device is always stopped before returning,
 * whatever the outcome.  Caller must hold priv->mutex.
 */
int iwl_run_init_ucode(struct iwl_priv *priv)
{
	struct iwl_notification_wait calib_wait;
	static const u8 calib_complete[] = {
		CALIBRATION_RES_NOTIFICATION,
		CALIBRATION_COMPLETE_NOTIFICATION
	};
	int ret;

	lockdep_assert_held(&priv->mutex);

	/* No init ucode required? Curious, but maybe ok */
	if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
		return 0;

	/* init calibrations only need to run once per driver life */
	if (priv->init_ucode_run)
		return 0;

	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
				   calib_complete, ARRAY_SIZE(calib_complete),
				   iwlagn_wait_calib, priv);

	/* Will also start the device */
	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
	if (ret)
		goto error;

	ret = iwl_init_alive_start(priv);
	if (ret)
		goto error;

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
				    UCODE_CALIB_TIMEOUT);
	if (!ret)
		priv->init_ucode_run = true;

	goto out;

 error:
	iwl_remove_notification(&priv->notif_wait, &calib_wait);
 out:
	/* Whatever happened, stop the device */
	iwl_trans_stop_device(priv->trans);
	priv->ucode_loaded = false;

	return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 7960a52f6ad..0e5b842529c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -78,23 +78,10 @@
78#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \ 78#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
79 IWLAGN_RTC_DATA_LOWER_BOUND) 79 IWLAGN_RTC_DATA_LOWER_BOUND)
80 80
81#define IWL60_RTC_INST_LOWER_BOUND (0x000000)
82#define IWL60_RTC_INST_UPPER_BOUND (0x040000)
83#define IWL60_RTC_DATA_LOWER_BOUND (0x800000)
84#define IWL60_RTC_DATA_UPPER_BOUND (0x814000)
85#define IWL60_RTC_INST_SIZE \
86 (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND)
87#define IWL60_RTC_DATA_SIZE \
88 (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND)
89
90/* RSSI to dBm */ 81/* RSSI to dBm */
91#define IWLAGN_RSSI_OFFSET 44 82#define IWLAGN_RSSI_OFFSET 44
92 83
93#define IWLAGN_DEFAULT_TX_RETRY 15 84#define IWLAGN_DEFAULT_TX_RETRY 15
94#define IWLAGN_MGMT_DFAULT_RETRY_LIMIT 3
95#define IWLAGN_RTS_DFAULT_RETRY_LIMIT 60
96#define IWLAGN_BAR_DFAULT_RETRY_LIMIT 60
97#define IWLAGN_LOW_RETRY_LIMIT 7
98 85
99/* Limit range of txpower output target to be between these values */ 86/* Limit range of txpower output target to be between these values */
100#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */ 87#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
@@ -102,18 +89,23 @@
102 89
103/* EEPROM */ 90/* EEPROM */
104#define IWLAGN_EEPROM_IMG_SIZE 2048 91#define IWLAGN_EEPROM_IMG_SIZE 2048
105/* OTP */
106/* lower blocks contain EEPROM image and calibration data */
107#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
108/* high blocks contain PAPD data */
109#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */
110#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */
111#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
112#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
113#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
114#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
115
116 92
93#define IWLAGN_CMD_FIFO_NUM 7
117#define IWLAGN_NUM_QUEUES 20 94#define IWLAGN_NUM_QUEUES 20
95#define IWLAGN_NUM_AMPDU_QUEUES 10
96#define IWLAGN_FIRST_AMPDU_QUEUE 10
97
98/* Fixed (non-configurable) rx data from phy */
99
100/**
101 * struct iwlagn_schedq_bc_tbl scheduler byte count table
102 * base physical address provided by SCD_DRAM_BASE_ADDR
103 * @tfd_offset 0-12 - tx command byte count
104 * 12-16 - station index
105 */
106struct iwlagn_scd_bc_tbl {
107 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
108} __packed;
109
118 110
119#endif /* __iwl_agn_hw_h__ */ 111#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
deleted file mode 100644
index 864219d2136..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ /dev/null
@@ -1,277 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __IWL_CONFIG_H__
64#define __IWL_CONFIG_H__
65
66#include <linux/types.h>
67#include <net/mac80211.h>
68
69
70enum iwl_device_family {
71 IWL_DEVICE_FAMILY_UNDEFINED,
72 IWL_DEVICE_FAMILY_1000,
73 IWL_DEVICE_FAMILY_100,
74 IWL_DEVICE_FAMILY_2000,
75 IWL_DEVICE_FAMILY_2030,
76 IWL_DEVICE_FAMILY_105,
77 IWL_DEVICE_FAMILY_135,
78 IWL_DEVICE_FAMILY_5000,
79 IWL_DEVICE_FAMILY_5150,
80 IWL_DEVICE_FAMILY_6000,
81 IWL_DEVICE_FAMILY_6000i,
82 IWL_DEVICE_FAMILY_6005,
83 IWL_DEVICE_FAMILY_6030,
84 IWL_DEVICE_FAMILY_6050,
85 IWL_DEVICE_FAMILY_6150,
86};
87
88/*
89 * LED mode
90 * IWL_LED_DEFAULT: use device default
91 * IWL_LED_RF_STATE: turn LED on/off based on RF state
92 * LED ON = RF ON
93 * LED OFF = RF OFF
94 * IWL_LED_BLINK: adjust led blink rate based on blink table
95 * IWL_LED_DISABLE: led disabled
96 */
97enum iwl_led_mode {
98 IWL_LED_DEFAULT,
99 IWL_LED_RF_STATE,
100 IWL_LED_BLINK,
101 IWL_LED_DISABLE,
102};
103
104/*
105 * This is the threshold value of plcp error rate per 100mSecs. It is
106 * used to set and check for the validity of plcp_delta.
107 */
108#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1
109#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50
110#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100
111#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200
112#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255
113#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0
114
115/* TX queue watchdog timeouts in mSecs */
116#define IWL_WATCHDOG_DISABLED 0
117#define IWL_DEF_WD_TIMEOUT 2000
118#define IWL_LONG_WD_TIMEOUT 10000
119#define IWL_MAX_WD_TIMEOUT 120000
120
121/* Antenna presence definitions */
122#define ANT_NONE 0x0
123#define ANT_A BIT(0)
124#define ANT_B BIT(1)
125#define ANT_C BIT(2)
126#define ANT_AB (ANT_A | ANT_B)
127#define ANT_AC (ANT_A | ANT_C)
128#define ANT_BC (ANT_B | ANT_C)
129#define ANT_ABC (ANT_A | ANT_B | ANT_C)
130
131
132/*
133 * @max_ll_items: max number of OTP blocks
134 * @shadow_ram_support: shadow support for OTP memory
135 * @led_compensation: compensate on the led on/off time per HW according
136 * to the deviation to achieve the desired led frequency.
137 * The detail algorithm is described in iwl-led.c
138 * @chain_noise_num_beacons: number of beacons used to compute chain noise
139 * @adv_thermal_throttle: support advance thermal throttle
140 * @support_ct_kill_exit: support ct kill exit condition
141 * @plcp_delta_threshold: plcp error rate threshold used to trigger
142 * radio tuning when there is a high receiving plcp error rate
143 * @chain_noise_scale: default chain noise scale used for gain computation
144 * @wd_timeout: TX queues watchdog timeout
145 * @max_event_log_size: size of event log buffer size for ucode event logging
146 * @shadow_reg_enable: HW shadow register support
147 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
148 * @no_idle_support: do not support idle mode
149 */
150struct iwl_base_params {
151 int eeprom_size;
152 int num_of_queues; /* def: HW dependent */
153 /* for iwl_pcie_apm_init() */
154 u32 pll_cfg_val;
155
156 const u16 max_ll_items;
157 const bool shadow_ram_support;
158 u16 led_compensation;
159 bool adv_thermal_throttle;
160 bool support_ct_kill_exit;
161 u8 plcp_delta_threshold;
162 s32 chain_noise_scale;
163 unsigned int wd_timeout;
164 u32 max_event_log_size;
165 const bool shadow_reg_enable;
166 const bool hd_v2;
167 const bool no_idle_support;
168};
169
170/*
171 * @advanced_bt_coexist: support advanced bt coexist
172 * @bt_init_traffic_load: specify initial bt traffic load
173 * @bt_prio_boost: default bt priority boost value
174 * @agg_time_limit: maximum number of uSec in aggregation
175 * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode
176 */
177struct iwl_bt_params {
178 bool advanced_bt_coexist;
179 u8 bt_init_traffic_load;
180 u32 bt_prio_boost;
181 u16 agg_time_limit;
182 bool bt_sco_disable;
183 bool bt_session_2;
184};
185
186/*
187 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
188 * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
189 */
190struct iwl_ht_params {
191 enum ieee80211_smps_mode smps_mode;
192 const bool ht_greenfield_support; /* if used set to true */
193 bool use_rts_for_aggregation;
194 u8 ht40_bands;
195};
196
197/*
198 * information on how to parse the EEPROM
199 */
200#define EEPROM_REG_BAND_1_CHANNELS 0x08
201#define EEPROM_REG_BAND_2_CHANNELS 0x26
202#define EEPROM_REG_BAND_3_CHANNELS 0x42
203#define EEPROM_REG_BAND_4_CHANNELS 0x5C
204#define EEPROM_REG_BAND_5_CHANNELS 0x74
205#define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82
206#define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92
207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80
208#define EEPROM_REGULATORY_BAND_NO_HT40 0
209
210struct iwl_eeprom_params {
211 const u8 regulatory_bands[7];
212 bool enhanced_txpower;
213};
214
215/**
216 * struct iwl_cfg
217 * @name: Offical name of the device
218 * @fw_name_pre: Firmware filename prefix. The api version and extension
219 * (.ucode) will be added to filename before loading from disk. The
220 * filename is constructed as fw_name_pre<api>.ucode.
221 * @ucode_api_max: Highest version of uCode API supported by driver.
222 * @ucode_api_ok: oldest version of the uCode API that is OK to load
223 * without a warning, for use in transitions
224 * @ucode_api_min: Lowest version of uCode API supported by driver.
225 * @max_inst_size: The maximal length of the fw inst section
226 * @max_data_size: The maximal length of the fw data section
227 * @valid_tx_ant: valid transmit antenna
228 * @valid_rx_ant: valid receive antenna
229 * @nvm_ver: NVM version
230 * @nvm_calib_ver: NVM calibration version
231 * @lib: pointer to the lib ops
232 * @base_params: pointer to basic parameters
233 * @ht_params: point to ht patameters
234 * @bt_params: pointer to bt parameters
235 * @need_temp_offset_calib: need to perform temperature offset calibration
236 * @no_xtal_calib: some devices do not need crystal calibration data,
237 * don't send it to those
238 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
239 * @adv_pm: advance power management
240 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
241 * @internal_wimax_coex: internal wifi/wimax combo device
242 * @temp_offset_v2: support v2 of temperature offset calibration
243 *
244 * We enable the driver to be backward compatible wrt. hardware features.
245 * API differences in uCode shouldn't be handled here but through TLVs
246 * and/or the uCode API version instead.
247 */
248struct iwl_cfg {
249 /* params specific to an individual device within a device family */
250 const char *name;
251 const char *fw_name_pre;
252 const unsigned int ucode_api_max;
253 const unsigned int ucode_api_ok;
254 const unsigned int ucode_api_min;
255 const enum iwl_device_family device_family;
256 const u32 max_data_size;
257 const u32 max_inst_size;
258 u8 valid_tx_ant;
259 u8 valid_rx_ant;
260 u16 nvm_ver;
261 u16 nvm_calib_ver;
262 /* params not likely to change within a device family */
263 const struct iwl_base_params *base_params;
264 /* params likely to change within a device family */
265 const struct iwl_ht_params *ht_params;
266 const struct iwl_bt_params *bt_params;
267 const struct iwl_eeprom_params *eeprom_params;
268 const bool need_temp_offset_calib; /* if used set to true */
269 const bool no_xtal_calib;
270 enum iwl_led_mode led_mode;
271 const bool adv_pm;
272 const bool rx_with_siso_diversity;
273 const bool internal_wimax_coex;
274 const bool temp_offset_v2;
275};
276
277#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 34a5287dfc2..d6dbb042304 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -97,10 +97,13 @@
97/* 97/*
98 * Hardware revision info 98 * Hardware revision info
99 * Bit fields: 99 * Bit fields:
100 * 31-16: Reserved 100 * 31-8: Reserved
101 * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions 101 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D 102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
103 * 1-0: "Dash" (-) value, as in A-1, etc. 103 * 1-0: "Dash" (-) value, as in A-1, etc.
104 *
105 * NOTE: Revision step affects calculation of CCK txpower for 4965.
106 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
104 */ 107 */
105#define CSR_HW_REV (CSR_BASE+0x028) 108#define CSR_HW_REV (CSR_BASE+0x028)
106 109
@@ -152,21 +155,9 @@
152#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) 155#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
153 156
154/* Bits for CSR_HW_IF_CONFIG_REG */ 157/* Bits for CSR_HW_IF_CONFIG_REG */
155#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003) 158#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
156#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C) 159#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
157#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
158#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
159#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) 160#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
160#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
161#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
162#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
163
164#define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
165#define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
166#define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
167#define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
168#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
169#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
170 161
171#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) 162#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
172#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 163#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
@@ -279,10 +270,7 @@
279 270
280 271
281/* HW REV */ 272/* HW REV */
282#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0) 273#define CSR_HW_REV_TYPE_MSK (0x00001F0)
283#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
284
285#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
286#define CSR_HW_REV_TYPE_5300 (0x0000020) 274#define CSR_HW_REV_TYPE_5300 (0x0000020)
287#define CSR_HW_REV_TYPE_5350 (0x0000030) 275#define CSR_HW_REV_TYPE_5350 (0x0000030)
288#define CSR_HW_REV_TYPE_5100 (0x0000050) 276#define CSR_HW_REV_TYPE_5100 (0x0000050)
@@ -296,8 +284,8 @@
296#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 284#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
297#define CSR_HW_REV_TYPE_2x30 (0x00000C0) 285#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
298#define CSR_HW_REV_TYPE_2x00 (0x0000100) 286#define CSR_HW_REV_TYPE_2x00 (0x0000100)
299#define CSR_HW_REV_TYPE_105 (0x0000110) 287#define CSR_HW_REV_TYPE_200 (0x0000110)
300#define CSR_HW_REV_TYPE_135 (0x0000120) 288#define CSR_HW_REV_TYPE_230 (0x0000120)
301#define CSR_HW_REV_TYPE_NONE (0x00001F0) 289#define CSR_HW_REV_TYPE_NONE (0x00001F0)
302 290
303/* EEPROM REG */ 291/* EEPROM REG */
@@ -442,9 +430,6 @@
442#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) 430#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
443#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) 431#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
444 432
445/* Used to enable DBGM */
446#define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c)
447
448/* 433/*
449 * Per-Tx-queue write pointer (index, really!) 434 * Per-Tx-queue write pointer (index, really!)
450 * Indicates index to next TFD that driver will fill (1 past latest filled). 435 * Indicates index to next TFD that driver will fill (1 past latest filled).
@@ -454,22 +439,4 @@
454 */ 439 */
455#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) 440#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
456 441
457/**********************************************************
458 * CSR values
459 **********************************************************/
460 /*
461 * host interrupt timeout value
462 * used with setting interrupt coalescing timer
463 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
464 *
465 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
466 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
467 */
468#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
469#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
470#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
471#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
472#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
473#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
474
475#endif /* !__iwl_csr_h__ */ 442#endif /* !__iwl_csr_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
deleted file mode 100644
index 87535a67de7..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ /dev/null
@@ -1,137 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#define DEBUG
65
66#include <linux/device.h>
67#include <linux/interrupt.h>
68#include <linux/export.h>
69#include "iwl-debug.h"
70#include "iwl-devtrace.h"
71
72#define __iwl_fn(fn) \
73void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
74{ \
75 struct va_format vaf = { \
76 .fmt = fmt, \
77 }; \
78 va_list args; \
79 \
80 va_start(args, fmt); \
81 vaf.va = &args; \
82 dev_ ##fn(dev, "%pV", &vaf); \
83 trace_iwlwifi_ ##fn(&vaf); \
84 va_end(args); \
85}
86
87__iwl_fn(warn)
88EXPORT_SYMBOL_GPL(__iwl_warn);
89__iwl_fn(info)
90EXPORT_SYMBOL_GPL(__iwl_info);
91__iwl_fn(crit)
92EXPORT_SYMBOL_GPL(__iwl_crit);
93
94void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
95 const char *fmt, ...)
96{
97 struct va_format vaf = {
98 .fmt = fmt,
99 };
100 va_list args;
101
102 va_start(args, fmt);
103 vaf.va = &args;
104 if (!trace_only) {
105 if (rfkill_prefix)
106 dev_err(dev, "(RFKILL) %pV", &vaf);
107 else
108 dev_err(dev, "%pV", &vaf);
109 }
110 trace_iwlwifi_err(&vaf);
111 va_end(args);
112}
113EXPORT_SYMBOL_GPL(__iwl_err);
114
115#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
116void __iwl_dbg(struct device *dev,
117 u32 level, bool limit, const char *function,
118 const char *fmt, ...)
119{
120 struct va_format vaf = {
121 .fmt = fmt,
122 };
123 va_list args;
124
125 va_start(args, fmt);
126 vaf.va = &args;
127#ifdef CONFIG_IWLWIFI_DEBUG
128 if (iwl_have_debug_level(level) &&
129 (!limit || net_ratelimit()))
130 dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
131 function, &vaf);
132#endif
133 trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
134 va_end(args);
135}
136EXPORT_SYMBOL_GPL(__iwl_dbg);
137#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 42b20b0e83b..f9a407e40af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -29,63 +29,65 @@
29#ifndef __iwl_debug_h__ 29#ifndef __iwl_debug_h__
30#define __iwl_debug_h__ 30#define __iwl_debug_h__
31 31
32#include "iwl-modparams.h" 32struct iwl_priv;
33extern u32 iwl_debug_level;
33 34
35#define IWL_ERR(p, f, a...) dev_err(p->bus->dev, f, ## a)
36#define IWL_WARN(p, f, a...) dev_warn(p->bus->dev, f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(p->bus->dev, f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(p->bus->dev, f, ## a)
34 39
35static inline bool iwl_have_debug_level(u32 level) 40#define iwl_print_hex_error(priv, p, len) \
36{
37 return iwlwifi_mod_params.debug_level & level;
38}
39
40void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
41 const char *fmt, ...) __printf(4, 5);
42void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
43void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
44void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
45
46/* No matter what is m (priv, bus, trans), this will work */
47#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
48#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
49#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
50#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
51#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
52
53#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
54void __iwl_dbg(struct device *dev,
55 u32 level, bool limit, const char *function,
56 const char *fmt, ...) __printf(5, 6);
57#else
58__printf(5, 6) static inline void
59__iwl_dbg(struct device *dev,
60 u32 level, bool limit, const char *function,
61 const char *fmt, ...)
62{}
63#endif
64
65#define iwl_print_hex_error(m, p, len) \
66do { \ 41do { \
67 print_hex_dump(KERN_ERR, "iwl data: ", \ 42 print_hex_dump(KERN_ERR, "iwl data: ", \
68 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ 43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
69} while (0) 44} while (0)
70 45
71#define IWL_DEBUG(m, level, fmt, args...) \
72 __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
73#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
74 __iwl_dbg((dev), level, false, __func__, fmt, ##args)
75#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
76 __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
77
78#ifdef CONFIG_IWLWIFI_DEBUG 46#ifdef CONFIG_IWLWIFI_DEBUG
79#define iwl_print_hex_dump(m, level, p, len) \ 47#define IWL_DEBUG(__priv, level, fmt, args...) \
48do { \
49 if (iwl_get_debug_level(__priv) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \
53} while (0)
54
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
56do { \
57 if ((iwl_get_debug_level(__priv) & (level)) && net_ratelimit()) \
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \
61} while (0)
62
63#define iwl_print_hex_dump(priv, level, p, len) \
80do { \ 64do { \
81 if (iwl_have_debug_level(level)) \ 65 if (iwl_get_debug_level(priv) & level) \
82 print_hex_dump(KERN_DEBUG, "iwl data: ", \ 66 print_hex_dump(KERN_DEBUG, "iwl data: ", \
83 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ 67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
84} while (0) 68} while (0)
69
85#else 70#else
86#define iwl_print_hex_dump(m, level, p, len) 71#define IWL_DEBUG(__priv, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
73static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
74 const void *p, u32 len)
75{}
87#endif /* CONFIG_IWLWIFI_DEBUG */ 76#endif /* CONFIG_IWLWIFI_DEBUG */
88 77
78#ifdef CONFIG_IWLWIFI_DEBUGFS
79int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_dbgfs_unregister(struct iwl_priv *priv);
81#else
82static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
83{
84 return 0;
85}
86static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
87{
88}
89#endif /* CONFIG_IWLWIFI_DEBUGFS */
90
89/* 91/*
90 * To use the debug system: 92 * To use the debug system:
91 * 93 *
@@ -102,52 +104,55 @@ do { \
102 * 104 *
103 * The active debug levels can be accessed via files 105 * The active debug levels can be accessed via files
104 * 106 *
105 * /sys/module/iwlwifi/parameters/debug 107 * /sys/module/iwlagn/parameters/debug{50}
106 * when CONFIG_IWLWIFI_DEBUG=y. 108 * /sys/class/net/wlan0/device/debug_level
107 *
108 * /sys/kernel/debug/phy0/iwlwifi/debug/debug_level
109 * when CONFIG_IWLWIFI_DEBUGFS=y.
110 * 109 *
110 * when CONFIG_IWLWIFI_DEBUG=y.
111 */ 111 */
112 112
113/* 0x0000000F - 0x00000001 */ 113/* 0x0000000F - 0x00000001 */
114#define IWL_DL_INFO 0x00000001 114#define IWL_DL_INFO (1 << 0)
115#define IWL_DL_MAC80211 0x00000002 115#define IWL_DL_MAC80211 (1 << 1)
116#define IWL_DL_HCMD 0x00000004 116#define IWL_DL_HCMD (1 << 2)
117#define IWL_DL_STATE 0x00000008 117#define IWL_DL_STATE (1 << 3)
118/* 0x000000F0 - 0x00000010 */ 118/* 0x000000F0 - 0x00000010 */
119#define IWL_DL_EEPROM 0x00000040 119#define IWL_DL_MACDUMP (1 << 4)
120#define IWL_DL_RADIO 0x00000080 120#define IWL_DL_HCMD_DUMP (1 << 5)
121#define IWL_DL_EEPROM (1 << 6)
122#define IWL_DL_RADIO (1 << 7)
121/* 0x00000F00 - 0x00000100 */ 123/* 0x00000F00 - 0x00000100 */
122#define IWL_DL_POWER 0x00000100 124#define IWL_DL_POWER (1 << 8)
123#define IWL_DL_TEMP 0x00000200 125#define IWL_DL_TEMP (1 << 9)
124#define IWL_DL_SCAN 0x00000800 126/* reserved (1 << 10) */
127#define IWL_DL_SCAN (1 << 11)
125/* 0x0000F000 - 0x00001000 */ 128/* 0x0000F000 - 0x00001000 */
126#define IWL_DL_ASSOC 0x00001000 129#define IWL_DL_ASSOC (1 << 12)
127#define IWL_DL_DROP 0x00002000 130#define IWL_DL_DROP (1 << 13)
128#define IWL_DL_COEX 0x00008000 131/* reserved (1 << 14) */
132#define IWL_DL_COEX (1 << 15)
129/* 0x000F0000 - 0x00010000 */ 133/* 0x000F0000 - 0x00010000 */
130#define IWL_DL_FW 0x00010000 134#define IWL_DL_FW (1 << 16)
131#define IWL_DL_RF_KILL 0x00020000 135#define IWL_DL_RF_KILL (1 << 17)
132#define IWL_DL_FW_ERRORS 0x00040000 136#define IWL_DL_FW_ERRORS (1 << 18)
133#define IWL_DL_LED 0x00080000 137#define IWL_DL_LED (1 << 19)
134/* 0x00F00000 - 0x00100000 */ 138/* 0x00F00000 - 0x00100000 */
135#define IWL_DL_RATE 0x00100000 139#define IWL_DL_RATE (1 << 20)
136#define IWL_DL_CALIB 0x00200000 140#define IWL_DL_CALIB (1 << 21)
137#define IWL_DL_WEP 0x00400000 141#define IWL_DL_WEP (1 << 22)
138#define IWL_DL_TX 0x00800000 142#define IWL_DL_TX (1 << 23)
139/* 0x0F000000 - 0x01000000 */ 143/* 0x0F000000 - 0x01000000 */
140#define IWL_DL_RX 0x01000000 144#define IWL_DL_RX (1 << 24)
141#define IWL_DL_ISR 0x02000000 145#define IWL_DL_ISR (1 << 25)
142#define IWL_DL_HT 0x04000000 146#define IWL_DL_HT (1 << 26)
143/* 0xF0000000 - 0x10000000 */ 147/* 0xF0000000 - 0x10000000 */
144#define IWL_DL_11H 0x10000000 148#define IWL_DL_11H (1 << 28)
145#define IWL_DL_STATS 0x20000000 149#define IWL_DL_STATS (1 << 29)
146#define IWL_DL_TX_REPLY 0x40000000 150#define IWL_DL_TX_REPLY (1 << 30)
147#define IWL_DL_TX_QUEUES 0x80000000 151#define IWL_DL_QOS (1 << 31)
148 152
149#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) 153#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
150#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) 154#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
155#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
151#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) 156#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
152#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) 157#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
153#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) 158#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -156,11 +161,11 @@ do { \
156#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) 161#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
157#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) 162#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
158#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) 163#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
159#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a) 164#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
165#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
160#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) 166#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
161#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) 167#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
162#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) 168#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
163#define IWL_DEBUG_FW_ERRORS(p, f, a...) IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a)
164#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) 169#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
165#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ 170#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
166 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) 171 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
@@ -177,7 +182,9 @@ do { \
177#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ 182#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
178 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) 183 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
179#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) 184#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
180#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a) 185#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
186 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
187#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
181#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) 188#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
182#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) 189#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
183#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) 190#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 70191ddbd8f..a635a7e7544 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved. 3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,7 @@
28 28
29/* sparse doesn't like tracepoint macros */ 29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__ 30#ifndef __CHECKER__
31#include "iwl-trans.h" 31#include "iwl-dev.h"
32 32
33#define CREATE_TRACE_POINTS 33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h" 34#include "iwl-devtrace.h"
@@ -42,9 +42,4 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); 42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); 43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); 44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
45EXPORT_TRACEPOINT_SYMBOL(iwlwifi_info);
46EXPORT_TRACEPOINT_SYMBOL(iwlwifi_warn);
47EXPORT_TRACEPOINT_SYMBOL(iwlwifi_crit);
48EXPORT_TRACEPOINT_SYMBOL(iwlwifi_err);
49EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dbg);
50#endif 45#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index dc7e26b2f38..2c84ba95afc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved. 3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -25,384 +25,164 @@
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) 27#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#include <linux/skbuff.h>
29#include <linux/ieee80211.h>
30#include <net/cfg80211.h>
31#include "iwl-trans.h"
32#if !defined(__IWLWIFI_DEVICE_TRACE)
33static inline bool iwl_trace_data(struct sk_buff *skb)
34{
35 struct ieee80211_hdr *hdr = (void *)skb->data;
36
37 if (ieee80211_is_data(hdr->frame_control))
38 return skb->protocol != cpu_to_be16(ETH_P_PAE);
39 return false;
40}
41
42static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
43 void *rxbuf, size_t len)
44{
45 struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
46 struct ieee80211_hdr *hdr;
47
48 if (cmd->cmd != trans->rx_mpdu_cmd)
49 return len;
50
51 hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
52 trans->rx_mpdu_cmd_hdr_size);
53 if (!ieee80211_is_data(hdr->frame_control))
54 return len;
55 /* maybe try to identify EAPOL frames? */
56 return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
57 ieee80211_hdrlen(hdr->frame_control);
58}
59#endif
60
61#define __IWLWIFI_DEVICE_TRACE 28#define __IWLWIFI_DEVICE_TRACE
62 29
63#include <linux/tracepoint.h> 30#include <linux/tracepoint.h>
64#include <linux/device.h>
65#include "iwl-trans.h"
66
67 31
68#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) 32#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
69#undef TRACE_EVENT 33#undef TRACE_EVENT
70#define TRACE_EVENT(name, proto, ...) \ 34#define TRACE_EVENT(name, proto, ...) \
71static inline void trace_ ## name(proto) {} 35static inline void trace_ ## name(proto) {}
72#undef DECLARE_EVENT_CLASS
73#define DECLARE_EVENT_CLASS(...)
74#undef DEFINE_EVENT
75#define DEFINE_EVENT(evt_class, name, proto, ...) \
76static inline void trace_ ## name(proto) {}
77#endif 36#endif
78 37
79#define DEV_ENTRY __string(dev, dev_name(dev)) 38#define PRIV_ENTRY __field(struct iwl_priv *, priv)
80#define DEV_ASSIGN __assign_str(dev, dev_name(dev)) 39#define PRIV_ASSIGN __entry->priv = priv
81 40
82#undef TRACE_SYSTEM 41#undef TRACE_SYSTEM
83#define TRACE_SYSTEM iwlwifi_io 42#define TRACE_SYSTEM iwlwifi_io
84 43
85TRACE_EVENT(iwlwifi_dev_ioread32, 44TRACE_EVENT(iwlwifi_dev_ioread32,
86 TP_PROTO(const struct device *dev, u32 offs, u32 val), 45 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
87 TP_ARGS(dev, offs, val), 46 TP_ARGS(priv, offs, val),
88 TP_STRUCT__entry( 47 TP_STRUCT__entry(
89 DEV_ENTRY 48 PRIV_ENTRY
90 __field(u32, offs) 49 __field(u32, offs)
91 __field(u32, val) 50 __field(u32, val)
92 ), 51 ),
93 TP_fast_assign( 52 TP_fast_assign(
94 DEV_ASSIGN; 53 PRIV_ASSIGN;
95 __entry->offs = offs; 54 __entry->offs = offs;
96 __entry->val = val; 55 __entry->val = val;
97 ), 56 ),
98 TP_printk("[%s] read io[%#x] = %#x", 57 TP_printk("[%p] read io[%#x] = %#x", __entry->priv, __entry->offs, __entry->val)
99 __get_str(dev), __entry->offs, __entry->val)
100); 58);
101 59
102TRACE_EVENT(iwlwifi_dev_iowrite8, 60TRACE_EVENT(iwlwifi_dev_iowrite8,
103 TP_PROTO(const struct device *dev, u32 offs, u8 val), 61 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
104 TP_ARGS(dev, offs, val), 62 TP_ARGS(priv, offs, val),
105 TP_STRUCT__entry( 63 TP_STRUCT__entry(
106 DEV_ENTRY 64 PRIV_ENTRY
107 __field(u32, offs) 65 __field(u32, offs)
108 __field(u8, val) 66 __field(u8, val)
109 ), 67 ),
110 TP_fast_assign( 68 TP_fast_assign(
111 DEV_ASSIGN; 69 PRIV_ASSIGN;
112 __entry->offs = offs; 70 __entry->offs = offs;
113 __entry->val = val; 71 __entry->val = val;
114 ), 72 ),
115 TP_printk("[%s] write io[%#x] = %#x)", 73 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
116 __get_str(dev), __entry->offs, __entry->val)
117); 74);
118 75
119TRACE_EVENT(iwlwifi_dev_iowrite32, 76TRACE_EVENT(iwlwifi_dev_iowrite32,
120 TP_PROTO(const struct device *dev, u32 offs, u32 val), 77 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
121 TP_ARGS(dev, offs, val), 78 TP_ARGS(priv, offs, val),
122 TP_STRUCT__entry( 79 TP_STRUCT__entry(
123 DEV_ENTRY 80 PRIV_ENTRY
124 __field(u32, offs) 81 __field(u32, offs)
125 __field(u32, val) 82 __field(u32, val)
126 ), 83 ),
127 TP_fast_assign( 84 TP_fast_assign(
128 DEV_ASSIGN; 85 PRIV_ASSIGN;
129 __entry->offs = offs; 86 __entry->offs = offs;
130 __entry->val = val; 87 __entry->val = val;
131 ), 88 ),
132 TP_printk("[%s] write io[%#x] = %#x)", 89 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
133 __get_str(dev), __entry->offs, __entry->val)
134);
135
136TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
137 TP_PROTO(const struct device *dev, u32 offs, u32 val),
138 TP_ARGS(dev, offs, val),
139 TP_STRUCT__entry(
140 DEV_ENTRY
141 __field(u32, offs)
142 __field(u32, val)
143 ),
144 TP_fast_assign(
145 DEV_ASSIGN;
146 __entry->offs = offs;
147 __entry->val = val;
148 ),
149 TP_printk("[%s] write PRPH[%#x] = %#x)",
150 __get_str(dev), __entry->offs, __entry->val)
151);
152
153TRACE_EVENT(iwlwifi_dev_ioread_prph32,
154 TP_PROTO(const struct device *dev, u32 offs, u32 val),
155 TP_ARGS(dev, offs, val),
156 TP_STRUCT__entry(
157 DEV_ENTRY
158 __field(u32, offs)
159 __field(u32, val)
160 ),
161 TP_fast_assign(
162 DEV_ASSIGN;
163 __entry->offs = offs;
164 __entry->val = val;
165 ),
166 TP_printk("[%s] read PRPH[%#x] = %#x",
167 __get_str(dev), __entry->offs, __entry->val)
168);
169
170TRACE_EVENT(iwlwifi_dev_irq,
171 TP_PROTO(const struct device *dev),
172 TP_ARGS(dev),
173 TP_STRUCT__entry(
174 DEV_ENTRY
175 ),
176 TP_fast_assign(
177 DEV_ASSIGN;
178 ),
179 /* TP_printk("") doesn't compile */
180 TP_printk("%d", 0)
181);
182
183TRACE_EVENT(iwlwifi_dev_ict_read,
184 TP_PROTO(const struct device *dev, u32 index, u32 value),
185 TP_ARGS(dev, index, value),
186 TP_STRUCT__entry(
187 DEV_ENTRY
188 __field(u32, index)
189 __field(u32, value)
190 ),
191 TP_fast_assign(
192 DEV_ASSIGN;
193 __entry->index = index;
194 __entry->value = value;
195 ),
196 TP_printk("[%s] read ict[%d] = %#.8x",
197 __get_str(dev), __entry->index, __entry->value)
198); 90);
199 91
200#undef TRACE_SYSTEM 92#undef TRACE_SYSTEM
201#define TRACE_SYSTEM iwlwifi_ucode 93#define TRACE_SYSTEM iwlwifi_ucode
202 94
203TRACE_EVENT(iwlwifi_dev_ucode_cont_event, 95TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
204 TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev), 96 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
205 TP_ARGS(dev, time, data, ev), 97 TP_ARGS(priv, time, data, ev),
206 TP_STRUCT__entry( 98 TP_STRUCT__entry(
207 DEV_ENTRY 99 PRIV_ENTRY
208 100
209 __field(u32, time) 101 __field(u32, time)
210 __field(u32, data) 102 __field(u32, data)
211 __field(u32, ev) 103 __field(u32, ev)
212 ), 104 ),
213 TP_fast_assign( 105 TP_fast_assign(
214 DEV_ASSIGN; 106 PRIV_ASSIGN;
215 __entry->time = time; 107 __entry->time = time;
216 __entry->data = data; 108 __entry->data = data;
217 __entry->ev = ev; 109 __entry->ev = ev;
218 ), 110 ),
219 TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u", 111 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
220 __get_str(dev), __entry->time, __entry->data, __entry->ev) 112 __entry->priv, __entry->time, __entry->data, __entry->ev)
221); 113);
222 114
223TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, 115TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
224 TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry), 116 TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
225 TP_ARGS(dev, wraps, n_entry, p_entry), 117 TP_ARGS(priv, wraps, n_entry, p_entry),
226 TP_STRUCT__entry( 118 TP_STRUCT__entry(
227 DEV_ENTRY 119 PRIV_ENTRY
228 120
229 __field(u32, wraps) 121 __field(u32, wraps)
230 __field(u32, n_entry) 122 __field(u32, n_entry)
231 __field(u32, p_entry) 123 __field(u32, p_entry)
232 ), 124 ),
233 TP_fast_assign( 125 TP_fast_assign(
234 DEV_ASSIGN; 126 PRIV_ASSIGN;
235 __entry->wraps = wraps; 127 __entry->wraps = wraps;
236 __entry->n_entry = n_entry; 128 __entry->n_entry = n_entry;
237 __entry->p_entry = p_entry; 129 __entry->p_entry = p_entry;
238 ), 130 ),
239 TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X", 131 TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
240 __get_str(dev), __entry->wraps, __entry->n_entry, 132 __entry->priv, __entry->wraps, __entry->n_entry,
241 __entry->p_entry) 133 __entry->p_entry)
242); 134);
243 135
244#undef TRACE_SYSTEM 136#undef TRACE_SYSTEM
245#define TRACE_SYSTEM iwlwifi_msg
246
247#define MAX_MSG_LEN 110
248
249DECLARE_EVENT_CLASS(iwlwifi_msg_event,
250 TP_PROTO(struct va_format *vaf),
251 TP_ARGS(vaf),
252 TP_STRUCT__entry(
253 __dynamic_array(char, msg, MAX_MSG_LEN)
254 ),
255 TP_fast_assign(
256 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
257 MAX_MSG_LEN, vaf->fmt,
258 *vaf->va) >= MAX_MSG_LEN);
259 ),
260 TP_printk("%s", __get_str(msg))
261);
262
263DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,
264 TP_PROTO(struct va_format *vaf),
265 TP_ARGS(vaf)
266);
267
268DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_warn,
269 TP_PROTO(struct va_format *vaf),
270 TP_ARGS(vaf)
271);
272
273DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_info,
274 TP_PROTO(struct va_format *vaf),
275 TP_ARGS(vaf)
276);
277
278DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit,
279 TP_PROTO(struct va_format *vaf),
280 TP_ARGS(vaf)
281);
282
283TRACE_EVENT(iwlwifi_dbg,
284 TP_PROTO(u32 level, bool in_interrupt, const char *function,
285 struct va_format *vaf),
286 TP_ARGS(level, in_interrupt, function, vaf),
287 TP_STRUCT__entry(
288 __field(u32, level)
289 __field(u8, in_interrupt)
290 __string(function, function)
291 __dynamic_array(char, msg, MAX_MSG_LEN)
292 ),
293 TP_fast_assign(
294 __entry->level = level;
295 __entry->in_interrupt = in_interrupt;
296 __assign_str(function, function);
297 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
298 MAX_MSG_LEN, vaf->fmt,
299 *vaf->va) >= MAX_MSG_LEN);
300 ),
301 TP_printk("%s", (char *)__get_dynamic_array(msg))
302);
303
304#undef TRACE_SYSTEM
305#define TRACE_SYSTEM iwlwifi_data
306
307TRACE_EVENT(iwlwifi_dev_tx_data,
308 TP_PROTO(const struct device *dev,
309 struct sk_buff *skb,
310 void *data, size_t data_len),
311 TP_ARGS(dev, skb, data, data_len),
312 TP_STRUCT__entry(
313 DEV_ENTRY
314
315 __dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
316 ),
317 TP_fast_assign(
318 DEV_ASSIGN;
319 if (iwl_trace_data(skb))
320 memcpy(__get_dynamic_array(data), data, data_len);
321 ),
322 TP_printk("[%s] TX frame data", __get_str(dev))
323);
324
325TRACE_EVENT(iwlwifi_dev_rx_data,
326 TP_PROTO(const struct device *dev,
327 const struct iwl_trans *trans,
328 void *rxbuf, size_t len),
329 TP_ARGS(dev, trans, rxbuf, len),
330 TP_STRUCT__entry(
331 DEV_ENTRY
332
333 __dynamic_array(u8, data,
334 len - iwl_rx_trace_len(trans, rxbuf, len))
335 ),
336 TP_fast_assign(
337 size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
338 DEV_ASSIGN;
339 if (offs < len)
340 memcpy(__get_dynamic_array(data),
341 ((u8 *)rxbuf) + offs, len - offs);
342 ),
343 TP_printk("[%s] RX frame data", __get_str(dev))
344);
345
346#undef TRACE_SYSTEM
347#define TRACE_SYSTEM iwlwifi 137#define TRACE_SYSTEM iwlwifi
348 138
349TRACE_EVENT(iwlwifi_dev_hcmd, 139TRACE_EVENT(iwlwifi_dev_hcmd,
350 TP_PROTO(const struct device *dev, 140 TP_PROTO(struct iwl_priv *priv, u32 flags,
351 struct iwl_host_cmd *cmd, u16 total_size, 141 const void *hcmd0, size_t len0,
352 const void *hdr, size_t hdr_len), 142 const void *hcmd1, size_t len1,
353 TP_ARGS(dev, cmd, total_size, hdr, hdr_len), 143 const void *hcmd2, size_t len2),
144 TP_ARGS(priv, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
354 TP_STRUCT__entry( 145 TP_STRUCT__entry(
355 DEV_ENTRY 146 PRIV_ENTRY
356 __dynamic_array(u8, hcmd, total_size) 147 __dynamic_array(u8, hcmd0, len0)
148 __dynamic_array(u8, hcmd1, len1)
149 __dynamic_array(u8, hcmd2, len2)
357 __field(u32, flags) 150 __field(u32, flags)
358 ), 151 ),
359 TP_fast_assign( 152 TP_fast_assign(
360 int i, offset = hdr_len; 153 PRIV_ASSIGN;
361 154 memcpy(__get_dynamic_array(hcmd0), hcmd0, len0);
362 DEV_ASSIGN; 155 memcpy(__get_dynamic_array(hcmd1), hcmd1, len1);
363 __entry->flags = cmd->flags; 156 memcpy(__get_dynamic_array(hcmd2), hcmd2, len2);
364 memcpy(__get_dynamic_array(hcmd), hdr, hdr_len); 157 __entry->flags = flags;
365 158 ),
366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 159 TP_printk("[%p] hcmd %#.2x (%ssync)",
367 if (!cmd->len[i]) 160 __entry->priv, ((u8 *)__get_dynamic_array(hcmd0))[0],
368 continue;
369 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
370 continue;
371 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
372 cmd->data[i], cmd->len[i]);
373 offset += cmd->len[i];
374 }
375 ),
376 TP_printk("[%s] hcmd %#.2x (%ssync)",
377 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
378 __entry->flags & CMD_ASYNC ? "a" : "") 161 __entry->flags & CMD_ASYNC ? "a" : "")
379); 162);
380 163
381TRACE_EVENT(iwlwifi_dev_rx, 164TRACE_EVENT(iwlwifi_dev_rx,
382 TP_PROTO(const struct device *dev, const struct iwl_trans *trans, 165 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
383 void *rxbuf, size_t len), 166 TP_ARGS(priv, rxbuf, len),
384 TP_ARGS(dev, trans, rxbuf, len),
385 TP_STRUCT__entry( 167 TP_STRUCT__entry(
386 DEV_ENTRY 168 PRIV_ENTRY
387 __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len)) 169 __dynamic_array(u8, rxbuf, len)
388 ), 170 ),
389 TP_fast_assign( 171 TP_fast_assign(
390 DEV_ASSIGN; 172 PRIV_ASSIGN;
391 memcpy(__get_dynamic_array(rxbuf), rxbuf, 173 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
392 iwl_rx_trace_len(trans, rxbuf, len));
393 ), 174 ),
394 TP_printk("[%s] RX cmd %#.2x", 175 TP_printk("[%p] RX cmd %#.2x",
395 __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4]) 176 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
396); 177);
397 178
398TRACE_EVENT(iwlwifi_dev_tx, 179TRACE_EVENT(iwlwifi_dev_tx,
399 TP_PROTO(const struct device *dev, struct sk_buff *skb, 180 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
400 void *tfd, size_t tfdlen,
401 void *buf0, size_t buf0_len, 181 void *buf0, size_t buf0_len,
402 void *buf1, size_t buf1_len), 182 void *buf1, size_t buf1_len),
403 TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), 183 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
404 TP_STRUCT__entry( 184 TP_STRUCT__entry(
405 DEV_ENTRY 185 PRIV_ENTRY
406 186
407 __field(size_t, framelen) 187 __field(size_t, framelen)
408 __dynamic_array(u8, tfd, tfdlen) 188 __dynamic_array(u8, tfd, tfdlen)
@@ -413,32 +193,32 @@ TRACE_EVENT(iwlwifi_dev_tx,
413 * for the possible padding). 193 * for the possible padding).
414 */ 194 */
415 __dynamic_array(u8, buf0, buf0_len) 195 __dynamic_array(u8, buf0, buf0_len)
416 __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len) 196 __dynamic_array(u8, buf1, buf1_len)
417 ), 197 ),
418 TP_fast_assign( 198 TP_fast_assign(
419 DEV_ASSIGN; 199 PRIV_ASSIGN;
420 __entry->framelen = buf0_len + buf1_len; 200 __entry->framelen = buf0_len + buf1_len;
421 memcpy(__get_dynamic_array(tfd), tfd, tfdlen); 201 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
422 memcpy(__get_dynamic_array(buf0), buf0, buf0_len); 202 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
423 if (!iwl_trace_data(skb)) 203 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
424 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
425 ), 204 ),
426 TP_printk("[%s] TX %.2x (%zu bytes)", 205 TP_printk("[%p] TX %.2x (%zu bytes)",
427 __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0], 206 __entry->priv,
207 ((u8 *)__get_dynamic_array(buf0))[0],
428 __entry->framelen) 208 __entry->framelen)
429); 209);
430 210
431TRACE_EVENT(iwlwifi_dev_ucode_error, 211TRACE_EVENT(iwlwifi_dev_ucode_error,
432 TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low, 212 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low,
433 u32 data1, u32 data2, u32 line, u32 blink1, 213 u32 data1, u32 data2, u32 line, u32 blink1,
434 u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, 214 u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
435 u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver, 215 u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
436 u32 brd_ver), 216 u32 brd_ver),
437 TP_ARGS(dev, desc, tsf_low, data1, data2, line, 217 TP_ARGS(priv, desc, tsf_low, data1, data2, line,
438 blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2, 218 blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
439 gp3, ucode_ver, hw_ver, brd_ver), 219 gp3, ucode_ver, hw_ver, brd_ver),
440 TP_STRUCT__entry( 220 TP_STRUCT__entry(
441 DEV_ENTRY 221 PRIV_ENTRY
442 __field(u32, desc) 222 __field(u32, desc)
443 __field(u32, tsf_low) 223 __field(u32, tsf_low)
444 __field(u32, data1) 224 __field(u32, data1)
@@ -457,7 +237,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
457 __field(u32, brd_ver) 237 __field(u32, brd_ver)
458 ), 238 ),
459 TP_fast_assign( 239 TP_fast_assign(
460 DEV_ASSIGN; 240 PRIV_ASSIGN;
461 __entry->desc = desc; 241 __entry->desc = desc;
462 __entry->tsf_low = tsf_low; 242 __entry->tsf_low = tsf_low;
463 __entry->data1 = data1; 243 __entry->data1 = data1;
@@ -475,11 +255,11 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
475 __entry->hw_ver = hw_ver; 255 __entry->hw_ver = hw_ver;
476 __entry->brd_ver = brd_ver; 256 __entry->brd_ver = brd_ver;
477 ), 257 ),
478 TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, " 258 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
479 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X " 259 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
480 "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X " 260 "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X "
481 "hw 0x%08X brd 0x%08X", 261 "hw 0x%08X brd 0x%08X",
482 __get_str(dev), __entry->desc, __entry->tsf_low, 262 __entry->priv, __entry->desc, __entry->tsf_low,
483 __entry->data1, 263 __entry->data1,
484 __entry->data2, __entry->line, __entry->blink1, 264 __entry->data2, __entry->line, __entry->blink1,
485 __entry->blink2, __entry->ilink1, __entry->ilink2, 265 __entry->blink2, __entry->ilink1, __entry->ilink2,
@@ -489,23 +269,23 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
489); 269);
490 270
491TRACE_EVENT(iwlwifi_dev_ucode_event, 271TRACE_EVENT(iwlwifi_dev_ucode_event,
492 TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev), 272 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
493 TP_ARGS(dev, time, data, ev), 273 TP_ARGS(priv, time, data, ev),
494 TP_STRUCT__entry( 274 TP_STRUCT__entry(
495 DEV_ENTRY 275 PRIV_ENTRY
496 276
497 __field(u32, time) 277 __field(u32, time)
498 __field(u32, data) 278 __field(u32, data)
499 __field(u32, ev) 279 __field(u32, ev)
500 ), 280 ),
501 TP_fast_assign( 281 TP_fast_assign(
502 DEV_ASSIGN; 282 PRIV_ASSIGN;
503 __entry->time = time; 283 __entry->time = time;
504 __entry->data = data; 284 __entry->data = data;
505 __entry->ev = ev; 285 __entry->ev = ev;
506 ), 286 ),
507 TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u", 287 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
508 __get_str(dev), __entry->time, __entry->data, __entry->ev) 288 __entry->priv, __entry->time, __entry->data, __entry->ev)
509); 289);
510#endif /* __IWLWIFI_DEVICE_TRACE */ 290#endif /* __IWLWIFI_DEVICE_TRACE */
511 291
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
deleted file mode 100644
index d3549f493a1..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ /dev/null
@@ -1,1264 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/completion.h>
64#include <linux/dma-mapping.h>
65#include <linux/firmware.h>
66#include <linux/module.h>
67#include <linux/vmalloc.h>
68
69#include "iwl-drv.h"
70#include "iwl-debug.h"
71#include "iwl-trans.h"
72#include "iwl-op-mode.h"
73#include "iwl-agn-hw.h"
74#include "iwl-fw.h"
75#include "iwl-config.h"
76#include "iwl-modparams.h"
77
78/* private includes */
79#include "iwl-fw-file.h"
80
81/******************************************************************************
82 *
83 * module boiler plate
84 *
85 ******************************************************************************/
86
87/*
88 * module name, copyright, version, etc.
89 */
90#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
91
92#ifdef CONFIG_IWLWIFI_DEBUG
93#define VD "d"
94#else
95#define VD
96#endif
97
98#define DRV_VERSION IWLWIFI_VERSION VD
99
100MODULE_DESCRIPTION(DRV_DESCRIPTION);
101MODULE_VERSION(DRV_VERSION);
102MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
103MODULE_LICENSE("GPL");
104
105#ifdef CONFIG_IWLWIFI_DEBUGFS
106static struct dentry *iwl_dbgfs_root;
107#endif
108
109/**
110 * struct iwl_drv - drv common data
111 * @list: list of drv structures using this opmode
112 * @fw: the iwl_fw structure
113 * @op_mode: the running op_mode
114 * @trans: transport layer
115 * @dev: for debug prints only
116 * @cfg: configuration struct
117 * @fw_index: firmware revision to try loading
118 * @firmware_name: composite filename of ucode file to load
119 * @request_firmware_complete: the firmware has been obtained from user space
120 */
121struct iwl_drv {
122 struct list_head list;
123 struct iwl_fw fw;
124
125 struct iwl_op_mode *op_mode;
126 struct iwl_trans *trans;
127 struct device *dev;
128 const struct iwl_cfg *cfg;
129
130 int fw_index; /* firmware we're trying to load */
131 char firmware_name[25]; /* name of firmware file to load */
132
133 struct completion request_firmware_complete;
134
135#ifdef CONFIG_IWLWIFI_DEBUGFS
136 struct dentry *dbgfs_drv;
137 struct dentry *dbgfs_trans;
138 struct dentry *dbgfs_op_mode;
139#endif
140};
141
142#define DVM_OP_MODE 0
143#define MVM_OP_MODE 1
144
145/* Protects the table contents, i.e. the ops pointer & drv list */
146static struct mutex iwlwifi_opmode_table_mtx;
147static struct iwlwifi_opmode_table {
148 const char *name; /* name: iwldvm, iwlmvm, etc */
149 const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
150 struct list_head drv; /* list of devices using this op_mode */
151} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
152 { .name = "iwldvm", .ops = NULL },
153 { .name = "iwlmvm", .ops = NULL },
154};
155
156/*
157 * struct fw_sec: Just for the image parsing proccess.
158 * For the fw storage we are using struct fw_desc.
159 */
160struct fw_sec {
161 const void *data; /* the sec data */
162 size_t size; /* section size */
163 u32 offset; /* offset of writing in the device */
164};
165
166static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
167{
168 vfree(desc->data);
169 desc->data = NULL;
170 desc->len = 0;
171}
172
173static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
174{
175 int i;
176 for (i = 0; i < IWL_UCODE_SECTION_MAX; i++)
177 iwl_free_fw_desc(drv, &img->sec[i]);
178}
179
180static void iwl_dealloc_ucode(struct iwl_drv *drv)
181{
182 int i;
183 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
184 iwl_free_fw_img(drv, drv->fw.img + i);
185}
186
187static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
188 struct fw_sec *sec)
189{
190 void *data;
191
192 desc->data = NULL;
193
194 if (!sec || !sec->size)
195 return -EINVAL;
196
197 data = vmalloc(sec->size);
198 if (!data)
199 return -ENOMEM;
200
201 desc->len = sec->size;
202 desc->offset = sec->offset;
203 memcpy(data, sec->data, desc->len);
204 desc->data = data;
205
206 return 0;
207}
208
/* completion callback for the asynchronous firmware request below */
static void iwl_req_fw_callback(const struct firmware *ucode_raw,
				void *context);

/* fw_index value (and filename tag) used for experimental test firmware */
#define UCODE_EXPERIMENTAL_INDEX	100
#define UCODE_EXPERIMENTAL_TAG		"exp"

/*
 * iwl_request_firmware - request the next firmware file to try
 *
 * Builds the firmware filename from the cfg name prefix plus an API
 * version tag and submits an asynchronous request; iwl_req_fw_callback()
 * runs when it completes.  On the @first call the newest supported API
 * version is tried (or the experimental image when that config option is
 * set); every later call steps fw_index down one API version.  Returns
 * -ENOENT once fw_index drops below ucode_api_min.
 */
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
	const char *name_pre = drv->cfg->fw_name_pre;
	char tag[8];

	if (first) {
#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
		/* try the experimental image first, fall back below */
		drv->fw_index = UCODE_EXPERIMENTAL_INDEX;
		strcpy(tag, UCODE_EXPERIMENTAL_TAG);
	} else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
#endif
		drv->fw_index = drv->cfg->ucode_api_max;
		sprintf(tag, "%d", drv->fw_index);
	} else {
		drv->fw_index--;
		sprintf(tag, "%d", drv->fw_index);
	}

	if (drv->fw_index < drv->cfg->ucode_api_min) {
		IWL_ERR(drv, "no suitable firmware found!\n");
		return -ENOENT;
	}

	sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");

	IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
		       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
				? "EXPERIMENTAL " : "",
		       drv->firmware_name);

	return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
				       drv->trans->dev,
				       GFP_KERNEL, drv, iwl_req_fw_callback);
}
249
/* Per-image accumulator used while walking the firmware file */
struct fw_img_parsing {
	struct fw_sec sec[IWL_UCODE_SECTION_MAX];
	int sec_counter;	/* number of sections stored so far */
};

/*
 * struct fw_sec_parsing: to extract fw section and it's offset from tlv
 */
struct fw_sec_parsing {
	__le32 offset;		/* device address the section is written to */
	const u8 data[];	/* section payload; length implied by TLV len */
} __packed;

/**
 * struct iwl_tlv_calib_data - parse the default calib data from TLV
 *
 * @ucode_type: the uCode to which the following default calib relates.
 * @calib: default calibrations.
 */
struct iwl_tlv_calib_data {
	__le32 ucode_type;
	__le64 calib;
} __packed;

/* Everything extracted from the firmware file before buffers are allocated */
struct iwl_firmware_pieces {
	struct fw_img_parsing img[IWL_UCODE_TYPE_MAX];

	/* event/error log pointers & sizes read from TLVs (DVM firmware) */
	u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
	u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
};
280
281/*
282 * These functions are just to extract uCode section data from the pieces
283 * structure.
284 */
285static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
286 enum iwl_ucode_type type,
287 int sec)
288{
289 return &pieces->img[type].sec[sec];
290}
291
292static void set_sec_data(struct iwl_firmware_pieces *pieces,
293 enum iwl_ucode_type type,
294 int sec,
295 const void *data)
296{
297 pieces->img[type].sec[sec].data = data;
298}
299
300static void set_sec_size(struct iwl_firmware_pieces *pieces,
301 enum iwl_ucode_type type,
302 int sec,
303 size_t size)
304{
305 pieces->img[type].sec[sec].size = size;
306}
307
308static size_t get_sec_size(struct iwl_firmware_pieces *pieces,
309 enum iwl_ucode_type type,
310 int sec)
311{
312 return pieces->img[type].sec[sec].size;
313}
314
315static void set_sec_offset(struct iwl_firmware_pieces *pieces,
316 enum iwl_ucode_type type,
317 int sec,
318 u32 offset)
319{
320 pieces->img[type].sec[sec].offset = offset;
321}
322
323/*
324 * Gets uCode section from tlv.
325 */
326static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
327 const void *data, enum iwl_ucode_type type,
328 int size)
329{
330 struct fw_img_parsing *img;
331 struct fw_sec *sec;
332 struct fw_sec_parsing *sec_parse;
333
334 if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
335 return -1;
336
337 sec_parse = (struct fw_sec_parsing *)data;
338
339 img = &pieces->img[type];
340 sec = &img->sec[img->sec_counter];
341
342 sec->offset = le32_to_cpu(sec_parse->offset);
343 sec->data = sec_parse->data;
344 sec->size = size - sizeof(sec_parse->offset);
345
346 ++img->sec_counter;
347
348 return 0;
349}
350
351static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
352{
353 struct iwl_tlv_calib_data *def_calib =
354 (struct iwl_tlv_calib_data *)data;
355 u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
356 if (ucode_type >= IWL_UCODE_TYPE_MAX) {
357 IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
358 ucode_type);
359 return -EINVAL;
360 }
361 drv->fw.default_calib[ucode_type] = le64_to_cpu(def_calib->calib);
362 return 0;
363}
364
/*
 * iwl_parse_v1_v2_firmware - parse a pre-TLV (header v1/v2) firmware file
 *
 * Fills @pieces with pointers into @ucode_raw for the runtime and init
 * inst/data sections, and sets drv->fw.ucode_ver / fw_version from the
 * header.  Returns 0 on success, -EINVAL when the file size does not
 * match what the header claims.  No data is copied here; @pieces only
 * references @ucode_raw, which must stay alive until iwl_alloc_ucode().
 */
static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
				    const struct firmware *ucode_raw,
				    struct iwl_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size, build;
	char buildstr[25];
	const u8 *src;

	drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(drv->fw.ucode_ver);

	switch (api_ver) {
	default:
		/* API >= 3 uses the v2 header which carries a build number */
		hdr_size = 28;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(drv, "File size too small!\n");
			return -EINVAL;
		}
		build = le32_to_cpu(ucode->u.v2.build);
		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
			     le32_to_cpu(ucode->u.v2.inst_size));
		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
			     le32_to_cpu(ucode->u.v2.data_size));
		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
			     le32_to_cpu(ucode->u.v2.init_size));
		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
			     le32_to_cpu(ucode->u.v2.init_data_size));
		src = ucode->u.v2.data;
		break;
	case 0:
	case 1:
	case 2:
		/* legacy v1 header: shorter, no build number */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(drv, "File size too small!\n");
			return -EINVAL;
		}
		build = 0;
		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
			     le32_to_cpu(ucode->u.v1.inst_size));
		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
			     le32_to_cpu(ucode->u.v1.data_size));
		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
			     le32_to_cpu(ucode->u.v1.init_size));
		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
			     le32_to_cpu(ucode->u.v1.init_data_size));
		src = ucode->u.v1.data;
		break;
	}

	if (build)
		sprintf(buildstr, " build %u%s", build,
			(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
				? " (EXP)" : "");
	else
		buildstr[0] = '\0';

	snprintf(drv->fw.fw_version,
		 sizeof(drv->fw.fw_version),
		 "%u.%u.%u.%u%s",
		 IWL_UCODE_MAJOR(drv->fw.ucode_ver),
		 IWL_UCODE_MINOR(drv->fw.ucode_ver),
		 IWL_UCODE_API(drv->fw.ucode_ver),
		 IWL_UCODE_SERIAL(drv->fw.ucode_ver),
		 buildstr);

	/* Verify size of file vs. image size info in file's header */

	if (ucode_raw->size != hdr_size +
	    get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) +
	    get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) +
	    get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) +
	    get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) {

		IWL_ERR(drv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}

	/* sections are laid out back to back after the header, in the
	 * order: runtime inst, runtime data, init inst, init data */
	set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src);
	src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST);
	set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
		       IWLAGN_RTC_INST_LOWER_BOUND);
	set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src);
	src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA);
	set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
		       IWLAGN_RTC_DATA_LOWER_BOUND);
	set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src);
	src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST);
	set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
		       IWLAGN_RTC_INST_LOWER_BOUND);
	set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src);
	src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA);
	set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
		       IWLAGN_RTC_DATA_LOWER_BOUND);
	return 0;
}
465
/*
 * iwl_parse_tlv_firmware - parse a TLV-format firmware file
 *
 * Walks the TLV list after the fixed header, recording section
 * pointers/sizes into @pieces and capability/log information into
 * @capa and drv->fw.  Unknown TLV types are skipped with a debug
 * message so newer firmware stays loadable.  Returns 0 on success,
 * -EINVAL on a bad magic, a truncated TLV, or an ill-sized known TLV.
 * @pieces only references @ucode_raw; no data is copied here.
 */
static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
				  const struct firmware *ucode_raw,
				  struct iwl_firmware_pieces *pieces,
				  struct iwl_ucode_capabilities *capa)
{
	struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
	struct iwl_ucode_tlv *tlv;
	size_t len = ucode_raw->size;
	const u8 *data;
	u32 tlv_len;
	enum iwl_ucode_tlv_type tlv_type;
	const u8 *tlv_data;
	char buildstr[25];
	u32 build;

	if (len < sizeof(*ucode)) {
		IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
		return -EINVAL;
	}

	if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
		IWL_ERR(drv, "invalid uCode magic: 0X%x\n",
			le32_to_cpu(ucode->magic));
		return -EINVAL;
	}

	drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
	build = le32_to_cpu(ucode->build);

	if (build)
		sprintf(buildstr, " build %u%s", build,
			(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
				? " (EXP)" : "");
	else
		buildstr[0] = '\0';

	snprintf(drv->fw.fw_version,
		 sizeof(drv->fw.fw_version),
		 "%u.%u.%u.%u%s",
		 IWL_UCODE_MAJOR(drv->fw.ucode_ver),
		 IWL_UCODE_MINOR(drv->fw.ucode_ver),
		 IWL_UCODE_API(drv->fw.ucode_ver),
		 IWL_UCODE_SERIAL(drv->fw.ucode_ver),
		 buildstr);

	data = ucode->data;

	len -= sizeof(*ucode);

	/* each iteration consumes one TLV header + its 4-byte-aligned data */
	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le32_to_cpu(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			IWL_ERR(drv, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		switch (tlv_type) {
		case IWL_UCODE_TLV_INST:
			set_sec_data(pieces, IWL_UCODE_REGULAR,
				     IWL_UCODE_SECTION_INST, tlv_data);
			set_sec_size(pieces, IWL_UCODE_REGULAR,
				     IWL_UCODE_SECTION_INST, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_REGULAR,
				       IWL_UCODE_SECTION_INST,
				       IWLAGN_RTC_INST_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_DATA:
			set_sec_data(pieces, IWL_UCODE_REGULAR,
				     IWL_UCODE_SECTION_DATA, tlv_data);
			set_sec_size(pieces, IWL_UCODE_REGULAR,
				     IWL_UCODE_SECTION_DATA, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_REGULAR,
				       IWL_UCODE_SECTION_DATA,
				       IWLAGN_RTC_DATA_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_INIT:
			set_sec_data(pieces, IWL_UCODE_INIT,
				     IWL_UCODE_SECTION_INST, tlv_data);
			set_sec_size(pieces, IWL_UCODE_INIT,
				     IWL_UCODE_SECTION_INST, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_INIT,
				       IWL_UCODE_SECTION_INST,
				       IWLAGN_RTC_INST_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_INIT_DATA:
			set_sec_data(pieces, IWL_UCODE_INIT,
				     IWL_UCODE_SECTION_DATA, tlv_data);
			set_sec_size(pieces, IWL_UCODE_INIT,
				     IWL_UCODE_SECTION_DATA, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_INIT,
				       IWL_UCODE_SECTION_DATA,
				       IWLAGN_RTC_DATA_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_BOOT:
			IWL_ERR(drv, "Found unexpected BOOT ucode\n");
			break;
		case IWL_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			capa->max_probe_length =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_PAN:
			/* presence-only TLV: must carry no payload */
			if (tlv_len)
				goto invalid_tlv_len;
			capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
			break;
		case IWL_UCODE_TLV_FLAGS:
			/* must be at least one u32 */
			if (tlv_len < sizeof(u32))
				goto invalid_tlv_len;
			/* and a proper number of u32s */
			if (tlv_len % sizeof(u32))
				goto invalid_tlv_len;
			/*
			 * This driver only reads the first u32 as
			 * right now no more features are defined,
			 * if that changes then either the driver
			 * will not work with the new firmware, or
			 * it'll not take advantage of new features.
			 */
			capa->flags = le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->init_evtlog_ptr =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->init_evtlog_size =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->init_errlog_ptr =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->inst_evtlog_ptr =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->inst_evtlog_size =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->inst_errlog_ptr =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
			/* presence-only TLV: must carry no payload */
			if (tlv_len)
				goto invalid_tlv_len;
			drv->fw.enhance_sensitivity_table = true;
			break;
		case IWL_UCODE_TLV_WOWLAN_INST:
			set_sec_data(pieces, IWL_UCODE_WOWLAN,
				     IWL_UCODE_SECTION_INST, tlv_data);
			set_sec_size(pieces, IWL_UCODE_WOWLAN,
				     IWL_UCODE_SECTION_INST, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_WOWLAN,
				       IWL_UCODE_SECTION_INST,
				       IWLAGN_RTC_INST_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_WOWLAN_DATA:
			set_sec_data(pieces, IWL_UCODE_WOWLAN,
				     IWL_UCODE_SECTION_DATA, tlv_data);
			set_sec_size(pieces, IWL_UCODE_WOWLAN,
				     IWL_UCODE_SECTION_DATA, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_WOWLAN,
				       IWL_UCODE_SECTION_DATA,
				       IWLAGN_RTC_DATA_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			capa->standard_phy_calibration_size =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_SEC_RT:
			/* SEC_* TLVs mark a new-style (mvm) firmware image */
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
					    tlv_len);
			drv->fw.mvm_fw = true;
			break;
		case IWL_UCODE_TLV_SEC_INIT:
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
					    tlv_len);
			drv->fw.mvm_fw = true;
			break;
		case IWL_UCODE_TLV_SEC_WOWLAN:
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
					    tlv_len);
			drv->fw.mvm_fw = true;
			break;
		case IWL_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwl_tlv_calib_data))
				goto invalid_tlv_len;
			if (iwl_set_default_calib(drv, tlv_data))
				goto tlv_error;
			break;
		case IWL_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
			break;
		default:
			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
			break;
		}
	}

	/* trailing bytes that don't form a complete TLV are an error */
	if (len) {
		IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
		iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
		return -EINVAL;
	}

	return 0;

 invalid_tlv_len:
	IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
 tlv_error:
	iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len);

	return -EINVAL;
}
710
711static int iwl_alloc_ucode(struct iwl_drv *drv,
712 struct iwl_firmware_pieces *pieces,
713 enum iwl_ucode_type type)
714{
715 int i;
716 for (i = 0;
717 i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
718 i++)
719 if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
720 get_sec(pieces, type, i)))
721 return -ENOMEM;
722 return 0;
723}
724
725static int validate_sec_sizes(struct iwl_drv *drv,
726 struct iwl_firmware_pieces *pieces,
727 const struct iwl_cfg *cfg)
728{
729 IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n",
730 get_sec_size(pieces, IWL_UCODE_REGULAR,
731 IWL_UCODE_SECTION_INST));
732 IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n",
733 get_sec_size(pieces, IWL_UCODE_REGULAR,
734 IWL_UCODE_SECTION_DATA));
735 IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n",
736 get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
737 IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n",
738 get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
739
740 /* Verify that uCode images will fit in card's SRAM. */
741 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
742 cfg->max_inst_size) {
743 IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
744 get_sec_size(pieces, IWL_UCODE_REGULAR,
745 IWL_UCODE_SECTION_INST));
746 return -1;
747 }
748
749 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
750 cfg->max_data_size) {
751 IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
752 get_sec_size(pieces, IWL_UCODE_REGULAR,
753 IWL_UCODE_SECTION_DATA));
754 return -1;
755 }
756
757 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
758 cfg->max_inst_size) {
759 IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n",
760 get_sec_size(pieces, IWL_UCODE_INIT,
761 IWL_UCODE_SECTION_INST));
762 return -1;
763 }
764
765 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
766 cfg->max_data_size) {
767 IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n",
768 get_sec_size(pieces, IWL_UCODE_REGULAR,
769 IWL_UCODE_SECTION_DATA));
770 return -1;
771 }
772 return 0;
773}
774
/*
 * _iwl_op_mode_start - start the given op_mode on this device
 *
 * Creates the per-opmode debugfs directory (when CONFIG_IWLWIFI_DEBUGFS
 * is set) and calls the op_mode's start() hook.  Returns the op_mode
 * handle, or NULL on failure; debugfs state is torn down again if
 * start() fails.  NOTE(review): both call sites hold
 * iwlwifi_opmode_table_mtx — presumably required; confirm before
 * adding new callers.
 */
static struct iwl_op_mode *
_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
{
	const struct iwl_op_mode_ops *ops = op->ops;
	struct dentry *dbgfs_dir = NULL;
	struct iwl_op_mode *op_mode = NULL;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	drv->dbgfs_op_mode = debugfs_create_dir(op->name,
						drv->dbgfs_drv);
	if (!drv->dbgfs_op_mode) {
		IWL_ERR(drv,
			"failed to create opmode debugfs directory\n");
		return op_mode;
	}
	dbgfs_dir = drv->dbgfs_op_mode;
#endif

	op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* undo the debugfs dir if the op_mode refused to start */
	if (!op_mode) {
		debugfs_remove_recursive(drv->dbgfs_op_mode);
		drv->dbgfs_op_mode = NULL;
	}
#endif

	return op_mode;
}
804
/*
 * _iwl_op_mode_stop - stop the device's op_mode, if one is running
 *
 * Safe to call when no op_mode was started; also removes the
 * per-opmode debugfs directory created by _iwl_op_mode_start().
 */
static void _iwl_op_mode_stop(struct iwl_drv *drv)
{
	/* op_mode can be NULL if its start failed */
	if (drv->op_mode) {
		iwl_op_mode_stop(drv->op_mode);
		drv->op_mode = NULL;

#ifdef CONFIG_IWLWIFI_DEBUGFS
		debugfs_remove_recursive(drv->dbgfs_op_mode);
		drv->dbgfs_op_mode = NULL;
#endif
	}
}
818
/**
 * iwl_req_fw_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers
 * for the card to fetch (via DMA).
 *
 * Runs asynchronously when the request from iwl_request_firmware()
 * completes (@ucode_raw is NULL if the file was not found).  Parses the
 * file, validates the API version, copies the sections into DMA-able
 * buffers and starts (or arranges to load) the op_mode.  On a parse or
 * version failure the next-older firmware file is requested; on a fatal
 * failure the driver is unbound from the device.
 */
static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
	struct iwl_drv *drv = context;
	struct iwl_fw *fw = &drv->fw;
	struct iwl_ucode_header *ucode;
	struct iwlwifi_opmode_table *op;
	int err;
	struct iwl_firmware_pieces pieces;
	const unsigned int api_max = drv->cfg->ucode_api_max;
	unsigned int api_ok = drv->cfg->ucode_api_ok;
	const unsigned int api_min = drv->cfg->ucode_api_min;
	u32 api_ver;
	int i;
	bool load_module = false;

	/* defaults used when the firmware carries no capability TLVs */
	fw->ucode_capa.max_probe_length = 200;
	fw->ucode_capa.standard_phy_calibration_size =
			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	if (!api_ok)
		api_ok = api_max;

	memset(&pieces, 0, sizeof(pieces));

	if (!ucode_raw) {
		/* only complain while we're still within the "ok" range */
		if (drv->fw_index <= api_ok)
			IWL_ERR(drv,
				"request for firmware file '%s' failed.\n",
				drv->firmware_name);
		goto try_again;
	}

	IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
		       drv->firmware_name, ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IWL_ERR(drv, "File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file: header followed by uCode images */
	ucode = (struct iwl_ucode_header *)ucode_raw->data;

	/* a zero version word marks the TLV file format */
	if (ucode->ver)
		err = iwl_parse_v1_v2_firmware(drv, ucode_raw, &pieces);
	else
		err = iwl_parse_tlv_firmware(drv, ucode_raw, &pieces,
					     &fw->ucode_capa);

	if (err)
		goto try_again;

	api_ver = IWL_UCODE_API(drv->fw.ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	/* no api version check required for experimental uCode */
	if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
		if (api_ver < api_min || api_ver > api_max) {
			IWL_ERR(drv,
				"Driver unable to support your firmware API. "
				"Driver supports v%u, firmware is v%u.\n",
				api_max, api_ver);
			goto try_again;
		}

		if (api_ver < api_ok) {
			if (api_ok != api_max)
				IWL_ERR(drv, "Firmware has old API version, "
					"expected v%u through v%u, got v%u.\n",
					api_ok, api_max, api_ver);
			else
				IWL_ERR(drv, "Firmware has old API version, "
					"expected v%u, got v%u.\n",
					api_max, api_ver);
			IWL_ERR(drv, "New firmware can be obtained from "
				"http://www.intellinuxwireless.org/.\n");
		}
	}

	IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version);

	/*
	 * In mvm uCode there is no difference between data and instructions
	 * sections.
	 */
	if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, drv->cfg))
		goto try_again;

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
		if (iwl_alloc_ucode(drv, &pieces, i))
			goto out_free_fw;

	/* Now that we can no longer fail, copy information */

	/*
	 * The (size - 16) / 12 formula is based on the information recorded
	 * for each event, which is of mode 1 (including timestamp) for all
	 * new microcodes that include this information.
	 */
	fw->init_evtlog_ptr = pieces.init_evtlog_ptr;
	if (pieces.init_evtlog_size)
		fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
	else
		fw->init_evtlog_size =
			drv->cfg->base_params->max_event_log_size;
	fw->init_errlog_ptr = pieces.init_errlog_ptr;
	fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
	if (pieces.inst_evtlog_size)
		fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
	else
		fw->inst_evtlog_size =
			drv->cfg->base_params->max_event_log_size;
	fw->inst_errlog_ptr = pieces.inst_errlog_ptr;

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	if (fw->ucode_capa.standard_phy_calibration_size >
	    IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
		fw->ucode_capa.standard_phy_calibration_size =
			IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);

	mutex_lock(&iwlwifi_opmode_table_mtx);
	op = &iwlwifi_opmode_table[DVM_OP_MODE];

	/* add this device to the list of devices using this op_mode */
	list_add_tail(&drv->list, &op->drv);

	if (op->ops) {
		drv->op_mode = _iwl_op_mode_start(drv, op);

		if (!drv->op_mode) {
			mutex_unlock(&iwlwifi_opmode_table_mtx);
			goto out_unbind;
		}
	} else {
		/* op_mode module not loaded yet; request it below */
		load_module = true;
	}
	mutex_unlock(&iwlwifi_opmode_table_mtx);

	/*
	 * Complete the firmware request last so that
	 * a driver unbind (stop) doesn't run while we
	 * are doing the start() above.
	 */
	complete(&drv->request_firmware_complete);

	/*
	 * Load the module last so we don't block anything
	 * else from proceeding if the module fails to load
	 * or hangs loading.
	 */
	if (load_module)
		request_module("%s", op->name);
	return;

 try_again:
	/* try next, if any */
	release_firmware(ucode_raw);
	if (iwl_request_firmware(drv, false))
		goto out_unbind;
	return;

 out_free_fw:
	IWL_ERR(drv, "failed to allocate pci memory\n");
	iwl_dealloc_ucode(drv);
	release_firmware(ucode_raw);
 out_unbind:
	complete(&drv->request_firmware_complete);
	device_release_driver(drv->trans->dev);
}
1010
/*
 * iwl_drv_start - allocate driver state and kick off firmware loading
 *
 * Creates the per-device and transport debugfs directories (when
 * enabled) and submits the asynchronous firmware request; the rest of
 * the bring-up happens in iwl_req_fw_callback().  Returns the new drv,
 * or an error value on failure.  NOTE(review): returns NULL on kzalloc
 * failure but ERR_PTR() on later errors — callers must handle both.
 */
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
			      const struct iwl_cfg *cfg)
{
	struct iwl_drv *drv;
	int ret;

	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return NULL;

	drv->trans = trans;
	drv->dev = trans->dev;
	drv->cfg = cfg;

	init_completion(&drv->request_firmware_complete);
	INIT_LIST_HEAD(&drv->list);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* Create the device debugfs entries. */
	drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
					    iwl_dbgfs_root);

	if (!drv->dbgfs_drv) {
		IWL_ERR(drv, "failed to create debugfs directory\n");
		ret = -ENOMEM;
		goto err_free_drv;
	}

	/* Create transport layer debugfs dir */
	drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);

	if (!drv->trans->dbgfs_dir) {
		IWL_ERR(drv, "failed to create transport debugfs directory\n");
		ret = -ENOMEM;
		goto err_free_dbgfs;
	}
#endif

	ret = iwl_request_firmware(drv, true);
	if (ret) {
		IWL_ERR(trans, "Couldn't request the fw\n");
		goto err_fw;
	}

	return drv;

err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
err_free_dbgfs:
	debugfs_remove_recursive(drv->dbgfs_drv);
err_free_drv:
#endif
	kfree(drv);

	return ERR_PTR(ret);
}
1067
/*
 * iwl_drv_stop - tear down a driver instance created by iwl_drv_start()
 *
 * Waits for any in-flight firmware request to finish, stops the
 * op_mode, frees the firmware buffers and removes the device from its
 * op_mode list before freeing the drv itself.
 */
void iwl_drv_stop(struct iwl_drv *drv)
{
	/* firmware loading (and its op_mode start) must not be in flight */
	wait_for_completion(&drv->request_firmware_complete);

	_iwl_op_mode_stop(drv);

	iwl_dealloc_ucode(drv);

	mutex_lock(&iwlwifi_opmode_table_mtx);
	/*
	 * List is empty (this item wasn't added)
	 * when firmware loading failed -- in that
	 * case we can't remove it from any list.
	 */
	if (!list_empty(&drv->list))
		list_del(&drv->list);
	mutex_unlock(&iwlwifi_opmode_table_mtx);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	debugfs_remove_recursive(drv->dbgfs_drv);
#endif

	kfree(drv);
}
1092
1093
/* shared module parameters; exposed via the module_param_named()
 * declarations at the bottom of this file */
struct iwl_mod_params iwlwifi_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	.plcp_check = true,
	.bt_coex_active = true,
	.power_level = IWL_POWER_INDEX_1,
	.bt_ch_announce = true,
	.auto_agg = true,
	.wd_disable = true,
	/* the rest are 0 by default */
};
EXPORT_SYMBOL_GPL(iwlwifi_mod_params);
1107
/*
 * iwl_opmode_register - called by an op_mode module when it loads
 *
 * Records @ops in the table entry matching @name and starts the
 * op_mode for every device already waiting on that entry's drv list.
 * Returns 0 on success, -EIO if @name is unknown.
 */
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
{
	int i;
	struct iwl_drv *drv;
	struct iwlwifi_opmode_table *op;

	mutex_lock(&iwlwifi_opmode_table_mtx);
	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
		op = &iwlwifi_opmode_table[i];
		if (strcmp(op->name, name))
			continue;
		op->ops = ops;
		/* TODO: need to handle exceptional case (start failure
		 * leaves drv->op_mode NULL for that device) */
		list_for_each_entry(drv, &op->drv, list)
			drv->op_mode = _iwl_op_mode_start(drv, op);

		mutex_unlock(&iwlwifi_opmode_table_mtx);
		return 0;
	}
	mutex_unlock(&iwlwifi_opmode_table_mtx);
	return -EIO;
}
EXPORT_SYMBOL_GPL(iwl_opmode_register);
1131
1132void iwl_opmode_deregister(const char *name)
1133{
1134 int i;
1135 struct iwl_drv *drv;
1136
1137 mutex_lock(&iwlwifi_opmode_table_mtx);
1138 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1139 if (strcmp(iwlwifi_opmode_table[i].name, name))
1140 continue;
1141 iwlwifi_opmode_table[i].ops = NULL;
1142
1143 /* call the stop routine for all devices */
1144 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
1145 _iwl_op_mode_stop(drv);
1146
1147 mutex_unlock(&iwlwifi_opmode_table_mtx);
1148 return;
1149 }
1150 mutex_unlock(&iwlwifi_opmode_table_mtx);
1151}
1152EXPORT_SYMBOL_GPL(iwl_opmode_deregister);
1153
/*
 * iwl_drv_init - module init: set up globals and register the PCI driver
 *
 * Initializes the op_mode table lock and lists, creates the debugfs
 * root (when enabled) and registers the PCI driver.  NOTE(review):
 * returns -EFAULT on debugfs failure — an odd errno, but kept as-is.
 */
static int __init iwl_drv_init(void)
{
	int i;

	mutex_init(&iwlwifi_opmode_table_mtx);

	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
		INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);

	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
	pr_info(DRV_COPYRIGHT "\n");

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* Create the root of iwlwifi debugfs subsystem. */
	iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);

	if (!iwl_dbgfs_root)
		return -EFAULT;
#endif

	return iwl_pci_register_driver();
}
module_init(iwl_drv_init);
1177
/* module exit: unregister the PCI driver and remove the debugfs root */
static void __exit iwl_drv_exit(void)
{
	iwl_pci_unregister_driver();

#ifdef CONFIG_IWLWIFI_DEBUGFS
	debugfs_remove_recursive(iwl_dbgfs_root);
#endif
}
module_exit(iwl_drv_exit);
1187
1188#ifdef CONFIG_IWLWIFI_DEBUG
1189module_param_named(debug, iwlwifi_mod_params.debug_level, uint,
1190 S_IRUGO | S_IWUSR);
1191MODULE_PARM_DESC(debug, "debug output mask");
1192#endif
1193
1194module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
1195MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
1196module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
1197MODULE_PARM_DESC(11n_disable,
1198 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
1199module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
1200 int, S_IRUGO);
1201MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
1202module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
1203MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
1204
1205module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
1206 int, S_IRUGO);
1207MODULE_PARM_DESC(antenna_coupling,
1208 "specify antenna coupling in dB (defualt: 0 dB)");
1209
1210module_param_named(bt_ch_inhibition, iwlwifi_mod_params.bt_ch_announce,
1211 bool, S_IRUGO);
1212MODULE_PARM_DESC(bt_ch_inhibition,
1213 "Enable BT channel inhibition (default: enable)");
1214
1215module_param_named(plcp_check, iwlwifi_mod_params.plcp_check, bool, S_IRUGO);
1216MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
1217
1218module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
1219MODULE_PARM_DESC(wd_disable,
1220 "Disable stuck queue watchdog timer 0=system default, "
1221 "1=disable, 2=enable (default: 0)");
1222
1223/*
1224 * set bt_coex_active to true, uCode will do kill/defer
1225 * every time the priority line is asserted (BT is sending signals on the
1226 * priority line in the PCIx).
1227 * set bt_coex_active to false, uCode will ignore the BT activity and
1228 * perform the normal operation
1229 *
1230 * User might experience transmit issue on some platform due to WiFi/BT
1231 * co-exist problem. The possible behaviors are:
1232 * Able to scan and finding all the available AP
1233 * Not able to associate with any AP
1234 * On those platforms, WiFi communication can be restored by set
1235 * "bt_coex_active" module parameter to "false"
1236 *
1237 * default: bt_coex_active = true (BT_COEX_ENABLE)
1238 */
1239module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active,
1240 bool, S_IRUGO);
1241MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
1242
1243module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, S_IRUGO);
1244MODULE_PARM_DESC(led_mode, "0=system default, "
1245 "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
1246
1247module_param_named(power_save, iwlwifi_mod_params.power_save,
1248 bool, S_IRUGO);
1249MODULE_PARM_DESC(power_save,
1250 "enable WiFi power management (default: disable)");
1251
1252module_param_named(power_level, iwlwifi_mod_params.power_level,
1253 int, S_IRUGO);
1254MODULE_PARM_DESC(power_level,
1255 "default power save level (range from 1 - 5, default: 1)");
1256
1257module_param_named(auto_agg, iwlwifi_mod_params.auto_agg,
1258 bool, S_IRUGO);
1259MODULE_PARM_DESC(auto_agg,
1260 "enable agg w/o check traffic load (default: enable)");
1261
1262module_param_named(5ghz_disable, iwlwifi_mod_params.disable_5ghz,
1263 bool, S_IRUGO);
1264MODULE_PARM_DESC(5ghz_disable, "disable 5GHz band (default: 0 [enabled])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
deleted file mode 100644
index 285de5f68c0..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ /dev/null
@@ -1,126 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
#ifndef __iwl_drv_h__
#define __iwl_drv_h__

/* for all modules */
#define DRV_NAME        "iwlwifi"
#define IWLWIFI_VERSION "in-tree:"
#define DRV_COPYRIGHT	"Copyright(c) 2003-2012 Intel Corporation"
#define DRV_AUTHOR	"<ilw@linux.intel.com>"


/**
 * DOC: Driver system flows - drv component
 *
 * This component implements the system flows such as bus enumeration, bus
 * removal. Bus dependent parts of system flows (such as iwl_pci_probe) are in
 * bus specific files (transport files). This is the code that is common among
 * different buses.
 *
 * This component is also in charge of managing the several implementations of
 * the wifi flows: it will allow to have several fw API implementation. These
 * different implementations will differ in the way they implement mac80211's
 * handlers too.
 *
 * The init flow wrt to the drv component looks like this:
 * 1) The bus specific component is called from module_init
 * 2) The bus specific component registers the bus driver
 * 3) The bus driver calls the probe function
 * 4) The bus specific component configures the bus
 * 5) The bus specific component calls to the drv bus agnostic part
 *    (iwl_drv_start)
 * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
 * 7) iwl_req_fw_callback parses the fw file
 * 8) iwl_req_fw_callback starts the wifi implementation that matches the fw
 */

struct iwl_drv;
struct iwl_trans;
struct iwl_cfg;
/**
 * iwl_drv_start - start the drv
 *
 * @trans: the transport to drive
 * @cfg: device specific constants / virtual functions
 *
 * starts the driver: fetches the firmware. This should be called by bus
 * specific system flows implementations. For example, the bus specific probe
 * function should do bus related operations only, and then call to this
 * function. It returns the driver object or %NULL if an error occurred.
 */
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
			      const struct iwl_cfg *cfg);

/**
 * iwl_drv_stop - stop the drv
 *
 * @drv: the driver object returned by iwl_drv_start()
 *
 * Stop the driver. This should be called by bus specific system flows
 * implementations. For example, the bus specific remove function should first
 * call this function and then do the bus related operations only.
 */
void iwl_drv_stop(struct iwl_drv *drv);

#endif /* __iwl_drv_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
deleted file mode 100644
index 471986690cf..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ /dev/null
@@ -1,931 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#include <linux/types.h>
63#include <linux/slab.h>
64#include <linux/export.h>
65#include "iwl-modparams.h"
66#include "iwl-eeprom-parse.h"
67
/* EEPROM offset definitions */

/*
 * Indirect access definitions: addresses with INDIRECT_ADDRESS set are
 * resolved through a per-section link word in the EEPROM (see the
 * EEPROM_LINK_* offsets below and eeprom_indirect_address()).
 */
#define ADDRESS_MSK                 0x0000FFFF
#define INDIRECT_TYPE_MSK           0x000F0000
#define INDIRECT_HOST               0x00010000
#define INDIRECT_GENERAL            0x00020000
#define INDIRECT_REGULATORY         0x00030000
#define INDIRECT_CALIBRATION        0x00040000
#define INDIRECT_PROCESS_ADJST      0x00050000
#define INDIRECT_OTHERS             0x00060000
#define INDIRECT_TXP_LIMIT          0x00070000
#define INDIRECT_TXP_LIMIT_SIZE     0x00080000
#define INDIRECT_ADDRESS            0x00100000

/* corresponding link offsets in EEPROM (byte offset = 2 * word offset) */
#define EEPROM_LINK_HOST             (2*0x64)
#define EEPROM_LINK_GENERAL          (2*0x65)
#define EEPROM_LINK_REGULATORY       (2*0x66)
#define EEPROM_LINK_CALIBRATION      (2*0x67)
#define EEPROM_LINK_PROCESS_ADJST    (2*0x68)
#define EEPROM_LINK_OTHERS           (2*0x69)
#define EEPROM_LINK_TXP_LIMIT        (2*0x6a)
#define EEPROM_LINK_TXP_LIMIT_SIZE   (2*0x6b)

/* General */
#define EEPROM_DEVICE_ID                    (2*0x08)	/* 2 bytes */
#define EEPROM_SUBSYSTEM_ID		    (2*0x0A)	/* 2 bytes */
#define EEPROM_MAC_ADDRESS                  (2*0x15)	/* 6  bytes */
#define EEPROM_BOARD_REVISION               (2*0x35)	/* 2  bytes */
#define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1)	/* 9  bytes */
#define EEPROM_VERSION                      (2*0x44)	/* 2  bytes */
#define EEPROM_SKU_CAP                      (2*0x45)	/* 2  bytes */
#define EEPROM_OEM_MODE                     (2*0x46)	/* 2  bytes */
#define EEPROM_RADIO_CONFIG                 (2*0x48)	/* 2  bytes */
#define EEPROM_NUM_MAC_ADDRESS              (2*0x4C)	/* 2  bytes */

/* calibration */
/**
 * struct iwl_eeprom_calib_hdr - header of the EEPROM calibration section
 * @version: calibration data version
 * @pa_type: power amplifier type
 * @voltage: voltage reading, stored little-endian
 */
struct iwl_eeprom_calib_hdr {
	u8 version;
	u8 pa_type;
	__le16 voltage;
} __packed;

#define EEPROM_CALIB_ALL	(INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
#define EEPROM_XTAL		((2*0x128) | EEPROM_CALIB_ALL)

/* temperature */
#define EEPROM_KELVIN_TEMPERATURE	((2*0x12A) | EEPROM_CALIB_ALL)
#define EEPROM_RAW_TEMPERATURE		((2*0x12B) | EEPROM_CALIB_ALL)

/* SKU Capabilities (actual values from EEPROM definition) */
enum eeprom_sku_bits {
	EEPROM_SKU_CAP_BAND_24GHZ	= BIT(4),
	EEPROM_SKU_CAP_BAND_52GHZ	= BIT(5),
	EEPROM_SKU_CAP_11N_ENABLE	= BIT(6),
	EEPROM_SKU_CAP_AMT_ENABLE	= BIT(7),
	EEPROM_SKU_CAP_IPAN_ENABLE	= BIT(8)
};

/* radio config bits (actual values from EEPROM definition) */
#define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define EEPROM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define EEPROM_RF_CFG_DASH_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define EEPROM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
135
136
/*
 * EEPROM bands
 * These are the channel numbers from each band in the order
 * that they are stored in the EEPROM band information. Note
 * that EEPROM bands aren't the same as mac80211 bands, and
 * there are even special "ht40 bands" in the EEPROM.
 */
static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

static const u8 iwl_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 iwl_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 iwl_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 iwl_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

static const u8 iwl_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 iwl_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};

/*
 * Total channel count; only bands 1-5 contribute -- the HT40 "bands"
 * (6 and 7) merely modify flags of channels already counted here.
 */
#define IWL_NUM_CHANNELS	(ARRAY_SIZE(iwl_eeprom_band_1) + \
				 ARRAY_SIZE(iwl_eeprom_band_2) + \
				 ARRAY_SIZE(iwl_eeprom_band_3) + \
				 ARRAY_SIZE(iwl_eeprom_band_4) + \
				 ARRAY_SIZE(iwl_eeprom_band_5))
177
/* rate data (static) */

/*
 * Legacy rate table exposed to cfg80211; .bitrate is in units of 100 kbit/s
 * (so 5.5 * 10 evaluates to the integer 55).  The first four entries are the
 * CCK rates (2.4 GHz only), the rest are the OFDM rates shared by both bands
 * -- see the RATES_*_OFFS / N_RATES_* macros below.
 */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
	{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
	{ .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
	{ .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
	{ .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
	{ .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
	{ .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
	{ .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
	{ .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
	{ .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
};
#define RATES_24_OFFS	0
#define N_RATES_24	ARRAY_SIZE(iwl_cfg80211_rates)
#define RATES_52_OFFS	4
#define N_RATES_52	(N_RATES_24 - RATES_52_OFFS)
200
201/* EEPROM reading functions */
202
203static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
204{
205 if (WARN_ON(offset + sizeof(u16) > eeprom_size))
206 return 0;
207 return le16_to_cpup((__le16 *)(eeprom + offset));
208}
209
210static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
211 u32 address)
212{
213 u16 offset = 0;
214
215 if ((address & INDIRECT_ADDRESS) == 0)
216 return address;
217
218 switch (address & INDIRECT_TYPE_MSK) {
219 case INDIRECT_HOST:
220 offset = iwl_eeprom_query16(eeprom, eeprom_size,
221 EEPROM_LINK_HOST);
222 break;
223 case INDIRECT_GENERAL:
224 offset = iwl_eeprom_query16(eeprom, eeprom_size,
225 EEPROM_LINK_GENERAL);
226 break;
227 case INDIRECT_REGULATORY:
228 offset = iwl_eeprom_query16(eeprom, eeprom_size,
229 EEPROM_LINK_REGULATORY);
230 break;
231 case INDIRECT_TXP_LIMIT:
232 offset = iwl_eeprom_query16(eeprom, eeprom_size,
233 EEPROM_LINK_TXP_LIMIT);
234 break;
235 case INDIRECT_TXP_LIMIT_SIZE:
236 offset = iwl_eeprom_query16(eeprom, eeprom_size,
237 EEPROM_LINK_TXP_LIMIT_SIZE);
238 break;
239 case INDIRECT_CALIBRATION:
240 offset = iwl_eeprom_query16(eeprom, eeprom_size,
241 EEPROM_LINK_CALIBRATION);
242 break;
243 case INDIRECT_PROCESS_ADJST:
244 offset = iwl_eeprom_query16(eeprom, eeprom_size,
245 EEPROM_LINK_PROCESS_ADJST);
246 break;
247 case INDIRECT_OTHERS:
248 offset = iwl_eeprom_query16(eeprom, eeprom_size,
249 EEPROM_LINK_OTHERS);
250 break;
251 default:
252 WARN_ON(1);
253 break;
254 }
255
256 /* translate the offset from words to byte */
257 return (address & ADDRESS_MSK) + (offset << 1);
258}
259
260static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
261 u32 offset)
262{
263 u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
264
265 if (WARN_ON(address >= eeprom_size))
266 return NULL;
267
268 return &eeprom[address];
269}
270
271static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
272 struct iwl_nvm_data *data)
273{
274 struct iwl_eeprom_calib_hdr *hdr;
275
276 hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
277 EEPROM_CALIB_ALL);
278 if (!hdr)
279 return -ENODATA;
280 data->calib_version = hdr->version;
281 data->calib_voltage = hdr->voltage;
282
283 return 0;
284}
285
/**
 * enum iwl_eeprom_channel_flags - channel flags in EEPROM
 * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel
 * @EEPROM_CHANNEL_ACTIVE: active scanning allowed
 * @EEPROM_CHANNEL_RADAR: radar detection required
 * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
 *
 * Note: bits 2 and 6 are not used in the EEPROM definition.
 */
enum iwl_eeprom_channel_flags {
	EEPROM_CHANNEL_VALID = BIT(0),
	EEPROM_CHANNEL_IBSS = BIT(1),
	EEPROM_CHANNEL_ACTIVE = BIT(3),
	EEPROM_CHANNEL_RADAR = BIT(4),
	EEPROM_CHANNEL_WIDE = BIT(5),
	EEPROM_CHANNEL_DFS = BIT(7),
};

/**
 * struct iwl_eeprom_channel - EEPROM channel data
 * @flags: %EEPROM_CHANNEL_* flags
 * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
 */
struct iwl_eeprom_channel {
	u8 flags;
	s8 max_power_avg;
} __packed;
313
314
/* per-entry flags for the enhanced TX-power table */
enum iwl_eeprom_enhanced_txpwr_flags {
	IWL_EEPROM_ENH_TXP_FL_VALID		= BIT(0),
	IWL_EEPROM_ENH_TXP_FL_BAND_52G		= BIT(1),
	IWL_EEPROM_ENH_TXP_FL_OFDM		= BIT(2),
	IWL_EEPROM_ENH_TXP_FL_40MHZ		= BIT(3),
	IWL_EEPROM_ENH_TXP_FL_HT_AP		= BIT(4),
	IWL_EEPROM_ENH_TXP_FL_RES1		= BIT(5),
	IWL_EEPROM_ENH_TXP_FL_RES2		= BIT(6),
	IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE	= BIT(7),
};

/**
 * struct iwl_eeprom_enhanced_txpwr - enhanced regulatory TX power limit entry
 * @flags: entry flags (%IWL_EEPROM_ENH_TXP_FL_*)
 * @channel: channel number; 0 means the entry applies to many channels
 * @chain_a_max: chain a max power in 1/2 dBm
 * @chain_b_max: chain b max power in 1/2 dBm
 * @chain_c_max: chain c max power in 1/2 dBm
 * @delta_20_in_40: 20-in-40 deltas (hi/lo nibbles)
 * @mimo2_max: mimo2 max power in 1/2 dBm
 * @mimo3_max: mimo3 max power in 1/2 dBm
 *
 * This structure presents the enhanced regulatory tx power limit layout
 * in an EEPROM image.
 */
struct iwl_eeprom_enhanced_txpwr {
	u8 flags;
	u8 channel;
	s8 chain_a_max;
	s8 chain_b_max;
	s8 chain_c_max;
	u8 delta_20_in_40;
	s8 mimo2_max;
	s8 mimo3_max;
} __packed;
350
351static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
352 struct iwl_eeprom_enhanced_txpwr *txp)
353{
354 s8 result = 0; /* (.5 dBm) */
355
356 /* Take the highest tx power from any valid chains */
357 if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
358 result = txp->chain_a_max;
359
360 if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
361 result = txp->chain_b_max;
362
363 if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
364 result = txp->chain_c_max;
365
366 if ((data->valid_tx_ant == ANT_AB ||
367 data->valid_tx_ant == ANT_BC ||
368 data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
369 result = txp->mimo2_max;
370
371 if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
372 result = txp->mimo3_max;
373
374 return result;
375}
376
/* enhanced TX-power section: data start, entry size, and length-word offset */
#define EEPROM_TXP_OFFS	(0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
#define EEPROM_TXP_SZ_OFFS	(0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)

/* expands to "<x> " when the flag is set in txp->flags, "" otherwise */
#define TXP_CHECK_AND_PRINT(x) \
	((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
383
384static void
385iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
386 struct iwl_eeprom_enhanced_txpwr *txp,
387 int n_channels, s8 max_txpower_avg)
388{
389 int ch_idx;
390 enum ieee80211_band band;
391
392 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
393 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
394
395 for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
396 struct ieee80211_channel *chan = &data->channels[ch_idx];
397
398 /* update matching channel or from common data only */
399 if (txp->channel != 0 && chan->hw_value != txp->channel)
400 continue;
401
402 /* update matching band only */
403 if (band != chan->band)
404 continue;
405
406 if (chan->max_power < max_txpower_avg &&
407 !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))
408 chan->max_power = max_txpower_avg;
409 }
410}
411
/*
 * Parse the enhanced TX-power table (6000 series and up): for every valid
 * entry, raise the max_power of the channels it covers and track the overall
 * maximum in data->max_tx_pwr_half_dbm.
 */
static void iwl_eeprom_enhanced_txpower(struct device *dev,
					struct iwl_nvm_data *data,
					const u8 *eeprom, size_t eeprom_size,
					int n_channels)
{
	struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
	int idx, entries;
	__le16 *txp_len;
	s8 max_txp_avg_halfdbm;

	/* the parsing below assumes the on-EEPROM entry layout */
	BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);

	/* the length is in 16-bit words, but we want entries */
	txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
						  EEPROM_TXP_SZ_OFFS);
	entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;

	txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
						  EEPROM_TXP_OFFS);

	for (idx = 0; idx < entries; idx++) {
		txp = &txp_array[idx];
		/* skip invalid entries */
		if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
			continue;

		IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
				 (txp->channel && (txp->flags &
					IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
					"Common " : (txp->channel) ?
					"Channel" : "Common",
				 (txp->channel),
				 TXP_CHECK_AND_PRINT(VALID),
				 TXP_CHECK_AND_PRINT(BAND_52G),
				 TXP_CHECK_AND_PRINT(OFDM),
				 TXP_CHECK_AND_PRINT(40MHZ),
				 TXP_CHECK_AND_PRINT(HT_AP),
				 TXP_CHECK_AND_PRINT(RES1),
				 TXP_CHECK_AND_PRINT(RES2),
				 TXP_CHECK_AND_PRINT(COMMON_TYPE),
				 txp->flags);
		IWL_DEBUG_EEPROM(dev,
				 "\t\t chain_A: 0x%02x chain_B: 0X%02x chain_C: 0X%02x\n",
				 txp->chain_a_max, txp->chain_b_max,
				 txp->chain_c_max);
		IWL_DEBUG_EEPROM(dev,
				 "\t\t MIMO2: 0x%02x MIMO3: 0x%02x High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
				 txp->mimo2_max, txp->mimo3_max,
				 ((txp->delta_20_in_40 & 0xf0) >> 4),
				 (txp->delta_20_in_40 & 0x0f));

		max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);

		/* half-dBm rounded up to dBm for the per-channel limit */
		iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
				DIV_ROUND_UP(max_txp_avg_halfdbm, 2));

		if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm)
			data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
	}
}
472
473static void iwl_init_band_reference(const struct iwl_cfg *cfg,
474 const u8 *eeprom, size_t eeprom_size,
475 int eeprom_band, int *eeprom_ch_count,
476 const struct iwl_eeprom_channel **ch_info,
477 const u8 **eeprom_ch_array)
478{
479 u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
480
481 offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
482
483 *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
484
485 switch (eeprom_band) {
486 case 1: /* 2.4GHz band */
487 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
488 *eeprom_ch_array = iwl_eeprom_band_1;
489 break;
490 case 2: /* 4.9GHz band */
491 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
492 *eeprom_ch_array = iwl_eeprom_band_2;
493 break;
494 case 3: /* 5.2GHz band */
495 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
496 *eeprom_ch_array = iwl_eeprom_band_3;
497 break;
498 case 4: /* 5.5GHz band */
499 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
500 *eeprom_ch_array = iwl_eeprom_band_4;
501 break;
502 case 5: /* 5.7GHz band */
503 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
504 *eeprom_ch_array = iwl_eeprom_band_5;
505 break;
506 case 6: /* 2.4GHz ht40 channels */
507 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
508 *eeprom_ch_array = iwl_eeprom_band_6;
509 break;
510 case 7: /* 5 GHz ht40 channels */
511 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
512 *eeprom_ch_array = iwl_eeprom_band_7;
513 break;
514 default:
515 *eeprom_ch_count = 0;
516 *eeprom_ch_array = NULL;
517 WARN_ON(1);
518 }
519}
520
/* expands to "<x> " when the flag is set in eeprom_ch->flags, "" otherwise */
#define CHECK_AND_PRINT(x) \
	((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")

/*
 * Find the (band, channel) entry in the driver's channel array and, if the
 * EEPROM marks it valid, clear the given NO_HT40PLUS/NO_HT40MINUS flag so
 * the corresponding HT40 extension channel becomes usable.
 */
static void iwl_mod_ht40_chan_info(struct device *dev,
				   struct iwl_nvm_data *data, int n_channels,
				   enum ieee80211_band band, u16 channel,
				   const struct iwl_eeprom_channel *eeprom_ch,
				   u8 clear_ht40_extension_channel)
{
	struct ieee80211_channel *chan = NULL;
	int i;

	/* linear search for the matching (band, hw_value) channel */
	for (i = 0; i < n_channels; i++) {
		if (data->channels[i].band != band)
			continue;
		if (data->channels[i].hw_value != channel)
			continue;
		chan = &data->channels[i];
		break;
	}

	/* channel may legitimately be absent (invalid in this SKU) */
	if (!chan)
		return;

	IWL_DEBUG_EEPROM(dev,
			 "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
			 channel,
			 band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
			 CHECK_AND_PRINT(IBSS),
			 CHECK_AND_PRINT(ACTIVE),
			 CHECK_AND_PRINT(RADAR),
			 CHECK_AND_PRINT(WIDE),
			 CHECK_AND_PRINT(DFS),
			 eeprom_ch->flags,
			 eeprom_ch->max_power_avg,
			 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
			  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? ""
			 : "not ");

	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		chan->flags &= ~clear_ht40_extension_channel;
}
563
/* like CHECK_AND_PRINT but indexes eeprom_ch_info[ch_idx] */
#define CHECK_AND_PRINT_I(x)	\
	((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")

/*
 * Build data->channels from the EEPROM regulatory information: add every
 * valid channel of EEPROM bands 1-5, set its flags and max power, then walk
 * the HT40 "bands" (6-7) to enable HT40 extension channels where allowed.
 * Returns the number of channels filled in.
 */
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
				struct iwl_nvm_data *data,
				const u8 *eeprom, size_t eeprom_size)
{
	int band, ch_idx;
	const struct iwl_eeprom_channel *eeprom_ch_info;
	const u8 *eeprom_ch_array;
	int eeprom_ch_count;
	int n_channels = 0;

	/*
	 * Loop through the 5 EEPROM bands and add them to the parse list
	 */
	for (band = 1; band <= 5; band++) {
		struct ieee80211_channel *channel;

		iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
					&eeprom_ch_count, &eeprom_ch_info,
					&eeprom_ch_array);

		/* Loop through each band adding each of the channels */
		for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
			const struct iwl_eeprom_channel *eeprom_ch;

			eeprom_ch = &eeprom_ch_info[ch_idx];

			/* invalid channels are skipped entirely */
			if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
				IWL_DEBUG_EEPROM(dev,
						 "Ch. %d Flags %x [%sGHz] - No traffic\n",
						 eeprom_ch_array[ch_idx],
						 eeprom_ch_info[ch_idx].flags,
						 (band != 1) ? "5.2" : "2.4");
				continue;
			}

			channel = &data->channels[n_channels];
			n_channels++;

			channel->hw_value = eeprom_ch_array[ch_idx];
			channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
						    : IEEE80211_BAND_5GHZ;
			channel->center_freq =
				ieee80211_channel_to_frequency(
					channel->hw_value, channel->band);

			/* set no-HT40, will enable as appropriate later */
			channel->flags = IEEE80211_CHAN_NO_HT40;

			if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
				channel->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
				channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
				channel->flags |= IEEE80211_CHAN_RADAR;

			/* Initialize regulatory-based run-time data */
			channel->max_power =
				eeprom_ch_info[ch_idx].max_power_avg;
			IWL_DEBUG_EEPROM(dev,
					 "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
					 channel->hw_value,
					 (band != 1) ? "5.2" : "2.4",
					 CHECK_AND_PRINT_I(VALID),
					 CHECK_AND_PRINT_I(IBSS),
					 CHECK_AND_PRINT_I(ACTIVE),
					 CHECK_AND_PRINT_I(RADAR),
					 CHECK_AND_PRINT_I(WIDE),
					 CHECK_AND_PRINT_I(DFS),
					 eeprom_ch_info[ch_idx].flags,
					 eeprom_ch_info[ch_idx].max_power_avg,
					 ((eeprom_ch_info[ch_idx].flags &
							EEPROM_CHANNEL_IBSS) &&
					  !(eeprom_ch_info[ch_idx].flags &
							EEPROM_CHANNEL_RADAR))
						? "" : "not ");
		}
	}

	if (cfg->eeprom_params->enhanced_txpower) {
		/*
		 * for newer device (6000 series and up)
		 * EEPROM contain enhanced tx power information
		 * driver need to process addition information
		 * to determine the max channel tx power limits
		 */
		iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size,
					    n_channels);
	} else {
		/* All others use data from channel map */
		int i;

		data->max_tx_pwr_half_dbm = -128;

		for (i = 0; i < n_channels; i++)
			data->max_tx_pwr_half_dbm =
				max_t(s8, data->max_tx_pwr_half_dbm,
				      data->channels[i].max_power * 2);
	}

	/* Check if we do have HT40 channels */
	if (cfg->eeprom_params->regulatory_bands[5] ==
	    EEPROM_REGULATORY_BAND_NO_HT40 &&
	    cfg->eeprom_params->regulatory_bands[6] ==
	    EEPROM_REGULATORY_BAND_NO_HT40)
		return n_channels;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum ieee80211_band ieeeband;

		iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
					&eeprom_ch_count, &eeprom_ch_info,
					&eeprom_ch_array);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
				       : IEEE80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
			/* Set up driver's info for lower half */
			iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
					       eeprom_ch_array[ch_idx],
					       &eeprom_ch_info[ch_idx],
					       IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
					       eeprom_ch_array[ch_idx] + 4,
					       &eeprom_ch_info[ch_idx],
					       IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return n_channels;
}
705
706static int iwl_init_sband_channels(struct iwl_nvm_data *data,
707 struct ieee80211_supported_band *sband,
708 int n_channels, enum ieee80211_band band)
709{
710 struct ieee80211_channel *chan = &data->channels[0];
711 int n = 0, idx = 0;
712
713 while (chan->band != band && idx < n_channels)
714 chan = &data->channels[++idx];
715
716 sband->channels = &data->channels[idx];
717
718 while (chan->band == band && idx < n_channels) {
719 chan = &data->channels[++idx];
720 n++;
721 }
722
723 sband->n_channels = n;
724
725 return n;
726}
727
#define MAX_BIT_RATE_40_MHZ	150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ	72 /* Mbps */

/*
 * Fill in the HT capabilities advertised for @band, derived from the number
 * of valid RX/TX chains and the device's HT parameters.  HT is marked
 * unsupported when the SKU or config doesn't allow 11n.
 */
static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
				 struct iwl_nvm_data *data,
				 struct ieee80211_sta_ht_cap *ht_info,
				 enum ieee80211_band band)
{
	int max_bit_rate = 0;
	u8 rx_chains;
	u8 tx_chains;

	tx_chains = hweight8(data->valid_tx_ant);
	/* some devices receive on a single chain only (SISO diversity) */
	if (cfg->rx_with_siso_diversity)
		rx_chains = 1;
	else
		rx_chains = hweight8(data->valid_rx_ant);

	if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
		ht_info->ht_supported = false;
		return;
	}

	ht_info->ht_supported = true;
	ht_info->cap = 0;

	if (iwlwifi_mod_params.amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;

	/* one MCS-rate byte (MCS 0-7, 8-15, 16-23) per available RX chain */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	if (cfg->ht_params->ht_greenfield_support)
		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	max_bit_rate = MAX_BIT_RATE_20_MHZ;

	if (cfg->ht_params->ht40_bands & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* bit 0 of rx_mask[4] is MCS 32 */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains != rx_chains) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
792
793static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
794 struct iwl_nvm_data *data,
795 const u8 *eeprom, size_t eeprom_size)
796{
797 int n_channels = iwl_init_channel_map(dev, cfg, data,
798 eeprom, eeprom_size);
799 int n_used = 0;
800 struct ieee80211_supported_band *sband;
801
802 sband = &data->bands[IEEE80211_BAND_2GHZ];
803 sband->band = IEEE80211_BAND_2GHZ;
804 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
805 sband->n_bitrates = N_RATES_24;
806 n_used += iwl_init_sband_channels(data, sband, n_channels,
807 IEEE80211_BAND_2GHZ);
808 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
809
810 sband = &data->bands[IEEE80211_BAND_5GHZ];
811 sband->band = IEEE80211_BAND_5GHZ;
812 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
813 sband->n_bitrates = N_RATES_52;
814 n_used += iwl_init_sband_channels(data, sband, n_channels,
815 IEEE80211_BAND_5GHZ);
816 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
817
818 if (n_channels != n_used)
819 IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
820 n_used, n_channels);
821}
822
823/* EEPROM data functions */
824
825struct iwl_nvm_data *
826iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
827 const u8 *eeprom, size_t eeprom_size)
828{
829 struct iwl_nvm_data *data;
830 const void *tmp;
831 u16 radio_cfg, sku;
832
833 if (WARN_ON(!cfg || !cfg->eeprom_params))
834 return NULL;
835
836 data = kzalloc(sizeof(*data) +
837 sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
838 GFP_KERNEL);
839 if (!data)
840 return NULL;
841
842 /* get MAC address(es) */
843 tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
844 if (!tmp)
845 goto err_free;
846 memcpy(data->hw_addr, tmp, ETH_ALEN);
847 data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size,
848 EEPROM_NUM_MAC_ADDRESS);
849
850 if (iwl_eeprom_read_calib(eeprom, eeprom_size, data))
851 goto err_free;
852
853 tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
854 if (!tmp)
855 goto err_free;
856 memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));
857
858 tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
859 EEPROM_RAW_TEMPERATURE);
860 if (!tmp)
861 goto err_free;
862 data->raw_temperature = *(__le16 *)tmp;
863
864 tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
865 EEPROM_KELVIN_TEMPERATURE);
866 if (!tmp)
867 goto err_free;
868 data->kelvin_temperature = *(__le16 *)tmp;
869 data->kelvin_voltage = *((__le16 *)tmp + 1);
870
871 radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
872 EEPROM_RADIO_CONFIG);
873 data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
874 data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
875 data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
876 data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
877 data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
878 data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
879
880 sku = iwl_eeprom_query16(eeprom, eeprom_size,
881 EEPROM_SKU_CAP);
882 data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
883 data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
884 data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
885 data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
886 data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
887 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
888 data->sku_cap_11n_enable = false;
889
890 data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size,
891 EEPROM_VERSION);
892
893 /* check overrides (some devices have wrong EEPROM) */
894 if (cfg->valid_tx_ant)
895 data->valid_tx_ant = cfg->valid_tx_ant;
896 if (cfg->valid_rx_ant)
897 data->valid_rx_ant = cfg->valid_rx_ant;
898
899 if (!data->valid_tx_ant || !data->valid_rx_ant) {
900 IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
901 data->valid_tx_ant, data->valid_rx_ant);
902 goto err_free;
903 }
904
905 iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);
906
907 return data;
908 err_free:
909 kfree(data);
910 return NULL;
911}
912EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data);
913
914/* helper functions */
915int iwl_nvm_check_version(struct iwl_nvm_data *data,
916 struct iwl_trans *trans)
917{
918 if (data->nvm_version >= trans->cfg->nvm_ver ||
919 data->calib_version >= trans->cfg->nvm_calib_ver) {
920 IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
921 data->nvm_version, data->calib_version);
922 return 0;
923 }
924
925 IWL_ERR(trans,
926 "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
927 data->nvm_version, trans->cfg->nvm_ver,
928 data->calib_version, trans->cfg->nvm_calib_ver);
929 return -EINVAL;
930}
931EXPORT_SYMBOL_GPL(iwl_nvm_check_version);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
deleted file mode 100644
index 555f0eb61d4..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ /dev/null
@@ -1,129 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_eeprom_parse_h__
63#define __iwl_eeprom_parse_h__
64
65#include <linux/types.h>
66#include <linux/if_ether.h>
67#include "iwl-trans.h"
68
69struct iwl_nvm_data {
70 int n_hw_addrs;
71 u8 hw_addr[ETH_ALEN];
72
73 u8 calib_version;
74 __le16 calib_voltage;
75
76 __le16 raw_temperature;
77 __le16 kelvin_temperature;
78 __le16 kelvin_voltage;
79 __le16 xtal_calib[2];
80
81 bool sku_cap_band_24GHz_enable;
82 bool sku_cap_band_52GHz_enable;
83 bool sku_cap_11n_enable;
84 bool sku_cap_amt_enable;
85 bool sku_cap_ipan_enable;
86
87 u8 radio_cfg_type;
88 u8 radio_cfg_step;
89 u8 radio_cfg_dash;
90 u8 radio_cfg_pnum;
91 u8 valid_tx_ant, valid_rx_ant;
92
93 u16 nvm_version;
94 s8 max_tx_pwr_half_dbm;
95
96 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
97 struct ieee80211_channel channels[];
98};
99
100/**
101 * iwl_parse_eeprom_data - parse EEPROM data and return values
102 *
103 * @dev: device pointer we're parsing for, for debug only
104 * @cfg: device configuration for parsing and overrides
105 * @eeprom: the EEPROM data
106 * @eeprom_size: length of the EEPROM data
107 *
108 * This function parses all EEPROM values we need and then
109 * returns a (newly allocated) struct containing all the
110 * relevant values for driver use. The struct must be freed
111 * later with iwl_free_nvm_data().
112 */
113struct iwl_nvm_data *
114iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
115 const u8 *eeprom, size_t eeprom_size);
116
117/**
118 * iwl_free_nvm_data - free NVM data
119 * @data: the data to free
120 */
121static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
122{
123 kfree(data);
124}
125
126int iwl_nvm_check_version(struct iwl_nvm_data *data,
127 struct iwl_trans *trans);
128
129#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
deleted file mode 100644
index 27c7da3c6ed..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ /dev/null
@@ -1,463 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#include <linux/types.h>
63#include <linux/slab.h>
64#include <linux/export.h>
65
66#include "iwl-debug.h"
67#include "iwl-eeprom-read.h"
68#include "iwl-io.h"
69#include "iwl-prph.h"
70#include "iwl-csr.h"
71
72/*
73 * EEPROM access time values:
74 *
75 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
76 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
77 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
78 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
79 */
80#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
81
82#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
83#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
84
85
86/*
87 * The device's EEPROM semaphore prevents conflicts between driver and uCode
88 * when accessing the EEPROM; each access is a series of pulses to/from the
89 * EEPROM chip, not a single event, so even reads could conflict if they
90 * weren't arbitrated by the semaphore.
91 */
92
93#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
94#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
95
96static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
97{
98 u16 count;
99 int ret;
100
101 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
102 /* Request semaphore */
103 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
104 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
105
106 /* See if we got it */
107 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
108 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
109 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
110 EEPROM_SEM_TIMEOUT);
111 if (ret >= 0) {
112 IWL_DEBUG_EEPROM(trans->dev,
113 "Acquired semaphore after %d tries.\n",
114 count+1);
115 return ret;
116 }
117 }
118
119 return ret;
120}
121
122static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
123{
124 iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
125 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
126}
127
128static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
129{
130 u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
131
132 IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
133
134 switch (gp) {
135 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
136 if (!nvm_is_otp) {
137 IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
138 gp);
139 return -ENOENT;
140 }
141 return 0;
142 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
143 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
144 if (nvm_is_otp) {
145 IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
146 return -ENOENT;
147 }
148 return 0;
149 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
150 default:
151 IWL_ERR(trans,
152 "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
153 nvm_is_otp ? "OTP" : "EEPROM", gp);
154 return -ENOENT;
155 }
156}
157
158/******************************************************************************
159 *
160 * OTP related functions
161 *
162******************************************************************************/
163
164static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
165{
166 iwl_read32(trans, CSR_OTP_GP_REG);
167
168 iwl_clear_bit(trans, CSR_OTP_GP_REG,
169 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
170}
171
172static int iwl_nvm_is_otp(struct iwl_trans *trans)
173{
174 u32 otpgp;
175
176 /* OTP only valid for CP/PP and after */
177 switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
178 case CSR_HW_REV_TYPE_NONE:
179 IWL_ERR(trans, "Unknown hardware type\n");
180 return -EIO;
181 case CSR_HW_REV_TYPE_5300:
182 case CSR_HW_REV_TYPE_5350:
183 case CSR_HW_REV_TYPE_5100:
184 case CSR_HW_REV_TYPE_5150:
185 return 0;
186 default:
187 otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
188 if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
189 return 1;
190 return 0;
191 }
192}
193
194static int iwl_init_otp_access(struct iwl_trans *trans)
195{
196 int ret;
197
198 /* Enable 40MHz radio clock */
199 iwl_write32(trans, CSR_GP_CNTRL,
200 iwl_read32(trans, CSR_GP_CNTRL) |
201 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
202
203 /* wait for clock to be ready */
204 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
205 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
206 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
207 25000);
208 if (ret < 0) {
209 IWL_ERR(trans, "Time out access OTP\n");
210 } else {
211 iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
212 APMG_PS_CTRL_VAL_RESET_REQ);
213 udelay(5);
214 iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
215 APMG_PS_CTRL_VAL_RESET_REQ);
216
217 /*
218 * CSR auto clock gate disable bit -
219 * this is only applicable for HW with OTP shadow RAM
220 */
221 if (trans->cfg->base_params->shadow_ram_support)
222 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
223 CSR_RESET_LINK_PWR_MGMT_DISABLED);
224 }
225 return ret;
226}
227
228static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
229 __le16 *eeprom_data)
230{
231 int ret = 0;
232 u32 r;
233 u32 otpgp;
234
235 iwl_write32(trans, CSR_EEPROM_REG,
236 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
237 ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
238 CSR_EEPROM_REG_READ_VALID_MSK,
239 CSR_EEPROM_REG_READ_VALID_MSK,
240 IWL_EEPROM_ACCESS_TIMEOUT);
241 if (ret < 0) {
242 IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
243 return ret;
244 }
245 r = iwl_read32(trans, CSR_EEPROM_REG);
246 /* check for ECC errors: */
247 otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
248 if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
249 /* stop in this case */
250 /* set the uncorrectable OTP ECC bit for acknowledgement */
251 iwl_set_bit(trans, CSR_OTP_GP_REG,
252 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
253 IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
254 return -EINVAL;
255 }
256 if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
257 /* continue in this case */
258 /* set the correctable OTP ECC bit for acknowledgement */
259 iwl_set_bit(trans, CSR_OTP_GP_REG,
260 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
261 IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
262 }
263 *eeprom_data = cpu_to_le16(r >> 16);
264 return 0;
265}
266
267/*
268 * iwl_is_otp_empty: check for empty OTP
269 */
270static bool iwl_is_otp_empty(struct iwl_trans *trans)
271{
272 u16 next_link_addr = 0;
273 __le16 link_value;
274 bool is_empty = false;
275
276 /* locate the beginning of OTP link list */
277 if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
278 if (!link_value) {
279 IWL_ERR(trans, "OTP is empty\n");
280 is_empty = true;
281 }
282 } else {
283 IWL_ERR(trans, "Unable to read first block of OTP list.\n");
284 is_empty = true;
285 }
286
287 return is_empty;
288}
289
290
291/*
292 * iwl_find_otp_image: find EEPROM image in OTP
293 * finding the OTP block that contains the EEPROM image.
294 * the last valid block on the link list (the block _before_ the last block)
295 * is the block we should read and used to configure the device.
296 * If all the available OTP blocks are full, the last block will be the block
297 * we should read and used to configure the device.
298 * only perform this operation if shadow RAM is disabled
299 */
300static int iwl_find_otp_image(struct iwl_trans *trans,
301 u16 *validblockaddr)
302{
303 u16 next_link_addr = 0, valid_addr;
304 __le16 link_value = 0;
305 int usedblocks = 0;
306
307 /* set addressing mode to absolute to traverse the link list */
308 iwl_set_otp_access_absolute(trans);
309
310 /* checking for empty OTP or error */
311 if (iwl_is_otp_empty(trans))
312 return -EINVAL;
313
314 /*
315 * start traverse link list
316 * until reach the max number of OTP blocks
317 * different devices have different number of OTP blocks
318 */
319 do {
320 /* save current valid block address
321 * check for more block on the link list
322 */
323 valid_addr = next_link_addr;
324 next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
325 IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
326 usedblocks, next_link_addr);
327 if (iwl_read_otp_word(trans, next_link_addr, &link_value))
328 return -EINVAL;
329 if (!link_value) {
330 /*
331 * reach the end of link list, return success and
332 * set address point to the starting address
333 * of the image
334 */
335 *validblockaddr = valid_addr;
336 /* skip first 2 bytes (link list pointer) */
337 *validblockaddr += 2;
338 return 0;
339 }
340 /* more in the link list, continue */
341 usedblocks++;
342 } while (usedblocks <= trans->cfg->base_params->max_ll_items);
343
344 /* OTP has no valid blocks */
345 IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
346 return -EINVAL;
347}
348
349/**
350 * iwl_read_eeprom - read EEPROM contents
351 *
352 * Load the EEPROM contents from adapter and return it
353 * and its size.
354 *
355 * NOTE: This routine uses the non-debug IO access functions.
356 */
357int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
358{
359 __le16 *e;
360 u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
361 int sz;
362 int ret;
363 u16 addr;
364 u16 validblockaddr = 0;
365 u16 cache_addr = 0;
366 int nvm_is_otp;
367
368 if (!eeprom || !eeprom_size)
369 return -EINVAL;
370
371 nvm_is_otp = iwl_nvm_is_otp(trans);
372 if (nvm_is_otp < 0)
373 return nvm_is_otp;
374
375 sz = trans->cfg->base_params->eeprom_size;
376 IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
377
378 e = kmalloc(sz, GFP_KERNEL);
379 if (!e)
380 return -ENOMEM;
381
382 ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
383 if (ret < 0) {
384 IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
385 goto err_free;
386 }
387
388 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
389 ret = iwl_eeprom_acquire_semaphore(trans);
390 if (ret < 0) {
391 IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
392 goto err_free;
393 }
394
395 if (nvm_is_otp) {
396 ret = iwl_init_otp_access(trans);
397 if (ret) {
398 IWL_ERR(trans, "Failed to initialize OTP access.\n");
399 goto err_unlock;
400 }
401
402 iwl_write32(trans, CSR_EEPROM_GP,
403 iwl_read32(trans, CSR_EEPROM_GP) &
404 ~CSR_EEPROM_GP_IF_OWNER_MSK);
405
406 iwl_set_bit(trans, CSR_OTP_GP_REG,
407 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
408 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
409 /* traversing the linked list if no shadow ram supported */
410 if (!trans->cfg->base_params->shadow_ram_support) {
411 ret = iwl_find_otp_image(trans, &validblockaddr);
412 if (ret)
413 goto err_unlock;
414 }
415 for (addr = validblockaddr; addr < validblockaddr + sz;
416 addr += sizeof(u16)) {
417 __le16 eeprom_data;
418
419 ret = iwl_read_otp_word(trans, addr, &eeprom_data);
420 if (ret)
421 goto err_unlock;
422 e[cache_addr / 2] = eeprom_data;
423 cache_addr += sizeof(u16);
424 }
425 } else {
426 /* eeprom is an array of 16bit values */
427 for (addr = 0; addr < sz; addr += sizeof(u16)) {
428 u32 r;
429
430 iwl_write32(trans, CSR_EEPROM_REG,
431 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
432
433 ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
434 CSR_EEPROM_REG_READ_VALID_MSK,
435 CSR_EEPROM_REG_READ_VALID_MSK,
436 IWL_EEPROM_ACCESS_TIMEOUT);
437 if (ret < 0) {
438 IWL_ERR(trans,
439 "Time out reading EEPROM[%d]\n", addr);
440 goto err_unlock;
441 }
442 r = iwl_read32(trans, CSR_EEPROM_REG);
443 e[addr / 2] = cpu_to_le16(r >> 16);
444 }
445 }
446
447 IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
448 nvm_is_otp ? "OTP" : "EEPROM");
449
450 iwl_eeprom_release_semaphore(trans);
451
452 *eeprom_size = sz;
453 *eeprom = (u8 *)e;
454 return 0;
455
456 err_unlock:
457 iwl_eeprom_release_semaphore(trans);
458 err_free:
459 kfree(e);
460
461 return ret;
462}
463EXPORT_SYMBOL_GPL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
deleted file mode 100644
index 1337c9d36fe..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__
65
66#include "iwl-trans.h"
67
68int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
69
70#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index ec48563d3c6..0ad60b3c04d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -63,8 +63,6 @@
63#ifndef __iwl_fh_h__ 63#ifndef __iwl_fh_h__
64#define __iwl_fh_h__ 64#define __iwl_fh_h__
65 65
66#include <linux/types.h>
67
68/****************************/ 66/****************************/
69/* Flow Handler Definitions */ 67/* Flow Handler Definitions */
70/****************************/ 68/****************************/
@@ -104,29 +102,15 @@
104 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
105 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte 103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
106 * aligned (address bits 0-7 must be 0). 104 * aligned (address bits 0-7 must be 0).
107 * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
108 * for them are in different places.
109 * 105 *
110 * Bit fields in each pointer register: 106 * Bit fields in each pointer register:
111 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned 107 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
112 */ 108 */
113#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) 109#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
114#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) 110#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
115#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0) 111
116#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) 112/* Find TFD CB base pointer for given queue (range 0-15). */
117#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20) 113#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
118#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
119
120/* Find TFD CB base pointer for given queue */
121static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
122{
123 if (chnl < 16)
124 return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
125 if (chnl < 20)
126 return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
127 WARN_ON_ONCE(chnl >= 32);
128 return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
129}
130 114
131 115
132/** 116/**
@@ -267,7 +251,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
267 251
268#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) 252#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
269#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) 253#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
270#define RX_RB_TIMEOUT (0x11) 254#define RX_RB_TIMEOUT (0x10)
271 255
272#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) 256#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
273#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) 257#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
@@ -282,6 +266,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
282#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) 266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
283#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) 267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
284 268
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
285/** 271/**
286 * Rx Shared Status Registers (RSSR) 272 * Rx Shared Status Registers (RSSR)
287 * 273 *
@@ -421,8 +407,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
421 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) 407 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
422 408
423#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) 409#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
424#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)
425
426/* Instruct FH to increment the retry count of a packet when 410/* Instruct FH to increment the retry count of a packet when
427 * it is brought from the memory to TX-FIFO 411 * it is brought from the memory to TX-FIFO
428 */ 412 */
@@ -438,6 +422,10 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
438#define RX_FREE_BUFFERS 64 422#define RX_FREE_BUFFERS 64
439#define RX_LOW_WATERMARK 8 423#define RX_LOW_WATERMARK 8
440 424
425/* Size of one Rx buffer in host DRAM */
426#define IWL_RX_BUF_SIZE_4K (4 * 1024)
427#define IWL_RX_BUF_SIZE_8K (8 * 1024)
428
441/** 429/**
442 * struct iwl_rb_status - reseve buffer status 430 * struct iwl_rb_status - reseve buffer status
443 * host memory mapped FH registers 431 * host memory mapped FH registers
@@ -520,16 +508,4 @@ struct iwl_tfd {
520/* Keep Warm Size */ 508/* Keep Warm Size */
521#define IWL_KW_SIZE 0x1000 /* 4k */ 509#define IWL_KW_SIZE 0x1000 /* 4k */
522 510
523/* Fixed (non-configurable) rx data from phy */
524
525/**
526 * struct iwlagn_schedq_bc_tbl scheduler byte count table
527 * base physical address provided by SCD_DRAM_BASE_ADDR
528 * @tfd_offset 0-12 - tx command byte count
529 * 12-16 - station index
530 */
531struct iwlagn_scd_bc_tbl {
532 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
533} __packed;
534
535#endif /* !__iwl_fh_h__ */ 511#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
deleted file mode 100644
index e71564053e7..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ /dev/null
@@ -1,156 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_fw_file_h__
64#define __iwl_fw_file_h__
65
66#include <linux/netdevice.h>
67
68/* v1/v2 uCode file layout */
69struct iwl_ucode_header {
70 __le32 ver; /* major/minor/API/serial */
71 union {
72 struct {
73 __le32 inst_size; /* bytes of runtime code */
74 __le32 data_size; /* bytes of runtime data */
75 __le32 init_size; /* bytes of init code */
76 __le32 init_data_size; /* bytes of init data */
77 __le32 boot_size; /* bytes of bootstrap code */
78 u8 data[0]; /* in same order as sizes */
79 } v1;
80 struct {
81 __le32 build; /* build number */
82 __le32 inst_size; /* bytes of runtime code */
83 __le32 data_size; /* bytes of runtime data */
84 __le32 init_size; /* bytes of init code */
85 __le32 init_data_size; /* bytes of init data */
86 __le32 boot_size; /* bytes of bootstrap code */
87 u8 data[0]; /* in same order as sizes */
88 } v2;
89 } u;
90};
91
92/*
93 * new TLV uCode file layout
94 *
95 * The new TLV file format contains TLVs, that each specify
96 * some piece of data.
97 */
98
99enum iwl_ucode_tlv_type {
100 IWL_UCODE_TLV_INVALID = 0, /* unused */
101 IWL_UCODE_TLV_INST = 1,
102 IWL_UCODE_TLV_DATA = 2,
103 IWL_UCODE_TLV_INIT = 3,
104 IWL_UCODE_TLV_INIT_DATA = 4,
105 IWL_UCODE_TLV_BOOT = 5,
106 IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
107 IWL_UCODE_TLV_PAN = 7,
108 IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
109 IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
110 IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
111 IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
112 IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
113 IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
114 IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
115 IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
116 IWL_UCODE_TLV_WOWLAN_INST = 16,
117 IWL_UCODE_TLV_WOWLAN_DATA = 17,
118 IWL_UCODE_TLV_FLAGS = 18,
119 IWL_UCODE_TLV_SEC_RT = 19,
120 IWL_UCODE_TLV_SEC_INIT = 20,
121 IWL_UCODE_TLV_SEC_WOWLAN = 21,
122 IWL_UCODE_TLV_DEF_CALIB = 22,
123 IWL_UCODE_TLV_PHY_SKU = 23,
124};
125
126struct iwl_ucode_tlv {
127 __le32 type; /* see above */
128 __le32 length; /* not including type/length fields */
129 u8 data[0];
130};
131
132#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
133
134struct iwl_tlv_ucode_header {
135 /*
136 * The TLV style ucode header is distinguished from
137 * the v1/v2 style header by first four bytes being
138 * zero, as such is an invalid combination of
139 * major/minor/API/serial versions.
140 */
141 __le32 zero;
142 __le32 magic;
143 u8 human_readable[64];
144 __le32 ver; /* major/minor/API/serial */
145 __le32 build;
146 __le64 ignore;
147 /*
148 * The data contained herein has a TLV layout,
149 * see above for the TLV header and types.
150 * Note that each TLV is padded to a length
151 * that is a multiple of 4 for alignment.
152 */
153 u8 data[0];
154};
155
156#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
deleted file mode 100644
index d1a86b66bc5..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ /dev/null
@@ -1,177 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_fw_h__
64#define __iwl_fw_h__
65#include <linux/types.h>
66#include <net/mac80211.h>
67
68/**
69 * enum iwl_ucode_tlv_flag - ucode API flags
70 * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
71 * was a separate TLV but moved here to save space.
72 * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
73 * treats good CRC threshold as a boolean
74 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
75 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
76 */
77enum iwl_ucode_tlv_flag {
78 IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
79 IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
80 IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
81 IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
82};
83
84/* The default calibrate table size if not specified by firmware file */
85#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
86#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
87#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
88
89/**
90 * enum iwl_ucode_type
91 *
92 * The type of ucode.
93 *
94 * @IWL_UCODE_REGULAR: Normal runtime ucode
95 * @IWL_UCODE_INIT: Initial ucode
96 * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
97 */
98enum iwl_ucode_type {
99 IWL_UCODE_REGULAR,
100 IWL_UCODE_INIT,
101 IWL_UCODE_WOWLAN,
102 IWL_UCODE_TYPE_MAX,
103};
104
105/*
106 * enumeration of ucode section.
107 * This enumeration is used for legacy tlv style (before 16.0 uCode).
108 */
109enum iwl_ucode_sec {
110 IWL_UCODE_SECTION_INST,
111 IWL_UCODE_SECTION_DATA,
112};
113/*
114 * For 16.0 uCode and above, there is no differentiation between sections,
115 * just an offset to the HW address.
116 */
117#define IWL_UCODE_SECTION_MAX 4
118
119struct iwl_ucode_capabilities {
120 u32 max_probe_length;
121 u32 standard_phy_calibration_size;
122 u32 flags;
123};
124
125/* one for each uCode image (inst/data, init/runtime/wowlan) */
126struct fw_desc {
127 const void *data; /* vmalloc'ed data */
128 u32 len; /* size in bytes */
129 u32 offset; /* offset in the device */
130};
131
132struct fw_img {
133 struct fw_desc sec[IWL_UCODE_SECTION_MAX];
134};
135
136/* uCode version contains 4 values: Major/Minor/API/Serial */
137#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
138#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
139#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
140#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
141
142/**
143 * struct iwl_fw - variables associated with the firmware
144 *
145 * @ucode_ver: ucode version from the ucode file
146 * @fw_version: firmware version string
147 * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan.
148 * @ucode_capa: capabilities parsed from the ucode file.
149 * @enhance_sensitivity_table: device can do enhanced sensitivity.
150 * @init_evtlog_ptr: event log offset for init ucode.
151 * @init_evtlog_size: event log size for init ucode.
152 * @init_errlog_ptr: error log offfset for init ucode.
153 * @inst_evtlog_ptr: event log offset for runtime ucode.
154 * @inst_evtlog_size: event log size for runtime ucode.
155 * @inst_errlog_ptr: error log offfset for runtime ucode.
156 */
157struct iwl_fw {
158 u32 ucode_ver;
159
160 char fw_version[ETHTOOL_BUSINFO_LEN];
161
162 /* ucode images */
163 struct fw_img img[IWL_UCODE_TYPE_MAX];
164
165 struct iwl_ucode_capabilities ucode_capa;
166 bool enhance_sensitivity_table;
167
168 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
169 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
170
171 u64 default_calib[IWL_UCODE_TYPE_MAX];
172 u32 phy_config;
173
174 bool mvm_fw;
175};
176
177#endif /* __iwl_fw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index cdaff957205..aa4a9067445 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -25,71 +25,46 @@
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
28#include <linux/delay.h>
29#include <linux/device.h>
30#include <linux/export.h>
31 28
32#include "iwl-io.h" 29#include "iwl-io.h"
33#include "iwl-csr.h"
34#include "iwl-debug.h"
35 30
36#define IWL_POLL_INTERVAL 10 /* microseconds */ 31#define IWL_POLL_INTERVAL 10 /* microseconds */
37 32
38static inline void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) 33static inline void __iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
39{ 34{
40 iwl_write32(trans, reg, iwl_read32(trans, reg) | mask); 35 iwl_write32(priv, reg, iwl_read32(priv, reg) | mask);
41} 36}
42 37
43static inline void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) 38static inline void __iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
44{ 39{
45 iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask); 40 iwl_write32(priv, reg, iwl_read32(priv, reg) & ~mask);
46} 41}
47 42
48void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) 43void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
49{ 44{
50 unsigned long flags; 45 unsigned long flags;
51 46
52 spin_lock_irqsave(&trans->reg_lock, flags); 47 spin_lock_irqsave(&priv->reg_lock, flags);
53 __iwl_set_bit(trans, reg, mask); 48 __iwl_set_bit(priv, reg, mask);
54 spin_unlock_irqrestore(&trans->reg_lock, flags); 49 spin_unlock_irqrestore(&priv->reg_lock, flags);
55} 50}
56EXPORT_SYMBOL_GPL(iwl_set_bit);
57 51
58void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) 52void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
59{ 53{
60 unsigned long flags; 54 unsigned long flags;
61 55
62 spin_lock_irqsave(&trans->reg_lock, flags); 56 spin_lock_irqsave(&priv->reg_lock, flags);
63 __iwl_clear_bit(trans, reg, mask); 57 __iwl_clear_bit(priv, reg, mask);
64 spin_unlock_irqrestore(&trans->reg_lock, flags); 58 spin_unlock_irqrestore(&priv->reg_lock, flags);
65} 59}
66EXPORT_SYMBOL_GPL(iwl_clear_bit);
67 60
68void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) 61int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
69{
70 unsigned long flags;
71 u32 v;
72
73#ifdef CONFIG_IWLWIFI_DEBUG
74 WARN_ON_ONCE(value & ~mask);
75#endif
76
77 spin_lock_irqsave(&trans->reg_lock, flags);
78 v = iwl_read32(trans, reg);
79 v &= ~mask;
80 v |= value;
81 iwl_write32(trans, reg, v);
82 spin_unlock_irqrestore(&trans->reg_lock, flags);
83}
84EXPORT_SYMBOL_GPL(iwl_set_bits_mask);
85
86int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
87 u32 bits, u32 mask, int timeout) 62 u32 bits, u32 mask, int timeout)
88{ 63{
89 int t = 0; 64 int t = 0;
90 65
91 do { 66 do {
92 if ((iwl_read32(trans, addr) & mask) == (bits & mask)) 67 if ((iwl_read32(priv, addr) & mask) == (bits & mask))
93 return t; 68 return t;
94 udelay(IWL_POLL_INTERVAL); 69 udelay(IWL_POLL_INTERVAL);
95 t += IWL_POLL_INTERVAL; 70 t += IWL_POLL_INTERVAL;
@@ -97,17 +72,15 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
97 72
98 return -ETIMEDOUT; 73 return -ETIMEDOUT;
99} 74}
100EXPORT_SYMBOL_GPL(iwl_poll_bit);
101 75
102int iwl_grab_nic_access_silent(struct iwl_trans *trans) 76int iwl_grab_nic_access_silent(struct iwl_priv *priv)
103{ 77{
104 int ret; 78 int ret;
105 79
106 lockdep_assert_held(&trans->reg_lock); 80 lockdep_assert_held(&priv->reg_lock);
107 81
108 /* this bit wakes up the NIC */ 82 /* this bit wakes up the NIC */
109 __iwl_set_bit(trans, CSR_GP_CNTRL, 83 __iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
110 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
111 84
112 /* 85 /*
113 * These bits say the device is running, and should keep running for 86 * These bits say the device is running, and should keep running for
@@ -128,83 +101,70 @@ int iwl_grab_nic_access_silent(struct iwl_trans *trans)
128 * 5000 series and later (including 1000 series) have non-volatile SRAM, 101 * 5000 series and later (including 1000 series) have non-volatile SRAM,
129 * and do not save/restore SRAM when power cycling. 102 * and do not save/restore SRAM when power cycling.
130 */ 103 */
131 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 104 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
132 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 105 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
133 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 106 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
134 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); 107 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
135 if (ret < 0) { 108 if (ret < 0) {
136 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); 109 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
137 return -EIO; 110 return -EIO;
138 } 111 }
139 112
140 return 0; 113 return 0;
141} 114}
142EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent);
143 115
144bool iwl_grab_nic_access(struct iwl_trans *trans) 116int iwl_grab_nic_access(struct iwl_priv *priv)
145{ 117{
146 int ret = iwl_grab_nic_access_silent(trans); 118 int ret = iwl_grab_nic_access_silent(priv);
147 if (unlikely(ret)) { 119 if (ret) {
148 u32 val = iwl_read32(trans, CSR_GP_CNTRL); 120 u32 val = iwl_read32(priv, CSR_GP_CNTRL);
149 WARN_ONCE(1, "Timeout waiting for hardware access " 121 IWL_ERR(priv,
150 "(CSR_GP_CNTRL 0x%08x)\n", val); 122 "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
151 return false;
152 } 123 }
153 124
154 return true; 125 return ret;
155} 126}
156EXPORT_SYMBOL_GPL(iwl_grab_nic_access);
157 127
158void iwl_release_nic_access(struct iwl_trans *trans) 128void iwl_release_nic_access(struct iwl_priv *priv)
159{ 129{
160 lockdep_assert_held(&trans->reg_lock); 130 lockdep_assert_held(&priv->reg_lock);
161 __iwl_clear_bit(trans, CSR_GP_CNTRL, 131 __iwl_clear_bit(priv, CSR_GP_CNTRL,
162 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 132 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
163 /*
164 * Above we read the CSR_GP_CNTRL register, which will flush
165 * any previous writes, but we need the write that clears the
166 * MAC_ACCESS_REQ bit to be performed before any other writes
167 * scheduled on different CPUs (after we drop reg_lock).
168 */
169 mmiowb();
170} 133}
171EXPORT_SYMBOL_GPL(iwl_release_nic_access);
172 134
173u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) 135u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg)
174{ 136{
175 u32 value; 137 u32 value;
176 unsigned long flags; 138 unsigned long flags;
177 139
178 spin_lock_irqsave(&trans->reg_lock, flags); 140 spin_lock_irqsave(&priv->reg_lock, flags);
179 iwl_grab_nic_access(trans); 141 iwl_grab_nic_access(priv);
180 value = iwl_read32(trans, reg); 142 value = iwl_read32(priv, reg);
181 iwl_release_nic_access(trans); 143 iwl_release_nic_access(priv);
182 spin_unlock_irqrestore(&trans->reg_lock, flags); 144 spin_unlock_irqrestore(&priv->reg_lock, flags);
183 145
184 return value; 146 return value;
185} 147}
186EXPORT_SYMBOL_GPL(iwl_read_direct32);
187 148
188void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) 149void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
189{ 150{
190 unsigned long flags; 151 unsigned long flags;
191 152
192 spin_lock_irqsave(&trans->reg_lock, flags); 153 spin_lock_irqsave(&priv->reg_lock, flags);
193 if (likely(iwl_grab_nic_access(trans))) { 154 if (!iwl_grab_nic_access(priv)) {
194 iwl_write32(trans, reg, value); 155 iwl_write32(priv, reg, value);
195 iwl_release_nic_access(trans); 156 iwl_release_nic_access(priv);
196 } 157 }
197 spin_unlock_irqrestore(&trans->reg_lock, flags); 158 spin_unlock_irqrestore(&priv->reg_lock, flags);
198} 159}
199EXPORT_SYMBOL_GPL(iwl_write_direct32);
200 160
201int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, 161int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
202 int timeout) 162 int timeout)
203{ 163{
204 int t = 0; 164 int t = 0;
205 165
206 do { 166 do {
207 if ((iwl_read_direct32(trans, addr) & mask) == mask) 167 if ((iwl_read_direct32(priv, addr) & mask) == mask)
208 return t; 168 return t;
209 udelay(IWL_POLL_INTERVAL); 169 udelay(IWL_POLL_INTERVAL);
210 t += IWL_POLL_INTERVAL; 170 t += IWL_POLL_INTERVAL;
@@ -212,143 +172,123 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
212 172
213 return -ETIMEDOUT; 173 return -ETIMEDOUT;
214} 174}
215EXPORT_SYMBOL_GPL(iwl_poll_direct_bit);
216 175
217static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs) 176static inline u32 __iwl_read_prph(struct iwl_priv *priv, u32 reg)
218{ 177{
219 u32 val = iwl_trans_read_prph(trans, ofs); 178 iwl_write32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
220 trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val); 179 rmb();
221 return val; 180 return iwl_read32(priv, HBUS_TARG_PRPH_RDAT);
222} 181}
223 182
224static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) 183static inline void __iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
225{ 184{
226 trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val); 185 iwl_write32(priv, HBUS_TARG_PRPH_WADDR,
227 iwl_trans_write_prph(trans, ofs, val); 186 ((addr & 0x0000FFFF) | (3 << 24)));
187 wmb();
188 iwl_write32(priv, HBUS_TARG_PRPH_WDAT, val);
228} 189}
229 190
230u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) 191u32 iwl_read_prph(struct iwl_priv *priv, u32 reg)
231{ 192{
232 unsigned long flags; 193 unsigned long flags;
233 u32 val; 194 u32 val;
234 195
235 spin_lock_irqsave(&trans->reg_lock, flags); 196 spin_lock_irqsave(&priv->reg_lock, flags);
236 iwl_grab_nic_access(trans); 197 iwl_grab_nic_access(priv);
237 val = __iwl_read_prph(trans, ofs); 198 val = __iwl_read_prph(priv, reg);
238 iwl_release_nic_access(trans); 199 iwl_release_nic_access(priv);
239 spin_unlock_irqrestore(&trans->reg_lock, flags); 200 spin_unlock_irqrestore(&priv->reg_lock, flags);
240 return val; 201 return val;
241} 202}
242EXPORT_SYMBOL_GPL(iwl_read_prph);
243 203
244void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) 204void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
245{ 205{
246 unsigned long flags; 206 unsigned long flags;
247 207
248 spin_lock_irqsave(&trans->reg_lock, flags); 208 spin_lock_irqsave(&priv->reg_lock, flags);
249 if (likely(iwl_grab_nic_access(trans))) { 209 if (!iwl_grab_nic_access(priv)) {
250 __iwl_write_prph(trans, ofs, val); 210 __iwl_write_prph(priv, addr, val);
251 iwl_release_nic_access(trans); 211 iwl_release_nic_access(priv);
252 } 212 }
253 spin_unlock_irqrestore(&trans->reg_lock, flags); 213 spin_unlock_irqrestore(&priv->reg_lock, flags);
254} 214}
255EXPORT_SYMBOL_GPL(iwl_write_prph);
256 215
257void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) 216void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
258{ 217{
259 unsigned long flags; 218 unsigned long flags;
260 219
261 spin_lock_irqsave(&trans->reg_lock, flags); 220 spin_lock_irqsave(&priv->reg_lock, flags);
262 if (likely(iwl_grab_nic_access(trans))) { 221 iwl_grab_nic_access(priv);
263 __iwl_write_prph(trans, ofs, 222 __iwl_write_prph(priv, reg, __iwl_read_prph(priv, reg) | mask);
264 __iwl_read_prph(trans, ofs) | mask); 223 iwl_release_nic_access(priv);
265 iwl_release_nic_access(trans); 224 spin_unlock_irqrestore(&priv->reg_lock, flags);
266 }
267 spin_unlock_irqrestore(&trans->reg_lock, flags);
268} 225}
269EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
270 226
271void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, 227void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
272 u32 bits, u32 mask) 228 u32 bits, u32 mask)
273{ 229{
274 unsigned long flags; 230 unsigned long flags;
275 231
276 spin_lock_irqsave(&trans->reg_lock, flags); 232 spin_lock_irqsave(&priv->reg_lock, flags);
277 if (likely(iwl_grab_nic_access(trans))) { 233 iwl_grab_nic_access(priv);
278 __iwl_write_prph(trans, ofs, 234 __iwl_write_prph(priv, reg,
279 (__iwl_read_prph(trans, ofs) & mask) | bits); 235 (__iwl_read_prph(priv, reg) & mask) | bits);
280 iwl_release_nic_access(trans); 236 iwl_release_nic_access(priv);
281 } 237 spin_unlock_irqrestore(&priv->reg_lock, flags);
282 spin_unlock_irqrestore(&trans->reg_lock, flags);
283} 238}
284EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
285 239
286void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) 240void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
287{ 241{
288 unsigned long flags; 242 unsigned long flags;
289 u32 val; 243 u32 val;
290 244
291 spin_lock_irqsave(&trans->reg_lock, flags); 245 spin_lock_irqsave(&priv->reg_lock, flags);
292 if (likely(iwl_grab_nic_access(trans))) { 246 iwl_grab_nic_access(priv);
293 val = __iwl_read_prph(trans, ofs); 247 val = __iwl_read_prph(priv, reg);
294 __iwl_write_prph(trans, ofs, (val & ~mask)); 248 __iwl_write_prph(priv, reg, (val & ~mask));
295 iwl_release_nic_access(trans); 249 iwl_release_nic_access(priv);
296 } 250 spin_unlock_irqrestore(&priv->reg_lock, flags);
297 spin_unlock_irqrestore(&trans->reg_lock, flags);
298} 251}
299EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
300 252
301void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, 253void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
302 void *buf, int dwords) 254 void *buf, int words)
303{ 255{
304 unsigned long flags; 256 unsigned long flags;
305 int offs; 257 int offs;
306 u32 *vals = buf; 258 u32 *vals = buf;
307 259
308 spin_lock_irqsave(&trans->reg_lock, flags); 260 spin_lock_irqsave(&priv->reg_lock, flags);
309 if (likely(iwl_grab_nic_access(trans))) { 261 iwl_grab_nic_access(priv);
310 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); 262
311 for (offs = 0; offs < dwords; offs++) 263 iwl_write32(priv, HBUS_TARG_MEM_RADDR, addr);
312 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); 264 rmb();
313 iwl_release_nic_access(trans); 265
314 } 266 for (offs = 0; offs < words; offs++)
315 spin_unlock_irqrestore(&trans->reg_lock, flags); 267 vals[offs] = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
268
269 iwl_release_nic_access(priv);
270 spin_unlock_irqrestore(&priv->reg_lock, flags);
316} 271}
317EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
318 272
319u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr) 273u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr)
320{ 274{
321 u32 value; 275 u32 value;
322 276
323 _iwl_read_targ_mem_dwords(trans, addr, &value, 1); 277 _iwl_read_targ_mem_words(priv, addr, &value, 1);
324 278
325 return value; 279 return value;
326} 280}
327EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
328 281
329int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, 282void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
330 const void *buf, int dwords)
331{ 283{
332 unsigned long flags; 284 unsigned long flags;
333 int offs, result = 0;
334 const u32 *vals = buf;
335
336 spin_lock_irqsave(&trans->reg_lock, flags);
337 if (likely(iwl_grab_nic_access(trans))) {
338 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
339 for (offs = 0; offs < dwords; offs++)
340 iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
341 iwl_release_nic_access(trans);
342 } else
343 result = -EBUSY;
344 spin_unlock_irqrestore(&trans->reg_lock, flags);
345
346 return result;
347}
348EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
349 285
350int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val) 286 spin_lock_irqsave(&priv->reg_lock, flags);
351{ 287 if (!iwl_grab_nic_access(priv)) {
352 return _iwl_write_targ_mem_dwords(trans, addr, &val, 1); 288 iwl_write32(priv, HBUS_TARG_MEM_WADDR, addr);
289 wmb();
290 iwl_write32(priv, HBUS_TARG_MEM_WDAT, val);
291 iwl_release_nic_access(priv);
292 }
293 spin_unlock_irqrestore(&priv->reg_lock, flags);
353} 294}
354EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 48dc753e374..19a09310112 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -29,66 +29,65 @@
29#ifndef __iwl_io_h__ 29#ifndef __iwl_io_h__
30#define __iwl_io_h__ 30#define __iwl_io_h__
31 31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
32#include "iwl-devtrace.h" 36#include "iwl-devtrace.h"
33#include "iwl-trans.h" 37#include "iwl-bus.h"
34 38
35static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) 39static inline void iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
36{ 40{
37 trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val); 41 trace_iwlwifi_dev_iowrite8(priv, ofs, val);
38 iwl_trans_write8(trans, ofs, val); 42 bus_write8(priv->bus, ofs, val);
39} 43}
40 44
41static inline void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val) 45static inline void iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
42{ 46{
43 trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val); 47 trace_iwlwifi_dev_iowrite32(priv, ofs, val);
44 iwl_trans_write32(trans, ofs, val); 48 bus_write32(priv->bus, ofs, val);
45} 49}
46 50
47static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs) 51static inline u32 iwl_read32(struct iwl_priv *priv, u32 ofs)
48{ 52{
49 u32 val = iwl_trans_read32(trans, ofs); 53 u32 val = bus_read32(priv->bus, ofs);
50 trace_iwlwifi_dev_ioread32(trans->dev, ofs, val); 54 trace_iwlwifi_dev_ioread32(priv, ofs, val);
51 return val; 55 return val;
52} 56}
53 57
54void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask); 58void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask);
55void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask); 59void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask);
56
57void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
58 60
59int iwl_poll_bit(struct iwl_trans *trans, u32 addr, 61int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
60 u32 bits, u32 mask, int timeout); 62 u32 bits, u32 mask, int timeout);
61int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, 63int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
62 int timeout); 64 int timeout);
63 65
64int iwl_grab_nic_access_silent(struct iwl_trans *trans); 66int iwl_grab_nic_access_silent(struct iwl_priv *priv);
65bool iwl_grab_nic_access(struct iwl_trans *trans); 67int iwl_grab_nic_access(struct iwl_priv *priv);
66void iwl_release_nic_access(struct iwl_trans *trans); 68void iwl_release_nic_access(struct iwl_priv *priv);
67 69
68u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg); 70u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg);
69void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value); 71void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value);
70 72
71 73
72u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs); 74u32 iwl_read_prph(struct iwl_priv *priv, u32 reg);
73void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); 75void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val);
74void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); 76void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
75void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, 77void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
76 u32 bits, u32 mask); 78 u32 bits, u32 mask);
77void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); 79void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
78 80
79void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, 81void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
80 void *buf, int dwords); 82 void *buf, int words);
81 83
82#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \ 84#define iwl_read_targ_mem_words(priv, addr, buf, bufsize) \
83 do { \ 85 do { \
84 BUILD_BUG_ON((bufsize) % sizeof(u32)); \ 86 BUILD_BUG_ON((bufsize) % sizeof(u32)); \
85 _iwl_read_targ_mem_dwords(trans, addr, buf, \ 87 _iwl_read_targ_mem_words(priv, addr, buf, \
86 (bufsize) / sizeof(u32));\ 88 (bufsize) / sizeof(u32));\
87 } while (0) 89 } while (0)
88 90
89int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, 91u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr);
90 const void *buf, int dwords); 92void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val);
91
92u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
93int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
94#endif 93#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
deleted file mode 100644
index d9a86d6b2bd..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ /dev/null
@@ -1,126 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_modparams_h__
64#define __iwl_modparams_h__
65
66#include <linux/types.h>
67#include <linux/spinlock.h>
68#include <linux/gfp.h>
69#include <net/mac80211.h>
70
71extern struct iwl_mod_params iwlwifi_mod_params;
72
73enum iwl_power_level {
74 IWL_POWER_INDEX_1,
75 IWL_POWER_INDEX_2,
76 IWL_POWER_INDEX_3,
77 IWL_POWER_INDEX_4,
78 IWL_POWER_INDEX_5,
79 IWL_POWER_NUM
80};
81
82#define IWL_DISABLE_HT_ALL BIT(0)
83#define IWL_DISABLE_HT_TXAGG BIT(1)
84#define IWL_DISABLE_HT_RXAGG BIT(2)
85
86/**
87 * struct iwl_mod_params
88 *
89 * Holds the module parameters
90 *
91 * @sw_crypto: using hardware encryption, default = 0
92 * @disable_11n: disable 11n capabilities, default = 0,
93 * use IWL_DISABLE_HT_* constants
94 * @amsdu_size_8K: enable 8K amsdu size, default = 1
95 * @restart_fw: restart firmware, default = 1
96 * @plcp_check: enable plcp health check, default = true
97 * @wd_disable: enable stuck queue check, default = 0
98 * @bt_coex_active: enable bt coex, default = true
99 * @led_mode: system default, default = 0
100 * @power_save: disable power save, default = false
101 * @power_level: power level, default = 1
102 * @debug_level: levels are IWL_DL_*
103 * @ant_coupling: antenna coupling in dB, default = 0
104 * @bt_ch_announce: BT channel inhibition, default = enable
105 * @auto_agg: enable agg. without check, default = true
106 * @disable_5ghz: disable 5GHz capability, default = false
107 */
108struct iwl_mod_params {
109 int sw_crypto;
110 unsigned int disable_11n;
111 int amsdu_size_8K;
112 int restart_fw;
113 bool plcp_check;
114 int wd_disable;
115 bool bt_coex_active;
116 int led_mode;
117 bool power_save;
118 int power_level;
119 u32 debug_level;
120 int ant_coupling;
121 bool bt_ch_announce;
122 bool auto_agg;
123 bool disable_5ghz;
124};
125
126#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
deleted file mode 100644
index c61f2070f15..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ /dev/null
@@ -1,190 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/sched.h>
64#include <linux/export.h>
65
66#include "iwl-notif-wait.h"
67
68
69void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
70{
71 spin_lock_init(&notif_wait->notif_wait_lock);
72 INIT_LIST_HEAD(&notif_wait->notif_waits);
73 init_waitqueue_head(&notif_wait->notif_waitq);
74}
75EXPORT_SYMBOL_GPL(iwl_notification_wait_init);
76
77void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
78 struct iwl_rx_packet *pkt)
79{
80 bool triggered = false;
81
82 if (!list_empty(&notif_wait->notif_waits)) {
83 struct iwl_notification_wait *w;
84
85 spin_lock(&notif_wait->notif_wait_lock);
86 list_for_each_entry(w, &notif_wait->notif_waits, list) {
87 int i;
88 bool found = false;
89
90 /*
91 * If it already finished (triggered) or has been
92 * aborted then don't evaluate it again to avoid races,
93 * Otherwise the function could be called again even
94 * though it returned true before
95 */
96 if (w->triggered || w->aborted)
97 continue;
98
99 for (i = 0; i < w->n_cmds; i++) {
100 if (w->cmds[i] == pkt->hdr.cmd) {
101 found = true;
102 break;
103 }
104 }
105 if (!found)
106 continue;
107
108 if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
109 w->triggered = true;
110 triggered = true;
111 }
112 }
113 spin_unlock(&notif_wait->notif_wait_lock);
114
115 }
116
117 if (triggered)
118 wake_up_all(&notif_wait->notif_waitq);
119}
120EXPORT_SYMBOL_GPL(iwl_notification_wait_notify);
121
122void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
123{
124 struct iwl_notification_wait *wait_entry;
125
126 spin_lock(&notif_wait->notif_wait_lock);
127 list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
128 wait_entry->aborted = true;
129 spin_unlock(&notif_wait->notif_wait_lock);
130
131 wake_up_all(&notif_wait->notif_waitq);
132}
133EXPORT_SYMBOL_GPL(iwl_abort_notification_waits);
134
135void
136iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
137 struct iwl_notification_wait *wait_entry,
138 const u8 *cmds, int n_cmds,
139 bool (*fn)(struct iwl_notif_wait_data *notif_wait,
140 struct iwl_rx_packet *pkt, void *data),
141 void *fn_data)
142{
143 if (WARN_ON(n_cmds > MAX_NOTIF_CMDS))
144 n_cmds = MAX_NOTIF_CMDS;
145
146 wait_entry->fn = fn;
147 wait_entry->fn_data = fn_data;
148 wait_entry->n_cmds = n_cmds;
149 memcpy(wait_entry->cmds, cmds, n_cmds);
150 wait_entry->triggered = false;
151 wait_entry->aborted = false;
152
153 spin_lock_bh(&notif_wait->notif_wait_lock);
154 list_add(&wait_entry->list, &notif_wait->notif_waits);
155 spin_unlock_bh(&notif_wait->notif_wait_lock);
156}
157EXPORT_SYMBOL_GPL(iwl_init_notification_wait);
158
159int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
160 struct iwl_notification_wait *wait_entry,
161 unsigned long timeout)
162{
163 int ret;
164
165 ret = wait_event_timeout(notif_wait->notif_waitq,
166 wait_entry->triggered || wait_entry->aborted,
167 timeout);
168
169 spin_lock_bh(&notif_wait->notif_wait_lock);
170 list_del(&wait_entry->list);
171 spin_unlock_bh(&notif_wait->notif_wait_lock);
172
173 if (wait_entry->aborted)
174 return -EIO;
175
176 /* return value is always >= 0 */
177 if (ret <= 0)
178 return -ETIMEDOUT;
179 return 0;
180}
181EXPORT_SYMBOL_GPL(iwl_wait_notification);
182
183void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
184 struct iwl_notification_wait *wait_entry)
185{
186 spin_lock_bh(&notif_wait->notif_wait_lock);
187 list_del(&wait_entry->list);
188 spin_unlock_bh(&notif_wait->notif_wait_lock);
189}
190EXPORT_SYMBOL_GPL(iwl_remove_notification);
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
deleted file mode 100644
index 821523100cf..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ /dev/null
@@ -1,138 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * distribution.
45 * * Neither the name Intel Corporation nor the names of its
46 * contributors may be used to endorse or promote products derived
47 * from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
50 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
51 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
52 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
53 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
54 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
55 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
59 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 *
61 *****************************************************************************/
62#ifndef __iwl_notif_wait_h__
63#define __iwl_notif_wait_h__
64
65#include <linux/wait.h>
66
67#include "iwl-trans.h"
68
69struct iwl_notif_wait_data {
70 struct list_head notif_waits;
71 spinlock_t notif_wait_lock;
72 wait_queue_head_t notif_waitq;
73};
74
75#define MAX_NOTIF_CMDS 5
76
77/**
78 * struct iwl_notification_wait - notification wait entry
79 * @list: list head for global list
80 * @fn: Function called with the notification. If the function
81 * returns true, the wait is over, if it returns false then
82 * the waiter stays blocked. If no function is given, any
83 * of the listed commands will unblock the waiter.
84 * @cmds: command IDs
85 * @n_cmds: number of command IDs
86 * @triggered: waiter should be woken up
87 * @aborted: wait was aborted
88 *
89 * This structure is not used directly, to wait for a
90 * notification declare it on the stack, and call
91 * iwlagn_init_notification_wait() with appropriate
92 * parameters. Then do whatever will cause the ucode
93 * to notify the driver, and to wait for that then
94 * call iwlagn_wait_notification().
95 *
96 * Each notification is one-shot. If at some point we
97 * need to support multi-shot notifications (which
98 * can't be allocated on the stack) we need to modify
99 * the code for them.
100 */
101struct iwl_notification_wait {
102 struct list_head list;
103
104 bool (*fn)(struct iwl_notif_wait_data *notif_data,
105 struct iwl_rx_packet *pkt, void *data);
106 void *fn_data;
107
108 u8 cmds[MAX_NOTIF_CMDS];
109 u8 n_cmds;
110 bool triggered, aborted;
111};
112
113
114/* caller functions */
115void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
116void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
117 struct iwl_rx_packet *pkt);
118void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
119
120/* user functions */
121void __acquires(wait_entry)
122iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
123 struct iwl_notification_wait *wait_entry,
124 const u8 *cmds, int n_cmds,
125 bool (*fn)(struct iwl_notif_wait_data *notif_data,
126 struct iwl_rx_packet *pkt, void *data),
127 void *fn_data);
128
129int __must_check __releases(wait_entry)
130iwl_wait_notification(struct iwl_notif_wait_data *notif_data,
131 struct iwl_notification_wait *wait_entry,
132 unsigned long timeout);
133
134void __releases(wait_entry)
135iwl_remove_notification(struct iwl_notif_wait_data *notif_data,
136 struct iwl_notification_wait *wait_entry);
137
138#endif /* __iwl_notif_wait_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
deleted file mode 100644
index c8d9b951746..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ /dev/null
@@ -1,227 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_op_mode_h__
64#define __iwl_op_mode_h__
65
66struct iwl_op_mode;
67struct iwl_trans;
68struct sk_buff;
69struct iwl_device_cmd;
70struct iwl_rx_cmd_buffer;
71struct iwl_fw;
72struct iwl_cfg;
73
74/**
75 * DOC: Operational mode - what is it ?
76 *
77 * The operational mode (a.k.a. op_mode) is the layer that implements
78 * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
79 * the transport API to access the HW. The op_mode doesn't need to know how the
80 * underlying HW works, since the transport layer takes care of that.
81 *
82 * There can be several op_mode: i.e. different fw APIs will require two
83 * different op_modes. This is why the op_mode is virtualized.
84 */
85
86/**
87 * DOC: Life cycle of the Operational mode
88 *
89 * The operational mode has a very simple life cycle.
90 *
91 * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
92 * capabilities advertized by the fw file (in TLV format).
93 * 2) The driver layer starts the op_mode (ops->start)
94 * 3) The op_mode registers registers mac80211
95 * 4) The op_mode is governed by mac80211
96 * 5) The driver layer stops the op_mode
97 */
98
99/**
100 * struct iwl_op_mode_ops - op_mode specific operations
101 *
102 * The op_mode exports its ops so that external components can start it and
103 * interact with it. The driver layer typically calls the start and stop
104 * handlers, the transport layer calls the others.
105 *
106 * All the handlers MUST be implemented
107 *
108 * @start: start the op_mode. The transport layer is already allocated.
109 * May sleep
110 * @stop: stop the op_mode. Must free all the memory allocated.
111 * May sleep
112 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
113 * HCMD the this Rx responds to.
114 * Must be atomic and called with BH disabled.
115 * @queue_full: notifies that a HW queue is full.
116 * Must be atomic and called with BH disabled.
117 * @queue_not_full: notifies that a HW queue is not full any more.
118 * Must be atomic and called with BH disabled.
119 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
120 * the radio is killed. Must be atomic.
121 * @free_skb: allows the transport layer to free skbs that haven't been
122 * reclaimed by the op_mode. This can happen when the driver is freed and
123 * there are Tx packets pending in the transport layer.
124 * Must be atomic
125 * @nic_error: error notification. Must be atomic and must be called with BH
126 * disabled.
127 * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
128 * called with BH disabled.
129 * @nic_config: configure NIC, called before firmware is started.
130 * May sleep
131 * @wimax_active: invoked when WiMax becomes active. Must be atomic and called
132 * with BH disabled.
133 */
134struct iwl_op_mode_ops {
135 struct iwl_op_mode *(*start)(struct iwl_trans *trans,
136 const struct iwl_cfg *cfg,
137 const struct iwl_fw *fw,
138 struct dentry *dbgfs_dir);
139 void (*stop)(struct iwl_op_mode *op_mode);
140 int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
141 struct iwl_device_cmd *cmd);
142 void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
143 void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
144 void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
145 void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
146 void (*nic_error)(struct iwl_op_mode *op_mode);
147 void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
148 void (*nic_config)(struct iwl_op_mode *op_mode);
149 void (*wimax_active)(struct iwl_op_mode *op_mode);
150};
151
152int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
153void iwl_opmode_deregister(const char *name);
154
155/**
156 * struct iwl_op_mode - operational mode
157 *
158 * This holds an implementation of the mac80211 / fw API.
159 *
160 * @ops - pointer to its own ops
161 */
162struct iwl_op_mode {
163 const struct iwl_op_mode_ops *ops;
164 const struct iwl_trans *trans;
165
166 char op_mode_specific[0] __aligned(sizeof(void *));
167};
168
169static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
170{
171 might_sleep();
172 op_mode->ops->stop(op_mode);
173}
174
175static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
176 struct iwl_rx_cmd_buffer *rxb,
177 struct iwl_device_cmd *cmd)
178{
179 return op_mode->ops->rx(op_mode, rxb, cmd);
180}
181
182static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
183 int queue)
184{
185 op_mode->ops->queue_full(op_mode, queue);
186}
187
188static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
189 int queue)
190{
191 op_mode->ops->queue_not_full(op_mode, queue);
192}
193
194static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
195 bool state)
196{
197 op_mode->ops->hw_rf_kill(op_mode, state);
198}
199
200static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
201 struct sk_buff *skb)
202{
203 op_mode->ops->free_skb(op_mode, skb);
204}
205
206static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode)
207{
208 op_mode->ops->nic_error(op_mode);
209}
210
211static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
212{
213 op_mode->ops->cmd_queue_full(op_mode);
214}
215
216static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
217{
218 might_sleep();
219 op_mode->ops->nic_config(op_mode);
220}
221
222static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
223{
224 op_mode->ops->wimax_active(op_mode);
225}
226
227#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index c3a4bb41e53..2f267b8aabb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -187,7 +187,7 @@
187#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3) 187#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
188#define SCD_QUEUE_STTS_REG_POS_WSL (4) 188#define SCD_QUEUE_STTS_REG_POS_WSL (4)
189#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) 189#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
190#define SCD_QUEUE_STTS_REG_MSK (0x017F0000) 190#define SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
191 191
192#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8) 192#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
193#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) 193#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
@@ -213,12 +213,13 @@
213#define SCD_CONTEXT_QUEUE_OFFSET(x)\ 213#define SCD_CONTEXT_QUEUE_OFFSET(x)\
214 (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8)) 214 (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
215 215
216#define SCD_TX_STTS_QUEUE_OFFSET(x)\
217 (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16))
218
219#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \ 216#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
220 ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc) 217 ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
221 218
219#define SCD_QUEUECHAIN_SEL_ALL(priv) \
220 (((1<<(priv)->hw_params.max_txq_num) - 1) &\
221 (~(1<<(priv)->cmd_queue)))
222
222#define SCD_BASE (PRPH_BASE + 0xa02c00) 223#define SCD_BASE (PRPH_BASE + 0xa02c00)
223 224
224#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0) 225#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0)
@@ -226,34 +227,12 @@
226#define SCD_AIT (SCD_BASE + 0x0c) 227#define SCD_AIT (SCD_BASE + 0x0c)
227#define SCD_TXFACT (SCD_BASE + 0x10) 228#define SCD_TXFACT (SCD_BASE + 0x10)
228#define SCD_ACTIVE (SCD_BASE + 0x14) 229#define SCD_ACTIVE (SCD_BASE + 0x14)
230#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4)
231#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4)
229#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) 232#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
230#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
231#define SCD_AGGR_SEL (SCD_BASE + 0x248) 233#define SCD_AGGR_SEL (SCD_BASE + 0x248)
232#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) 234#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
233 235#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4)
234static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
235{
236 if (chnl < 20)
237 return SCD_BASE + 0x18 + chnl * 4;
238 WARN_ON_ONCE(chnl >= 32);
239 return SCD_BASE + 0x284 + (chnl - 20) * 4;
240}
241
242static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
243{
244 if (chnl < 20)
245 return SCD_BASE + 0x68 + chnl * 4;
246 WARN_ON_ONCE(chnl >= 32);
247 return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
248}
249
250static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
251{
252 if (chnl < 20)
253 return SCD_BASE + 0x10c + chnl * 4;
254 WARN_ON_ONCE(chnl >= 32);
255 return SCD_BASE + 0x384 + (chnl - 20) * 4;
256}
257 236
258/*********************** END TX SCHEDULER *************************************/ 237/*********************** END TX SCHEDULER *************************************/
259 238
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
deleted file mode 100644
index 81e8c7126d7..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-test.c
+++ /dev/null
@@ -1,856 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/export.h>
65#include <net/netlink.h>
66
67#include "iwl-io.h"
68#include "iwl-fh.h"
69#include "iwl-prph.h"
70#include "iwl-trans.h"
71#include "iwl-test.h"
72#include "iwl-csr.h"
73#include "iwl-testmode.h"
74
75/*
76 * Periphery registers absolute lower bound. This is used in order to
77 * differentiate registery access through HBUS_TARG_PRPH_* and
78 * HBUS_TARG_MEM_* accesses.
79 */
80#define IWL_ABS_PRPH_START (0xA00000)
81
82/*
83 * The TLVs used in the gnl message policy between the kernel module and
84 * user space application. iwl_testmode_gnl_msg_policy is to be carried
85 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
86 * See iwl-testmode.h
87 */
88static
89struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
90 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
91
92 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
93 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
94
95 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
96 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
97 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
98
99 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
100 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
101
102 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
103
104 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
105 [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
106 [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
107
108 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
109
110 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
111
112 [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
113 [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
114 [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
115
116 [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
117 [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
118 [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
119 [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
120 [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
121
122 [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
123};
124
125static inline void iwl_test_trace_clear(struct iwl_test *tst)
126{
127 memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
128}
129
130static void iwl_test_trace_stop(struct iwl_test *tst)
131{
132 if (!tst->trace.enabled)
133 return;
134
135 if (tst->trace.cpu_addr && tst->trace.dma_addr)
136 dma_free_coherent(tst->trans->dev,
137 tst->trace.tsize,
138 tst->trace.cpu_addr,
139 tst->trace.dma_addr);
140
141 iwl_test_trace_clear(tst);
142}
143
144static inline void iwl_test_mem_clear(struct iwl_test *tst)
145{
146 memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
147}
148
149static inline void iwl_test_mem_stop(struct iwl_test *tst)
150{
151 if (!tst->mem.in_read)
152 return;
153
154 iwl_test_mem_clear(tst);
155}
156
157/*
158 * Initializes the test object
159 * During the lifetime of the test object it is assumed that the transport is
160 * started. The test object should be stopped before the transport is stopped.
161 */
162void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
163 struct iwl_test_ops *ops)
164{
165 tst->trans = trans;
166 tst->ops = ops;
167
168 iwl_test_trace_clear(tst);
169 iwl_test_mem_clear(tst);
170}
171EXPORT_SYMBOL_GPL(iwl_test_init);
172
173/*
174 * Stop the test object
175 */
176void iwl_test_free(struct iwl_test *tst)
177{
178 iwl_test_mem_stop(tst);
179 iwl_test_trace_stop(tst);
180}
181EXPORT_SYMBOL_GPL(iwl_test_free);
182
183static inline int iwl_test_send_cmd(struct iwl_test *tst,
184 struct iwl_host_cmd *cmd)
185{
186 return tst->ops->send_cmd(tst->trans->op_mode, cmd);
187}
188
189static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
190{
191 return tst->ops->valid_hw_addr(addr);
192}
193
194static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
195{
196 return tst->ops->get_fw_ver(tst->trans->op_mode);
197}
198
199static inline struct sk_buff*
200iwl_test_alloc_reply(struct iwl_test *tst, int len)
201{
202 return tst->ops->alloc_reply(tst->trans->op_mode, len);
203}
204
205static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
206{
207 return tst->ops->reply(tst->trans->op_mode, skb);
208}
209
210static inline struct sk_buff*
211iwl_test_alloc_event(struct iwl_test *tst, int len)
212{
213 return tst->ops->alloc_event(tst->trans->op_mode, len);
214}
215
216static inline void
217iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
218{
219 return tst->ops->event(tst->trans->op_mode, skb);
220}
221
222/*
223 * This function handles the user application commands to the fw. The fw
224 * commands are sent in a synchronuous manner. In case that the user requested
225 * to get commands response, it is send to the user.
226 */
227static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
228{
229 struct iwl_host_cmd cmd;
230 struct iwl_rx_packet *pkt;
231 struct sk_buff *skb;
232 void *reply_buf;
233 u32 reply_len;
234 int ret;
235 bool cmd_want_skb;
236
237 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
238
239 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
240 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
241 IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
242 return -ENOMSG;
243 }
244
245 cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
246 cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
247 if (cmd_want_skb)
248 cmd.flags |= CMD_WANT_SKB;
249
250 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
251 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
252 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
253 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
254 IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
255 cmd.id, cmd.flags, cmd.len[0]);
256
257 ret = iwl_test_send_cmd(tst, &cmd);
258 if (ret) {
259 IWL_ERR(tst->trans, "Failed to send hcmd\n");
260 return ret;
261 }
262 if (!cmd_want_skb)
263 return ret;
264
265 /* Handling return of SKB to the user */
266 pkt = cmd.resp_pkt;
267 if (!pkt) {
268 IWL_ERR(tst->trans, "HCMD received a null response packet\n");
269 return ret;
270 }
271
272 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
273 skb = iwl_test_alloc_reply(tst, reply_len + 20);
274 reply_buf = kmalloc(reply_len, GFP_KERNEL);
275 if (!skb || !reply_buf) {
276 kfree_skb(skb);
277 kfree(reply_buf);
278 return -ENOMEM;
279 }
280
281 /* The reply is in a page, that we cannot send to user space. */
282 memcpy(reply_buf, &(pkt->hdr), reply_len);
283 iwl_free_resp(&cmd);
284
285 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
286 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
287 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
288 goto nla_put_failure;
289 return iwl_test_reply(tst, skb);
290
291nla_put_failure:
292 IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
293 kfree(reply_buf);
294 kfree_skb(skb);
295 return -ENOMSG;
296}
297
298/*
299 * Handles the user application commands for register access.
300 */
301static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
302{
303 u32 ofs, val32, cmd;
304 u8 val8;
305 struct sk_buff *skb;
306 int status = 0;
307 struct iwl_trans *trans = tst->trans;
308
309 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
310 IWL_ERR(trans, "Missing reg offset\n");
311 return -ENOMSG;
312 }
313
314 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
315 IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
316
317 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
318
319 /*
320 * Allow access only to FH/CSR/HBUS in direct mode.
321 * Since we don't have the upper bounds for the CSR and HBUS segments,
322 * we will use only the upper bound of FH for sanity check.
323 */
324 if (ofs >= FH_MEM_UPPER_BOUND) {
325 IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
326 FH_MEM_UPPER_BOUND);
327 return -EINVAL;
328 }
329
330 switch (cmd) {
331 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
332 val32 = iwl_read_direct32(tst->trans, ofs);
333 IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
334
335 skb = iwl_test_alloc_reply(tst, 20);
336 if (!skb) {
337 IWL_ERR(trans, "Memory allocation fail\n");
338 return -ENOMEM;
339 }
340 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
341 goto nla_put_failure;
342 status = iwl_test_reply(tst, skb);
343 if (status < 0)
344 IWL_ERR(trans, "Error sending msg : %d\n", status);
345 break;
346
347 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
348 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
349 IWL_ERR(trans, "Missing value to write\n");
350 return -ENOMSG;
351 } else {
352 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
353 IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
354 iwl_write_direct32(tst->trans, ofs, val32);
355 }
356 break;
357
358 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
359 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
360 IWL_ERR(trans, "Missing value to write\n");
361 return -ENOMSG;
362 } else {
363 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
364 IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
365 iwl_write8(tst->trans, ofs, val8);
366 }
367 break;
368
369 default:
370 IWL_ERR(trans, "Unknown test register cmd ID\n");
371 return -ENOMSG;
372 }
373
374 return status;
375
376nla_put_failure:
377 kfree_skb(skb);
378 return -EMSGSIZE;
379}
380
381/*
382 * Handles the request to start FW tracing. Allocates of the trace buffer
383 * and sends a reply to user space with the address of the allocated buffer.
384 */
385static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
386{
387 struct sk_buff *skb;
388 int status = 0;
389
390 if (tst->trace.enabled)
391 return -EBUSY;
392
393 if (!tb[IWL_TM_ATTR_TRACE_SIZE])
394 tst->trace.size = TRACE_BUFF_SIZE_DEF;
395 else
396 tst->trace.size =
397 nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
398
399 if (!tst->trace.size)
400 return -EINVAL;
401
402 if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
403 tst->trace.size > TRACE_BUFF_SIZE_MAX)
404 return -EINVAL;
405
406 tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
407 tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
408 tst->trace.tsize,
409 &tst->trace.dma_addr,
410 GFP_KERNEL);
411 if (!tst->trace.cpu_addr)
412 return -ENOMEM;
413
414 tst->trace.enabled = true;
415 tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
416
417 memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
418
419 skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
420 if (!skb) {
421 IWL_ERR(tst->trans, "Memory allocation fail\n");
422 iwl_test_trace_stop(tst);
423 return -ENOMEM;
424 }
425
426 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
427 sizeof(tst->trace.dma_addr),
428 (u64 *)&tst->trace.dma_addr))
429 goto nla_put_failure;
430
431 status = iwl_test_reply(tst, skb);
432 if (status < 0)
433 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
434
435 tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
436 DUMP_CHUNK_SIZE);
437
438 return status;
439
440nla_put_failure:
441 kfree_skb(skb);
442 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
443 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
444 iwl_test_trace_stop(tst);
445 return -EMSGSIZE;
446}
447
448/*
449 * Handles indirect read from the periphery or the SRAM. The read is performed
450 * to a temporary buffer. The user space application should later issue a dump
451 */
452static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
453{
454 struct iwl_trans *trans = tst->trans;
455 unsigned long flags;
456 int i;
457
458 if (size & 0x3)
459 return -EINVAL;
460
461 tst->mem.size = size;
462 tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
463 if (tst->mem.addr == NULL)
464 return -ENOMEM;
465
466 /* Hard-coded periphery absolute address */
467 if (IWL_ABS_PRPH_START <= addr &&
468 addr < IWL_ABS_PRPH_START + PRPH_END) {
469 spin_lock_irqsave(&trans->reg_lock, flags);
470 iwl_grab_nic_access(trans);
471 iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
472 addr | (3 << 24));
473 for (i = 0; i < size; i += 4)
474 *(u32 *)(tst->mem.addr + i) =
475 iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
476 iwl_release_nic_access(trans);
477 spin_unlock_irqrestore(&trans->reg_lock, flags);
478 } else { /* target memory (SRAM) */
479 _iwl_read_targ_mem_dwords(trans, addr,
480 tst->mem.addr,
481 tst->mem.size / 4);
482 }
483
484 tst->mem.nchunks =
485 DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
486 tst->mem.in_read = true;
487 return 0;
488
489}
490
491/*
492 * Handles indirect write to the periphery or SRAM. The is performed to a
493 * temporary buffer.
494 */
495static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
496 u32 size, unsigned char *buf)
497{
498 struct iwl_trans *trans = tst->trans;
499 u32 val, i;
500 unsigned long flags;
501
502 if (IWL_ABS_PRPH_START <= addr &&
503 addr < IWL_ABS_PRPH_START + PRPH_END) {
504 /* Periphery writes can be 1-3 bytes long, or DWORDs */
505 if (size < 4) {
506 memcpy(&val, buf, size);
507 spin_lock_irqsave(&trans->reg_lock, flags);
508 iwl_grab_nic_access(trans);
509 iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
510 (addr & 0x0000FFFF) |
511 ((size - 1) << 24));
512 iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
513 iwl_release_nic_access(trans);
514 /* needed after consecutive writes w/o read */
515 mmiowb();
516 spin_unlock_irqrestore(&trans->reg_lock, flags);
517 } else {
518 if (size % 4)
519 return -EINVAL;
520 for (i = 0; i < size; i += 4)
521 iwl_write_prph(trans, addr+i,
522 *(u32 *)(buf+i));
523 }
524 } else if (iwl_test_valid_hw_addr(tst, addr)) {
525 _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
526 } else {
527 return -EINVAL;
528 }
529 return 0;
530}
531
532/*
533 * Handles the user application commands for indirect read/write
534 * to/from the periphery or the SRAM.
535 */
536static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
537{
538 u32 addr, size, cmd;
539 unsigned char *buf;
540
541 /* Both read and write should be blocked, for atomicity */
542 if (tst->mem.in_read)
543 return -EBUSY;
544
545 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
546 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
547 IWL_ERR(tst->trans, "Error finding memory offset address\n");
548 return -ENOMSG;
549 }
550 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
551 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
552 IWL_ERR(tst->trans, "Error finding size for memory reading\n");
553 return -ENOMSG;
554 }
555 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
556
557 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
558 return iwl_test_indirect_read(tst, addr, size);
559 } else {
560 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
561 return -EINVAL;
562 buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
563 return iwl_test_indirect_write(tst, addr, size, buf);
564 }
565}
566
567/*
568 * Enable notifications to user space
569 */
570static int iwl_test_notifications(struct iwl_test *tst,
571 struct nlattr **tb)
572{
573 tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
574 return 0;
575}
576
577/*
578 * Handles the request to get the device id
579 */
580static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
581{
582 u32 devid = tst->trans->hw_id;
583 struct sk_buff *skb;
584 int status;
585
586 IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
587
588 skb = iwl_test_alloc_reply(tst, 20);
589 if (!skb) {
590 IWL_ERR(tst->trans, "Memory allocation fail\n");
591 return -ENOMEM;
592 }
593
594 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
595 goto nla_put_failure;
596 status = iwl_test_reply(tst, skb);
597 if (status < 0)
598 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
599
600 return 0;
601
602nla_put_failure:
603 kfree_skb(skb);
604 return -EMSGSIZE;
605}
606
607/*
608 * Handles the request to get the FW version
609 */
610static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
611{
612 struct sk_buff *skb;
613 int status;
614 u32 ver = iwl_test_fw_ver(tst);
615
616 IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
617
618 skb = iwl_test_alloc_reply(tst, 20);
619 if (!skb) {
620 IWL_ERR(tst->trans, "Memory allocation fail\n");
621 return -ENOMEM;
622 }
623
624 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
625 goto nla_put_failure;
626
627 status = iwl_test_reply(tst, skb);
628 if (status < 0)
629 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
630
631 return 0;
632
633nla_put_failure:
634 kfree_skb(skb);
635 return -EMSGSIZE;
636}
637
638/*
639 * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists
640 */
641int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
642 void *data, int len)
643{
644 int result;
645
646 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
647 iwl_testmode_gnl_msg_policy);
648 if (result) {
649 IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
650 return result;
651 }
652
653 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
654 if (!tb[IWL_TM_ATTR_COMMAND]) {
655 IWL_ERR(tst->trans, "Missing testmode command type\n");
656 return -ENOMSG;
657 }
658 return 0;
659}
660EXPORT_SYMBOL_GPL(iwl_test_parse);
661
662/*
663 * Handle test commands.
664 * Returns 1 for unknown commands (not handled by the test object); negative
665 * value in case of error.
666 */
667int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
668{
669 int result;
670
671 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
672 case IWL_TM_CMD_APP2DEV_UCODE:
673 IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
674 result = iwl_test_fw_cmd(tst, tb);
675 break;
676
677 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
678 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
679 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
680 IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
681 result = iwl_test_reg(tst, tb);
682 break;
683
684 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
685 IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
686 result = iwl_test_trace_begin(tst, tb);
687 break;
688
689 case IWL_TM_CMD_APP2DEV_END_TRACE:
690 iwl_test_trace_stop(tst);
691 result = 0;
692 break;
693
694 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
695 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
696 IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
697 result = iwl_test_indirect_mem(tst, tb);
698 break;
699
700 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
701 IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
702 result = iwl_test_notifications(tst, tb);
703 break;
704
705 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
706 IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
707 result = iwl_test_get_fw_ver(tst, tb);
708 break;
709
710 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
711 IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
712 result = iwl_test_get_dev_id(tst, tb);
713 break;
714
715 default:
716 IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
717 result = 1;
718 break;
719 }
720 return result;
721}
722EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
723
724static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
725 struct netlink_callback *cb)
726{
727 int idx, length;
728
729 if (!tst->trace.enabled || !tst->trace.trace_addr)
730 return -EFAULT;
731
732 idx = cb->args[4];
733 if (idx >= tst->trace.nchunks)
734 return -ENOENT;
735
736 length = DUMP_CHUNK_SIZE;
737 if (((idx + 1) == tst->trace.nchunks) &&
738 (tst->trace.size % DUMP_CHUNK_SIZE))
739 length = tst->trace.size %
740 DUMP_CHUNK_SIZE;
741
742 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
743 tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
744 goto nla_put_failure;
745
746 cb->args[4] = ++idx;
747 return 0;
748
749 nla_put_failure:
750 return -ENOBUFS;
751}
752
753static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
754 struct netlink_callback *cb)
755{
756 int idx, length;
757
758 if (!tst->mem.in_read)
759 return -EFAULT;
760
761 idx = cb->args[4];
762 if (idx >= tst->mem.nchunks) {
763 iwl_test_mem_stop(tst);
764 return -ENOENT;
765 }
766
767 length = DUMP_CHUNK_SIZE;
768 if (((idx + 1) == tst->mem.nchunks) &&
769 (tst->mem.size % DUMP_CHUNK_SIZE))
770 length = tst->mem.size % DUMP_CHUNK_SIZE;
771
772 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
773 tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
774 goto nla_put_failure;
775
776 cb->args[4] = ++idx;
777 return 0;
778
779 nla_put_failure:
780 return -ENOBUFS;
781}
782
783/*
784 * Handle dump commands.
785 * Returns 1 for unknown commands (not handled by the test object); negative
786 * value in case of error.
787 */
788int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
789 struct netlink_callback *cb)
790{
791 int result;
792
793 switch (cmd) {
794 case IWL_TM_CMD_APP2DEV_READ_TRACE:
795 IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
796 result = iwl_test_trace_dump(tst, skb, cb);
797 break;
798
799 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
800 IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
801 result = iwl_test_buffer_dump(tst, skb, cb);
802 break;
803
804 default:
805 result = 1;
806 break;
807 }
808 return result;
809}
810EXPORT_SYMBOL_GPL(iwl_test_dump);
811
812/*
813 * Multicast a spontaneous messages from the device to the user space.
814 */
815static void iwl_test_send_rx(struct iwl_test *tst,
816 struct iwl_rx_cmd_buffer *rxb)
817{
818 struct sk_buff *skb;
819 struct iwl_rx_packet *data;
820 int length;
821
822 data = rxb_addr(rxb);
823 length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
824
825 /* the length doesn't include len_n_flags field, so add it manually */
826 length += sizeof(__le32);
827
828 skb = iwl_test_alloc_event(tst, length + 20);
829 if (skb == NULL) {
830 IWL_ERR(tst->trans, "Out of memory for message to user\n");
831 return;
832 }
833
834 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
835 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
836 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
837 goto nla_put_failure;
838
839 iwl_test_event(tst, skb);
840 return;
841
842nla_put_failure:
843 kfree_skb(skb);
844 IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
845}
846
847/*
848 * Called whenever a Rx frames is recevied from the device. If notifications to
849 * the user space are requested, sends the frames to the user.
850 */
851void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
852{
853 if (tst->notify)
854 iwl_test_send_rx(tst, rxb);
855}
856EXPORT_SYMBOL_GPL(iwl_test_rx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
deleted file mode 100644
index e13ffa8acc0..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-test.h
+++ /dev/null
@@ -1,161 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_TEST_H__
65#define __IWL_TEST_H__
66
67#include <linux/types.h>
68#include "iwl-trans.h"
69
70struct iwl_test_trace {
71 u32 size;
72 u32 tsize;
73 u32 nchunks;
74 u8 *cpu_addr;
75 u8 *trace_addr;
76 dma_addr_t dma_addr;
77 bool enabled;
78};
79
80struct iwl_test_mem {
81 u32 size;
82 u32 nchunks;
83 u8 *addr;
84 bool in_read;
85};
86
87/*
88 * struct iwl_test_ops: callback to the op mode
89 *
90 * The structure defines the callbacks that the op_mode should handle,
91 * inorder to handle logic that is out of the scope of iwl_test. The
92 * op_mode must set all the callbacks.
93
94 * @send_cmd: handler that is used by the test object to request the
95 * op_mode to send a command to the fw.
96 *
97 * @valid_hw_addr: handler that is used by the test object to request the
98 * op_mode to check if the given address is a valid address.
99 *
100 * @get_fw_ver: handler used to get the FW version.
101 *
102 * @alloc_reply: handler used by the test object to request the op_mode
103 * to allocate an skb for sending a reply to the user, and initialize
104 * the skb. It is assumed that the test object only fills the required
105 * attributes.
106 *
107 * @reply: handler used by the test object to request the op_mode to reply
108 * to a request. The skb is an skb previously allocated by the the
109 * alloc_reply callback.
110 I
111 * @alloc_event: handler used by the test object to request the op_mode
112 * to allocate an skb for sending an event, and initialize
113 * the skb. It is assumed that the test object only fills the required
114 * attributes.
115 *
116 * @reply: handler used by the test object to request the op_mode to send
117 * an event. The skb is an skb previously allocated by the the
118 * alloc_event callback.
119 */
120struct iwl_test_ops {
121 int (*send_cmd)(struct iwl_op_mode *op_modes,
122 struct iwl_host_cmd *cmd);
123 bool (*valid_hw_addr)(u32 addr);
124 u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
125
126 struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
127 int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
128 struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len);
129 void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
130};
131
132struct iwl_test {
133 struct iwl_trans *trans;
134 struct iwl_test_ops *ops;
135 struct iwl_test_trace trace;
136 struct iwl_test_mem mem;
137 bool notify;
138};
139
140void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
141 struct iwl_test_ops *ops);
142
143void iwl_test_free(struct iwl_test *tst);
144
145int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
146 void *data, int len);
147
148int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
149
150int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
151 struct netlink_callback *cb);
152
153void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
154
155static inline void iwl_test_enable_notifications(struct iwl_test *tst,
156 bool enable)
157{
158 tst->notify = enable;
159}
160
161#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index 6ba211b0942..b980bda4b0f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. 8 * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. 33 * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -76,9 +76,9 @@
76 * the actual uCode host command ID is carried with 76 * the actual uCode host command ID is carried with
77 * IWL_TM_ATTR_UCODE_CMD_ID 77 * IWL_TM_ATTR_UCODE_CMD_ID
78 * 78 *
79 * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: 79 * @IWL_TM_CMD_APP2DEV_REG_READ32:
80 * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: 80 * @IWL_TM_CMD_APP2DEV_REG_WRITE32:
81 * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: 81 * @IWL_TM_CMD_APP2DEV_REG_WRITE8:
82 * commands from user applicaiton to access register 82 * commands from user applicaiton to access register
83 * 83 *
84 * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name 84 * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
@@ -99,38 +99,20 @@
99 * to user application 99 * to user application
100 * @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT: 100 * @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT:
101 * commands from kernel space to multicast the spontaneous messages 101 * commands from kernel space to multicast the spontaneous messages
102 * to user application, or reply of host commands 102 * to user application
103 * @IWL_TM_CMD_DEV2APP_EEPROM_RSP: 103 * @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
104 * commands from kernel space to carry the eeprom response 104 * commands from kernel space to carry the eeprom response
105 * to user application 105 * to user application
106 *
107 * @IWL_TM_CMD_APP2DEV_OWNERSHIP: 106 * @IWL_TM_CMD_APP2DEV_OWNERSHIP:
108 * commands from user application to own change the ownership of the uCode 107 * commands from user application to own change the ownership of the uCode
109 * if application has the ownership, the only host command from 108 * if application has the ownership, the only host command from
110 * testmode will deliver to uCode. Default owner is driver 109 * testmode will deliver to uCode. Default owner is driver
111 *
112 * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake On Wireless LAN uCode image
113 * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
114 * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device
115 * @IWL_TM_CMD_APP2DEV_GET_FW_INFO:
116 * retrieve information of existing loaded uCode image
117 *
118 * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
119 * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
120 * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
121 * Commands to read/write data from periphery or SRAM memory ranges.
122 * Fore reading, a READ command is sent from the userspace and the data
123 * is returned when the user calls a DUMP command.
124 * For writing, only a WRITE command is used.
125 * @IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
126 * Command to enable/disable notifications (currently RX packets) from the
127 * driver to userspace.
128 */ 110 */
129enum iwl_tm_cmd_t { 111enum iwl_tm_cmd_t {
130 IWL_TM_CMD_APP2DEV_UCODE = 1, 112 IWL_TM_CMD_APP2DEV_UCODE = 1,
131 IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2, 113 IWL_TM_CMD_APP2DEV_REG_READ32 = 2,
132 IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3, 114 IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3,
133 IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4, 115 IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4,
134 IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5, 116 IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
135 IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6, 117 IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
136 IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7, 118 IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
@@ -144,19 +126,7 @@ enum iwl_tm_cmd_t {
144 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15, 126 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
145 IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16, 127 IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
146 IWL_TM_CMD_APP2DEV_OWNERSHIP = 17, 128 IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
147 RESERVED_18 = 18, 129 IWL_TM_CMD_MAX = 18,
148 RESERVED_19 = 19,
149 RESERVED_20 = 20,
150 RESERVED_21 = 21,
151 IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
152 IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
153 IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
154 IWL_TM_CMD_APP2DEV_GET_FW_INFO = 25,
155 IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ = 26,
156 IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP = 27,
157 IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE = 28,
158 IWL_TM_CMD_APP2DEV_NOTIFICATIONS = 29,
159 IWL_TM_CMD_MAX = 30,
160}; 130};
161 131
162/* 132/*
@@ -177,6 +147,8 @@ enum iwl_tm_cmd_t {
177 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE, 147 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE,
178 * The mandatory fields are : 148 * The mandatory fields are :
179 * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID; 149 * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID;
150 * IWL_TM_ATTR_COMMAND_FLAG for the flags of the commands;
151 * The optional fields are:
180 * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload 152 * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload
181 * to the ucode 153 * to the ucode
182 * 154 *
@@ -224,46 +196,6 @@ enum iwl_tm_cmd_t {
224 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP, 196 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
225 * The mandatory fields are: 197 * The mandatory fields are:
226 * IWL_TM_ATTR_UCODE_OWNER for the new owner 198 * IWL_TM_ATTR_UCODE_OWNER for the new owner
227 *
228 * @IWL_TM_ATTR_MEM_ADDR:
229 * @IWL_TM_ATTR_BUFFER_SIZE:
230 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ
231 * or IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE.
232 * The mandatory fields are:
233 * IWL_TM_ATTR_MEM_ADDR for the address in SRAM/periphery to read/write
234 * IWL_TM_ATTR_BUFFER_SIZE for the buffer size of data to read/write.
235 *
236 * @IWL_TM_ATTR_BUFFER_DUMP:
237 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP,
238 * IWL_TM_ATTR_BUFFER_DUMP is used for the data that was read.
239 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE,
240 * this attribute contains the data to write.
241 *
242 * @IWL_TM_ATTR_FW_VERSION:
243 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
244 * IWL_TM_ATTR_FW_VERSION for the uCode version
245 *
246 * @IWL_TM_ATTR_DEVICE_ID:
247 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
248 * IWL_TM_ATTR_DEVICE_ID for the device ID information
249 *
250 * @IWL_TM_ATTR_FW_TYPE:
251 * @IWL_TM_ATTR_FW_INST_SIZE:
252 * @IWL_TM_ATTR_FW_DATA_SIZE:
253 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_INFO,
254 * The mandatory fields are:
255 * IWL_TM_ATTR_FW_TYPE for the uCode type (INIT/RUNTIME/...)
256 * IWL_TM_ATTR_FW_INST_SIZE for the size of instruction section
257 * IWL_TM_ATTR_FW_DATA_SIZE for the size of data section
258 *
259 * @IWL_TM_ATTR_UCODE_CMD_SKB:
260 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE this flag
261 * indicates that the user wants to receive the response of the command
262 * in a reply SKB. If it's not present, the response is not returned.
263 * @IWL_TM_ATTR_ENABLE_NOTIFICATIONS:
264 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_NOTIFICATIONS, this
265 * flag enables (if present) or disables (if not) the forwarding
266 * to userspace.
267 */ 199 */
268enum iwl_tm_attr_t { 200enum iwl_tm_attr_t {
269 IWL_TM_ATTR_NOT_APPLICABLE = 0, 201 IWL_TM_ATTR_NOT_APPLICABLE = 0,
@@ -281,17 +213,7 @@ enum iwl_tm_attr_t {
281 IWL_TM_ATTR_TRACE_DUMP = 12, 213 IWL_TM_ATTR_TRACE_DUMP = 12,
282 IWL_TM_ATTR_FIXRATE = 13, 214 IWL_TM_ATTR_FIXRATE = 13,
283 IWL_TM_ATTR_UCODE_OWNER = 14, 215 IWL_TM_ATTR_UCODE_OWNER = 14,
284 IWL_TM_ATTR_MEM_ADDR = 15, 216 IWL_TM_ATTR_MAX = 15,
285 IWL_TM_ATTR_BUFFER_SIZE = 16,
286 IWL_TM_ATTR_BUFFER_DUMP = 17,
287 IWL_TM_ATTR_FW_VERSION = 18,
288 IWL_TM_ATTR_DEVICE_ID = 19,
289 IWL_TM_ATTR_FW_TYPE = 20,
290 IWL_TM_ATTR_FW_INST_SIZE = 21,
291 IWL_TM_ATTR_FW_DATA_SIZE = 22,
292 IWL_TM_ATTR_UCODE_CMD_SKB = 23,
293 IWL_TM_ATTR_ENABLE_NOTIFICATION = 24,
294 IWL_TM_ATTR_MAX = 25,
295}; 217};
296 218
297/* uCode trace buffer */ 219/* uCode trace buffer */
@@ -299,11 +221,6 @@ enum iwl_tm_attr_t {
299#define TRACE_BUFF_SIZE_MIN 0x20000 221#define TRACE_BUFF_SIZE_MIN 0x20000
300#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN 222#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
301#define TRACE_BUFF_PADD 0x2000 223#define TRACE_BUFF_PADD 0x2000
302 224#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024)
303/* Maximum data size of each dump it packet */
304#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
305
306/* Address offset of data segment in SRAM */
307#define SRAM_DATA_SEG_OFFSET 0x800000
308 225
309#endif 226#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index b76532e238c..7993aa7ae66 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -63,631 +63,163 @@
63#ifndef __iwl_trans_h__ 63#ifndef __iwl_trans_h__
64#define __iwl_trans_h__ 64#define __iwl_trans_h__
65 65
66#include <linux/ieee80211.h> 66 /*This file includes the declaration that are exported from the transport
67#include <linux/mm.h> /* for page_address */ 67 * layer */
68 68
69#include "iwl-debug.h" 69struct iwl_priv;
70#include "iwl-config.h" 70struct iwl_rxon_context;
71#include "iwl-fw.h" 71struct iwl_host_cmd;
72
73/**
74 * DOC: Transport layer - what is it ?
75 *
76 * The tranport layer is the layer that deals with the HW directly. It provides
77 * an abstraction of the underlying HW to the upper layer. The transport layer
78 * doesn't provide any policy, algorithm or anything of this kind, but only
79 * mechanisms to make the HW do something.It is not completely stateless but
80 * close to it.
81 * We will have an implementation for each different supported bus.
82 */
83
84/**
85 * DOC: Life cycle of the transport layer
86 *
87 * The transport layer has a very precise life cycle.
88 *
89 * 1) A helper function is called during the module initialization and
90 * registers the bus driver's ops with the transport's alloc function.
91 * 2) Bus's probe calls to the transport layer's allocation functions.
92 * Of course this function is bus specific.
93 * 3) This allocation functions will spawn the upper layer which will
94 * register mac80211.
95 *
96 * 4) At some point (i.e. mac80211's start call), the op_mode will call
97 * the following sequence:
98 * start_hw
99 * start_fw
100 *
101 * 5) Then when finished (or reset):
102 * stop_fw (a.k.a. stop device for the moment)
103 * stop_hw
104 *
105 * 6) Eventually, the free function will be called.
106 */
107
108/**
109 * DOC: Host command section
110 *
111 * A host command is a commaned issued by the upper layer to the fw. There are
112 * several versions of fw that have several APIs. The transport layer is
113 * completely agnostic to these differences.
114 * The transport does provide helper functionnality (i.e. SYNC / ASYNC mode),
115 */
116#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
117#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
118#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
119#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
120#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
121#define SEQ_TO_INDEX(s) ((s) & 0xff)
122#define INDEX_TO_SEQ(i) ((i) & 0xff)
123#define SEQ_RX_FRAME cpu_to_le16(0x8000)
124
125/**
126 * struct iwl_cmd_header
127 *
128 * This header format appears in the beginning of each command sent from the
129 * driver, and each response/notification received from uCode.
130 */
131struct iwl_cmd_header {
132 u8 cmd; /* Command ID: REPLY_RXON, etc. */
133 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
134 /*
135 * The driver sets up the sequence number to values of its choosing.
136 * uCode does not use this value, but passes it back to the driver
137 * when sending the response to each driver-originated command, so
138 * the driver can match the response to the command. Since the values
139 * don't get used by uCode, the driver may set up an arbitrary format.
140 *
141 * There is one exception: uCode sets bit 15 when it originates
142 * the response/notification, i.e. when the response/notification
143 * is not a direct response to a command sent by the driver. For
144 * example, uCode issues REPLY_RX when it sends a received frame
145 * to the driver; it is not a direct response to any driver command.
146 *
147 * The Linux driver uses the following format:
148 *
149 * 0:7 tfd index - position within TX queue
150 * 8:12 TX queue id
151 * 13:14 reserved
152 * 15 unsolicited RX or uCode-originated notification
153 */
154 __le16 sequence;
155} __packed;
156
157/* iwl_cmd_header flags value */
158#define IWL_CMD_FAILED_MSK 0x40
159
160
161#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
162#define FH_RSCSR_FRAME_INVALID 0x55550000
163#define FH_RSCSR_FRAME_ALIGN 0x40
164
165struct iwl_rx_packet {
166 /*
167 * The first 4 bytes of the RX frame header contain both the RX frame
168 * size and some flags.
169 * Bit fields:
170 * 31: flag flush RB request
171 * 30: flag ignore TC (terminal counter) request
172 * 29: flag fast IRQ request
173 * 28-14: Reserved
174 * 13-00: RX frame size
175 */
176 __le32 len_n_flags;
177 struct iwl_cmd_header hdr;
178 u8 data[];
179} __packed;
180
181/**
182 * enum CMD_MODE - how to send the host commands ?
183 *
184 * @CMD_SYNC: The caller will be stalled until the fw responds to the command
185 * @CMD_ASYNC: Return right away and don't want for the response
186 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
187 * response. The caller needs to call iwl_free_resp when done.
188 * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
189 * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
190 * copied. The pointer passed to the response handler is in the transport
191 * ownership and don't need to be freed by the op_mode. This also means
192 * that the pointer is invalidated after the op_mode's handler returns.
193 * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
194 */
195enum CMD_MODE {
196 CMD_SYNC = 0,
197 CMD_ASYNC = BIT(0),
198 CMD_WANT_SKB = BIT(1),
199 CMD_WANT_HCMD = BIT(2),
200 CMD_ON_DEMAND = BIT(3),
201};
202
203#define DEF_CMD_PAYLOAD_SIZE 320
204
205/**
206 * struct iwl_device_cmd
207 *
208 * For allocation of the command and tx queues, this establishes the overall
209 * size of the largest command we send to uCode, except for commands that
210 * aren't fully copied and use other TFD space.
211 */
212struct iwl_device_cmd {
213 struct iwl_cmd_header hdr; /* uCode API */
214 u8 payload[DEF_CMD_PAYLOAD_SIZE];
215} __packed;
216
217#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
218
219#define IWL_MAX_CMD_TFDS 2
220
221/**
222 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
223 *
224 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
225 * ring. The transport layer doesn't map the command's buffer to DMA, but
226 * rather copies it to an previously allocated DMA buffer. This flag tells
227 * the transport layer not to copy the command, but to map the existing
228 * buffer (that is passed in) instead. This saves the memcpy and allows
229 * commands that are bigger than the fixed buffer to be submitted.
230 * Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
231 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
232 * chunk internally and free it again after the command completes. This
233 * can (currently) be used only once per command.
234 * Note that a TFD entry after a DUP one cannot be a normal copied one.
235 */
236enum iwl_hcmd_dataflag {
237 IWL_HCMD_DFL_NOCOPY = BIT(0),
238 IWL_HCMD_DFL_DUP = BIT(1),
239};
240
241/**
242 * struct iwl_host_cmd - Host command to the uCode
243 *
244 * @data: array of chunks that composes the data of the host command
245 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
246 * @_rx_page_order: (internally used to free response packet)
247 * @_rx_page_addr: (internally used to free response packet)
248 * @handler_status: return value of the handler of the command
249 * (put in setup_rx_handlers) - valid for SYNC mode only
250 * @flags: can be CMD_*
251 * @len: array of the lenths of the chunks in data
252 * @dataflags: IWL_HCMD_DFL_*
253 * @id: id of the host command
254 */
255struct iwl_host_cmd {
256 const void *data[IWL_MAX_CMD_TFDS];
257 struct iwl_rx_packet *resp_pkt;
258 unsigned long _rx_page_addr;
259 u32 _rx_page_order;
260 int handler_status;
261
262 u32 flags;
263 u16 len[IWL_MAX_CMD_TFDS];
264 u8 dataflags[IWL_MAX_CMD_TFDS];
265 u8 id;
266};
267
268static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
269{
270 free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
271}
272
273struct iwl_rx_cmd_buffer {
274 struct page *_page;
275 int _offset;
276 bool _page_stolen;
277 unsigned int truesize;
278};
279
280static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
281{
282 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
283}
284
285static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
286{
287 return r->_offset;
288}
289
290static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
291{
292 r->_page_stolen = true;
293 get_page(r->_page);
294 return r->_page;
295}
296
297#define MAX_NO_RECLAIM_CMDS 6
298
299#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
300
301/*
302 * Maximum number of HW queues the transport layer
303 * currently supports
304 */
305#define IWL_MAX_HW_QUEUES 32
306#define IWL_INVALID_STATION 255
307#define IWL_MAX_TID_COUNT 8
308#define IWL_FRAME_LIMIT 64
309
310/**
311 * struct iwl_trans_config - transport configuration
312 *
313 * @op_mode: pointer to the upper layer.
314 * @cmd_queue: the index of the command queue.
315 * Must be set before start_fw.
316 * @cmd_fifo: the fifo for host commands
317 * @no_reclaim_cmds: Some devices erroneously don't set the
318 * SEQ_RX_FRAME bit on some notifications, this is the
319 * list of such notifications to filter. Max length is
320 * %MAX_NO_RECLAIM_CMDS.
321 * @n_no_reclaim_cmds: # of commands in list
322 * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs,
323 * if unset 4k will be the RX buffer size
324 * @queue_watchdog_timeout: time (in ms) after which queues
325 * are considered stuck and will trigger device restart
326 * @command_names: array of command names, must be 256 entries
327 * (one for each command); for debugging only
328 */
329struct iwl_trans_config {
330 struct iwl_op_mode *op_mode;
331
332 u8 cmd_queue;
333 u8 cmd_fifo;
334 const u8 *no_reclaim_cmds;
335 int n_no_reclaim_cmds;
336
337 bool rx_buf_size_8k;
338 unsigned int queue_watchdog_timeout;
339 const char **command_names;
340};
341
342struct iwl_trans;
343 72
344/** 73/**
345 * struct iwl_trans_ops - transport specific operations 74 * struct iwl_trans_ops - transport specific operations
346 * 75 * @start_device: allocates and inits all the resources for the transport
347 * All the handlers MUST be implemented 76 * layer.
348 * 77 * @prepare_card_hw: claim the ownership on the HW. Will be called during
349 * @start_hw: starts the HW- from that point on, the HW can send interrupts 78 * probe.
350 * May sleep 79 * @tx_start: starts and configures all the Tx fifo - usually done once the fw
351 * @stop_hw: stops the HW- from that point on, the HW will be in low power but 80 * is alive.
352 * will still issue interrupt if the HW RF kill is triggered unless
353 * op_mode_leaving is true.
354 * May sleep
355 * @start_fw: allocates and inits all the resources for the transport
356 * layer. Also kick a fw image.
357 * May sleep
358 * @fw_alive: called when the fw sends alive notification. If the fw provides
359 * the SCD base address in SRAM, then provide it here, or 0 otherwise.
360 * May sleep
361 * @stop_device:stops the whole device (embedded CPU put to reset) 81 * @stop_device:stops the whole device (embedded CPU put to reset)
362 * May sleep 82 * @rx_free: frees the rx memory
363 * @wowlan_suspend: put the device into the correct mode for WoWLAN during 83 * @tx_free: frees the tx memory
364 * suspend. This is optional, if not implemented WoWLAN will not be 84 * @send_cmd:send a host command
365 * supported. This callback may sleep. 85 * @send_cmd_pdu:send a host command: flags can be CMD_*
366 * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted. 86 * @get_tx_cmd: returns a pointer to a new Tx cmd for the upper layer use
367 * If RFkill is asserted in the middle of a SYNC host command, it must
368 * return -ERFKILL straight away.
369 * May sleep only if CMD_SYNC is set
370 * @tx: send an skb 87 * @tx: send an skb
371 * Must be atomic 88 * @txq_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
372 * @reclaim: free packet until ssn. Returns a list of freed packets. 89 * ready and a successful ADDBA response has been received.
373 * Must be atomic 90 * @txq_agg_disable: de-configure a Tx queue to send AMPDUs
374 * @txq_enable: setup a queue. To setup an AC queue, use the 91 * @kick_nic: remove the RESET from the embedded CPU and let it run
375 * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before 92 * @sync_irq: the upper layer will typically disable interrupt and call this
376 * this one. The op_mode must not configure the HCMD queue. May sleep. 93 * handler. After this handler returns, it is guaranteed that all
377 * @txq_disable: de-configure a Tx queue to send AMPDUs 94 * the ISR / tasklet etc... have finished running and the transport
378 * Must be atomic 95 * layer shall not pass any Rx.
379 * @wait_tx_queue_empty: wait until all tx queues are empty 96 * @free: release all the ressource for the transport layer itself such as
380 * May sleep 97 * irq, tasklet etc...
381 * @dbgfs_register: add the dbgfs files under this directory. Files will be
382 * automatically deleted.
383 * @suspend: stop the device unless WoWLAN is configured
384 * @resume: resume activity of the device
385 * @write8: write a u8 to a register at offset ofs from the BAR
386 * @write32: write a u32 to a register at offset ofs from the BAR
387 * @read32: read a u32 register at offset ofs from the BAR
388 * @read_prph: read a DWORD from a periphery register
389 * @write_prph: write a DWORD to a periphery register
390 * @configure: configure parameters required by the transport layer from
391 * the op_mode. May be called several times before start_fw, can't be
392 * called after that.
393 * @set_pmi: set the power pmi state
394 */ 98 */
395struct iwl_trans_ops { 99struct iwl_trans_ops {
396 100
397 int (*start_hw)(struct iwl_trans *iwl_trans); 101 int (*start_device)(struct iwl_priv *priv);
398 void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving); 102 int (*prepare_card_hw)(struct iwl_priv *priv);
399 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw); 103 void (*stop_device)(struct iwl_priv *priv);
400 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); 104 void (*tx_start)(struct iwl_priv *priv);
401 void (*stop_device)(struct iwl_trans *trans); 105 void (*tx_free)(struct iwl_priv *priv);
402 106 void (*rx_free)(struct iwl_priv *priv);
403 void (*wowlan_suspend)(struct iwl_trans *trans);
404 107
405 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 108 int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
406 109
407 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, 110 int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
408 struct iwl_device_cmd *dev_cmd, int queue); 111 const void *data);
409 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, 112 struct iwl_tx_cmd * (*get_tx_cmd)(struct iwl_priv *priv, int txq_id);
410 struct sk_buff_head *skbs); 113 int (*tx)(struct iwl_priv *priv, struct sk_buff *skb,
114 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
115 struct iwl_rxon_context *ctx);
411 116
412 void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo, 117 int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id,
413 int sta_id, int tid, int frame_limit, u16 ssn); 118 u16 ssn_idx, u8 tx_fifo);
414 void (*txq_disable)(struct iwl_trans *trans, int queue); 119 void (*txq_agg_setup)(struct iwl_priv *priv, int sta_id, int tid,
120 int frame_limit);
415 121
416 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); 122 void (*kick_nic)(struct iwl_priv *priv);
417 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
418#ifdef CONFIG_PM_SLEEP
419 int (*suspend)(struct iwl_trans *trans);
420 int (*resume)(struct iwl_trans *trans);
421#endif
422 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
423 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
424 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
425 u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
426 void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
427 void (*configure)(struct iwl_trans *trans,
428 const struct iwl_trans_config *trans_cfg);
429 void (*set_pmi)(struct iwl_trans *trans, bool state);
430};
431 123
432/** 124 void (*sync_irq)(struct iwl_priv *priv);
433 * enum iwl_trans_state - state of the transport layer 125 void (*free)(struct iwl_priv *priv);
434 *
435 * @IWL_TRANS_NO_FW: no fw has sent an alive response
436 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
437 */
438enum iwl_trans_state {
439 IWL_TRANS_NO_FW = 0,
440 IWL_TRANS_FW_ALIVE = 1,
441}; 126};
442 127
443/**
444 * struct iwl_trans - transport common data
445 *
446 * @ops - pointer to iwl_trans_ops
447 * @op_mode - pointer to the op_mode
448 * @cfg - pointer to the configuration
449 * @reg_lock - protect hw register access
450 * @dev - pointer to struct device * that represents the device
451 * @hw_id: a u32 with the ID of the device / subdevice.
452 * Set during transport allocation.
453 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
454 * @pm_support: set to true in start_hw if link pm is supported
455 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
456 * The user should use iwl_trans_{alloc,free}_tx_cmd.
457 * @dev_cmd_headroom: room needed for the transport's private use before the
458 * device_cmd for Tx - for internal use only
459 * The user should use iwl_trans_{alloc,free}_tx_cmd.
460 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
461 * starting the firmware, used for tracing
462 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
463 * start of the 802.11 header in the @rx_mpdu_cmd
464 */
465struct iwl_trans { 128struct iwl_trans {
466 const struct iwl_trans_ops *ops; 129 const struct iwl_trans_ops *ops;
467 struct iwl_op_mode *op_mode; 130 struct iwl_priv *priv;
468 const struct iwl_cfg *cfg;
469 enum iwl_trans_state state;
470 spinlock_t reg_lock;
471
472 struct device *dev;
473 u32 hw_rev;
474 u32 hw_id;
475 char hw_id_str[52];
476
477 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
478
479 bool pm_support;
480
481 /* The following fields are internal only */
482 struct kmem_cache *dev_cmd_pool;
483 size_t dev_cmd_headroom;
484 char dev_cmd_pool_name[50];
485
486 struct dentry *dbgfs_dir;
487
488 /* pointer to trans specific struct */
489 /*Ensure that this pointer will always be aligned to sizeof pointer */
490 char trans_specific[0] __aligned(sizeof(void *));
491}; 131};
492 132
493static inline void iwl_trans_configure(struct iwl_trans *trans, 133static inline int trans_start_device(struct iwl_trans *trans)
494 const struct iwl_trans_config *trans_cfg)
495{ 134{
496 trans->op_mode = trans_cfg->op_mode; 135 return trans->ops->start_device(trans->priv);
497
498 trans->ops->configure(trans, trans_cfg);
499} 136}
500 137
501static inline int iwl_trans_start_hw(struct iwl_trans *trans) 138static inline int trans_prepare_card_hw(struct iwl_trans *trans)
502{ 139{
503 might_sleep(); 140 return trans->ops->prepare_card_hw(trans->priv);
504
505 return trans->ops->start_hw(trans);
506} 141}
507 142
508static inline void iwl_trans_stop_hw(struct iwl_trans *trans, 143static inline void trans_stop_device(struct iwl_trans *trans)
509 bool op_mode_leaving)
510{ 144{
511 might_sleep(); 145 trans->ops->stop_device(trans->priv);
512
513 trans->ops->stop_hw(trans, op_mode_leaving);
514
515 if (op_mode_leaving)
516 trans->op_mode = NULL;
517
518 trans->state = IWL_TRANS_NO_FW;
519} 146}
520 147
521static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) 148static inline void trans_tx_start(struct iwl_trans *trans)
522{ 149{
523 might_sleep(); 150 trans->ops->tx_start(trans->priv);
524
525 trans->state = IWL_TRANS_FW_ALIVE;
526
527 trans->ops->fw_alive(trans, scd_addr);
528} 151}
529 152
530static inline int iwl_trans_start_fw(struct iwl_trans *trans, 153static inline void trans_rx_free(struct iwl_trans *trans)
531 const struct fw_img *fw)
532{ 154{
533 might_sleep(); 155 trans->ops->rx_free(trans->priv);
534
535 WARN_ON_ONCE(!trans->rx_mpdu_cmd);
536
537 return trans->ops->start_fw(trans, fw);
538}
539
540static inline void iwl_trans_stop_device(struct iwl_trans *trans)
541{
542 might_sleep();
543
544 trans->ops->stop_device(trans);
545
546 trans->state = IWL_TRANS_NO_FW;
547} 156}
548 157
549static inline void iwl_trans_wowlan_suspend(struct iwl_trans *trans) 158static inline void trans_tx_free(struct iwl_trans *trans)
550{ 159{
551 might_sleep(); 160 trans->ops->tx_free(trans->priv);
552 trans->ops->wowlan_suspend(trans);
553} 161}
554 162
555static inline int iwl_trans_send_cmd(struct iwl_trans *trans, 163static inline int trans_send_cmd(struct iwl_trans *trans,
556 struct iwl_host_cmd *cmd) 164 struct iwl_host_cmd *cmd)
557{ 165{
558 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 166 return trans->ops->send_cmd(trans->priv, cmd);
559 "%s bad state = %d", __func__, trans->state);
560
561 return trans->ops->send_cmd(trans, cmd);
562} 167}
563 168
564static inline struct iwl_device_cmd * 169static inline int trans_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
565iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) 170 u16 len, const void *data)
566{ 171{
567 u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); 172 return trans->ops->send_cmd_pdu(trans->priv, id, flags, len, data);
568
569 if (unlikely(dev_cmd_ptr == NULL))
570 return NULL;
571
572 return (struct iwl_device_cmd *)
573 (dev_cmd_ptr + trans->dev_cmd_headroom);
574}
575
576static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
577 struct iwl_device_cmd *dev_cmd)
578{
579 u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
580
581 kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
582}
583
584static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
585 struct iwl_device_cmd *dev_cmd, int queue)
586{
587 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
588 "%s bad state = %d", __func__, trans->state);
589
590 return trans->ops->tx(trans, skb, dev_cmd, queue);
591}
592
593static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
594 int ssn, struct sk_buff_head *skbs)
595{
596 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
597 "%s bad state = %d", __func__, trans->state);
598
599 trans->ops->reclaim(trans, queue, ssn, skbs);
600}
601
602static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
603{
604 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
605 "%s bad state = %d", __func__, trans->state);
606
607 trans->ops->txq_disable(trans, queue);
608} 173}
609 174
610static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, 175static inline struct iwl_tx_cmd *trans_get_tx_cmd(struct iwl_trans *trans,
611 int fifo, int sta_id, int tid, 176 int txq_id)
612 int frame_limit, u16 ssn)
613{ 177{
614 might_sleep(); 178 return trans->ops->get_tx_cmd(trans->priv, txq_id);
615
616 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
617 "%s bad state = %d", __func__, trans->state);
618
619 trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
620 frame_limit, ssn);
621} 179}
622 180
623static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, 181static inline int trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
624 int fifo) 182 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
183 struct iwl_rxon_context *ctx)
625{ 184{
626 iwl_trans_txq_enable(trans, queue, fifo, IWL_INVALID_STATION, 185 return trans->ops->tx(trans->priv, skb, tx_cmd, txq_id, fc, ampdu, ctx);
627 IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
628} 186}
629 187
630static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) 188static inline int trans_txq_agg_disable(struct iwl_trans *trans, u16 txq_id,
189 u16 ssn_idx, u8 tx_fifo)
631{ 190{
632 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 191 return trans->ops->txq_agg_disable(trans->priv, txq_id,
633 "%s bad state = %d", __func__, trans->state); 192 ssn_idx, tx_fifo);
634
635 return trans->ops->wait_tx_queue_empty(trans);
636} 193}
637 194
638static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, 195static inline void trans_txq_agg_setup(struct iwl_trans *trans, int sta_id,
639 struct dentry *dir) 196 int tid, int frame_limit)
640{ 197{
641 return trans->ops->dbgfs_register(trans, dir); 198 trans->ops->txq_agg_setup(trans->priv, sta_id, tid, frame_limit);
642} 199}
643 200
644#ifdef CONFIG_PM_SLEEP 201static inline void trans_kick_nic(struct iwl_trans *trans)
645static inline int iwl_trans_suspend(struct iwl_trans *trans)
646{ 202{
647 return trans->ops->suspend(trans); 203 trans->ops->kick_nic(trans->priv);
648} 204}
649 205
650static inline int iwl_trans_resume(struct iwl_trans *trans) 206static inline void trans_sync_irq(struct iwl_trans *trans)
651{ 207{
652 return trans->ops->resume(trans); 208 trans->ops->sync_irq(trans->priv);
653} 209}
654#endif
655 210
656static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) 211static inline void trans_free(struct iwl_trans *trans)
657{ 212{
658 trans->ops->write8(trans, ofs, val); 213 trans->ops->free(trans->priv);
659} 214}
660 215
661static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) 216int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv);
662{
663 trans->ops->write32(trans, ofs, val);
664}
665
666static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
667{
668 return trans->ops->read32(trans, ofs);
669}
670 217
671static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) 218/*TODO: this functions should NOT be exported from trans module - export it
672{ 219 * until the reclaim flow will be brought to the transport module too */
673 return trans->ops->read_prph(trans, ofs);
674}
675
676static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
677 u32 val)
678{
679 return trans->ops->write_prph(trans, ofs, val);
680}
681
682static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
683{
684 trans->ops->set_pmi(trans, state);
685}
686 220
687/***************************************************** 221struct iwl_tx_queue;
688* driver (transport) register/unregister functions 222void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
689******************************************************/ 223 struct iwl_tx_queue *txq);
690int __must_check iwl_pci_register_driver(void);
691void iwl_pci_unregister_driver(void);
692 224
693#endif /* __iwl_trans_h__ */ 225#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
deleted file mode 100644
index f8620ecae6b..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/1000.c
+++ /dev/null
@@ -1,141 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-csr.h"
31#include "iwl-agn-hw.h"
32#include "cfg.h"
33
34/* Highest firmware API version supported */
35#define IWL1000_UCODE_API_MAX 5
36#define IWL100_UCODE_API_MAX 5
37
38/* Oldest version we won't warn about */
39#define IWL1000_UCODE_API_OK 5
40#define IWL100_UCODE_API_OK 5
41
42/* Lowest firmware API version supported */
43#define IWL1000_UCODE_API_MIN 1
44#define IWL100_UCODE_API_MIN 5
45
46/* EEPROM version */
47#define EEPROM_1000_TX_POWER_VERSION (4)
48#define EEPROM_1000_EEPROM_VERSION (0x15C)
49
50#define IWL1000_FW_PRE "iwlwifi-1000-"
51#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
52
53#define IWL100_FW_PRE "iwlwifi-100-"
54#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
55
56
57static const struct iwl_base_params iwl1000_base_params = {
58 .num_of_queues = IWLAGN_NUM_QUEUES,
59 .eeprom_size = OTP_LOW_IMAGE_SIZE,
60 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
61 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
62 .shadow_ram_support = false,
63 .led_compensation = 51,
64 .support_ct_kill_exit = true,
65 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
66 .chain_noise_scale = 1000,
67 .wd_timeout = IWL_WATCHDOG_DISABLED,
68 .max_event_log_size = 128,
69};
70
71static const struct iwl_ht_params iwl1000_ht_params = {
72 .ht_greenfield_support = true,
73 .use_rts_for_aggregation = true, /* use rts/cts protection */
74 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
75};
76
77static const struct iwl_eeprom_params iwl1000_eeprom_params = {
78 .regulatory_bands = {
79 EEPROM_REG_BAND_1_CHANNELS,
80 EEPROM_REG_BAND_2_CHANNELS,
81 EEPROM_REG_BAND_3_CHANNELS,
82 EEPROM_REG_BAND_4_CHANNELS,
83 EEPROM_REG_BAND_5_CHANNELS,
84 EEPROM_REG_BAND_24_HT40_CHANNELS,
85 EEPROM_REGULATORY_BAND_NO_HT40,
86 }
87};
88
89#define IWL_DEVICE_1000 \
90 .fw_name_pre = IWL1000_FW_PRE, \
91 .ucode_api_max = IWL1000_UCODE_API_MAX, \
92 .ucode_api_ok = IWL1000_UCODE_API_OK, \
93 .ucode_api_min = IWL1000_UCODE_API_MIN, \
94 .device_family = IWL_DEVICE_FAMILY_1000, \
95 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
96 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
97 .nvm_ver = EEPROM_1000_EEPROM_VERSION, \
98 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
99 .base_params = &iwl1000_base_params, \
100 .eeprom_params = &iwl1000_eeprom_params, \
101 .led_mode = IWL_LED_BLINK
102
103const struct iwl_cfg iwl1000_bgn_cfg = {
104 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
105 IWL_DEVICE_1000,
106 .ht_params = &iwl1000_ht_params,
107};
108
109const struct iwl_cfg iwl1000_bg_cfg = {
110 .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
111 IWL_DEVICE_1000,
112};
113
114#define IWL_DEVICE_100 \
115 .fw_name_pre = IWL100_FW_PRE, \
116 .ucode_api_max = IWL100_UCODE_API_MAX, \
117 .ucode_api_ok = IWL100_UCODE_API_OK, \
118 .ucode_api_min = IWL100_UCODE_API_MIN, \
119 .device_family = IWL_DEVICE_FAMILY_100, \
120 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
121 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
122 .nvm_ver = EEPROM_1000_EEPROM_VERSION, \
123 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
124 .base_params = &iwl1000_base_params, \
125 .eeprom_params = &iwl1000_eeprom_params, \
126 .led_mode = IWL_LED_RF_STATE, \
127 .rx_with_siso_diversity = true
128
129const struct iwl_cfg iwl100_bgn_cfg = {
130 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
131 IWL_DEVICE_100,
132 .ht_params = &iwl1000_ht_params,
133};
134
135const struct iwl_cfg iwl100_bg_cfg = {
136 .name = "Intel(R) Centrino(R) Wireless-N 100 BG",
137 IWL_DEVICE_100,
138};
139
140MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
141MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
deleted file mode 100644
index 244019cec3e..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/2000.c
+++ /dev/null
@@ -1,243 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-agn-hw.h"
31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */
33
34/* Highest firmware API version supported */
35#define IWL2030_UCODE_API_MAX 6
36#define IWL2000_UCODE_API_MAX 6
37#define IWL105_UCODE_API_MAX 6
38#define IWL135_UCODE_API_MAX 6
39
40/* Oldest version we won't warn about */
41#define IWL2030_UCODE_API_OK 6
42#define IWL2000_UCODE_API_OK 6
43#define IWL105_UCODE_API_OK 6
44#define IWL135_UCODE_API_OK 6
45
46/* Lowest firmware API version supported */
47#define IWL2030_UCODE_API_MIN 5
48#define IWL2000_UCODE_API_MIN 5
49#define IWL105_UCODE_API_MIN 5
50#define IWL135_UCODE_API_MIN 5
51
52/* EEPROM version */
53#define EEPROM_2000_TX_POWER_VERSION (6)
54#define EEPROM_2000_EEPROM_VERSION (0x805)
55
56
57#define IWL2030_FW_PRE "iwlwifi-2030-"
58#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
59
60#define IWL2000_FW_PRE "iwlwifi-2000-"
61#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
62
63#define IWL105_FW_PRE "iwlwifi-105-"
64#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
65
66#define IWL135_FW_PRE "iwlwifi-135-"
67#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
68
69static const struct iwl_base_params iwl2000_base_params = {
70 .eeprom_size = OTP_LOW_IMAGE_SIZE,
71 .num_of_queues = IWLAGN_NUM_QUEUES,
72 .pll_cfg_val = 0,
73 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
74 .shadow_ram_support = true,
75 .led_compensation = 51,
76 .adv_thermal_throttle = true,
77 .support_ct_kill_exit = true,
78 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
79 .chain_noise_scale = 1000,
80 .wd_timeout = IWL_DEF_WD_TIMEOUT,
81 .max_event_log_size = 512,
82 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
83 .hd_v2 = true,
84};
85
86
87static const struct iwl_base_params iwl2030_base_params = {
88 .eeprom_size = OTP_LOW_IMAGE_SIZE,
89 .num_of_queues = IWLAGN_NUM_QUEUES,
90 .pll_cfg_val = 0,
91 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
92 .shadow_ram_support = true,
93 .led_compensation = 57,
94 .adv_thermal_throttle = true,
95 .support_ct_kill_exit = true,
96 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
97 .chain_noise_scale = 1000,
98 .wd_timeout = IWL_LONG_WD_TIMEOUT,
99 .max_event_log_size = 512,
100 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
101 .hd_v2 = true,
102};
103
104static const struct iwl_ht_params iwl2000_ht_params = {
105 .ht_greenfield_support = true,
106 .use_rts_for_aggregation = true, /* use rts/cts protection */
107 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
108};
109
110static const struct iwl_bt_params iwl2030_bt_params = {
111 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
112 .advanced_bt_coexist = true,
113 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
114 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
115 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
116 .bt_sco_disable = true,
117 .bt_session_2 = true,
118};
119
120static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
121 .regulatory_bands = {
122 EEPROM_REG_BAND_1_CHANNELS,
123 EEPROM_REG_BAND_2_CHANNELS,
124 EEPROM_REG_BAND_3_CHANNELS,
125 EEPROM_REG_BAND_4_CHANNELS,
126 EEPROM_REG_BAND_5_CHANNELS,
127 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
128 EEPROM_REGULATORY_BAND_NO_HT40,
129 },
130 .enhanced_txpower = true,
131};
132
133#define IWL_DEVICE_2000 \
134 .fw_name_pre = IWL2000_FW_PRE, \
135 .ucode_api_max = IWL2000_UCODE_API_MAX, \
136 .ucode_api_ok = IWL2000_UCODE_API_OK, \
137 .ucode_api_min = IWL2000_UCODE_API_MIN, \
138 .device_family = IWL_DEVICE_FAMILY_2000, \
139 .max_inst_size = IWL60_RTC_INST_SIZE, \
140 .max_data_size = IWL60_RTC_DATA_SIZE, \
141 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
142 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
143 .base_params = &iwl2000_base_params, \
144 .eeprom_params = &iwl20x0_eeprom_params, \
145 .need_temp_offset_calib = true, \
146 .temp_offset_v2 = true, \
147 .led_mode = IWL_LED_RF_STATE
148
149const struct iwl_cfg iwl2000_2bgn_cfg = {
150 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
151 IWL_DEVICE_2000,
152 .ht_params = &iwl2000_ht_params,
153};
154
155const struct iwl_cfg iwl2000_2bgn_d_cfg = {
156 .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
157 IWL_DEVICE_2000,
158 .ht_params = &iwl2000_ht_params,
159};
160
161#define IWL_DEVICE_2030 \
162 .fw_name_pre = IWL2030_FW_PRE, \
163 .ucode_api_max = IWL2030_UCODE_API_MAX, \
164 .ucode_api_ok = IWL2030_UCODE_API_OK, \
165 .ucode_api_min = IWL2030_UCODE_API_MIN, \
166 .device_family = IWL_DEVICE_FAMILY_2030, \
167 .max_inst_size = IWL60_RTC_INST_SIZE, \
168 .max_data_size = IWL60_RTC_DATA_SIZE, \
169 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
170 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
171 .base_params = &iwl2030_base_params, \
172 .bt_params = &iwl2030_bt_params, \
173 .eeprom_params = &iwl20x0_eeprom_params, \
174 .need_temp_offset_calib = true, \
175 .temp_offset_v2 = true, \
176 .led_mode = IWL_LED_RF_STATE, \
177 .adv_pm = true
178
179const struct iwl_cfg iwl2030_2bgn_cfg = {
180 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
181 IWL_DEVICE_2030,
182 .ht_params = &iwl2000_ht_params,
183};
184
185#define IWL_DEVICE_105 \
186 .fw_name_pre = IWL105_FW_PRE, \
187 .ucode_api_max = IWL105_UCODE_API_MAX, \
188 .ucode_api_ok = IWL105_UCODE_API_OK, \
189 .ucode_api_min = IWL105_UCODE_API_MIN, \
190 .device_family = IWL_DEVICE_FAMILY_105, \
191 .max_inst_size = IWL60_RTC_INST_SIZE, \
192 .max_data_size = IWL60_RTC_DATA_SIZE, \
193 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
194 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
195 .base_params = &iwl2000_base_params, \
196 .eeprom_params = &iwl20x0_eeprom_params, \
197 .need_temp_offset_calib = true, \
198 .temp_offset_v2 = true, \
199 .led_mode = IWL_LED_RF_STATE, \
200 .adv_pm = true, \
201 .rx_with_siso_diversity = true
202
203const struct iwl_cfg iwl105_bgn_cfg = {
204 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
205 IWL_DEVICE_105,
206 .ht_params = &iwl2000_ht_params,
207};
208
209const struct iwl_cfg iwl105_bgn_d_cfg = {
210 .name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
211 IWL_DEVICE_105,
212 .ht_params = &iwl2000_ht_params,
213};
214
215#define IWL_DEVICE_135 \
216 .fw_name_pre = IWL135_FW_PRE, \
217 .ucode_api_max = IWL135_UCODE_API_MAX, \
218 .ucode_api_ok = IWL135_UCODE_API_OK, \
219 .ucode_api_min = IWL135_UCODE_API_MIN, \
220 .device_family = IWL_DEVICE_FAMILY_135, \
221 .max_inst_size = IWL60_RTC_INST_SIZE, \
222 .max_data_size = IWL60_RTC_DATA_SIZE, \
223 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
224 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
225 .base_params = &iwl2030_base_params, \
226 .bt_params = &iwl2030_bt_params, \
227 .eeprom_params = &iwl20x0_eeprom_params, \
228 .need_temp_offset_calib = true, \
229 .temp_offset_v2 = true, \
230 .led_mode = IWL_LED_RF_STATE, \
231 .adv_pm = true, \
232 .rx_with_siso_diversity = true
233
234const struct iwl_cfg iwl135_bgn_cfg = {
235 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
236 IWL_DEVICE_135,
237 .ht_params = &iwl2000_ht_params,
238};
239
240MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
241MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
242MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
243MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
deleted file mode 100644
index 83ca40321ff..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/5000.c
+++ /dev/null
@@ -1,180 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-agn-hw.h"
31#include "iwl-csr.h"
32#include "cfg.h"
33
34/* Highest firmware API version supported */
35#define IWL5000_UCODE_API_MAX 5
36#define IWL5150_UCODE_API_MAX 2
37
38/* Oldest version we won't warn about */
39#define IWL5000_UCODE_API_OK 5
40#define IWL5150_UCODE_API_OK 2
41
42/* Lowest firmware API version supported */
43#define IWL5000_UCODE_API_MIN 1
44#define IWL5150_UCODE_API_MIN 1
45
46/* EEPROM versions */
47#define EEPROM_5000_TX_POWER_VERSION (4)
48#define EEPROM_5000_EEPROM_VERSION (0x11A)
49#define EEPROM_5050_TX_POWER_VERSION (4)
50#define EEPROM_5050_EEPROM_VERSION (0x21E)
51
52#define IWL5000_FW_PRE "iwlwifi-5000-"
53#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
54
55#define IWL5150_FW_PRE "iwlwifi-5150-"
56#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
57
58static const struct iwl_base_params iwl5000_base_params = {
59 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
60 .num_of_queues = IWLAGN_NUM_QUEUES,
61 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
62 .led_compensation = 51,
63 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
64 .chain_noise_scale = 1000,
65 .wd_timeout = IWL_WATCHDOG_DISABLED,
66 .max_event_log_size = 512,
67 .no_idle_support = true,
68};
69
70static const struct iwl_ht_params iwl5000_ht_params = {
71 .ht_greenfield_support = true,
72 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
73};
74
75static const struct iwl_eeprom_params iwl5000_eeprom_params = {
76 .regulatory_bands = {
77 EEPROM_REG_BAND_1_CHANNELS,
78 EEPROM_REG_BAND_2_CHANNELS,
79 EEPROM_REG_BAND_3_CHANNELS,
80 EEPROM_REG_BAND_4_CHANNELS,
81 EEPROM_REG_BAND_5_CHANNELS,
82 EEPROM_REG_BAND_24_HT40_CHANNELS,
83 EEPROM_REG_BAND_52_HT40_CHANNELS
84 },
85};
86
87#define IWL_DEVICE_5000 \
88 .fw_name_pre = IWL5000_FW_PRE, \
89 .ucode_api_max = IWL5000_UCODE_API_MAX, \
90 .ucode_api_ok = IWL5000_UCODE_API_OK, \
91 .ucode_api_min = IWL5000_UCODE_API_MIN, \
92 .device_family = IWL_DEVICE_FAMILY_5000, \
93 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
94 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
95 .nvm_ver = EEPROM_5000_EEPROM_VERSION, \
96 .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
97 .base_params = &iwl5000_base_params, \
98 .eeprom_params = &iwl5000_eeprom_params, \
99 .led_mode = IWL_LED_BLINK
100
101const struct iwl_cfg iwl5300_agn_cfg = {
102 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
103 IWL_DEVICE_5000,
104 /* at least EEPROM 0x11A has wrong info */
105 .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
106 .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
107 .ht_params = &iwl5000_ht_params,
108};
109
110const struct iwl_cfg iwl5100_bgn_cfg = {
111 .name = "Intel(R) WiFi Link 5100 BGN",
112 IWL_DEVICE_5000,
113 .valid_tx_ant = ANT_B, /* .cfg overwrite */
114 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
115 .ht_params = &iwl5000_ht_params,
116};
117
118const struct iwl_cfg iwl5100_abg_cfg = {
119 .name = "Intel(R) WiFi Link 5100 ABG",
120 IWL_DEVICE_5000,
121 .valid_tx_ant = ANT_B, /* .cfg overwrite */
122 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
123};
124
125const struct iwl_cfg iwl5100_agn_cfg = {
126 .name = "Intel(R) WiFi Link 5100 AGN",
127 IWL_DEVICE_5000,
128 .valid_tx_ant = ANT_B, /* .cfg overwrite */
129 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
130 .ht_params = &iwl5000_ht_params,
131};
132
133const struct iwl_cfg iwl5350_agn_cfg = {
134 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
135 .fw_name_pre = IWL5000_FW_PRE,
136 .ucode_api_max = IWL5000_UCODE_API_MAX,
137 .ucode_api_ok = IWL5000_UCODE_API_OK,
138 .ucode_api_min = IWL5000_UCODE_API_MIN,
139 .device_family = IWL_DEVICE_FAMILY_5000,
140 .max_inst_size = IWLAGN_RTC_INST_SIZE,
141 .max_data_size = IWLAGN_RTC_DATA_SIZE,
142 .nvm_ver = EEPROM_5050_EEPROM_VERSION,
143 .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,
144 .base_params = &iwl5000_base_params,
145 .eeprom_params = &iwl5000_eeprom_params,
146 .ht_params = &iwl5000_ht_params,
147 .led_mode = IWL_LED_BLINK,
148 .internal_wimax_coex = true,
149};
150
151#define IWL_DEVICE_5150 \
152 .fw_name_pre = IWL5150_FW_PRE, \
153 .ucode_api_max = IWL5150_UCODE_API_MAX, \
154 .ucode_api_ok = IWL5150_UCODE_API_OK, \
155 .ucode_api_min = IWL5150_UCODE_API_MIN, \
156 .device_family = IWL_DEVICE_FAMILY_5150, \
157 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
158 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
159 .nvm_ver = EEPROM_5050_EEPROM_VERSION, \
160 .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
161 .base_params = &iwl5000_base_params, \
162 .eeprom_params = &iwl5000_eeprom_params, \
163 .no_xtal_calib = true, \
164 .led_mode = IWL_LED_BLINK, \
165 .internal_wimax_coex = true
166
167const struct iwl_cfg iwl5150_agn_cfg = {
168 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
169 IWL_DEVICE_5150,
170 .ht_params = &iwl5000_ht_params,
171
172};
173
174const struct iwl_cfg iwl5150_abg_cfg = {
175 .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
176 IWL_DEVICE_5150,
177};
178
179MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
180MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
deleted file mode 100644
index d4df976d470..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ /dev/null
@@ -1,403 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-agn-hw.h"
31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */
33
34/* Highest firmware API version supported */
35#define IWL6000_UCODE_API_MAX 6
36#define IWL6050_UCODE_API_MAX 5
37#define IWL6000G2_UCODE_API_MAX 6
38#define IWL6035_UCODE_API_MAX 6
39
40/* Oldest version we won't warn about */
41#define IWL6000_UCODE_API_OK 4
42#define IWL6000G2_UCODE_API_OK 5
43#define IWL6050_UCODE_API_OK 5
44#define IWL6000G2B_UCODE_API_OK 6
45#define IWL6035_UCODE_API_OK 6
46
47/* Lowest firmware API version supported */
48#define IWL6000_UCODE_API_MIN 4
49#define IWL6050_UCODE_API_MIN 4
50#define IWL6000G2_UCODE_API_MIN 5
51#define IWL6035_UCODE_API_MIN 6
52
53/* EEPROM versions */
54#define EEPROM_6000_TX_POWER_VERSION (4)
55#define EEPROM_6000_EEPROM_VERSION (0x423)
56#define EEPROM_6050_TX_POWER_VERSION (4)
57#define EEPROM_6050_EEPROM_VERSION (0x532)
58#define EEPROM_6150_TX_POWER_VERSION (6)
59#define EEPROM_6150_EEPROM_VERSION (0x553)
60#define EEPROM_6005_TX_POWER_VERSION (6)
61#define EEPROM_6005_EEPROM_VERSION (0x709)
62#define EEPROM_6030_TX_POWER_VERSION (6)
63#define EEPROM_6030_EEPROM_VERSION (0x709)
64#define EEPROM_6035_TX_POWER_VERSION (6)
65#define EEPROM_6035_EEPROM_VERSION (0x753)
66
67#define IWL6000_FW_PRE "iwlwifi-6000-"
68#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
69
70#define IWL6050_FW_PRE "iwlwifi-6050-"
71#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
72
73#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
74#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
75
76#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
77#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
78
/* Base hardware parameters for the first-generation 6x00 parts. */
79static const struct iwl_base_params iwl6000_base_params = {
80 .eeprom_size = OTP_LOW_IMAGE_SIZE,
81 .num_of_queues = IWLAGN_NUM_QUEUES,
82 .pll_cfg_val = 0,
83 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
84 .shadow_ram_support = true,
85 .led_compensation = 51,
86 .adv_thermal_throttle = true,
87 .support_ct_kill_exit = true,
88 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
89 .chain_noise_scale = 1000,
90 .wd_timeout = IWL_DEF_WD_TIMEOUT,
91 .max_event_log_size = 512,
92 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
93};
94
/* 6x50 variant: differs from iwl6000_base_params only in the OTP item
 * count (6x50), chain_noise_scale (1500) and a larger event log (1024). */
95static const struct iwl_base_params iwl6050_base_params = {
96 .eeprom_size = OTP_LOW_IMAGE_SIZE,
97 .num_of_queues = IWLAGN_NUM_QUEUES,
98 .pll_cfg_val = 0,
99 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
100 .shadow_ram_support = true,
101 .led_compensation = 51,
102 .adv_thermal_throttle = true,
103 .support_ct_kill_exit = true,
104 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
105 .chain_noise_scale = 1500,
106 .wd_timeout = IWL_DEF_WD_TIMEOUT,
107 .max_event_log_size = 1024,
108 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
109};
110
/* Second-generation (6005/6030/6035) variant: higher LED compensation
 * (57) and the long watchdog timeout. */
111static const struct iwl_base_params iwl6000_g2_base_params = {
112 .eeprom_size = OTP_LOW_IMAGE_SIZE,
113 .num_of_queues = IWLAGN_NUM_QUEUES,
114 .pll_cfg_val = 0,
115 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
116 .shadow_ram_support = true,
117 .led_compensation = 57,
118 .adv_thermal_throttle = true,
119 .support_ct_kill_exit = true,
120 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
121 .chain_noise_scale = 1000,
122 .wd_timeout = IWL_LONG_WD_TIMEOUT,
123 .max_event_log_size = 512,
124 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
125};
126
/* HT capabilities shared by the whole 6000 family: greenfield preambles,
 * RTS/CTS protection for aggregation, and HT40 on both bands. */
127static const struct iwl_ht_params iwl6000_ht_params = {
128 .ht_greenfield_support = true,
129 .use_rts_for_aggregation = true, /* use rts/cts protection */
130 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
131};
132
/* BT coexistence tuning referenced by the 6030/6035 device macros below
 * (the BT-capable members of the family). */
133static const struct iwl_bt_params iwl6000_bt_params = {
134 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
135 .advanced_bt_coexist = true,
136 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
137 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
138 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
139 .bt_sco_disable = true,
140};
141
/* EEPROM layout for the 6000 family: per-band regulatory channel lists
 * (6000-specific 2.4 GHz HT40 table) plus enhanced TX power tables. */
142static const struct iwl_eeprom_params iwl6000_eeprom_params = {
143 .regulatory_bands = {
144 EEPROM_REG_BAND_1_CHANNELS,
145 EEPROM_REG_BAND_2_CHANNELS,
146 EEPROM_REG_BAND_3_CHANNELS,
147 EEPROM_REG_BAND_4_CHANNELS,
148 EEPROM_REG_BAND_5_CHANNELS,
149 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
150 EEPROM_REG_BAND_52_HT40_CHANNELS
151 },
152 .enhanced_txpower = true,
153};
154
/* Fields shared by every 6005 config.  No .bt_params on this family;
 * temperature-offset calibration is required. */
155#define IWL_DEVICE_6005 \
156 .fw_name_pre = IWL6005_FW_PRE, \
157 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
158 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
159 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
160 .device_family = IWL_DEVICE_FAMILY_6005, \
161 .max_inst_size = IWL60_RTC_INST_SIZE, \
162 .max_data_size = IWL60_RTC_DATA_SIZE, \
163 .nvm_ver = EEPROM_6005_EEPROM_VERSION, \
164 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
165 .base_params = &iwl6000_g2_base_params, \
166 .eeprom_params = &iwl6000_eeprom_params, \
167 .need_temp_offset_calib = true, \
168 .led_mode = IWL_LED_RF_STATE
169
/*
 * 6005 SKUs: all share IWL_DEVICE_6005; entries differ only in the
 * marketing name and whether 11n (.ht_params) is supported.
 */
170const struct iwl_cfg iwl6005_2agn_cfg = {
171 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
172 IWL_DEVICE_6005,
173 .ht_params = &iwl6000_ht_params,
174};
175
176const struct iwl_cfg iwl6005_2abg_cfg = {
177 .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
178 IWL_DEVICE_6005,
179};
180
181const struct iwl_cfg iwl6005_2bg_cfg = {
182 .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
183 IWL_DEVICE_6005,
184};
185
186const struct iwl_cfg iwl6005_2agn_sff_cfg = {
187 .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
188 IWL_DEVICE_6005,
189 .ht_params = &iwl6000_ht_params,
190};
191
192const struct iwl_cfg iwl6005_2agn_d_cfg = {
193 .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
194 IWL_DEVICE_6005,
195 .ht_params = &iwl6000_ht_params,
196};
197
198const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
199 .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
200 IWL_DEVICE_6005,
201 .ht_params = &iwl6000_ht_params,
202};
203
204const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
205 .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
206 IWL_DEVICE_6005,
207 .ht_params = &iwl6000_ht_params,
208};
209
/*
 * Fields shared by every 6030-series (and 1030/130) config.  Unlike
 * 6005 this family carries BT coexistence parameters.
 *
 * Fix: the original ended with ".adv_pm = true \" — the trailing
 * backslash continued the macro onto the following (blank) line, so any
 * line inserted directly after the macro would silently become part of
 * it.  The 6035 macro below correctly omits the final continuation.
 */
#define IWL_DEVICE_6030 \
	.fw_name_pre = IWL6030_FW_PRE, \
	.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
	.ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
	.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6030, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.nvm_ver = EEPROM_6030_EEPROM_VERSION, \
	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
	.base_params = &iwl6000_g2_base_params, \
	.bt_params = &iwl6000_bt_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.need_temp_offset_calib = true, \
	.led_mode = IWL_LED_RF_STATE, \
	.adv_pm = true
/* 6230 SKUs: all built on IWL_DEVICE_6030; only name and HT support vary. */
227const struct iwl_cfg iwl6030_2agn_cfg = {
228 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
229 IWL_DEVICE_6030,
230 .ht_params = &iwl6000_ht_params,
231};
232
233const struct iwl_cfg iwl6030_2abg_cfg = {
234 .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
235 IWL_DEVICE_6030,
236};
237
238const struct iwl_cfg iwl6030_2bgn_cfg = {
239 .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
240 IWL_DEVICE_6030,
241 .ht_params = &iwl6000_ht_params,
242};
243
244const struct iwl_cfg iwl6030_2bg_cfg = {
245 .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
246 IWL_DEVICE_6030,
247};
248
/* 6035: same family (and firmware image, IWL6030_FW_PRE) as 6030 but
 * with its own, newer firmware API bounds. */
249#define IWL_DEVICE_6035 \
250 .fw_name_pre = IWL6030_FW_PRE, \
251 .ucode_api_max = IWL6035_UCODE_API_MAX, \
252 .ucode_api_ok = IWL6035_UCODE_API_OK, \
253 .ucode_api_min = IWL6035_UCODE_API_MIN, \
254 .device_family = IWL_DEVICE_FAMILY_6030, \
255 .max_inst_size = IWL60_RTC_INST_SIZE, \
256 .max_data_size = IWL60_RTC_DATA_SIZE, \
257 .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
258 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
259 .base_params = &iwl6000_g2_base_params, \
260 .bt_params = &iwl6000_bt_params, \
261 .eeprom_params = &iwl6000_eeprom_params, \
262 .need_temp_offset_calib = true, \
263 .led_mode = IWL_LED_RF_STATE, \
264 .adv_pm = true
265
/* 6235, plus the 1030/130 SKUs which are 6030-family silicon under
 * different marketing names; the 130 parts add SISO RX diversity. */
266const struct iwl_cfg iwl6035_2agn_cfg = {
267 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
268 IWL_DEVICE_6035,
269 .ht_params = &iwl6000_ht_params,
270};
271
272const struct iwl_cfg iwl1030_bgn_cfg = {
273 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
274 IWL_DEVICE_6030,
275 .ht_params = &iwl6000_ht_params,
276};
277
278const struct iwl_cfg iwl1030_bg_cfg = {
279 .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
280 IWL_DEVICE_6030,
281};
282
283const struct iwl_cfg iwl130_bgn_cfg = {
284 .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
285 IWL_DEVICE_6030,
286 .ht_params = &iwl6000_ht_params,
287 .rx_with_siso_diversity = true,
288};
289
290const struct iwl_cfg iwl130_bg_cfg = {
291 .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
292 IWL_DEVICE_6030,
293 .rx_with_siso_diversity = true,
294};
295
296/*
297 * "i": Internal configuration, use internal Power Amplifier
298 */
/* Fields shared by the internal-PA 6200 SKUs; overrides the antenna
 * configuration to B/C for both TX and RX. */
299#define IWL_DEVICE_6000i \
300 .fw_name_pre = IWL6000_FW_PRE, \
301 .ucode_api_max = IWL6000_UCODE_API_MAX, \
302 .ucode_api_ok = IWL6000_UCODE_API_OK, \
303 .ucode_api_min = IWL6000_UCODE_API_MIN, \
304 .device_family = IWL_DEVICE_FAMILY_6000i, \
305 .max_inst_size = IWL60_RTC_INST_SIZE, \
306 .max_data_size = IWL60_RTC_DATA_SIZE, \
307 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
308 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
309 .nvm_ver = EEPROM_6000_EEPROM_VERSION, \
310 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
311 .base_params = &iwl6000_base_params, \
312 .eeprom_params = &iwl6000_eeprom_params, \
313 .led_mode = IWL_LED_BLINK
314
/* 6200 SKUs built on IWL_DEVICE_6000i. */
315const struct iwl_cfg iwl6000i_2agn_cfg = {
316 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
317 IWL_DEVICE_6000i,
318 .ht_params = &iwl6000_ht_params,
319};
320
321const struct iwl_cfg iwl6000i_2abg_cfg = {
322 .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
323 IWL_DEVICE_6000i,
324};
325
326const struct iwl_cfg iwl6000i_2bg_cfg = {
327 .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
328 IWL_DEVICE_6000i,
329};
330
/* Fields shared by the 6250 WiFi/WiMAX SKUs; antenna override to A/B.
 * NOTE(review): unlike the other device macros this one sets no
 * .ucode_api_ok even though IWL6050_UCODE_API_OK exists — presumably the
 * core falls back to .ucode_api_max when it is 0; confirm in iwl-drv. */
331#define IWL_DEVICE_6050 \
332 .fw_name_pre = IWL6050_FW_PRE, \
333 .ucode_api_max = IWL6050_UCODE_API_MAX, \
334 .ucode_api_min = IWL6050_UCODE_API_MIN, \
335 .device_family = IWL_DEVICE_FAMILY_6050, \
336 .max_inst_size = IWL60_RTC_INST_SIZE, \
337 .max_data_size = IWL60_RTC_DATA_SIZE, \
338 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
339 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
340 .nvm_ver = EEPROM_6050_EEPROM_VERSION, \
341 .nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
342 .base_params = &iwl6050_base_params, \
343 .eeprom_params = &iwl6000_eeprom_params, \
344 .led_mode = IWL_LED_BLINK, \
345 .internal_wimax_coex = true
346
/* 6250 WiFi/WiMAX SKUs. */
347const struct iwl_cfg iwl6050_2agn_cfg = {
348 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
349 IWL_DEVICE_6050,
350 .ht_params = &iwl6000_ht_params,
351};
352
353const struct iwl_cfg iwl6050_2abg_cfg = {
354 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
355 IWL_DEVICE_6050,
356};
357
/* 6150 WiFi/WiMAX: shares the 6050 firmware image and API bounds but is
 * its own device family with its own EEPROM version numbers.
 * NOTE(review): like IWL_DEVICE_6050 this sets no .ucode_api_ok; see the
 * note above. */
358#define IWL_DEVICE_6150 \
359 .fw_name_pre = IWL6050_FW_PRE, \
360 .ucode_api_max = IWL6050_UCODE_API_MAX, \
361 .ucode_api_min = IWL6050_UCODE_API_MIN, \
362 .device_family = IWL_DEVICE_FAMILY_6150, \
363 .max_inst_size = IWL60_RTC_INST_SIZE, \
364 .max_data_size = IWL60_RTC_DATA_SIZE, \
365 .nvm_ver = EEPROM_6150_EEPROM_VERSION, \
366 .nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
367 .base_params = &iwl6050_base_params, \
368 .eeprom_params = &iwl6000_eeprom_params, \
369 .led_mode = IWL_LED_BLINK, \
370 .internal_wimax_coex = true
371
/* 6150 SKUs, plus the fully spelled-out 6300 3x3 config (the only
 * 6000-family cfg not built from a device macro). */
372const struct iwl_cfg iwl6150_bgn_cfg = {
373 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
374 IWL_DEVICE_6150,
375 .ht_params = &iwl6000_ht_params,
376};
377
378const struct iwl_cfg iwl6150_bg_cfg = {
379 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
380 IWL_DEVICE_6150,
381};
382
383const struct iwl_cfg iwl6000_3agn_cfg = {
384 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
385 .fw_name_pre = IWL6000_FW_PRE,
386 .ucode_api_max = IWL6000_UCODE_API_MAX,
387 .ucode_api_ok = IWL6000_UCODE_API_OK,
388 .ucode_api_min = IWL6000_UCODE_API_MIN,
389 .device_family = IWL_DEVICE_FAMILY_6000,
390 .max_inst_size = IWL60_RTC_INST_SIZE,
391 .max_data_size = IWL60_RTC_DATA_SIZE,
392 .nvm_ver = EEPROM_6000_EEPROM_VERSION,
393 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,
394 .base_params = &iwl6000_base_params,
395 .eeprom_params = &iwl6000_eeprom_params,
396 .ht_params = &iwl6000_ht_params,
397 .led_mode = IWL_LED_BLINK,
398};
399
/* Advertise each 6000-family firmware image at its newest tested API. */
400MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
401MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
402MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
403MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
deleted file mode 100644
index 82152311d73..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/cfg.h
+++ /dev/null
@@ -1,113 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_pci_h__
64#define __iwl_pci_h__
65
66
67/*
68 * This file declares the config structures for all devices.
69 */
70
71extern const struct iwl_cfg iwl5300_agn_cfg;
72extern const struct iwl_cfg iwl5100_agn_cfg;
73extern const struct iwl_cfg iwl5350_agn_cfg;
74extern const struct iwl_cfg iwl5100_bgn_cfg;
75extern const struct iwl_cfg iwl5100_abg_cfg;
76extern const struct iwl_cfg iwl5150_agn_cfg;
77extern const struct iwl_cfg iwl5150_abg_cfg;
78extern const struct iwl_cfg iwl6005_2agn_cfg;
79extern const struct iwl_cfg iwl6005_2abg_cfg;
80extern const struct iwl_cfg iwl6005_2bg_cfg;
81extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
82extern const struct iwl_cfg iwl6005_2agn_d_cfg;
83extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
84extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
85extern const struct iwl_cfg iwl1030_bgn_cfg;
86extern const struct iwl_cfg iwl1030_bg_cfg;
87extern const struct iwl_cfg iwl6030_2agn_cfg;
88extern const struct iwl_cfg iwl6030_2abg_cfg;
89extern const struct iwl_cfg iwl6030_2bgn_cfg;
90extern const struct iwl_cfg iwl6030_2bg_cfg;
91extern const struct iwl_cfg iwl6000i_2agn_cfg;
92extern const struct iwl_cfg iwl6000i_2abg_cfg;
93extern const struct iwl_cfg iwl6000i_2bg_cfg;
94extern const struct iwl_cfg iwl6000_3agn_cfg;
95extern const struct iwl_cfg iwl6050_2agn_cfg;
96extern const struct iwl_cfg iwl6050_2abg_cfg;
97extern const struct iwl_cfg iwl6150_bgn_cfg;
98extern const struct iwl_cfg iwl6150_bg_cfg;
99extern const struct iwl_cfg iwl1000_bgn_cfg;
100extern const struct iwl_cfg iwl1000_bg_cfg;
101extern const struct iwl_cfg iwl100_bgn_cfg;
102extern const struct iwl_cfg iwl100_bg_cfg;
103extern const struct iwl_cfg iwl130_bgn_cfg;
104extern const struct iwl_cfg iwl130_bg_cfg;
105extern const struct iwl_cfg iwl2000_2bgn_cfg;
106extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
107extern const struct iwl_cfg iwl2030_2bgn_cfg;
108extern const struct iwl_cfg iwl6035_2agn_cfg;
109extern const struct iwl_cfg iwl105_bgn_cfg;
110extern const struct iwl_cfg iwl105_bgn_d_cfg;
111extern const struct iwl_cfg iwl135_bgn_cfg;
112
113#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
deleted file mode 100644
index c2e141af353..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ /dev/null
@@ -1,377 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/pci-aspm.h>
69
70#include "iwl-trans.h"
71#include "iwl-drv.h"
72
73#include "cfg.h"
74#include "internal.h"
75
/* Build one pci_device_id entry: match Intel vendor + exact device and
 * subdevice ids (any subvendor) and stash the matching iwl_cfg pointer
 * in driver_data for iwl_pci_probe() to pick up. */
76#define IWL_PCI_DEVICE(dev, subdev, cfg) \
77 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
78 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
79 .driver_data = (kernel_ulong_t)&(cfg)
81/* Hardware specific file defines the PCI IDs table for that hardware module */
82static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
83 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
84 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
85 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
86 {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
87 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
88 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
89 {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
90 {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
91 {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
92 {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
93 {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
94 {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
95 {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
96 {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
97 {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
98 {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
99 {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
100 {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
101 {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
102 {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
103 {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
104 {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
105 {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
106 {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
107
108/* 5300 Series WiFi */
109 {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
110 {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
111 {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
112 {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
113 {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
114 {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
115 {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
116 {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
117 {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
118 {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
119 {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
120 {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
121
122/* 5350 Series WiFi/WiMax */
123 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
124 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
125 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
126
127/* 5150 Series Wifi/WiMax */
128 {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
129 {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
130 {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
131 {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
132 {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
133 {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
134
135 {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
136 {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
137 {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
138 {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
139
140/* 6x00 Series */
141 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
142 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
143 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
144 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
145 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
146 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
147 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
148 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
149 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
150 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
151
152/* 6x05 Series */
153 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
154 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
155 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
156 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
157 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
158 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
159 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
160 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
161 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
162 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
163 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
164 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
165
166/* 6x30 Series */
167 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
168 {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
169 {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
170 {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
171 {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
172 {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
173 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
174 {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
175 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
176 {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
177 {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
178 {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
179 {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
180 {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
181 {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
182 {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
183
184/* 6x50 WiFi/WiMax Series */
185 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
186 {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
187 {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
188 {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
189 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
190 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
191
192/* 6150 WiFi/WiMax Series */
193 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
194 {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
195 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
196 {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
197 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
198 {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
199
200/* 1000 Series WiFi */
201 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
202 {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
203 {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
204 {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
205 {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
206 {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
207 {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
208 {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
209 {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
210 {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
211 {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
212 {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
213
214/* 100 Series WiFi */
215 {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
216 {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
217 {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
218 {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
219 {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
220 {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
221
222/* 130 Series WiFi */
223 {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
224 {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
225 {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
226 {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
227 {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
228 {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
229
230/* 2x00 Series */
231 {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
232 {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
233 {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
234 {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
235
236/* 2x30 Series */
237 {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
238 {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
239 {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
240
241/* 6x35 Series */
242 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
243 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
244 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
245 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
246
247/* 105 Series */
248 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
249 {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
250 {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
251 {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
252
253/* 135 Series */
254 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
255 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
256 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
257
258 {0}
259};
260MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
261
262/* PCI registers */
263#define PCI_CFG_RETRY_TIMEOUT 0x041
264
265static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
266{
267 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
268 struct iwl_trans *iwl_trans;
269 struct iwl_trans_pcie *trans_pcie;
270 int ret;
271
272 iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
273 if (iwl_trans == NULL)
274 return -ENOMEM;
275
276 pci_set_drvdata(pdev, iwl_trans);
277
278 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
279 trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
280
281 if (IS_ERR_OR_NULL(trans_pcie->drv)) {
282 ret = PTR_ERR(trans_pcie->drv);
283 goto out_free_trans;
284 }
285
286 /* register transport layer debugfs here */
287 ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
288 if (ret)
289 goto out_free_drv;
290
291 return 0;
292
293out_free_drv:
294 iwl_drv_stop(trans_pcie->drv);
295out_free_trans:
296 iwl_trans_pcie_free(iwl_trans);
297 pci_set_drvdata(pdev, NULL);
298 return ret;
299}
300
/*
 * iwl_pci_remove() - PCI remove callback
 *
 * Tears down in reverse order of probe: stop the driver core first,
 * then free the PCIe transport, finally clear drvdata.  Order matters —
 * the driver core still uses the transport while stopping.
 */
301static void iwl_pci_remove(struct pci_dev *pdev)
302{
303 struct iwl_trans *trans = pci_get_drvdata(pdev);
304 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
305
306 iwl_drv_stop(trans_pcie->drv);
307 iwl_trans_pcie_free(trans);
308
309 pci_set_drvdata(pdev, NULL);
310}
311
312#ifdef CONFIG_PM_SLEEP
313
/* PM sleep hook: hand suspend straight off to the transport layer. */
static int iwl_pci_suspend(struct device *device)
{
	struct iwl_trans *trans = pci_get_drvdata(to_pci_dev(device));

	/* Before you put code here, think about WoWLAN. You cannot check here
	 * whether WoWLAN is enabled or not, and your code will run even if
	 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
	 */

	return iwl_trans_suspend(trans);
}
326
327static int iwl_pci_resume(struct device *device)
328{
329 struct pci_dev *pdev = to_pci_dev(device);
330 struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
331
332 /* Before you put code here, think about WoWLAN. You cannot check here
333 * whether WoWLAN is enabled or not, and your code will run even if
334 * WoWLAN is enabled - the NIC may be alive.
335 */
336
337 /*
338 * We disable the RETRY_TIMEOUT register (0x41) to keep
339 * PCI Tx retries from interfering with C3 CPU state.
340 */
341 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
342
343 return iwl_trans_resume(iwl_trans);
344}
345
346static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
347
348#define IWL_PM_OPS (&iwl_dev_pm_ops)
349
350#else
351
352#define IWL_PM_OPS NULL
353
354#endif
355
/* PCI driver glue: ID table, probe/remove callbacks and (when
 * CONFIG_PM_SLEEP) the suspend/resume PM ops. */
356static struct pci_driver iwl_pci_driver = {
357 .name = DRV_NAME,
358 .id_table = iwl_hw_card_ids,
359 .probe = iwl_pci_probe,
360 .remove = iwl_pci_remove,
361 .driver.pm = IWL_PM_OPS,
362};
363
364int __must_check iwl_pci_register_driver(void)
365{
366 int ret;
367 ret = pci_register_driver(&iwl_pci_driver);
368 if (ret)
369 pr_err("Unable to initialize PCI module\n");
370
371 return ret;
372}
373
/* Counterpart of iwl_pci_register_driver(). */
374void iwl_pci_unregister_driver(void)
375{
376 pci_unregister_driver(&iwl_pci_driver);
377}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
deleted file mode 100644
index d91d2e8c62f..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ /dev/null
@@ -1,451 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_trans_int_pcie_h__
30#define __iwl_trans_int_pcie_h__
31
32#include <linux/spinlock.h>
33#include <linux/interrupt.h>
34#include <linux/skbuff.h>
35#include <linux/wait.h>
36#include <linux/pci.h>
37#include <linux/timer.h>
38
39#include "iwl-fh.h"
40#include "iwl-csr.h"
41#include "iwl-trans.h"
42#include "iwl-debug.h"
43#include "iwl-io.h"
44#include "iwl-op-mode.h"
45
46struct iwl_host_cmd;
47
48/*This file includes the declaration that are internal to the
49 * trans_pcie layer */
50
/*
 * struct iwl_rx_mem_buffer - driver state for one RX buffer
 * @page_dma: bus address of the mapped page handed to the device
 * @page: page backing this RX buffer (NULL when nothing is allocated)
 * @list: links the buffer into rxq->rx_free or rxq->rx_used
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};
56
/**
 * struct isr_statistics - interrupt statistics
 *
 * Per-cause interrupt counters kept for debugging; the fields are named
 * after the interrupt causes they count (hw/sw error, error code seen,
 * scheduler, alive, rfkill, ctkill, wakeup, rx, tx, unhandled).
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
74
/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: all RX buffers owned by this queue, free and in-use alike
 * @queue: RBD-slot -> buffer mapping for buffers currently on the ring
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: last write index handed to the HW (rounded to multiple of 8)
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the indexes above and the rx_free/rx_used lists
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};
110
/* One coherent DMA allocation: bus address, CPU virtual address and size */
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};
116
/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
	return (index + 1) & (n_bd - 1);
}
126
/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
	return (index - 1) & (n_bd - 1);
}
136
/* Per-command driver metadata kept alongside each host command */
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;

	/* DMA bookkeeping so the command buffer can be unmapped later */
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);

	u32 flags;
};
146
/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between n_bd and n_window: the hardware
 * always assumes 256 descriptors, so n_bd is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
 * the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlayed over the HW queue.
 */
struct iwl_queue {
	int n_bd;              /* number of BDs in this queue */
	int write_ptr;       /* 1-st empty entry (index) host_w*/
	int read_ptr;         /* last used entry (index) host_r*/
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;   /* physical addr for BD's */
	int n_window;	       /* safe queue window */
	u32 id;		       /* queue number */
	int low_mark;	       /* low watermark, resume queue if free
				* space more than this */
	int high_mark;         /* high watermark, stop queue if free
				* space less than this */
};
181
/* SW window sizes: 256 entries for data TX queues, 32 for the command queue
 * (see the n_bd/n_window discussion on struct iwl_queue above) */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/* Per-TFD software state: the command/skb occupying the slot plus metadata */
struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct iwl_device_cmd *copy_cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};
193
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * NOTE: the software @entries cover only the n_window slice of the 256 HW
 * TFDs; see the struct iwl_queue description above.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	u8 need_update;
	u8 active;
};
218
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @drv - pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @irq - the irq number for the device
 * @irq_requested: true when the irq has been requested
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @status - transport specific status flags
 * @cmd_queue - command queue number
 * @rx_buf_size_8k: 8 kB RX buffer size
 * @rx_page_order: page order for receive buffer size
 * @wd_timeout: queue watchdog timeout (jiffies)
 */
struct iwl_trans_pcie {
	struct iwl_rxq rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	/* INT ICT Table (see the ICT helpers declared below) */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	u32 inta;
	bool use_ict;
	bool irq_requested;
	struct tasklet_struct irq_tasklet;
	struct isr_statistics isr_stats;

	unsigned int irq;
	spinlock_t irq_lock;
	u32 inta_mask;		/* written to CSR_INT_MASK when enabling irqs */
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;	/* array of TX queues */
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;

	/* bits from enum iwl_pcie_status */
	unsigned long status;
	u8 cmd_queue;
	u8 cmd_fifo;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	bool rx_buf_size_8k;
	u32 rx_page_order;

	/* command id -> name table used by get_cmd_string(); may be NULL */
	const char **command_names;

	/* queue watchdog */
	unsigned long wd_timeout;
};
289
/**
 * enum iwl_pcie_status: status of the PCIe transport
 * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
 * @STATUS_FW_ERROR: the fw is in error state
 *
 * Used as bit numbers in iwl_trans_pcie->status (set_bit/test_bit).
 */
enum iwl_pcie_status {
	STATUS_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL,
	STATUS_FW_ERROR,
};
307
/* Fetch the PCIe-private area embedded in the generic transport */
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))

/* Inverse of IWL_TRANS_GET_PCIE_TRANS: recover the generic transport */
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
317
318/*
319 * Convention: trans API functions: iwl_trans_pcie_XXX
320 * Other functions: iwl_pcie_XXX
321 */
322struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
323 const struct pci_device_id *ent,
324 const struct iwl_cfg *cfg);
325void iwl_trans_pcie_free(struct iwl_trans *trans);
326
327/*****************************************************
328* RX
329******************************************************/
330int iwl_pcie_rx_init(struct iwl_trans *trans);
331void iwl_pcie_tasklet(struct iwl_trans *trans);
332int iwl_pcie_rx_stop(struct iwl_trans *trans);
333void iwl_pcie_rx_free(struct iwl_trans *trans);
334
335/*****************************************************
336* ICT - interrupt handling
337******************************************************/
338irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
339int iwl_pcie_alloc_ict(struct iwl_trans *trans);
340void iwl_pcie_free_ict(struct iwl_trans *trans);
341void iwl_pcie_reset_ict(struct iwl_trans *trans);
342void iwl_pcie_disable_ict(struct iwl_trans *trans);
343
344/*****************************************************
345* TX / HCMD
346******************************************************/
347int iwl_pcie_tx_init(struct iwl_trans *trans);
348void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
349int iwl_pcie_tx_stop(struct iwl_trans *trans);
350void iwl_pcie_tx_free(struct iwl_trans *trans);
351void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
352 int sta_id, int tid, int frame_limit, u16 ssn);
353void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
354int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
355 struct iwl_device_cmd *dev_cmd, int txq_id);
356void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
357int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
358void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
359 struct iwl_rx_cmd_buffer *rxb, int handler_status);
360void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
361 struct sk_buff_head *skbs);
362/*****************************************************
363* Error handling
364******************************************************/
365int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
366void iwl_pcie_dump_csr(struct iwl_trans *trans);
367
368/*****************************************************
369* Helpers
370******************************************************/
/* Mask all device interrupts and ack anything already pending */
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
385
/* Re-enable the full interrupt mask recorded in trans_pcie->inta_mask */
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
394
/* Enable ONLY the RF-kill interrupt (note: does not set STATUS_INT_ENABLED) */
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
400
/* Restart a previously stopped HW queue; no-op if it wasn't stopped.
 * The op_mode is notified only on the stopped -> running transition. */
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}
411
412static inline void iwl_stop_queue(struct iwl_trans *trans,
413 struct iwl_txq *txq)
414{
415 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
416
417 if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
418 iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
419 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
420 } else
421 IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
422 txq->q.id);
423}
424
425static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
426{
427 return q->write_ptr >= q->read_ptr ?
428 (i >= q->read_ptr && i < q->write_ptr) :
429 !(i < q->read_ptr && i >= q->write_ptr);
430}
431
432static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
433{
434 return index & (q->n_window - 1);
435}
436
437static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
438 u8 cmd)
439{
440 if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
441 return "UNKNOWN";
442 return trans_pcie->command_names[cmd];
443}
444
/* Read the HW RF-kill switch position from the GP control register;
 * true means the switch is in the KILL position (bit cleared). */
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
450
451#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
deleted file mode 100644
index 8389cd38338..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ /dev/null
@@ -1,1297 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/sched.h>
30#include <linux/wait.h>
31#include <linux/gfp.h>
32
33#include "iwl-prph.h"
34#include "iwl-io.h"
35#include "internal.h"
36#include "iwl-op-mode.h"
37
38/******************************************************************************
39 *
40 * RX path functions
41 *
42 ******************************************************************************/
43
44/*
45 * Rx theory of operation
46 *
47 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
48 * each of which point to Receive Buffers to be filled by the NIC. These get
49 * used not only for Rx frames, but for any command response or notification
50 * from the NIC. The driver and NIC manage the Rx buffers by means
51 * of indexes into the circular buffer.
52 *
53 * Rx Queue Indexes
54 * The host/firmware share two index registers for managing the Rx buffers.
55 *
56 * The READ index maps to the first position that the firmware may be writing
57 * to -- the driver can read up to (but not including) this position and get
58 * good data.
59 * The READ index is managed by the firmware once the card is enabled.
60 *
61 * The WRITE index maps to the last position the driver has read from -- the
62 * position preceding WRITE is the last slot the firmware can place a packet.
63 *
64 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
65 * WRITE = READ.
66 *
67 * During initialization, the host sets up the READ queue position to the first
68 * INDEX position, and WRITE to the last (READ - 1 wrapped)
69 *
70 * When the firmware places a packet in a buffer, it will advance the READ index
71 * and fire the RX interrupt. The driver can then query the READ index and
72 * process as many packets as possible, moving the WRITE index forward as it
73 * resets the Rx queue buffers with new memory.
74 *
75 * The management in the driver is as follows:
76 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
77 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
78 * to replenish the iwl->rxq->rx_free.
79 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
80 * iwl->rxq is replenished and the READ INDEX is updated (updating the
81 * 'processed' and 'read' driver indexes as well)
82 * + A received packet is processed and handed to the kernel network stack,
83 * detached from the iwl->rxq. The driver 'processed' index is updated.
84 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
85 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
86 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
87 * were enough free buffers and RX_STALLED is set it is cleared.
88 *
89 *
90 * Driver sequence:
91 *
92 * iwl_rxq_alloc() Allocates rx_free
93 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
94 * iwl_pcie_rxq_restock
95 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
96 * queue, updates firmware pointers, and updates
97 * the WRITE index. If insufficient rx_free buffers
98 * are available, schedules iwl_pcie_rx_replenish
99 *
100 * -- enable interrupts --
101 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
102 * READ INDEX, detaching the SKB from the pool.
103 * Moves the packet buffer from queue to rx_used.
104 * Calls iwl_pcie_rxq_restock to refill any empty
105 * slots.
106 * ...
107 *
108 */
109
110/*
111 * iwl_rxq_space - Return number of free slots available in queue.
112 */
113static int iwl_rxq_space(const struct iwl_rxq *q)
114{
115 int s = q->read - q->write;
116 if (s <= 0)
117 s += RX_QUEUE_SIZE;
118 /* keep some buffer to not confuse full and empty queue */
119 s -= 2;
120 if (s < 0)
121 s = 0;
122 return s;
123}
124
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 *
 * The device addresses RBs in 256-byte units, hence the >> 8.
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
132
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 *
 * Disables the RX DMA channel and polls (up to 1000 us) for the channel
 * to report idle. Returns the poll result (negative on timeout).
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
142
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 *
 * Tells the device the new write index (always rounded down to a multiple
 * of 8, as the HW requires). When shadow registers are not available and
 * the device may be in power-save, it must first be woken via
 * CSR_GP_CNTRL; in that case the update is deferred (need_update stays
 * set) until the next call. No-op unless q->need_update is set.
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
195
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts / tasklets
	 * because we have to (see comment there). On the other hand, since
	 * the APM is stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}
257
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 *
 * @priority: GFP_KERNEL from the replenish worker, GFP_ATOMIC from irq
 * context. The page is allocated with the lock dropped, so the rx_used
 * list is re-checked after each allocation.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* plenty of buffers left: no point warning on failure */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* someone else may have emptied rx_used while unlocked */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_irqsave(&rxq->lock, flags);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
351
/* Unmap and free every page in the RX pool, then move all pool entries
 * onto rx_used so they can be re-allocated later. */
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
373
/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 * and therefore may sleep (GFP_KERNEL allocation).
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
393
/* Atomic-context variant of iwl_pcie_rx_replenish: GFP_ATOMIC allocation
 * and restock without taking irq_lock. */
static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}
400
/* Workqueue trampoline for iwl_pcie_rx_replenish (see rx_replenish work) */
static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}
408
/*
 * Allocate the coherent DMA memory backing the RX queue: the RBD circular
 * buffer and the receive-buffer status area.
 *
 * Returns 0 on success, -EINVAL if already allocated, -ENOMEM on failure
 * (with any partial allocation rolled back via the goto cleanup path).
 */
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/*Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
444
/*
 * Program the RX DMA channel: stop it, point the device at the RBD ring
 * and status area, then enable it with the configured RB size, timeout
 * and interrupt coalescing.
 */
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size|
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
489
490int iwl_pcie_rx_init(struct iwl_trans *trans)
491{
492 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
493 struct iwl_rxq *rxq = &trans_pcie->rxq;
494
495 int i, err;
496 unsigned long flags;
497
498 if (!rxq->bd) {
499 err = iwl_pcie_rx_alloc(trans);
500 if (err)
501 return err;
502 }
503
504 spin_lock_irqsave(&rxq->lock, flags);
505 INIT_LIST_HEAD(&rxq->rx_free);
506 INIT_LIST_HEAD(&rxq->rx_used);
507
508 INIT_WORK(&trans_pcie->rx_replenish,
509 iwl_pcie_rx_replenish_work);
510
511 iwl_pcie_rxq_free_rbs(trans);
512
513 for (i = 0; i < RX_QUEUE_SIZE; i++)
514 rxq->queue[i] = NULL;
515
516 /* Set us so that we have processed and used all buffers, but have
517 * not restocked the Rx queue with fresh buffers */
518 rxq->read = rxq->write = 0;
519 rxq->write_actual = 0;
520 rxq->free_count = 0;
521 spin_unlock_irqrestore(&rxq->lock, flags);
522
523 iwl_pcie_rx_replenish(trans);
524
525 iwl_pcie_rx_hw_init(trans, rxq);
526
527 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
528 rxq->need_update = 1;
529 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
530 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
531
532 return 0;
533}
534
535void iwl_pcie_rx_free(struct iwl_trans *trans)
536{
537 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
538 struct iwl_rxq *rxq = &trans_pcie->rxq;
539 unsigned long flags;
540
541 /*if rxq->bd is NULL, it means that nothing has been allocated,
542 * exit now */
543 if (!rxq->bd) {
544 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
545 return;
546 }
547
548 spin_lock_irqsave(&rxq->lock, flags);
549 iwl_pcie_rxq_free_rbs(trans);
550 spin_unlock_irqrestore(&rxq->lock, flags);
551
552 dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
553 rxq->bd, rxq->bd_dma);
554 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
555 rxq->bd = NULL;
556
557 if (rxq->rb_stts)
558 dma_free_coherent(trans->dev,
559 sizeof(struct iwl_rb_status),
560 rxq->rb_stts, rxq->rb_stts_dma);
561 else
562 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
563 memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
564 rxq->rb_stts = NULL;
565}
566
/*
 * iwl_pcie_rx_handle_rb - process all packets packed into one Rx buffer page
 *
 * Unmaps the page from DMA, walks every firmware packet it contains and
 * hands each one to the op_mode; host-command responses additionally have
 * their Tx-queue entries reclaimed.  Afterwards the page is either re-mapped
 * and returned to rx_free, or (if a handler stole it, or re-mapping failed)
 * dropped onto rx_used to be replaced later.
 */
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	/* command responses are reclaimed against the command queue */
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	/* give the CPU ownership of the page before reading it */
	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	/* a page may hold several packets; stop when the next header
	 * could no longer fit inside the buffer */
	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		/* an invalid frame marks the end of the packed packets */
		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			/* some commands are explicitly excluded from reclaim */
			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		/* map the firmware sequence number back to our queue slot */
		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim) {
			struct iwl_pcie_txq_entry *ent;
			ent = &txq->entries[cmd_index];
			cmd = ent->copy_cmd;
			/* a copy should exist whenever the caller asked for it */
			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
		} else {
			cmd = NULL;
		}

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			/* The original command isn't needed any more */
			kfree(txq->entries[cmd_index].copy_cmd);
			txq->entries[cmd_index].copy_cmd = NULL;
			/* nor is the duplicated part of the command */
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		/* advance to the next packet in the same page */
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
705
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 *
 * Walks the Rx queue from the driver's read index up to the closed_rb_num
 * index the firmware published in the DMA'd status block, processing each
 * filled RB, then restocks the queue.
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	/* starts at 8 so the first pass through the fill_rx branch below
	 * triggers an immediate replenish */
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* replenish inside the loop only when the queue is running low */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			/* replenish every 8 processed RBs */
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}
765
766/*
767 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
768 */
769static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
770{
771 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
772
773 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
774 if (trans->cfg->internal_wimax_coex &&
775 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
776 APMS_CLK_VAL_MRB_FUNC_MODE) ||
777 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
778 APMG_PS_CTRL_VAL_RESET_REQ))) {
779 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
780 iwl_op_mode_wimax_active(trans->op_mode);
781 wake_up(&trans_pcie->wait_command_queue);
782 return;
783 }
784
785 iwl_pcie_dump_csr(trans);
786 iwl_pcie_dump_fh(trans, NULL);
787
788 set_bit(STATUS_FW_ERROR, &trans_pcie->status);
789 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
790 wake_up(&trans_pcie->wait_command_queue);
791
792 iwl_op_mode_nic_error(trans->op_mode);
793}
794
/*
 * iwl_pcie_tasklet - interrupt bottom half
 *
 * Services the interrupt causes accumulated in trans_pcie->inta by the
 * ISR: hardware/software errors, RF-kill, CT-kill, wakeup, Rx and the
 * ucode-load Tx channel.  Re-enables interrupts on exit unless a fatal
 * hardware error was seen.
 */
void iwl_pcie_tasklet(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, inta_mask);
	}
#endif

	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		/* NOTE(review): handled is updated but the function returns
		 * immediately, so this assignment has no further effect */
		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			/* abort any synchronous host command in flight */
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		/* resync all queue write pointers with the awake device */
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending RX interrupt require many steps to be done in the
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to RX race, driver could receive RX interrupt
		 * but the shared data changes does not reflect this;
		 * periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
}
1002
1003/******************************************************************************
1004 *
1005 * ICT functions
1006 *
1007 ******************************************************************************/
1008
1009/* a device (PCI-E) page is 4096 bytes long */
1010#define ICT_SHIFT 12
1011#define ICT_SIZE (1 << ICT_SHIFT)
1012#define ICT_COUNT (ICT_SIZE / sizeof(u32))
1013
1014/* Free dram table */
1015void iwl_pcie_free_ict(struct iwl_trans *trans)
1016{
1017 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1018
1019 if (trans_pcie->ict_tbl) {
1020 dma_free_coherent(trans->dev, ICT_SIZE,
1021 trans_pcie->ict_tbl,
1022 trans_pcie->ict_tbl_dma);
1023 trans_pcie->ict_tbl = NULL;
1024 trans_pcie->ict_tbl_dma = 0;
1025 }
1026}
1027
1028/*
1029 * allocate dram shared table, it is an aligned memory
1030 * block of ICT_SIZE.
1031 * also reset all data related to ICT table interrupt.
1032 */
1033int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1034{
1035 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1036
1037 trans_pcie->ict_tbl =
1038 dma_alloc_coherent(trans->dev, ICT_SIZE,
1039 &trans_pcie->ict_tbl_dma,
1040 GFP_KERNEL);
1041 if (!trans_pcie->ict_tbl)
1042 return -ENOMEM;
1043
1044 /* just an API sanity check ... it is guaranteed to be aligned */
1045 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1046 iwl_pcie_free_ict(trans);
1047 return -EINVAL;
1048 }
1049
1050 IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
1051 (unsigned long long)trans_pcie->ict_tbl_dma);
1052
1053 IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
1054
1055 /* reset table and index to all 0 */
1056 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
1057 trans_pcie->ict_index = 0;
1058
1059 /* add periodic RX interrupt */
1060 trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
1061 return 0;
1062}
1063
/* Device is going up inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	/* nothing to do if the ICT table was never allocated */
	if (!trans_pcie->ict_tbl)
		return;

	/* reprogram the table with interrupts disabled */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	/* table base address is page-aligned, so the low ICT_SHIFT bits
	 * are free to carry the enable/wrap-check flags */
	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	/* ack any stale causes before re-enabling */
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
1095
1096/* Device is going down disable ict interrupt usage */
1097void iwl_pcie_disable_ict(struct iwl_trans *trans)
1098{
1099 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1100 unsigned long flags;
1101
1102 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1103 trans_pcie->use_ict = false;
1104 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1105}
1106
/* legacy (non-ICT) ISR.  Assumes that trans_pcie->irq_lock is held.
 *
 * Reads pending causes directly from CSR_INT, masks further interrupts,
 * and schedules the tasklet to service them.  Returns IRQ_NONE when the
 * interrupt was not ours (shared IRQ line) or spurious. */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 *    back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* ack and drop any causes we never asked for */
	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	/* accumulate causes for the tasklet to consume */
	trans_pcie->inta |= inta;
	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq  and no schedules tasklet. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}
1180
/* interrupt handler using ict table, with this interrupt driver will
 * stop using INTA register to get device's interrupt, reading this register
 * is expensive, device will write interrupts in ICT dram table, increment
 * index then will fire interrupt to driver, driver will OR all ICT table
 * entries from current index up to table entry with 0 value. the result is
 * the interrupt we need to service, driver will set the entries back to 0 and
 * set index.
 */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	/* shared IRQ line may call us before the device is registered */
	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		/* clear the consumed entry and advance (with wrap) */
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In order words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	/* expand the packed ICT value back into CSR_INT bit positions */
	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
		      inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Allow interrupt if was disabled by this handler and
		 * no tasklet was schedules, We should not enable interrupt,
		 * tasklet will enable it.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
deleted file mode 100644
index 35708b959ad..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ /dev/null
@@ -1,1358 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/pci.h>
64#include <linux/pci-aspm.h>
65#include <linux/interrupt.h>
66#include <linux/debugfs.h>
67#include <linux/sched.h>
68#include <linux/bitops.h>
69#include <linux/gfp.h>
70
71#include "iwl-drv.h"
72#include "iwl-trans.h"
73#include "iwl-csr.h"
74#include "iwl-prph.h"
75#include "iwl-agn-hw.h"
76#include "internal.h"
77
78static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
79{
80/*
81 * (for documentation purposes)
82 * to set power to V_AUX, do:
83
84 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
85 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
86 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
87 ~APMG_PS_CTRL_MSK_PWR_SRC);
88 */
89
90 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
91 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
92 ~APMG_PS_CTRL_MSK_PWR_SRC);
93}
94
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

/*
 * Configure PCIe ASPM (L0S vs L1) for this device and record whether
 * platform power management is usable (trans->pm_support).
 */
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 * costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
	}
	/* pm_support is set exactly when BIOS/OS left L0S disabled */
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}
123
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 *
 * Returns 0 on success, or the negative iwl_poll_bit() result if the
 * MAC clock never became ready.  On success the STATUS_DEVICE_ENABLED
 * bit is set in trans_pcie->status.
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}
206
207static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
208{
209 int ret = 0;
210
211 /* stop device's busmaster DMA activity */
212 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
213
214 ret = iwl_poll_bit(trans, CSR_RESET,
215 CSR_RESET_REG_FLAG_MASTER_DISABLED,
216 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
217 if (ret)
218 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
219
220 IWL_DEBUG_INFO(trans, "stop master\n");
221
222 return ret;
223}
224
/*
 * Shut the NIC down to its lowest power state: stop DMA, issue a full
 * software reset, and drop the adapter back from D0A to D0U.
 * Inverse of iwl_pcie_apm_init().
 */
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	/* mark the device disabled before touching the hardware */
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
247
/*
 * Bring the NIC to an operational state: APM init, power source,
 * op-mode specific config, and RX/TX queue setup.
 *
 * Returns 0 on success, -ENOMEM if TX queue allocation fails.
 */
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init: APM init and coalescing setup run under irq_lock */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	/* NOTE(review): the iwl_pcie_rx_init() return value is ignored
	 * here — confirm whether its failure should propagate. */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
281
282#define HW_READY_TIMEOUT (50)
283
284/* Note: returns poll_bit return value, which is >= 0 if success */
285static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
286{
287 int ret;
288
289 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
290 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
291
292 /* See if we got it */
293 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
294 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
295 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
296 HW_READY_TIMEOUT);
297
298 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
299 return ret;
300}
301
302/* Note: returns standard 0/-ERROR code */
303static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
304{
305 int ret;
306 int t = 0;
307
308 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
309
310 ret = iwl_pcie_set_hw_ready(trans);
311 /* If the card is ready, exit 0 */
312 if (ret >= 0)
313 return 0;
314
315 /* If HW is not ready, prepare the conditions to check again */
316 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
317 CSR_HW_IF_CONFIG_REG_PREPARE);
318
319 do {
320 ret = iwl_pcie_set_hw_ready(trans);
321 if (ret >= 0)
322 return 0;
323
324 usleep_range(200, 1000);
325 t += 200;
326 } while (t < 150000);
327
328 return ret;
329}
330
/*
 * ucode
 */

/*
 * DMA one firmware chunk into device SRAM over the FH service channel
 * and wait (up to 5 s) for the "write complete" interrupt.
 *
 * @dst_addr: destination address in device SRAM
 * @phy_addr: DMA address of the host-side bounce buffer
 * @byte_cnt: number of bytes to transfer
 *
 * Returns 0 on success, -ETIMEDOUT if the completion never arrives.
 */
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	/* pause the channel before reprogramming it */
	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	/* low 32 bits of the source DMA address */
	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* high DMA address bits plus the byte count */
	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* kick off the transfer; completion raises an interrupt */
	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
380
381static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
382 const struct fw_desc *section)
383{
384 u8 *v_addr;
385 dma_addr_t p_addr;
386 u32 offset;
387 int ret = 0;
388
389 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
390 section_num);
391
392 v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
393 if (!v_addr)
394 return -ENOMEM;
395
396 for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
397 u32 copy_size;
398
399 copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
400
401 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
402 ret = iwl_pcie_load_firmware_chunk(trans,
403 section->offset + offset,
404 p_addr, copy_size);
405 if (ret) {
406 IWL_ERR(trans,
407 "Could not load the [%d] uCode section\n",
408 section_num);
409 break;
410 }
411 }
412
413 dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
414 return ret;
415}
416
417static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
418 const struct fw_img *image)
419{
420 int i, ret = 0;
421
422 for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
423 if (!image->sec[i].data)
424 break;
425
426 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
427 if (ret)
428 return ret;
429 }
430
431 /* Remove all resets to allow NIC to operate */
432 iwl_write32(trans, CSR_RESET, 0);
433
434 return 0;
435}
436
/*
 * Transport .start_fw hook: prepare the HW, init the NIC, clear and
 * enable interrupts, and load the firmware image.
 *
 * Returns 0 on success; -EIO when the HW never becomes ready (e.g.
 * AMT owns the device), -ERFKILL when the RF-kill switch is on, or a
 * negative error from NIC init / firmware loading.
 */
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill)
		return -ERFKILL;

	/* ack any pending interrupt before initializing the NIC */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_pcie_load_given_ucode(trans, fw);
}
484
/*
 * Transport .fw_alive hook: firmware reported ALIVE — re-arm the ICT
 * interrupt table, then start the TX scheduler at @scd_addr.
 * Ordering matters: ICT must be reset before TX is started.
 */
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
490
/*
 * Transport .stop_device hook: quiesce interrupts, stop TX/RX DMA,
 * power the APM down, flush pending IRQ work, and clear all transport
 * status bits.  Safe to call more than once (e.g. during a HW restart
 * that races with firmware load).
 */
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* keep the RF-kill interrupt alive so state changes are seen */
	iwl_enable_rfkill_int(trans);

	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	clear_bit(STATUS_RFKILL, &trans_pcie->status);
}
553
/*
 * Transport WoWLAN suspend hook: tell the ucode D3 configuration is
 * complete, then stop host interrupts and release the MAC-access
 * request so the device can sleep while the ucode runs on its own.
 */
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
564
/*
 * Transport .start_hw hook: set up the ISR tasklet, ICT table and IRQ
 * line (first call only), prepare the card, init the APM, and report
 * the initial RF-kill state to the op_mode.
 *
 * Returns 0 on success or a negative error; on failure the IRQ/ICT
 * resources acquired here are released again.
 */
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	/* one-time IRQ/ICT setup, kept across later start/stop cycles */
	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			iwl_pcie_tasklet, (unsigned long)trans);

		iwl_pcie_alloc_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
				  IRQF_SHARED, DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		trans_pcie->irq_requested = true;
	}

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		goto err_free_irq;
	}

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	trans_pcie->irq_requested = false;
	free_irq(trans_pcie->irq, trans);
error:
	iwl_pcie_free_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}
614
/*
 * Transport .stop_hw hook: disable interrupts, stop the APM and the
 * ICT table.  Unless the op_mode is going away entirely, re-enable the
 * RF-kill interrupt and re-check the switch state (it may have changed
 * while interrupts were off).
 */
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_apm_stop(trans);

	/* stopping the APM may have raised an interrupt; disable again */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_disable_ict(trans);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}
651
652static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
653{
654 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
655}
656
657static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
658{
659 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
660}
661
662static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
663{
664 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
665}
666
667static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
668{
669 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
670 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
671}
672
673static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
674 u32 val)
675{
676 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
677 ((addr & 0x0000FFFF) | (3 << 24)));
678 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
679}
680
681static void iwl_trans_pcie_configure(struct iwl_trans *trans,
682 const struct iwl_trans_config *trans_cfg)
683{
684 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
685
686 trans_pcie->cmd_queue = trans_cfg->cmd_queue;
687 trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
688 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
689 trans_pcie->n_no_reclaim_cmds = 0;
690 else
691 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
692 if (trans_pcie->n_no_reclaim_cmds)
693 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
694 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
695
696 trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
697 if (trans_pcie->rx_buf_size_8k)
698 trans_pcie->rx_page_order = get_order(8 * 1024);
699 else
700 trans_pcie->rx_page_order = get_order(4 * 1024);
701
702 trans_pcie->wd_timeout =
703 msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
704
705 trans_pcie->command_names = trans_cfg->command_names;
706}
707
708void iwl_trans_pcie_free(struct iwl_trans *trans)
709{
710 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
711
712 iwl_pcie_tx_free(trans);
713 iwl_pcie_rx_free(trans);
714
715 if (trans_pcie->irq_requested == true) {
716 free_irq(trans_pcie->irq, trans);
717 iwl_pcie_free_ict(trans);
718 }
719
720 pci_disable_msi(trans_pcie->pci_dev);
721 iounmap(trans_pcie->hw_base);
722 pci_release_regions(trans_pcie->pci_dev);
723 pci_disable_device(trans_pcie->pci_dev);
724 kmem_cache_destroy(trans->dev_cmd_pool);
725
726 kfree(trans);
727}
728
729static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
730{
731 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
732
733 if (state)
734 set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
735 else
736 clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
737}
738
#ifdef CONFIG_PM_SLEEP
/* System suspend hook: nothing to do, the device is already stopped. */
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

/*
 * System resume hook: re-arm the RF-kill interrupt, report the current
 * switch state to the op_mode, and re-enable interrupts only when the
 * radio is not killed.
 */
static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (!hw_rfkill)
		iwl_enable_interrupts(trans);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
760
/* total time budget for flushing all TX queues, in msecs */
#define IWL_FLUSH_WAIT_MS	2000

/*
 * Wait for every TX queue (except the command queue) to drain.
 *
 * Returns 0 when all queues emptied, -ETIMEDOUT otherwise.  Note that
 * @now is captured once, so IWL_FLUSH_WAIT_MS bounds the wait for ALL
 * queues combined, not per queue.
 */
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		/* poll until the queue drains or the deadline passes */
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}
790
/* Map an FH register address to its symbolic name, for debug dumps. */
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
809
/*
 * Dump the FH (flow handler) registers.
 *
 * With CONFIG_IWLWIFI_DEBUGFS and a non-NULL @buf, allocates a buffer
 * (caller frees) and returns the number of bytes written; otherwise
 * logs the registers via IWL_ERR and returns 0.  -ENOMEM on allocation
 * failure.
 */
int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
{
	int i;
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (buf) {
		int pos = 0;
		/* 48 chars per register line plus the header */
		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");

		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));

		return pos;
	}
#endif

	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));

	return 0;
}
855
/* Map a CSR register address to its symbolic name, for debug dumps. */
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
888
/* Log the current value of every interesting CSR register. */
void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
926
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
/* Register one debugfs file; on failure jump to the caller's err label. */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
/* Forward declaration of the read handler for file "name". */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
					char __user *user_buf,		\
					size_t count, loff_t *ppos);

/* Forward declaration of the write handler for file "name". */
#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

/* file_operations for a read-only debugfs file */
#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

/* file_operations for a write-only debugfs file */
#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

/* file_operations for a read/write debugfs file */
#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
971
972static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
973 char __user *user_buf,
974 size_t count, loff_t *ppos)
975{
976 struct iwl_trans *trans = file->private_data;
977 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
978 struct iwl_txq *txq;
979 struct iwl_queue *q;
980 char *buf;
981 int pos = 0;
982 int cnt;
983 int ret;
984 size_t bufsz;
985
986 bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
987
988 if (!trans_pcie->txq)
989 return -EAGAIN;
990
991 buf = kzalloc(bufsz, GFP_KERNEL);
992 if (!buf)
993 return -ENOMEM;
994
995 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
996 txq = &trans_pcie->txq[cnt];
997 q = &txq->q;
998 pos += scnprintf(buf + pos, bufsz - pos,
999 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
1000 cnt, q->read_ptr, q->write_ptr,
1001 !!test_bit(cnt, trans_pcie->queue_used),
1002 !!test_bit(cnt, trans_pcie->queue_stopped));
1003 }
1004 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1005 kfree(buf);
1006 return ret;
1007}
1008
1009static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1010 char __user *user_buf,
1011 size_t count, loff_t *ppos)
1012{
1013 struct iwl_trans *trans = file->private_data;
1014 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1015 struct iwl_rxq *rxq = &trans_pcie->rxq;
1016 char buf[256];
1017 int pos = 0;
1018 const size_t bufsz = sizeof(buf);
1019
1020 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1021 rxq->read);
1022 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1023 rxq->write);
1024 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1025 rxq->free_count);
1026 if (rxq->rb_stts) {
1027 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1028 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1029 } else {
1030 pos += scnprintf(buf + pos, bufsz - pos,
1031 "closed_rb_num: Not Allocated\n");
1032 }
1033 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1034}
1035
/*
 * debugfs read: format the accumulated interrupt statistics
 * (trans_pcie->isr_stats) into a report for userspace.
 */
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	/* the error code is only meaningful after at least one error */
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1093
1094static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1095 const char __user *user_buf,
1096 size_t count, loff_t *ppos)
1097{
1098 struct iwl_trans *trans = file->private_data;
1099 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1100 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1101
1102 char buf[8];
1103 int buf_size;
1104 u32 reset_flag;
1105
1106 memset(buf, 0, sizeof(buf));
1107 buf_size = min(count, sizeof(buf) - 1);
1108 if (copy_from_user(buf, user_buf, buf_size))
1109 return -EFAULT;
1110 if (sscanf(buf, "%x", &reset_flag) != 1)
1111 return -EFAULT;
1112 if (reset_flag == 0)
1113 memset(isr_stats, 0, sizeof(*isr_stats));
1114
1115 return count;
1116}
1117
/*
 * iwl_dbgfs_csr_write - debugfs write handler triggering a CSR dump
 *
 * Requires a parsable decimal integer as a simple input gate; the parsed
 * value itself is never used below -- iwl_pcie_dump_csr() takes no
 * register argument. Returns @count on success or a negative errno.
 */
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	/* NOTE(review): csr is parsed but unused -- input validation only */
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}
1138
1139static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1140 char __user *user_buf,
1141 size_t count, loff_t *ppos)
1142{
1143 struct iwl_trans *trans = file->private_data;
1144 char *buf = NULL;
1145 int pos = 0;
1146 ssize_t ret = -EFAULT;
1147
1148 ret = pos = iwl_pcie_dump_fh(trans, &buf);
1149 if (buf) {
1150 ret = simple_read_from_buffer(user_buf,
1151 count, ppos, buf, pos);
1152 kfree(buf);
1153 }
1154
1155 return ret;
1156}
1157
/*
 * iwl_dbgfs_fw_restart_write - debugfs hook forcing a firmware restart
 *
 * Any write triggers the op-mode's NIC-error path. Fails with -EAGAIN
 * while no op_mode is bound to the transport yet.
 */
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	/* NOTE(review): BHs are disabled around the call, presumably to
	 * match the softirq context of the real error path -- confirm */
	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();

	return count;
}
1173
/* file_operations instances for the debugfs entries registered below */
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);
1180
/*
 * Create the debugfs files and directories
 *
 * Returns 0 on success, -ENOMEM when any entry fails to be created.
 * NOTE(review): the err label looks unreachable but DEBUGFS_ADD_FILE()
 * presumably expands to a goto err on failure -- its definition is not
 * in this chunk, confirm before changing the label.
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
/* stub when CONFIG_IWLWIFI_DEBUGFS is not set: no entries are created */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
1207
/* PCIe implementation of the iwl_trans_ops vtable, installed on every
 * transport returned by iwl_trans_pcie_alloc() */
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

	/* suspend/resume only exist when the kernel supports PM sleep */
#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};
1241
1242struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1243 const struct pci_device_id *ent,
1244 const struct iwl_cfg *cfg)
1245{
1246 struct iwl_trans_pcie *trans_pcie;
1247 struct iwl_trans *trans;
1248 u16 pci_cmd;
1249 int err;
1250
1251 trans = kzalloc(sizeof(struct iwl_trans) +
1252 sizeof(struct iwl_trans_pcie), GFP_KERNEL);
1253
1254 if (!trans)
1255 return NULL;
1256
1257 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1258
1259 trans->ops = &trans_ops_pcie;
1260 trans->cfg = cfg;
1261 trans_pcie->trans = trans;
1262 spin_lock_init(&trans_pcie->irq_lock);
1263 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
1264
1265 /* W/A - seems to solve weird behavior. We need to remove this if we
1266 * don't want to stay in L1 all the time. This wastes a lot of power */
1267 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1268 PCIE_LINK_STATE_CLKPM);
1269
1270 if (pci_enable_device(pdev)) {
1271 err = -ENODEV;
1272 goto out_no_pci;
1273 }
1274
1275 pci_set_master(pdev);
1276
1277 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
1278 if (!err)
1279 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
1280 if (err) {
1281 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1282 if (!err)
1283 err = pci_set_consistent_dma_mask(pdev,
1284 DMA_BIT_MASK(32));
1285 /* both attempts failed: */
1286 if (err) {
1287 dev_err(&pdev->dev, "No suitable DMA available\n");
1288 goto out_pci_disable_device;
1289 }
1290 }
1291
1292 err = pci_request_regions(pdev, DRV_NAME);
1293 if (err) {
1294 dev_err(&pdev->dev, "pci_request_regions failed\n");
1295 goto out_pci_disable_device;
1296 }
1297
1298 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
1299 if (!trans_pcie->hw_base) {
1300 dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
1301 err = -ENODEV;
1302 goto out_pci_release_regions;
1303 }
1304
1305 /* We disable the RETRY_TIMEOUT register (0x41) to keep
1306 * PCI Tx retries from interfering with C3 CPU state */
1307 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
1308
1309 err = pci_enable_msi(pdev);
1310 if (err) {
1311 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
1312 /* enable rfkill interrupt: hw bug w/a */
1313 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1314 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1315 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1316 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1317 }
1318 }
1319
1320 trans->dev = &pdev->dev;
1321 trans_pcie->irq = pdev->irq;
1322 trans_pcie->pci_dev = pdev;
1323 trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
1324 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
1325 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
1326 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
1327
1328 /* Initialize the wait queue for commands */
1329 init_waitqueue_head(&trans_pcie->wait_command_queue);
1330 spin_lock_init(&trans->reg_lock);
1331
1332 snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
1333 "iwl_cmd_pool:%s", dev_name(trans->dev));
1334
1335 trans->dev_cmd_headroom = 0;
1336 trans->dev_cmd_pool =
1337 kmem_cache_create(trans->dev_cmd_pool_name,
1338 sizeof(struct iwl_device_cmd)
1339 + trans->dev_cmd_headroom,
1340 sizeof(void *),
1341 SLAB_HWCACHE_ALIGN,
1342 NULL);
1343
1344 if (!trans->dev_cmd_pool)
1345 goto out_pci_disable_msi;
1346
1347 return trans;
1348
1349out_pci_disable_msi:
1350 pci_disable_msi(pdev);
1351out_pci_release_regions:
1352 pci_release_regions(pdev);
1353out_pci_disable_device:
1354 pci_disable_device(pdev);
1355out_no_pci:
1356 kfree(trans);
1357 return NULL;
1358}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
deleted file mode 100644
index 6c5b867c353..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ /dev/null
@@ -1,1691 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/slab.h>
31#include <linux/sched.h>
32
33#include "iwl-debug.h"
34#include "iwl-csr.h"
35#include "iwl-prph.h"
36#include "iwl-io.h"
37#include "iwl-op-mode.h"
38#include "internal.h"
39/* FIXME: need to abstract out TX command (once we know what it looks like) */
40#include "dvm/commands.h"
41
42#define IWL_TX_CRC_SIZE 4
43#define IWL_TX_DELIMITER_SIZE 4
44
45/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
46 * DMA services
47 *
48 * Theory of operation
49 *
50 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
51 * of buffer descriptors, each of which points to one or more data buffers for
52 * the device to read from or fill. Driver and device exchange status of each
53 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
54 * entries in each circular buffer, to protect against confusing empty and full
55 * queue states.
56 *
57 * The device reads or writes the data in the queues via the device's several
58 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
59 *
60 * For Tx queue, there are low mark and high mark limits. If, after queuing
61 * the packet for Tx, free space become < low mark, Tx queue stopped. When
 * reclaiming packets (on 'tx done' IRQ), if free space becomes > high mark,
63 * Tx queue resumed.
64 *
65 ***************************************************/
/*
 * iwl_queue_space - number of free slots in the circular TX queue
 *
 * Keeps a reserve of 2 entries so a full queue can never be confused
 * with an empty one (read == write).
 *
 * NOTE(review): the wrap correction subtracts q->n_bd while the
 * negative case adds back q->n_window; for queues where n_window < n_bd
 * this looks inconsistent -- confirm against the queue layout before
 * relying on the exact value.
 */
static int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
81
82/*
83 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
84 */
85static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
86{
87 q->n_bd = count;
88 q->n_window = slots_num;
89 q->id = id;
90
91 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
92 * and iwl_queue_dec_wrap are broken. */
93 if (WARN_ON(!is_power_of_2(count)))
94 return -EINVAL;
95
96 /* slots_num must be power-of-two size, otherwise
97 * get_cmd_index is broken. */
98 if (WARN_ON(!is_power_of_2(slots_num)))
99 return -EINVAL;
100
101 q->low_mark = q->n_window / 4;
102 if (q->low_mark < 4)
103 q->low_mark = 4;
104
105 q->high_mark = q->n_window / 8;
106 if (q->high_mark < 2)
107 q->high_mark = 2;
108
109 q->write_ptr = 0;
110 q->read_ptr = 0;
111
112 return 0;
113}
114
115static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
116 struct iwl_dma_ptr *ptr, size_t size)
117{
118 if (WARN_ON(ptr->addr))
119 return -EINVAL;
120
121 ptr->addr = dma_alloc_coherent(trans->dev, size,
122 &ptr->dma, GFP_KERNEL);
123 if (!ptr->addr)
124 return -ENOMEM;
125 ptr->size = size;
126 return 0;
127}
128
129static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
130 struct iwl_dma_ptr *ptr)
131{
132 if (unlikely(!ptr->addr))
133 return;
134
135 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
136 memset(ptr, 0, sizeof(*ptr));
137}
138
/*
 * iwl_pcie_txq_stuck_timer - TX queue watchdog
 *
 * Fires when a queue made no progress for wd_timeout. Dumps the queue,
 * scheduler SRAM and FH state to the log, then triggers the op-mode's
 * NIC-error path (which restarts the firmware).
 */
static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
		SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	/* dump the scheduler's TX status SRAM for the stuck queue */
	iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	/* log scheduler status/translation state for every HW queue */
	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_read_targ_mem(trans,
					  trans_pcie->scd_base_addr +
					  SCD_TRANS_TBL_OFFSET_QUEUE(i));

		/* each dword of the table holds two 16-bit queue entries */
		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	/* log the scratch field of every still-pending TX command */
	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
		struct iwl_tx_cmd *tx_cmd =
			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			get_unaligned_le32(&tx_cmd->scratch));
	}

	iwl_op_mode_nic_error(trans->op_mode);
}
203
/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 *
 * Records the total frame length for the TFD at the queue's write
 * pointer in the scheduler byte-count table, so the HW scheduler knows
 * how much data each TFD carries.
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	/* total length includes the CRC and the frame delimiter */
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	/* the entry only has 12 bits for the length */
	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	/* account for the per-cipher security overhead added in the air */
	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	/* 12 bits of length, 4 bits of station id */
	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	/* the first BC_DUP entries are mirrored past the end of the table */
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
248
/*
 * iwl_pcie_txq_inval_byte_cnt_tbl - invalidate the byte-count entry for
 * the TFD at the queue's read pointer (called when reclaiming a TFD).
 */
static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	/* entries on the command queue carry no station id */
	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	/* write back a minimal length of 1 for the reclaimed slot */
	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	/* keep the mirrored head entries in sync (cf. the update path) */
	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
274
/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 *
 * No-op unless txq->need_update is set. When the NIC may be asleep
 * (power-save, no shadow registers) the device is woken first; in that
 * case need_update is deliberately left set so the write is retried
 * after the wakeup interrupt.
 */
void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					       "Tx queue %d requesting wakeup,"
					       " GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				/* need_update stays set -- retried on wakeup */
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

			/*
			 * else not in power-save mode,
			 * uCode will never sleep when we're
			 * trying to tx (during RFKILL, we're not trying to tx).
			 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
323
324static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
325{
326 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
327
328 dma_addr_t addr = get_unaligned_le32(&tb->lo);
329 if (sizeof(dma_addr_t) > sizeof(u32))
330 addr |=
331 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
332
333 return addr;
334}
335
336static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
337{
338 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
339
340 return le16_to_cpu(tb->hi_n_len) >> 4;
341}
342
343static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
344 dma_addr_t addr, u16 len)
345{
346 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
347 u16 hi_n_len = len << 4;
348
349 put_unaligned_le32(addr, &tb->lo);
350 if (sizeof(dma_addr_t) > sizeof(u32))
351 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
352
353 tb->hi_n_len = cpu_to_le16(hi_n_len);
354
355 tfd->num_tbs = idx + 1;
356}
357
/* iwl_pcie_tfd_get_num_tbs - number of valid TBs; only the low 5 bits
 * of the num_tbs field carry the count */
static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
362
/*
 * iwl_pcie_tfd_unmap - unmap every DMA buffer referenced by a TFD
 * @meta: holds the mapping of TB 0 (the command buffer, mapped BIDI)
 * @dma_dir: direction the remaining data TBs were mapped with
 */
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
			       enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);

	/* mark the TFD as empty so a repeated unmap is a no-op */
	tfd->num_tbs = 0;
}
393
/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	/* caller must hold the queue lock */
	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
			   dma_dir);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}
434
/*
 * iwl_pcie_txq_build_tfd - append one data buffer (TB) to the TFD at the
 * queue's write pointer. @reset zeroes the TFD first (start of a frame).
 * Returns 0, or -EINVAL if the TFD is already full or @addr exceeds the
 * device's 36-bit DMA range.
 */
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of IWL_NUM_OF_TBS Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	/* misalignment is logged but tolerated */
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
469
/*
 * iwl_pcie_txq_alloc - allocate the SW state and DMA buffers of one queue
 *
 * Allocates the per-slot entries array (n_window slots), per-slot
 * command buffers when this is the command queue, and the TFD circular
 * buffer shared with the device. Also arms the stuck-queue watchdog
 * timer. Returns 0 or a negative errno.
 */
static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			      struct iwl_txq *txq, int slots_num,
			      u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	/* allocating twice would leak the previous buffers */
	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	/* only the command queue pre-allocates a command per slot */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	/* free whatever was allocated before the failure */
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}
524
/*
 * iwl_pcie_txq_init - initialize an already-allocated queue and point
 * the device's FH at its TFD circular buffer.
 * Returns 0 or a negative errno from iwl_queue_init().
 */
static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			     int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
553
/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 *
 * Walks the queue from read_ptr to write_ptr, freeing each TFD's
 * buffers. No-op on a queue whose TFD ring was never allocated.
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}
582
/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	/* drop pending frames and their DMA mappings first */
	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kfree(txq->entries[i].cmd);
			kfree(txq->entries[i].copy_cmd);
			kfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	/* the watchdog may still be pending; wait for it */
	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
626
627/*
628 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
629 */
630static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
631{
632 struct iwl_trans_pcie __maybe_unused *trans_pcie =
633 IWL_TRANS_GET_PCIE_TRANS(trans);
634
635 iwl_write_prph(trans, SCD_TXFACT, mask);
636}
637
/*
 * iwl_pcie_tx_start - bring up the TX scheduler and FH DMA channels
 *
 * Clears the scheduler's context/status/translation SRAM, programs the
 * byte-count table base, activates the command queue and all DMA
 * channels. @scd_base_addr, when non-zero, must match the address the
 * firmware reports (WARN otherwise).
 */
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	int chan;
	u32 reg_val;

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset the translation table for every configured queue */
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
699
/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 *
 * Deactivates the scheduler, stops every FH TX channel (waiting up to
 * 1 ms each for idle), then unmaps all pending frames. Always returns 0;
 * per-channel timeouts are only logged.
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_pcie_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* can happen if the device is stopped before TX was ever set up */
	if (!trans_pcie->txq) {
		IWL_WARN(trans,
			 "Stopping tx queues that aren't allocated...\n");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}
742
743/*
744 * iwl_trans_tx_free - Free TXQ Context
745 *
746 * Destroy all TX DMA queues and structures
747 */
748void iwl_pcie_tx_free(struct iwl_trans *trans)
749{
750 int txq_id;
751 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
752
753 /* Tx queues */
754 if (trans_pcie->txq) {
755 for (txq_id = 0;
756 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
757 iwl_pcie_txq_free(trans, txq_id);
758 }
759
760 kfree(trans_pcie->txq);
761 trans_pcie->txq = NULL;
762
763 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
764
765 iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
766}
767
768/*
769 * iwl_pcie_tx_alloc - allocate TX context
770 * Allocate all Tx DMA structures and initialize them
771 */
772static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
773{
774 int ret;
775 int txq_id, slots_num;
776 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
777
778 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
779 sizeof(struct iwlagn_scd_bc_tbl);
780
781 /*It is not allowed to alloc twice, so warn when this happens.
782 * We cannot rely on the previous allocation, so free and fail */
783 if (WARN_ON(trans_pcie->txq)) {
784 ret = -EINVAL;
785 goto error;
786 }
787
788 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
789 scd_bc_tbls_size);
790 if (ret) {
791 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
792 goto error;
793 }
794
795 /* Alloc keep-warm buffer */
796 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
797 if (ret) {
798 IWL_ERR(trans, "Keep Warm allocation failed\n");
799 goto error;
800 }
801
802 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
803 sizeof(struct iwl_txq), GFP_KERNEL);
804 if (!trans_pcie->txq) {
805 IWL_ERR(trans, "Not enough memory for txq\n");
806 ret = ENOMEM;
807 goto error;
808 }
809
810 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
811 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
812 txq_id++) {
813 slots_num = (txq_id == trans_pcie->cmd_queue) ?
814 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
815 ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
816 slots_num, txq_id);
817 if (ret) {
818 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
819 goto error;
820 }
821 }
822
823 return 0;
824
825error:
826 iwl_pcie_tx_free(trans);
827
828 return ret;
829}
/*
 * iwl_pcie_tx_init - (re)initialize the Tx path
 *
 * Allocates the Tx context on first use, then disables the scheduler,
 * tells the NIC where the keep-warm buffer lives and (re)initializes
 * every Tx queue including the command queue.  On error, the context is
 * freed only if it was allocated by this call, so a pre-existing
 * allocation survives a failed re-init.
 */
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}
876
877static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
878 struct iwl_txq *txq)
879{
880 if (!trans_pcie->wd_timeout)
881 return;
882
883 /*
884 * if empty delete timer, otherwise move timer forward
885 * since we're making progress on this queue
886 */
887 if (txq->q.read_ptr == txq->q.write_ptr)
888 del_timer(&txq->stuck_timer);
889 else
890 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
891}
892
/* Frees buffers until index _not_ inclusive */
/*
 * iwl_trans_pcie_reclaim - reclaim transmitted frames on a data queue
 *
 * Walks the queue's read pointer forward up to (but not including) the
 * TFD slot derived from @ssn, moving each reclaimed skb onto @skbs for
 * the caller to hand back to the stack.  Must not be used on the
 * command queue (iwl_pcie_cmdq_reclaim handles that).  Wakes the queue
 * if enough space was freed.
 */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock(&txq->lock);

	/* Nothing to reclaim if the read pointer already caught up */
	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);

	/* Sanity: the target slot must lie inside the used region */
	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	/* Caller must supply an empty list to collect the reclaimed skbs */
	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		/* clear the scheduler byte-count entry for this slot */
		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
	}

	/* re-arm or stop the stuck-queue watchdog */
	iwl_pcie_txq_progress(trans_pcie, txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);
out:
	spin_unlock(&txq->lock);
}
954
/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 *
 * Note: normally exactly one entry is reclaimed per call; if the loop
 * advances more than once, intermediate host commands were skipped and
 * the op mode is notified of a NIC error.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	/* caller holds the queue lock (cf. iwl_pcie_hcmd_complete) */
	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, q->n_bd,
			q->write_ptr, q->read_ptr);
		return;
	}

	/* advance read_ptr up to and including @idx (loop target is idx+1) */
	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}
	}

	iwl_pcie_txq_progress(trans_pcie, txq);
}
991
992static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
993 u16 txq_id)
994{
995 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
996 u32 tbl_dw_addr;
997 u32 tbl_dw;
998 u16 scd_q2ratid;
999
1000 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1001
1002 tbl_dw_addr = trans_pcie->scd_base_addr +
1003 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
1004
1005 tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
1006
1007 if (txq_id & 0x1)
1008 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1009 else
1010 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1011
1012 iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
1013
1014 return 0;
1015}
1016
1017static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
1018 u16 txq_id)
1019{
1020 /* Simply stop the queue, but don't change any configuration;
1021 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
1022 iwl_write_prph(trans,
1023 SCD_QUEUE_STATUS_BITS(txq_id),
1024 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1025 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1026}
1027
/*
 * iwl_trans_pcie_txq_enable - configure and activate a Tx queue in the SCD
 *
 * Stops the queue, configures chaining/aggregation (an AGG queue is one
 * mapped to a real station, i.e. sta_id != IWL_INVALID_STATION), seeds
 * read/write pointers from @ssn, programs the window size / frame limit
 * context in SRAM, and finally activates the queue on @fifo.
 */
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_pcie_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	/* keep the HW write pointer and SCD read pointer in sync */
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			   ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			   SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			   ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			   SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}
1089
/*
 * iwl_trans_pcie_txq_disable - deactivate a Tx queue
 *
 * Marks the queue unused, stops it in the scheduler, zeroes its status
 * area in SRAM and unmaps/frees any skbs still pending on it.  Warns
 * and bails out if the queue was not enabled.
 */
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	iwl_pcie_txq_set_inactive(trans, txq_id);

	/* wipe the per-queue status area in device SRAM */
	_iwl_write_targ_mem_dwords(trans, stts_addr,
				   zero_val, ARRAY_SIZE(zero_val));

	iwl_pcie_txq_unmap(trans, txq_id);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
1111
1112/*************** HOST COMMAND QUEUE FUNCTIONS *****/
1113
/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport context
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation has
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 *
 * Command payload chunks are either copied into the static command
 * buffer (up to TFD_MAX_PAYLOAD_SIZE total) or attached as separate
 * TFD entries (NOCOPY/DUP); a NOCOPY/DUP chunk may not be followed by
 * a copied chunk, and at most one DUP chunk is allowed.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u32 cmd_pos;

	/* both sizes start with the command header; copy_size counts only
	 * what goes into the static buffer, cmd_size the full command */
	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	/* first pass: validate chunk flags and compute sizes */
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmd->data[i], cmd->len[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	/* an ASYNC command needs an extra free slot — TODO confirm why */
	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		/* copied chunks always precede NOCOPY/DUP ones (checked
		 * above), so we can stop at the first non-copied chunk */
		if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					 IWL_HCMD_DFL_DUP))
			break;
		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
		cmd_pos += cmd->len[i];
	}

	WARN_ON_ONCE(txq->entries[idx].copy_cmd);

	/*
	 * since out_cmd will be the source address of the FH, it will write
	 * the retry count there. So when the user needs to receive the HCMD
	 * that corresponds to the response in the response handler, it needs
	 * to set CMD_WANT_HCMD.
	 */
	if (cmd->flags & CMD_WANT_HCMD) {
		txq->entries[idx].copy_cmd =
			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
		if (unlikely(!txq->entries[idx].copy_cmd)) {
			idx = -ENOMEM;
			goto out;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* map the header + copied payload as the first TFD entry */
	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);

	/* map each NOCOPY/DUP chunk as an additional TFD entry */
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		const void *data = cmd->data[i];

		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			/* roll back everything mapped so far for this TFD */
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr],
					   DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
	}

	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree(txq->entries[idx].free_buf);
	/* the DUP buffer is owned by the queue entry from here on */
	txq->entries[idx].free_buf = dup_buf;

	txq->need_update = 1;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
			       &out_cmd->hdr, copy_size);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
1317
/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	/* the sequence number encodes both the originating queue and slot */
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	/* release the DMA mappings set up in iwl_pcie_enqueue_hcmd() */
	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* hand the response page over to the waiting caller */
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	/* a SYNC command is waiting on HCMD_ACTIVE; clear it and wake it up */
	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock(&txq->lock);
}
1389
1390#define HOST_COMPLETE_TIMEOUT (2 * HZ)
1391
1392static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
1393 struct iwl_host_cmd *cmd)
1394{
1395 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1396 int ret;
1397
1398 /* An asynchronous command can not expect an SKB to be set. */
1399 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1400 return -EINVAL;
1401
1402 ret = iwl_pcie_enqueue_hcmd(trans, cmd);
1403 if (ret < 0) {
1404 IWL_ERR(trans,
1405 "Error sending %s: enqueue_hcmd failed: %d\n",
1406 get_cmd_string(trans_pcie, cmd->id), ret);
1407 return ret;
1408 }
1409 return 0;
1410}
1411
/*
 * iwl_pcie_send_hcmd_sync - send a host command and wait for completion
 *
 * Sets STATUS_HCMD_ACTIVE (only one sync command may be in flight),
 * enqueues the command and sleeps until iwl_pcie_hcmd_complete() clears
 * the bit or HOST_COMPLETE_TIMEOUT expires.  Returns 0 on success,
 * -EIO/-ERFKILL/-ETIMEDOUT on failure; on any failure a CMD_WANT_SKB
 * command has its flag cancelled and any response page freed.
 */
static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	/* only one synchronous command at a time */
	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	/* wait for the completion handler to clear HCMD_ACTIVE */
	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_HCMD_ACTIVE,
					   &trans_pcie->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_txq *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
				       get_cmd_string(trans_pcie, cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
1511
1512int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1513{
1514 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1515
1516 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
1517 return -EIO;
1518
1519 if (test_bit(STATUS_RFKILL, &trans_pcie->status))
1520 return -ERFKILL;
1521
1522 if (cmd->flags & CMD_ASYNC)
1523 return iwl_pcie_send_hcmd_async(trans, cmd);
1524
1525 /* We still can fail on RFKILL that can be asserted while we wait */
1526 return iwl_pcie_send_hcmd_sync(trans, cmd);
1527}
1528
/*
 * iwl_trans_pcie_tx - queue one 802.11 frame for transmission
 *
 * Builds a two-entry TFD: the first entry covers the Tx command header
 * plus the MAC header (dword-aligned), the second points at the frame
 * payload in the skb (if any).  Updates the byte-count table, arms the
 * stuck-queue watchdog and advances the HW write pointer.  Returns 0 on
 * success, -EINVAL for an unused queue, -1 on DMA mapping failure.
 */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirements to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
		  ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;	/* round up to dword boundary */

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	/* for fragmented frames, defer telling the HW until the last frag */
	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			/* roll back the first mapping before bailing out */
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	/* hand the buffer back to the device after the scratch update */
	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}