author     Zhu Yi <yi.zhu@intel.com>                      2007-09-25 20:54:57 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-10-10 19:51:39 -0400
commit     b481de9ca074528fe8c429604e2777db8b89806a (patch)
tree       cf226646d73c56af843e8a656a296905ad6df179 /drivers/net/wireless/iwlwifi
parent     75388acd0cd827dc1498043daa7d1c760902cd67 (diff)
[IWLWIFI]: add iwlwifi wireless drivers

This patch adds the mac80211 based wireless drivers for the Intel
PRO/Wireless 3945ABG/BG Network Connection and Intel Wireless WiFi
Link AGN (4965) adapters.

[ Move driver into its own directory -DaveM ]

Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/wireless/iwlwifi')
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig          |  128
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile         |   11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-hw.h    |  118
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c    |  979
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.h    |  191
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c       | 2290
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.h       |   41
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-hw.h    |  581
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.c    | 2118
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.h    |  266
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c       | 4719
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.h       |  341
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-channel.h    |  161
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h   | 1734
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h      |  149
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h     |  336
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-helpers.h    |  255
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hw.h         |  537
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.h         |  470
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-priv.h       |  308
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h       |  229
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-spectrum.h   |   91
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c   | 8732
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl4965-base.c   | 9323
-rw-r--r--  drivers/net/wireless/iwlwifi/iwlwifi.h        |  713
25 files changed, 34821 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
new file mode 100644
index 000000000000..25cfc6c32509
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -0,0 +1,128 @@
1config IWLWIFI
2 bool "Intel Wireless WiFi Link Drivers"
3 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
4 select FW_LOADER
5 default n
6 ---help---
7 Select to enable drivers based on the iwlwifi project. This
8 project provides a common foundation for Intel's wireless
9 drivers designed to use the mac80211 subsystem.
10
11 See <file:Documentation/networking/README.iwlwifi> for
12 information on the capabilities currently enabled in this
13 driver and for tips for debugging issues and problems.
14
15config IWLWIFI_DEBUG
16 bool "Enable full debugging output in iwlwifi drivers"
17 depends on IWLWIFI
18 default y
19 ---help---
20 This option will enable debug tracing output for the iwlwifi
21 drivers.
22
23 This will result in the kernel module being ~100k larger. You can
24 control which debug output is sent to the kernel log by setting the
25 value in
26
27 /sys/bus/pci/drivers/${DRIVER}/debug_level
28
29 This entry will only exist if this option is enabled.
30
31 To set a value, simply echo an 8-byte hex value to the same file:
32
33 % echo 0x43fff > /sys/bus/pci/drivers/${DRIVER}/debug_level
34
35 You can find the list of debug mask values in:
36 drivers/net/wireless/iwlwifi/iwl-debug.h
37
38 If this is your first time using this driver, you should say Y here
39 as the debug information can assist others in helping you resolve
40 any problems you may encounter.
41
42config IWLWIFI_SENSITIVITY
43 bool "Enable Sensitivity Calibration in iwlwifi drivers"
44 depends on IWLWIFI
45 default y
46 ---help---
47 This option will enable sensitivity calibration for the iwlwifi
48 drivers.
49
50config IWLWIFI_SPECTRUM_MEASUREMENT
51 bool "Enable Spectrum Measurement in iwlwifi drivers"
52 depends on IWLWIFI
53 default y
54 ---help---
55 This option will enable spectrum measurement for the iwlwifi drivers.
56
57config IWLWIFI_QOS
58 bool "Enable Wireless QoS in iwlwifi drivers"
59 depends on IWLWIFI
60 default y
61 ---help---
62 This option will enable wireless quality of service (QoS) for the
63 iwlwifi drivers.
64
65config IWLWIFI_HT
66 bool "Enable 802.11n HT features in iwlwifi drivers"
67 depends on EXPERIMENTAL
68 depends on IWLWIFI && MAC80211_HT
69 default n
70 ---help---
71 This option enables IEEE 802.11n High Throughput features
72 for the iwlwifi drivers.
73
74config IWL4965
75 tristate "Intel Wireless WiFi 4965AGN"
76 depends on m && IWLWIFI && EXPERIMENTAL
77 default m
78 ---help---
79 Select to build the driver supporting the:
80
81 Intel Wireless WiFi Link 4965AGN
82
83 This driver uses the kernel's mac80211 subsystem.
84
85 See <file:Documentation/networking/README.iwlwifi> for
86 information on the capabilities currently enabled in this
87 driver and for tips for debugging any issues or problems.
88
89 In order to use this driver, you will need a microcode (uCode)
90 image for it. You can obtain the microcode from:
91
92 <http://intellinuxwireless.org/>.
93
94 See the above referenced README.iwlwifi for information on where
95 to install the microcode images.
96
97 If you want to compile the driver as a module ( = code which can be
98 inserted in and removed from the running kernel whenever you want),
99 say M here and read <file:Documentation/modules.txt>. The module
100 will be called iwl4965.ko.
101
102config IWL3945
103 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection"
104 depends on m && IWLWIFI && EXPERIMENTAL
105 default m
106 ---help---
107 Select to build the driver supporting the:
108
109 Intel PRO/Wireless 3945ABG/BG Network Connection
110
111 This driver uses the kernel's mac80211 subsystem.
112
113 See <file:Documentation/networking/README.iwlwifi> for
114 information on the capabilities currently enabled in this
115 driver and for tips for debugging any issues or problems.
116
117 In order to use this driver, you will need a microcode (uCode)
118 image for it. You can obtain the microcode from:
119
120 <http://intellinuxwireless.org/>.
121
122 See the above referenced README.iwlwifi for information on where
123 to install the microcode images.
124
125 If you want to compile the driver as a module ( = code which can be
126 inserted in and removed from the running kernel whenever you want),
127 say M here and read <file:Documentation/modules.txt>. The module
128 will be called iwl3945.ko.
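
The IWLWIFI_DEBUG help text above describes a runtime debug mask: a hex value
written to the debug_level sysfs attribute selects which message categories
reach the kernel log, with the real mask bits living in the iwl-debug.h added
by this patch. The short userspace sketch below only illustrates that gating
mechanism; the function name, category bits and printf() stand-in are invented
for the example and are not the driver's actual definitions.

#include <stdio.h>

#define DL_INFO  0x00000001u   /* hypothetical category bits, not iwl-debug.h's */
#define DL_RATE  0x00000002u
#define DL_RX    0x00000004u

static unsigned int debug_level = 0x3u;   /* pretend this was echoed via sysfs */

static void dbg(unsigned int category, const char *msg)
{
	if (debug_level & category)     /* only categories present in the mask print */
		printf("%s\n", msg);
}

int main(void)
{
	dbg(DL_INFO, "info: firmware loaded");     /* printed, bit 0 is set   */
	dbg(DL_RATE, "rate: scaling to index 7");  /* printed, bit 1 is set   */
	dbg(DL_RX,   "rx: frame received");        /* suppressed, bit 2 clear */
	return 0;
}

Compiled with any C compiler, the sketch prints only the messages whose
category bit is set in the mask, which is the same effect the help text
describes for /sys/bus/pci/drivers/${DRIVER}/debug_level.
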
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
new file mode 100644
index 000000000000..03837ff54312
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -0,0 +1,11 @@
1obj-$(CONFIG_IWL3945) += iwl3945.o
2iwl3945-objs = iwl3945-base.o iwl-3945.o iwl-3945-rs.o
3CFLAGS_iwl3945-base.o = -DIWL=3945
4CFLAGS_iwl-3945.o = -DIWL=3945
5CFLAGS_iwl-3945-rs.o = -DIWL=3945
6
7obj-$(CONFIG_IWL4965) += iwl4965.o
8iwl4965-objs = iwl4965-base.o iwl-4965.o iwl-4965-rs.o
9CFLAGS_iwl4965-base.o = -DIWL=4965
10CFLAGS_iwl-4965.o = -DIWL=4965
11CFLAGS_iwl-4965-rs.o = -DIWL=4965
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
new file mode 100644
index 000000000000..fb5f0649f4f6
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -0,0 +1,118 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __iwl_3945_hw__
65#define __iwl_3945_hw__
66
67#define IWL_RX_BUF_SIZE 3000
68/* card static random access memory (SRAM) for processor data and instructions */
69#define ALM_RTC_INST_UPPER_BOUND (0x014000)
70#define ALM_RTC_DATA_UPPER_BOUND (0x808000)
71
72#define ALM_RTC_INST_SIZE (ALM_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
73#define ALM_RTC_DATA_SIZE (ALM_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
74
75#define IWL_MAX_BSM_SIZE ALM_RTC_INST_SIZE
76#define IWL_MAX_INST_SIZE ALM_RTC_INST_SIZE
77#define IWL_MAX_DATA_SIZE ALM_RTC_DATA_SIZE
78#define IWL_MAX_NUM_QUEUES 8
79
80static inline int iwl_hw_valid_rtc_data_addr(u32 addr)
81{
82 return (addr >= RTC_DATA_LOWER_BOUND) &&
83 (addr < ALM_RTC_DATA_UPPER_BOUND);
84}
85
86/* Base physical address of iwl_shared is provided to FH_TSSR_CBB_BASE
87 * and &iwl_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
88struct iwl_shared {
89 __le32 tx_base_ptr[8];
90 __le32 rx_read_ptr[3];
91} __attribute__ ((packed));
92
93struct iwl_tfd_frame_data {
94 __le32 addr;
95 __le32 len;
96} __attribute__ ((packed));
97
98struct iwl_tfd_frame {
99 __le32 control_flags;
100 struct iwl_tfd_frame_data pa[4];
101 u8 reserved[28];
102} __attribute__ ((packed));
103
104static inline u8 iwl_hw_get_rate(__le16 rate_n_flags)
105{
106 return le16_to_cpu(rate_n_flags) & 0xFF;
107}
108
109static inline u16 iwl_hw_get_rate_n_flags(__le16 rate_n_flags)
110{
111 return le16_to_cpu(rate_n_flags);
112}
113
114static inline __le16 iwl_hw_set_rate_n_flags(u8 rate, u16 flags)
115{
116 return cpu_to_le16((u16)rate|flags);
117}
118#endif
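
The last three inline helpers in iwl-3945-hw.h pack a PLCP rate code into the
low byte of a little-endian 16-bit word and OR the flag bits above it. The
standalone sketch below mirrors that round trip with plain uint16_t in place
of the kernel's __le16/cpu_to_le16() handling (equivalent on a little-endian
host); the 0x0100 flag value is an arbitrary example, while 13 is the 6 Mbps
PLCP code defined later in iwl-3945-rs.h.

#include <stdint.h>
#include <stdio.h>

static uint16_t set_rate_n_flags(uint8_t rate, uint16_t flags)
{
	return (uint16_t)rate | flags;   /* rate in the low byte, flags above it */
}

static uint8_t get_rate(uint16_t rate_n_flags)
{
	return rate_n_flags & 0xFF;      /* strip the flags, keep the PLCP rate */
}

int main(void)
{
	uint16_t packed = set_rate_n_flags(13 /* 6 Mbps PLCP */, 0x0100 /* example flag */);

	printf("packed=0x%04x rate=0x%02x\n", packed, get_rate(packed));
	return 0;
}
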
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
new file mode 100644
index 000000000000..a4f4c8798a83
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -0,0 +1,979 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/skbuff.h>
30#include <linux/wireless.h>
31#include <net/mac80211.h>
32#include <net/ieee80211.h>
33
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/delay.h>
37
38#include <linux/workqueue.h>
39
40#include <net/mac80211.h>
41#include <linux/wireless.h>
42
43#include "../net/mac80211/ieee80211_rate.h"
44
45#include "iwlwifi.h"
46
47#define RS_NAME "iwl-3945-rs"
48
49struct iwl_rate_scale_data {
50 u64 data;
51 s32 success_counter;
52 s32 success_ratio;
53 s32 counter;
54 s32 average_tpt;
55 unsigned long stamp;
56};
57
58struct iwl_rate_scale_priv {
59 spinlock_t lock;
60 s32 *expected_tpt;
61 unsigned long last_partial_flush;
62 unsigned long last_flush;
63 u32 flush_time;
64 u32 last_tx_packets;
65 u32 tx_packets;
66 u8 tgg;
67 u8 flush_pending;
68 u8 start_rate;
69 u8 ibss_sta_added;
70 struct timer_list rate_scale_flush;
71 struct iwl_rate_scale_data win[IWL_RATE_COUNT];
72};
73
74static s32 iwl_expected_tpt_g[IWL_RATE_COUNT] = {
75 0, 0, 76, 104, 130, 168, 191, 202, 7, 13, 35, 58
76};
77
78static s32 iwl_expected_tpt_g_prot[IWL_RATE_COUNT] = {
79 0, 0, 0, 80, 93, 113, 123, 125, 7, 13, 35, 58
80};
81
82static s32 iwl_expected_tpt_a[IWL_RATE_COUNT] = {
83 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0, 0
84};
85
86static s32 iwl_expected_tpt_b[IWL_RATE_COUNT] = {
87 0, 0, 0, 0, 0, 0, 0, 0, 7, 13, 35, 58
88};
89
90struct iwl_tpt_entry {
91 s8 min_rssi;
92 u8 index;
93};
94
95static struct iwl_tpt_entry iwl_tpt_table_a[] = {
96 {-60, IWL_RATE_54M_INDEX},
97 {-64, IWL_RATE_48M_INDEX},
98 {-72, IWL_RATE_36M_INDEX},
99 {-80, IWL_RATE_24M_INDEX},
100 {-84, IWL_RATE_18M_INDEX},
101 {-85, IWL_RATE_12M_INDEX},
102 {-87, IWL_RATE_9M_INDEX},
103 {-89, IWL_RATE_6M_INDEX}
104};
105
106static struct iwl_tpt_entry iwl_tpt_table_b[] = {
107 {-86, IWL_RATE_11M_INDEX},
108 {-88, IWL_RATE_5M_INDEX},
109 {-90, IWL_RATE_2M_INDEX},
110 {-92, IWL_RATE_1M_INDEX}
111
112};
113
114static struct iwl_tpt_entry iwl_tpt_table_g[] = {
115 {-60, IWL_RATE_54M_INDEX},
116 {-64, IWL_RATE_48M_INDEX},
117 {-68, IWL_RATE_36M_INDEX},
118 {-80, IWL_RATE_24M_INDEX},
119 {-84, IWL_RATE_18M_INDEX},
120 {-85, IWL_RATE_12M_INDEX},
121 {-86, IWL_RATE_11M_INDEX},
122 {-88, IWL_RATE_5M_INDEX},
123 {-90, IWL_RATE_2M_INDEX},
124 {-92, IWL_RATE_1M_INDEX}
125};
126
127#define IWL_RATE_MAX_WINDOW 62
128#define IWL_RATE_FLUSH (3*HZ/10)
129#define IWL_RATE_WIN_FLUSH (HZ/2)
130#define IWL_RATE_HIGH_TH 11520
131#define IWL_RATE_MIN_FAILURE_TH 8
132#define IWL_RATE_MIN_SUCCESS_TH 8
133#define IWL_RATE_DECREASE_TH 1920
134
135static u8 iwl_get_rate_index_by_rssi(s32 rssi, u8 mode)
136{
137 u32 index = 0;
138 u32 table_size = 0;
139 struct iwl_tpt_entry *tpt_table = NULL;
140
141 if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
142 rssi = IWL_MIN_RSSI_VAL;
143
144 switch (mode) {
145 case MODE_IEEE80211G:
146 tpt_table = iwl_tpt_table_g;
147 table_size = ARRAY_SIZE(iwl_tpt_table_g);
148 break;
149
150 case MODE_IEEE80211A:
151 tpt_table = iwl_tpt_table_a;
152 table_size = ARRAY_SIZE(iwl_tpt_table_a);
153 break;
154
155 default:
156 case MODE_IEEE80211B:
157 tpt_table = iwl_tpt_table_b;
158 table_size = ARRAY_SIZE(iwl_tpt_table_b);
159 break;
160 }
161
162 while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
163 index++;
164
165 index = min(index, (table_size - 1));
166
167 return tpt_table[index].index;
168}
169
170static void iwl_clear_window(struct iwl_rate_scale_data *window)
171{
172 window->data = 0;
173 window->success_counter = 0;
174 window->success_ratio = IWL_INVALID_VALUE;
175 window->counter = 0;
176 window->average_tpt = IWL_INVALID_VALUE;
177 window->stamp = 0;
178}
179
180/**
181 * iwl_rate_scale_flush_windows - flush out the rate scale windows
182 *
183 * Returns the number of windows that have gathered data but were
184 * not flushed. If there were any that were not flushed, then
185 * reschedule the rate flushing routine.
186 */
187static int iwl_rate_scale_flush_windows(struct iwl_rate_scale_priv *rs_priv)
188{
189 int unflushed = 0;
190 int i;
191 unsigned long flags;
192
193 /*
194 * For each rate, if we have collected data on that rate
195 * and it has been more than IWL_RATE_WIN_FLUSH
196 * since we flushed, clear out the gathered statistics
197 */
198 for (i = 0; i < IWL_RATE_COUNT; i++) {
199 if (!rs_priv->win[i].counter)
200 continue;
201
202 spin_lock_irqsave(&rs_priv->lock, flags);
203 if (time_after(jiffies, rs_priv->win[i].stamp +
204 IWL_RATE_WIN_FLUSH)) {
205 IWL_DEBUG_RATE("flushing %d samples of rate "
206 "index %d\n",
207 rs_priv->win[i].counter, i);
208 iwl_clear_window(&rs_priv->win[i]);
209 } else
210 unflushed++;
211 spin_unlock_irqrestore(&rs_priv->lock, flags);
212 }
213
214 return unflushed;
215}
216
217#define IWL_RATE_FLUSH_MAX 5000 /* msec */
218#define IWL_RATE_FLUSH_MIN 50 /* msec */
219
220static void iwl_bg_rate_scale_flush(unsigned long data)
221{
222 struct iwl_rate_scale_priv *rs_priv = (void *)data;
223 int unflushed = 0;
224 unsigned long flags;
225 u32 packet_count, duration, pps;
226
227 IWL_DEBUG_RATE("enter\n");
228
229 unflushed = iwl_rate_scale_flush_windows(rs_priv);
230
231 spin_lock_irqsave(&rs_priv->lock, flags);
232
233 rs_priv->flush_pending = 0;
234
235 /* Number of packets Rx'd since last time this timer ran */
236 packet_count = (rs_priv->tx_packets - rs_priv->last_tx_packets) + 1;
237
238 rs_priv->last_tx_packets = rs_priv->tx_packets + 1;
239
240 if (unflushed) {
241 duration =
242 jiffies_to_msecs(jiffies - rs_priv->last_partial_flush);
243/* duration = jiffies_to_msecs(rs_priv->flush_time); */
244
245 IWL_DEBUG_RATE("Tx'd %d packets in %dms\n",
246 packet_count, duration);
247
248 /* Determine packets per second */
249 if (duration)
250 pps = (packet_count * 1000) / duration;
251 else
252 pps = 0;
253
254 if (pps) {
255 duration = IWL_RATE_FLUSH_MAX / pps;
256 if (duration < IWL_RATE_FLUSH_MIN)
257 duration = IWL_RATE_FLUSH_MIN;
258 } else
259 duration = IWL_RATE_FLUSH_MAX;
260
261 rs_priv->flush_time = msecs_to_jiffies(duration);
262
263 IWL_DEBUG_RATE("new flush period: %d msec ave %d\n",
264 duration, packet_count);
265
266 mod_timer(&rs_priv->rate_scale_flush, jiffies +
267 rs_priv->flush_time);
268
269 rs_priv->last_partial_flush = jiffies;
270 }
271
272 /* If there weren't any unflushed entries, we don't schedule the timer
273 * to run again */
274
275 rs_priv->last_flush = jiffies;
276
277 spin_unlock_irqrestore(&rs_priv->lock, flags);
278
279 IWL_DEBUG_RATE("leave\n");
280}
281
282/**
283 * iwl_collect_tx_data - Update the success/failure sliding window
284 *
285 * We keep a sliding window of the last 64 packets transmitted
286 * at this rate. window->data contains the bitmask of successful
287 * packets.
288 */
289static void iwl_collect_tx_data(struct iwl_rate_scale_priv *rs_priv,
290 struct iwl_rate_scale_data *window,
291 int success, int retries)
292{
293 unsigned long flags;
294
295 if (!retries) {
296 IWL_DEBUG_RATE("leave: retries == 0 -- should be at least 1\n");
297 return;
298 }
299
300 while (retries--) {
301 spin_lock_irqsave(&rs_priv->lock, flags);
302
303 /* If we have filled up the window then subtract one from the
304 * success counter if the high-bit is counting toward
305 * success */
306 if (window->counter == IWL_RATE_MAX_WINDOW) {
307 if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1)))
308 window->success_counter--;
309 } else
310 window->counter++;
311
312 /* Slide the window to the left one bit */
313 window->data = (window->data << 1);
314
315 /* If this packet was a success then set the low bit high */
316 if (success) {
317 window->success_counter++;
318 window->data |= 1;
319 }
320
321 /* window->counter can't be 0 -- it is either >0 or
322 * IWL_RATE_MAX_WINDOW */
323 window->success_ratio = 12800 * window->success_counter /
324 window->counter;
325
326 /* Tag this window as having been updated */
327 window->stamp = jiffies;
328
329 spin_unlock_irqrestore(&rs_priv->lock, flags);
330 }
331}
332
333static void rs_rate_init(void *priv_rate, void *priv_sta,
334 struct ieee80211_local *local, struct sta_info *sta)
335{
336 int i;
337
338 IWL_DEBUG_RATE("enter\n");
339
340 /* TODO: what is a good starting rate for STA? About middle? Maybe not
341 * the lowest or the highest rate.. Could consider using RSSI from
342 * previous packets? Need to have IEEE 802.1X auth succeed immediately
343 * after assoc.. */
344
345 for (i = IWL_RATE_COUNT - 1; i >= 0; i--) {
346 if (sta->supp_rates & (1 << i)) {
347 sta->txrate = i;
348 break;
349 }
350 }
351
352 sta->last_txrate = sta->txrate;
353
354 IWL_DEBUG_RATE("leave\n");
355}
356
357static void *rs_alloc(struct ieee80211_local *local)
358{
359 return local->hw.priv;
360}
361
362/* rate scale requires free function to be implemented */
363static void rs_free(void *priv)
364{
365 return;
366}
367static void rs_clear(void *priv)
368{
369 return;
370}
371
372
373static void *rs_alloc_sta(void *priv, gfp_t gfp)
374{
375 struct iwl_rate_scale_priv *rs_priv;
376 int i;
377
378 IWL_DEBUG_RATE("enter\n");
379
380 rs_priv = kzalloc(sizeof(struct iwl_rate_scale_priv), gfp);
381 if (!rs_priv) {
382 IWL_DEBUG_RATE("leave: ENOMEM\n");
383 return NULL;
384 }
385
386 spin_lock_init(&rs_priv->lock);
387
388 rs_priv->start_rate = IWL_RATE_INVALID;
389
390 /* default to just 802.11b */
391 rs_priv->expected_tpt = iwl_expected_tpt_b;
392
393 rs_priv->last_partial_flush = jiffies;
394 rs_priv->last_flush = jiffies;
395 rs_priv->flush_time = IWL_RATE_FLUSH;
396 rs_priv->last_tx_packets = 0;
397 rs_priv->ibss_sta_added = 0;
398
399 init_timer(&rs_priv->rate_scale_flush);
400 rs_priv->rate_scale_flush.data = (unsigned long)rs_priv;
401 rs_priv->rate_scale_flush.function = &iwl_bg_rate_scale_flush;
402
403 for (i = 0; i < IWL_RATE_COUNT; i++)
404 iwl_clear_window(&rs_priv->win[i]);
405
406 IWL_DEBUG_RATE("leave\n");
407
408 return rs_priv;
409}
410
411static void rs_free_sta(void *priv, void *priv_sta)
412{
413 struct iwl_rate_scale_priv *rs_priv = priv_sta;
414
415 IWL_DEBUG_RATE("enter\n");
416 del_timer_sync(&rs_priv->rate_scale_flush);
417 kfree(rs_priv);
418 IWL_DEBUG_RATE("leave\n");
419}
420
421/**
422 * rs_tx_status - Update rate control values based on Tx results
423 *
424 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
425 * the hardware for each rate.
426 */
427static void rs_tx_status(void *priv_rate,
428 struct net_device *dev,
429 struct sk_buff *skb,
430 struct ieee80211_tx_status *tx_resp)
431{
432 u8 retries, current_count;
433 int scale_rate_index, first_index, last_index;
434 unsigned long flags;
435 struct sta_info *sta;
436 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
437 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
438 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
439 struct iwl_rate_scale_priv *rs_priv;
440
441 IWL_DEBUG_RATE("enter\n");
442
443 retries = tx_resp->retry_count;
444
445 first_index = tx_resp->control.tx_rate;
446 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) {
447 IWL_DEBUG_RATE("leave: Rate out of bounds: %0x for %d\n",
448 tx_resp->control.tx_rate, first_index);
449 return;
450 }
451
452 sta = sta_info_get(local, hdr->addr1);
453 if (!sta || !sta->rate_ctrl_priv) {
454 if (sta)
455 sta_info_put(sta);
456 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
457 return;
458 }
459
460 rs_priv = (void *)sta->rate_ctrl_priv;
461
462 rs_priv->tx_packets++;
463
464 scale_rate_index = first_index;
465 last_index = first_index;
466
467 /*
468 * Update the window for each rate. We determine which rates
469 * were Tx'd based on the total number of retries vs. the number
470 * of retries configured for each rate -- currently set to the
471 * priv value 'retry_rate' vs. rate specific
472 *
473 * On exit from this while loop last_index indicates the rate
474 * at which the frame was finally transmitted (or failed if no
475 * ACK)
476 */
477 while (retries > 0) {
478 if (retries < priv->retry_rate) {
479 current_count = retries;
480 last_index = scale_rate_index;
481 } else {
482 current_count = priv->retry_rate;
483 last_index = iwl_get_prev_ieee_rate(scale_rate_index);
484 }
485
486 /* Update this rate accounting for as many retries
487 * as was used for it (per current_count) */
488 iwl_collect_tx_data(rs_priv,
489 &rs_priv->win[scale_rate_index],
490 0, current_count);
491 IWL_DEBUG_RATE("Update rate %d for %d retries.\n",
492 scale_rate_index, current_count);
493
494 retries -= current_count;
495
496 if (retries)
497 scale_rate_index =
498 iwl_get_prev_ieee_rate(scale_rate_index);
499 }
500
501 /* Update the last index window with success/failure based on ACK */
502 IWL_DEBUG_RATE("Update rate %d with %s.\n",
503 last_index,
504 (tx_resp->flags & IEEE80211_TX_STATUS_ACK) ?
505 "success" : "failure");
506 iwl_collect_tx_data(rs_priv,
507 &rs_priv->win[last_index],
508 tx_resp->flags & IEEE80211_TX_STATUS_ACK, 1);
509
510 /* We updated the rate scale window -- if its been more than
511 * flush_time since the last run, schedule the flush
512 * again */
513 spin_lock_irqsave(&rs_priv->lock, flags);
514
515 if (!rs_priv->flush_pending &&
516 time_after(jiffies, rs_priv->last_partial_flush +
517 rs_priv->flush_time)) {
518
519 rs_priv->flush_pending = 1;
520 mod_timer(&rs_priv->rate_scale_flush,
521 jiffies + rs_priv->flush_time);
522 }
523
524 spin_unlock_irqrestore(&rs_priv->lock, flags);
525
526 sta_info_put(sta);
527
528 IWL_DEBUG_RATE("leave\n");
529
530 return;
531}
532
533static struct ieee80211_rate *iwl_get_lowest_rate(struct ieee80211_local
534 *local)
535{
536 struct ieee80211_hw_mode *mode = local->oper_hw_mode;
537 int i;
538
539 for (i = 0; i < mode->num_rates; i++) {
540 struct ieee80211_rate *rate = &mode->rates[i];
541
542 if (rate->flags & IEEE80211_RATE_SUPPORTED)
543 return rate;
544 }
545
546 return &mode->rates[0];
547}
548
549static u16 iwl_get_adjacent_rate(struct iwl_rate_scale_priv *rs_priv,
550 u8 index, u16 rate_mask, int phymode)
551{
552 u8 high = IWL_RATE_INVALID;
553 u8 low = IWL_RATE_INVALID;
554
555 /* 802.11A walks to the next literally adjacent rate in
556 * the rate table */
557 if (unlikely(phymode == MODE_IEEE80211A)) {
558 int i;
559 u32 mask;
560
561 /* Find the previous rate that is in the rate mask */
562 i = index - 1;
563 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
564 if (rate_mask & mask) {
565 low = i;
566 break;
567 }
568 }
569
570 /* Find the next rate that is in the rate mask */
571 i = index + 1;
572 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
573 if (rate_mask & mask) {
574 high = i;
575 break;
576 }
577 }
578
579 return (high << 8) | low;
580 }
581
582 low = index;
583 while (low != IWL_RATE_INVALID) {
584 if (rs_priv->tgg)
585 low = iwl_rates[low].prev_rs_tgg;
586 else
587 low = iwl_rates[low].prev_rs;
588 if (low == IWL_RATE_INVALID)
589 break;
590 if (rate_mask & (1 << low))
591 break;
592 IWL_DEBUG_RATE("Skipping masked lower rate: %d\n", low);
593 }
594
595 high = index;
596 while (high != IWL_RATE_INVALID) {
597 if (rs_priv->tgg)
598 high = iwl_rates[high].next_rs_tgg;
599 else
600 high = iwl_rates[high].next_rs;
601 if (high == IWL_RATE_INVALID)
602 break;
603 if (rate_mask & (1 << high))
604 break;
605 IWL_DEBUG_RATE("Skipping masked higher rate: %d\n", high);
606 }
607
608 return (high << 8) | low;
609}
610
611/**
612 * rs_get_rate - find the rate for the requested packet
613 *
614 * Returns the ieee80211_rate structure allocated by the driver.
615 *
616 * The rate control algorithm has no internal mapping between hw_mode's
617 * rate ordering and the rate ordering used by the rate control algorithm.
618 *
619 * The rate control algorithm uses a single table of rates that goes across
620 * the entire A/B/G spectrum vs. being limited to just one particular
621 * hw_mode.
622 *
623 * As such, we can't convert the index obtained below into the hw_mode's
624 * rate table and must reference the driver allocated rate table
625 *
626 */
627static struct ieee80211_rate *rs_get_rate(void *priv_rate,
628 struct net_device *dev,
629 struct sk_buff *skb,
630 struct rate_control_extra *extra)
631{
632 u8 low = IWL_RATE_INVALID;
633 u8 high = IWL_RATE_INVALID;
634 u16 high_low;
635 int index;
636 struct iwl_rate_scale_priv *rs_priv;
637 struct iwl_rate_scale_data *window = NULL;
638 int current_tpt = IWL_INVALID_VALUE;
639 int low_tpt = IWL_INVALID_VALUE;
640 int high_tpt = IWL_INVALID_VALUE;
641 u32 fail_count;
642 s8 scale_action = 0;
643 unsigned long flags;
644 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
645 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
646 struct sta_info *sta;
647 u16 fc, rate_mask;
648 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
649
650 IWL_DEBUG_RATE("enter\n");
651
652 memset(extra, 0, sizeof(*extra));
653
654 fc = le16_to_cpu(hdr->frame_control);
655 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
656 (is_multicast_ether_addr(hdr->addr1))) {
657 /* Send management frames and broadcast/multicast data using
658 * lowest rate. */
659 /* TODO: this could probably be improved.. */
660 IWL_DEBUG_RATE("leave: lowest rate (not data or is "
661 "multicast)\n");
662
663 return iwl_get_lowest_rate(local);
664 }
665
666 sta = sta_info_get(local, hdr->addr1);
667 if (!sta || !sta->rate_ctrl_priv) {
668 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
669 if (sta)
670 sta_info_put(sta);
671 return NULL;
672 }
673
674 rate_mask = sta->supp_rates;
675 index = min(sta->txrate & 0xffff, IWL_RATE_COUNT - 1);
676
677 rs_priv = (void *)sta->rate_ctrl_priv;
678
679 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
680 !rs_priv->ibss_sta_added) {
681 u8 sta_id = iwl_hw_find_station(priv, hdr->addr1);
682
683 if (sta_id == IWL_INVALID_STATION) {
684 IWL_DEBUG_RATE("LQ: ADD station " MAC_FMT "\n",
685 MAC_ARG(hdr->addr1));
686 sta_id = iwl_add_station(priv,
687 hdr->addr1, 0, CMD_ASYNC);
688 }
689 if (sta_id != IWL_INVALID_STATION)
690 rs_priv->ibss_sta_added = 1;
691 }
692
693 spin_lock_irqsave(&rs_priv->lock, flags);
694
695 if (rs_priv->start_rate != IWL_RATE_INVALID) {
696 index = rs_priv->start_rate;
697 rs_priv->start_rate = IWL_RATE_INVALID;
698 }
699
700 window = &(rs_priv->win[index]);
701
702 fail_count = window->counter - window->success_counter;
703
704 if (((fail_count <= IWL_RATE_MIN_FAILURE_TH) &&
705 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
706 window->average_tpt = IWL_INVALID_VALUE;
707 spin_unlock_irqrestore(&rs_priv->lock, flags);
708
709 IWL_DEBUG_RATE("Invalid average_tpt on rate %d: "
710 "counter: %d, success_counter: %d, "
711 "expected_tpt is %sNULL\n",
712 index,
713 window->counter,
714 window->success_counter,
715 rs_priv->expected_tpt ? "not " : "");
716 goto out;
717
718 }
719
720 window->average_tpt = ((window->success_ratio *
721 rs_priv->expected_tpt[index] + 64) / 128);
722 current_tpt = window->average_tpt;
723
724 high_low = iwl_get_adjacent_rate(rs_priv, index, rate_mask,
725 local->hw.conf.phymode);
726 low = high_low & 0xff;
727 high = (high_low >> 8) & 0xff;
728
729 if (low != IWL_RATE_INVALID)
730 low_tpt = rs_priv->win[low].average_tpt;
731
732 if (high != IWL_RATE_INVALID)
733 high_tpt = rs_priv->win[high].average_tpt;
734
735 spin_unlock_irqrestore(&rs_priv->lock, flags);
736
737 scale_action = 1;
738
739 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
740 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n");
741 scale_action = -1;
742 } else if ((low_tpt == IWL_INVALID_VALUE) &&
743 (high_tpt == IWL_INVALID_VALUE))
744 scale_action = 1;
745 else if ((low_tpt != IWL_INVALID_VALUE) &&
746 (high_tpt != IWL_INVALID_VALUE)
747 && (low_tpt < current_tpt)
748 && (high_tpt < current_tpt)) {
749 IWL_DEBUG_RATE("No action -- low [%d] & high [%d] < "
750 "current_tpt [%d]\n",
751 low_tpt, high_tpt, current_tpt);
752 scale_action = 0;
753 } else {
754 if (high_tpt != IWL_INVALID_VALUE) {
755 if (high_tpt > current_tpt)
756 scale_action = 1;
757 else {
758 IWL_DEBUG_RATE
759 ("decrease rate because of high tpt\n");
760 scale_action = -1;
761 }
762 } else if (low_tpt != IWL_INVALID_VALUE) {
763 if (low_tpt > current_tpt) {
764 IWL_DEBUG_RATE
765 ("decrease rate because of low tpt\n");
766 scale_action = -1;
767 } else
768 scale_action = 1;
769 }
770 }
771
772 if ((window->success_ratio > IWL_RATE_HIGH_TH) ||
773 (current_tpt > window->average_tpt)) {
774 IWL_DEBUG_RATE("No action -- success_ratio [%d] > HIGH_TH or "
775 "current_tpt [%d] > average_tpt [%d]\n",
776 window->success_ratio,
777 current_tpt, window->average_tpt);
778 scale_action = 0;
779 }
780
781 switch (scale_action) {
782 case -1:
783 if (low != IWL_RATE_INVALID)
784 index = low;
785 break;
786
787 case 1:
788 if (high != IWL_RATE_INVALID)
789 index = high;
790
791 break;
792
793 case 0:
794 default:
795 break;
796 }
797
798 IWL_DEBUG_RATE("Selected %d (action %d) - low %d high %d\n",
799 index, scale_action, low, high);
800
801 out:
802
803 sta->last_txrate = index;
804 sta->txrate = sta->last_txrate;
805 sta_info_put(sta);
806
807 IWL_DEBUG_RATE("leave: %d\n", index);
808
809 return &priv->ieee_rates[index];
810}
811
812static struct rate_control_ops rs_ops = {
813 .module = NULL,
814 .name = RS_NAME,
815 .tx_status = rs_tx_status,
816 .get_rate = rs_get_rate,
817 .rate_init = rs_rate_init,
818 .clear = rs_clear,
819 .alloc = rs_alloc,
820 .free = rs_free,
821 .alloc_sta = rs_alloc_sta,
822 .free_sta = rs_free_sta,
823};
824
825int iwl_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
826{
827 struct ieee80211_local *local = hw_to_local(hw);
828 struct iwl_priv *priv = hw->priv;
829 struct iwl_rate_scale_priv *rs_priv;
830 struct sta_info *sta;
831 unsigned long flags;
832 int count = 0, i;
833 u32 samples = 0, success = 0, good = 0;
834 unsigned long now = jiffies;
835 u32 max_time = 0;
836
837 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
838 if (!sta || !sta->rate_ctrl_priv) {
839 if (sta) {
840 sta_info_put(sta);
841 IWL_DEBUG_RATE("leave - no private rate data!\n");
842 } else
843 IWL_DEBUG_RATE("leave - no station!\n");
844 return sprintf(buf, "station %d not found\n", sta_id);
845 }
846
847 rs_priv = (void *)sta->rate_ctrl_priv;
848 spin_lock_irqsave(&rs_priv->lock, flags);
849 i = IWL_RATE_54M_INDEX;
850 while (1) {
851 u64 mask;
852 int j;
853
854 count +=
855 sprintf(&buf[count], " %2dMbs: ", iwl_rates[i].ieee / 2);
856
857 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
858 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
859 buf[count++] =
860 (rs_priv->win[i].data & mask) ? '1' : '0';
861
862 samples += rs_priv->win[i].counter;
863 good += rs_priv->win[i].success_counter;
864 success += rs_priv->win[i].success_counter * iwl_rates[i].ieee;
865
866 if (rs_priv->win[i].stamp) {
867 int delta =
868 jiffies_to_msecs(now - rs_priv->win[i].stamp);
869
870 if (delta > max_time)
871 max_time = delta;
872
873 count += sprintf(&buf[count], "%5dms\n", delta);
874 } else
875 buf[count++] = '\n';
876
877 j = iwl_get_prev_ieee_rate(i);
878 if (j == i)
879 break;
880 i = j;
881 }
882 spin_unlock_irqrestore(&rs_priv->lock, flags);
883 sta_info_put(sta);
884
885 /* Display the average rate of all samples taken.
886 *
887 * NOTE: We multiply the # of samples by 2 since the IEEE measurement
888 * added from iwl_rates is actually 2X the rate */
889 if (samples)
890 count += sprintf(
891 &buf[count],
892 "\nAverage rate is %3d.%02dMbs over last %4dms\n"
893 "%3d%% success (%d good packets over %d tries)\n",
894 success / (2 * samples), (success * 5 / samples) % 10,
895 max_time, good * 100 / samples, good, samples);
896 else
897 count += sprintf(&buf[count], "\nAverage rate: 0Mbs\n");
898
899 return count;
900}
901
902void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
903{
904 struct iwl_priv *priv = hw->priv;
905 s32 rssi = 0;
906 unsigned long flags;
907 struct ieee80211_local *local = hw_to_local(hw);
908 struct iwl_rate_scale_priv *rs_priv;
909 struct sta_info *sta;
910
911 IWL_DEBUG_RATE("enter\n");
912
913 if (!local->rate_ctrl->ops->name ||
914 strcmp(local->rate_ctrl->ops->name, RS_NAME)) {
915 IWL_WARNING("iwl-3945-rs not selected as rate control algo!\n");
916 IWL_DEBUG_RATE("leave - mac80211 picked the wrong RC algo.\n");
917 return;
918 }
919
920 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
921 if (!sta || !sta->rate_ctrl_priv) {
922 if (sta)
923 sta_info_put(sta);
924 IWL_DEBUG_RATE("leave - no private rate data!\n");
925 return;
926 }
927
928 rs_priv = (void *)sta->rate_ctrl_priv;
929
930 spin_lock_irqsave(&rs_priv->lock, flags);
931
932 rs_priv->tgg = 0;
933 switch (priv->phymode) {
934 case MODE_IEEE80211G:
935 if (priv->active_rxon.flags & RXON_FLG_TGG_PROTECT_MSK) {
936 rs_priv->tgg = 1;
937 rs_priv->expected_tpt = iwl_expected_tpt_g_prot;
938 } else
939 rs_priv->expected_tpt = iwl_expected_tpt_g;
940 break;
941
942 case MODE_IEEE80211A:
943 rs_priv->expected_tpt = iwl_expected_tpt_a;
944 break;
945
946 default:
947 IWL_WARNING("Invalid phymode. Defaulting to 802.11b\n");
948 case MODE_IEEE80211B:
949 rs_priv->expected_tpt = iwl_expected_tpt_b;
950 break;
951 }
952
953 sta_info_put(sta);
954 spin_unlock_irqrestore(&rs_priv->lock, flags);
955
956 rssi = priv->last_rx_rssi;
957 if (rssi == 0)
958 rssi = IWL_MIN_RSSI_VAL;
959
960 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RATE, "Network RSSI: %d\n", rssi);
961
962 rs_priv->start_rate = iwl_get_rate_index_by_rssi(rssi, priv->phymode);
963
964 IWL_DEBUG_RATE("leave: rssi %d assign rate index: "
965 "%d (plcp 0x%x)\n", rssi, rs_priv->start_rate,
966 iwl_rates[rs_priv->start_rate].plcp);
967}
968
969void iwl_rate_control_register(struct ieee80211_hw *hw)
970{
971 ieee80211_rate_control_register(&rs_ops);
972}
973
974void iwl_rate_control_unregister(struct ieee80211_hw *hw)
975{
976 ieee80211_rate_control_unregister(&rs_ops);
977}
978
979
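
iwl_collect_tx_data() above keeps a per-rate sliding window: a bitmask of the
last IWL_RATE_MAX_WINDOW (62) transmit outcomes plus a fixed-point success
ratio where 12800 represents 100%, which rs_get_rate() then compares against
IWL_RATE_HIGH_TH (11520, roughly 90%) and IWL_RATE_DECREASE_TH (1920, roughly
15%). The userspace sketch below reproduces just that bookkeeping, without the
driver's spinlock, retry loop and timestamp handling, to show where the ratio
settles for a 90% delivery rate.

#include <stdint.h>
#include <stdio.h>

#define RATE_MAX_WINDOW   62
#define RATE_HIGH_TH      11520   /* ~90% in 12800-based fixed point */
#define RATE_DECREASE_TH  1920    /* ~15% in 12800-based fixed point */

struct window {
	uint64_t data;        /* bitmask of recent outcomes, bit 0 = newest */
	int counter;          /* samples in window, capped at RATE_MAX_WINDOW */
	int success_counter;  /* set bits currently inside the window */
	int success_ratio;    /* 12800 * success_counter / counter */
};

static void collect(struct window *w, int success)
{
	if (w->counter == RATE_MAX_WINDOW) {
		/* oldest sample falls out; drop its contribution if it was a success */
		if (w->data & (1ULL << (RATE_MAX_WINDOW - 1)))
			w->success_counter--;
	} else {
		w->counter++;
	}

	w->data <<= 1;                 /* slide the window left by one bit */
	if (success) {
		w->success_counter++;
		w->data |= 1;
	}
	w->success_ratio = 12800 * w->success_counter / w->counter;
}

int main(void)
{
	struct window w = {0};
	int i;

	for (i = 0; i < 100; i++)
		collect(&w, i % 10 != 0);   /* 9 of every 10 frames succeed */

	printf("ratio=%d (high_th=%d decrease_th=%d)\n",
	       w.success_ratio, RATE_HIGH_TH, RATE_DECREASE_TH);
	return 0;
}

With a steady 90% delivery rate the computed ratio lands just above
RATE_HIGH_TH, which is the regime where rs_get_rate() stops changing rate.
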
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
new file mode 100644
index 000000000000..b926738e0ea1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
@@ -0,0 +1,191 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_3945_rs_h__
28#define __iwl_3945_rs_h__
29
30struct iwl_rate_info {
31 u8 plcp;
32 u8 ieee;
33 u8 prev_ieee; /* previous rate in IEEE speeds */
34 u8 next_ieee; /* next rate in IEEE speeds */
35 u8 prev_rs; /* previous rate used in rs algo */
36 u8 next_rs; /* next rate used in rs algo */
37 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
38 u8 next_rs_tgg; /* next rate used in TGG rs algo */
39};
40
41enum {
42 IWL_RATE_6M_INDEX = 0,
43 IWL_RATE_9M_INDEX,
44 IWL_RATE_12M_INDEX,
45 IWL_RATE_18M_INDEX,
46 IWL_RATE_24M_INDEX,
47 IWL_RATE_36M_INDEX,
48 IWL_RATE_48M_INDEX,
49 IWL_RATE_54M_INDEX,
50 IWL_RATE_1M_INDEX,
51 IWL_RATE_2M_INDEX,
52 IWL_RATE_5M_INDEX,
53 IWL_RATE_11M_INDEX,
54 IWL_RATE_COUNT,
55 IWL_RATE_INVM_INDEX,
56 IWL_RATE_INVALID = IWL_RATE_INVM_INDEX
57};
58
59enum {
60 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
61 IWL_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
62 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
63 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
64};
65
66/* #define vs. enum to keep from defaulting to 'large integer' */
67#define IWL_RATE_6M_MASK (1<<IWL_RATE_6M_INDEX)
68#define IWL_RATE_9M_MASK (1<<IWL_RATE_9M_INDEX)
69#define IWL_RATE_12M_MASK (1<<IWL_RATE_12M_INDEX)
70#define IWL_RATE_18M_MASK (1<<IWL_RATE_18M_INDEX)
71#define IWL_RATE_24M_MASK (1<<IWL_RATE_24M_INDEX)
72#define IWL_RATE_36M_MASK (1<<IWL_RATE_36M_INDEX)
73#define IWL_RATE_48M_MASK (1<<IWL_RATE_48M_INDEX)
74#define IWL_RATE_54M_MASK (1<<IWL_RATE_54M_INDEX)
75#define IWL_RATE_1M_MASK (1<<IWL_RATE_1M_INDEX)
76#define IWL_RATE_2M_MASK (1<<IWL_RATE_2M_INDEX)
77#define IWL_RATE_5M_MASK (1<<IWL_RATE_5M_INDEX)
78#define IWL_RATE_11M_MASK (1<<IWL_RATE_11M_INDEX)
79
80enum {
81 IWL_RATE_6M_PLCP = 13,
82 IWL_RATE_9M_PLCP = 15,
83 IWL_RATE_12M_PLCP = 5,
84 IWL_RATE_18M_PLCP = 7,
85 IWL_RATE_24M_PLCP = 9,
86 IWL_RATE_36M_PLCP = 11,
87 IWL_RATE_48M_PLCP = 1,
88 IWL_RATE_54M_PLCP = 3,
89 IWL_RATE_1M_PLCP = 10,
90 IWL_RATE_2M_PLCP = 20,
91 IWL_RATE_5M_PLCP = 55,
92 IWL_RATE_11M_PLCP = 110,
93};
94
95enum {
96 IWL_RATE_6M_IEEE = 12,
97 IWL_RATE_9M_IEEE = 18,
98 IWL_RATE_12M_IEEE = 24,
99 IWL_RATE_18M_IEEE = 36,
100 IWL_RATE_24M_IEEE = 48,
101 IWL_RATE_36M_IEEE = 72,
102 IWL_RATE_48M_IEEE = 96,
103 IWL_RATE_54M_IEEE = 108,
104 IWL_RATE_1M_IEEE = 2,
105 IWL_RATE_2M_IEEE = 4,
106 IWL_RATE_5M_IEEE = 11,
107 IWL_RATE_11M_IEEE = 22,
108};
109
110#define IWL_CCK_BASIC_RATES_MASK \
111 (IWL_RATE_1M_MASK | \
112 IWL_RATE_2M_MASK)
113
114#define IWL_CCK_RATES_MASK \
115 (IWL_BASIC_RATES_MASK | \
116 IWL_RATE_5M_MASK | \
117 IWL_RATE_11M_MASK)
118
119#define IWL_OFDM_BASIC_RATES_MASK \
120 (IWL_RATE_6M_MASK | \
121 IWL_RATE_12M_MASK | \
122 IWL_RATE_24M_MASK)
123
124#define IWL_OFDM_RATES_MASK \
125 (IWL_OFDM_BASIC_RATES_MASK | \
126 IWL_RATE_9M_MASK | \
127 IWL_RATE_18M_MASK | \
128 IWL_RATE_36M_MASK | \
129 IWL_RATE_48M_MASK | \
130 IWL_RATE_54M_MASK)
131
132#define IWL_BASIC_RATES_MASK \
133 (IWL_OFDM_BASIC_RATES_MASK | \
134 IWL_CCK_BASIC_RATES_MASK)
135
136#define IWL_RATES_MASK ((1<<IWL_RATE_COUNT)-1)
137
138#define IWL_INVALID_VALUE -1
139
140#define IWL_MIN_RSSI_VAL -100
141#define IWL_MAX_RSSI_VAL 0
142
143extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
144
145static inline u8 iwl_get_prev_ieee_rate(u8 rate_index)
146{
147 u8 rate = iwl_rates[rate_index].prev_ieee;
148
149 if (rate == IWL_RATE_INVALID)
150 rate = rate_index;
151 return rate;
152}
153
154/**
155 * iwl_fill_rs_info - Fill an output text buffer with the rate representation
156 *
157 * NOTE: This is provided as a quick mechanism for a user to visualize
158 * the performance of the rate control algorithm and is not meant to be
159 * parsed by software.
160 */
161extern int iwl_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
162
163/**
164 * iwl_rate_scale_init - Initialize the rate scale table based on assoc info
165 *
166 * The specific throughput table used is based on the type of network
167 * the station is associated with: A, B, G, or G w/ TGG protection
168 */
169extern void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
170
171/**
172 * iwl_rate_control_register - Register the rate control algorithm callbacks
173 *
174 * Since the rate control algorithm is hardware specific, there is no need
175 * or reason to place it as a stand alone module. The driver can call
176 * iwl_rate_control_register in order to register the rate control callbacks
177 * with the mac80211 subsystem. This should be performed prior to calling
178 * ieee80211_register_hw
179 *
180 */
181extern void iwl_rate_control_register(struct ieee80211_hw *hw);
182
183/**
184 * iwl_rate_control_unregister - Unregister the rate control callbacks
185 *
186 * This should be called after calling ieee80211_unregister_hw, but before
187 * the driver is unloaded.
188 */
189extern void iwl_rate_control_unregister(struct ieee80211_hw *hw);
190
191#endif
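
The per-rate masks defined above give every rate index its own bit, so the
grouped CCK/OFDM/basic masks are plain ORs and a "does the peer support rate
X" check is a single bit test, the same pattern rs_get_rate() applies to
sta->supp_rates in iwl-3945-rs.c. The sketch below restates that composition
with local names; the enum values mirror the header's index order, but
everything here is illustrative rather than the driver's own definitions.

#include <stdio.h>

enum { RATE_6M = 0, RATE_9M, RATE_12M, RATE_18M, RATE_24M, RATE_36M,
       RATE_48M, RATE_54M, RATE_1M, RATE_2M, RATE_5M, RATE_11M, RATE_COUNT };

#define RATE_MASK(idx)    (1 << (idx))
#define OFDM_BASIC_MASK   (RATE_MASK(RATE_6M) | RATE_MASK(RATE_12M) | RATE_MASK(RATE_24M))
#define CCK_BASIC_MASK    (RATE_MASK(RATE_1M) | RATE_MASK(RATE_2M))
#define BASIC_MASK        (OFDM_BASIC_MASK | CCK_BASIC_MASK)

int main(void)
{
	/* membership in a composed mask is a single AND against the rate's bit */
	printf("basic mask = 0x%03x, 24M is basic: %s\n",
	       BASIC_MASK, (BASIC_MASK & RATE_MASK(RATE_24M)) ? "yes" : "no");
	return 0;
}
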
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
new file mode 100644
index 000000000000..26f03a0b878d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -0,0 +1,2290 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <linux/firmware.h>
38#include <net/mac80211.h>
39
40#include <linux/etherdevice.h>
41#include <linux/delay.h>
42
43#include "iwlwifi.h"
44#include "iwl-helpers.h"
45#include "iwl-3945.h"
46#include "iwl-3945-rs.h"
47
48#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
49 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
50 IWL_RATE_##r##M_IEEE, \
51 IWL_RATE_##ip##M_INDEX, \
52 IWL_RATE_##in##M_INDEX, \
53 IWL_RATE_##rp##M_INDEX, \
54 IWL_RATE_##rn##M_INDEX, \
55 IWL_RATE_##pp##M_INDEX, \
56 IWL_RATE_##np##M_INDEX }
57
58/*
59 * Parameter order:
60 * rate, prev/next ieee rate, prev/next rs rate, prev/next tgg rate
61 *
62 * If there isn't a valid next or previous rate then INV is used which
63 * maps to IWL_RATE_INVALID
64 *
65 */
66const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
67 IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
68 IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
69 IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
70 IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
71 IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
72 IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
73 IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
74 IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
75 IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
76 IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
77 IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
78 IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
79};
80
81/* 1 = enable the iwl_disable_events() function */
82#define IWL_EVT_DISABLE (0)
83#define IWL_EVT_DISABLE_SIZE (1532/32)
84
85/**
86 * iwl_disable_events - Disable selected events in uCode event log
87 *
88 * Disable an event by writing "1"s into "disable"
89 * bitmap in SRAM. Bit position corresponds to Event # (id/type).
90 * Default values of 0 enable uCode events to be logged.
91 * Use only for special debugging. This function is just a placeholder as-is,
92 * you'll need to provide the special bits! ...
93 * ... and set IWL_EVT_DISABLE to 1. */
94void iwl_disable_events(struct iwl_priv *priv)
95{
96 int rc;
97 int i;
98 u32 base; /* SRAM address of event log header */
99 u32 disable_ptr; /* SRAM address of event-disable bitmap array */
100 u32 array_size; /* # of u32 entries in array */
101 u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
102 0x00000000, /* 31 - 0 Event id numbers */
103 0x00000000, /* 63 - 32 */
104 0x00000000, /* 95 - 64 */
105 0x00000000, /* 127 - 96 */
106 0x00000000, /* 159 - 128 */
107 0x00000000, /* 191 - 160 */
108 0x00000000, /* 223 - 192 */
109 0x00000000, /* 255 - 224 */
110 0x00000000, /* 287 - 256 */
111 0x00000000, /* 319 - 288 */
112 0x00000000, /* 351 - 320 */
113 0x00000000, /* 383 - 352 */
114 0x00000000, /* 415 - 384 */
115 0x00000000, /* 447 - 416 */
116 0x00000000, /* 479 - 448 */
117 0x00000000, /* 511 - 480 */
118 0x00000000, /* 543 - 512 */
119 0x00000000, /* 575 - 544 */
120 0x00000000, /* 607 - 576 */
121 0x00000000, /* 639 - 608 */
122 0x00000000, /* 671 - 640 */
123 0x00000000, /* 703 - 672 */
124 0x00000000, /* 735 - 704 */
125 0x00000000, /* 767 - 736 */
126 0x00000000, /* 799 - 768 */
127 0x00000000, /* 831 - 800 */
128 0x00000000, /* 863 - 832 */
129 0x00000000, /* 895 - 864 */
130 0x00000000, /* 927 - 896 */
131 0x00000000, /* 959 - 928 */
132 0x00000000, /* 991 - 960 */
133 0x00000000, /* 1023 - 992 */
134 0x00000000, /* 1055 - 1024 */
135 0x00000000, /* 1087 - 1056 */
136 0x00000000, /* 1119 - 1088 */
137 0x00000000, /* 1151 - 1120 */
138 0x00000000, /* 1183 - 1152 */
139 0x00000000, /* 1215 - 1184 */
140 0x00000000, /* 1247 - 1216 */
141 0x00000000, /* 1279 - 1248 */
142 0x00000000, /* 1311 - 1280 */
143 0x00000000, /* 1343 - 1312 */
144 0x00000000, /* 1375 - 1344 */
145 0x00000000, /* 1407 - 1376 */
146 0x00000000, /* 1439 - 1408 */
147 0x00000000, /* 1471 - 1440 */
148 0x00000000, /* 1503 - 1472 */
149 };
150
151 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
152 if (!iwl_hw_valid_rtc_data_addr(base)) {
153 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
154 return;
155 }
156
157 rc = iwl_grab_restricted_access(priv);
158 if (rc) {
159 IWL_WARNING("Can not read from adapter at this time.\n");
160 return;
161 }
162
163 disable_ptr = iwl_read_restricted_mem(priv, base + (4 * sizeof(u32)));
164 array_size = iwl_read_restricted_mem(priv, base + (5 * sizeof(u32)));
165 iwl_release_restricted_access(priv);
166
167 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
168 IWL_DEBUG_INFO("Disabling selected uCode log events at 0x%x\n",
169 disable_ptr);
170 rc = iwl_grab_restricted_access(priv);
171 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
172 iwl_write_restricted_mem(priv,
173 disable_ptr +
174 (i * sizeof(u32)),
175 evt_disable[i]);
176
177 iwl_release_restricted_access(priv);
178 } else {
179 IWL_DEBUG_INFO("Selected uCode log events may be disabled\n");
180 IWL_DEBUG_INFO(" by writing \"1\"s into disable bitmap\n");
181 IWL_DEBUG_INFO(" in SRAM at 0x%x, size %d u32s\n",
182 disable_ptr, array_size);
183 }
184
185}
186
187/**
188 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
189 * @priv: eeprom and antenna fields are used to determine antenna flags
190 *
191 * priv->eeprom is used to determine if antenna AUX/MAIN are reversed
192 * priv->antenna specifies the antenna diversity mode:
193 *
194 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
195 * IWL_ANTENNA_MAIN - Force MAIN antenna
196 * IWL_ANTENNA_AUX - Force AUX antenna
197 */
198__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
199{
200 switch (priv->antenna) {
201 case IWL_ANTENNA_DIVERSITY:
202 return 0;
203
204 case IWL_ANTENNA_MAIN:
205 if (priv->eeprom.antenna_switch_type)
206 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
207 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
208
209 case IWL_ANTENNA_AUX:
210 if (priv->eeprom.antenna_switch_type)
211 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
212 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
213 }
214
215 /* bad antenna selector value */
216 IWL_ERROR("Bad antenna selector value (0x%x)\n", priv->antenna);
217 return 0; /* "diversity" is default if error */
218}
219
220/*****************************************************************************
221 *
222 * Intel PRO/Wireless 3945ABG/BG Network Connection
223 *
224 * RX handler implementations
225 *
226 * Used by iwl-base.c
227 *
228 *****************************************************************************/
229
230void iwl_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
231{
232 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
233 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
234 (int)sizeof(struct iwl_notif_statistics),
235 le32_to_cpu(pkt->len));
236
237 memcpy(&priv->statistics, pkt->u.raw, sizeof(priv->statistics));
238
239 priv->last_statistics_time = jiffies;
240}
241
242static void iwl3945_handle_data_packet(struct iwl_priv *priv, int is_data,
243 struct iwl_rx_mem_buffer *rxb,
244 struct ieee80211_rx_status *stats,
245 u16 phy_flags)
246{
247 struct ieee80211_hdr *hdr;
248 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
249 struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
250 struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt);
251 short len = le16_to_cpu(rx_hdr->len);
252
253 /* We received data from the HW, so stop the watchdog */
254 if (unlikely((len + IWL_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
255 IWL_DEBUG_DROP("Corruption detected!\n");
256 return;
257 }
258
259 /* We only process data packets if the interface is open */
260 if (unlikely(!priv->is_open)) {
261 IWL_DEBUG_DROP_LIMIT
262 ("Dropping packet while interface is not open.\n");
263 return;
264 }
265 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
266 if (iwl_param_hwcrypto)
267 iwl_set_decrypted_flag(priv, rxb->skb,
268 le32_to_cpu(rx_end->status),
269 stats);
270 iwl_handle_data_packet_monitor(priv, rxb, IWL_RX_DATA(pkt),
271 len, stats, phy_flags);
272 return;
273 }
274
275 skb_reserve(rxb->skb, (void *)rx_hdr->payload - (void *)pkt);
276 /* Set the size of the skb to the size of the frame */
277 skb_put(rxb->skb, le16_to_cpu(rx_hdr->len));
278
279 hdr = (void *)rxb->skb->data;
280
281 if (iwl_param_hwcrypto)
282 iwl_set_decrypted_flag(priv, rxb->skb,
283 le32_to_cpu(rx_end->status), stats);
284
285 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
286 rxb->skb = NULL;
287}
288
289static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
290 struct iwl_rx_mem_buffer *rxb)
291{
292 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
293 struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
294 struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
295 struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt);
296 struct ieee80211_hdr *header;
297 u16 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
298 u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg);
299 u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
300 struct ieee80211_rx_status stats = {
301 .mactime = le64_to_cpu(rx_end->timestamp),
302 .freq = ieee80211chan2mhz(le16_to_cpu(rx_hdr->channel)),
303 .channel = le16_to_cpu(rx_hdr->channel),
304 .phymode = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
305 MODE_IEEE80211G : MODE_IEEE80211A,
306 .antenna = 0,
307 .rate = rx_hdr->rate,
308 .flag = 0,
309 };
310 u8 network_packet;
311 int snr;
312
313	if (unlikely(rx_stats->phy_count > 20)) {
314		IWL_DEBUG_DROP
315		    ("dsp size out of range [0,20]: "
316		     "%d\n", rx_stats->phy_count);
317 return;
318 }
319
320 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
321 || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
322 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
323 return;
324 }
325
326 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
327 iwl3945_handle_data_packet(priv, 1, rxb, &stats, phy_flags);
328 return;
329 }
330
331 /* Convert 3945's rssi indicator to dBm */
332 stats.ssi = rx_stats->rssi - IWL_RSSI_OFFSET;
333
334 /* Set default noise value to -127 */
335 if (priv->last_rx_noise == 0)
336 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
337
338 /* 3945 provides noise info for OFDM frames only.
339 * sig_avg and noise_diff are measured by the 3945's digital signal
340 * processor (DSP), and indicate linear levels of signal level and
341 * distortion/noise within the packet preamble after
342 * automatic gain control (AGC). sig_avg should stay fairly
343 * constant if the radio's AGC is working well.
344 * Since these values are linear (not dB or dBm), linear
345 * signal-to-noise ratio (SNR) is (sig_avg / noise_diff).
346 * Convert linear SNR to dB SNR, then subtract that from rssi dBm
347 * to obtain noise level in dBm.
348 * Calculate stats.signal (quality indicator in %) based on SNR. */
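	/* Worked example (illustrative numbers only): sig_avg = 160 and
	 * noise_diff = 10 give a linear SNR of 16, roughly 12 dB; with
	 * ssi = -60 dBm that yields a noise estimate of about -72 dBm. */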
349 if (rx_stats_noise_diff) {
350 snr = rx_stats_sig_avg / rx_stats_noise_diff;
351 stats.noise = stats.ssi - iwl_calc_db_from_ratio(snr);
352 stats.signal = iwl_calc_sig_qual(stats.ssi, stats.noise);
353
354 /* If noise info not available, calculate signal quality indicator (%)
355 * using just the dBm signal level. */
356 } else {
357 stats.noise = priv->last_rx_noise;
358 stats.signal = iwl_calc_sig_qual(stats.ssi, 0);
359 }
360
361
362 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
363 stats.ssi, stats.noise, stats.signal,
364 rx_stats_sig_avg, rx_stats_noise_diff);
365
366 stats.freq = ieee80211chan2mhz(stats.channel);
367
368 /* can be covered by iwl_report_frame() in most cases */
369/* IWL_DEBUG_RX("RX status: 0x%08X\n", rx_end->status); */
370
371 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
372
373 network_packet = iwl_is_network_packet(priv, header);
374
375#ifdef CONFIG_IWLWIFI_DEBUG
376 if (iwl_debug_level & IWL_DL_STATS && net_ratelimit())
377		IWL_DEBUG_STATS
378		    ("[%c] %d RSSI: %d Signal: %u, Noise: %d, Rate: %u\n",
379		     network_packet ? '*' : ' ',
380		     stats.channel, stats.ssi, stats.signal,
381		     stats.noise, stats.rate);
382
383 if (iwl_debug_level & (IWL_DL_RX))
384 /* Set "1" to report good data frames in groups of 100 */
385 iwl_report_frame(priv, pkt, header, 1);
386#endif
387
388 if (network_packet) {
389 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp);
390 priv->last_tsf = le64_to_cpu(rx_end->timestamp);
391 priv->last_rx_rssi = stats.ssi;
392 priv->last_rx_noise = stats.noise;
393 }
394
395 switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) {
396 case IEEE80211_FTYPE_MGMT:
397 switch (le16_to_cpu(header->frame_control) &
398 IEEE80211_FCTL_STYPE) {
399 case IEEE80211_STYPE_PROBE_RESP:
400 case IEEE80211_STYPE_BEACON:{
401 /* If this is a beacon or probe response for
402 * our network then cache the beacon
403 * timestamp */
404 if ((((priv->iw_mode == IEEE80211_IF_TYPE_STA)
405 && !compare_ether_addr(header->addr2,
406 priv->bssid)) ||
407 ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
408 && !compare_ether_addr(header->addr3,
409 priv->bssid)))) {
410 struct ieee80211_mgmt *mgmt =
411 (struct ieee80211_mgmt *)header;
412 __le32 *pos;
413					pos = (__le32 *)&mgmt->u.beacon.timestamp;
416 priv->timestamp0 = le32_to_cpu(pos[0]);
417 priv->timestamp1 = le32_to_cpu(pos[1]);
418 priv->beacon_int = le16_to_cpu(
419 mgmt->u.beacon.beacon_int);
420 if (priv->call_post_assoc_from_beacon &&
421 (priv->iw_mode ==
422 IEEE80211_IF_TYPE_STA))
423 queue_work(priv->workqueue,
424 &priv->post_associate.work);
425
426 priv->call_post_assoc_from_beacon = 0;
427 }
428
429 break;
430 }
431
432 case IEEE80211_STYPE_ACTION:
433 /* TODO: Parse 802.11h frames for CSA... */
434 break;
435
436			/*
437			 * TODO: The upper stack provides no callback to tell us
438			 * when association has completed, so as a workaround we
439			 * sniff the assoc_resp management frame here and finish
440			 * the association process ourselves.
441			 */
442 case IEEE80211_STYPE_ASSOC_RESP:
443 case IEEE80211_STYPE_REASSOC_RESP:{
444 struct ieee80211_mgmt *mgnt =
445 (struct ieee80211_mgmt *)header;
446				priv->assoc_id =
447				    le16_to_cpu(mgnt->u.assoc_resp.aid) &
448				    ~((1 << 15) | (1 << 14));
449 priv->assoc_capability =
450 le16_to_cpu(mgnt->u.assoc_resp.capab_info);
451 if (priv->beacon_int)
452 queue_work(priv->workqueue,
453 &priv->post_associate.work);
454 else
455 priv->call_post_assoc_from_beacon = 1;
456 break;
457 }
458
459 case IEEE80211_STYPE_PROBE_REQ:{
460 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
461 IWL_DEBUG_DROP
462 ("Dropping (non network): " MAC_FMT
463 ", " MAC_FMT ", " MAC_FMT "\n",
464 MAC_ARG(header->addr1),
465 MAC_ARG(header->addr2),
466 MAC_ARG(header->addr3));
467 return;
468 }
469 }
470
471 iwl3945_handle_data_packet(priv, 0, rxb, &stats, phy_flags);
472 break;
473
474 case IEEE80211_FTYPE_CTL:
475 break;
476
477 case IEEE80211_FTYPE_DATA:
478 if (unlikely(is_duplicate_packet(priv, header)))
479 IWL_DEBUG_DROP("Dropping (dup): " MAC_FMT ", "
480 MAC_FMT ", " MAC_FMT "\n",
481 MAC_ARG(header->addr1),
482 MAC_ARG(header->addr2),
483 MAC_ARG(header->addr3));
484 else
485 iwl3945_handle_data_packet(priv, 1, rxb, &stats,
486 phy_flags);
487 break;
488 }
489}
490
491int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
492 dma_addr_t addr, u16 len)
493{
494 int count;
495 u32 pad;
496 struct iwl_tfd_frame *tfd = (struct iwl_tfd_frame *)ptr;
497
498 count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
499 pad = TFD_CTL_PAD_GET(le32_to_cpu(tfd->control_flags));
500
501 if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
502		IWL_ERROR("Error: cannot send more than %d chunks\n",
503 NUM_TFD_CHUNKS);
504 return -EINVAL;
505 }
506
507 tfd->pa[count].addr = cpu_to_le32(addr);
508 tfd->pa[count].len = cpu_to_le32(len);
509
510 count++;
511
512 tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
513 TFD_CTL_PAD_SET(pad));
514
515 return 0;
516}
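
/* Each call to iwl_hw_txq_attach_buf_to_tfd() appends one DMA chunk
 * (address + length) to the TFD and re-packs the incremented chunk count
 * into tfd->control_flags, up to NUM_TFD_CHUNKS entries per TFD. */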
517
518/**
519 * iwl_hw_txq_free_tfd - Free the single TFD at index txq->q.last_used
520 *
521 * Does NOT advance any indexes
522 */
523int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
524{
525 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
526 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.last_used];
527 struct pci_dev *dev = priv->pci_dev;
528 int i;
529 int counter;
530
531 /* classify bd */
532 if (txq->q.id == IWL_CMD_QUEUE_NUM)
533		/* nothing to clean up for host commands */
534 return 0;
535
536 /* sanity check */
537 counter = TFD_CTL_COUNT_GET(le32_to_cpu(bd->control_flags));
538 if (counter > NUM_TFD_CHUNKS) {
539 IWL_ERROR("Too many chunks: %i\n", counter);
540		/* @todo issue fatal error, this is quite a serious situation */
541 return 0;
542 }
543
544 /* unmap chunks if any */
545
546 for (i = 1; i < counter; i++) {
547 pci_unmap_single(dev, le32_to_cpu(bd->pa[i].addr),
548 le32_to_cpu(bd->pa[i].len), PCI_DMA_TODEVICE);
549		if (txq->txb[txq->q.last_used].skb[0]) {
550			struct sk_buff *skb = txq->txb[txq->q.last_used].skb[0];
551
552			/* Can be called from interrupt context */
553			dev_kfree_skb_any(skb);
554			txq->txb[txq->q.last_used].skb[0] = NULL;
555		}
557 }
558 return 0;
559}
560
561u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *addr)
562{
563 int i;
564 int ret = IWL_INVALID_STATION;
565 unsigned long flags;
566
567 spin_lock_irqsave(&priv->sta_lock, flags);
568 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
569 if ((priv->stations[i].used) &&
570 (!compare_ether_addr
571 (priv->stations[i].sta.sta.addr, addr))) {
572 ret = i;
573 goto out;
574 }
575
576	IWL_DEBUG_INFO("cannot find STA " MAC_FMT " (total %d)\n",
577 MAC_ARG(addr), priv->num_stations);
578 out:
579 spin_unlock_irqrestore(&priv->sta_lock, flags);
580 return ret;
581}
582
583/**
584 * iwl_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
585 */
587void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv,
588 struct iwl_cmd *cmd,
589 struct ieee80211_tx_control *ctrl,
590 struct ieee80211_hdr *hdr, int sta_id, int tx_id)
591{
592 unsigned long flags;
593 u16 rate_index = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1);
594 u16 rate_mask;
595 int rate;
596 u8 rts_retry_limit;
597 u8 data_retry_limit;
598 __le32 tx_flags;
599 u16 fc = le16_to_cpu(hdr->frame_control);
600
601 rate = iwl_rates[rate_index].plcp;
602 tx_flags = cmd->cmd.tx.tx_flags;
603
604 /* We need to figure out how to get the sta->supp_rates while
605 * in this running context; perhaps encoding into ctrl->tx_rate? */
606 rate_mask = IWL_RATES_MASK;
607
608 spin_lock_irqsave(&priv->sta_lock, flags);
609
610 priv->stations[sta_id].current_rate.rate_n_flags = rate;
611
612 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
613 (sta_id != IWL3945_BROADCAST_ID) &&
614 (sta_id != IWL_MULTICAST_ID))
615 priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate;
616
617 spin_unlock_irqrestore(&priv->sta_lock, flags);
618
619 if (tx_id >= IWL_CMD_QUEUE_NUM)
620 rts_retry_limit = 3;
621 else
622 rts_retry_limit = 7;
623
624 if (ieee80211_is_probe_response(fc)) {
625 data_retry_limit = 3;
626 if (data_retry_limit < rts_retry_limit)
627 rts_retry_limit = data_retry_limit;
628 } else
629 data_retry_limit = IWL_DEFAULT_TX_RETRY;
630
631 if (priv->data_retry_limit != -1)
632 data_retry_limit = priv->data_retry_limit;
633
634 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
635 switch (fc & IEEE80211_FCTL_STYPE) {
636 case IEEE80211_STYPE_AUTH:
637 case IEEE80211_STYPE_DEAUTH:
638 case IEEE80211_STYPE_ASSOC_REQ:
639 case IEEE80211_STYPE_REASSOC_REQ:
640 if (tx_flags & TX_CMD_FLG_RTS_MSK) {
641 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
642 tx_flags |= TX_CMD_FLG_CTS_MSK;
643 }
644 break;
645 default:
646 break;
647 }
648 }
649
650 cmd->cmd.tx.rts_retry_limit = rts_retry_limit;
651 cmd->cmd.tx.data_retry_limit = data_retry_limit;
652 cmd->cmd.tx.rate = rate;
653 cmd->cmd.tx.tx_flags = tx_flags;
654
655 /* OFDM */
656 cmd->cmd.tx.supp_rates[0] = rate_mask & IWL_OFDM_RATES_MASK;
657
658 /* CCK */
659 cmd->cmd.tx.supp_rates[1] = (rate_mask >> 8) & 0xF;
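	/* Only four CCK bits fit here: IWL_RATES_MASK carries CCK in bits
	 * 8..11, while supp_rates[1] wants them in bits 0..3 (see the longer
	 * note in iwl_hw_get_beacon_cmd()), hence the >> 8. */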
660
661 IWL_DEBUG_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
662 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
663 cmd->cmd.tx.rate, le32_to_cpu(cmd->cmd.tx.tx_flags),
664 cmd->cmd.tx.supp_rates[1], cmd->cmd.tx.supp_rates[0]);
665}
666
667u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
668{
669 unsigned long flags_spin;
670 struct iwl_station_entry *station;
671
672 if (sta_id == IWL_INVALID_STATION)
673 return IWL_INVALID_STATION;
674
675 spin_lock_irqsave(&priv->sta_lock, flags_spin);
676 station = &priv->stations[sta_id];
677
678 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
679 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
680 station->current_rate.rate_n_flags = tx_rate;
681 station->sta.mode = STA_CONTROL_MODIFY_MSK;
682
683 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
684
685 iwl_send_add_station(priv, &station->sta, flags);
686 IWL_DEBUG_RATE("SCALE sync station %d to rate %d\n",
687 sta_id, tx_rate);
688 return sta_id;
689}
690
691void iwl_hw_card_show_info(struct iwl_priv *priv)
692{
693 IWL_DEBUG_INFO("3945ABG HW Version %u.%u.%u\n",
694 ((priv->eeprom.board_revision >> 8) & 0x0F),
695 ((priv->eeprom.board_revision >> 8) >> 4),
696 (priv->eeprom.board_revision & 0x00FF));
697
698 IWL_DEBUG_INFO("3945ABG PBA Number %.*s\n",
699 (int)sizeof(priv->eeprom.board_pba_number),
700 priv->eeprom.board_pba_number);
701
702 IWL_DEBUG_INFO("EEPROM_ANTENNA_SWITCH_TYPE is 0x%02X\n",
703 priv->eeprom.antenna_switch_type);
704}
705
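/*
 * iwl3945_nic_set_pwr_src - select the NIC's power source
 *
 * A non-zero pwr_max selects the main (VMAIN) supply.  pwr_max == 0
 * switches to the auxiliary (VAUX) supply, but only when PCI config
 * space reports PME-from-D3cold support; otherwise the source is left
 * unchanged.
 */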
706static int iwl3945_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
707{
708 int rc;
709 unsigned long flags;
710
711 spin_lock_irqsave(&priv->lock, flags);
712 rc = iwl_grab_restricted_access(priv);
713 if (rc) {
714 spin_unlock_irqrestore(&priv->lock, flags);
715 return rc;
716 }
717
718 if (!pwr_max) {
719 u32 val;
720
721 rc = pci_read_config_dword(priv->pci_dev,
722 PCI_POWER_SOURCE, &val);
723 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
724 iwl_set_bits_mask_restricted_reg(priv, APMG_PS_CTRL_REG,
725 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
726 ~APMG_PS_CTRL_MSK_PWR_SRC);
727 iwl_release_restricted_access(priv);
728
729 iwl_poll_bit(priv, CSR_GPIO_IN,
730 CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
731 CSR_GPIO_IN_BIT_AUX_POWER, 5000);
732 } else
733 iwl_release_restricted_access(priv);
734 } else {
735 iwl_set_bits_mask_restricted_reg(priv, APMG_PS_CTRL_REG,
736 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
737 ~APMG_PS_CTRL_MSK_PWR_SRC);
738
739 iwl_release_restricted_access(priv);
740 iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
741 CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
742 }
743 spin_unlock_irqrestore(&priv->lock, flags);
744
745 return rc;
746}
747
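/*
 * iwl3945_rx_init - program the flow handler's Rx DMA channel
 *
 * Points channel 0 at the Rx buffer descriptor (RBD) circular buffer and
 * at the shared read pointer, then enables the channel with a 128-byte
 * maximum fragment size and interrupts routed to the host.
 */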
748static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
749{
750 int rc;
751 unsigned long flags;
752
753 spin_lock_irqsave(&priv->lock, flags);
754 rc = iwl_grab_restricted_access(priv);
755 if (rc) {
756 spin_unlock_irqrestore(&priv->lock, flags);
757 return rc;
758 }
759
760 iwl_write_restricted(priv, FH_RCSR_RBD_BASE(0), rxq->dma_addr);
761 iwl_write_restricted(priv, FH_RCSR_RPTR_ADDR(0),
762 priv->hw_setting.shared_phys +
763 offsetof(struct iwl_shared, rx_read_ptr[0]));
764 iwl_write_restricted(priv, FH_RCSR_WPTR(0), 0);
765 iwl_write_restricted(priv, FH_RCSR_CONFIG(0),
766 ALM_FH_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
767 ALM_FH_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
768 ALM_FH_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
769 ALM_FH_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
770 (RX_QUEUE_SIZE_LOG << ALM_FH_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
771 ALM_FH_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
772 (1 << ALM_FH_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
773 ALM_FH_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
774
775 /* fake read to flush all prev I/O */
776 iwl_read_restricted(priv, FH_RSSR_CTRL);
777
778 iwl_release_restricted_access(priv);
779 spin_unlock_irqrestore(&priv->lock, flags);
780
781 return 0;
782}
783
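/*
 * iwl3945_tx_reset - bring up the Tx scheduler and flow handler
 *
 * Puts the scheduler into bypass mode, marks RA 0 and all six Tx FIFOs
 * active, points FH_TSSR_CBB_BASE at the shared host area, and sets the
 * flow handler's message/snoop configuration.
 */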
784static int iwl3945_tx_reset(struct iwl_priv *priv)
785{
786 int rc;
787 unsigned long flags;
788
789 spin_lock_irqsave(&priv->lock, flags);
790 rc = iwl_grab_restricted_access(priv);
791 if (rc) {
792 spin_unlock_irqrestore(&priv->lock, flags);
793 return rc;
794 }
795
796 /* bypass mode */
797 iwl_write_restricted_reg(priv, SCD_MODE_REG, 0x2);
798
799 /* RA 0 is active */
800 iwl_write_restricted_reg(priv, SCD_ARASTAT_REG, 0x01);
801
802	/* all 6 FIFOs are active */
803 iwl_write_restricted_reg(priv, SCD_TXFACT_REG, 0x3f);
804
805 iwl_write_restricted_reg(priv, SCD_SBYP_MODE_1_REG, 0x010000);
806 iwl_write_restricted_reg(priv, SCD_SBYP_MODE_2_REG, 0x030002);
807 iwl_write_restricted_reg(priv, SCD_TXF4MF_REG, 0x000004);
808 iwl_write_restricted_reg(priv, SCD_TXF5MF_REG, 0x000005);
809
810 iwl_write_restricted(priv, FH_TSSR_CBB_BASE,
811 priv->hw_setting.shared_phys);
812
813 iwl_write_restricted(priv, FH_TSSR_MSG_CONFIG,
814 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
815 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
816 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
817 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
818 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
819 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
820 ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
821
822 iwl_release_restricted_access(priv);
823 spin_unlock_irqrestore(&priv->lock, flags);
824
825 return 0;
826}
827
828/**
829 * iwl3945_txq_ctx_reset - Reset TX queue context
830 *
831 * Destroys all DMA structures and initializes them again
832 */
833static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
834{
835 int rc;
836 int txq_id, slots_num;
837
838 iwl_hw_txq_ctx_free(priv);
839
840 /* Tx CMD queue */
841 rc = iwl3945_tx_reset(priv);
842 if (rc)
843 goto error;
844
845 /* Tx queue(s) */
846 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) {
847 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
848 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
849 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
850 txq_id);
851 if (rc) {
852 IWL_ERROR("Tx %d queue init failed\n", txq_id);
853 goto error;
854 }
855 }
856
857 return rc;
858
859 error:
860 iwl_hw_txq_ctx_free(priv);
861 return rc;
862}
863
864int iwl_hw_nic_init(struct iwl_priv *priv)
865{
866 u8 rev_id;
867 int rc;
868 unsigned long flags;
869 struct iwl_rx_queue *rxq = &priv->rxq;
870
871 iwl_power_init_handle(priv);
872
873 spin_lock_irqsave(&priv->lock, flags);
874 iwl_set_bit(priv, CSR_ANA_PLL_CFG, (1 << 24));
875 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
876 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
877
878 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
879 rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
880 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
881 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
882 if (rc < 0) {
883 spin_unlock_irqrestore(&priv->lock, flags);
884 IWL_DEBUG_INFO("Failed to init the card\n");
885 return rc;
886 }
887
888 rc = iwl_grab_restricted_access(priv);
889 if (rc) {
890 spin_unlock_irqrestore(&priv->lock, flags);
891 return rc;
892 }
893 iwl_write_restricted_reg(priv, APMG_CLK_EN_REG,
894 APMG_CLK_VAL_DMA_CLK_RQT |
895 APMG_CLK_VAL_BSM_CLK_RQT);
896 udelay(20);
897 iwl_set_bits_restricted_reg(priv, APMG_PCIDEV_STT_REG,
898 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
899 iwl_release_restricted_access(priv);
900 spin_unlock_irqrestore(&priv->lock, flags);
901
902 /* Determine HW type */
903 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
904 if (rc)
905 return rc;
906 IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id);
907
908 iwl3945_nic_set_pwr_src(priv, 1);
909 spin_lock_irqsave(&priv->lock, flags);
910
911 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
912		IWL_DEBUG_INFO("RTP type\n");
913 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
914 IWL_DEBUG_INFO("ALM-MB type\n");
915 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
916 CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MB);
917 } else {
918 IWL_DEBUG_INFO("ALM-MM type\n");
919 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
920 CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MM);
921 }
922
923 spin_unlock_irqrestore(&priv->lock, flags);
924
925 /* Initialize the EEPROM */
926 rc = iwl_eeprom_init(priv);
927 if (rc)
928 return rc;
929
930 spin_lock_irqsave(&priv->lock, flags);
931 if (EEPROM_SKU_CAP_OP_MODE_MRC == priv->eeprom.sku_cap) {
932 IWL_DEBUG_INFO("SKU OP mode is mrc\n");
933 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
934 CSR_HW_IF_CONFIG_REG_BIT_SKU_MRC);
935 } else
936 IWL_DEBUG_INFO("SKU OP mode is basic\n");
937
938 if ((priv->eeprom.board_revision & 0xF0) == 0xD0) {
939 IWL_DEBUG_INFO("3945ABG revision is 0x%X\n",
940 priv->eeprom.board_revision);
941 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
942 CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
943 } else {
944 IWL_DEBUG_INFO("3945ABG revision is 0x%X\n",
945 priv->eeprom.board_revision);
946 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
947 CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
948 }
949
950 if (priv->eeprom.almgor_m_version <= 1) {
951 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
952 CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
953 IWL_DEBUG_INFO("Card M type A version is 0x%X\n",
954 priv->eeprom.almgor_m_version);
955 } else {
956 IWL_DEBUG_INFO("Card M type B version is 0x%X\n",
957 priv->eeprom.almgor_m_version);
958 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
959 CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
960 }
961 spin_unlock_irqrestore(&priv->lock, flags);
962
963 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
964 IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");
965
966 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
967 IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
968
969 /* Allocate the RX queue, or reset if it is already allocated */
970 if (!rxq->bd) {
971 rc = iwl_rx_queue_alloc(priv);
972 if (rc) {
973 IWL_ERROR("Unable to initialize Rx queue\n");
974 return -ENOMEM;
975 }
976 } else
977 iwl_rx_queue_reset(priv, rxq);
978
979 iwl_rx_replenish(priv);
980
981 iwl3945_rx_init(priv, rxq);
982
983 spin_lock_irqsave(&priv->lock, flags);
984
985 /* Look at using this instead:
986 rxq->need_update = 1;
987 iwl_rx_queue_update_write_ptr(priv, rxq);
988 */
989
990 rc = iwl_grab_restricted_access(priv);
991 if (rc) {
992 spin_unlock_irqrestore(&priv->lock, flags);
993 return rc;
994 }
995 iwl_write_restricted(priv, FH_RCSR_WPTR(0), rxq->write & ~7);
996 iwl_release_restricted_access(priv);
997
998 spin_unlock_irqrestore(&priv->lock, flags);
999
1000 rc = iwl3945_txq_ctx_reset(priv);
1001 if (rc)
1002 return rc;
1003
1004 set_bit(STATUS_INIT, &priv->status);
1005
1006 return 0;
1007}
1008
1009/**
1010 * iwl_hw_txq_ctx_free - Free TXQ Context
1011 *
1012 * Destroy all TX DMA queues and structures
1013 */
1014void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
1015{
1016 int txq_id;
1017
1018 /* Tx queues */
1019 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++)
1020 iwl_tx_queue_free(priv, &priv->txq[txq_id]);
1021}
1022
1023void iwl_hw_txq_ctx_stop(struct iwl_priv *priv)
1024{
1025 int queue;
1026 unsigned long flags;
1027
1028 spin_lock_irqsave(&priv->lock, flags);
1029 if (iwl_grab_restricted_access(priv)) {
1030 spin_unlock_irqrestore(&priv->lock, flags);
1031 iwl_hw_txq_ctx_free(priv);
1032 return;
1033 }
1034
1035 /* stop SCD */
1036 iwl_write_restricted_reg(priv, SCD_MODE_REG, 0);
1037
1038 /* reset TFD queues */
1039 for (queue = TFD_QUEUE_MIN; queue < TFD_QUEUE_MAX; queue++) {
1040 iwl_write_restricted(priv, FH_TCSR_CONFIG(queue), 0x0);
1041 iwl_poll_restricted_bit(priv, FH_TSSR_TX_STATUS,
1042 ALM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(queue),
1043 1000);
1044 }
1045
1046 iwl_release_restricted_access(priv);
1047 spin_unlock_irqrestore(&priv->lock, flags);
1048
1049 iwl_hw_txq_ctx_free(priv);
1050}
1051
1052int iwl_hw_nic_stop_master(struct iwl_priv *priv)
1053{
1054 int rc = 0;
1055 u32 reg_val;
1056 unsigned long flags;
1057
1058 spin_lock_irqsave(&priv->lock, flags);
1059
1060 /* set stop master bit */
1061 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
1062
1063 reg_val = iwl_read32(priv, CSR_GP_CNTRL);
1064
1065 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
1066 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
1067 IWL_DEBUG_INFO("Card in power save, master is already "
1068 "stopped\n");
1069 else {
1070 rc = iwl_poll_bit(priv, CSR_RESET,
1071 CSR_RESET_REG_FLAG_MASTER_DISABLED,
1072 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1073 if (rc < 0) {
1074 spin_unlock_irqrestore(&priv->lock, flags);
1075 return rc;
1076 }
1077 }
1078
1079 spin_unlock_irqrestore(&priv->lock, flags);
1080 IWL_DEBUG_INFO("stop master\n");
1081
1082 return rc;
1083}
1084
1085int iwl_hw_nic_reset(struct iwl_priv *priv)
1086{
1087 int rc;
1088 unsigned long flags;
1089
1090 iwl_hw_nic_stop_master(priv);
1091
1092 spin_lock_irqsave(&priv->lock, flags);
1093
1094 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1095
1096 rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
1097 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1098 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1099
1100 rc = iwl_grab_restricted_access(priv);
1101 if (!rc) {
1102 iwl_write_restricted_reg(priv, APMG_CLK_CTRL_REG,
1103 APMG_CLK_VAL_BSM_CLK_RQT);
1104
1105 udelay(10);
1106
1107 iwl_set_bit(priv, CSR_GP_CNTRL,
1108 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1109
1110 iwl_write_restricted_reg(priv, APMG_RTC_INT_MSK_REG, 0x0);
1111 iwl_write_restricted_reg(priv, APMG_RTC_INT_STT_REG,
1112 0xFFFFFFFF);
1113
1114 /* enable DMA */
1115 iwl_write_restricted_reg(priv, APMG_CLK_EN_REG,
1116 APMG_CLK_VAL_DMA_CLK_RQT |
1117 APMG_CLK_VAL_BSM_CLK_RQT);
1118 udelay(10);
1119
1120 iwl_set_bits_restricted_reg(priv, APMG_PS_CTRL_REG,
1121 APMG_PS_CTRL_VAL_RESET_REQ);
1122 udelay(5);
1123 iwl_clear_bits_restricted_reg(priv, APMG_PS_CTRL_REG,
1124 APMG_PS_CTRL_VAL_RESET_REQ);
1125 iwl_release_restricted_access(priv);
1126 }
1127
1128 /* Clear the 'host command active' bit... */
1129 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1130
1131 wake_up_interruptible(&priv->wait_command_queue);
1132 spin_unlock_irqrestore(&priv->lock, flags);
1133
1134 return rc;
1135}
1136
1137/**
1138 * iwl_hw_reg_adjust_power_by_temp - return index delta into power gain settings table
1139 */
1140static int iwl_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1141{
1142 return (new_reading - old_reading) * (-11) / 100;
1143}
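/* Example: a raw reading 50 units warmer than the factory reference gives
 * 50 * (-11) / 100 = -5 table steps; each step is 1/2 dB and lower indices
 * mean higher power, so the output is raised by roughly 2.5 dB to offset
 * the warmer radio. */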
1144
1145/**
1146 * iwl_hw_reg_temp_out_of_range - Keep temperature in sane range
1147 */
1148static inline int iwl_hw_reg_temp_out_of_range(int temperature)
1149{
1150 return (((temperature < -260) || (temperature > 25)) ? 1 : 0);
1151}
1152
1153int iwl_hw_get_temperature(struct iwl_priv *priv)
1154{
1155 return iwl_read32(priv, CSR_UCODE_DRV_GP2);
1156}
1157
1158/**
1159 * iwl_hw_reg_txpower_get_temperature - get current temperature by reading from NIC
1160 */
1161static int iwl_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
1162{
1163 int temperature;
1164
1165 temperature = iwl_hw_get_temperature(priv);
1166
1167 /* driver's okay range is -260 to +25.
1168 * human readable okay range is 0 to +285 */
1169 IWL_DEBUG_INFO("Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
1170
1171 /* handle insane temp reading */
1172 if (iwl_hw_reg_temp_out_of_range(temperature)) {
1173		IWL_ERROR("Error: bad temperature value %d\n", temperature);
1174
1175 /* if really really hot(?),
1176 * substitute the 3rd band/group's temp measured at factory */
1177 if (priv->last_temperature > 100)
1178 temperature = priv->eeprom.groups[2].temperature;
1179 else /* else use most recent "sane" value from driver */
1180 temperature = priv->last_temperature;
1181 }
1182
1183 return temperature; /* raw, not "human readable" */
1184}
1185
1186/* Adjust Txpower only if temperature variance is greater than threshold.
1187 *
1188 * This threshold is lower than the 9 degrees used by older versions. */
1189#define IWL_TEMPERATURE_LIMIT_TIMER 6
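/* With the -11/100 coefficient in iwl_hw_reg_adjust_power_by_temp(), a
 * 6-unit swing is roughly two thirds of one gain-table step (under 1/2 dB),
 * so smaller changes are not worth recalibrating for. */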
1190
1191/**
1192 * is_temp_calib_needed - determines if new calibration is needed
1193 *
1194 * records new temperature in priv->temperature.
1195 * replaces priv->last_temperature *only* if calib needed
1196 * (assumes caller will actually do the calibration!). */
1197static int is_temp_calib_needed(struct iwl_priv *priv)
1198{
1199 int temp_diff;
1200
1201 priv->temperature = iwl_hw_reg_txpower_get_temperature(priv);
1202 temp_diff = priv->temperature - priv->last_temperature;
1203
1204 /* get absolute value */
1205	if (temp_diff < 0) {
1206		IWL_DEBUG_POWER("Getting cooler, delta %d\n", temp_diff);
1207		temp_diff = -temp_diff;
1208	} else if (temp_diff == 0)
1209		IWL_DEBUG_POWER("Same temp\n");
1210	else
1211		IWL_DEBUG_POWER("Getting warmer, delta %d\n", temp_diff);
1212
1213 /* if we don't need calibration, *don't* update last_temperature */
1214 if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
1215 IWL_DEBUG_POWER("Timed thermal calib not needed\n");
1216 return 0;
1217 }
1218
1219 IWL_DEBUG_POWER("Timed thermal calib needed\n");
1220
1221 /* assume that caller will actually do calib ...
1222 * update the "last temperature" value */
1223 priv->last_temperature = priv->temperature;
1224 return 1;
1225}
1226
1227#define IWL_MAX_GAIN_ENTRIES 78
1228#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
1229#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
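/* The two values above are consistent with each other: CCK runs 5 dB below
 * the OFDM 12M setting, and with 1/2 dB per gain-table step that is exactly
 * 10 index steps toward lower power. */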
1230
1231/* radio and DSP power table, each step is 1/2 dB.
1232 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
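/* Index 0 is the highest power; with 78 entries at 1/2 dB per step each
 * band's table spans roughly 38.5 dB of output range. */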
1233static struct iwl_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
1234 {
1235 {251, 127}, /* 2.4 GHz, highest power */
1236 {251, 127},
1237 {251, 127},
1238 {251, 127},
1239 {251, 125},
1240 {251, 110},
1241 {251, 105},
1242 {251, 98},
1243 {187, 125},
1244 {187, 115},
1245 {187, 108},
1246 {187, 99},
1247 {243, 119},
1248 {243, 111},
1249 {243, 105},
1250 {243, 97},
1251 {243, 92},
1252 {211, 106},
1253 {211, 100},
1254 {179, 120},
1255 {179, 113},
1256 {179, 107},
1257 {147, 125},
1258 {147, 119},
1259 {147, 112},
1260 {147, 106},
1261 {147, 101},
1262 {147, 97},
1263 {147, 91},
1264 {115, 107},
1265 {235, 121},
1266 {235, 115},
1267 {235, 109},
1268 {203, 127},
1269 {203, 121},
1270 {203, 115},
1271 {203, 108},
1272 {203, 102},
1273 {203, 96},
1274 {203, 92},
1275 {171, 110},
1276 {171, 104},
1277 {171, 98},
1278 {139, 116},
1279 {227, 125},
1280 {227, 119},
1281 {227, 113},
1282 {227, 107},
1283 {227, 101},
1284 {227, 96},
1285 {195, 113},
1286 {195, 106},
1287 {195, 102},
1288 {195, 95},
1289 {163, 113},
1290 {163, 106},
1291 {163, 102},
1292 {163, 95},
1293 {131, 113},
1294 {131, 106},
1295 {131, 102},
1296 {131, 95},
1297 {99, 113},
1298 {99, 106},
1299 {99, 102},
1300 {99, 95},
1301 {67, 113},
1302 {67, 106},
1303 {67, 102},
1304 {67, 95},
1305 {35, 113},
1306 {35, 106},
1307 {35, 102},
1308 {35, 95},
1309 {3, 113},
1310 {3, 106},
1311 {3, 102},
1312 {3, 95} }, /* 2.4 GHz, lowest power */
1313 {
1314 {251, 127}, /* 5.x GHz, highest power */
1315 {251, 120},
1316 {251, 114},
1317 {219, 119},
1318 {219, 101},
1319 {187, 113},
1320 {187, 102},
1321 {155, 114},
1322 {155, 103},
1323 {123, 117},
1324 {123, 107},
1325 {123, 99},
1326 {123, 92},
1327 {91, 108},
1328 {59, 125},
1329 {59, 118},
1330 {59, 109},
1331 {59, 102},
1332 {59, 96},
1333 {59, 90},
1334 {27, 104},
1335 {27, 98},
1336 {27, 92},
1337 {115, 118},
1338 {115, 111},
1339 {115, 104},
1340 {83, 126},
1341 {83, 121},
1342 {83, 113},
1343 {83, 105},
1344 {83, 99},
1345 {51, 118},
1346 {51, 111},
1347 {51, 104},
1348 {51, 98},
1349 {19, 116},
1350 {19, 109},
1351 {19, 102},
1352 {19, 98},
1353 {19, 93},
1354 {171, 113},
1355 {171, 107},
1356 {171, 99},
1357 {139, 120},
1358 {139, 113},
1359 {139, 107},
1360 {139, 99},
1361 {107, 120},
1362 {107, 113},
1363 {107, 107},
1364 {107, 99},
1365 {75, 120},
1366 {75, 113},
1367 {75, 107},
1368 {75, 99},
1369 {43, 120},
1370 {43, 113},
1371 {43, 107},
1372 {43, 99},
1373 {11, 120},
1374 {11, 113},
1375 {11, 107},
1376 {11, 99},
1377 {131, 107},
1378 {131, 99},
1379 {99, 120},
1380 {99, 113},
1381 {99, 107},
1382 {99, 99},
1383 {67, 120},
1384 {67, 113},
1385 {67, 107},
1386 {67, 99},
1387 {35, 120},
1388 {35, 113},
1389 {35, 107},
1390 {35, 99},
1391 {3, 120} } /* 5.x GHz, lowest power */
1392};
1393
1394static inline u8 iwl_hw_reg_fix_power_index(int index)
1395{
1396 if (index < 0)
1397 return 0;
1398 if (index >= IWL_MAX_GAIN_ENTRIES)
1399 return IWL_MAX_GAIN_ENTRIES - 1;
1400 return (u8) index;
1401}
1402
1403/* Kick off thermal recalibration check every 60 seconds */
1404#define REG_RECALIB_PERIOD (60)
1405
1406/**
1407 * iwl_hw_reg_set_scan_power - Set Tx power for scan probe requests
1408 *
1409 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1410 * or 6 Mbit (OFDM) rates.
1411 */
1412static void iwl_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
1413 s32 rate_index, const s8 *clip_pwrs,
1414 struct iwl_channel_info *ch_info,
1415 int band_index)
1416{
1417 struct iwl_scan_power_info *scan_power_info;
1418 s8 power;
1419 u8 power_index;
1420
1421 scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
1422
1423 /* use this channel group's 6Mbit clipping/saturation pwr,
1424 * but cap at regulatory scan power restriction (set during init
1425 * based on eeprom channel data) for this channel. */
1426 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX]);
1427
1428 /* further limit to user's max power preference.
1429 * FIXME: Other spectrum management power limitations do not
1430 * seem to apply?? */
1431 power = min(power, priv->user_txpower_limit);
1432 scan_power_info->requested_power = power;
1433
1434 /* find difference between new scan *power* and current "normal"
1435 * Tx *power* for 6Mb. Use this difference (x2) to adjust the
1436 * current "normal" temperature-compensated Tx power *index* for
1437 * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
1438 * *index*. */
1439 power_index = ch_info->power_info[rate_index].power_table_index
1440 - (power - ch_info->power_info
1441 [IWL_RATE_6M_INDEX].requested_power) * 2;
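	/* Example: if the scan power comes out 3 dB below the normal 6M
	 * power, the difference is doubled to 6 index steps toward lower
	 * output (each step being 1/2 dB). */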
1442
1443 /* store reference index that we use when adjusting *all* scan
1444 * powers. So we can accommodate user (all channel) or spectrum
1445 * management (single channel) power changes "between" temperature
1446 * feedback compensation procedures.
1447 * don't force fit this reference index into gain table; it may be a
1448 * negative number. This will help avoid errors when we're at
1449 * the lower bounds (highest gains, for warmest temperatures)
1450 * of the table. */
1451
1452 /* don't exceed table bounds for "real" setting */
1453 power_index = iwl_hw_reg_fix_power_index(power_index);
1454
1455 scan_power_info->power_table_index = power_index;
1456 scan_power_info->tpc.tx_gain =
1457 power_gain_table[band_index][power_index].tx_gain;
1458 scan_power_info->tpc.dsp_atten =
1459 power_gain_table[band_index][power_index].dsp_atten;
1460}
1461
1462/**
1463 * iwl_hw_reg_send_txpower - fill in Tx Power command with gain settings
1464 *
1465 * Configures power settings for all rates for the current channel,
1466 * using values from channel info struct, and send to NIC
1467 */
1468int iwl_hw_reg_send_txpower(struct iwl_priv *priv)
1469{
1470 int rate_idx;
1471 const struct iwl_channel_info *ch_info = NULL;
1472 struct iwl_txpowertable_cmd txpower = {
1473 .channel = priv->active_rxon.channel,
1474 };
1475
1476 txpower.band = (priv->phymode == MODE_IEEE80211A) ? 0 : 1;
1477 ch_info = iwl_get_channel_info(priv,
1478 priv->phymode,
1479 le16_to_cpu(priv->active_rxon.channel));
1480 if (!ch_info) {
1481 IWL_ERROR
1482 ("Failed to get channel info for channel %d [%d]\n",
1483 le16_to_cpu(priv->active_rxon.channel), priv->phymode);
1484 return -EINVAL;
1485 }
1486
1487 if (!is_channel_valid(ch_info)) {
1488 IWL_DEBUG_POWER("Not calling TX_PWR_TABLE_CMD on "
1489 "non-Tx channel.\n");
1490 return 0;
1491 }
1492
1493 /* fill cmd with power settings for all rates for current channel */
1494 for (rate_idx = 0; rate_idx < IWL_RATE_COUNT; rate_idx++) {
1495 txpower.power[rate_idx].tpc = ch_info->power_info[rate_idx].tpc;
1496 txpower.power[rate_idx].rate = iwl_rates[rate_idx].plcp;
1497
1498 IWL_DEBUG_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1499 le16_to_cpu(txpower.channel),
1500 txpower.band,
1501 txpower.power[rate_idx].tpc.tx_gain,
1502 txpower.power[rate_idx].tpc.dsp_atten,
1503 txpower.power[rate_idx].rate);
1504 }
1505
1506 return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
1507 sizeof(struct iwl_txpowertable_cmd), &txpower);
1508
1509}
1510
1511/**
1512 * iwl_hw_reg_set_new_power - Configures power tables at new levels
1513 * @ch_info: Channel to update. Uses power_info.requested_power.
1514 *
1515 * Replace requested_power and base_power_index ch_info fields for
1516 * one channel.
1517 *
1518 * Called if user or spectrum management changes power preferences.
1519 * Takes into account h/w and modulation limitations (clip power).
1520 *
1521 * This does *not* send anything to NIC, just sets up ch_info for one channel.
1522 *
1523 * NOTE: iwl_hw_reg_comp_txpower_temp() *must* be run after this to
1524 * properly fill out the scan powers, and actual h/w gain settings,
1525 * and send changes to NIC
1526 */
1527static int iwl_hw_reg_set_new_power(struct iwl_priv *priv,
1528 struct iwl_channel_info *ch_info)
1529{
1530 struct iwl_channel_power_info *power_info;
1531 int power_changed = 0;
1532 int i;
1533 const s8 *clip_pwrs;
1534 int power;
1535
1536 /* Get this chnlgrp's rate-to-max/clip-powers table */
1537 clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers;
1538
1539 /* Get this channel's rate-to-current-power settings table */
1540 power_info = ch_info->power_info;
1541
1542 /* update OFDM Txpower settings */
1543 for (i = IWL_FIRST_OFDM_RATE; i <= IWL_LAST_OFDM_RATE;
1544 i++, ++power_info) {
1545 int delta_idx;
1546
1547 /* limit new power to be no more than h/w capability */
1548 power = min(ch_info->curr_txpow, clip_pwrs[i]);
1549 if (power == power_info->requested_power)
1550 continue;
1551
1552 /* find difference between old and new requested powers,
1553 * update base (non-temp-compensated) power index */
1554 delta_idx = (power - power_info->requested_power) * 2;
1555 power_info->base_power_index -= delta_idx;
1556
1557 /* save new requested power value */
1558 power_info->requested_power = power;
1559
1560 power_changed = 1;
1561 }
1562
1563 /* update CCK Txpower settings, based on OFDM 12M setting ...
1564 * ... all CCK power settings for a given channel are the *same*. */
1565 if (power_changed) {
1566 power =
1567 ch_info->power_info[IWL_RATE_12M_INDEX].
1568 requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
1569
1570 /* do all CCK rates' iwl_channel_power_info structures */
1571 for (i = IWL_FIRST_CCK_RATE; i <= IWL_LAST_CCK_RATE; i++) {
1572 power_info->requested_power = power;
1573 power_info->base_power_index =
1574 ch_info->power_info[IWL_RATE_12M_INDEX].
1575 base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
1576 ++power_info;
1577 }
1578 }
1579
1580 return 0;
1581}
1582
1583/**
1584 * iwl_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1585 *
1586 * NOTE: Returned power limit may be less (but not more) than requested,
1587 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1588 * (no consideration for h/w clipping limitations).
1589 */
1590static int iwl_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
1591{
1592 s8 max_power;
1593
1594#if 0
1595 /* if we're using TGd limits, use lower of TGd or EEPROM */
1596 if (ch_info->tgd_data.max_power != 0)
1597 max_power = min(ch_info->tgd_data.max_power,
1598 ch_info->eeprom.max_power_avg);
1599
1600 /* else just use EEPROM limits */
1601 else
1602#endif
1603 max_power = ch_info->eeprom.max_power_avg;
1604
1605 return min(max_power, ch_info->max_power_avg);
1606}
1607
1608/**
1609 * iwl_hw_reg_comp_txpower_temp - Compensate for temperature
1610 *
1611 * Compensate txpower settings of *all* channels for temperature.
1612 * This only accounts for the difference between current temperature
1613 * and the factory calibration temperatures, and bases the new settings
1614 * on the channel's base_power_index.
1615 *
1616 * If RxOn is "associated", this sends the new Txpower to NIC!
1617 */
1618static int iwl_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1619{
1620 struct iwl_channel_info *ch_info = NULL;
1621 int delta_index;
1622 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1623 u8 a_band;
1624 u8 rate_index;
1625 u8 scan_tbl_index;
1626 u8 i;
1627 int ref_temp;
1628 int temperature = priv->temperature;
1629
1630 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1631 for (i = 0; i < priv->channel_count; i++) {
1632 ch_info = &priv->channel_info[i];
1633 a_band = is_channel_a_band(ch_info);
1634
1635 /* Get this chnlgrp's factory calibration temperature */
1636 ref_temp = (s16)priv->eeprom.groups[ch_info->group_index].
1637 temperature;
1638
1639 /* get power index adjustment based on curr and factory
1640 * temps */
1641 delta_index = iwl_hw_reg_adjust_power_by_temp(temperature,
1642 ref_temp);
1643
1644 /* set tx power value for all rates, OFDM and CCK */
1645 for (rate_index = 0; rate_index < IWL_RATE_COUNT;
1646 rate_index++) {
1647 int power_idx =
1648 ch_info->power_info[rate_index].base_power_index;
1649
1650 /* temperature compensate */
1651 power_idx += delta_index;
1652
1653 /* stay within table range */
1654 power_idx = iwl_hw_reg_fix_power_index(power_idx);
1655 ch_info->power_info[rate_index].
1656 power_table_index = (u8) power_idx;
1657 ch_info->power_info[rate_index].tpc =
1658 power_gain_table[a_band][power_idx];
1659 }
1660
1661 /* Get this chnlgrp's rate-to-max/clip-powers table */
1662 clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers;
1663
1664 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1665 for (scan_tbl_index = 0;
1666 scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
1667 s32 actual_index = (scan_tbl_index == 0) ?
1668 IWL_RATE_1M_INDEX : IWL_RATE_6M_INDEX;
1669 iwl_hw_reg_set_scan_power(priv, scan_tbl_index,
1670 actual_index, clip_pwrs,
1671 ch_info, a_band);
1672 }
1673 }
1674
1675 /* send Txpower command for current channel to ucode */
1676 return iwl_hw_reg_send_txpower(priv);
1677}
1678
1679int iwl_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1680{
1681 struct iwl_channel_info *ch_info;
1682 s8 max_power;
1683 u8 a_band;
1684 u8 i;
1685
1686 if (priv->user_txpower_limit == power) {
1687 IWL_DEBUG_POWER("Requested Tx power same as current "
1688 "limit: %ddBm.\n", power);
1689 return 0;
1690 }
1691
1692 IWL_DEBUG_POWER("Setting upper limit clamp to %ddBm.\n", power);
1693 priv->user_txpower_limit = power;
1694
1695 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1696
1697 for (i = 0; i < priv->channel_count; i++) {
1698 ch_info = &priv->channel_info[i];
1699 a_band = is_channel_a_band(ch_info);
1700
1701 /* find minimum power of all user and regulatory constraints
1702 * (does not consider h/w clipping limitations) */
1703 max_power = iwl_hw_reg_get_ch_txpower_limit(ch_info);
1704 max_power = min(power, max_power);
1705 if (max_power != ch_info->curr_txpow) {
1706 ch_info->curr_txpow = max_power;
1707
1708 /* this considers the h/w clipping limitations */
1709 iwl_hw_reg_set_new_power(priv, ch_info);
1710 }
1711 }
1712
1713 /* update txpower settings for all channels,
1714 * send to NIC if associated. */
1715 is_temp_calib_needed(priv);
1716 iwl_hw_reg_comp_txpower_temp(priv);
1717
1718 return 0;
1719}
1720
1721/* will add 3945 channel switch cmd handling later */
1722int iwl_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1723{
1724 return 0;
1725}
1726
1727/**
1728 * iwl3945_reg_txpower_periodic - called when it is time to check our temperature.
1729 *
1730 * -- reset periodic timer
1731 * -- see if temp has changed enough to warrant re-calibration ... if so:
1732 * -- correct coeffs for temp (can reset temp timer)
1733 * -- save this temp as "last",
1734 * -- send new set of gain settings to NIC
1735 * NOTE: This should continue working, even when we're not associated,
1736 * so we can keep our internal table of scan powers current. */
1737void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1738{
1739 /* This will kick in the "brute force"
1740 * iwl_hw_reg_comp_txpower_temp() below */
1741 if (!is_temp_calib_needed(priv))
1742 goto reschedule;
1743
1744 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1745 * This is based *only* on current temperature,
1746 * ignoring any previous power measurements */
1747 iwl_hw_reg_comp_txpower_temp(priv);
1748
1749 reschedule:
1750 queue_delayed_work(priv->workqueue,
1751 &priv->thermal_periodic, REG_RECALIB_PERIOD * HZ);
1752}
1753
1754void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
1755{
1756 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1757 thermal_periodic.work);
1758
1759 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1760 return;
1761
1762 mutex_lock(&priv->mutex);
1763 iwl3945_reg_txpower_periodic(priv);
1764 mutex_unlock(&priv->mutex);
1765}
1766
1767/**
1768 * iwl_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
1769 * for the channel.
1770 *
1771 * This function is used when initializing channel-info structs.
1772 *
1773 * NOTE: These channel groups do *NOT* match the bands above!
1774 * These channel groups are based on factory-tested channels;
1775 * on A-band, EEPROM's "group frequency" entries represent the top
1776 * channel in each of groups 1-4. All B/G channels are in group 0.
1777 */
1778static u16 iwl_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
1779 const struct iwl_channel_info *ch_info)
1780{
1781 struct iwl_eeprom_txpower_group *ch_grp = &priv->eeprom.groups[0];
1782 u8 group;
1783 u16 group_index = 0; /* based on factory calib frequencies */
1784 u8 grp_channel;
1785
1786 /* Find the group index for the channel ... don't use index 1(?) */
1787 if (is_channel_a_band(ch_info)) {
1788 for (group = 1; group < 5; group++) {
1789 grp_channel = ch_grp[group].group_channel;
1790 if (ch_info->channel <= grp_channel) {
1791 group_index = group;
1792 break;
1793 }
1794 }
1795 /* group 4 has a few channels *above* its factory cal freq */
1796 if (group == 5)
1797 group_index = 4;
1798 } else
1799 group_index = 0; /* 2.4 GHz, group 0 */
1800
1801 IWL_DEBUG_POWER("Chnl %d mapped to grp %d\n", ch_info->channel,
1802 group_index);
1803 return group_index;
1804}
1805
1806/**
1807 * iwl_hw_reg_get_matched_power_index - Interpolate to get nominal index
1808 *
1809 * Interpolate to get nominal (i.e. at factory calibration temperature) index
1810 * into radio/DSP gain settings table for requested power.
1811 */
1812static int iwl_hw_reg_get_matched_power_index(struct iwl_priv *priv,
1813 s8 requested_power,
1814 s32 setting_index, s32 *new_index)
1815{
1816 const struct iwl_eeprom_txpower_group *chnl_grp = NULL;
1817 s32 index0, index1;
1818 s32 power = 2 * requested_power;
1819 s32 i;
1820 const struct iwl_eeprom_txpower_sample *samples;
1821 s32 gains0, gains1;
1822 s32 res;
1823 s32 denominator;
1824
1825 chnl_grp = &priv->eeprom.groups[setting_index];
1826 samples = chnl_grp->samples;
1827 for (i = 0; i < 5; i++) {
1828 if (power == samples[i].power) {
1829 *new_index = samples[i].gain_index;
1830 return 0;
1831 }
1832 }
1833
1834 if (power > samples[1].power) {
1835 index0 = 0;
1836 index1 = 1;
1837 } else if (power > samples[2].power) {
1838 index0 = 1;
1839 index1 = 2;
1840 } else if (power > samples[3].power) {
1841 index0 = 2;
1842 index1 = 3;
1843 } else {
1844 index0 = 3;
1845 index1 = 4;
1846 }
1847
1848 denominator = (s32) samples[index1].power - (s32) samples[index0].power;
1849 if (denominator == 0)
1850 return -EINVAL;
1851 gains0 = (s32) samples[index0].gain_index * (1 << 19);
1852 gains1 = (s32) samples[index1].gain_index * (1 << 19);
1853 res = gains0 + (gains1 - gains0) *
1854 ((s32) power - (s32) samples[index0].power) / denominator +
1855 (1 << 18);
1856 *new_index = res >> 19;
1857 return 0;
1858}
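
/* Worked example with made-up EEPROM samples: requesting 15 dBm gives
 * power = 30 (half-dB units).  If samples[0] = {power 32, gain_index 36}
 * and samples[1] = {power 28, gain_index 40} bracket it, the request lands
 * halfway between them and the Q19 fixed-point math (scale by 1 << 19,
 * add 1 << 18 to round) returns a gain index of 38. */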
1859
1860static void iwl_hw_reg_init_channel_groups(struct iwl_priv *priv)
1861{
1862 u32 i;
1863 s32 rate_index;
1864 const struct iwl_eeprom_txpower_group *group;
1865
1866 IWL_DEBUG_POWER("Initializing factory calib info from EEPROM\n");
1867
1868 for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
1869 s8 *clip_pwrs; /* table of power levels for each rate */
1870 s8 satur_pwr; /* saturation power for each chnl group */
1871 group = &priv->eeprom.groups[i];
1872
1873 /* sanity check on factory saturation power value */
1874 if (group->saturation_power < 40) {
1875 IWL_WARNING("Error: saturation power is %d, "
1876 "less than minimum expected 40\n",
1877 group->saturation_power);
1878 return;
1879 }
1880
1881 /*
1882 * Derive requested power levels for each rate, based on
1883 * hardware capabilities (saturation power for band).
1884 * Basic value is 3dB down from saturation, with further
1885 * power reductions for highest 3 data rates. These
1886 * backoffs provide headroom for high rate modulation
1887 * power peaks, without too much distortion (clipping).
1888 */
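		/* Concretely: relative to satur_pwr, 48M backs off a further
		 * 7 (B/G) or 10 (A band), 54M backs off 9 or 12, and 36M
		 * backs off 5 on A band only; every other rate uses
		 * satur_pwr unchanged. */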
1889 /* we'll fill in this array with h/w max power levels */
1890 clip_pwrs = (s8 *) priv->clip_groups[i].clip_powers;
1891
1892 /* divide factory saturation power by 2 to find -3dB level */
1893 satur_pwr = (s8) (group->saturation_power >> 1);
1894
1895 /* fill in channel group's nominal powers for each rate */
1896 for (rate_index = 0;
1897 rate_index < IWL_RATE_COUNT; rate_index++, clip_pwrs++) {
1898 switch (rate_index) {
1899 case IWL_RATE_36M_INDEX:
1900 if (i == 0) /* B/G */
1901 *clip_pwrs = satur_pwr;
1902 else /* A */
1903 *clip_pwrs = satur_pwr - 5;
1904 break;
1905 case IWL_RATE_48M_INDEX:
1906 if (i == 0)
1907 *clip_pwrs = satur_pwr - 7;
1908 else
1909 *clip_pwrs = satur_pwr - 10;
1910 break;
1911 case IWL_RATE_54M_INDEX:
1912 if (i == 0)
1913 *clip_pwrs = satur_pwr - 9;
1914 else
1915 *clip_pwrs = satur_pwr - 12;
1916 break;
1917 default:
1918 *clip_pwrs = satur_pwr;
1919 break;
1920 }
1921 }
1922 }
1923}
1924
1925/**
1926 * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
1927 *
1928 * Second pass (during init) to set up priv->channel_info
1929 *
1930 * Set up Tx-power settings in our channel info database for each VALID
1931 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
1932 * and current temperature.
1933 *
1934 * Since this is based on current temperature (at init time), these values may
1935 * not be valid for very long, but it gives us a starting/default point,
1936 * and allows us to perform active (i.e. Tx) scans.
1937 *
1938 * This does *not* write values to NIC, just sets up our internal table.
1939 */
1940int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
1941{
1942 struct iwl_channel_info *ch_info = NULL;
1943 struct iwl_channel_power_info *pwr_info;
1944 int delta_index;
1945 u8 rate_index;
1946 u8 scan_tbl_index;
1947 const s8 *clip_pwrs; /* array of power levels for each rate */
1948 u8 gain, dsp_atten;
1949 s8 power;
1950 u8 pwr_index, base_pwr_index, a_band;
1951 u8 i;
1952 int temperature;
1953
1954 /* save temperature reference,
1955 * so we can determine next time to calibrate */
1956 temperature = iwl_hw_reg_txpower_get_temperature(priv);
1957 priv->last_temperature = temperature;
1958
1959 iwl_hw_reg_init_channel_groups(priv);
1960
1961 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
1962 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
1963 i++, ch_info++) {
1964 a_band = is_channel_a_band(ch_info);
1965 if (!is_channel_valid(ch_info))
1966 continue;
1967
1968 /* find this channel's channel group (*not* "band") index */
1969 ch_info->group_index =
1970 iwl_hw_reg_get_ch_grp_index(priv, ch_info);
1971
1972 /* Get this chnlgrp's rate->max/clip-powers table */
1973 clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers;
1974
1975 /* calculate power index *adjustment* value according to
1976 * diff between current temperature and factory temperature */
1977 delta_index = iwl_hw_reg_adjust_power_by_temp(temperature,
1978 priv->eeprom.groups[ch_info->group_index].
1979 temperature);
1980
1981 IWL_DEBUG_POWER("Delta index for channel %d: %d [%d]\n",
1982 ch_info->channel, delta_index, temperature +
1983 IWL_TEMP_CONVERT);
1984
1985 /* set tx power value for all OFDM rates */
1986 for (rate_index = 0; rate_index < IWL_OFDM_RATES;
1987 rate_index++) {
1988 s32 power_idx;
1989 int rc;
1990
1991 /* use channel group's clip-power table,
1992 * but don't exceed channel's max power */
1993 s8 pwr = min(ch_info->max_power_avg,
1994 clip_pwrs[rate_index]);
1995
1996 pwr_info = &ch_info->power_info[rate_index];
1997
1998 /* get base (i.e. at factory-measured temperature)
1999 * power table index for this rate's power */
2000 rc = iwl_hw_reg_get_matched_power_index(priv, pwr,
2001 ch_info->group_index,
2002 &power_idx);
2003 if (rc) {
2004 IWL_ERROR("Invalid power index\n");
2005 return rc;
2006 }
2007 pwr_info->base_power_index = (u8) power_idx;
2008
2009 /* temperature compensate */
2010 power_idx += delta_index;
2011
2012 /* stay within range of gain table */
2013 power_idx = iwl_hw_reg_fix_power_index(power_idx);
2014
2015 /* fill 1 OFDM rate's iwl_channel_power_info struct */
2016 pwr_info->requested_power = pwr;
2017 pwr_info->power_table_index = (u8) power_idx;
2018 pwr_info->tpc.tx_gain =
2019 power_gain_table[a_band][power_idx].tx_gain;
2020 pwr_info->tpc.dsp_atten =
2021 power_gain_table[a_band][power_idx].dsp_atten;
2022 }
2023
2024 /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
2025 pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX];
2026 power = pwr_info->requested_power +
2027 IWL_CCK_FROM_OFDM_POWER_DIFF;
2028 pwr_index = pwr_info->power_table_index +
2029 IWL_CCK_FROM_OFDM_INDEX_DIFF;
2030 base_pwr_index = pwr_info->base_power_index +
2031 IWL_CCK_FROM_OFDM_INDEX_DIFF;
2032
2033 /* stay within table range */
2034 pwr_index = iwl_hw_reg_fix_power_index(pwr_index);
2035 gain = power_gain_table[a_band][pwr_index].tx_gain;
2036 dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
2037
2038 /* fill each CCK rate's iwl_channel_power_info structure
2039 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
2040 * NOTE: CCK rates start at end of OFDM rates! */
2041 for (rate_index = IWL_OFDM_RATES;
2042 rate_index < IWL_RATE_COUNT; rate_index++) {
2043 pwr_info = &ch_info->power_info[rate_index];
2044 pwr_info->requested_power = power;
2045 pwr_info->power_table_index = pwr_index;
2046 pwr_info->base_power_index = base_pwr_index;
2047 pwr_info->tpc.tx_gain = gain;
2048 pwr_info->tpc.dsp_atten = dsp_atten;
2049 }
2050
2051 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
2052 for (scan_tbl_index = 0;
2053 scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
2054 s32 actual_index = (scan_tbl_index == 0) ?
2055 IWL_RATE_1M_INDEX : IWL_RATE_6M_INDEX;
2056 iwl_hw_reg_set_scan_power(priv, scan_tbl_index,
2057 actual_index, clip_pwrs, ch_info, a_band);
2058 }
2059 }
2060
2061 return 0;
2062}
2063
2064int iwl_hw_rxq_stop(struct iwl_priv *priv)
2065{
2066 int rc;
2067 unsigned long flags;
2068
2069 spin_lock_irqsave(&priv->lock, flags);
2070 rc = iwl_grab_restricted_access(priv);
2071 if (rc) {
2072 spin_unlock_irqrestore(&priv->lock, flags);
2073 return rc;
2074 }
2075
2076 iwl_write_restricted(priv, FH_RCSR_CONFIG(0), 0);
2077 rc = iwl_poll_restricted_bit(priv, FH_RSSR_STATUS, (1 << 24), 1000);
2078 if (rc < 0)
2079 IWL_ERROR("Can't stop Rx DMA.\n");
2080
2081 iwl_release_restricted_access(priv);
2082 spin_unlock_irqrestore(&priv->lock, flags);
2083
2084 return 0;
2085}
2086
2087int iwl_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2088{
2089 int rc;
2090 unsigned long flags;
2091 int txq_id = txq->q.id;
2092
2093 struct iwl_shared *shared_data = priv->hw_setting.shared_virt;
2094
2095 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
2096
2097 spin_lock_irqsave(&priv->lock, flags);
2098 rc = iwl_grab_restricted_access(priv);
2099 if (rc) {
2100 spin_unlock_irqrestore(&priv->lock, flags);
2101 return rc;
2102 }
2103 iwl_write_restricted(priv, FH_CBCC_CTRL(txq_id), 0);
2104 iwl_write_restricted(priv, FH_CBCC_BASE(txq_id), 0);
2105
2106 iwl_write_restricted(priv, FH_TCSR_CONFIG(txq_id),
2107 ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2108 ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2109 ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
2110 ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
2111 ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
2112 iwl_release_restricted_access(priv);
2113
2114 /* fake read to flush all prev. writes */
2115 iwl_read32(priv, FH_TSSR_CBB_BASE);
2116 spin_unlock_irqrestore(&priv->lock, flags);
2117
2118 return 0;
2119}
2120
2121int iwl_hw_get_rx_read(struct iwl_priv *priv)
2122{
2123 struct iwl_shared *shared_data = priv->hw_setting.shared_virt;
2124
2125 return le32_to_cpu(shared_data->rx_read_ptr[0]);
2126}
2127
2128/**
2129 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
2130 */
2131int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2132{
2133 int rc, i;
2134 struct iwl_rate_scaling_cmd rate_cmd = {
2135 .reserved = {0, 0, 0},
2136 };
2137 struct iwl_rate_scaling_info *table = rate_cmd.table;
2138
2139 for (i = 0; i < ARRAY_SIZE(iwl_rates); i++) {
2140 table[i].rate_n_flags =
2141 iwl_hw_set_rate_n_flags(iwl_rates[i].plcp, 0);
2142 table[i].try_cnt = priv->retry_rate;
2143 table[i].next_rate_index = iwl_get_prev_ieee_rate(i);
2144 }
2145
2146 switch (priv->phymode) {
2147 case MODE_IEEE80211A:
2148 IWL_DEBUG_RATE("Select A mode rate scale\n");
2149 /* If one of the following CCK rates is used,
2150 * have it fall back to the 6M OFDM rate */
2151 for (i = IWL_FIRST_CCK_RATE; i <= IWL_LAST_CCK_RATE; i++)
2152 table[i].next_rate_index = IWL_FIRST_OFDM_RATE;
2153
2154 /* Don't fall back to CCK rates */
2155 table[IWL_RATE_12M_INDEX].next_rate_index = IWL_RATE_9M_INDEX;
2156
2157 /* Don't drop out of OFDM rates */
2158 table[IWL_FIRST_OFDM_RATE].next_rate_index =
2159 IWL_FIRST_OFDM_RATE;
2160 break;
2161
2162 case MODE_IEEE80211B:
2163 IWL_DEBUG_RATE("Select B mode rate scale\n");
2164 /* If an OFDM rate is used, have it fall back to the
2165	 * 1M CCK rate */
2166 for (i = IWL_FIRST_OFDM_RATE; i <= IWL_LAST_OFDM_RATE; i++)
2167 table[i].next_rate_index = IWL_FIRST_CCK_RATE;
2168
2169 /* CCK shouldn't fall back to OFDM... */
2170 table[IWL_RATE_11M_INDEX].next_rate_index = IWL_RATE_5M_INDEX;
2171 break;
2172
2173 default:
2174 IWL_DEBUG_RATE("Select G mode rate scale\n");
2175 break;
2176 }
2177
2178 /* Update the rate scaling for control frame Tx */
2179 rate_cmd.table_id = 0;
2180 rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2181 &rate_cmd);
2182 if (rc)
2183 return rc;
2184
2185 /* Update the rate scaling for data frame Tx */
2186 rate_cmd.table_id = 1;
2187 return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2188 &rate_cmd);
2189}
2190
2191int iwl_hw_set_hw_setting(struct iwl_priv *priv)
2192{
2193 memset((void *)&priv->hw_setting, 0,
2194 sizeof(struct iwl_driver_hw_info));
2195
2196 priv->hw_setting.shared_virt =
2197 pci_alloc_consistent(priv->pci_dev,
2198 sizeof(struct iwl_shared),
2199 &priv->hw_setting.shared_phys);
2200
2201 if (!priv->hw_setting.shared_virt) {
2202 IWL_ERROR("failed to allocate pci memory\n");
2203 mutex_unlock(&priv->mutex);
2204 return -ENOMEM;
2205 }
2206
2207 priv->hw_setting.ac_queue_count = AC_NUM;
2208 priv->hw_setting.rx_buffer_size = IWL_RX_BUF_SIZE;
2209 priv->hw_setting.tx_cmd_len = sizeof(struct iwl_tx_cmd);
2210 priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
2211 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
2212 priv->hw_setting.cck_flag = 0;
2213 priv->hw_setting.max_stations = IWL3945_STATION_COUNT;
2214 priv->hw_setting.bcast_sta_id = IWL3945_BROADCAST_ID;
2215 return 0;
2216}
2217
2218unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
2219 struct iwl_frame *frame, u8 rate)
2220{
2221 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
2222 unsigned int frame_size;
2223
2224 tx_beacon_cmd = (struct iwl_tx_beacon_cmd *)&frame->u;
2225 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2226
2227 tx_beacon_cmd->tx.sta_id = IWL3945_BROADCAST_ID;
2228 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2229
2230 frame_size = iwl_fill_beacon_frame(priv,
2231 tx_beacon_cmd->frame,
2232 BROADCAST_ADDR,
2233 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2234
2235 BUG_ON(frame_size > MAX_MPDU_SIZE);
2236 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2237
2238 tx_beacon_cmd->tx.rate = rate;
2239 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2240 TX_CMD_FLG_TSF_MSK);
2241
2242 /* supp_rates[0] == OFDM */
2243 tx_beacon_cmd->tx.supp_rates[0] = IWL_OFDM_BASIC_RATES_MASK;
2244
2245 /* supp_rates[1] == CCK
2246 *
2247 * NOTE: IWL_*_RATES_MASK are not in the order that supp_rates
2248 * expects so we have to shift them around.
2249 *
2250 * supp_rates expects:
2251 * CCK rates are bit0..3
2252 *
2253 * However IWL_*_RATES_MASK has:
2254 * CCK rates are bit8..11
2255 */
2256 tx_beacon_cmd->tx.supp_rates[1] =
2257 (IWL_CCK_BASIC_RATES_MASK >> 8) & 0xF;
2258
2259 return (sizeof(struct iwl_tx_beacon_cmd) + frame_size);
2260}
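/* Worked example for the supp_rates[1] shift above (the value is purely
 * illustrative; the real IWL_CCK_BASIC_RATES_MASK is defined elsewhere in
 * this patch): if the mask were 0x0F00, i.e. all four CCK basic rates set
 * in the bit8..11 layout, then
 *
 *	(0x0F00 >> 8) & 0xF == 0x0F
 *
 * which is exactly the bit0..3 layout that supp_rates[1] expects. */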
2261
2262void iwl_hw_rx_handler_setup(struct iwl_priv *priv)
2263{
2264 priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
2265}
2266
2267void iwl_hw_setup_deferred_work(struct iwl_priv *priv)
2268{
2269 INIT_DELAYED_WORK(&priv->thermal_periodic,
2270 iwl3945_bg_reg_txpower_periodic);
2271}
2272
2273void iwl_hw_cancel_deferred_work(struct iwl_priv *priv)
2274{
2275 cancel_delayed_work(&priv->thermal_periodic);
2276}
2277
2278struct pci_device_id iwl_hw_card_ids[] = {
2279 {0x8086, 0x4222, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2280 {0x8086, 0x4227, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2281 {0}
2282};
2283
2284inline int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv)
2285{
2286 _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
2287 return 0;
2288}
2289
2290MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
new file mode 100644
index 000000000000..813902e9f8c2
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -0,0 +1,41 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_3945_h__
28#define __iwl_3945_h__
29
30/*
31 * Forward declare iwl-3945.c functions for iwl-base.c
32 */
33extern int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv);
34extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
35extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
36extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
37extern void iwl3945_bg_reg_txpower_periodic(struct work_struct *work);
38extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
39extern u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
40 u16 tx_rate, u8 flags);
41#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
new file mode 100644
index 000000000000..99a19ef4c743
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -0,0 +1,581 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __iwl_4965_hw_h__
65#define __iwl_4965_hw_h__
66
67#define IWL_RX_BUF_SIZE (4 * 1024)
68#define IWL_MAX_BSM_SIZE BSM_SRAM_SIZE
69#define KDR_RTC_INST_UPPER_BOUND (0x018000)
70#define KDR_RTC_DATA_UPPER_BOUND (0x80A000)
71#define KDR_RTC_INST_SIZE (KDR_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
72#define KDR_RTC_DATA_SIZE (KDR_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
73
74#define IWL_MAX_INST_SIZE KDR_RTC_INST_SIZE
75#define IWL_MAX_DATA_SIZE KDR_RTC_DATA_SIZE
76
77static inline int iwl_hw_valid_rtc_data_addr(u32 addr)
78{
79 return (addr >= RTC_DATA_LOWER_BOUND) &&
80 (addr < KDR_RTC_DATA_UPPER_BOUND);
81}
82
83/********************* START TXPOWER *****************************************/
84enum {
85 HT_IE_EXT_CHANNEL_NONE = 0,
86 HT_IE_EXT_CHANNEL_ABOVE,
87 HT_IE_EXT_CHANNEL_INVALID,
88 HT_IE_EXT_CHANNEL_BELOW,
89 HT_IE_EXT_CHANNEL_MAX
90};
91
92enum {
93 CALIB_CH_GROUP_1 = 0,
94 CALIB_CH_GROUP_2 = 1,
95 CALIB_CH_GROUP_3 = 2,
96 CALIB_CH_GROUP_4 = 3,
97 CALIB_CH_GROUP_5 = 4,
98 CALIB_CH_GROUP_MAX
99};
100
101/* Temperature calibration offset is 3% 0C in Kelvin */
102#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
103#define TEMPERATURE_CALIB_A_VAL 259
104
105#define IWL_TX_POWER_TEMPERATURE_MIN (263)
106#define IWL_TX_POWER_TEMPERATURE_MAX (410)
107
108#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
109 (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
110 ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
111
112#define IWL_TX_POWER_ILLEGAL_TEMPERATURE (300)
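/* The limits above are expressed in Kelvin: 263 K and 410 K are roughly
 * -10 C and +137 C, so IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE() rejects
 * readings outside that window; IWL_TX_POWER_ILLEGAL_TEMPERATURE (300 K,
 * about 27 C) is presumably substituted when no valid reading is
 * available. */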
113
114#define IWL_TX_POWER_TEMPERATURE_DIFFERENCE (2)
115
116#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
117
118#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */
119#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
120
121/* timeout equivalent to 3 minutes */
122#define IWL_TX_POWER_TIMELIMIT_NOCALIB 1800000000
123
124#define IWL_TX_POWER_CCK_COMPENSATION (9)
125
126#define MIN_TX_GAIN_INDEX (0)
127#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9)
128#define MAX_TX_GAIN_INDEX_52GHZ (98)
129#define MIN_TX_GAIN_52GHZ (98)
130#define MAX_TX_GAIN_INDEX_24GHZ (98)
131#define MIN_TX_GAIN_24GHZ (98)
132#define MAX_TX_GAIN (0)
133#define MAX_TX_GAIN_52GHZ_EXT (-9)
134
135#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
136#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
137#define IWL_TX_POWER_REGULATORY_MIN (0)
138#define IWL_TX_POWER_REGULATORY_MAX (34)
139#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
140#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
141#define IWL_TX_POWER_SATURATION_MIN (20)
142#define IWL_TX_POWER_SATURATION_MAX (50)
143
144/* dv * 0.4 = dt; so a 5 degree temperature difference corresponds
145 * to a 12.5 difference in the voltage reading */
146#define IWL_TX_TEMPERATURE_UPDATE_LIMIT 9
147
148#define IWL_INVALID_CHANNEL (0xffffffff)
149#define IWL_TX_POWER_REGITRY_BIT (2)
150
151#define MIN_IWL_TX_POWER_CALIB_DUR (100)
152#define IWL_CCK_FROM_OFDM_POWER_DIFF (-5)
153#define IWL_CCK_FROM_OFDM_INDEX_DIFF (9)
154
155/* Number of entries in the gain table */
156#define POWER_GAIN_NUM_ENTRIES 78
157#define TX_POW_MAX_SESSION_NUM 5
158/* timeout equivalent to 3 minutes */
159#define TX_IWL_TIMELIMIT_NOCALIB 1800000000
160
161/* Kedron TX_CALIB_STATES */
162#define IWL_TX_CALIB_STATE_SEND_TX 0x00000001
163#define IWL_TX_CALIB_WAIT_TX_RESPONSE 0x00000002
164#define IWL_TX_CALIB_ENABLED 0x00000004
165#define IWL_TX_CALIB_XVT_ON 0x00000008
166#define IWL_TX_CALIB_TEMPERATURE_CORRECT 0x00000010
167#define IWL_TX_CALIB_WORKING_WITH_XVT 0x00000020
168#define IWL_TX_CALIB_XVT_PERIODICAL 0x00000040
169
170#define NUM_IWL_TX_CALIB_SETTINS 5 /* Number of tx correction groups */
171
172#define IWL_MIN_POWER_IN_VP_TABLE 1 /* 0.5dBm multiplied by 2 */
173#define IWL_MAX_POWER_IN_VP_TABLE 40 /* 20dBm - multiplied by 2 (because
174 * entries are for each 0.5dBm) */
175#define IWL_STEP_IN_VP_TABLE 1 /* 0.5dB - multiplied by 2 */
176#define IWL_NUM_POINTS_IN_VPTABLE \
177 (1 + IWL_MAX_POWER_IN_VP_TABLE - IWL_MIN_POWER_IN_VP_TABLE)
178
179#define MIN_TX_GAIN_INDEX (0)
180#define MAX_TX_GAIN_INDEX_52GHZ (98)
181#define MIN_TX_GAIN_52GHZ (98)
182#define MAX_TX_GAIN_INDEX_24GHZ (98)
183#define MIN_TX_GAIN_24GHZ (98)
184#define MAX_TX_GAIN (0)
185
186/* First and last channels of all groups */
187#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
188#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
189#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
190#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
191#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
192#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
193#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
194#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
195#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
196#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
197
198
199union iwl_tx_power_dual_stream {
200 struct {
201 u8 radio_tx_gain[2];
202 u8 dsp_predis_atten[2];
203 } s;
204 u32 dw;
205};
206
207/********************* END TXPOWER *****************************************/
208
209/* HT flags */
210#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
211#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK __constant_cpu_to_le32(0x1<<22)
212
213#define RXON_FLG_HT_OPERATING_MODE_POS (23)
214
215#define RXON_FLG_HT_PROT_MSK __constant_cpu_to_le32(0x1<<23)
216#define RXON_FLG_FAT_PROT_MSK __constant_cpu_to_le32(0x2<<23)
217
218#define RXON_FLG_CHANNEL_MODE_POS (25)
219#define RXON_FLG_CHANNEL_MODE_MSK __constant_cpu_to_le32(0x3<<25)
220#define RXON_FLG_CHANNEL_MODE_PURE_40_MSK __constant_cpu_to_le32(0x1<<25)
221#define RXON_FLG_CHANNEL_MODE_MIXED_MSK __constant_cpu_to_le32(0x2<<25)
222
223#define RXON_RX_CHAIN_DRIVER_FORCE_MSK __constant_cpu_to_le16(0x1<<0)
224#define RXON_RX_CHAIN_VALID_MSK __constant_cpu_to_le16(0x7<<1)
225#define RXON_RX_CHAIN_VALID_POS (1)
226#define RXON_RX_CHAIN_FORCE_SEL_MSK __constant_cpu_to_le16(0x7<<4)
227#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
228#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK __constant_cpu_to_le16(0x7<<7)
229#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
230#define RXON_RX_CHAIN_CNT_MSK __constant_cpu_to_le16(0x3<<10)
231#define RXON_RX_CHAIN_CNT_POS (10)
232#define RXON_RX_CHAIN_MIMO_CNT_MSK __constant_cpu_to_le16(0x3<<12)
233#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
234#define RXON_RX_CHAIN_MIMO_FORCE_MSK __constant_cpu_to_le16(0x1<<14)
235#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
236
237
238#define MCS_DUP_6M_PLCP 0x20
239
240/* OFDM HT rate masks */
241/* ***************************************** */
242#define R_MCS_6M_MSK 0x1
243#define R_MCS_12M_MSK 0x2
244#define R_MCS_18M_MSK 0x4
245#define R_MCS_24M_MSK 0x8
246#define R_MCS_36M_MSK 0x10
247#define R_MCS_48M_MSK 0x20
248#define R_MCS_54M_MSK 0x40
249#define R_MCS_60M_MSK 0x80
250#define R_MCS_12M_DUAL_MSK 0x100
251#define R_MCS_24M_DUAL_MSK 0x200
252#define R_MCS_36M_DUAL_MSK 0x400
253#define R_MCS_48M_DUAL_MSK 0x800
254
255#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
256#define is_siso(tbl) (((tbl) == LQ_SISO))
257#define is_mimo(tbl) (((tbl) == LQ_MIMO))
258#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
259#define is_a_band(tbl) (((tbl) == LQ_A))
260#define is_g_and(tbl) (((tbl) == LQ_G))
261
262/* Flow Handler Definitions */
263
264/**********************/
265/* Addresses */
266/**********************/
267
268#define FH_MEM_LOWER_BOUND (0x1000)
269#define FH_MEM_UPPER_BOUND (0x1EF0)
270
271#define IWL_FH_REGS_LOWER_BOUND (0x1000)
272#define IWL_FH_REGS_UPPER_BOUND (0x2000)
273
274#define IWL_FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
275
276/* CBBC Area - Circular buffers base address cache pointers table */
277#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
278#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
279/* queues 0 - 15 */
280#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
281
282/* RSCSR Area */
283#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
284#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
285#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
286
287#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
288#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
289#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
290
291/* RCSR Area - Registers address map */
292#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
293#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
294#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
295
296#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
297
298/* RSSR Area - Rx shared ctrl & status registers */
299#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
300#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
301#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
302#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
303#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV (FH_MEM_RSSR_LOWER_BOUND + 0x008)
304
305/* TCSR */
306#define IWL_FH_TCSR_LOWER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xD00)
307#define IWL_FH_TCSR_UPPER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xE60)
308
309#define IWL_FH_TCSR_CHNL_NUM (7)
310#define IWL_FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
311 (IWL_FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
312
313/* TSSR Area - Tx shared status registers */
314/* TSSR */
315#define IWL_FH_TSSR_LOWER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xEA0)
316#define IWL_FH_TSSR_UPPER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xEC0)
317
318#define IWL_FH_TSSR_TX_MSG_CONFIG_REG (IWL_FH_TSSR_LOWER_BOUND + 0x008)
319#define IWL_FH_TSSR_TX_STATUS_REG (IWL_FH_TSSR_LOWER_BOUND + 0x010)
320
321#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
322#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
323
324#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_64B (0x00000000)
325#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
326#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_256B (0x00000800)
327#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_512B (0x00000C00)
328
329#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
330#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
331
332#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
333#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
334
335#define IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) \
336 ((1 << (_chnl)) << 24)
337#define IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) \
338 ((1 << (_chnl)) << 16)
339
340#define IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \
341 (IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \
342 IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl))
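/* Worked example for the idle mask above: for _chnl == 2 the two bit
 * macros evaluate to (1 << 2) << 24 == 0x04000000 and
 * (1 << 2) << 16 == 0x00040000, so the channel is presumably treated as
 * idle only when both its "buffers empty" bit (bits 24..30) and its
 * "no pending request" bit (bits 16..22) are set in
 * IWL_FH_TSSR_TX_STATUS_REG. */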
343
344/* TCSR: tx_config register values */
345#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
346#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
347#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_ARC (0x00000002)
348
349#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
350#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
351
352#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
353#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
354#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
355
356#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
357#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
358#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
359
360#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
361#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
362#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
363
364#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
365#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
366#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
367
368#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
369
370#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
371#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
372
373/* RCSR: channel 0 rx_config register defines */
374#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MASK (0xC0000000) /* bits 30-31 */
375#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MASK (0x00F00000) /* bits 20-23 */
376#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MASK (0x00030000) /* bits 16-17 */
377#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MASK (0x00008000) /* bit 15 */
378#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MASK (0x00001000) /* bit 12 */
379#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MASK (0x00000FF0) /* bit 4-11 */
380
381#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20)
382#define FH_RCSR_RX_CONFIG_RB_SIZE_BITSHIFT (16)
383
384/* RCSR: rx_config register values */
385#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
386#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
387#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
388
389#define IWL_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
390
391/* RCSR channel 0 config register values */
392#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
393#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
394
395/* RSCSR: defs used in normal mode */
396#define FH_RSCSR_CHNL0_RBDCB_WPTR_MASK (0x00000FFF) /* bits 0-11 */
397
398#define SCD_WIN_SIZE 64
399#define SCD_FRAME_LIMIT 64
400
401/* memory mapped registers */
402#define SCD_START_OFFSET 0xa02c00
403
404#define SCD_SRAM_BASE_ADDR (SCD_START_OFFSET + 0x0)
405#define SCD_EMPTY_BITS (SCD_START_OFFSET + 0x4)
406#define SCD_DRAM_BASE_ADDR (SCD_START_OFFSET + 0x10)
407#define SCD_AIT (SCD_START_OFFSET + 0x18)
408#define SCD_TXFACT (SCD_START_OFFSET + 0x1c)
409#define SCD_QUEUE_WRPTR(x) (SCD_START_OFFSET + 0x24 + (x) * 4)
410#define SCD_QUEUE_RDPTR(x) (SCD_START_OFFSET + 0x64 + (x) * 4)
411#define SCD_SETQUEUENUM (SCD_START_OFFSET + 0xa4)
412#define SCD_SET_TXSTAT_TXED (SCD_START_OFFSET + 0xa8)
413#define SCD_SET_TXSTAT_DONE (SCD_START_OFFSET + 0xac)
414#define SCD_SET_TXSTAT_NOT_SCHD (SCD_START_OFFSET + 0xb0)
415#define SCD_DECREASE_CREDIT (SCD_START_OFFSET + 0xb4)
416#define SCD_DECREASE_SCREDIT (SCD_START_OFFSET + 0xb8)
417#define SCD_LOAD_CREDIT (SCD_START_OFFSET + 0xbc)
418#define SCD_LOAD_SCREDIT (SCD_START_OFFSET + 0xc0)
419#define SCD_BAR (SCD_START_OFFSET + 0xc4)
420#define SCD_BAR_DW0 (SCD_START_OFFSET + 0xc8)
421#define SCD_BAR_DW1 (SCD_START_OFFSET + 0xcc)
422#define SCD_QUEUECHAIN_SEL (SCD_START_OFFSET + 0xd0)
423#define SCD_QUERY_REQ (SCD_START_OFFSET + 0xd8)
424#define SCD_QUERY_RES (SCD_START_OFFSET + 0xdc)
425#define SCD_PENDING_FRAMES (SCD_START_OFFSET + 0xe0)
426#define SCD_INTERRUPT_MASK (SCD_START_OFFSET + 0xe4)
427#define SCD_INTERRUPT_THRESHOLD (SCD_START_OFFSET + 0xe8)
428#define SCD_QUERY_MIN_FRAME_SIZE (SCD_START_OFFSET + 0x100)
429#define SCD_QUEUE_STATUS_BITS(x) (SCD_START_OFFSET + 0x104 + (x) * 4)
430
431/* SRAM structures */
432#define SCD_CONTEXT_DATA_OFFSET 0x380
433#define SCD_TX_STTS_BITMAP_OFFSET 0x400
434#define SCD_TRANSLATE_TBL_OFFSET 0x500
435#define SCD_CONTEXT_QUEUE_OFFSET(x) (SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
436#define SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
437 ((SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
438
439#define SCD_TXFACT_REG_TXFIFO_MASK(lo, hi) \
440 ((1<<(hi))|((1<<(hi))-(1<<(lo))))
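/* Two small arithmetic examples for the macros above (illustrative only):
 *
 *	SCD_TRANSLATE_TBL_OFFSET_QUEUE(3) = (0x500 + 3 * 2) & 0xfffffffc
 *					  = 0x506 & ~3 = 0x504
 *
 * i.e. the per-queue __le16 entries are addressed on a 4-byte aligned
 * boundary, two entries per word.
 *
 *	SCD_TXFACT_REG_TXFIFO_MASK(0, 6) = (1 << 6) | ((1 << 6) - (1 << 0))
 *					 = 0x40 | 0x3f = 0x7f
 *
 * a contiguous mask covering TX FIFOs 0 through 6. */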
441
442
443#define SCD_MODE_REG_BIT_SEARCH_MODE (1<<0)
444#define SCD_MODE_REG_BIT_SBYP_MODE (1<<1)
445
446#define SCD_TXFIFO_POS_TID (0)
447#define SCD_TXFIFO_POS_RA (4)
448#define SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
449#define SCD_QUEUE_STTS_REG_POS_TXF (1)
450#define SCD_QUEUE_STTS_REG_POS_WSL (5)
451#define SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
452#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
453#define SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
454
455#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
456
457#define SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
458#define SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
459#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
460#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
461#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
462#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
463#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
464#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
465
466#define CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R (0x00000010)
467#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
468#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
469#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
470
471static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
472{
473 return le32_to_cpu(rate_n_flags) & 0xFF;
474}
475static inline u16 iwl_hw_get_rate_n_flags(__le32 rate_n_flags)
476{
477 return le32_to_cpu(rate_n_flags) & 0xFFFF;
478}
479static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u16 flags)
480{
481 return cpu_to_le32(flags|(u16)rate);
482}
483
484struct iwl_tfd_frame_data {
485 __le32 tb1_addr;
486
487 __le32 val1;
488 /* __le32 ptb1_32_35:4; */
489#define IWL_tb1_addr_hi_POS 0
490#define IWL_tb1_addr_hi_LEN 4
491#define IWL_tb1_addr_hi_SYM val1
492 /* __le32 tb_len1:12; */
493#define IWL_tb1_len_POS 4
494#define IWL_tb1_len_LEN 12
495#define IWL_tb1_len_SYM val1
496 /* __le32 ptb2_0_15:16; */
497#define IWL_tb2_addr_lo16_POS 16
498#define IWL_tb2_addr_lo16_LEN 16
499#define IWL_tb2_addr_lo16_SYM val1
500
501 __le32 val2;
502 /* __le32 ptb2_16_35:20; */
503#define IWL_tb2_addr_hi20_POS 0
504#define IWL_tb2_addr_hi20_LEN 20
505#define IWL_tb2_addr_hi20_SYM val2
506 /* __le32 tb_len2:12; */
507#define IWL_tb2_len_POS 20
508#define IWL_tb2_len_LEN 12
509#define IWL_tb2_len_SYM val2
510} __attribute__ ((packed));
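/* The _POS/_LEN/_SYM triplets above describe packed sub-fields of the
 * little-endian words they name.  As an illustration (not an accessor the
 * driver itself defines here), the 36-bit address of the second buffer
 * could be reassembled as:
 *
 *	u64 tb2_addr = ((u64)(le32_to_cpu(d->val2) &
 *			      ((1 << IWL_tb2_addr_hi20_LEN) - 1)) << 16) |
 *		       (le32_to_cpu(d->val1) >> IWL_tb2_addr_lo16_POS);
 *
 * where d is a pointer to a struct iwl_tfd_frame_data. */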
511
512struct iwl_tfd_frame {
513 __le32 val0;
514 /* __le32 rsvd1:24; */
515 /* __le32 num_tbs:5; */
516#define IWL_num_tbs_POS 24
517#define IWL_num_tbs_LEN 5
518#define IWL_num_tbs_SYM val0
519 /* __le32 rsvd2:1; */
520 /* __le32 padding:2; */
521 struct iwl_tfd_frame_data pa[10];
522 __le32 reserved;
523} __attribute__ ((packed));
524
525#define IWL4965_MAX_WIN_SIZE 64
526#define IWL4965_QUEUE_SIZE 256
527#define IWL4965_NUM_FIFOS 7
528#define IWL_MAX_NUM_QUEUES 16
529
530struct iwl4965_queue_byte_cnt_entry {
531 __le16 val;
532 /* __le16 byte_cnt:12; */
533#define IWL_byte_cnt_POS 0
534#define IWL_byte_cnt_LEN 12
535#define IWL_byte_cnt_SYM val
536 /* __le16 rsvd:4; */
537} __attribute__ ((packed));
538
539struct iwl4965_sched_queue_byte_cnt_tbl {
540 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL4965_QUEUE_SIZE +
541 IWL4965_MAX_WIN_SIZE];
542 u8 dont_care[1024 -
543 (IWL4965_QUEUE_SIZE + IWL4965_MAX_WIN_SIZE) *
544 sizeof(__le16)];
545} __attribute__ ((packed));
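/* Size check for the table above: (256 + 64) entries * sizeof(__le16)
 * = 640 bytes of tfd_offset[], so dont_care[] pads each per-queue table
 * out to exactly 1024 bytes (384 bytes of padding).  With
 * IWL_MAX_NUM_QUEUES == 16, the byte-count tables occupy the first
 * 16 KiB of struct iwl_shared below. */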
546
547/* Base physical address of iwl_shared is provided to SCD_DRAM_BASE_ADDR
548 * and &iwl_shared.val0 is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG */
549struct iwl_shared {
550 struct iwl4965_sched_queue_byte_cnt_tbl
551 queues_byte_cnt_tbls[IWL_MAX_NUM_QUEUES];
552 __le32 val0;
553
554 /* __le32 rb_closed_stts_rb_num:12; */
555#define IWL_rb_closed_stts_rb_num_POS 0
556#define IWL_rb_closed_stts_rb_num_LEN 12
557#define IWL_rb_closed_stts_rb_num_SYM val0
558 /* __le32 rsrv1:4; */
559 /* __le32 rb_closed_stts_rx_frame_num:12; */
560#define IWL_rb_closed_stts_rx_frame_num_POS 16
561#define IWL_rb_closed_stts_rx_frame_num_LEN 12
562#define IWL_rb_closed_stts_rx_frame_num_SYM val0
563 /* __le32 rsrv2:4; */
564
565 __le32 val1;
566 /* __le32 frame_finished_stts_rb_num:12; */
567#define IWL_frame_finished_stts_rb_num_POS 0
568#define IWL_frame_finished_stts_rb_num_LEN 12
569#define IWL_frame_finished_stts_rb_num_SYM val1
570 /* __le32 rsrv3:4; */
571 /* __le32 frame_finished_stts_rx_frame_num:12; */
572#define IWL_frame_finished_stts_rx_frame_num_POS 16
573#define IWL_frame_finished_stts_rx_frame_num_LEN 12
574#define IWL_frame_finished_stts_rx_frame_num_SYM val1
575 /* __le32 rsrv4:4; */
576
577 __le32 padding1; /* so that allocation will be aligned to 16B */
578 __le32 padding2;
579} __attribute__ ((packed));
580
581#endif /* __iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
new file mode 100644
index 000000000000..f3638607d641
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -0,0 +1,2118 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/wireless.h>
30#include <net/mac80211.h>
31#include <net/ieee80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include <net/mac80211.h>
40#include <linux/wireless.h>
41
42#include "../net/mac80211/ieee80211_rate.h"
43
44#include "iwlwifi.h"
45#include "iwl-helpers.h"
46
47#define RS_NAME "iwl-4965-rs"
48
49#define NUM_TRY_BEFORE_ANTENNA_TOGGLE 1
50#define IWL_NUMBER_TRY 1
51#define IWL_HT_NUMBER_TRY 3
52
53#define IWL_RATE_MAX_WINDOW 62
54#define IWL_RATE_HIGH_TH 10880
55#define IWL_RATE_MIN_FAILURE_TH 6
56#define IWL_RATE_MIN_SUCCESS_TH 8
57#define IWL_RATE_DECREASE_TH 1920
58#define IWL_RATE_INCREASE_TH 8960
59#define IWL_RATE_SCALE_FLUSH_INTVL (2*HZ) /*2 seconds */
60
61static u8 rs_ht_to_legacy[] = {
62 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
63 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
64 IWL_RATE_6M_INDEX,
65 IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
66 IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
67 IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
68 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
69};
70
71struct iwl_rate {
72 u32 rate_n_flags;
73} __attribute__ ((packed));
74
75struct iwl_rate_scale_data {
76 u64 data;
77 s32 success_counter;
78 s32 success_ratio;
79 s32 counter;
80 s32 average_tpt;
81 unsigned long stamp;
82};
83
84struct iwl_scale_tbl_info {
85 enum iwl_table_type lq_type;
86 enum iwl_antenna_type antenna_type;
87 u8 is_SGI;
88 u8 is_fat;
89 u8 is_dup;
90 u8 action;
91 s32 *expected_tpt;
92 struct iwl_rate current_rate;
93 struct iwl_rate_scale_data win[IWL_RATE_COUNT];
94};
95
96struct iwl_rate_scale_priv {
97 u8 active_tbl;
98 u8 enable_counter;
99 u8 stay_in_tbl;
100 u8 search_better_tbl;
101 s32 last_tpt;
102 u32 table_count_limit;
103 u32 max_failure_limit;
104 u32 max_success_limit;
105 u32 table_count;
106 u32 total_failed;
107 u32 total_success;
108 u32 flush_timer;
109 u8 action_counter;
110 u8 antenna;
111 u8 valid_antenna;
112 u8 is_green;
113 u8 is_dup;
114 u8 phymode;
115 u8 ibss_sta_added;
116 u16 active_rate;
117 u16 active_siso_rate;
118 u16 active_mimo_rate;
119 u16 active_rate_basic;
120 struct iwl_link_quality_cmd lq;
121 struct iwl_scale_tbl_info lq_info[LQ_SIZE];
122};
123
124static void rs_rate_scale_perform(struct iwl_priv *priv,
125 struct net_device *dev,
126 struct ieee80211_hdr *hdr,
127 struct sta_info *sta);
128static int rs_fill_link_cmd(struct iwl_rate_scale_priv *lq_data,
129 struct iwl_rate *tx_mcs,
130 struct iwl_link_quality_cmd *tbl,
131 struct sta_info *sta);
132
133
134static s32 expected_tpt_A[IWL_RATE_COUNT] = {
135 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186
136};
137
138static s32 expected_tpt_G[IWL_RATE_COUNT] = {
139 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 186
140};
141
142static s32 expected_tpt_siso20MHz[IWL_RATE_COUNT] = {
143 0, 0, 0, 0, 42, 42, 76, 102, 124, 159, 183, 193, 202
144};
145
146static s32 expected_tpt_siso20MHzSGI[IWL_RATE_COUNT] = {
147 0, 0, 0, 0, 46, 46, 82, 110, 132, 168, 192, 202, 211
148};
149
150static s32 expected_tpt_mimo20MHz[IWL_RATE_COUNT] = {
151 0, 0, 0, 0, 74, 74, 123, 155, 179, 214, 236, 244, 251
152};
153
154static s32 expected_tpt_mimo20MHzSGI[IWL_RATE_COUNT] = {
155 0, 0, 0, 0, 81, 81, 131, 164, 188, 222, 243, 251, 257
156};
157
158static s32 expected_tpt_siso40MHz[IWL_RATE_COUNT] = {
159 0, 0, 0, 0, 77, 77, 127, 160, 184, 220, 242, 250, 257
160};
161
162static s32 expected_tpt_siso40MHzSGI[IWL_RATE_COUNT] = {
163 0, 0, 0, 0, 83, 83, 135, 169, 193, 229, 250, 257, 264
164};
165
166static s32 expected_tpt_mimo40MHz[IWL_RATE_COUNT] = {
167 0, 0, 0, 0, 123, 123, 182, 214, 235, 264, 279, 285, 289
168};
169
170static s32 expected_tpt_mimo40MHzSGI[IWL_RATE_COUNT] = {
171 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293
172};
173
174static int iwl_lq_sync_callback(struct iwl_priv *priv,
175 struct iwl_cmd *cmd, struct sk_buff *skb)
176{
177	/* We didn't cache the SKB; let the caller free it */
178 return 1;
179}
180
181static inline u8 iwl_rate_get_rate(u32 rate_n_flags)
182{
183 return (u8)(rate_n_flags & 0xFF);
184}
185
186static int rs_send_lq_cmd(struct iwl_priv *priv,
187 struct iwl_link_quality_cmd *lq, u8 flags)
188{
189#ifdef CONFIG_IWLWIFI_DEBUG
190 int i;
191#endif
192 int rc = -1;
193
194 struct iwl_host_cmd cmd = {
195 .id = REPLY_TX_LINK_QUALITY_CMD,
196 .len = sizeof(struct iwl_link_quality_cmd),
197 .meta.flags = flags,
198 .data = lq,
199 };
200
201 if ((lq->sta_id == 0xFF) &&
202 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS))
203 return rc;
204
205 if (lq->sta_id == 0xFF)
206 lq->sta_id = IWL_AP_ID;
207
208 IWL_DEBUG_RATE("lq station id 0x%x\n", lq->sta_id);
209 IWL_DEBUG_RATE("lq dta 0x%X 0x%X\n",
210 lq->general_params.single_stream_ant_msk,
211 lq->general_params.dual_stream_ant_msk);
212#ifdef CONFIG_IWLWIFI_DEBUG
213 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
214 IWL_DEBUG_RATE("lq index %d 0x%X\n",
215 i, lq->rs_table[i].rate_n_flags);
216#endif
217
218 if (flags & CMD_ASYNC)
219 cmd.meta.u.callback = iwl_lq_sync_callback;
220
221 if (iwl_is_associated(priv) && priv->assoc_station_added &&
222 priv->lq_mngr.lq_ready)
223 rc = iwl_send_cmd(priv, &cmd);
224
225 return rc;
226}
227
228static int rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
229{
230 window->data = 0;
231 window->success_counter = 0;
232 window->success_ratio = IWL_INVALID_VALUE;
233 window->counter = 0;
234 window->average_tpt = IWL_INVALID_VALUE;
235 window->stamp = 0;
236
237 return 0;
238}
239
240static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
241 int scale_index, s32 tpt, u32 status)
242{
243 int rc = 0;
244 struct iwl_rate_scale_data *window = NULL;
245 u64 mask;
246 u8 win_size = IWL_RATE_MAX_WINDOW;
247 s32 fail_count;
248
249 if (scale_index < 0)
250 return -1;
251
252 if (scale_index >= IWL_RATE_COUNT)
253 return -1;
254
255 window = &(windows[scale_index]);
256
257 if (window->counter >= win_size) {
258
259 window->counter = win_size - 1;
260 mask = 1;
261 mask = (mask << (win_size - 1));
262 if ((window->data & mask)) {
263 window->data &= ~mask;
264 window->success_counter = window->success_counter - 1;
265 }
266 }
267
268 window->counter = window->counter + 1;
269 mask = window->data;
270 window->data = (mask << 1);
271 if (status != 0) {
272 window->success_counter = window->success_counter + 1;
273 window->data |= 0x1;
274 }
275
276 if (window->counter > 0)
277 window->success_ratio = 128 * (100 * window->success_counter)
278 / window->counter;
279 else
280 window->success_ratio = IWL_INVALID_VALUE;
281
282 fail_count = window->counter - window->success_counter;
283
284 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
285 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
286 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
287 else
288 window->average_tpt = IWL_INVALID_VALUE;
289
290 window->stamp = jiffies;
291
292 return rc;
293}
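/* The success_ratio kept by rs_collect_tx_data() is a percentage scaled
 * by 128: a perfect 62/62 window gives 128 * (100 * 62) / 62 = 12800.
 * Read against that scale, the *_TH constants defined at the top of this
 * file mean roughly:
 *	IWL_RATE_HIGH_TH	10880 -> 85% success
 *	IWL_RATE_INCREASE_TH	 8960 -> 70% success
 *	IWL_RATE_DECREASE_TH	 1920 -> 15% success
 * and average_tpt = (success_ratio * tpt + 64) / 128 is the expected
 * throughput weighted by that ratio, with +64 added for rounding. */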
294
295static int rs_mcs_from_tbl(struct iwl_rate *mcs_rate,
296 struct iwl_scale_tbl_info *tbl,
297 int index, u8 use_green)
298{
299 int rc = 0;
300
301 if (is_legacy(tbl->lq_type)) {
302 mcs_rate->rate_n_flags = iwl_rates[index].plcp;
303 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
304 mcs_rate->rate_n_flags |= RATE_MCS_CCK_MSK;
305
306 } else if (is_siso(tbl->lq_type)) {
307 if (index > IWL_LAST_OFDM_RATE)
308 index = IWL_LAST_OFDM_RATE;
309 mcs_rate->rate_n_flags = iwl_rates[index].plcp_siso |
310 RATE_MCS_HT_MSK;
311 } else {
312 if (index > IWL_LAST_OFDM_RATE)
313 index = IWL_LAST_OFDM_RATE;
314 mcs_rate->rate_n_flags = iwl_rates[index].plcp_mimo |
315 RATE_MCS_HT_MSK;
316 }
317
318 switch (tbl->antenna_type) {
319 case ANT_BOTH:
320 mcs_rate->rate_n_flags |= RATE_MCS_ANT_AB_MSK;
321 break;
322 case ANT_MAIN:
323 mcs_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK;
324 break;
325 case ANT_AUX:
326 mcs_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK;
327 break;
328 case ANT_NONE:
329 break;
330 }
331
332 if (is_legacy(tbl->lq_type))
333 return rc;
334
335 if (tbl->is_fat) {
336 if (tbl->is_dup)
337 mcs_rate->rate_n_flags |= RATE_MCS_DUP_MSK;
338 else
339 mcs_rate->rate_n_flags |= RATE_MCS_FAT_MSK;
340 }
341 if (tbl->is_SGI)
342 mcs_rate->rate_n_flags |= RATE_MCS_SGI_MSK;
343
344 if (use_green) {
345 mcs_rate->rate_n_flags |= RATE_MCS_GF_MSK;
346 if (is_siso(tbl->lq_type))
347 mcs_rate->rate_n_flags &= ~RATE_MCS_SGI_MSK;
348 }
349 return rc;
350}
351
352static int rs_get_tbl_info_from_mcs(const struct iwl_rate *mcs_rate,
353 int phymode, struct iwl_scale_tbl_info *tbl,
354 int *rate_idx)
355{
356 int index;
357 u32 ant_msk;
358
359 index = iwl_rate_index_from_plcp(mcs_rate->rate_n_flags);
360
361 if (index == IWL_RATE_INVALID) {
362 *rate_idx = -1;
363 return -1;
364 }
365 tbl->is_SGI = 0;
366 tbl->is_fat = 0;
367 tbl->is_dup = 0;
368 tbl->antenna_type = ANT_BOTH;
369
370 if (!(mcs_rate->rate_n_flags & RATE_MCS_HT_MSK)) {
371 ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK);
372
373 if (ant_msk == RATE_MCS_ANT_AB_MSK)
374 tbl->lq_type = LQ_NONE;
375 else {
376
377 if (phymode == MODE_IEEE80211A)
378 tbl->lq_type = LQ_A;
379 else
380 tbl->lq_type = LQ_G;
381
382 if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK)
383 tbl->antenna_type = ANT_MAIN;
384 else
385 tbl->antenna_type = ANT_AUX;
386 }
387 *rate_idx = index;
388
389 } else if (iwl_rate_get_rate(mcs_rate->rate_n_flags)
390 <= IWL_RATE_SISO_60M_PLCP) {
391 tbl->lq_type = LQ_SISO;
392
393 ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK);
394 if (ant_msk == RATE_MCS_ANT_AB_MSK)
395 tbl->lq_type = LQ_NONE;
396 else {
397 if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK)
398 tbl->antenna_type = ANT_MAIN;
399 else
400 tbl->antenna_type = ANT_AUX;
401 }
402 if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK)
403 tbl->is_SGI = 1;
404
405 if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) ||
406 (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK))
407 tbl->is_fat = 1;
408
409 if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)
410 tbl->is_dup = 1;
411
412 *rate_idx = index;
413 } else {
414 tbl->lq_type = LQ_MIMO;
415 if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK)
416 tbl->is_SGI = 1;
417
418 if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) ||
419 (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK))
420 tbl->is_fat = 1;
421
422 if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)
423 tbl->is_dup = 1;
424 *rate_idx = index;
425 }
426 return 0;
427}
428
429static inline void rs_toggle_antenna(struct iwl_rate *new_rate,
430 struct iwl_scale_tbl_info *tbl)
431{
432 if (tbl->antenna_type == ANT_AUX) {
433 tbl->antenna_type = ANT_MAIN;
434 new_rate->rate_n_flags &= ~RATE_MCS_ANT_B_MSK;
435 new_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK;
436 } else {
437 tbl->antenna_type = ANT_AUX;
438 new_rate->rate_n_flags &= ~RATE_MCS_ANT_A_MSK;
439 new_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK;
440 }
441}
442
443static inline s8 rs_use_green(struct iwl_priv *priv)
444{
445 s8 rc = 0;
446#ifdef CONFIG_IWLWIFI_HT
447 if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht)
448 return 0;
449
450 if ((priv->current_assoc_ht.is_green_field) &&
451 !(priv->current_assoc_ht.operating_mode & 0x4))
452 rc = 1;
453#endif /*CONFIG_IWLWIFI_HT */
454 return rc;
455}
456
457/**
458 * rs_get_supported_rates - get the available rates
459 *
460 * If the frame is a management or broadcast frame, return only the
461 * basic available rates.
462 *
463 */
464static void rs_get_supported_rates(struct iwl_rate_scale_priv *lq_data,
465 struct ieee80211_hdr *hdr,
466 enum iwl_table_type rate_type,
467 u16 *data_rate)
468{
469 if (is_legacy(rate_type))
470 *data_rate = lq_data->active_rate;
471 else {
472 if (is_siso(rate_type))
473 *data_rate = lq_data->active_siso_rate;
474 else
475 *data_rate = lq_data->active_mimo_rate;
476 }
477
478 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
479 lq_data->active_rate_basic)
480 *data_rate = lq_data->active_rate_basic;
481}
482
483static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
484{
485 u8 high = IWL_RATE_INVALID;
486 u8 low = IWL_RATE_INVALID;
487
488	/* 802.11a or HT walks to the next literal adjacent rate in
489 * the rate table */
490 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
491 int i;
492 u32 mask;
493
494 /* Find the previous rate that is in the rate mask */
495 i = index - 1;
496 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
497 if (rate_mask & mask) {
498 low = i;
499 break;
500 }
501 }
502
503 /* Find the next rate that is in the rate mask */
504 i = index + 1;
505 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
506 if (rate_mask & mask) {
507 high = i;
508 break;
509 }
510 }
511
512 return (high << 8) | low;
513 }
514
515 low = index;
516 while (low != IWL_RATE_INVALID) {
517 low = iwl_rates[low].prev_rs;
518 if (low == IWL_RATE_INVALID)
519 break;
520 if (rate_mask & (1 << low))
521 break;
522 IWL_DEBUG_RATE("Skipping masked lower rate: %d\n", low);
523 }
524
525 high = index;
526 while (high != IWL_RATE_INVALID) {
527 high = iwl_rates[high].next_rs;
528 if (high == IWL_RATE_INVALID)
529 break;
530 if (rate_mask & (1 << high))
531 break;
532 IWL_DEBUG_RATE("Skipping masked higher rate: %d\n", high);
533 }
534
535 return (high << 8) | low;
536}
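/* rs_get_adjacent_rate() packs both neighbours into one u16: the next
 * usable higher rate index in the high byte and the next usable lower
 * one in the low byte.  For example, a return value of 0x0503 means
 * "high = 5, low = 3"; callers unpack it with (high_low >> 8) & 0xff and
 * high_low & 0xff, and IWL_RATE_INVALID in either byte means there is no
 * usable rate in that direction. */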
537
538static int rs_get_lower_rate(struct iwl_rate_scale_priv *lq_data,
539 struct iwl_scale_tbl_info *tbl, u8 scale_index,
540 u8 ht_possible, struct iwl_rate *mcs_rate,
541 struct sta_info *sta)
542{
543 u8 is_green = lq_data->is_green;
544 s32 low;
545 u16 rate_mask;
546 u16 high_low;
547 u8 switch_to_legacy = 0;
548
549 /* check if we need to switch from HT to legacy rates.
550 * assumption is that mandatory rates (1Mbps or 6Mbps)
551	 * are always supported (the spec demands it) */
552 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
553 switch_to_legacy = 1;
554 scale_index = rs_ht_to_legacy[scale_index];
555 if (lq_data->phymode == MODE_IEEE80211A)
556 tbl->lq_type = LQ_A;
557 else
558 tbl->lq_type = LQ_G;
559
560 if ((tbl->antenna_type == ANT_BOTH) ||
561 (tbl->antenna_type == ANT_NONE))
562 tbl->antenna_type = ANT_MAIN;
563
564 tbl->is_fat = 0;
565 tbl->is_SGI = 0;
566 }
567
568 rs_get_supported_rates(lq_data, NULL, tbl->lq_type, &rate_mask);
569
570 /* mask with station rate restriction */
571 if (is_legacy(tbl->lq_type)) {
572 if (lq_data->phymode == (u8) MODE_IEEE80211A)
573 rate_mask = (u16)(rate_mask &
574 (sta->supp_rates << IWL_FIRST_OFDM_RATE));
575 else
576 rate_mask = (u16)(rate_mask & sta->supp_rates);
577 }
578
579	/* if we did switch from HT to legacy, check the current rate */
580 if ((switch_to_legacy) &&
581 (rate_mask & (1 << scale_index))) {
582 rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green);
583 return 0;
584 }
585
586 high_low = rs_get_adjacent_rate(scale_index, rate_mask, tbl->lq_type);
587 low = high_low & 0xff;
588
589 if (low != IWL_RATE_INVALID)
590 rs_mcs_from_tbl(mcs_rate, tbl, low, is_green);
591 else
592 rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green);
593
594 return 0;
595}
596
597static void rs_tx_status(void *priv_rate,
598 struct net_device *dev,
599 struct sk_buff *skb,
600 struct ieee80211_tx_status *tx_resp)
601{
602 int status;
603 u8 retries;
604 int rs_index, index = 0;
605 struct iwl_rate_scale_priv *lq;
606 struct iwl_link_quality_cmd *table;
607 struct sta_info *sta;
608 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
609 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
610 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
611 struct iwl_rate_scale_data *window = NULL;
612 struct iwl_rate_scale_data *search_win = NULL;
613 struct iwl_rate tx_mcs;
614 struct iwl_scale_tbl_info tbl_type;
615 struct iwl_scale_tbl_info *curr_tbl, *search_tbl;
616 u8 active_index = 0;
617 u16 fc = le16_to_cpu(hdr->frame_control);
618 s32 tpt = 0;
619
620 IWL_DEBUG_RATE("get frame ack response, update rate scale window\n");
621
622 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1))
623 return;
624
625 retries = tx_resp->retry_count;
626
627 if (retries > 15)
628 retries = 15;
629
630
631 sta = sta_info_get(local, hdr->addr1);
632
633 if (!sta || !sta->rate_ctrl_priv) {
634 if (sta)
635 sta_info_put(sta);
636 return;
637 }
638
639 lq = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv;
640
641 if (!priv->lq_mngr.lq_ready)
642 return;
643
644 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && !lq->ibss_sta_added)
645 return;
646
647 table = &lq->lq;
648 active_index = lq->active_tbl;
649
650 lq->antenna = (lq->valid_antenna & local->hw.conf.antenna_sel_tx);
651 if (!lq->antenna)
652 lq->antenna = lq->valid_antenna;
653
654 lq->antenna = lq->valid_antenna;
655 curr_tbl = &(lq->lq_info[active_index]);
656 search_tbl = &(lq->lq_info[(1 - active_index)]);
657 window = (struct iwl_rate_scale_data *)
658 &(curr_tbl->win[0]);
659 search_win = (struct iwl_rate_scale_data *)
660 &(search_tbl->win[0]);
661
662 tx_mcs.rate_n_flags = tx_resp->control.tx_rate;
663
664 rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode,
665 &tbl_type, &rs_index);
666 if ((rs_index < 0) || (rs_index >= IWL_RATE_COUNT)) {
667 IWL_DEBUG_RATE("bad rate index at: %d rate 0x%X\n",
668 rs_index, tx_mcs.rate_n_flags);
669 sta_info_put(sta);
670 return;
671 }
672
673 if (retries &&
674 (tx_mcs.rate_n_flags !=
675 le32_to_cpu(table->rs_table[0].rate_n_flags))) {
676 IWL_DEBUG_RATE("initial rate does not match 0x%x 0x%x\n",
677 tx_mcs.rate_n_flags,
678 le32_to_cpu(table->rs_table[0].rate_n_flags));
679 sta_info_put(sta);
680 return;
681 }
682
683 while (retries) {
684 tx_mcs.rate_n_flags =
685 le32_to_cpu(table->rs_table[index].rate_n_flags);
686 rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode,
687 &tbl_type, &rs_index);
688
689 if ((tbl_type.lq_type == search_tbl->lq_type) &&
690 (tbl_type.antenna_type == search_tbl->antenna_type) &&
691 (tbl_type.is_SGI == search_tbl->is_SGI)) {
692 if (search_tbl->expected_tpt)
693 tpt = search_tbl->expected_tpt[rs_index];
694 else
695 tpt = 0;
696 rs_collect_tx_data(search_win,
697 rs_index, tpt, 0);
698 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
699 (tbl_type.antenna_type == curr_tbl->antenna_type) &&
700 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
701 if (curr_tbl->expected_tpt)
702 tpt = curr_tbl->expected_tpt[rs_index];
703 else
704 tpt = 0;
705 rs_collect_tx_data(window, rs_index, tpt, 0);
706 }
707 if (lq->stay_in_tbl)
708 lq->total_failed++;
709 --retries;
710 index++;
711
712 }
713
714 if (!tx_resp->retry_count)
715 tx_mcs.rate_n_flags = tx_resp->control.tx_rate;
716 else
717 tx_mcs.rate_n_flags =
718 le32_to_cpu(table->rs_table[index].rate_n_flags);
719
720 rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode,
721 &tbl_type, &rs_index);
722
723 if (tx_resp->flags & IEEE80211_TX_STATUS_ACK)
724 status = 1;
725 else
726 status = 0;
727
728 if ((tbl_type.lq_type == search_tbl->lq_type) &&
729 (tbl_type.antenna_type == search_tbl->antenna_type) &&
730 (tbl_type.is_SGI == search_tbl->is_SGI)) {
731 if (search_tbl->expected_tpt)
732 tpt = search_tbl->expected_tpt[rs_index];
733 else
734 tpt = 0;
735 rs_collect_tx_data(search_win,
736 rs_index, tpt, status);
737 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
738 (tbl_type.antenna_type == curr_tbl->antenna_type) &&
739 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
740 if (curr_tbl->expected_tpt)
741 tpt = curr_tbl->expected_tpt[rs_index];
742 else
743 tpt = 0;
744 rs_collect_tx_data(window, rs_index, tpt, status);
745 }
746
747 if (lq->stay_in_tbl) {
748 if (status)
749 lq->total_success++;
750 else
751 lq->total_failed++;
752 }
753
754 rs_rate_scale_perform(priv, dev, hdr, sta);
755 sta_info_put(sta);
756 return;
757}
758
759static u8 rs_is_ant_connected(u8 valid_antenna,
760 enum iwl_antenna_type antenna_type)
761{
762 if (antenna_type == ANT_AUX)
763 return ((valid_antenna & 0x2) ? 1:0);
764 else if (antenna_type == ANT_MAIN)
765 return ((valid_antenna & 0x1) ? 1:0);
766 else if (antenna_type == ANT_BOTH) {
767 if ((valid_antenna & 0x3) == 0x3)
768 return 1;
769 else
770 return 0;
771 }
772
773 return 1;
774}
775
776static u8 rs_is_other_ant_connected(u8 valid_antenna,
777 enum iwl_antenna_type antenna_type)
778{
779 if (antenna_type == ANT_AUX)
780 return (rs_is_ant_connected(valid_antenna, ANT_MAIN));
781 else
782 return (rs_is_ant_connected(valid_antenna, ANT_AUX));
783
784 return 0;
785}
786
787static void rs_set_stay_in_table(u8 is_legacy,
788 struct iwl_rate_scale_priv *lq_data)
789{
790 IWL_DEBUG_HT("we are staying in the same table\n");
791 lq_data->stay_in_tbl = 1;
792 if (is_legacy) {
793 lq_data->table_count_limit = IWL_LEGACY_TABLE_COUNT;
794 lq_data->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
795 lq_data->max_success_limit = IWL_LEGACY_TABLE_COUNT;
796 } else {
797 lq_data->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
798 lq_data->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
799 lq_data->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
800 }
801 lq_data->table_count = 0;
802 lq_data->total_failed = 0;
803 lq_data->total_success = 0;
804}
805
806static void rs_get_expected_tpt_table(struct iwl_rate_scale_priv *lq_data,
807 struct iwl_scale_tbl_info *tbl)
808{
809 if (is_legacy(tbl->lq_type)) {
810 if (!is_a_band(tbl->lq_type))
811 tbl->expected_tpt = expected_tpt_G;
812 else
813 tbl->expected_tpt = expected_tpt_A;
814 } else if (is_siso(tbl->lq_type)) {
815 if (tbl->is_fat && !lq_data->is_dup)
816 if (tbl->is_SGI)
817 tbl->expected_tpt = expected_tpt_siso40MHzSGI;
818 else
819 tbl->expected_tpt = expected_tpt_siso40MHz;
820 else if (tbl->is_SGI)
821 tbl->expected_tpt = expected_tpt_siso20MHzSGI;
822 else
823 tbl->expected_tpt = expected_tpt_siso20MHz;
824
825 } else if (is_mimo(tbl->lq_type)) {
826 if (tbl->is_fat && !lq_data->is_dup)
827 if (tbl->is_SGI)
828 tbl->expected_tpt = expected_tpt_mimo40MHzSGI;
829 else
830 tbl->expected_tpt = expected_tpt_mimo40MHz;
831 else if (tbl->is_SGI)
832 tbl->expected_tpt = expected_tpt_mimo20MHzSGI;
833 else
834 tbl->expected_tpt = expected_tpt_mimo20MHz;
835 } else
836 tbl->expected_tpt = expected_tpt_G;
837}
838
839#ifdef CONFIG_IWLWIFI_HT
840static s32 rs_get_best_rate(struct iwl_priv *priv,
841 struct iwl_rate_scale_priv *lq_data,
842 struct iwl_scale_tbl_info *tbl,
843 u16 rate_mask, s8 index, s8 rate)
844{
845 struct iwl_scale_tbl_info *active_tbl =
846 &(lq_data->lq_info[lq_data->active_tbl]);
847 s32 new_rate, high, low, start_hi;
848 s32 active_sr = active_tbl->win[index].success_ratio;
849 s32 *tpt_tbl = tbl->expected_tpt;
850 s32 active_tpt = active_tbl->expected_tpt[index];
851 u16 high_low;
852
853 new_rate = high = low = start_hi = IWL_RATE_INVALID;
854
855 for (; ;) {
856 high_low = rs_get_adjacent_rate(rate, rate_mask, tbl->lq_type);
857
858 low = high_low & 0xff;
859 high = (high_low >> 8) & 0xff;
860
861 if ((((100 * tpt_tbl[rate]) > lq_data->last_tpt) &&
862 ((active_sr > IWL_RATE_DECREASE_TH) &&
863 (active_sr <= IWL_RATE_HIGH_TH) &&
864 (tpt_tbl[rate] <= active_tpt))) ||
865 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
866 (tpt_tbl[rate] > active_tpt))) {
867
868 if (start_hi != IWL_RATE_INVALID) {
869 new_rate = start_hi;
870 break;
871 }
872 new_rate = rate;
873 if (low != IWL_RATE_INVALID)
874 rate = low;
875 else
876 break;
877 } else {
878 if (new_rate != IWL_RATE_INVALID)
879 break;
880 else if (high != IWL_RATE_INVALID) {
881 start_hi = high;
882 rate = high;
883 } else {
884 new_rate = rate;
885 break;
886 }
887 }
888 }
889
890 return new_rate;
891}
892#endif /* CONFIG_IWLWIFI_HT */
893
894static inline u8 rs_is_both_ant_supp(u8 valid_antenna)
895{
896 return (rs_is_ant_connected(valid_antenna, ANT_BOTH));
897}
898
899static int rs_switch_to_mimo(struct iwl_priv *priv,
900 struct iwl_rate_scale_priv *lq_data,
901 struct iwl_scale_tbl_info *tbl, int index)
902{
903 int rc = -1;
904#ifdef CONFIG_IWLWIFI_HT
905 u16 rate_mask;
906 s32 rate;
907 s8 is_green = lq_data->is_green;
908
909 if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht)
910 return -1;
911
912 IWL_DEBUG_HT("LQ: try to switch to MIMO\n");
913 tbl->lq_type = LQ_MIMO;
914 rs_get_supported_rates(lq_data, NULL, tbl->lq_type,
915 &rate_mask);
916
917 if (priv->current_assoc_ht.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC)
918 return -1;
919
920 if (!rs_is_both_ant_supp(lq_data->antenna))
921 return -1;
922
923 rc = 0;
924 tbl->is_dup = lq_data->is_dup;
925 tbl->action = 0;
926 if (priv->current_channel_width == IWL_CHANNEL_WIDTH_40MHZ)
927 tbl->is_fat = 1;
928 else
929 tbl->is_fat = 0;
930
931 if (tbl->is_fat) {
932 if (priv->current_assoc_ht.sgf & HT_SHORT_GI_40MHZ_ONLY)
933 tbl->is_SGI = 1;
934 else
935 tbl->is_SGI = 0;
936 } else if (priv->current_assoc_ht.sgf & HT_SHORT_GI_20MHZ_ONLY)
937 tbl->is_SGI = 1;
938 else
939 tbl->is_SGI = 0;
940
941 rs_get_expected_tpt_table(lq_data, tbl);
942
943 rate = rs_get_best_rate(priv, lq_data, tbl, rate_mask, index, index);
944
945 IWL_DEBUG_HT("LQ: MIMO best rate %d mask %X\n", rate, rate_mask);
946 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask))
947 return -1;
948 rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green);
949
950 IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n",
951 tbl->current_rate.rate_n_flags, is_green);
952
953#endif /*CONFIG_IWLWIFI_HT */
954 return rc;
955}
956
957static int rs_switch_to_siso(struct iwl_priv *priv,
958 struct iwl_rate_scale_priv *lq_data,
959 struct iwl_scale_tbl_info *tbl, int index)
960{
961 int rc = -1;
962#ifdef CONFIG_IWLWIFI_HT
963 u16 rate_mask;
964 u8 is_green = lq_data->is_green;
965 s32 rate;
966
967 IWL_DEBUG_HT("LQ: try to switch to SISO\n");
968 if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht)
969 return -1;
970
971 rc = 0;
972 tbl->is_dup = lq_data->is_dup;
973 tbl->lq_type = LQ_SISO;
974 tbl->action = 0;
975 rs_get_supported_rates(lq_data, NULL, tbl->lq_type,
976 &rate_mask);
977
978 if (priv->current_channel_width == IWL_CHANNEL_WIDTH_40MHZ)
979 tbl->is_fat = 1;
980 else
981 tbl->is_fat = 0;
982
983 if (tbl->is_fat) {
984 if (priv->current_assoc_ht.sgf & HT_SHORT_GI_40MHZ_ONLY)
985 tbl->is_SGI = 1;
986 else
987 tbl->is_SGI = 0;
988 } else if (priv->current_assoc_ht.sgf & HT_SHORT_GI_20MHZ_ONLY)
989 tbl->is_SGI = 1;
990 else
991 tbl->is_SGI = 0;
992
993 if (is_green)
994 tbl->is_SGI = 0;
995
996 rs_get_expected_tpt_table(lq_data, tbl);
997 rate = rs_get_best_rate(priv, lq_data, tbl, rate_mask, index, index);
998
999 IWL_DEBUG_HT("LQ: get best rate %d mask %X\n", rate, rate_mask);
1000 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1001 IWL_DEBUG_HT("cannot switch with index %d rate mask %x\n",
1002 rate, rate_mask);
1003 return -1;
1004 }
1005 rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green);
1006 IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n",
1007 tbl->current_rate.rate_n_flags, is_green);
1008
1009#endif /*CONFIG_IWLWIFI_HT */
1010 return rc;
1011}
1012
1013static int rs_move_legacy_other(struct iwl_priv *priv,
1014 struct iwl_rate_scale_priv *lq_data,
1015 int index)
1016{
1017 int rc = 0;
1018 struct iwl_scale_tbl_info *tbl =
1019 &(lq_data->lq_info[lq_data->active_tbl]);
1020 struct iwl_scale_tbl_info *search_tbl =
1021 &(lq_data->lq_info[(1 - lq_data->active_tbl)]);
1022 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1023 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1024 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1025 u8 start_action = tbl->action;
1026
1027 for (; ;) {
1028 switch (tbl->action) {
1029 case IWL_LEGACY_SWITCH_ANTENNA:
1030 IWL_DEBUG_HT("LQ Legacy switch Antenna\n");
1031
1032 search_tbl->lq_type = LQ_NONE;
1033 lq_data->action_counter++;
1034 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1035 break;
1036 if (!rs_is_other_ant_connected(lq_data->antenna,
1037 tbl->antenna_type))
1038 break;
1039
1040 memcpy(search_tbl, tbl, sz);
1041
1042 rs_toggle_antenna(&(search_tbl->current_rate),
1043 search_tbl);
1044 rs_get_expected_tpt_table(lq_data, search_tbl);
1045 lq_data->search_better_tbl = 1;
1046 goto out;
1047
1048 case IWL_LEGACY_SWITCH_SISO:
1049 IWL_DEBUG_HT("LQ: Legacy switch to SISO\n");
1050 memcpy(search_tbl, tbl, sz);
1051 search_tbl->lq_type = LQ_SISO;
1052 search_tbl->is_SGI = 0;
1053 search_tbl->is_fat = 0;
1054 rc = rs_switch_to_siso(priv, lq_data, search_tbl,
1055 index);
1056 if (!rc) {
1057 lq_data->search_better_tbl = 1;
1058 lq_data->action_counter = 0;
1059 }
1060 if (!rc)
1061 goto out;
1062
1063 break;
1064 case IWL_LEGACY_SWITCH_MIMO:
1065 IWL_DEBUG_HT("LQ: Legacy switch MIMO\n");
1066 memcpy(search_tbl, tbl, sz);
1067 search_tbl->lq_type = LQ_MIMO;
1068 search_tbl->is_SGI = 0;
1069 search_tbl->is_fat = 0;
1070 search_tbl->antenna_type = ANT_BOTH;
1071 rc = rs_switch_to_mimo(priv, lq_data, search_tbl,
1072 index);
1073 if (!rc) {
1074 lq_data->search_better_tbl = 1;
1075 lq_data->action_counter = 0;
1076 }
1077 if (!rc)
1078 goto out;
1079 break;
1080 }
1081 tbl->action++;
1082 if (tbl->action > IWL_LEGACY_SWITCH_MIMO)
1083 tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
1084
1085 if (tbl->action == start_action)
1086 break;
1087
1088 }
1089 return 0;
1090
1091 out:
1092 tbl->action++;
1093 if (tbl->action > IWL_LEGACY_SWITCH_MIMO)
1094 tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
1095 return 0;
1096
1097}
1098
1099static int rs_move_siso_to_other(struct iwl_priv *priv,
1100 struct iwl_rate_scale_priv *lq_data,
1101 int index)
1102{
1103 int rc = -1;
1104 u8 is_green = lq_data->is_green;
1105 struct iwl_scale_tbl_info *tbl =
1106 &(lq_data->lq_info[lq_data->active_tbl]);
1107 struct iwl_scale_tbl_info *search_tbl =
1108 &(lq_data->lq_info[(1 - lq_data->active_tbl)]);
1109 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1110 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1111 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1112 u8 start_action = tbl->action;
1113
1114 for (;;) {
1115 lq_data->action_counter++;
1116 switch (tbl->action) {
1117 case IWL_SISO_SWITCH_ANTENNA:
1118 IWL_DEBUG_HT("LQ: SISO SWITCH ANTENNA SISO\n");
1119 search_tbl->lq_type = LQ_NONE;
1120 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1121 break;
1122 if (!rs_is_other_ant_connected(lq_data->antenna,
1123 tbl->antenna_type))
1124 break;
1125
1126 memcpy(search_tbl, tbl, sz);
1127 search_tbl->action = IWL_SISO_SWITCH_MIMO;
1128 rs_toggle_antenna(&(search_tbl->current_rate),
1129 search_tbl);
1130 lq_data->search_better_tbl = 1;
1131
1132 goto out;
1133
1134 case IWL_SISO_SWITCH_MIMO:
1135 IWL_DEBUG_HT("LQ: SISO SWITCH TO MIMO FROM SISO\n");
1136 memcpy(search_tbl, tbl, sz);
1137 search_tbl->lq_type = LQ_MIMO;
1138 search_tbl->is_SGI = 0;
1139 search_tbl->is_fat = 0;
1140 search_tbl->antenna_type = ANT_BOTH;
1141 rc = rs_switch_to_mimo(priv, lq_data, search_tbl,
1142 index);
1143 if (!rc)
1144 lq_data->search_better_tbl = 1;
1145
1146 if (!rc)
1147 goto out;
1148 break;
1149 case IWL_SISO_SWITCH_GI:
1150 IWL_DEBUG_HT("LQ: SISO SWITCH TO GI\n");
1151 memcpy(search_tbl, tbl, sz);
1152 search_tbl->action = 0;
1153 if (search_tbl->is_SGI)
1154 search_tbl->is_SGI = 0;
1155 else if (!is_green)
1156 search_tbl->is_SGI = 1;
1157 else
1158 break;
1159 lq_data->search_better_tbl = 1;
1160 if ((tbl->lq_type == LQ_SISO) &&
1161 (tbl->is_SGI)) {
1162 s32 tpt = lq_data->last_tpt / 100;
1163 if (((!tbl->is_fat) &&
1164 (tpt >= expected_tpt_siso20MHz[index])) ||
1165 ((tbl->is_fat) &&
1166 (tpt >= expected_tpt_siso40MHz[index])))
1167 lq_data->search_better_tbl = 0;
1168 }
1169 rs_get_expected_tpt_table(lq_data, search_tbl);
1170 rs_mcs_from_tbl(&search_tbl->current_rate,
1171 search_tbl, index, is_green);
1172 goto out;
1173 }
1174 tbl->action++;
1175 if (tbl->action > IWL_SISO_SWITCH_GI)
1176 tbl->action = IWL_SISO_SWITCH_ANTENNA;
1177
1178 if (tbl->action == start_action)
1179 break;
1180 }
1181 return 0;
1182
1183 out:
1184 tbl->action++;
1185 if (tbl->action > IWL_SISO_SWITCH_GI)
1186 tbl->action = IWL_SISO_SWITCH_ANTENNA;
1187 return 0;
1188}
1189
1190static int rs_move_mimo_to_other(struct iwl_priv *priv,
1191 struct iwl_rate_scale_priv *lq_data,
1192 int index)
1193{
1194 int rc = -1;
1195 s8 is_green = lq_data->is_green;
1196 struct iwl_scale_tbl_info *tbl =
1197 &(lq_data->lq_info[lq_data->active_tbl]);
1198 struct iwl_scale_tbl_info *search_tbl =
1199 &(lq_data->lq_info[(1 - lq_data->active_tbl)]);
1200 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1201 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1202 u8 start_action = tbl->action;
1203
1204 for (;;) {
1205 lq_data->action_counter++;
1206 switch (tbl->action) {
1207 case IWL_MIMO_SWITCH_ANTENNA_A:
1208 case IWL_MIMO_SWITCH_ANTENNA_B:
1209 IWL_DEBUG_HT("LQ: MIMO SWITCH TO SISO\n");
1210 memcpy(search_tbl, tbl, sz);
1211 search_tbl->lq_type = LQ_SISO;
1212 search_tbl->is_SGI = 0;
1213 search_tbl->is_fat = 0;
1214 if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A)
1215 search_tbl->antenna_type = ANT_MAIN;
1216 else
1217 search_tbl->antenna_type = ANT_AUX;
1218
1219 rc = rs_switch_to_siso(priv, lq_data, search_tbl,
1220 index);
1221 if (!rc) {
1222 lq_data->search_better_tbl = 1;
1223 goto out;
1224 }
1225 break;
1226
1227 case IWL_MIMO_SWITCH_GI:
1228 IWL_DEBUG_HT("LQ: MIMO SWITCH TO GI\n");
1229 memcpy(search_tbl, tbl, sz);
1230 search_tbl->lq_type = LQ_MIMO;
1231 search_tbl->antenna_type = ANT_BOTH;
1232 search_tbl->action = 0;
1233 if (search_tbl->is_SGI)
1234 search_tbl->is_SGI = 0;
1235 else
1236 search_tbl->is_SGI = 1;
1237 lq_data->search_better_tbl = 1;
1238 if ((tbl->lq_type == LQ_MIMO) &&
1239 (tbl->is_SGI)) {
1240 s32 tpt = lq_data->last_tpt / 100;
1241 if (((!tbl->is_fat) &&
1242 (tpt >= expected_tpt_mimo20MHz[index])) ||
1243 ((tbl->is_fat) &&
1244 (tpt >= expected_tpt_mimo40MHz[index])))
1245 lq_data->search_better_tbl = 0;
1246 }
1247 rs_get_expected_tpt_table(lq_data, search_tbl);
1248 rs_mcs_from_tbl(&search_tbl->current_rate,
1249 search_tbl, index, is_green);
1250 goto out;
1251
1252 }
1253 tbl->action++;
1254 if (tbl->action > IWL_MIMO_SWITCH_GI)
1255 tbl->action = IWL_MIMO_SWITCH_ANTENNA_A;
1256
1257 if (tbl->action == start_action)
1258 break;
1259 }
1260
1261 return 0;
1262 out:
1263 tbl->action++;
1264 if (tbl->action > IWL_MIMO_SWITCH_GI)
1265 tbl->action = IWL_MIMO_SWITCH_ANTENNA_A;
1266 return 0;
1267
1268}
1269
1270static void rs_stay_in_table(struct iwl_rate_scale_priv *lq_data)
1271{
1272 struct iwl_scale_tbl_info *tbl;
1273 int i;
1274 int active_tbl;
1275 int flush_interval_passed = 0;
1276
1277 active_tbl = lq_data->active_tbl;
1278
1279 tbl = &(lq_data->lq_info[active_tbl]);
1280
1281 if (lq_data->stay_in_tbl) {
1282
1283 if (lq_data->flush_timer)
1284 flush_interval_passed =
1285 time_after(jiffies,
1286 (unsigned long)(lq_data->flush_timer +
1287 IWL_RATE_SCALE_FLUSH_INTVL));
1288
1289 flush_interval_passed = 0;
1290 if ((lq_data->total_failed > lq_data->max_failure_limit) ||
1291 (lq_data->total_success > lq_data->max_success_limit) ||
1292 ((!lq_data->search_better_tbl) && (lq_data->flush_timer)
1293 && (flush_interval_passed))) {
1294 IWL_DEBUG_HT("LQ: stay is expired %d %d %d\n",
1295 lq_data->total_failed,
1296 lq_data->total_success,
1297 flush_interval_passed);
1298 lq_data->stay_in_tbl = 0;
1299 lq_data->total_failed = 0;
1300 lq_data->total_success = 0;
1301 lq_data->flush_timer = 0;
1302 } else if (lq_data->table_count > 0) {
1303 lq_data->table_count++;
1304 if (lq_data->table_count >=
1305 lq_data->table_count_limit) {
1306 lq_data->table_count = 0;
1307
1308 IWL_DEBUG_HT("LQ: stay in table clear win\n");
1309 for (i = 0; i < IWL_RATE_COUNT; i++)
1310 rs_rate_scale_clear_window(
1311 &(tbl->win[i]));
1312 }
1313 }
1314
1315 if (!lq_data->stay_in_tbl) {
1316 for (i = 0; i < IWL_RATE_COUNT; i++)
1317 rs_rate_scale_clear_window(&(tbl->win[i]));
1318 }
1319 }
1320}
1321
1322static void rs_rate_scale_perform(struct iwl_priv *priv,
1323 struct net_device *dev,
1324 struct ieee80211_hdr *hdr,
1325 struct sta_info *sta)
1326{
1327 int low = IWL_RATE_INVALID;
1328 int high = IWL_RATE_INVALID;
1329 int index;
1330 int i;
1331 struct iwl_rate_scale_data *window = NULL;
1332 int current_tpt = IWL_INVALID_VALUE;
1333 int low_tpt = IWL_INVALID_VALUE;
1334 int high_tpt = IWL_INVALID_VALUE;
1335 u32 fail_count;
1336 s8 scale_action = 0;
1337 u16 fc, rate_mask;
1338 u8 update_lq = 0;
1339 struct iwl_rate_scale_priv *lq_data;
1340 struct iwl_scale_tbl_info *tbl, *tbl1;
1341 u16 rate_scale_index_msk = 0;
1342 struct iwl_rate mcs_rate;
1343 u8 is_green = 0;
1344 u8 active_tbl = 0;
1345 u8 done_search = 0;
1346 u16 high_low;
1347
1348 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n");
1349
1350 fc = le16_to_cpu(hdr->frame_control);
1351 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) {
1352 /* Send management frames and broadcast/multicast data using
1353 * lowest rate. */
1354 /* TODO: this could probably be improved.. */
1355 return;
1356 }
1357
1358 if (!sta || !sta->rate_ctrl_priv)
1359 return;
1360
1361 if (!priv->lq_mngr.lq_ready) {
1362 IWL_DEBUG_RATE("rate scaling still not ready\n");
1363 return;
1364 }
1365 lq_data = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv;
1366
1367 if (!lq_data->search_better_tbl)
1368 active_tbl = lq_data->active_tbl;
1369 else
1370 active_tbl = 1 - lq_data->active_tbl;
1371
1372 tbl = &(lq_data->lq_info[active_tbl]);
1373 is_green = lq_data->is_green;
1374
1375 index = sta->last_txrate;
1376
1377 IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index,
1378 tbl->lq_type);
1379
1380 rs_get_supported_rates(lq_data, hdr, tbl->lq_type,
1381 &rate_mask);
1382
1383 IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask);
1384
1385 /* mask with station rate restriction */
1386 if (is_legacy(tbl->lq_type)) {
1387 if (lq_data->phymode == (u8) MODE_IEEE80211A)
1388 rate_scale_index_msk = (u16) (rate_mask &
1389 (sta->supp_rates << IWL_FIRST_OFDM_RATE));
1390 else
1391 rate_scale_index_msk = (u16) (rate_mask &
1392 sta->supp_rates);
1393
1394 } else
1395 rate_scale_index_msk = rate_mask;
1396
1397 if (!rate_scale_index_msk)
1398 rate_scale_index_msk = rate_mask;
1399
1400 if (index < 0 || !((1 << index) & rate_scale_index_msk)) {
1401 index = IWL_INVALID_VALUE;
1402 update_lq = 1;
1403
1404 /* get the highest available rate */
1405 for (i = 0; i < IWL_RATE_COUNT; i++) {
1406 if ((1 << i) & rate_scale_index_msk)
1407 index = i;
1408 }
1409
1410 if (index == IWL_INVALID_VALUE) {
1411 IWL_WARNING("Cannot find a suitable rate\n");
1412 return;
1413 }
1414 }
1415
1416 if (!tbl->expected_tpt)
1417 rs_get_expected_tpt_table(lq_data, tbl);
1418
1419 window = &(tbl->win[index]);
1420
1421 fail_count = window->counter - window->success_counter;
1422 if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1423 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))
1424 || (tbl->expected_tpt == NULL)) {
1425 IWL_DEBUG_RATE("LQ: still below TH succ %d total %d "
1426 "for index %d\n",
1427 window->success_counter, window->counter, index);
1428 window->average_tpt = IWL_INVALID_VALUE;
1429 rs_stay_in_table(lq_data);
1430 if (update_lq) {
1431 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green);
1432 rs_fill_link_cmd(lq_data, &mcs_rate, &lq_data->lq, sta);
1433 rs_send_lq_cmd(priv, &lq_data->lq, CMD_ASYNC);
1434 }
1435 goto out;
1436
1437 } else
1438 window->average_tpt = ((window->success_ratio *
1439 tbl->expected_tpt[index] + 64) / 128);
1440
1441 if (lq_data->search_better_tbl) {
1442 int success_limit = IWL_RATE_SCALE_SWITCH;
1443
1444 if ((window->success_ratio > success_limit) ||
1445 (window->average_tpt > lq_data->last_tpt)) {
1446 if (!is_legacy(tbl->lq_type)) {
1447 IWL_DEBUG_HT("LQ: we are switching to HT"
1448 " rate suc %d current tpt %d"
1449 " old tpt %d\n",
1450 window->success_ratio,
1451 window->average_tpt,
1452 lq_data->last_tpt);
1453 lq_data->enable_counter = 1;
1454 }
1455 lq_data->active_tbl = active_tbl;
1456 current_tpt = window->average_tpt;
1457 } else {
1458 tbl->lq_type = LQ_NONE;
1459 active_tbl = lq_data->active_tbl;
1460 tbl = &(lq_data->lq_info[active_tbl]);
1461
1462 index = iwl_rate_index_from_plcp(
1463 tbl->current_rate.rate_n_flags);
1464
1465 update_lq = 1;
1466 current_tpt = lq_data->last_tpt;
1467 IWL_DEBUG_HT("XXY GO BACK TO OLD TABLE\n");
1468 }
1469 lq_data->search_better_tbl = 0;
1470 done_search = 1;
1471 goto lq_update;
1472 }
1473
1474 high_low = rs_get_adjacent_rate(index, rate_scale_index_msk,
1475 tbl->lq_type);
1476 low = high_low & 0xff;
1477 high = (high_low >> 8) & 0xff;
1478
1479 current_tpt = window->average_tpt;
1480
1481 if (low != IWL_RATE_INVALID)
1482 low_tpt = tbl->win[low].average_tpt;
1483
1484 if (high != IWL_RATE_INVALID)
1485 high_tpt = tbl->win[high].average_tpt;
1486
1487
1488 scale_action = 1;
1489
1490 if ((window->success_ratio <= IWL_RATE_DECREASE_TH) ||
1491 (current_tpt == 0)) {
1492 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n");
1493 scale_action = -1;
1494 } else if ((low_tpt == IWL_INVALID_VALUE) &&
1495 (high_tpt == IWL_INVALID_VALUE))
1496 scale_action = 1;
1497 else if ((low_tpt != IWL_INVALID_VALUE) &&
1498 (high_tpt != IWL_INVALID_VALUE) &&
1499 (low_tpt < current_tpt) &&
1500 (high_tpt < current_tpt))
1501 scale_action = 0;
1502 else {
1503 if (high_tpt != IWL_INVALID_VALUE) {
1504 if (high_tpt > current_tpt)
1505 scale_action = 1;
1506 else {
1507 IWL_DEBUG_RATE
1508 ("decrease rate because of high tpt\n");
1509 scale_action = -1;
1510 }
1511 } else if (low_tpt != IWL_INVALID_VALUE) {
1512 if (low_tpt > current_tpt) {
1513 IWL_DEBUG_RATE
1514 ("decrease rate because of low tpt\n");
1515 scale_action = -1;
1516 } else
1517 scale_action = 1;
1518 }
1519 }
1520
1521 if (scale_action == -1) {
1522 if ((low != IWL_RATE_INVALID) &&
1523 ((window->success_ratio > IWL_RATE_HIGH_TH) ||
1524 (current_tpt > (100 * tbl->expected_tpt[low]))))
1525 scale_action = 0;
1526 } else if ((scale_action == 1) &&
1527 (window->success_ratio < IWL_RATE_INCREASE_TH))
1528 scale_action = 0;
1529
1530 switch (scale_action) {
1531 case -1:
1532 if (low != IWL_RATE_INVALID) {
1533 update_lq = 1;
1534 index = low;
1535 }
1536 break;
1537 case 1:
1538 if (high != IWL_RATE_INVALID) {
1539 update_lq = 1;
1540 index = high;
1541 }
1542
1543 break;
1544 case 0:
1545 default:
1546 break;
1547 }
1548
1549 IWL_DEBUG_HT("choose rate scale index %d action %d low %d "
1550 "high %d type %d\n",
1551 index, scale_action, low, high, tbl->lq_type);
1552
1553 lq_update:
1554 if (update_lq) {
1555 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green);
1556 rs_fill_link_cmd(lq_data, &mcs_rate, &lq_data->lq, sta);
1557 rs_send_lq_cmd(priv, &lq_data->lq, CMD_ASYNC);
1558 }
1559 rs_stay_in_table(lq_data);
1560
1561 if (!update_lq && !done_search && !lq_data->stay_in_tbl) {
1562 lq_data->last_tpt = current_tpt;
1563
1564 if (is_legacy(tbl->lq_type))
1565 rs_move_legacy_other(priv, lq_data, index);
1566 else if (is_siso(tbl->lq_type))
1567 rs_move_siso_to_other(priv, lq_data, index);
1568 else
1569 rs_move_mimo_to_other(priv, lq_data, index);
1570
1571 if (lq_data->search_better_tbl) {
1572 tbl = &(lq_data->lq_info[(1 - lq_data->active_tbl)]);
1573 for (i = 0; i < IWL_RATE_COUNT; i++)
1574 rs_rate_scale_clear_window(&(tbl->win[i]));
1575
1576 index = iwl_rate_index_from_plcp(
1577 tbl->current_rate.rate_n_flags);
1578
1579 IWL_DEBUG_HT("Switch current mcs: %X index: %d\n",
1580 tbl->current_rate.rate_n_flags, index);
1581 rs_fill_link_cmd(lq_data, &tbl->current_rate,
1582 &(lq_data->lq), sta);
1583 rs_send_lq_cmd(priv, &lq_data->lq, CMD_ASYNC);
1584 }
1585 tbl1 = &(lq_data->lq_info[lq_data->active_tbl]);
1586
1587 if (is_legacy(tbl1->lq_type) &&
1588#ifdef CONFIG_IWLWIFI_HT
1589 !priv->current_assoc_ht.is_ht &&
1590#endif
1591 (lq_data->action_counter >= 1)) {
1592 lq_data->action_counter = 0;
1593 IWL_DEBUG_HT("LQ: STAY in legacy table\n");
1594 rs_set_stay_in_table(1, lq_data);
1595 }
1596
1597 if (lq_data->enable_counter &&
1598 (lq_data->action_counter >= IWL_ACTION_LIMIT)) {
1599#ifdef CONFIG_IWLWIFI_HT_AGG
1600 if ((lq_data->last_tpt > TID_AGG_TPT_THREHOLD) &&
1601 (priv->lq_mngr.agg_ctrl.auto_agg)) {
1602 priv->lq_mngr.agg_ctrl.tid_retry =
1603 TID_ALL_SPECIFIED;
1604 schedule_work(&priv->agg_work);
1605 }
1606#endif /*CONFIG_IWLWIFI_HT_AGG */
1607 lq_data->action_counter = 0;
1608 rs_set_stay_in_table(0, lq_data);
1609 }
1610 } else {
1611 if ((!update_lq) && (!done_search) && (!lq_data->flush_timer))
1612 lq_data->flush_timer = jiffies;
1613 }
1614
1615out:
1616 rs_mcs_from_tbl(&tbl->current_rate, tbl, index, is_green);
1617 i = index;
1618 sta->last_txrate = i;
1619
1620 /* sta->txrate is an index into the A-mode rates, which start
1621 * at IWL_FIRST_OFDM_RATE.
1622 */
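	/* Example: last_txrate == IWL_RATE_6M_INDEX maps to txrate 0 for an
	 * A-mode (5.2GHz) station, since 6M is the first OFDM rate. */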
1623 if (lq_data->phymode == (u8) MODE_IEEE80211A)
1624 sta->txrate = i - IWL_FIRST_OFDM_RATE;
1625 else
1626 sta->txrate = i;
1627
1628 return;
1629}
1630
1631
1632static void rs_initialize_lq(struct iwl_priv *priv,
1633 struct sta_info *sta)
1634{
1635 int i;
1636 struct iwl_rate_scale_priv *lq;
1637 struct iwl_scale_tbl_info *tbl;
1638 u8 active_tbl = 0;
1639 int rate_idx;
1640 u8 use_green = rs_use_green(priv);
1641 struct iwl_rate mcs_rate;
1642
1643 if (!sta || !sta->rate_ctrl_priv)
1644 goto out;
1645
1646 lq = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv;
1647 i = sta->last_txrate;
1648
1649 if ((lq->lq.sta_id == 0xff) &&
1650 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS))
1651 goto out;
1652
1653 if (!lq->search_better_tbl)
1654 active_tbl = lq->active_tbl;
1655 else
1656 active_tbl = 1 - lq->active_tbl;
1657
1658 tbl = &(lq->lq_info[active_tbl]);
1659
1660 if ((i < 0) || (i >= IWL_RATE_COUNT))
1661 i = 0;
1662
1663 mcs_rate.rate_n_flags = iwl_rates[i].plcp;
1664 mcs_rate.rate_n_flags |= RATE_MCS_ANT_B_MSK;
1665 mcs_rate.rate_n_flags &= ~RATE_MCS_ANT_A_MSK;
1666
1667 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
1668 mcs_rate.rate_n_flags |= RATE_MCS_CCK_MSK;
1669
1670 tbl->antenna_type = ANT_AUX;
1671 rs_get_tbl_info_from_mcs(&mcs_rate, priv->phymode, tbl, &rate_idx);
1672 if (!rs_is_ant_connected(priv->valid_antenna, tbl->antenna_type))
1673 rs_toggle_antenna(&mcs_rate, tbl);
1674
1675 rs_mcs_from_tbl(&mcs_rate, tbl, rate_idx, use_green);
1676 tbl->current_rate.rate_n_flags = mcs_rate.rate_n_flags;
1677 rs_get_expected_tpt_table(lq, tbl);
1678 rs_fill_link_cmd(lq, &mcs_rate, &(lq->lq), sta);
1679 rs_send_lq_cmd(priv, &lq->lq, CMD_ASYNC);
1680 out:
1681 return;
1682}
1683
1684static struct ieee80211_rate *rs_get_lowest_rate(struct ieee80211_local
1685 *local)
1686{
1687 struct ieee80211_hw_mode *mode = local->oper_hw_mode;
1688 int i;
1689
1690 for (i = 0; i < mode->num_rates; i++) {
1691 struct ieee80211_rate *rate = &mode->rates[i];
1692
1693 if (rate->flags & IEEE80211_RATE_SUPPORTED)
1694 return rate;
1695 }
1696
1697 return &mode->rates[0];
1698}
1699
1700static struct ieee80211_rate *rs_get_rate(void *priv_rate,
1701 struct net_device *dev,
1702 struct sk_buff *skb,
1703 struct rate_control_extra
1704 *extra)
1705{
1706
1707 int i;
1708 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1709 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1710 struct sta_info *sta;
1711 u16 fc;
1712 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
1713 struct iwl_rate_scale_priv *lq;
1714
1715 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n");
1716
1717 memset(extra, 0, sizeof(*extra));
1718
1719 fc = le16_to_cpu(hdr->frame_control);
1720 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) {
1721 /* Send management frames and broadcast/multicast data using
1722 * lowest rate. */
1723 /* TODO: this could probably be improved.. */
1724 return rs_get_lowest_rate(local);
1725 }
1726
1727 sta = sta_info_get(local, hdr->addr1);
1728
1729 if (!sta || !sta->rate_ctrl_priv) {
1730 if (sta)
1731 sta_info_put(sta);
1732 return rs_get_lowest_rate(local);
1733 }
1734
1735 lq = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv;
1736 i = sta->last_txrate;
1737
1738 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && !lq->ibss_sta_added) {
1739 u8 sta_id = iwl_hw_find_station(priv, hdr->addr1);
1740
1741 if (sta_id == IWL_INVALID_STATION) {
1742 IWL_DEBUG_RATE("LQ: ADD station " MAC_FMT "\n",
1743 MAC_ARG(hdr->addr1));
1744 sta_id = iwl_add_station(priv,
1745 hdr->addr1, 0, CMD_ASYNC);
1746 }
1747 if ((sta_id != IWL_INVALID_STATION)) {
1748 lq->lq.sta_id = sta_id;
1749 lq->lq.rs_table[0].rate_n_flags = 0;
1750 lq->ibss_sta_added = 1;
1751 rs_initialize_lq(priv, sta);
1752 }
1753 if (!lq->ibss_sta_added)
1754 goto done;
1755 }
1756
1757 done:
1758 sta_info_put(sta);
1759 if ((i < 0) || (i > IWL_RATE_COUNT))
1760 return rs_get_lowest_rate(local);
1761
1762 return &priv->ieee_rates[i];
1763}
1764
1765static void *rs_alloc_sta(void *priv, gfp_t gfp)
1766{
1767 struct iwl_rate_scale_priv *crl;
1768 int i, j;
1769
1770 IWL_DEBUG_RATE("create station rate scale window\n");
1771
1772 crl = kzalloc(sizeof(struct iwl_rate_scale_priv), gfp);
1773
1774 if (crl == NULL)
1775 return NULL;
1776
1777 memset(crl, 0, sizeof(struct iwl_rate_scale_priv));
1778 crl->lq.sta_id = 0xff;
1779
1780 for (j = 0; j < LQ_SIZE; j++)
1781 for (i = 0; i < IWL_RATE_COUNT; i++)
1782 rs_rate_scale_clear_window(&(crl->lq_info[j].win[i]));
1783
1784 return crl;
1785}
1786
1787static void rs_rate_init(void *priv_rate, void *priv_sta,
1788 struct ieee80211_local *local,
1789 struct sta_info *sta)
1790{
1791 int i, j;
1792 struct ieee80211_hw_mode *mode = local->oper_hw_mode;
1793 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
1794 struct iwl_rate_scale_priv *crl = priv_sta;
1795
1796 memset(crl, 0, sizeof(struct iwl_rate_scale_priv));
1797
1798 crl->lq.sta_id = 0xff;
1799 crl->flush_timer = 0;
1800 sta->txrate = 3;
1801 for (j = 0; j < LQ_SIZE; j++)
1802 for (i = 0; i < IWL_RATE_COUNT; i++)
1803 rs_rate_scale_clear_window(&(crl->lq_info[j].win[i]));
1804
1805 IWL_DEBUG_RATE("rate scale global init\n");
1806 /* TODO: what is a good starting rate for STA? About middle? Maybe not
1807 * the lowest or the highest rate.. Could consider using RSSI from
1808 * previous packets? Need to have IEEE 802.1X auth succeed immediately
1809 * after assoc.. */
1810
1811 crl->ibss_sta_added = 0;
1812 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
1813 u8 sta_id = iwl_hw_find_station(priv, sta->addr);
1814 /* for IBSS, the calls come from a tasklet */
1815 IWL_DEBUG_HT("LQ: ADD station " MAC_FMT " \n",
1816 MAC_ARG(sta->addr));
1817
1818 if (sta_id == IWL_INVALID_STATION) {
1819 IWL_DEBUG_RATE("LQ: ADD station " MAC_FMT "\n",
1820 MAC_ARG(sta->addr));
1821 sta_id = iwl_add_station(priv,
1822 sta->addr, 0, CMD_ASYNC);
1823 }
1824 if ((sta_id != IWL_INVALID_STATION)) {
1825 crl->lq.sta_id = sta_id;
1826 crl->lq.rs_table[0].rate_n_flags = 0;
1827 }
1828 /* FIXME: this is a workaround; remove it later */
1829 priv->assoc_station_added = 1;
1830 }
1831
1832 for (i = 0; i < mode->num_rates; i++) {
1833 if ((sta->supp_rates & BIT(i)) &&
1834 (mode->rates[i].flags & IEEE80211_RATE_SUPPORTED))
1835 sta->txrate = i;
1836 }
1837 sta->last_txrate = sta->txrate;
1838 /* For MODE_IEEE80211A, CCK rates are at the end of the
1839 * rate table.
1840 */
1841 if (local->hw.conf.phymode == MODE_IEEE80211A)
1842 sta->last_txrate += IWL_FIRST_OFDM_RATE;
1843
1844 crl->is_dup = priv->is_dup;
1845 crl->valid_antenna = priv->valid_antenna;
1846 crl->antenna = priv->antenna;
1847 crl->is_green = rs_use_green(priv);
1848 crl->active_rate = priv->active_rate;
1849 crl->active_rate &= ~(0x1000);
1850 crl->active_rate_basic = priv->active_rate_basic;
1851 crl->phymode = priv->phymode;
1852#ifdef CONFIG_IWLWIFI_HT
1853 crl->active_siso_rate = (priv->current_assoc_ht.supp_rates[0] << 1);
1854 crl->active_siso_rate |= (priv->current_assoc_ht.supp_rates[0] & 0x1);
1855 crl->active_siso_rate &= ~((u16)0x2);
1856 crl->active_siso_rate = crl->active_siso_rate << IWL_FIRST_OFDM_RATE;
1857
1858 crl->active_mimo_rate = (priv->current_assoc_ht.supp_rates[1] << 1);
1859 crl->active_mimo_rate |= (priv->current_assoc_ht.supp_rates[1] & 0x1);
1860 crl->active_mimo_rate &= ~((u16)0x2);
1861 crl->active_mimo_rate = crl->active_mimo_rate << IWL_FIRST_OFDM_RATE;
1862 IWL_DEBUG_HT("MIMO RATE 0x%X SISO MASK 0x%X\n", crl->active_siso_rate,
1863 crl->active_mimo_rate);
1864#endif /*CONFIG_IWLWIFI_HT*/
1865
1866 if (priv->assoc_station_added)
1867 priv->lq_mngr.lq_ready = 1;
1868
1869 rs_initialize_lq(priv, sta);
1870}
1871
1872static int rs_fill_link_cmd(struct iwl_rate_scale_priv *lq_data,
1873 struct iwl_rate *tx_mcs,
1874 struct iwl_link_quality_cmd *lq_cmd,
1875 struct sta_info *sta)
1876{
1877 int index = 0;
1878 int rc = 0;
1879 int rate_idx;
1880 u8 ant_toggle_count = 0;
1881 u8 use_ht_possible = 1;
1882 u8 repeat_cur_rate = 0;
1883 struct iwl_rate new_rate;
1884 struct iwl_scale_tbl_info tbl_type = { 0 };
1885
1886 rs_get_tbl_info_from_mcs(tx_mcs, lq_data->phymode,
1887 &tbl_type, &rate_idx);
1888
1889 if (is_legacy(tbl_type.lq_type)) {
1890 ant_toggle_count = 1;
1891 repeat_cur_rate = IWL_NUMBER_TRY;
1892 } else
1893 repeat_cur_rate = IWL_HT_NUMBER_TRY;
1894
1895 lq_cmd->general_params.mimo_delimiter =
1896 is_mimo(tbl_type.lq_type) ? 1 : 0;
1897 lq_cmd->rs_table[index].rate_n_flags =
1898 cpu_to_le32(tx_mcs->rate_n_flags);
1899 new_rate.rate_n_flags = tx_mcs->rate_n_flags;
1900
1901 if (is_mimo(tbl_type.lq_type) || (tbl_type.antenna_type == ANT_MAIN))
1902 lq_cmd->general_params.single_stream_ant_msk = 1;
1903 else
1904 lq_cmd->general_params.single_stream_ant_msk = 2;
1905
1906 index++;
1907 repeat_cur_rate--;
1908
1909 while (index < LINK_QUAL_MAX_RETRY_NUM) {
1910 while (repeat_cur_rate && (index < LINK_QUAL_MAX_RETRY_NUM)) {
1911 if (is_legacy(tbl_type.lq_type)) {
1912 if (ant_toggle_count <
1913 NUM_TRY_BEFORE_ANTENNA_TOGGLE)
1914 ant_toggle_count++;
1915 else {
1916 rs_toggle_antenna(&new_rate, &tbl_type);
1917 ant_toggle_count = 1;
1918 }
1919 }
1920 lq_cmd->rs_table[index].rate_n_flags =
1921 cpu_to_le32(new_rate.rate_n_flags);
1922 repeat_cur_rate--;
1923 index++;
1924 }
1925
1926 rs_get_tbl_info_from_mcs(&new_rate, lq_data->phymode, &tbl_type,
1927 &rate_idx);
1928
1929 if (is_mimo(tbl_type.lq_type))
1930 lq_cmd->general_params.mimo_delimiter = index;
1931
1932 rs_get_lower_rate(lq_data, &tbl_type, rate_idx,
1933 use_ht_possible, &new_rate, sta);
1934
1935 if (is_legacy(tbl_type.lq_type)) {
1936 if (ant_toggle_count < NUM_TRY_BEFORE_ANTENNA_TOGGLE)
1937 ant_toggle_count++;
1938 else {
1939 rs_toggle_antenna(&new_rate, &tbl_type);
1940 ant_toggle_count = 1;
1941 }
1942 repeat_cur_rate = IWL_NUMBER_TRY;
1943 } else
1944 repeat_cur_rate = IWL_HT_NUMBER_TRY;
1945
1946 use_ht_possible = 0;
1947
1948 lq_cmd->rs_table[index].rate_n_flags =
1949 cpu_to_le32(new_rate.rate_n_flags);
1950 /* lq_cmd->rs_table[index].rate_n_flags = 0x800d; */
1951
1952 index++;
1953 repeat_cur_rate--;
1954 }
1955
1956 /* lq_cmd->rs_table[0].rate_n_flags = 0x800d; */
1957
1958 lq_cmd->general_params.dual_stream_ant_msk = 3;
1959 lq_cmd->agg_params.agg_dis_start_th = 3;
1960 lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000);
1961 return rc;
1962}
1963
1964static void *rs_alloc(struct ieee80211_local *local)
1965{
1966 return local->hw.priv;
1967}
1968/* the rate scale framework requires a free function to be implemented */
1969static void rs_free(void *priv_rate)
1970{
1971 return;
1972}
1973
1974static void rs_clear(void *priv_rate)
1975{
1976 struct iwl_priv *priv = (struct iwl_priv *) priv_rate;
1977
1978 IWL_DEBUG_RATE("enter\n");
1979
1980 priv->lq_mngr.lq_ready = 0;
1981#ifdef CONFIG_IWLWIFI_HT
1982#ifdef CONFIG_IWLWIFI_HT_AGG
1983 if (priv->lq_mngr.agg_ctrl.granted_ba)
1984 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);
1985#endif /*CONFIG_IWLWIFI_HT_AGG */
1986#endif /* CONFIG_IWLWIFI_HT */
1987
1988 IWL_DEBUG_RATE("leave\n");
1989}
1990
1991static void rs_free_sta(void *priv, void *priv_sta)
1992{
1993 struct iwl_rate_scale_priv *rs_priv = priv_sta;
1994
1995 IWL_DEBUG_RATE("enter\n");
1996 kfree(rs_priv);
1997 IWL_DEBUG_RATE("leave\n");
1998}
1999
2000
2001static struct rate_control_ops rs_ops = {
2002 .module = NULL,
2003 .name = RS_NAME,
2004 .tx_status = rs_tx_status,
2005 .get_rate = rs_get_rate,
2006 .rate_init = rs_rate_init,
2007 .clear = rs_clear,
2008 .alloc = rs_alloc,
2009 .free = rs_free,
2010 .alloc_sta = rs_alloc_sta,
2011 .free_sta = rs_free_sta,
2012};
2013
2014int iwl_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2015{
2016 struct ieee80211_local *local = hw_to_local(hw);
2017 struct iwl_priv *priv = hw->priv;
2018 struct iwl_rate_scale_priv *rs_priv;
2019 struct sta_info *sta;
2020 int count = 0, i;
2021 u32 samples = 0, success = 0, good = 0;
2022 unsigned long now = jiffies;
2023 u32 max_time = 0;
2024 u8 lq_type, antenna;
2025
2026 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
2027 if (!sta || !sta->rate_ctrl_priv) {
2028 if (sta) {
2029 sta_info_put(sta);
2030 IWL_DEBUG_RATE("leave - no private rate data!\n");
2031 } else
2032 IWL_DEBUG_RATE("leave - no station!\n");
2033 return sprintf(buf, "station %d not found\n", sta_id);
2034 }
2035
2036 rs_priv = (void *)sta->rate_ctrl_priv;
2037
2038 lq_type = rs_priv->lq_info[rs_priv->active_tbl].lq_type;
2039 antenna = rs_priv->lq_info[rs_priv->active_tbl].antenna_type;
2040
2041 if (is_legacy(lq_type))
2042 i = IWL_RATE_54M_INDEX;
2043 else
2044 i = IWL_RATE_60M_INDEX;
2045 while (1) {
2046 u64 mask;
2047 int j;
2048 int active = rs_priv->active_tbl;
2049
2050 count +=
2051 sprintf(&buf[count], " %2dMbs: ", iwl_rates[i].ieee / 2);
2052
2053 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
2054 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
2055 buf[count++] =
2056 (rs_priv->lq_info[active].win[i].data & mask)
2057 ? '1' : '0';
2058
2059 samples += rs_priv->lq_info[active].win[i].counter;
2060 good += rs_priv->lq_info[active].win[i].success_counter;
2061 success += rs_priv->lq_info[active].win[i].success_counter *
2062 iwl_rates[i].ieee;
2063
2064 if (rs_priv->lq_info[active].win[i].stamp) {
2065 int delta =
2066 jiffies_to_msecs(now -
2067 rs_priv->lq_info[active].win[i].stamp);
2068
2069 if (delta > max_time)
2070 max_time = delta;
2071
2072 count += sprintf(&buf[count], "%5dms\n", delta);
2073 } else
2074 buf[count++] = '\n';
2075
2076 j = iwl_get_prev_ieee_rate(i);
2077 if (j == i)
2078 break;
2079 i = j;
2080 }
2081
2082 /* Display the average rate of all samples taken.
2083 *
2084 * NOTE: We multiply the # of samples by 2 since the IEEE measurement
2085 * added from iwl_rates is actually 2X the rate */
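	/* Worked example: if every sample succeeded at 54Mbs, each success
	 * adds IWL_RATE_54M_IEEE (108), so success == 108 * samples and the
	 * average below comes out to 108 / 2 == 54Mbs. */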
2086 if (samples)
2087 count += sprintf(&buf[count],
2088 "\nAverage rate is %3d.%02dMbs over last %4dms\n"
2089 "%3d%% success (%d good packets over %d tries)\n",
2090 success / (2 * samples), (success * 5 / samples) % 10,
2091 max_time, good * 100 / samples, good, samples);
2092 else
2093 count += sprintf(&buf[count], "\nAverage rate: 0Mbs\n");
2094 count += sprintf(&buf[count], "\nrate scale type %d antenna %d "
2095 "active_search %d rate index %d\n", lq_type, antenna,
2096 rs_priv->search_better_tbl, sta->last_txrate);
2097
2098 sta_info_put(sta);
2099 return count;
2100}
2101
2102void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
2103{
2104 struct iwl_priv *priv = hw->priv;
2105
2106 priv->lq_mngr.lq_ready = 1;
2107}
2108
2109void iwl_rate_control_register(struct ieee80211_hw *hw)
2110{
2111 ieee80211_rate_control_register(&rs_ops);
2112}
2113
2114void iwl_rate_control_unregister(struct ieee80211_hw *hw)
2115{
2116 ieee80211_rate_control_unregister(&rs_ops);
2117}
2118
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
new file mode 100644
index 000000000000..c6325f72df68
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
@@ -0,0 +1,266 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_4965_rs_h__
28#define __iwl_4965_rs_h__
29
30#include "iwl-4965.h"
31
32struct iwl_rate_info {
33 u8 plcp;
34 u8 plcp_siso;
35 u8 plcp_mimo;
36 u8 ieee;
37 u8 prev_ieee; /* previous rate in IEEE speeds */
38 u8 next_ieee; /* next rate in IEEE speeds */
39 u8 prev_rs; /* previous rate used in rs algo */
40 u8 next_rs; /* next rate used in rs algo */
41 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
42 u8 next_rs_tgg; /* next rate used in TGG rs algo */
43};
44
45enum {
46 IWL_RATE_1M_INDEX = 0,
47 IWL_RATE_2M_INDEX,
48 IWL_RATE_5M_INDEX,
49 IWL_RATE_11M_INDEX,
50 IWL_RATE_6M_INDEX,
51 IWL_RATE_9M_INDEX,
52 IWL_RATE_12M_INDEX,
53 IWL_RATE_18M_INDEX,
54 IWL_RATE_24M_INDEX,
55 IWL_RATE_36M_INDEX,
56 IWL_RATE_48M_INDEX,
57 IWL_RATE_54M_INDEX,
58 IWL_RATE_60M_INDEX,
59 IWL_RATE_COUNT,
60 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
61 IWL_RATE_INVALID = IWL_RATE_INVM_INDEX
62};
63
64enum {
65 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
66 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
67 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
68 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
69};
70
71/* #define vs. enum to keep from defaulting to 'large integer' */
72#define IWL_RATE_6M_MASK (1<<IWL_RATE_6M_INDEX)
73#define IWL_RATE_9M_MASK (1<<IWL_RATE_9M_INDEX)
74#define IWL_RATE_12M_MASK (1<<IWL_RATE_12M_INDEX)
75#define IWL_RATE_18M_MASK (1<<IWL_RATE_18M_INDEX)
76#define IWL_RATE_24M_MASK (1<<IWL_RATE_24M_INDEX)
77#define IWL_RATE_36M_MASK (1<<IWL_RATE_36M_INDEX)
78#define IWL_RATE_48M_MASK (1<<IWL_RATE_48M_INDEX)
79#define IWL_RATE_54M_MASK (1<<IWL_RATE_54M_INDEX)
80#define IWL_RATE_60M_MASK (1<<IWL_RATE_60M_INDEX)
81#define IWL_RATE_1M_MASK (1<<IWL_RATE_1M_INDEX)
82#define IWL_RATE_2M_MASK (1<<IWL_RATE_2M_INDEX)
83#define IWL_RATE_5M_MASK (1<<IWL_RATE_5M_INDEX)
84#define IWL_RATE_11M_MASK (1<<IWL_RATE_11M_INDEX)
85
86enum {
87 IWL_RATE_6M_PLCP = 13,
88 IWL_RATE_9M_PLCP = 15,
89 IWL_RATE_12M_PLCP = 5,
90 IWL_RATE_18M_PLCP = 7,
91 IWL_RATE_24M_PLCP = 9,
92 IWL_RATE_36M_PLCP = 11,
93 IWL_RATE_48M_PLCP = 1,
94 IWL_RATE_54M_PLCP = 3,
95 IWL_RATE_60M_PLCP = 3,
96 IWL_RATE_1M_PLCP = 10,
97 IWL_RATE_2M_PLCP = 20,
98 IWL_RATE_5M_PLCP = 55,
99 IWL_RATE_11M_PLCP = 110,
100};
101
102/* OFDM HT rate plcp */
103enum {
104 IWL_RATE_SISO_6M_PLCP = 0,
105 IWL_RATE_SISO_12M_PLCP = 1,
106 IWL_RATE_SISO_18M_PLCP = 2,
107 IWL_RATE_SISO_24M_PLCP = 3,
108 IWL_RATE_SISO_36M_PLCP = 4,
109 IWL_RATE_SISO_48M_PLCP = 5,
110 IWL_RATE_SISO_54M_PLCP = 6,
111 IWL_RATE_SISO_60M_PLCP = 7,
112 IWL_RATE_MIMO_6M_PLCP = 0x8,
113 IWL_RATE_MIMO_12M_PLCP = 0x9,
114 IWL_RATE_MIMO_18M_PLCP = 0xa,
115 IWL_RATE_MIMO_24M_PLCP = 0xb,
116 IWL_RATE_MIMO_36M_PLCP = 0xc,
117 IWL_RATE_MIMO_48M_PLCP = 0xd,
118 IWL_RATE_MIMO_54M_PLCP = 0xe,
119 IWL_RATE_MIMO_60M_PLCP = 0xf,
120 IWL_RATE_SISO_INVM_PLCP,
121 IWL_RATE_MIMO_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
122};
123
124enum {
125 IWL_RATE_6M_IEEE = 12,
126 IWL_RATE_9M_IEEE = 18,
127 IWL_RATE_12M_IEEE = 24,
128 IWL_RATE_18M_IEEE = 36,
129 IWL_RATE_24M_IEEE = 48,
130 IWL_RATE_36M_IEEE = 72,
131 IWL_RATE_48M_IEEE = 96,
132 IWL_RATE_54M_IEEE = 108,
133 IWL_RATE_60M_IEEE = 120,
134 IWL_RATE_1M_IEEE = 2,
135 IWL_RATE_2M_IEEE = 4,
136 IWL_RATE_5M_IEEE = 11,
137 IWL_RATE_11M_IEEE = 22,
138};
139
140#define IWL_CCK_BASIC_RATES_MASK \
141 (IWL_RATE_1M_MASK | \
142 IWL_RATE_2M_MASK)
143
144#define IWL_CCK_RATES_MASK \
145 (IWL_CCK_BASIC_RATES_MASK | \
146 IWL_RATE_5M_MASK | \
147 IWL_RATE_11M_MASK)
148
149#define IWL_OFDM_BASIC_RATES_MASK \
150 (IWL_RATE_6M_MASK | \
151 IWL_RATE_12M_MASK | \
152 IWL_RATE_24M_MASK)
153
154#define IWL_OFDM_RATES_MASK \
155 (IWL_OFDM_BASIC_RATES_MASK | \
156 IWL_RATE_9M_MASK | \
157 IWL_RATE_18M_MASK | \
158 IWL_RATE_36M_MASK | \
159 IWL_RATE_48M_MASK | \
160 IWL_RATE_54M_MASK)
161
162#define IWL_BASIC_RATES_MASK \
163 (IWL_OFDM_BASIC_RATES_MASK | \
164 IWL_CCK_BASIC_RATES_MASK)
165
166#define IWL_RATES_MASK ((1<<IWL_RATE_COUNT)-1)
167
168#define IWL_INVALID_VALUE -1
169
170#define IWL_MIN_RSSI_VAL -100
171#define IWL_MAX_RSSI_VAL 0
172
173#define IWL_LEGACY_SWITCH_ANTENNA 0
174#define IWL_LEGACY_SWITCH_SISO 1
175#define IWL_LEGACY_SWITCH_MIMO 2
176
177#define IWL_RS_GOOD_RATIO 12800
178
179#define IWL_ACTION_LIMIT 3
180#define IWL_LEGACY_FAILURE_LIMIT 160
181#define IWL_LEGACY_SUCCESS_LIMIT 480
182#define IWL_LEGACY_TABLE_COUNT 160
183
184#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
185#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
186#define IWL_NONE_LEGACY_TABLE_COUNT 1500
187
188#define IWL_RATE_SCALE_SWITCH (10880)
189
190#define IWL_SISO_SWITCH_ANTENNA 0
191#define IWL_SISO_SWITCH_MIMO 1
192#define IWL_SISO_SWITCH_GI 2
193
194#define IWL_MIMO_SWITCH_ANTENNA_A 0
195#define IWL_MIMO_SWITCH_ANTENNA_B 1
196#define IWL_MIMO_SWITCH_GI 2
197
198#define LQ_SIZE 2
199
200extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
201
202enum iwl_table_type {
203 LQ_NONE,
204 LQ_G,
205 LQ_A,
206 LQ_SISO,
207 LQ_MIMO,
208 LQ_MAX,
209};
210
211enum iwl_antenna_type {
212 ANT_NONE,
213 ANT_MAIN,
214 ANT_AUX,
215 ANT_BOTH,
216};
217
218static inline u8 iwl_get_prev_ieee_rate(u8 rate_index)
219{
220 u8 rate = iwl_rates[rate_index].prev_ieee;
221
222 if (rate == IWL_RATE_INVALID)
223 rate = rate_index;
224 return rate;
225}
226
227extern int iwl_rate_index_from_plcp(int plcp);
228
229/**
230 * iwl_fill_rs_info - Fill an output text buffer with the rate representation
231 *
232 * NOTE: This is provided as a quick mechanism for a user to visualize
233 * the performance of the rate control algorithm and is not meant to be
234 * parsed by software.
235 */
236extern int iwl_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
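/*
 * Minimal usage sketch (illustrative only; the real caller lives in the
 * driver's debug/sysfs code, not in this header). The buffer and station
 * index used here are assumptions for the example:
 *
 *	char buf[PAGE_SIZE];
 *	int len = iwl_fill_rs_info(hw, buf, sta_id);
 */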
237
238/**
239 * iwl_rate_scale_init - Initialize the rate scale table based on assoc info
240 *
241 * The specific throughput table used is based on the type of network
242 * the station is associated with, including A, B, G, and G w/ TGG protection
243 */
244extern void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
245
246/**
247 * iwl_rate_control_register - Register the rate control algorithm callbacks
248 *
249 * Since the rate control algorithm is hardware specific, there is no need
250 * or reason to place it as a stand-alone module. The driver can call
251 * iwl_rate_control_register in order to register the rate control callbacks
252 * with the mac80211 subsystem. This should be performed prior to calling
253 * ieee80211_register_hw
254 *
255 */
256extern void iwl_rate_control_register(struct ieee80211_hw *hw);
257
258/**
259 * iwl_rate_control_unregister - Unregister the rate control callbacks
260 *
261 * This should be called after calling ieee80211_unregister_hw, but before
262 * the driver is unloaded.
263 */
264extern void iwl_rate_control_unregister(struct ieee80211_hw *hw);
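/*
 * Taken together, the two comments above imply this call order during
 * driver load/unload (a sketch, not code from this driver):
 *
 *	iwl_rate_control_register(hw);
 *	ieee80211_register_hw(hw);
 *	...
 *	ieee80211_unregister_hw(hw);
 *	iwl_rate_control_unregister(hw);
 */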
265
266#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
new file mode 100644
index 000000000000..ba35b3ac7c7e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -0,0 +1,4719 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/delay.h>
41
42#include "iwlwifi.h"
43#include "iwl-4965.h"
44#include "iwl-helpers.h"
45
46#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
47 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
48 IWL_RATE_SISO_##s##M_PLCP, \
49 IWL_RATE_MIMO_##s##M_PLCP, \
50 IWL_RATE_##r##M_IEEE, \
51 IWL_RATE_##ip##M_INDEX, \
52 IWL_RATE_##in##M_INDEX, \
53 IWL_RATE_##rp##M_INDEX, \
54 IWL_RATE_##rn##M_INDEX, \
55 IWL_RATE_##pp##M_INDEX, \
56 IWL_RATE_##np##M_INDEX }
57
58/*
59 * Parameter order:
60 * rate, ht rate, prev ieee rate, next ieee rate, prev rs rate, next rs rate, prev tgg rs rate, next tgg rs rate
61 *
62 * If there isn't a valid next or previous rate then INV is used which
63 * maps to IWL_RATE_INVALID
64 *
65 */
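/*
 * For illustration, the 12 Mbps entry in the table below expands, per the
 * macro above and the field order of struct iwl_rate_info, roughly to:
 *
 *	[IWL_RATE_12M_INDEX] = { IWL_RATE_12M_PLCP,
 *		IWL_RATE_SISO_12M_PLCP, IWL_RATE_MIMO_12M_PLCP,
 *		IWL_RATE_12M_IEEE,
 *		IWL_RATE_11M_INDEX, IWL_RATE_18M_INDEX,
 *		IWL_RATE_11M_INDEX, IWL_RATE_18M_INDEX,
 *		IWL_RATE_11M_INDEX, IWL_RATE_18M_INDEX }
 *
 * i.e. prev/next IEEE, prev/next rs, and prev/next TGG rs neighbours.
 */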
66const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
67 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
68 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
69 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /* 5.5mbps */
70 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
71 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
72 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
73 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
74 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
75 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
76 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
77 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
78 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
79 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
80};
81
82static int is_fat_channel(__le32 rxon_flags)
83{
84 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
85 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
86}
87
88static u8 is_single_stream(struct iwl_priv *priv)
89{
90#ifdef CONFIG_IWLWIFI_HT
91 if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht ||
92 (priv->active_rate_ht[1] == 0) ||
93 (priv->ps_mode == IWL_MIMO_PS_STATIC))
94 return 1;
95#else
96 return 1;
97#endif /*CONFIG_IWLWIFI_HT */
98 return 0;
99}
100
101/*
102 * Determine how many receiver/antenna chains to use.
103 * More provides better reception via diversity. Fewer saves power.
104 * MIMO (dual stream) requires at least 2, but works better with 3.
105 * This does not determine *which* chains to use, just how many.
106 */
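/*
 * Example outcomes of the logic below (a sketch, not exhaustive): a
 * MIMO-capable association with power management off gives rx_state = 3
 * and idle_state = 3; a single-stream or static MIMO-PS case drops
 * rx_state to 2, and idle_state falls to 1 when power save is active.
 */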
107static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv,
108 u8 *idle_state, u8 *rx_state)
109{
110 u8 is_single = is_single_stream(priv);
111 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
112
113 /* # of Rx chains to use when expecting MIMO. */
114 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
115 *rx_state = 2;
116 else
117 *rx_state = 3;
118
119 /* # Rx chains when idling and maybe trying to save power */
120 switch (priv->ps_mode) {
121 case IWL_MIMO_PS_STATIC:
122 case IWL_MIMO_PS_DYNAMIC:
123 *idle_state = (is_cam) ? 2 : 1;
124 break;
125 case IWL_MIMO_PS_NONE:
126 *idle_state = (is_cam) ? *rx_state : 1;
127 break;
128 default:
129 *idle_state = 1;
130 break;
131 }
132
133 return 0;
134}
135
136int iwl_hw_rxq_stop(struct iwl_priv *priv)
137{
138 int rc;
139 unsigned long flags;
140
141 spin_lock_irqsave(&priv->lock, flags);
142 rc = iwl_grab_restricted_access(priv);
143 if (rc) {
144 spin_unlock_irqrestore(&priv->lock, flags);
145 return rc;
146 }
147
148 /* stop HW */
149 iwl_write_restricted(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
150 rc = iwl_poll_restricted_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
151 (1 << 24), 1000);
152 if (rc < 0)
153 IWL_ERROR("Can't stop Rx DMA.\n");
154
155 iwl_release_restricted_access(priv);
156 spin_unlock_irqrestore(&priv->lock, flags);
157
158 return 0;
159}
160
161u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *addr)
162{
163 int i;
164 int start = 0;
165 int ret = IWL_INVALID_STATION;
166 unsigned long flags;
167
168 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
169 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
170 start = IWL_STA_ID;
171
172 if (is_broadcast_ether_addr(addr))
173 return IWL4965_BROADCAST_ID;
174
175 spin_lock_irqsave(&priv->sta_lock, flags);
176 for (i = start; i < priv->hw_setting.max_stations; i++)
177 if ((priv->stations[i].used) &&
178 (!compare_ether_addr
179 (priv->stations[i].sta.sta.addr, addr))) {
180 ret = i;
181 goto out;
182 }
183
184 IWL_DEBUG_ASSOC("cannot find STA " MAC_FMT " total %d\n",
185 MAC_ARG(addr), priv->num_stations);
186
187 out:
188 spin_unlock_irqrestore(&priv->sta_lock, flags);
189 return ret;
190}
191
192static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
193{
194 int rc = 0;
195 unsigned long flags;
196
197 spin_lock_irqsave(&priv->lock, flags);
198 rc = iwl_grab_restricted_access(priv);
199 if (rc) {
200 spin_unlock_irqrestore(&priv->lock, flags);
201 return rc;
202 }
203
204 if (!pwr_max) {
205 u32 val;
206
207 rc = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
208 &val);
209
210 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
211 iwl_set_bits_mask_restricted_reg(
212 priv, APMG_PS_CTRL_REG,
213 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
214 ~APMG_PS_CTRL_MSK_PWR_SRC);
215 } else
216 iwl_set_bits_mask_restricted_reg(
217 priv, APMG_PS_CTRL_REG,
218 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
219 ~APMG_PS_CTRL_MSK_PWR_SRC);
220
221 iwl_release_restricted_access(priv);
222 spin_unlock_irqrestore(&priv->lock, flags);
223
224 return rc;
225}
226
227static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
228{
229 int rc;
230 unsigned long flags;
231
232 spin_lock_irqsave(&priv->lock, flags);
233 rc = iwl_grab_restricted_access(priv);
234 if (rc) {
235 spin_unlock_irqrestore(&priv->lock, flags);
236 return rc;
237 }
238
239 /* stop HW */
240 iwl_write_restricted(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
241
242 iwl_write_restricted(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
243 iwl_write_restricted(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
244 rxq->dma_addr >> 8);
245
246 iwl_write_restricted(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
247 (priv->hw_setting.shared_phys +
248 offsetof(struct iwl_shared, val0)) >> 4);
249
250 iwl_write_restricted(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
251 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
252 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
253 IWL_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
254 /*0x10 << 4 | */
255 (RX_QUEUE_SIZE_LOG <<
256 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
257
258 /*
259 * iwl_write32(priv,CSR_INT_COAL_REG,0);
260 */
261
262 iwl_release_restricted_access(priv);
263 spin_unlock_irqrestore(&priv->lock, flags);
264
265 return 0;
266}
267
268static int iwl4965_kw_init(struct iwl_priv *priv)
269{
270 unsigned long flags;
271 int rc;
272
273 spin_lock_irqsave(&priv->lock, flags);
274 rc = iwl_grab_restricted_access(priv);
275 if (rc)
276 goto out;
277
278 iwl_write_restricted(priv, IWL_FH_KW_MEM_ADDR_REG,
279 priv->kw.dma_addr >> 4);
280 iwl_release_restricted_access(priv);
281out:
282 spin_unlock_irqrestore(&priv->lock, flags);
283 return rc;
284}
285
286static int iwl4965_kw_alloc(struct iwl_priv *priv)
287{
288 struct pci_dev *dev = priv->pci_dev;
289 struct iwl_kw *kw = &priv->kw;
290
291 kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
292 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
293 if (!kw->v_addr)
294 return -ENOMEM;
295
296 return 0;
297}
298
299#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
300 ? # x " " : "")
301
302int iwl4965_set_fat_chan_info(struct iwl_priv *priv, int phymode, u16 channel,
303 const struct iwl_eeprom_channel *eeprom_ch,
304 u8 fat_extension_channel)
305{
306 struct iwl_channel_info *ch_info;
307
308 ch_info = (struct iwl_channel_info *)
309 iwl_get_channel_info(priv, phymode, channel);
310
311 if (!is_channel_valid(ch_info))
312 return -1;
313
314 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
315 " %ddBm): Ad-Hoc %ssupported\n",
316 ch_info->channel,
317 is_channel_a_band(ch_info) ?
318 "5.2" : "2.4",
319 CHECK_AND_PRINT(IBSS),
320 CHECK_AND_PRINT(ACTIVE),
321 CHECK_AND_PRINT(RADAR),
322 CHECK_AND_PRINT(WIDE),
323 CHECK_AND_PRINT(NARROW),
324 CHECK_AND_PRINT(DFS),
325 eeprom_ch->flags,
326 eeprom_ch->max_power_avg,
327 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
328 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
329 "" : "not ");
330
331 ch_info->fat_eeprom = *eeprom_ch;
332 ch_info->fat_max_power_avg = eeprom_ch->max_power_avg;
333 ch_info->fat_curr_txpow = eeprom_ch->max_power_avg;
334 ch_info->fat_min_power = 0;
335 ch_info->fat_scan_power = eeprom_ch->max_power_avg;
336 ch_info->fat_flags = eeprom_ch->flags;
337 ch_info->fat_extension_channel = fat_extension_channel;
338
339 return 0;
340}
341
342static void iwl4965_kw_free(struct iwl_priv *priv)
343{
344 struct pci_dev *dev = priv->pci_dev;
345 struct iwl_kw *kw = &priv->kw;
346
347 if (kw->v_addr) {
348 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
349 memset(kw, 0, sizeof(*kw));
350 }
351}
352
353/**
354 * iwl4965_txq_ctx_reset - Reset TX queue context
355 * Destroys all DMA structures and initializes them again
356 *
357 * @param priv
358 * @return error code
359 */
360static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
361{
362 int rc = 0;
363 int txq_id, slots_num;
364 unsigned long flags;
365
366 iwl4965_kw_free(priv);
367
368 iwl_hw_txq_ctx_free(priv);
369
370 /* Tx CMD queue */
371 rc = iwl4965_kw_alloc(priv);
372 if (rc) {
373 IWL_ERROR("Keep Warm allocation failed");
374 goto error_kw;
375 }
376
377 spin_lock_irqsave(&priv->lock, flags);
378
379 rc = iwl_grab_restricted_access(priv);
380 if (unlikely(rc)) {
381 IWL_ERROR("TX reset failed");
382 spin_unlock_irqrestore(&priv->lock, flags);
383 goto error_reset;
384 }
385
386 iwl_write_restricted_reg(priv, SCD_TXFACT, 0);
387 iwl_release_restricted_access(priv);
388 spin_unlock_irqrestore(&priv->lock, flags);
389
390 rc = iwl4965_kw_init(priv);
391 if (rc) {
392 IWL_ERROR("kw_init failed\n");
393 goto error_reset;
394 }
395
396 /* Tx queue(s) */
397 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
398 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
399 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
400 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
401 txq_id);
402 if (rc) {
403 IWL_ERROR("Tx %d queue init failed\n", txq_id);
404 goto error;
405 }
406 }
407
408 return rc;
409
410 error:
411 iwl_hw_txq_ctx_free(priv);
412 error_reset:
413 iwl4965_kw_free(priv);
414 error_kw:
415 return rc;
416}
417
418int iwl_hw_nic_init(struct iwl_priv *priv)
419{
420 int rc;
421 unsigned long flags;
422 struct iwl_rx_queue *rxq = &priv->rxq;
423 u8 rev_id;
424 u32 val;
425 u8 val_link;
426
427 iwl_power_init_handle(priv);
428
429 /* nic_init */
430 spin_lock_irqsave(&priv->lock, flags);
431
432 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
433 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
434
435 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
436 rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
437 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
438 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
439 if (rc < 0) {
440 spin_unlock_irqrestore(&priv->lock, flags);
441 IWL_DEBUG_INFO("Failed to init the card\n");
442 return rc;
443 }
444
445 rc = iwl_grab_restricted_access(priv);
446 if (rc) {
447 spin_unlock_irqrestore(&priv->lock, flags);
448 return rc;
449 }
450
451 iwl_read_restricted_reg(priv, APMG_CLK_CTRL_REG);
452
453 iwl_write_restricted_reg(priv, APMG_CLK_CTRL_REG,
454 APMG_CLK_VAL_DMA_CLK_RQT |
455 APMG_CLK_VAL_BSM_CLK_RQT);
456 iwl_read_restricted_reg(priv, APMG_CLK_CTRL_REG);
457
458 udelay(20);
459
460 iwl_set_bits_restricted_reg(priv, APMG_PCIDEV_STT_REG,
461 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
462
463 iwl_release_restricted_access(priv);
464 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
465 spin_unlock_irqrestore(&priv->lock, flags);
466
467 /* Determine HW type */
468 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
469 if (rc)
470 return rc;
471
472 IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id);
473
474 iwl4965_nic_set_pwr_src(priv, 1);
475 spin_lock_irqsave(&priv->lock, flags);
476
477 if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) {
478 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
479 /* Enable No Snoop field */
480 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
481 val & ~(1 << 11));
482 }
483
484 spin_unlock_irqrestore(&priv->lock, flags);
485
486 /* Read the EEPROM */
487 rc = iwl_eeprom_init(priv);
488 if (rc)
489 return rc;
490
491 if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
492 IWL_ERROR("Older EEPROM detected! Aborting.\n");
493 return -EINVAL;
494 }
495
496 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
497
498 /* disable L1 entry -- workaround for pre-B1 */
499 pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);
500
501 spin_lock_irqsave(&priv->lock, flags);
502
503 /* set CSR_HW_CONFIG_REG for uCode use */
504
505 iwl_set_bit(priv, CSR_SW_VER, CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R |
506 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
507 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
508
509 rc = iwl_grab_restricted_access(priv);
510 if (rc < 0) {
511 spin_unlock_irqrestore(&priv->lock, flags);
512 IWL_DEBUG_INFO("Failed to init the card\n");
513 return rc;
514 }
515
516 iwl_read_restricted_reg(priv, APMG_PS_CTRL_REG);
517 iwl_set_bits_restricted_reg(priv, APMG_PS_CTRL_REG,
518 APMG_PS_CTRL_VAL_RESET_REQ);
519 udelay(5);
520 iwl_clear_bits_restricted_reg(priv, APMG_PS_CTRL_REG,
521 APMG_PS_CTRL_VAL_RESET_REQ);
522
523 iwl_release_restricted_access(priv);
524 spin_unlock_irqrestore(&priv->lock, flags);
525
526 iwl_hw_card_show_info(priv);
527
528 /* end nic_init */
529
530 /* Allocate the RX queue, or reset if it is already allocated */
531 if (!rxq->bd) {
532 rc = iwl_rx_queue_alloc(priv);
533 if (rc) {
534 IWL_ERROR("Unable to initialize Rx queue\n");
535 return -ENOMEM;
536 }
537 } else
538 iwl_rx_queue_reset(priv, rxq);
539
540 iwl_rx_replenish(priv);
541
542 iwl4965_rx_init(priv, rxq);
543
544 spin_lock_irqsave(&priv->lock, flags);
545
546 rxq->need_update = 1;
547 iwl_rx_queue_update_write_ptr(priv, rxq);
548
549 spin_unlock_irqrestore(&priv->lock, flags);
550 rc = iwl4965_txq_ctx_reset(priv);
551 if (rc)
552 return rc;
553
554 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
555 IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");
556
557 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
558 IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
559
560 set_bit(STATUS_INIT, &priv->status);
561
562 return 0;
563}
564
565int iwl_hw_nic_stop_master(struct iwl_priv *priv)
566{
567 int rc = 0;
568 u32 reg_val;
569 unsigned long flags;
570
571 spin_lock_irqsave(&priv->lock, flags);
572
573 /* set stop master bit */
574 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
575
576 reg_val = iwl_read32(priv, CSR_GP_CNTRL);
577
578 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
579 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
580 IWL_DEBUG_INFO("Card in power save, master is already "
581 "stopped\n");
582 else {
583 rc = iwl_poll_bit(priv, CSR_RESET,
584 CSR_RESET_REG_FLAG_MASTER_DISABLED,
585 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
586 if (rc < 0) {
587 spin_unlock_irqrestore(&priv->lock, flags);
588 return rc;
589 }
590 }
591
592 spin_unlock_irqrestore(&priv->lock, flags);
593 IWL_DEBUG_INFO("stop master\n");
594
595 return rc;
596}
597
598void iwl_hw_txq_ctx_stop(struct iwl_priv *priv)
599{
600
601 int txq_id;
602 unsigned long flags;
603
604 /* reset TFD queues */
605 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
606 spin_lock_irqsave(&priv->lock, flags);
607 if (iwl_grab_restricted_access(priv)) {
608 spin_unlock_irqrestore(&priv->lock, flags);
609 continue;
610 }
611
612 iwl_write_restricted(priv,
613 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
614 0x0);
615 iwl_poll_restricted_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
616 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
617 (txq_id), 200);
618 iwl_release_restricted_access(priv);
619 spin_unlock_irqrestore(&priv->lock, flags);
620 }
621
622 iwl_hw_txq_ctx_free(priv);
623}
624
625int iwl_hw_nic_reset(struct iwl_priv *priv)
626{
627 int rc = 0;
628 unsigned long flags;
629
630 iwl_hw_nic_stop_master(priv);
631
632 spin_lock_irqsave(&priv->lock, flags);
633
634 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
635
636 udelay(10);
637
638 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
639	rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
640 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
641 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
642
643 udelay(10);
644
645 rc = iwl_grab_restricted_access(priv);
646 if (!rc) {
647 iwl_write_restricted_reg(priv, APMG_CLK_EN_REG,
648 APMG_CLK_VAL_DMA_CLK_RQT |
649 APMG_CLK_VAL_BSM_CLK_RQT);
650
651 udelay(10);
652
653 iwl_set_bits_restricted_reg(priv, APMG_PCIDEV_STT_REG,
654 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
655
656 iwl_release_restricted_access(priv);
657 }
658
659 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
660 wake_up_interruptible(&priv->wait_command_queue);
661
662 spin_unlock_irqrestore(&priv->lock, flags);
663
664 return rc;
665
666}
667
668#define REG_RECALIB_PERIOD (60)
669
670/**
671 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
672 *
673 * This callback is provided in order to queue the statistics_work
674 * in work_queue context (vs. softirq)
675 *
676 * This timer function is continually reset to execute within
677 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
678 * was received. We need to ensure we receive the statistics in order
679 * to update the temperature used for calibrating the TXPOWER. However,
680 * we can't send the statistics command from softirq context (which
681 * is the context in which timers run), so we have to queue the
682 * statistics_work to actually send the command to the hardware.
683 */
684static void iwl4965_bg_statistics_periodic(unsigned long data)
685{
686 struct iwl_priv *priv = (struct iwl_priv *)data;
687
688 queue_work(priv->workqueue, &priv->statistics_work);
689}
690
691/**
692 * iwl4965_bg_statistics_work - Send the statistics request to the hardware.
693 *
694 * This is queued by iwl4965_bg_statistics_periodic().
695 */
696static void iwl4965_bg_statistics_work(struct work_struct *work)
697{
698 struct iwl_priv *priv = container_of(work, struct iwl_priv,
699 statistics_work);
700
701 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
702 return;
703
704 mutex_lock(&priv->mutex);
705 iwl_send_statistics_request(priv);
706 mutex_unlock(&priv->mutex);
707}
708
709#define CT_LIMIT_CONST 259
710#define TM_CT_KILL_THRESHOLD 110
711
712void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
713{
714 struct iwl_ct_kill_config cmd;
715 u32 R1, R2, R3;
716 u32 temp_th;
717 u32 crit_temperature;
718 unsigned long flags;
719 int rc = 0;
720
721 spin_lock_irqsave(&priv->lock, flags);
722 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
723 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
724 spin_unlock_irqrestore(&priv->lock, flags);
725
726 if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
727 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
728 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
729 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
730 } else {
731 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
732 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
733 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
734 }
735
736 temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
737
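	/*
	 * Convert the Celsius threshold into the same units as the R1..R3
	 * calibration values reported in the "alive" response.  Purely
	 * illustrative numbers (not real calibration data): with R1 = 100,
	 * R2 = 200, R3 = 600 and a 110 C threshold (383 K), this gives
	 * 383 * (600 - 100) / 259 + 200 ~= 939.
	 */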
738 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
739 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
740 rc = iwl_send_cmd_pdu(priv,
741 REPLY_CT_KILL_CONFIG_CMD, sizeof(cmd), &cmd);
742 if (rc)
743 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
744 else
745 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
746}
747
748#ifdef CONFIG_IWLWIFI_SENSITIVITY
749
750/* "false alarms" are signals that our DSP tries to lock onto,
751 * but then determines that they are either noise, or transmissions
752 * from a distant wireless network (also "noise", really) that get
753 * "stepped on" by stronger transmissions within our own network.
754 * This algorithm attempts to set a sensitivity level that is high
755 * enough to receive all of our own network traffic, but not so
756 * high that our DSP gets too busy trying to lock onto non-network
757 * activity/noise. */
758static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
759 u32 norm_fa,
760 u32 rx_enable_time,
761 struct statistics_general_data *rx_info)
762{
763 u32 max_nrg_cck = 0;
764 int i = 0;
765 u8 max_silence_rssi = 0;
766 u32 silence_ref = 0;
767 u8 silence_rssi_a = 0;
768 u8 silence_rssi_b = 0;
769 u8 silence_rssi_c = 0;
770 u32 val;
771
772 /* "false_alarms" values below are cross-multiplications to assess the
773 * numbers of false alarms within the measured period of actual Rx
774 * (Rx is off when we're txing), vs the min/max expected false alarms
775 * (some should be expected if rx is sensitive enough) in a
776 * hypothetical listening period of 200 time units (TU), 204.8 msec:
777 *
778 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
779 *
780 */
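	/*
	 * Equivalently: false_alarms > max_false_alarms reduces to
	 * norm_fa / rx_enable_time > MAX_FA_CCK / (200 * 1024 usec),
	 * i.e. the observed false-alarm rate is checked against the
	 * MIN/MAX_FA_CCK limits expressed per 200 TU of listening time.
	 * Worked example: norm_fa = 5 over rx_enable_time = 102400 usec
	 * (100 TU) scales to 5 * 204800 = 1024000, compared against
	 * MAX_FA_CCK * 102400.  The OFDM path below uses the same scheme.
	 */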
781 u32 false_alarms = norm_fa * 200 * 1024;
782 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
783 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
784 struct iwl_sensitivity_data *data = NULL;
785
786 data = &(priv->sensitivity_data);
787
788 data->nrg_auto_corr_silence_diff = 0;
789
790 /* Find max silence rssi among all 3 receivers.
791 * This is background noise, which may include transmissions from other
792 * networks, measured during silence before our network's beacon */
793 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
794 ALL_BAND_FILTER)>>8);
795 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
796 ALL_BAND_FILTER)>>8);
797 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
798 ALL_BAND_FILTER)>>8);
799
800 val = max(silence_rssi_b, silence_rssi_c);
801 max_silence_rssi = max(silence_rssi_a, (u8) val);
802
803 /* Store silence rssi in 20-beacon history table */
804 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
805 data->nrg_silence_idx++;
806 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
807 data->nrg_silence_idx = 0;
808
809 /* Find max silence rssi across 20 beacon history */
810 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
811 val = data->nrg_silence_rssi[i];
812 silence_ref = max(silence_ref, val);
813 }
814 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
815 silence_rssi_a, silence_rssi_b, silence_rssi_c,
816 silence_ref);
817
818 /* Find max rx energy (min value!) among all 3 receivers,
819 * measured during beacon frame.
820 * Save it in 10-beacon history table. */
821 i = data->nrg_energy_idx;
822 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
823 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
824
825 data->nrg_energy_idx++;
826 if (data->nrg_energy_idx >= 10)
827 data->nrg_energy_idx = 0;
828
829 /* Find min rx energy (max value) across 10 beacon history.
830 * This is the minimum signal level that we want to receive well.
831 * Add backoff (margin so we don't miss slightly lower energy frames).
832 * This establishes an upper bound (min value) for energy threshold. */
833 max_nrg_cck = data->nrg_value[0];
834 for (i = 1; i < 10; i++)
835 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
836 max_nrg_cck += 6;
837
838 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
839 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
840 rx_info->beacon_energy_c, max_nrg_cck - 6);
841
842 /* Count number of consecutive beacons with fewer-than-desired
843 * false alarms. */
844 if (false_alarms < min_false_alarms)
845 data->num_in_cck_no_fa++;
846 else
847 data->num_in_cck_no_fa = 0;
848 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
849 data->num_in_cck_no_fa);
850
851 /* If we got too many false alarms this time, reduce sensitivity */
852 if (false_alarms > max_false_alarms) {
853 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
854 false_alarms, max_false_alarms);
855 IWL_DEBUG_CALIB("... reducing sensitivity\n");
856 data->nrg_curr_state = IWL_FA_TOO_MANY;
857
858 if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
859 /* Store for "fewer than desired" on later beacon */
860 data->nrg_silence_ref = silence_ref;
861
862 /* increase energy threshold (reduce nrg value)
863 * to decrease sensitivity */
864 if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
865 data->nrg_th_cck = data->nrg_th_cck
866 - NRG_STEP_CCK;
867 }
868
869 /* increase auto_corr values to decrease sensitivity */
870 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
871 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
872 else {
873 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
874 data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
875 }
876 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
877 data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);
878
879 /* Else if we got fewer than desired, increase sensitivity */
880 } else if (false_alarms < min_false_alarms) {
881 data->nrg_curr_state = IWL_FA_TOO_FEW;
882
883		/* Compare current silence level with the level seen the last
884		 * time we had a healthy number of, or too many, false alarms */
885 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
886 (s32)silence_ref;
887
888 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
889 false_alarms, min_false_alarms,
890 data->nrg_auto_corr_silence_diff);
891
892 /* Increase value to increase sensitivity, but only if:
893 * 1a) previous beacon did *not* have *too many* false alarms
894 * 1b) AND there's a significant difference in Rx levels
895 * from a previous beacon with too many, or healthy # FAs
896 * OR 2) We've seen a lot of beacons (100) with too few
897 * false alarms */
898 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
899 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
900 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
901
902 IWL_DEBUG_CALIB("... increasing sensitivity\n");
903 /* Increase nrg value to increase sensitivity */
904 val = data->nrg_th_cck + NRG_STEP_CCK;
905 data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);
906
907 /* Decrease auto_corr values to increase sensitivity */
908 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
909 data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);
910
911 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
912 data->auto_corr_cck_mrc =
913 max((u32)AUTO_CORR_MIN_CCK_MRC, val);
914
915 } else
916 IWL_DEBUG_CALIB("... but not changing sensitivity\n");
917
918 /* Else we got a healthy number of false alarms, keep status quo */
919 } else {
920 IWL_DEBUG_CALIB(" FA in safe zone\n");
921 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
922
923 /* Store for use in "fewer than desired" with later beacon */
924 data->nrg_silence_ref = silence_ref;
925
926 /* If previous beacon had too many false alarms,
927 * give it some extra margin by reducing sensitivity again
928 * (but don't go below measured energy of desired Rx) */
929 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
930 IWL_DEBUG_CALIB("... increasing margin\n");
931 data->nrg_th_cck -= NRG_MARGIN;
932 }
933 }
934
935 /* Make sure the energy threshold does not go above the measured
936 * energy of the desired Rx signals (reduced by backoff margin),
937 * or else we might start missing Rx frames.
938 * Lower value is higher energy, so we use max()!
939 */
940 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
941 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
942
943 data->nrg_prev_state = data->nrg_curr_state;
944
945 return 0;
946}
947
948
949static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
950 u32 norm_fa,
951 u32 rx_enable_time)
952{
953 u32 val;
954 u32 false_alarms = norm_fa * 200 * 1024;
955 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
956 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
957 struct iwl_sensitivity_data *data = NULL;
958
959 data = &(priv->sensitivity_data);
960
961 /* If we got too many false alarms this time, reduce sensitivity */
962 if (false_alarms > max_false_alarms) {
963
964 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
965 false_alarms, max_false_alarms);
966
967 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
968 data->auto_corr_ofdm =
969 min((u32)AUTO_CORR_MAX_OFDM, val);
970
971 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
972 data->auto_corr_ofdm_mrc =
973 min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
974
975 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
976 data->auto_corr_ofdm_x1 =
977 min((u32)AUTO_CORR_MAX_OFDM_X1, val);
978
979 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
980 data->auto_corr_ofdm_mrc_x1 =
981 min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
982 }
983
984 /* Else if we got fewer than desired, increase sensitivity */
985 else if (false_alarms < min_false_alarms) {
986
987 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
988 false_alarms, min_false_alarms);
989
990 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
991 data->auto_corr_ofdm =
992 max((u32)AUTO_CORR_MIN_OFDM, val);
993
994 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
995 data->auto_corr_ofdm_mrc =
996 max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
997
998 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
999 data->auto_corr_ofdm_x1 =
1000 max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1001
1002 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1003 data->auto_corr_ofdm_mrc_x1 =
1004 max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1005 }
1006
1007 else
1008 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1009 min_false_alarms, false_alarms, max_false_alarms);
1010
1011 return 0;
1012}
1013
1014static int iwl_sensitivity_callback(struct iwl_priv *priv,
1015 struct iwl_cmd *cmd, struct sk_buff *skb)
1016{
1017 /* We didn't cache the SKB; let the caller free it */
1018 return 1;
1019}
1020
1021/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
1022static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags)
1023{
1024 int rc = 0;
1025	struct iwl_sensitivity_cmd cmd;
1026 struct iwl_sensitivity_data *data = NULL;
1027 struct iwl_host_cmd cmd_out = {
1028 .id = SENSITIVITY_CMD,
1029 .len = sizeof(struct iwl_sensitivity_cmd),
1030 .meta.flags = flags,
1031 .data = &cmd,
1032 };
1033
1034 data = &(priv->sensitivity_data);
1035
1036 memset(&cmd, 0, sizeof(cmd));
1037
1038 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1039 cpu_to_le16((u16)data->auto_corr_ofdm);
1040 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1041 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1042 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1043 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1044 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1045 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1046
1047 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1048 cpu_to_le16((u16)data->auto_corr_cck);
1049 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1050 cpu_to_le16((u16)data->auto_corr_cck_mrc);
1051
1052 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1053 cpu_to_le16((u16)data->nrg_th_cck);
1054 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1055 cpu_to_le16((u16)data->nrg_th_ofdm);
1056
1057 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1058 __constant_cpu_to_le16(190);
1059 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1060 __constant_cpu_to_le16(390);
1061 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1062 __constant_cpu_to_le16(62);
1063
1064 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1065 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1066 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1067 data->nrg_th_ofdm);
1068
1069 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1070 data->auto_corr_cck, data->auto_corr_cck_mrc,
1071 data->nrg_th_cck);
1072
1073 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1074
1075 if (flags & CMD_ASYNC)
1076 cmd_out.meta.u.callback = iwl_sensitivity_callback;
1077
1078 /* Don't send command to uCode if nothing has changed */
1079 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1080 sizeof(u16)*HD_TABLE_SIZE)) {
1081 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1082 return 0;
1083 }
1084
1085 /* Copy table for comparison next time */
1086 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
1087 sizeof(u16)*HD_TABLE_SIZE);
1088
1089 rc = iwl_send_cmd(priv, &cmd_out);
1090 if (!rc) {
1091 IWL_DEBUG_CALIB("SENSITIVITY_CMD succeeded\n");
1092 return rc;
1093 }
1094
1095	return rc;
1096}
1097
1098void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force)
1099{
1100 int rc = 0;
1101 int i;
1102 struct iwl_sensitivity_data *data = NULL;
1103
1104 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1105
1106 if (force)
1107 memset(&(priv->sensitivity_tbl[0]), 0,
1108 sizeof(u16)*HD_TABLE_SIZE);
1109
1110 /* Clear driver's sensitivity algo data */
1111 data = &(priv->sensitivity_data);
1112 memset(data, 0, sizeof(struct iwl_sensitivity_data));
1113
1114 data->num_in_cck_no_fa = 0;
1115 data->nrg_curr_state = IWL_FA_TOO_MANY;
1116 data->nrg_prev_state = IWL_FA_TOO_MANY;
1117 data->nrg_silence_ref = 0;
1118 data->nrg_silence_idx = 0;
1119 data->nrg_energy_idx = 0;
1120
1121 for (i = 0; i < 10; i++)
1122 data->nrg_value[i] = 0;
1123
1124 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
1125 data->nrg_silence_rssi[i] = 0;
1126
1127 data->auto_corr_ofdm = 90;
1128 data->auto_corr_ofdm_mrc = 170;
1129 data->auto_corr_ofdm_x1 = 105;
1130 data->auto_corr_ofdm_mrc_x1 = 220;
1131 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1132 data->auto_corr_cck_mrc = 200;
1133 data->nrg_th_cck = 100;
1134 data->nrg_th_ofdm = 100;
1135
1136 data->last_bad_plcp_cnt_ofdm = 0;
1137 data->last_fa_cnt_ofdm = 0;
1138 data->last_bad_plcp_cnt_cck = 0;
1139 data->last_fa_cnt_cck = 0;
1140
1141 /* Clear prior Sensitivity command data to force send to uCode */
1142 if (force)
1143 memset(&(priv->sensitivity_tbl[0]), 0,
1144 sizeof(u16)*HD_TABLE_SIZE);
1145
1146 rc |= iwl4965_sensitivity_write(priv, flags);
1147	IWL_DEBUG_CALIB("<< return 0x%X\n", rc);
1148
1149 return;
1150}
1151
1152
1153/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1154 * Called after every association, but this runs only once!
1155 * ... once chain noise is calibrated the first time, it's good forever. */
1156void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1157{
1158 struct iwl_chain_noise_data *data = NULL;
1159 int rc = 0;
1160
1161 data = &(priv->chain_noise_data);
1162 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
1163 struct iwl_calibration_cmd cmd;
1164
1165 memset(&cmd, 0, sizeof(cmd));
1166 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1167 cmd.diff_gain_a = 0;
1168 cmd.diff_gain_b = 0;
1169 cmd.diff_gain_c = 0;
1170 rc = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1171 sizeof(cmd), &cmd);
1172 msleep(4);
1173 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1174 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1175 }
1176 return;
1177}
1178
1179/*
1180 * Accumulate 20 beacons of signal and noise statistics for each of
1181 * 3 receivers/antennas/rx-chains, then figure out:
1182 * 1) Which antennas are connected.
1183 * 2) Differential rx gain settings to balance the 3 receivers.
1184 */
1185static void iwl4965_noise_calibration(struct iwl_priv *priv,
1186 struct iwl_notif_statistics *stat_resp)
1187{
1188 struct iwl_chain_noise_data *data = NULL;
1189 int rc = 0;
1190
1191 u32 chain_noise_a;
1192 u32 chain_noise_b;
1193 u32 chain_noise_c;
1194 u32 chain_sig_a;
1195 u32 chain_sig_b;
1196 u32 chain_sig_c;
1197 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1198 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1199 u32 max_average_sig;
1200 u16 max_average_sig_antenna_i;
1201 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1202 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1203 u16 i = 0;
1204 u16 chan_num = INITIALIZATION_VALUE;
1205 u32 band = INITIALIZATION_VALUE;
1206 u32 active_chains = 0;
1207 unsigned long flags;
1208 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1209
1210 data = &(priv->chain_noise_data);
1211
1212 /* Accumulate just the first 20 beacons after the first association,
1213 * then we're done forever. */
1214 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1215 if (data->state == IWL_CHAIN_NOISE_ALIVE)
1216 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1217 return;
1218 }
1219
1220 spin_lock_irqsave(&priv->lock, flags);
1221 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1222 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1223 spin_unlock_irqrestore(&priv->lock, flags);
1224 return;
1225 }
1226
1227 band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1228 chan_num = le16_to_cpu(priv->staging_rxon.channel);
1229
1230 /* Make sure we accumulate data for just the associated channel
1231 * (even if scanning). */
1232 if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1233 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1234 (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1235 IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1236 chan_num, band);
1237 spin_unlock_irqrestore(&priv->lock, flags);
1238 return;
1239 }
1240
1241 /* Accumulate beacon statistics values across 20 beacons */
1242 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1243 IN_BAND_FILTER;
1244 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1245 IN_BAND_FILTER;
1246 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1247 IN_BAND_FILTER;
1248
1249 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1250 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1251 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1252
1253 spin_unlock_irqrestore(&priv->lock, flags);
1254
1255 data->beacon_count++;
1256
1257 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1258 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1259 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1260
1261 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1262 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1263 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1264
1265 IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1266 data->beacon_count);
1267 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1268 chain_sig_a, chain_sig_b, chain_sig_c);
1269 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1270 chain_noise_a, chain_noise_b, chain_noise_c);
1271
1272 /* If this is the 20th beacon, determine:
1273 * 1) Disconnected antennas (using signal strengths)
1274 * 2) Differential gain (using silence noise) to balance receivers */
1275 if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1276
1277 /* Analyze signal for disconnected antenna */
1278 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1279 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1280 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1281
1282 if (average_sig[0] >= average_sig[1]) {
1283 max_average_sig = average_sig[0];
1284 max_average_sig_antenna_i = 0;
1285 active_chains = (1 << max_average_sig_antenna_i);
1286 } else {
1287 max_average_sig = average_sig[1];
1288 max_average_sig_antenna_i = 1;
1289 active_chains = (1 << max_average_sig_antenna_i);
1290 }
1291
1292 if (average_sig[2] >= max_average_sig) {
1293 max_average_sig = average_sig[2];
1294 max_average_sig_antenna_i = 2;
1295 active_chains = (1 << max_average_sig_antenna_i);
1296 }
1297
1298 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1299 average_sig[0], average_sig[1], average_sig[2]);
1300 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1301 max_average_sig, max_average_sig_antenna_i);
1302
1303 /* Compare signal strengths for all 3 receivers. */
1304 for (i = 0; i < NUM_RX_CHAINS; i++) {
1305 if (i != max_average_sig_antenna_i) {
1306 s32 rssi_delta = (max_average_sig -
1307 average_sig[i]);
1308
1309 /* If signal is very weak, compared with
1310 * strongest, mark it as disconnected. */
1311 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1312 data->disconn_array[i] = 1;
1313 else
1314 active_chains |= (1 << i);
1315 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1316 "disconn_array[i] = %d\n",
1317 i, rssi_delta, data->disconn_array[i]);
1318 }
1319 }
1320
1321		/* If both chains A & B are disconnected -
1322 * connect B and leave A as is */
1323 if (data->disconn_array[CHAIN_A] &&
1324 data->disconn_array[CHAIN_B]) {
1325 data->disconn_array[CHAIN_B] = 0;
1326 active_chains |= (1 << CHAIN_B);
1327 IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1328 "W/A - declare B as connected\n");
1329 }
1330
1331 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
1332 active_chains);
1333
1334 /* Save for use within RXON, TX, SCAN commands, etc. */
1335 priv->valid_antenna = active_chains;
1336
1337 /* Analyze noise for rx balance */
1338 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
1339 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1340 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1341
1342 for (i = 0; i < NUM_RX_CHAINS; i++) {
1343 if (!(data->disconn_array[i]) &&
1344 (average_noise[i] <= min_average_noise)) {
1345 /* This means that chain i is active and has
1346 * lower noise values so far: */
1347 min_average_noise = average_noise[i];
1348 min_average_noise_antenna_i = i;
1349 }
1350 }
1351
1352 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1353
1354 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
1355 average_noise[0], average_noise[1],
1356 average_noise[2]);
1357
1358 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
1359 min_average_noise, min_average_noise_antenna_i);
1360
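		/*
		 * Convert each connected chain's noise excess over the
		 * quietest chain into a differential gain code:
		 * delta_gain_code = delta * 10 / 15, i.e. one code step per
		 * ~1.5 units of noise difference, capped at
		 * CHAIN_NOISE_MAX_DELTA_GAIN_CODE, with bit 2 ORed in
		 * (presumably marking the value as an attenuation).
		 */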
1361 for (i = 0; i < NUM_RX_CHAINS; i++) {
1362 s32 delta_g = 0;
1363
1364 if (!(data->disconn_array[i]) &&
1365 (data->delta_gain_code[i] ==
1366 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1367 delta_g = average_noise[i] - min_average_noise;
1368 data->delta_gain_code[i] = (u8)((delta_g *
1369 10) / 15);
1370 if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE <
1371 data->delta_gain_code[i])
1372 data->delta_gain_code[i] =
1373 CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
1374
1375 data->delta_gain_code[i] =
1376 (data->delta_gain_code[i] | (1 << 2));
1377 } else
1378 data->delta_gain_code[i] = 0;
1379 }
1380 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1381 data->delta_gain_code[0],
1382 data->delta_gain_code[1],
1383 data->delta_gain_code[2]);
1384
1385 /* Differential gain gets sent to uCode only once */
1386 if (!data->radio_write) {
1387 struct iwl_calibration_cmd cmd;
1388 data->radio_write = 1;
1389
1390 memset(&cmd, 0, sizeof(cmd));
1391 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1392 cmd.diff_gain_a = data->delta_gain_code[0];
1393 cmd.diff_gain_b = data->delta_gain_code[1];
1394 cmd.diff_gain_c = data->delta_gain_code[2];
1395 rc = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1396 sizeof(cmd), &cmd);
1397 if (rc)
1398				IWL_DEBUG_CALIB("failed to send cmd "
1399					"REPLY_PHY_CALIBRATION_CMD\n");
1400
1401			/* TODO: we might want to recalculate
1402			 * rx_chain in the rxon cmd */
1403
1404 /* Mark so we run this algo only once! */
1405 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1406 }
1407 data->chain_noise_a = 0;
1408 data->chain_noise_b = 0;
1409 data->chain_noise_c = 0;
1410 data->chain_signal_a = 0;
1411 data->chain_signal_b = 0;
1412 data->chain_signal_c = 0;
1413 data->beacon_count = 0;
1414 }
1415 return;
1416}
1417
1418static void iwl4965_sensitivity_calibration(struct iwl_priv *priv,
1419 struct iwl_notif_statistics *resp)
1420{
1421 int rc = 0;
1422 u32 rx_enable_time;
1423 u32 fa_cck;
1424 u32 fa_ofdm;
1425 u32 bad_plcp_cck;
1426 u32 bad_plcp_ofdm;
1427 u32 norm_fa_ofdm;
1428 u32 norm_fa_cck;
1429 struct iwl_sensitivity_data *data = NULL;
1430 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
1431 struct statistics_rx *statistics = &(resp->rx);
1432 unsigned long flags;
1433 struct statistics_general_data statis;
1434
1435 data = &(priv->sensitivity_data);
1436
1437 if (!iwl_is_associated(priv)) {
1438 IWL_DEBUG_CALIB("<< - not associated\n");
1439 return;
1440 }
1441
1442 spin_lock_irqsave(&priv->lock, flags);
1443 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1444 IWL_DEBUG_CALIB("<< invalid data.\n");
1445 spin_unlock_irqrestore(&priv->lock, flags);
1446 return;
1447 }
1448
1449 /* Extract Statistics: */
1450 rx_enable_time = le32_to_cpu(rx_info->channel_load);
1451 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
1452 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
1453 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
1454 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
1455
1456 statis.beacon_silence_rssi_a =
1457 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
1458 statis.beacon_silence_rssi_b =
1459 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
1460 statis.beacon_silence_rssi_c =
1461 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
1462 statis.beacon_energy_a =
1463 le32_to_cpu(statistics->general.beacon_energy_a);
1464 statis.beacon_energy_b =
1465 le32_to_cpu(statistics->general.beacon_energy_b);
1466 statis.beacon_energy_c =
1467 le32_to_cpu(statistics->general.beacon_energy_c);
1468
1469 spin_unlock_irqrestore(&priv->lock, flags);
1470
1471 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
1472
1473 if (!rx_enable_time) {
1474		IWL_DEBUG_CALIB("<< RX Enable Time == 0!\n");
1475 return;
1476 }
1477
1478 /* These statistics increase monotonically, and do not reset
1479 * at each beacon. Calculate difference from last value, or just
1480 * use the new statistics value if it has reset or wrapped around. */
1481 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
1482 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
1483 else {
1484 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
1485 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
1486 }
1487
1488 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
1489 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
1490 else {
1491 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
1492 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
1493 }
1494
1495 if (data->last_fa_cnt_ofdm > fa_ofdm)
1496 data->last_fa_cnt_ofdm = fa_ofdm;
1497 else {
1498 fa_ofdm -= data->last_fa_cnt_ofdm;
1499 data->last_fa_cnt_ofdm += fa_ofdm;
1500 }
1501
1502 if (data->last_fa_cnt_cck > fa_cck)
1503 data->last_fa_cnt_cck = fa_cck;
1504 else {
1505 fa_cck -= data->last_fa_cnt_cck;
1506 data->last_fa_cnt_cck += fa_cck;
1507 }
1508
1509 /* Total aborted signal locks */
1510 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
1511 norm_fa_cck = fa_cck + bad_plcp_cck;
1512
1513 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
1514 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
1515
1516 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1517 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1518 rc |= iwl4965_sensitivity_write(priv, CMD_ASYNC);
1519
1520 return;
1521}
1522
1523static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1524{
1525 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1526 sensitivity_work);
1527
1528 mutex_lock(&priv->mutex);
1529
1530 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1531 test_bit(STATUS_SCANNING, &priv->status)) {
1532 mutex_unlock(&priv->mutex);
1533 return;
1534 }
1535
1536 if (priv->start_calib) {
1537 iwl4965_noise_calibration(priv, &priv->statistics);
1538
1539 if (priv->sensitivity_data.state ==
1540 IWL_SENS_CALIB_NEED_REINIT) {
1541 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1542 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1543 } else
1544 iwl4965_sensitivity_calibration(priv,
1545 &priv->statistics);
1546 }
1547
1548 mutex_unlock(&priv->mutex);
1549 return;
1550}
1551#endif /*CONFIG_IWLWIFI_SENSITIVITY*/
1552
1553static void iwl4965_bg_txpower_work(struct work_struct *work)
1554{
1555 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1556 txpower_work);
1557
1558 /* If a scan happened to start before we got here
1559 * then just return; the statistics notification will
1560 * kick off another scheduled work to compensate for
1561 * any temperature delta we missed here. */
1562 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1563 test_bit(STATUS_SCANNING, &priv->status))
1564 return;
1565
1566 mutex_lock(&priv->mutex);
1567
1568	/* Regardless of whether we are associated, we must reconfigure the
1569 * TX power since frames can be sent on non-radar channels while
1570 * not associated */
1571 iwl_hw_reg_send_txpower(priv);
1572
1573 /* Update last_temperature to keep is_calib_needed from running
1574 * when it isn't needed... */
1575 priv->last_temperature = priv->temperature;
1576
1577 mutex_unlock(&priv->mutex);
1578}
1579
1580/*
1581 * Acquire priv->lock before calling this function!
1582 */
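/*
 * HBUS_TARG_WRPTR packs the queue index into the low byte and the Tx
 * queue id into bits 8 and up, matching the (index & 0xff) | (txq_id << 8)
 * encoding below; the scheduler's read pointer for the same queue is set
 * to the same index.
 */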
1583static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
1584{
1585 iwl_write_restricted(priv, HBUS_TARG_WRPTR,
1586 (index & 0xff) | (txq_id << 8));
1587 iwl_write_restricted_reg(priv, SCD_QUEUE_RDPTR(txq_id), index);
1588}
1589
1590/*
1591 * Acquire priv->lock before calling this function!
1592 */
1593static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1594 struct iwl_tx_queue *txq,
1595 int tx_fifo_id, int scd_retry)
1596{
1597 int txq_id = txq->q.id;
1598 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
1599
1600 iwl_write_restricted_reg(priv, SCD_QUEUE_STATUS_BITS(txq_id),
1601 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1602 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
1603 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) |
1604 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1605 SCD_QUEUE_STTS_REG_MSK);
1606
1607 txq->sched_retry = scd_retry;
1608
1609 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
1610		       active ? "Activate" : "Deactivate",
1611 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
1612}
1613
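/*
 * Per-queue Tx FIFO assignment used by iwl4965_alive_notify() below:
 * the first four queues feed the AC3..AC0 data FIFOs, the fifth feeds
 * the command FIFO, and the last two feed the HCCA FIFOs.
 */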
1614static const u16 default_queue_to_tx_fifo[] = {
1615 IWL_TX_FIFO_AC3,
1616 IWL_TX_FIFO_AC2,
1617 IWL_TX_FIFO_AC1,
1618 IWL_TX_FIFO_AC0,
1619 IWL_CMD_FIFO_NUM,
1620 IWL_TX_FIFO_HCCA_1,
1621 IWL_TX_FIFO_HCCA_2
1622};
1623
1624static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1625{
1626 set_bit(txq_id, &priv->txq_ctx_active_msk);
1627}
1628
1629static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1630{
1631 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1632}
1633
1634int iwl4965_alive_notify(struct iwl_priv *priv)
1635{
1636 u32 a;
1637 int i = 0;
1638 unsigned long flags;
1639 int rc;
1640
1641 spin_lock_irqsave(&priv->lock, flags);
1642
1643#ifdef CONFIG_IWLWIFI_SENSITIVITY
1644 memset(&(priv->sensitivity_data), 0,
1645 sizeof(struct iwl_sensitivity_data));
1646 memset(&(priv->chain_noise_data), 0,
1647 sizeof(struct iwl_chain_noise_data));
1648 for (i = 0; i < NUM_RX_CHAINS; i++)
1649 priv->chain_noise_data.delta_gain_code[i] =
1650 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1651#endif /* CONFIG_IWLWIFI_SENSITIVITY*/
1652 rc = iwl_grab_restricted_access(priv);
1653 if (rc) {
1654 spin_unlock_irqrestore(&priv->lock, flags);
1655 return rc;
1656 }
1657
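	/*
	 * Read the scheduler's SRAM base address and zero its context data,
	 * Tx status bitmap and translation table areas (per the SCD_*_OFFSET
	 * names) before the per-queue contexts are programmed below.
	 */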
1658 priv->scd_base_addr = iwl_read_restricted_reg(priv, SCD_SRAM_BASE_ADDR);
1659 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
1660 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1661 iwl_write_restricted_mem(priv, a, 0);
1662 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4)
1663 iwl_write_restricted_mem(priv, a, 0);
1664	for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET + sizeof(u16) * priv->hw_setting.max_txq_num; a += 4)
1665 iwl_write_restricted_mem(priv, a, 0);
1666
1667 iwl_write_restricted_reg(priv, SCD_DRAM_BASE_ADDR,
1668 (priv->hw_setting.shared_phys +
1669 offsetof(struct iwl_shared, queues_byte_cnt_tbls)) >> 10);
1670 iwl_write_restricted_reg(priv, SCD_QUEUECHAIN_SEL, 0);
1671	/* initialize the queues */
1672 /* initiate the queues */
1673 for (i = 0; i < priv->hw_setting.max_txq_num; i++) {
1674 iwl_write_restricted_reg(priv, SCD_QUEUE_RDPTR(i), 0);
1675 iwl_write_restricted(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1676 iwl_write_restricted_mem(priv, priv->scd_base_addr +
1677 SCD_CONTEXT_QUEUE_OFFSET(i),
1678 (SCD_WIN_SIZE <<
1679 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1680 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1681 iwl_write_restricted_mem(priv, priv->scd_base_addr +
1682 SCD_CONTEXT_QUEUE_OFFSET(i) +
1683 sizeof(u32),
1684 (SCD_FRAME_LIMIT <<
1685 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1686 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1687
1688 }
1689 iwl_write_restricted_reg(priv, SCD_INTERRUPT_MASK,
1690 (1 << priv->hw_setting.max_txq_num) - 1);
1691
1692 iwl_write_restricted_reg(priv, SCD_TXFACT,
1693 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1694
1695 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1696 /* map qos queues to fifos one-to-one */
1697 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
1698 int ac = default_queue_to_tx_fifo[i];
1699 iwl4965_txq_ctx_activate(priv, i);
1700 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
1701 }
1702
1703 iwl_release_restricted_access(priv);
1704 spin_unlock_irqrestore(&priv->lock, flags);
1705
1706 return 0;
1707}
1708
1709int iwl_hw_set_hw_setting(struct iwl_priv *priv)
1710{
1711 priv->hw_setting.shared_virt =
1712 pci_alloc_consistent(priv->pci_dev,
1713 sizeof(struct iwl_shared),
1714 &priv->hw_setting.shared_phys);
1715
1716 if (!priv->hw_setting.shared_virt)
1717		return -ENOMEM;
1718
1719 memset(priv->hw_setting.shared_virt, 0, sizeof(struct iwl_shared));
1720
1721 priv->hw_setting.max_txq_num = iwl_param_queues_num;
1722 priv->hw_setting.ac_queue_count = AC_NUM;
1723
1724 priv->hw_setting.cck_flag = RATE_MCS_CCK_MSK;
1725 priv->hw_setting.tx_cmd_len = sizeof(struct iwl_tx_cmd);
1726 priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
1727 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
1728
1729 priv->hw_setting.max_stations = IWL4965_STATION_COUNT;
1730 priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID;
1731 return 0;
1732}
1733
1734/**
1735 * iwl_hw_txq_ctx_free - Free TXQ Context
1736 *
1737 * Destroy all TX DMA queues and structures
1738 */
1739void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
1740{
1741 int txq_id;
1742
1743 /* Tx queues */
1744 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
1745 iwl_tx_queue_free(priv, &priv->txq[txq_id]);
1746
1747 iwl4965_kw_free(priv);
1748}
1749
1750/**
1751 * iwl_hw_txq_free_tfd - Free the one TFD at index [txq->q.last_used]
1752 *
1753 * Does NOT advance any indexes
1754 */
1755int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
1756{
1757 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
1758 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.last_used];
1759 struct pci_dev *dev = priv->pci_dev;
1760 int i;
1761 int counter = 0;
1762 int index, is_odd;
1763
1764 /* classify bd */
1765 if (txq->q.id == IWL_CMD_QUEUE_NUM)
1766 /* nothing to cleanup after for host commands */
1767 return 0;
1768
1769 /* sanity check */
1770 counter = IWL_GET_BITS(*bd, num_tbs);
1771 if (counter > MAX_NUM_OF_TBS) {
1772 IWL_ERROR("Too many chunks: %i\n", counter);
1773		/* @todo issue fatal error, it is quite a serious situation */
1774 return 0;
1775 }
1776
1777 /* unmap chunks if any */
1778
1779 for (i = 0; i < counter; i++) {
1780 index = i / 2;
1781 is_odd = i & 0x1;
1782
1783 if (is_odd)
1784 pci_unmap_single(
1785 dev,
1786 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
1787 (IWL_GET_BITS(bd->pa[index],
1788 tb2_addr_hi20) << 16),
1789 IWL_GET_BITS(bd->pa[index], tb2_len),
1790 PCI_DMA_TODEVICE);
1791
1792 else if (i > 0)
1793 pci_unmap_single(dev,
1794 le32_to_cpu(bd->pa[index].tb1_addr),
1795 IWL_GET_BITS(bd->pa[index], tb1_len),
1796 PCI_DMA_TODEVICE);
1797
1798 if (txq->txb[txq->q.last_used].skb[i]) {
1799 struct sk_buff *skb = txq->txb[txq->q.last_used].skb[i];
1800
1801 dev_kfree_skb(skb);
1802 txq->txb[txq->q.last_used].skb[i] = NULL;
1803 }
1804 }
1805 return 0;
1806}
1807
1808int iwl_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1809{
1810 IWL_ERROR("TODO: Implement iwl_hw_reg_set_txpower!\n");
1811 return -EINVAL;
1812}
1813
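/*
 * Signed integer division that rounds half away from zero, e.g.
 * 7/2 -> 4, 5/2 -> 3, -7/2 -> -4.  The rounded quotient is returned
 * through *res; the function's own return value is always 1 and is
 * ignored by its callers.
 */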
1814static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
1815{
1816 s32 sign = 1;
1817
1818 if (num < 0) {
1819 sign = -sign;
1820 num = -num;
1821 }
1822 if (denom < 0) {
1823 sign = -sign;
1824 denom = -denom;
1825 }
1826 *res = 1;
1827 *res = ((num * 2 + denom) / (denom * 2)) * sign;
1828
1829 return 1;
1830}
1831
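/*
 * Tx gain compensation for supply-voltage drift: the difference between
 * the "alive" voltage reading and the factory EEPROM value is expressed
 * in TX_POWER_IWL_VOLTAGE_CODES_PER_03V steps (doubled when the current
 * reading exceeds the factory value) and reset to 0 if it falls outside
 * [-2, 2]; iwl4965_fill_txpower_tbl() later subtracts the result from
 * the gain-table index.
 */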
1832static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
1833 s32 current_voltage)
1834{
1835 s32 comp = 0;
1836
1837 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
1838 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
1839 return 0;
1840
1841 iwl4965_math_div_round(current_voltage - eeprom_voltage,
1842 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
1843
1844 if (current_voltage > eeprom_voltage)
1845 comp *= 2;
1846 if ((comp < -2) || (comp > 2))
1847 comp = 0;
1848
1849 return comp;
1850}
1851
1852static const struct iwl_channel_info *
1853iwl4965_get_channel_txpower_info(struct iwl_priv *priv, u8 phymode, u16 channel)
1854{
1855 const struct iwl_channel_info *ch_info;
1856
1857 ch_info = iwl_get_channel_info(priv, phymode, channel);
1858
1859 if (!is_channel_valid(ch_info))
1860 return NULL;
1861
1862 return ch_info;
1863}
1864
1865static s32 iwl4965_get_tx_atten_grp(u16 channel)
1866{
1867 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
1868 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
1869 return CALIB_CH_GROUP_5;
1870
1871 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
1872 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
1873 return CALIB_CH_GROUP_1;
1874
1875 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
1876 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
1877 return CALIB_CH_GROUP_2;
1878
1879 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
1880 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
1881 return CALIB_CH_GROUP_3;
1882
1883 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
1884 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
1885 return CALIB_CH_GROUP_4;
1886
1887 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
1888 return -1;
1889}
1890
1891static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
1892{
1893 s32 b = -1;
1894
1895 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
1896 if (priv->eeprom.calib_info.band_info[b].ch_from == 0)
1897 continue;
1898
1899 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from)
1900 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to))
1901 break;
1902 }
1903
1904 return b;
1905}
1906
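/*
 * Linear interpolation of y at x between the calibration points
 * (x1, y1) and (x2, y2), using the rounded division helper above;
 * e.g. x1 = 36, y1 = 10, x2 = 44, y2 = 18, x = 40 yields 14.
 */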
1907static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
1908{
1909 s32 val;
1910
1911 if (x2 == x1)
1912 return y1;
1913 else {
1914 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
1915 return val + y2;
1916 }
1917}
1918
1919static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
1920 struct iwl_eeprom_calib_ch_info *chan_info)
1921{
1922 s32 s = -1;
1923 u32 c;
1924 u32 m;
1925 const struct iwl_eeprom_calib_measure *m1;
1926 const struct iwl_eeprom_calib_measure *m2;
1927 struct iwl_eeprom_calib_measure *omeas;
1928 u32 ch_i1;
1929 u32 ch_i2;
1930
1931 s = iwl4965_get_sub_band(priv, channel);
1932 if (s >= EEPROM_TX_POWER_BANDS) {
1933		IWL_ERROR("Tx power: cannot find channel %d\n", channel);
1934 return -1;
1935 }
1936
1937 ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num;
1938 ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num;
1939 chan_info->ch_num = (u8) channel;
1940
1941 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
1942 channel, s, ch_i1, ch_i2);
1943
1944 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
1945 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
1946 m1 = &(priv->eeprom.calib_info.band_info[s].ch1.
1947 measurements[c][m]);
1948 m2 = &(priv->eeprom.calib_info.band_info[s].ch2.
1949 measurements[c][m]);
1950 omeas = &(chan_info->measurements[c][m]);
1951
1952 omeas->actual_pow =
1953 (u8) iwl4965_interpolate_value(channel, ch_i1,
1954 m1->actual_pow,
1955 ch_i2,
1956 m2->actual_pow);
1957 omeas->gain_idx =
1958 (u8) iwl4965_interpolate_value(channel, ch_i1,
1959 m1->gain_idx, ch_i2,
1960 m2->gain_idx);
1961 omeas->temperature =
1962 (u8) iwl4965_interpolate_value(channel, ch_i1,
1963 m1->temperature,
1964 ch_i2,
1965 m2->temperature);
1966 omeas->pa_det =
1967 (s8) iwl4965_interpolate_value(channel, ch_i1,
1968 m1->pa_det, ch_i2,
1969 m2->pa_det);
1970
1971 IWL_DEBUG_TXPOWER
1972 ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
1973 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
1974 IWL_DEBUG_TXPOWER
1975 ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
1976 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
1977 IWL_DEBUG_TXPOWER
1978 ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
1979 m1->pa_det, m2->pa_det, omeas->pa_det);
1980 IWL_DEBUG_TXPOWER
1981 ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
1982 m1->temperature, m2->temperature,
1983 omeas->temperature);
1984 }
1985 }
1986
1987 return 0;
1988}
1989
1990/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
1991 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
1992static s32 back_off_table[] = {
1993 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
1994 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
1995 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
1996 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
1997 10 /* CCK */
1998};
1999
2000/* Thermal compensation values for txpower for various frequency ranges ...
2001 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
2002static struct iwl_txpower_comp_entry {
2003 s32 degrees_per_05db_a;
2004 s32 degrees_per_05db_a_denom;
2005} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
2006 {9, 2}, /* group 0 5.2, ch 34-43 */
2007 {4, 1}, /* group 1 5.2, ch 44-70 */
2008 {4, 1}, /* group 2 5.2, ch 71-124 */
2009 {4, 1}, /* group 3 5.2, ch 125-200 */
2010 {3, 1} /* group 4 2.4, ch all */
2011};
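/*
 * Example: for group 1 (4 degrees C per half-dB), a chain running 12 C
 * above its factory calibration temperature gets a compensation of about
 * 12 * 1 / 4 = 3 half-dB gain-index steps; the rounded division itself
 * is done via iwl4965_math_div_round() in iwl4965_fill_txpower_tbl()
 * below.
 */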
2012
2013static s32 get_min_power_index(s32 rate_power_index, u32 band)
2014{
2015 if (!band) {
2016 if ((rate_power_index & 7) <= 4)
2017 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
2018 }
2019 return MIN_TX_GAIN_INDEX;
2020}
2021
2022struct gain_entry {
2023 u8 dsp;
2024 u8 radio;
2025};
2026
2027static const struct gain_entry gain_table[2][108] = {
2028 /* 5.2GHz power gain index table */
2029 {
2030 {123, 0x3F}, /* highest txpower */
2031 {117, 0x3F},
2032 {110, 0x3F},
2033 {104, 0x3F},
2034 {98, 0x3F},
2035 {110, 0x3E},
2036 {104, 0x3E},
2037 {98, 0x3E},
2038 {110, 0x3D},
2039 {104, 0x3D},
2040 {98, 0x3D},
2041 {110, 0x3C},
2042 {104, 0x3C},
2043 {98, 0x3C},
2044 {110, 0x3B},
2045 {104, 0x3B},
2046 {98, 0x3B},
2047 {110, 0x3A},
2048 {104, 0x3A},
2049 {98, 0x3A},
2050 {110, 0x39},
2051 {104, 0x39},
2052 {98, 0x39},
2053 {110, 0x38},
2054 {104, 0x38},
2055 {98, 0x38},
2056 {110, 0x37},
2057 {104, 0x37},
2058 {98, 0x37},
2059 {110, 0x36},
2060 {104, 0x36},
2061 {98, 0x36},
2062 {110, 0x35},
2063 {104, 0x35},
2064 {98, 0x35},
2065 {110, 0x34},
2066 {104, 0x34},
2067 {98, 0x34},
2068 {110, 0x33},
2069 {104, 0x33},
2070 {98, 0x33},
2071 {110, 0x32},
2072 {104, 0x32},
2073 {98, 0x32},
2074 {110, 0x31},
2075 {104, 0x31},
2076 {98, 0x31},
2077 {110, 0x30},
2078 {104, 0x30},
2079 {98, 0x30},
2080 {110, 0x25},
2081 {104, 0x25},
2082 {98, 0x25},
2083 {110, 0x24},
2084 {104, 0x24},
2085 {98, 0x24},
2086 {110, 0x23},
2087 {104, 0x23},
2088 {98, 0x23},
2089 {110, 0x22},
2090 {104, 0x18},
2091 {98, 0x18},
2092 {110, 0x17},
2093 {104, 0x17},
2094 {98, 0x17},
2095 {110, 0x16},
2096 {104, 0x16},
2097 {98, 0x16},
2098 {110, 0x15},
2099 {104, 0x15},
2100 {98, 0x15},
2101 {110, 0x14},
2102 {104, 0x14},
2103 {98, 0x14},
2104 {110, 0x13},
2105 {104, 0x13},
2106 {98, 0x13},
2107 {110, 0x12},
2108 {104, 0x08},
2109 {98, 0x08},
2110 {110, 0x07},
2111 {104, 0x07},
2112 {98, 0x07},
2113 {110, 0x06},
2114 {104, 0x06},
2115 {98, 0x06},
2116 {110, 0x05},
2117 {104, 0x05},
2118 {98, 0x05},
2119 {110, 0x04},
2120 {104, 0x04},
2121 {98, 0x04},
2122 {110, 0x03},
2123 {104, 0x03},
2124 {98, 0x03},
2125 {110, 0x02},
2126 {104, 0x02},
2127 {98, 0x02},
2128 {110, 0x01},
2129 {104, 0x01},
2130 {98, 0x01},
2131 {110, 0x00},
2132 {104, 0x00},
2133 {98, 0x00},
2134 {93, 0x00},
2135 {88, 0x00},
2136 {83, 0x00},
2137 {78, 0x00},
2138 },
2139 /* 2.4GHz power gain index table */
2140 {
2141 {110, 0x3f}, /* highest txpower */
2142 {104, 0x3f},
2143 {98, 0x3f},
2144 {110, 0x3e},
2145 {104, 0x3e},
2146 {98, 0x3e},
2147 {110, 0x3d},
2148 {104, 0x3d},
2149 {98, 0x3d},
2150 {110, 0x3c},
2151 {104, 0x3c},
2152 {98, 0x3c},
2153 {110, 0x3b},
2154 {104, 0x3b},
2155 {98, 0x3b},
2156 {110, 0x3a},
2157 {104, 0x3a},
2158 {98, 0x3a},
2159 {110, 0x39},
2160 {104, 0x39},
2161 {98, 0x39},
2162 {110, 0x38},
2163 {104, 0x38},
2164 {98, 0x38},
2165 {110, 0x37},
2166 {104, 0x37},
2167 {98, 0x37},
2168 {110, 0x36},
2169 {104, 0x36},
2170 {98, 0x36},
2171 {110, 0x35},
2172 {104, 0x35},
2173 {98, 0x35},
2174 {110, 0x34},
2175 {104, 0x34},
2176 {98, 0x34},
2177 {110, 0x33},
2178 {104, 0x33},
2179 {98, 0x33},
2180 {110, 0x32},
2181 {104, 0x32},
2182 {98, 0x32},
2183 {110, 0x31},
2184 {104, 0x31},
2185 {98, 0x31},
2186 {110, 0x30},
2187 {104, 0x30},
2188 {98, 0x30},
2189 {110, 0x6},
2190 {104, 0x6},
2191 {98, 0x6},
2192 {110, 0x5},
2193 {104, 0x5},
2194 {98, 0x5},
2195 {110, 0x4},
2196 {104, 0x4},
2197 {98, 0x4},
2198 {110, 0x3},
2199 {104, 0x3},
2200 {98, 0x3},
2201 {110, 0x2},
2202 {104, 0x2},
2203 {98, 0x2},
2204 {110, 0x1},
2205 {104, 0x1},
2206 {98, 0x1},
2207 {110, 0x0},
2208 {104, 0x0},
2209 {98, 0x0},
2210 {97, 0},
2211 {96, 0},
2212 {95, 0},
2213 {94, 0},
2214 {93, 0},
2215 {92, 0},
2216 {91, 0},
2217 {90, 0},
2218 {89, 0},
2219 {88, 0},
2220 {87, 0},
2221 {86, 0},
2222 {85, 0},
2223 {84, 0},
2224 {83, 0},
2225 {82, 0},
2226 {81, 0},
2227 {80, 0},
2228 {79, 0},
2229 {78, 0},
2230 {77, 0},
2231 {76, 0},
2232 {75, 0},
2233 {74, 0},
2234 {73, 0},
2235 {72, 0},
2236 {71, 0},
2237 {70, 0},
2238 {69, 0},
2239 {68, 0},
2240 {67, 0},
2241 {66, 0},
2242 {65, 0},
2243 {64, 0},
2244 {63, 0},
2245 {62, 0},
2246 {61, 0},
2247 {60, 0},
2248 {59, 0},
2249 }
2250};
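/*
 * Each successive gain_table entry lowers output power by nominally one
 * half-dB step; within a given radio gain setting the DSP attenuation
 * steps through roughly three values (e.g. 110/104/98) before the radio
 * gain itself is reduced.  iwl4965_fill_txpower_tbl() below indexes this
 * table per band.
 */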
2251
2252static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2253 u8 is_fat, u8 ctrl_chan_high,
2254 struct iwl_tx_power_db *tx_power_tbl)
2255{
2256 u8 saturation_power;
2257 s32 target_power;
2258 s32 user_target_power;
2259 s32 power_limit;
2260 s32 current_temp;
2261 s32 reg_limit;
2262 s32 current_regulatory;
2263 s32 txatten_grp = CALIB_CH_GROUP_MAX;
2264 int i;
2265 int c;
2266 const struct iwl_channel_info *ch_info = NULL;
2267 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
2268 const struct iwl_eeprom_calib_measure *measurement;
2269 s16 voltage;
2270 s32 init_voltage;
2271 s32 voltage_compensation;
2272 s32 degrees_per_05db_num;
2273 s32 degrees_per_05db_denom;
2274 s32 factory_temp;
2275 s32 temperature_comp[2];
2276 s32 factory_gain_index[2];
2277 s32 factory_actual_pwr[2];
2278 s32 power_index;
2279
2280 /* Sanity check requested level (dBm) */
2281 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
2282 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
2283 priv->user_txpower_limit);
2284 return -EINVAL;
2285 }
2286 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
2287 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
2288 priv->user_txpower_limit);
2289 return -EINVAL;
2290 }
2291
2292 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
2293 * are used for indexing into txpower table) */
2294 user_target_power = 2 * priv->user_txpower_limit;
2295
2296 /* Get current (RXON) channel, band, width */
2297 ch_info =
2298 iwl4965_get_channel_txpower_info(priv, priv->phymode, channel);
2299
2300 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2301 is_fat);
2302
2303 if (!ch_info)
2304 return -EINVAL;
2305
2306 /* get txatten group, used to select 1) thermal txpower adjustment
2307 * and 2) mimo txpower balance between Tx chains. */
2308 txatten_grp = iwl4965_get_tx_atten_grp(channel);
2309 if (txatten_grp < 0)
2310 return -EINVAL;
2311
2312 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
2313 channel, txatten_grp);
2314
2315 if (is_fat) {
2316 if (ctrl_chan_high)
2317 channel -= 2;
2318 else
2319 channel += 2;
2320 }
2321
2322 /* hardware txpower limits ...
2323 * saturation (clipping distortion) txpowers are in half-dBm */
2324 if (band)
2325 saturation_power = priv->eeprom.calib_info.saturation_power24;
2326 else
2327 saturation_power = priv->eeprom.calib_info.saturation_power52;
2328
2329 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2330 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
2331 if (band)
2332 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
2333 else
2334 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
2335 }
2336
2337 /* regulatory txpower limits ... reg_limit values are in half-dBm,
2338 * max_power_avg values are in dBm, convert * 2 */
2339 if (is_fat)
2340 reg_limit = ch_info->fat_max_power_avg * 2;
2341 else
2342 reg_limit = ch_info->max_power_avg * 2;
2343
2344 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
2345 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
2346 if (band)
2347 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
2348 else
2349 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
2350 }
2351
2352 /* Interpolate txpower calibration values for this channel,
2353 * based on factory calibration tests on spaced channels. */
2354 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2355
2356 /* calculate tx gain adjustment based on power supply voltage */
2357 voltage = priv->eeprom.calib_info.voltage;
2358 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2359 voltage_compensation =
2360 iwl4965_get_voltage_compensation(voltage, init_voltage);
2361
2362 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
2363 init_voltage,
2364 voltage, voltage_compensation);
2365
2366 /* get current temperature (Celsius) */
2367	current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
2368	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
2369 current_temp = KELVIN_TO_CELSIUS(current_temp);
2370
2371 /* select thermal txpower adjustment params, based on channel group
2372 * (same frequency group used for mimo txatten adjustment) */
2373 degrees_per_05db_num =
2374 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
2375 degrees_per_05db_denom =
2376 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
2377
2378 /* get per-chain txpower values from factory measurements */
2379 for (c = 0; c < 2; c++) {
2380 measurement = &ch_eeprom_info.measurements[c][1];
2381
2382 /* txgain adjustment (in half-dB steps) based on difference
2383 * between factory and current temperature */
2384 factory_temp = measurement->temperature;
2385 iwl4965_math_div_round((current_temp - factory_temp) *
2386 degrees_per_05db_denom,
2387 degrees_per_05db_num,
2388 &temperature_comp[c]);
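/* e.g. with a hypothetical calibration of 4 degrees per half-dB step,
 * a chip running 20 degrees warmer than at factory test would get a
 * compensation of 5 half-dB steps */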
2389
2390 factory_gain_index[c] = measurement->gain_idx;
2391 factory_actual_pwr[c] = measurement->actual_pow;
2392
2393 IWL_DEBUG_TXPOWER("chain = %d\n", c);
2394 IWL_DEBUG_TXPOWER("fctry tmp %d, "
2395 "curr tmp %d, comp %d steps\n",
2396 factory_temp, current_temp,
2397 temperature_comp[c]);
2398
2399 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
2400 factory_gain_index[c],
2401 factory_actual_pwr[c]);
2402 }
2403
2404 /* for each of 33 bit-rates (including 1 for CCK) */
2405 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
2406 u8 is_mimo_rate;
2407 union iwl_tx_power_dual_stream tx_power;
2408
2409 /* for mimo, reduce each chain's txpower by half
2410 * (3dB, 6 steps), so total output power is regulatory
2411 * compliant. */
2412 if (i & 0x8) {
2413 current_regulatory = reg_limit -
2414 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
2415 is_mimo_rate = 1;
2416 } else {
2417 current_regulatory = reg_limit;
2418 is_mimo_rate = 0;
2419 }
2420
2421 /* find txpower limit, either hardware or regulatory */
2422 power_limit = saturation_power - back_off_table[i];
2423 if (power_limit > current_regulatory)
2424 power_limit = current_regulatory;
2425
2426 /* reduce user's txpower request if necessary
2427 * for this rate on this channel */
2428 target_power = user_target_power;
2429 if (target_power > power_limit)
2430 target_power = power_limit;
2431
2432 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
2433 i, saturation_power - back_off_table[i],
2434 current_regulatory, user_target_power,
2435 target_power);
2436
2437 /* for each of 2 Tx chains (radio transmitters) */
2438 for (c = 0; c < 2; c++) {
2439 s32 atten_value;
2440
2441 if (is_mimo_rate)
2442 atten_value =
2443 (s32)le32_to_cpu(priv->card_alive_init.
2444 tx_atten[txatten_grp][c]);
2445 else
2446 atten_value = 0;
2447
2448 /* calculate index; higher index means lower txpower */
2449 power_index = (u8) (factory_gain_index[c] -
2450 (target_power -
2451 factory_actual_pwr[c]) -
2452 temperature_comp[c] -
2453 voltage_compensation +
2454 atten_value);
2455
2456/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
2457 power_index); */
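/* worked example with hypothetical numbers: factory gain index 40,
 * factory measured power 30 half-dBm, target 24 half-dBm, temperature
 * compensation 2 steps, no voltage or Tx-attenuation adjustment:
 * power_index = 40 - (24 - 30) - 2 - 0 + 0 = 44 */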
2458
2459 if (power_index < get_min_power_index(i, band))
2460 power_index = get_min_power_index(i, band);
2461
2462 /* adjust 5 GHz index to support negative indexes */
2463 if (!band)
2464 power_index += 9;
2465
2466 /* CCK, rate 32, reduce txpower for CCK */
2467 if (i == POWER_TABLE_CCK_ENTRY)
2468 power_index +=
2469 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
2470
2471 /* stay within the table! */
2472 if (power_index > 107) {
2473 IWL_WARNING("txpower index %d > 107\n",
2474 power_index);
2475 power_index = 107;
2476 }
2477 if (power_index < 0) {
2478 IWL_WARNING("txpower index %d < 0\n",
2479 power_index);
2480 power_index = 0;
2481 }
2482
2483 /* fill txpower command for this rate/chain */
2484 tx_power.s.radio_tx_gain[c] =
2485 gain_table[band][power_index].radio;
2486 tx_power.s.dsp_predis_atten[c] =
2487 gain_table[band][power_index].dsp;
2488
2489 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
2490 "gain 0x%02x dsp %d\n",
2491 c, atten_value, power_index,
2492 tx_power.s.radio_tx_gain[c],
2493 tx_power.s.dsp_predis_atten[c]);
2494 }/* for each chain */
2495
2496 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
2497
2498 }/* for each rate */
2499
2500 return 0;
2501}
2502
2503/**
2504 * iwl_hw_reg_send_txpower - Configure the TXPOWER level user limit
2505 *
2506 * Uses the active RXON for channel, band, and characteristics (fat, high).
2507 * The power limit is taken from priv->user_txpower_limit.
2508 */
2509int iwl_hw_reg_send_txpower(struct iwl_priv *priv)
2510{
2511 struct iwl_txpowertable_cmd cmd = { 0 };
2512 int rc = 0;
2513 u8 band = 0;
2514 u8 is_fat = 0;
2515 u8 ctrl_chan_high = 0;
2516
2517 if (test_bit(STATUS_SCANNING, &priv->status)) {
2518 /* If this gets hit a lot, switch it to a BUG() and catch
2519 * the stack trace to find out who is calling this during
2520 * a scan. */
2521 IWL_WARNING("TX Power requested while scanning!\n");
2522 return -EAGAIN;
2523 }
2524
2525 band = ((priv->phymode == MODE_IEEE80211B) ||
2526 (priv->phymode == MODE_IEEE80211G));
2527
2528 is_fat = is_fat_channel(priv->active_rxon.flags);
2529
2530 if (is_fat &&
2531 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2532 ctrl_chan_high = 1;
2533
2534 cmd.band = band;
2535 cmd.channel = priv->active_rxon.channel;
2536
2537 rc = iwl4965_fill_txpower_tbl(priv, band,
2538 le16_to_cpu(priv->active_rxon.channel),
2539 is_fat, ctrl_chan_high, &cmd.tx_power);
2540 if (rc)
2541 return rc;
2542
2543 rc = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
2544 return rc;
2545}
2546
2547int iwl_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2548{
2549 int rc;
2550 u8 band = 0;
2551 u8 is_fat = 0;
2552 u8 ctrl_chan_high = 0;
2553 struct iwl_channel_switch_cmd cmd = { 0 };
2554 const struct iwl_channel_info *ch_info;
2555
2556 band = ((priv->phymode == MODE_IEEE80211B) ||
2557 (priv->phymode == MODE_IEEE80211G));
2558
2559 ch_info = iwl_get_channel_info(priv, priv->phymode, channel);
2560
2561 is_fat = is_fat_channel(priv->staging_rxon.flags);
2562
2563 if (is_fat &&
2564 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2565 ctrl_chan_high = 1;
2566
2567 cmd.band = band;
2568 cmd.expect_beacon = 0;
2569 cmd.channel = cpu_to_le16(channel);
2570 cmd.rxon_flags = priv->active_rxon.flags;
2571 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
2572 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
2573 if (ch_info)
2574 cmd.expect_beacon = is_channel_radar(ch_info);
2575 else
2576 cmd.expect_beacon = 1;
2577
2578 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
2579 ctrl_chan_high, &cmd.tx_power);
2580 if (rc) {
2581 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
2582 return rc;
2583 }
2584
2585 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
2586 return rc;
2587}
2588
2589#define RTS_HCCA_RETRY_LIMIT 3
2590#define RTS_DFAULT_RETRY_LIMIT 60
2591
2592void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv,
2593 struct iwl_cmd *cmd,
2594 struct ieee80211_tx_control *ctrl,
2595 struct ieee80211_hdr *hdr, int sta_id,
2596 int is_hcca)
2597{
2598 u8 rate;
2599 u8 rts_retry_limit = 0;
2600 u8 data_retry_limit = 0;
2601 __le32 tx_flags;
2602 u16 fc = le16_to_cpu(hdr->frame_control);
2603
2604 tx_flags = cmd->cmd.tx.tx_flags;
2605
2606 rate = iwl_rates[ctrl->tx_rate].plcp;
2607
2608 rts_retry_limit = (is_hcca) ?
2609 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2610
2611 if (ieee80211_is_probe_response(fc)) {
2612 data_retry_limit = 3;
2613 if (data_retry_limit < rts_retry_limit)
2614 rts_retry_limit = data_retry_limit;
2615 } else
2616 data_retry_limit = IWL_DEFAULT_TX_RETRY;
2617
2618 if (priv->data_retry_limit != -1)
2619 data_retry_limit = priv->data_retry_limit;
2620
2621 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2622 switch (fc & IEEE80211_FCTL_STYPE) {
2623 case IEEE80211_STYPE_AUTH:
2624 case IEEE80211_STYPE_DEAUTH:
2625 case IEEE80211_STYPE_ASSOC_REQ:
2626 case IEEE80211_STYPE_REASSOC_REQ:
2627 if (tx_flags & TX_CMD_FLG_RTS_MSK) {
2628 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2629 tx_flags |= TX_CMD_FLG_CTS_MSK;
2630 }
2631 break;
2632 default:
2633 break;
2634 }
2635 }
2636
2637 cmd->cmd.tx.rts_retry_limit = rts_retry_limit;
2638 cmd->cmd.tx.data_retry_limit = data_retry_limit;
2639 cmd->cmd.tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate, 0);
2640 cmd->cmd.tx.tx_flags = tx_flags;
2641}
2642
2643int iwl_hw_get_rx_read(struct iwl_priv *priv)
2644{
2645 struct iwl_shared *shared_data = priv->hw_setting.shared_virt;
2646
2647 return IWL_GET_BITS(*shared_data, rb_closed_stts_rb_num);
2648}
2649
2650int iwl_hw_get_temperature(struct iwl_priv *priv)
2651{
2652 return priv->temperature;
2653}
2654
2655unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
2656 struct iwl_frame *frame, u8 rate)
2657{
2658 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
2659 unsigned int frame_size;
2660
2661 tx_beacon_cmd = &frame->u.beacon;
2662 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2663
2664 tx_beacon_cmd->tx.sta_id = IWL4965_BROADCAST_ID;
2665 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2666
2667 frame_size = iwl_fill_beacon_frame(priv,
2668 tx_beacon_cmd->frame,
2669 BROADCAST_ADDR,
2670 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2671
2672 BUG_ON(frame_size > MAX_MPDU_SIZE);
2673 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2674
2675 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
2676 tx_beacon_cmd->tx.rate_n_flags =
2677 iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
2678 else
2679 tx_beacon_cmd->tx.rate_n_flags =
2680 iwl_hw_set_rate_n_flags(rate, 0);
2681
2682 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2683 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
2684 return (sizeof(*tx_beacon_cmd) + frame_size);
2685}
2686
2687int iwl_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2688{
2689 int rc;
2690 unsigned long flags;
2691 int txq_id = txq->q.id;
2692
2693 spin_lock_irqsave(&priv->lock, flags);
2694 rc = iwl_grab_restricted_access(priv);
2695 if (rc) {
2696 spin_unlock_irqrestore(&priv->lock, flags);
2697 return rc;
2698 }
2699
2700 iwl_write_restricted(priv, FH_MEM_CBBC_QUEUE(txq_id),
2701 txq->q.dma_addr >> 8);
2702 iwl_write_restricted(
2703 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
2704 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2705 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
2706 iwl_release_restricted_access(priv);
2707 spin_unlock_irqrestore(&priv->lock, flags);
2708
2709 return 0;
2710}
2711
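/* Return the address bits above bit 31, used for the high-address
 * fields of the TFD below.  The double 16-bit shift, rather than a
 * single ">> 32", avoids a shift-count warning when dma_addr_t is only
 * 32 bits wide; in that case the helper simply returns 0. */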
2712static inline u8 iwl4965_get_dma_hi_address(dma_addr_t addr)
2713{
2714 return sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0;
2715}
2716
2717int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
2718 dma_addr_t addr, u16 len)
2719{
2720 int index, is_odd;
2721 struct iwl_tfd_frame *tfd = ptr;
2722 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
2723
2724 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
2725 IWL_ERROR("Error: cannot send more than %d chunks\n",
2726 MAX_NUM_OF_TBS);
2727 return -EINVAL;
2728 }
2729
2730 index = num_tbs / 2;
2731 is_odd = num_tbs & 0x1;
2732
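/* Each tfd->pa[] slot packs two transmit buffers: even-numbered
 * buffers use the full-width tb1 address/length fields, odd-numbered
 * buffers use the more tightly packed tb2 fields. */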
2733 if (!is_odd) {
2734 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
2735 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
2736 iwl4965_get_dma_hi_address(addr));
2737 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
2738 } else {
2739 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
2740 (u32) (addr & 0xffff));
2741 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
2742 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
2743 }
2744
2745 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
2746
2747 return 0;
2748}
2749
2750void iwl_hw_card_show_info(struct iwl_priv *priv)
2751{
2752 u16 hw_version = priv->eeprom.board_revision_4965;
2753
2754 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n",
2755 ((hw_version >> 8) & 0x0F),
2756 ((hw_version >> 8) >> 4), (hw_version & 0x00FF));
2757
2758 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
2759 priv->eeprom.board_pba_number_4965);
2760}
2761
2762#define IWL_TX_CRC_SIZE 4
2763#define IWL_TX_DELIMITER_SIZE 4
2764
2765int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv,
2766 struct iwl_tx_queue *txq, u16 byte_cnt)
2767{
2768 int len;
2769 int txq_id = txq->q.id;
2770 struct iwl_shared *shared_data = priv->hw_setting.shared_virt;
2771
2772 if (txq->need_update == 0)
2773 return 0;
2774
2775 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
2776
2777 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2778 tfd_offset[txq->q.first_empty], byte_cnt, len);
2779
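/* Mirror the first IWL4965_MAX_WIN_SIZE entries past the end of the
 * byte-count table so a full aggregation window can be read without
 * wrapping around. */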
2780 if (txq->q.first_empty < IWL4965_MAX_WIN_SIZE)
2781 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2782 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.first_empty],
2783 byte_cnt, len);
2784
2785 return 0;
2786}
2787
2788/* Set up Rx receiver/antenna/chain usage in "staging" RXON image.
2789 * This should not be used for the scan command ... it puts data in the wrong place. */
2790void iwl4965_set_rxon_chain(struct iwl_priv *priv)
2791{
2792 u8 is_single = is_single_stream(priv);
2793 u8 idle_state, rx_state;
2794
2795 priv->staging_rxon.rx_chain = 0;
2796 rx_state = idle_state = 3;
2797
2798 /* Tell uCode which antennas are actually connected.
2799 * Before first association, we assume all antennas are connected.
2800 * Just after first association, iwl4965_noise_calibration()
2801 * checks which antennas actually *are* connected. */
2802 priv->staging_rxon.rx_chain |=
2803 cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
2804
2805 /* How many receivers should we use? */
2806 iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
2807 priv->staging_rxon.rx_chain |=
2808 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
2809 priv->staging_rxon.rx_chain |=
2810 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
2811
2812 if (!is_single && (rx_state >= 2) &&
2813 !test_bit(STATUS_POWER_PMI, &priv->status))
2814 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
2815 else
2816 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
2817
2818 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
2819}
2820
2821#ifdef CONFIG_IWLWIFI_HT
2822#ifdef CONFIG_IWLWIFI_HT_AGG
2823 /*
2824  * Get the traffic load value for tid
2825  */
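/*
 * Traffic is tracked in a ring of TID_QUEUE_MAX_SIZE time cells, each
 * TID_QUEUE_CELL_SPACING ms wide.  When the window overflows, cells
 * older than TID_MAX_TIME_DIFF are aged out from the head before the
 * running total is used.
 */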
2826static u32 iwl4965_tl_get_load(struct iwl_priv *priv, u8 tid)
2827{
2828 u32 load = 0;
2829 u32 current_time = jiffies_to_msecs(jiffies);
2830 u32 time_diff;
2831 s32 index;
2832 unsigned long flags;
2833 struct iwl_traffic_load *tid_ptr = NULL;
2834
2835 if (tid >= TID_MAX_LOAD_COUNT)
2836 return 0;
2837
2838 tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
2839
2840 current_time -= current_time % TID_ROUND_VALUE;
2841
2842 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
2843 if (!(tid_ptr->queue_count))
2844 goto out;
2845
2846 time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
2847 index = time_diff / TID_QUEUE_CELL_SPACING;
2848
2849 if (index >= TID_QUEUE_MAX_SIZE) {
2850 u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
2851
2852 while (tid_ptr->queue_count &&
2853 (tid_ptr->time_stamp < oldest_time)) {
2854 tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
2855 tid_ptr->packet_count[tid_ptr->head] = 0;
2856 tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
2857 tid_ptr->queue_count--;
2858 tid_ptr->head++;
2859 if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
2860 tid_ptr->head = 0;
2861 }
2862 }
2863 load = tid_ptr->total;
2864
2865 out:
2866 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
2867 return load;
2868}
2869
2870 /*
2871  * Increment the traffic load value for tid and also remove
2872  * any old values that have aged past the tracking time period
2873  */
2874static void iwl4965_tl_add_packet(struct iwl_priv *priv, u8 tid)
2875{
2876 u32 current_time = jiffies_to_msecs(jiffies);
2877 u32 time_diff;
2878 s32 index;
2879 unsigned long flags;
2880 struct iwl_traffic_load *tid_ptr = NULL;
2881
2882 if (tid >= TID_MAX_LOAD_COUNT)
2883 return;
2884
2885 tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
2886
2887 current_time -= current_time % TID_ROUND_VALUE;
2888
2889 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
2890 if (!(tid_ptr->queue_count)) {
2891 tid_ptr->total = 1;
2892 tid_ptr->time_stamp = current_time;
2893 tid_ptr->queue_count = 1;
2894 tid_ptr->head = 0;
2895 tid_ptr->packet_count[0] = 1;
2896 goto out;
2897 }
2898
2899 time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
2900 index = time_diff / TID_QUEUE_CELL_SPACING;
2901
2902 if (index >= TID_QUEUE_MAX_SIZE) {
2903 u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
2904
2905 while (tid_ptr->queue_count &&
2906 (tid_ptr->time_stamp < oldest_time)) {
2907 tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
2908 tid_ptr->packet_count[tid_ptr->head] = 0;
2909 tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
2910 tid_ptr->queue_count--;
2911 tid_ptr->head++;
2912 if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
2913 tid_ptr->head = 0;
2914 }
2915 }
2916
2917 index = (tid_ptr->head + index) % TID_QUEUE_MAX_SIZE;
2918 tid_ptr->packet_count[index] = tid_ptr->packet_count[index] + 1;
2919 tid_ptr->total = tid_ptr->total + 1;
2920
2921 if ((index + 1) > tid_ptr->queue_count)
2922 tid_ptr->queue_count = index + 1;
2923 out:
2924 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
2925
2926}
2927
2928#define MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS 7
2929enum HT_STATUS {
2930 BA_STATUS_FAILURE = 0,
2931 BA_STATUS_INITIATOR_DELBA,
2932 BA_STATUS_RECIPIENT_DELBA,
2933 BA_STATUS_RENEW_ADDBA_REQUEST,
2934 BA_STATUS_ACTIVE,
2935};
2936
2937static u8 iwl4964_tl_ba_avail(struct iwl_priv *priv)
2938{
2939 int i;
2940 struct iwl_lq_mngr *lq;
2941 u8 count = 0;
2942 u16 msk;
2943
2944 lq = (struct iwl_lq_mngr *)&(priv->lq_mngr);
2945 for (i = 0; i < TID_MAX_LOAD_COUNT ; i++) {
2946 msk = 1 << i;
2947 if ((lq->agg_ctrl.granted_ba & msk) ||
2948 (lq->agg_ctrl.wait_for_agg_status & msk))
2949 count++;
2950 }
2951
2952 if (count < MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS)
2953 return 1;
2954
2955 return 0;
2956}
2957
2958static void iwl4965_ba_status(struct iwl_priv *priv,
2959 u8 tid, enum HT_STATUS status);
2960
2961static int iwl4965_perform_addba(struct iwl_priv *priv, u8 tid, u32 length,
2962 u32 ba_timeout)
2963{
2964 int rc;
2965
2966 rc = ieee80211_start_BA_session(priv->hw, priv->bssid, tid);
2967 if (rc)
2968 iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
2969
2970 return rc;
2971}
2972
2973static int iwl4965_perform_delba(struct iwl_priv *priv, u8 tid)
2974{
2975 int rc;
2976
2977 rc = ieee80211_stop_BA_session(priv->hw, priv->bssid, tid);
2978 if (rc)
2979 iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
2980
2981 return rc;
2982}
2983
2984static void iwl4965_turn_on_agg_for_tid(struct iwl_priv *priv,
2985 struct iwl_lq_mngr *lq,
2986 u8 auto_agg, u8 tid)
2987{
2988 u32 tid_msk = (1 << tid);
2989 unsigned long flags;
2990
2991 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
2992/*
2993 if ((auto_agg) && (!lq->enable_counter)){
2994 lq->agg_ctrl.next_retry = 0;
2995 lq->agg_ctrl.tid_retry = 0;
2996 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
2997 return;
2998 }
2999*/
3000 if (!(lq->agg_ctrl.granted_ba & tid_msk) &&
3001 (lq->agg_ctrl.requested_ba & tid_msk)) {
3002 u8 available_queues;
3003 u32 load;
3004
3005 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3006 available_queues = iwl4964_tl_ba_avail(priv);
3007 load = iwl4965_tl_get_load(priv, tid);
3008
3009 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3010 if (!available_queues) {
3011 if (auto_agg)
3012 lq->agg_ctrl.tid_retry |= tid_msk;
3013 else {
3014 lq->agg_ctrl.requested_ba &= ~tid_msk;
3015 lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
3016 }
3017 } else if ((auto_agg) &&
3018 ((load <= lq->agg_ctrl.tid_traffic_load_threshold) ||
3019 ((lq->agg_ctrl.wait_for_agg_status & tid_msk))))
3020 lq->agg_ctrl.tid_retry |= tid_msk;
3021 else {
3022 lq->agg_ctrl.wait_for_agg_status |= tid_msk;
3023 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3024 iwl4965_perform_addba(priv, tid, 0x40,
3025 lq->agg_ctrl.ba_timeout);
3026 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3027 }
3028 }
3029 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3030}
3031
3032static void iwl4965_turn_on_agg(struct iwl_priv *priv, u8 tid)
3033{
3034 struct iwl_lq_mngr *lq;
3035 unsigned long flags;
3036
3037 lq = (struct iwl_lq_mngr *)&(priv->lq_mngr);
3038
3039 if ((tid < TID_MAX_LOAD_COUNT))
3040 iwl4965_turn_on_agg_for_tid(priv, lq, lq->agg_ctrl.auto_agg,
3041 tid);
3042 else if (tid == TID_ALL_SPECIFIED) {
3043 if (lq->agg_ctrl.requested_ba) {
3044 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
3045 iwl4965_turn_on_agg_for_tid(priv, lq,
3046 lq->agg_ctrl.auto_agg, tid);
3047 } else {
3048 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3049 lq->agg_ctrl.tid_retry = 0;
3050 lq->agg_ctrl.next_retry = 0;
3051 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3052 }
3053 }
3054
3055}
3056
3057void iwl4965_turn_off_agg(struct iwl_priv *priv, u8 tid)
3058{
3059 u32 tid_msk;
3060 struct iwl_lq_mngr *lq;
3061 unsigned long flags;
3062
3063 lq = (struct iwl_lq_mngr *)&(priv->lq_mngr);
3064
3065 if ((tid < TID_MAX_LOAD_COUNT)) {
3066 tid_msk = 1 << tid;
3067 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3068 lq->agg_ctrl.wait_for_agg_status |= tid_msk;
3069 lq->agg_ctrl.requested_ba &= ~tid_msk;
3070 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3071 iwl4965_perform_delba(priv, tid);
3072 } else if (tid == TID_ALL_SPECIFIED) {
3073 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3074 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
3075 tid_msk = 1 << tid;
3076 lq->agg_ctrl.wait_for_agg_status |= tid_msk;
3077 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3078 iwl4965_perform_delba(priv, tid);
3079 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3080 }
3081 lq->agg_ctrl.requested_ba = 0;
3082 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3083 }
3084}
3085
3086static void iwl4965_ba_status(struct iwl_priv *priv,
3087 u8 tid, enum HT_STATUS status)
3088{
3089 struct iwl_lq_mngr *lq;
3090 u32 tid_msk = (1 << tid);
3091 unsigned long flags;
3092
3093 lq = (struct iwl_lq_mngr *)&(priv->lq_mngr);
3094
3095 if ((tid >= TID_MAX_LOAD_COUNT))
3096 goto out;
3097
3098 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3099 switch (status) {
3100 case BA_STATUS_ACTIVE:
3101 if (!(lq->agg_ctrl.granted_ba & tid_msk))
3102 lq->agg_ctrl.granted_ba |= tid_msk;
3103 break;
3104 default:
3105 if ((lq->agg_ctrl.granted_ba & tid_msk))
3106 lq->agg_ctrl.granted_ba &= ~tid_msk;
3107 break;
3108 }
3109
3110 lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
3111 if (status != BA_STATUS_ACTIVE) {
3112 if (lq->agg_ctrl.auto_agg) {
3113 lq->agg_ctrl.tid_retry |= tid_msk;
3114 lq->agg_ctrl.next_retry =
3115 jiffies + msecs_to_jiffies(500);
3116 } else
3117 lq->agg_ctrl.requested_ba &= ~tid_msk;
3118 }
3119 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3120 out:
3121 return;
3122}
3123
3124static void iwl4965_bg_agg_work(struct work_struct *work)
3125{
3126 struct iwl_priv *priv = container_of(work, struct iwl_priv,
3127 agg_work);
3128
3129 u32 tid;
3130 u32 retry_tid;
3131 u32 tid_msk;
3132 unsigned long flags;
3133 struct iwl_lq_mngr *lq = (struct iwl_lq_mngr *)&(priv->lq_mngr);
3134
3135 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3136 retry_tid = lq->agg_ctrl.tid_retry;
3137 lq->agg_ctrl.tid_retry = 0;
3138 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3139
3140 if (retry_tid == TID_ALL_SPECIFIED)
3141 iwl4965_turn_on_agg(priv, TID_ALL_SPECIFIED);
3142 else {
3143 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
3144 tid_msk = (1 << tid);
3145 if (retry_tid & tid_msk)
3146 iwl4965_turn_on_agg(priv, tid);
3147 }
3148 }
3149
3150 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3151 if (lq->agg_ctrl.tid_retry)
3152 lq->agg_ctrl.next_retry = jiffies + msecs_to_jiffies(500);
3153 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3154 return;
3155}
3156#endif /*CONFIG_IWLWIFI_HT_AGG */
3157#endif /* CONFIG_IWLWIFI_HT */
3158
3159int iwl4965_tx_cmd(struct iwl_priv *priv, struct iwl_cmd *out_cmd,
3160 u8 sta_id, dma_addr_t txcmd_phys,
3161 struct ieee80211_hdr *hdr, u8 hdr_len,
3162 struct ieee80211_tx_control *ctrl, void *sta_in)
3163{
3164 struct iwl_tx_cmd cmd;
3165 struct iwl_tx_cmd *tx = (struct iwl_tx_cmd *)&out_cmd->cmd.payload[0];
3166 dma_addr_t scratch_phys;
3167 u8 unicast = 0;
3168 u8 is_data = 1;
3169 u16 fc;
3170 u16 rate_flags;
3171 int rate_index = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1);
3172#ifdef CONFIG_IWLWIFI_HT
3173#ifdef CONFIG_IWLWIFI_HT_AGG
3174 __le16 *qc;
3175#endif /*CONFIG_IWLWIFI_HT_AGG */
3176#endif /* CONFIG_IWLWIFI_HT */
3177
3178 unicast = !is_multicast_ether_addr(hdr->addr1);
3179
3180 fc = le16_to_cpu(hdr->frame_control);
3181 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
3182 is_data = 0;
3183
3184 memcpy(&cmd, &(out_cmd->cmd.tx), sizeof(struct iwl_tx_cmd));
3185 memset(tx, 0, sizeof(struct iwl_tx_cmd));
3186 memcpy(tx->hdr, hdr, hdr_len);
3187
3188 tx->len = cmd.len;
3189 tx->driver_txop = cmd.driver_txop;
3190 tx->stop_time.life_time = cmd.stop_time.life_time;
3191 tx->tx_flags = cmd.tx_flags;
3192 tx->sta_id = cmd.sta_id;
3193 tx->tid_tspec = cmd.tid_tspec;
3194 tx->timeout.pm_frame_timeout = cmd.timeout.pm_frame_timeout;
3195 tx->next_frame_len = cmd.next_frame_len;
3196
3197 tx->sec_ctl = cmd.sec_ctl;
3198 memcpy(&(tx->key[0]), &(cmd.key[0]), 16);
3199 tx->tx_flags = cmd.tx_flags;
3200
3201 tx->rts_retry_limit = cmd.rts_retry_limit;
3202 tx->data_retry_limit = cmd.data_retry_limit;
3203
3204 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
3205 offsetof(struct iwl_tx_cmd, scratch);
3206 tx->dram_lsb_ptr = cpu_to_le32(scratch_phys);
3207 tx->dram_msb_ptr = iwl4965_get_dma_hi_address(scratch_phys);
3208
3209 /* Hard coded to start at the highest retry fallback position
3210 * until the 4965 specific rate control algorithm is tied in */
3211 tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
3212
3213 /* Alternate between antenna A and B for successive frames */
3214 if (priv->use_ant_b_for_management_frame) {
3215 priv->use_ant_b_for_management_frame = 0;
3216 rate_flags = RATE_MCS_ANT_B_MSK;
3217 } else {
3218 priv->use_ant_b_for_management_frame = 1;
3219 rate_flags = RATE_MCS_ANT_A_MSK;
3220 }
3221
3222 if (!unicast || !is_data) {
3223 if ((rate_index >= IWL_FIRST_CCK_RATE) &&
3224 (rate_index <= IWL_LAST_CCK_RATE))
3225 rate_flags |= RATE_MCS_CCK_MSK;
3226 } else {
3227 tx->initial_rate_index = 0;
3228 tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
3229 }
3230
3231 tx->rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[rate_index].plcp,
3232 rate_flags);
3233
3234 if (ieee80211_is_probe_request(fc))
3235 tx->tx_flags |= TX_CMD_FLG_TSF_MSK;
3236 else if (ieee80211_is_back_request(fc))
3237 tx->tx_flags |= TX_CMD_FLG_ACK_MSK |
3238 TX_CMD_FLG_IMM_BA_RSP_MASK;
3239#ifdef CONFIG_IWLWIFI_HT
3240#ifdef CONFIG_IWLWIFI_HT_AGG
3241 qc = ieee80211_get_qos_ctrl(hdr);
3242 if (qc &&
3243 (priv->iw_mode != IEEE80211_IF_TYPE_IBSS)) {
3244 u8 tid = 0;
3245 tid = (u8) (le16_to_cpu(*qc) & 0xF);
3246 if (tid < TID_MAX_LOAD_COUNT)
3247 iwl4965_tl_add_packet(priv, tid);
3248 }
3249
3250 if (priv->lq_mngr.agg_ctrl.next_retry &&
3251 (time_after(priv->lq_mngr.agg_ctrl.next_retry, jiffies))) {
3252 unsigned long flags;
3253
3254 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3255 priv->lq_mngr.agg_ctrl.next_retry = 0;
3256 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3257 schedule_work(&priv->agg_work);
3258 }
3259#endif
3260#endif
3261 return 0;
3262}
3263
3264/**
3265 * sign_extend - Sign extend a value using specified bit as sign-bit
3266 *
3267 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
3268 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
3269 *
3270 * @param oper value to sign extend
3271 * @param index 0 based bit index (0<=index<32) to sign bit
3272 */
3273static s32 sign_extend(u32 oper, int index)
3274{
3275 u8 shift = 31 - index;
3276
3277 return (s32)(oper << shift) >> shift;
3278}
3279
3280/**
3281 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
3282 * @priv: driver private data; the temperature reading comes from uCode statistics
3283 *
3284 * A return of <0 indicates bogus data in the statistics
3285 */
3286int iwl4965_get_temperature(const struct iwl_priv *priv)
3287{
3288 s32 temperature;
3289 s32 vt;
3290 s32 R1, R2, R3;
3291 u32 R4;
3292
3293 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
3294 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
3295 IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
3296 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
3297 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
3298 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
3299 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
3300 } else {
3301 IWL_DEBUG_TEMP("Running temperature calibration\n");
3302 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
3303 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
3304 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
3305 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
3306 }
3307
3308 /*
3309 * Temperature is only 23 bits so sign extend out to 32
3310 *
3311 * NOTE If we haven't received a statistics notification yet
3312 * with an updated temperature, use R4 provided to us in the
3313 * ALIVE response. */
3314 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
3315 vt = sign_extend(R4, 23);
3316 else
3317 vt = sign_extend(
3318 le32_to_cpu(priv->statistics.general.temperature), 23);
3319
3320 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
3321 R1, R2, R3, vt);
3322
3323 if (R3 == R1) {
3324 IWL_ERROR("Calibration conflict R1 == R3\n");
3325 return -1;
3326 }
3327
3328 /* Calculate temperature in degrees Kelvin, adjust by 97%.
3329 * Add offset to center the adjustment around 0 degrees Centigrade. */
3330 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
3331 temperature /= (R3 - R1);
3332 temperature = (temperature * 97) / 100 +
3333 TEMPERATURE_CALIB_KELVIN_OFFSET;
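/* i.e. temperature(K) ~= 0.97 * TEMPERATURE_CALIB_A_VAL * (vt - R2)
 *                        / (R3 - R1) + TEMPERATURE_CALIB_KELVIN_OFFSET */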
3334
3335 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
3336 KELVIN_TO_CELSIUS(temperature));
3337
3338 return temperature;
3339}
3340
3341/* Adjust Txpower only if temperature variance is greater than threshold. */
3342#define IWL_TEMPERATURE_THRESHOLD 3
3343
3344/**
3345 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
3346 *
3347 * If the temperature has changed sufficiently, then a recalibration
3348 * is needed.
3349 *
3350 * Assumes caller will replace priv->last_temperature once calibration
3351 * executed.
3352 */
3353static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
3354{
3355 int temp_diff;
3356
3357 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
3358 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
3359 return 0;
3360 }
3361
3362 temp_diff = priv->temperature - priv->last_temperature;
3363
3364 /* get absolute value */
3365 if (temp_diff < 0) {
3366 IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff);
3367 temp_diff = -temp_diff;
3368 } else if (temp_diff == 0)
3369 IWL_DEBUG_POWER("Same temp, \n");
3370 else
3371 IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff);
3372
3373 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
3374 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
3375 return 0;
3376 }
3377
3378 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
3379
3380 return 1;
3381}
3382
3383/* Calculate noise level, based on measurements during network silence just
3384 * before arriving beacon. This measurement can be done only if we know
3385 * exactly when to expect beacons, therefore only when we're associated. */
3386static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
3387{
3388 struct statistics_rx_non_phy *rx_info
3389 = &(priv->statistics.rx.general);
3390 int num_active_rx = 0;
3391 int total_silence = 0;
3392 int bcn_silence_a =
3393 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
3394 int bcn_silence_b =
3395 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
3396 int bcn_silence_c =
3397 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
3398
3399 if (bcn_silence_a) {
3400 total_silence += bcn_silence_a;
3401 num_active_rx++;
3402 }
3403 if (bcn_silence_b) {
3404 total_silence += bcn_silence_b;
3405 num_active_rx++;
3406 }
3407 if (bcn_silence_c) {
3408 total_silence += bcn_silence_c;
3409 num_active_rx++;
3410 }
3411
3412 /* Average among active antennas */
3413 if (num_active_rx)
3414 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
3415 else
3416 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
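/* e.g. two active chains with silence readings of 40 and 50 average
 * to 45, reported as a noise floor of 45 - 107 = -62 dBm */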
3417
3418 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
3419 bcn_silence_a, bcn_silence_b, bcn_silence_c,
3420 priv->last_rx_noise);
3421}
3422
3423void iwl_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
3424{
3425 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3426 int change;
3427 s32 temp;
3428
3429 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
3430 (int)sizeof(priv->statistics), pkt->len);
3431
3432 change = ((priv->statistics.general.temperature !=
3433 pkt->u.stats.general.temperature) ||
3434 ((priv->statistics.flag &
3435 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
3436 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
3437
3438 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
3439
3440 set_bit(STATUS_STATISTICS, &priv->status);
3441
3442 /* Reschedule the statistics timer to occur in
3443 * REG_RECALIB_PERIOD seconds to ensure we get a
3444 * thermal update even if the uCode doesn't give
3445 * us one */
3446 mod_timer(&priv->statistics_periodic, jiffies +
3447 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
3448
3449 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3450 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
3451 iwl4965_rx_calc_noise(priv);
3452#ifdef CONFIG_IWLWIFI_SENSITIVITY
3453 queue_work(priv->workqueue, &priv->sensitivity_work);
3454#endif
3455 }
3456
3457 /* If the hardware hasn't reported a change in
3458 * temperature then don't bother computing a
3459 * calibrated temperature value */
3460 if (!change)
3461 return;
3462
3463 temp = iwl4965_get_temperature(priv);
3464 if (temp < 0)
3465 return;
3466
3467 if (priv->temperature != temp) {
3468 if (priv->temperature)
3469 IWL_DEBUG_TEMP("Temperature changed "
3470 "from %dC to %dC\n",
3471 KELVIN_TO_CELSIUS(priv->temperature),
3472 KELVIN_TO_CELSIUS(temp));
3473 else
3474 IWL_DEBUG_TEMP("Temperature "
3475 "initialized to %dC\n",
3476 KELVIN_TO_CELSIUS(temp));
3477 }
3478
3479 priv->temperature = temp;
3480 set_bit(STATUS_TEMPERATURE, &priv->status);
3481
3482 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3483 iwl4965_is_temp_calib_needed(priv))
3484 queue_work(priv->workqueue, &priv->txpower_work);
3485}
3486
3487static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3488 int include_phy,
3489 struct iwl_rx_mem_buffer *rxb,
3490 struct ieee80211_rx_status *stats)
3491{
3492 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3493 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3494 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3495 struct ieee80211_hdr *hdr;
3496 u16 len;
3497 __le32 *rx_end;
3498 unsigned int skblen;
3499 u32 ampdu_status;
3500
3501 if (!include_phy && priv->last_phy_res[0])
3502 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3503
3504 if (!rx_start) {
3505 IWL_ERROR("MPDU frame without PHY data\n");
3506 return;
3507 }
3508 if (include_phy) {
3509 hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
3510 rx_start->cfg_phy_cnt);
3511
3512 len = le16_to_cpu(rx_start->byte_count);
3513
3514 rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
3515 sizeof(struct iwl4965_rx_phy_res) +
3516 rx_start->cfg_phy_cnt + len);
3517
3518 } else {
3519 struct iwl4965_rx_mpdu_res_start *amsdu =
3520 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3521
3522 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
3523 sizeof(struct iwl4965_rx_mpdu_res_start));
3524 len = le16_to_cpu(amsdu->byte_count);
3525 rx_start->byte_count = amsdu->byte_count;
3526 rx_end = (__le32 *) (((u8 *) hdr) + len);
3527 }
3528 if (len > 2342 || len < 16) {
3529 IWL_DEBUG_DROP("byte count out of range [16,2342]"
3530 " : %d\n", len);
3531 return;
3532 }
3533
3534 ampdu_status = le32_to_cpu(*rx_end);
3535 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
3536
3537 /* start from MAC */
3538 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
3539 skb_put(rxb->skb, len); /* end where data ends */
3540
3541 /* We only process data packets if the interface is open */
3542 if (unlikely(!priv->is_open)) {
3543 IWL_DEBUG_DROP_LIMIT
3544 ("Dropping packet while interface is not open.\n");
3545 return;
3546 }
3547
3548 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
3549 if (iwl_param_hwcrypto)
3550 iwl_set_decrypted_flag(priv, rxb->skb,
3551 ampdu_status, stats);
3552 iwl_handle_data_packet_monitor(priv, rxb, hdr, len, stats, 0);
3553 return;
3554 }
3555
3556 stats->flag = 0;
3557 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3558
3559 if (iwl_param_hwcrypto)
3560 iwl_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);
3561
3562 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3563 priv->alloc_rxb_skb--;
3564 rxb->skb = NULL;
3565#ifdef LED
3566 priv->led_packets += len;
3567 iwl_setup_activity_timer(priv);
3568#endif
3569}
3570
3571/* Calc max signal level (dBm) among 3 possible receivers */
3572static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3573{
3574 /* data from PHY/DSP regarding signal strength, etc.,
3575 * contents are always there, not configurable by host. */
3576 struct iwl4965_rx_non_cfg_phy *ncphy =
3577 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
3578 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
3579 >> IWL_AGC_DB_POS;
3580
3581 u32 valid_antennae =
3582 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
3583 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
3584 u8 max_rssi = 0;
3585 u32 i;
3586
3587 /* Find max rssi among 3 possible receivers.
3588 * These values are measured by the digital signal processor (DSP).
3589 * They should stay fairly constant even as the signal strength varies,
3590 * if the radio's automatic gain control (AGC) is working right.
3591 * AGC value (see below) will provide the "interesting" info. */
3592 for (i = 0; i < 3; i++)
3593 if (valid_antennae & (1 << i))
3594 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
3595
3596 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
3597 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
3598 max_rssi, agc);
3599
3600 /* dBm = max_rssi dB - agc dB - constant.
3601 * Higher AGC (higher radio gain) means lower signal. */
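	/* For illustration only: if IWL_RSSI_OFFSET were 44, readings of
	 * max_rssi = 50 and agc = 60 would yield 50 - 60 - 44 = -54 dBm. */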
3602 return (max_rssi - agc - IWL_RSSI_OFFSET);
3603}
3604
3605#ifdef CONFIG_IWLWIFI_HT
3606
3607/* Parsed Information Elements */
3608struct ieee802_11_elems {
3609 u8 *ds_params;
3610 u8 ds_params_len;
3611 u8 *tim;
3612 u8 tim_len;
3613 u8 *ibss_params;
3614 u8 ibss_params_len;
3615 u8 *erp_info;
3616 u8 erp_info_len;
3617 u8 *ht_cap_param;
3618 u8 ht_cap_param_len;
3619 u8 *ht_extra_param;
3620 u8 ht_extra_param_len;
3621};
3622
3623static int parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems)
3624{
3625 size_t left = len;
3626 u8 *pos = start;
3627 int unknown = 0;
3628
3629 memset(elems, 0, sizeof(*elems));
3630
3631 while (left >= 2) {
3632 u8 id, elen;
3633
3634 id = *pos++;
3635 elen = *pos++;
3636 left -= 2;
3637
3638 if (elen > left)
3639 return -1;
3640
3641 switch (id) {
3642 case WLAN_EID_DS_PARAMS:
3643 elems->ds_params = pos;
3644 elems->ds_params_len = elen;
3645 break;
3646 case WLAN_EID_TIM:
3647 elems->tim = pos;
3648 elems->tim_len = elen;
3649 break;
3650 case WLAN_EID_IBSS_PARAMS:
3651 elems->ibss_params = pos;
3652 elems->ibss_params_len = elen;
3653 break;
3654 case WLAN_EID_ERP_INFO:
3655 elems->erp_info = pos;
3656 elems->erp_info_len = elen;
3657 break;
3658 case WLAN_EID_HT_CAPABILITY:
3659 elems->ht_cap_param = pos;
3660 elems->ht_cap_param_len = elen;
3661 break;
3662 case WLAN_EID_HT_EXTRA_INFO:
3663 elems->ht_extra_param = pos;
3664 elems->ht_extra_param_len = elen;
3665 break;
3666 default:
3667 unknown++;
3668 break;
3669 }
3670
3671 left -= elen;
3672 pos += elen;
3673 }
3674
3675 return 0;
3676}
3677#endif /* CONFIG_IWLWIFI_HT */
3678
3679static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
3680{
3681 unsigned long flags;
3682
3683 spin_lock_irqsave(&priv->sta_lock, flags);
3684 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
3685 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3686 priv->stations[sta_id].sta.sta.modify_mask = 0;
3687 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3688 spin_unlock_irqrestore(&priv->sta_lock, flags);
3689
3690 iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3691}
3692
3693static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
3694{
3695 /* FIXME: need locking over ps_status ??? */
3696 u8 sta_id = iwl_hw_find_station(priv, addr);
3697
3698 if (sta_id != IWL_INVALID_STATION) {
3699 u8 sta_awake = priv->stations[sta_id].
3700 ps_status == STA_PS_STATUS_WAKE;
3701
3702 if (sta_awake && ps_bit)
3703 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
3704 else if (!sta_awake && !ps_bit) {
3705 iwl4965_sta_modify_ps_wake(priv, sta_id);
3706 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
3707 }
3708 }
3709}
3710
3711/* Called for REPLY_4965_RX (legacy ABG frames), or
3712 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
3713static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
3714 struct iwl_rx_mem_buffer *rxb)
3715{
3716 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3717 /* Use phy data (Rx signal strength, etc.) contained within
3718 * this rx packet for legacy frames,
3719 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
3720 int include_phy = (pkt->hdr.cmd == REPLY_4965_RX);
3721 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3722 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3723 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3724 __le32 *rx_end;
3725 unsigned int len = 0;
3726 struct ieee80211_hdr *header;
3727 u16 fc;
3728 struct ieee80211_rx_status stats = {
3729 .mactime = le64_to_cpu(rx_start->timestamp),
3730 .channel = le16_to_cpu(rx_start->channel),
3731 .phymode =
3732 (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3733 MODE_IEEE80211G : MODE_IEEE80211A,
3734 .antenna = 0,
3735 .rate = iwl_hw_get_rate(rx_start->rate_n_flags),
3736 .flag = 0,
3737#ifdef CONFIG_IWLWIFI_HT_AGG
3738 .ordered = 0
3739#endif /* CONFIG_IWLWIFI_HT_AGG */
3740 };
3741 u8 network_packet;
3742
3743 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
3744 IWL_DEBUG_DROP
3745 ("dsp size out of range [0,20]: "
3746 "%d\n", rx_start->cfg_phy_cnt);
3747 return;
3748 }
3749 if (!include_phy) {
3750 if (priv->last_phy_res[0])
3751 rx_start = (struct iwl4965_rx_phy_res *)
3752 &priv->last_phy_res[1];
3753 else
3754 rx_start = NULL;
3755 }
3756
3757 if (!rx_start) {
3758 IWL_ERROR("MPDU frame without PHY data\n");
3759 return;
3760 }
3761
3762 if (include_phy) {
3763 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
3764 + rx_start->cfg_phy_cnt);
3765
3766 len = le16_to_cpu(rx_start->byte_count);
3767 rx_end = (__le32 *) (pkt->u.raw + rx_start->cfg_phy_cnt +
3768 sizeof(struct iwl4965_rx_phy_res) + len);
3769 } else {
3770 struct iwl4965_rx_mpdu_res_start *amsdu =
3771 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3772
3773 header = (void *)(pkt->u.raw +
3774 sizeof(struct iwl4965_rx_mpdu_res_start));
3775 len = le16_to_cpu(amsdu->byte_count);
3776 rx_end = (__le32 *) (pkt->u.raw +
3777 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
3778 }
3779
3780 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
3781 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
3782 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
3783 le32_to_cpu(*rx_end));
3784 return;
3785 }
3786
3787 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
3788
3789 stats.freq = ieee80211chan2mhz(stats.channel);
3790
3791 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
3792 stats.ssi = iwl4965_calc_rssi(rx_start);
3793
3794 /* Meaningful noise values are available only from beacon statistics,
3795 * which are gathered only when associated, and indicate noise
3796 * only for the associated network channel ...
3797 * Ignore these noise values while scanning (other channels) */
3798 if (iwl_is_associated(priv) &&
3799 !test_bit(STATUS_SCANNING, &priv->status)) {
3800 stats.noise = priv->last_rx_noise;
3801 stats.signal = iwl_calc_sig_qual(stats.ssi, stats.noise);
3802 } else {
3803 stats.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3804 stats.signal = iwl_calc_sig_qual(stats.ssi, 0);
3805 }
3806
3807 /* Reset beacon noise level if not associated. */
3808 if (!iwl_is_associated(priv))
3809 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3810
3811#ifdef CONFIG_IWLWIFI_DEBUG
3812 /* TODO: Parts of iwl_report_frame are broken for 4965 */
3813 if (iwl_debug_level & (IWL_DL_RX))
3814 /* Set "1" to report good data frames in groups of 100 */
3815 iwl_report_frame(priv, pkt, header, 1);
3816
3817 if (iwl_debug_level & (IWL_DL_RX | IWL_DL_STATS))
3818 IWL_DEBUG_RX("Rssi %d, noise %d, qual %d, TSF %lu\n",
3819 stats.ssi, stats.noise, stats.signal,
3820 (long unsigned int)le64_to_cpu(rx_start->timestamp));
3821#endif
3822
3823 network_packet = iwl_is_network_packet(priv, header);
3824 if (network_packet) {
3825 priv->last_rx_rssi = stats.ssi;
3826 priv->last_beacon_time = priv->ucode_beacon_time;
3827 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
3828 }
3829
3830 fc = le16_to_cpu(header->frame_control);
3831 switch (fc & IEEE80211_FCTL_FTYPE) {
3832 case IEEE80211_FTYPE_MGMT:
3833
3834 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3835 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3836 header->addr2);
3837 switch (fc & IEEE80211_FCTL_STYPE) {
3838 case IEEE80211_STYPE_PROBE_RESP:
3839 case IEEE80211_STYPE_BEACON:
3840 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA &&
3841 !compare_ether_addr(header->addr2, priv->bssid)) ||
3842 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
3843 !compare_ether_addr(header->addr3, priv->bssid))) {
3844 struct ieee80211_mgmt *mgmt =
3845 (struct ieee80211_mgmt *)header;
3846 u64 timestamp =
3847 le64_to_cpu(mgmt->u.beacon.timestamp);
3848
3849 priv->timestamp0 = timestamp & 0xFFFFFFFF;
3850 priv->timestamp1 =
3851 (timestamp >> 32) & 0xFFFFFFFF;
3852 priv->beacon_int = le16_to_cpu(
3853 mgmt->u.beacon.beacon_int);
3854 if (priv->call_post_assoc_from_beacon &&
3855 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
3856 priv->call_post_assoc_from_beacon = 0;
3857 queue_work(priv->workqueue,
3858 &priv->post_associate.work);
3859 }
3860 }
3861 break;
3862
3863 case IEEE80211_STYPE_ACTION:
3864 break;
3865
3866 /*
3867 * TODO: There is no callback from the upper stack to
3868 * inform us of association status, so as a workaround we
3869 * sniff the assoc_resp management frame here and finish
3870 * the association process.
3871 */
3872 case IEEE80211_STYPE_ASSOC_RESP:
3873 case IEEE80211_STYPE_REASSOC_RESP:
3874 if (network_packet && iwl_is_associated(priv)) {
3875#ifdef CONFIG_IWLWIFI_HT
3876 u8 *pos = NULL;
3877 struct ieee802_11_elems elems;
3878#endif /*CONFIG_IWLWIFI_HT */
3879 struct ieee80211_mgmt *mgnt =
3880 (struct ieee80211_mgmt *)header;
3881
3882 priv->assoc_id = (~((1 << 15) | (1 << 14))
3883 & le16_to_cpu(mgnt->u.assoc_resp.aid));
3884 priv->assoc_capability =
3885 le16_to_cpu(
3886 mgnt->u.assoc_resp.capab_info);
3887#ifdef CONFIG_IWLWIFI_HT
3888 pos = mgnt->u.assoc_resp.variable;
3889 if (!parse_elems(pos,
3890 len - (pos - (u8 *) mgnt),
3891 &elems)) {
3892 if (elems.ht_extra_param &&
3893 elems.ht_cap_param)
3894 break;
3895 }
3896#endif /*CONFIG_IWLWIFI_HT */
3897 /* an assoc_id of 0 means no association */
3898 if (!priv->assoc_id)
3899 break;
3900 if (priv->beacon_int)
3901 queue_work(priv->workqueue,
3902 &priv->post_associate.work);
3903 else
3904 priv->call_post_assoc_from_beacon = 1;
3905 }
3906
3907 break;
3908
3909 case IEEE80211_STYPE_PROBE_REQ:
3910 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
3911 !iwl_is_associated(priv)) {
3912 IWL_DEBUG_DROP("Dropping (non network): "
3913 MAC_FMT ", " MAC_FMT ", "
3914 MAC_FMT "\n",
3915 MAC_ARG(header->addr1),
3916 MAC_ARG(header->addr2),
3917 MAC_ARG(header->addr3));
3918 return;
3919 }
3920 }
3921 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &stats);
3922 break;
3923
3924 case IEEE80211_FTYPE_CTL:
3925#ifdef CONFIG_IWLWIFI_HT_AGG
3926 switch (fc & IEEE80211_FCTL_STYPE) {
3927 case IEEE80211_STYPE_BACK_REQ:
3928 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
3929 iwl4965_handle_data_packet(priv, 0, include_phy,
3930 rxb, &stats);
3931 break;
3932 default:
3933 break;
3934 }
3935#endif
3936
3937 break;
3938
3939 case IEEE80211_FTYPE_DATA:
3940 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3941 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3942 header->addr2);
3943
3944 if (unlikely(!network_packet))
3945 IWL_DEBUG_DROP("Dropping (non network): "
3946 MAC_FMT ", " MAC_FMT ", "
3947 MAC_FMT "\n",
3948 MAC_ARG(header->addr1),
3949 MAC_ARG(header->addr2),
3950 MAC_ARG(header->addr3));
3951 else if (unlikely(is_duplicate_packet(priv, header)))
3952 IWL_DEBUG_DROP("Dropping (dup): " MAC_FMT ", "
3953 MAC_FMT ", " MAC_FMT "\n",
3954 MAC_ARG(header->addr1),
3955 MAC_ARG(header->addr2),
3956 MAC_ARG(header->addr3));
3957 else
3958 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
3959 &stats);
3960 break;
3961 default:
3962 break;
3963
3964 }
3965}
3966
3967/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
3968 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
3969static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
3970 struct iwl_rx_mem_buffer *rxb)
3971{
3972 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3973 priv->last_phy_res[0] = 1;
3974 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
3975 sizeof(struct iwl4965_rx_phy_res));
3976}
3977
3978static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
3979 struct iwl_rx_mem_buffer *rxb)
3980
3981{
3982#ifdef CONFIG_IWLWIFI_SENSITIVITY
3983 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3984 struct iwl_missed_beacon_notif *missed_beacon;
3985
3986 missed_beacon = &pkt->u.missed_beacon;
3987 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
3988 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
3989 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
3990 le32_to_cpu(missed_beacon->total_missed_becons),
3991 le32_to_cpu(missed_beacon->num_recvd_beacons),
3992 le32_to_cpu(missed_beacon->num_expected_beacons));
3993 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
3994 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
3995 queue_work(priv->workqueue, &priv->sensitivity_work);
3996 }
3997#endif /*CONFIG_IWLWIFI_SENSITIVITY*/
3998}
3999
4000#ifdef CONFIG_IWLWIFI_HT
4001#ifdef CONFIG_IWLWIFI_HT_AGG
4002
4003static void iwl4965_set_tx_status(struct iwl_priv *priv, int txq_id, int idx,
4004 u32 status, u32 retry_count, u32 rate)
4005{
4006 struct ieee80211_tx_status *tx_status =
4007 &(priv->txq[txq_id].txb[idx].status);
4008
4009 tx_status->flags = status ? IEEE80211_TX_STATUS_ACK : 0;
4010 tx_status->retry_count += retry_count;
4011 tx_status->control.tx_rate = rate;
4012}
4013
4014
4015static void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv,
4016 int sta_id, int tid)
4017{
4018 unsigned long flags;
4019
4020 spin_lock_irqsave(&priv->sta_lock, flags);
4021 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4022 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4023 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4024 spin_unlock_irqrestore(&priv->sta_lock, flags);
4025
4026 iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4027}
4028
4029
4030static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4031 struct iwl_ht_agg *agg,
4032 struct iwl_compressed_ba_resp*
4033 ba_resp)
4034
4035{
4036 int i, sh, ack;
4037 u16 ba_seq_ctl = le16_to_cpu(ba_resp->ba_seq_ctl);
4038 u32 bitmap0, bitmap1;
4039 u32 resp_bitmap0 = le32_to_cpu(ba_resp->ba_bitmap0);
4040 u32 resp_bitmap1 = le32_to_cpu(ba_resp->ba_bitmap1);
4041
4042 if (unlikely(!agg->wait_for_ba)) {
4043 IWL_ERROR("Received BA when not expected\n");
4044 return -EINVAL;
4045 }
4046 agg->wait_for_ba = 0;
4047 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->ba_seq_ctl);
4048 sh = agg->start_idx - SEQ_TO_INDEX(ba_seq_ctl>>4);
4049 if (sh < 0) /* tbw something is wrong with indices */
4050 sh += 0x100;
4051
4052 /* don't use 64 bits for now */
4053 bitmap0 = resp_bitmap0 >> sh;
4054 bitmap1 = resp_bitmap1 >> sh;
4055 bitmap0 |= (resp_bitmap1 & ((1<<sh)|((1<<sh)-1))) << (32 - sh);
4056
4057 if (agg->frame_count > (64 - sh)) {
4058 IWL_DEBUG_TX_REPLY("more frames than bitmap size\n");
4059 return -1;
4060 }
4061
4062 /* check for success or failure according to the
4063 * transmitted bitmap and back bitmap */
4064 bitmap0 &= agg->bitmap0;
4065 bitmap1 &= agg->bitmap1;
4066
4067 for (i = 0; i < agg->frame_count ; i++) {
4068 int idx = (agg->start_idx + i) & 0xff;
4069 ack = bitmap0 & (1 << i);
4070 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
4071 ack? "ACK":"NACK", i, idx, agg->start_idx + i);
4072 iwl4965_set_tx_status(priv, agg->txq_id, idx, ack, 0,
4073 agg->rate_n_flags);
4074
4075 }
4076
4077 IWL_DEBUG_TX_REPLY("Bitmap %x%x\n", bitmap0, bitmap1);
4078
4079 return 0;
4080}
4081
4082static inline int iwl_queue_dec_wrap(int index, int n_bd)
4083{
4084 return (index == 0) ? n_bd - 1 : index - 1;
4085}
4086
4087static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4088 struct iwl_rx_mem_buffer *rxb)
4089{
4090 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4091 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
4092 int index;
4093 struct iwl_tx_queue *txq = NULL;
4094 struct iwl_ht_agg *agg;
4095 u16 ba_resp_scd_flow = le16_to_cpu(ba_resp->scd_flow);
4096 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4097
4098 if (ba_resp_scd_flow >= ARRAY_SIZE(priv->txq)) {
4099 IWL_ERROR("BUG_ON scd_flow is bigger than the number of queues\n");
4100 return;
4101 }
4102
4103 txq = &priv->txq[ba_resp_scd_flow];
4104 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4105 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4106
4107 /* TODO: Need to get this copy more safely - now good for debug */
4108/*
4109 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from " MAC_FMT ",
4110 sta_id = %d\n",
4111 agg->wait_for_ba,
4112 MAC_ARG((u8*) &ba_resp->sta_addr_lo32),
4113 ba_resp->sta_id);
4114 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%X%X, scd_flow = "
4115 "%d, scd_ssn = %d\n",
4116 ba_resp->tid,
4117 ba_resp->ba_seq_ctl,
4118 ba_resp->ba_bitmap1,
4119 ba_resp->ba_bitmap0,
4120 ba_resp->scd_flow,
4121 ba_resp->scd_ssn);
4122 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%X%X \n",
4123 agg->start_idx,
4124 agg->bitmap1,
4125 agg->bitmap0);
4126*/
4127 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
4128 /* releases all the TFDs until the SSN */
4129 if (txq->q.last_used != (ba_resp_scd_ssn & 0xff))
4130 iwl_tx_queue_reclaim(priv, ba_resp_scd_flow, index);
4131
4132}
4133
4134
4135static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
4136{
4137 iwl_write_restricted_reg(priv,
4138 SCD_QUEUE_STATUS_BITS(txq_id),
4139 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4140 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4141}
4142
4143static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4144 u16 txq_id)
4145{
4146 u32 tbl_dw_addr;
4147 u32 tbl_dw;
4148 u16 scd_q2ratid;
4149
4150 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4151
4152 tbl_dw_addr = priv->scd_base_addr +
4153 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4154
4155 tbl_dw = iwl_read_restricted_mem(priv, tbl_dw_addr);
4156
4157 if (txq_id & 0x1)
4158 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
4159 else
4160 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
4161
4162 iwl_write_restricted_mem(priv, tbl_dw_addr, tbl_dw);
4163
4164 return 0;
4165}
4166
4167/**
4168 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
4169 */
4170static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4171 int tx_fifo, int sta_id, int tid,
4172 u16 ssn_idx)
4173{
4174 unsigned long flags;
4175 int rc;
4176 u16 ra_tid;
4177
4178 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
4179 IWL_WARNING("queue number too small: %d, must be > %d\n",
4180 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4181
4182 ra_tid = BUILD_RAxTID(sta_id, tid);
4183
4184 iwl_sta_modify_enable_tid_tx(priv, sta_id, tid);
4185
4186 spin_lock_irqsave(&priv->lock, flags);
4187 rc = iwl_grab_restricted_access(priv);
4188 if (rc) {
4189 spin_unlock_irqrestore(&priv->lock, flags);
4190 return rc;
4191 }
4192
4193 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4194
4195 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
4196
4197
4198 iwl_set_bits_restricted_reg(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
4199
4200 priv->txq[txq_id].q.last_used = (ssn_idx & 0xff);
4201 priv->txq[txq_id].q.first_empty = (ssn_idx & 0xff);
4202
4203	/* assumes that ssn_idx is valid (!= 0xFFF) */
4204 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4205
4206 iwl_write_restricted_mem(priv,
4207 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4208 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4209 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4210
4211 iwl_write_restricted_mem(priv, priv->scd_base_addr +
4212 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4213 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4214 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4215
4216 iwl_set_bits_restricted_reg(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
4217
4218 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
4219
4220 iwl_release_restricted_access(priv);
4221 spin_unlock_irqrestore(&priv->lock, flags);
4222
4223 return 0;
4224}
4225
4226/**
4227 * txq_id must be greater than or equal to IWL_BACK_QUEUE_FIRST_ID
4228 */
4229static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4230 u16 ssn_idx, u8 tx_fifo)
4231{
4232 unsigned long flags;
4233 int rc;
4234
4235 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4236		IWL_WARNING("queue number too small: %d, must be >= %d\n",
4237 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4238 return -EINVAL;
4239 }
4240
4241 spin_lock_irqsave(&priv->lock, flags);
4242 rc = iwl_grab_restricted_access(priv);
4243 if (rc) {
4244 spin_unlock_irqrestore(&priv->lock, flags);
4245 return rc;
4246 }
4247
4248 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4249
4250 iwl_clear_bits_restricted_reg(priv, SCD_QUEUECHAIN_SEL, (1 << txq_id));
4251
4252 priv->txq[txq_id].q.last_used = (ssn_idx & 0xff);
4253 priv->txq[txq_id].q.first_empty = (ssn_idx & 0xff);
4254	/* assumes that ssn_idx is valid (!= 0xFFF) */
4255 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4256
4257 iwl_clear_bits_restricted_reg(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
4258 iwl4965_txq_ctx_deactivate(priv, txq_id);
4259 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4260
4261 iwl_release_restricted_access(priv);
4262 spin_unlock_irqrestore(&priv->lock, flags);
4263
4264 return 0;
4265}
4266
4267#endif/* CONFIG_IWLWIFI_HT_AGG */
4268#endif /* CONFIG_IWLWIFI_HT */
4269/*
4270 * RATE SCALE CODE
4271 */
4272int iwl4965_init_hw_rates(struct iwl_priv *priv, struct ieee80211_rate *rates)
4273{
4274 return 0;
4275}
4276
4277
4278/**
4279 * iwl4965_add_station - Initialize a station's hardware rate table
4280 *
4281 * The uCode contains a table of fallback rates and retries per rate
4282 * for automatic fallback during transmission.
4283 *
4284 * NOTE: This initializes the table for a single retry per data rate
4285 * which is not optimal. Setting up an intelligent retry per rate
4286 * requires feedback from transmission, which isn't exposed through
4287 * rc80211_simple, the rate-control algorithm this driver currently uses.
4288 *
4289 */
4290void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
4291{
4292 int i, r;
4293 struct iwl_link_quality_cmd link_cmd = {
4294 .reserved1 = 0,
4295 };
4296 u16 rate_flags;
4297
4298	/* Set up the rate scaling to start at 54M and fall back
4299	 * all the way to 1M in IEEE order, then stay on the lowest rate */
4300 if (is_ap)
4301 r = IWL_RATE_54M_INDEX;
4302 else if (priv->phymode == MODE_IEEE80211A)
4303 r = IWL_RATE_6M_INDEX;
4304 else
4305 r = IWL_RATE_1M_INDEX;
4306
4307 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4308 rate_flags = 0;
4309 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4310 rate_flags |= RATE_MCS_CCK_MSK;
4311
4312 rate_flags |= RATE_MCS_ANT_B_MSK;
4313 rate_flags &= ~RATE_MCS_ANT_A_MSK;
4314 link_cmd.rs_table[i].rate_n_flags =
4315 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
4316 r = iwl_get_prev_ieee_rate(r);
4317 }
4318
4319 link_cmd.general_params.single_stream_ant_msk = 2;
4320 link_cmd.general_params.dual_stream_ant_msk = 3;
4321 link_cmd.agg_params.agg_dis_start_th = 3;
4322 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4323
4324 /* Update the rate scaling for control frame Tx to AP */
4325 link_cmd.sta_id = is_ap ? IWL_AP_ID : IWL4965_BROADCAST_ID;
4326
4327 iwl_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, sizeof(link_cmd),
4328 &link_cmd);
4329}
4330
4331#ifdef CONFIG_IWLWIFI_HT
4332
4333static u8 iwl_is_channel_extension(struct iwl_priv *priv, int phymode,
4334 u16 channel, u8 extension_chan_offset)
4335{
4336 const struct iwl_channel_info *ch_info;
4337
4338 ch_info = iwl_get_channel_info(priv, phymode, channel);
4339 if (!is_channel_valid(ch_info))
4340 return 0;
4341
4342 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO)
4343 return 0;
4344
4345 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4346 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4347 return 1;
4348
4349 return 0;
4350}
4351
4352static u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
4353 const struct sta_ht_info *ht_info)
4354{
4355
4356 if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
4357 return 0;
4358
4359 if (ht_info->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ)
4360 return 0;
4361
4362 if (ht_info->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO)
4363 return 0;
4364
4365 /* no fat tx allowed on 2.4GHZ */
4366 if (priv->phymode != MODE_IEEE80211A)
4367 return 0;
4368 return (iwl_is_channel_extension(priv, priv->phymode,
4369 ht_info->control_channel,
4370 ht_info->extension_chan_offset));
4371}
4372
4373void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct sta_ht_info *ht_info)
4374{
4375 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
4376 u32 val;
4377
4378 if (!ht_info->is_ht)
4379 return;
4380
4381 if (iwl_is_fat_tx_allowed(priv, ht_info))
4382 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4383 else
4384 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4385 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4386
4387 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4388		IWL_DEBUG_ASSOC("control channel differs from current %d %d\n",
4389 le16_to_cpu(rxon->channel),
4390 ht_info->control_channel);
4391 rxon->channel = cpu_to_le16(ht_info->control_channel);
4392 return;
4393 }
4394
4395	/* Note: the control channel is opposite to the extension channel */
4396 switch (ht_info->extension_chan_offset) {
4397 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4398 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
4399 break;
4400 case IWL_EXT_CHANNEL_OFFSET_BELOW:
4401 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4402 break;
4403 case IWL_EXT_CHANNEL_OFFSET_AUTO:
4404 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4405 break;
4406 default:
4407 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4408 break;
4409 }
4410
4411 val = ht_info->operating_mode;
4412
4413 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4414
4415 priv->active_rate_ht[0] = ht_info->supp_rates[0];
4416 priv->active_rate_ht[1] = ht_info->supp_rates[1];
4417 iwl4965_set_rxon_chain(priv);
4418
4419 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4420 "rxon flags 0x%X operation mode :0x%X "
4421 "extension channel offset 0x%x "
4422 "control chan %d\n",
4423 priv->active_rate_ht[0], priv->active_rate_ht[1],
4424 le32_to_cpu(rxon->flags), ht_info->operating_mode,
4425 ht_info->extension_chan_offset,
4426 ht_info->control_channel);
4427 return;
4428}
4429
4430void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index)
4431{
4432 __le32 sta_flags;
4433 struct sta_ht_info *ht_info = &priv->current_assoc_ht;
4434
4435 priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ;
4436 if (!ht_info->is_ht)
4437 goto done;
4438
4439 sta_flags = priv->stations[index].sta.station_flags;
4440
4441 if (ht_info->tx_mimo_ps_mode == IWL_MIMO_PS_DYNAMIC)
4442 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
4443 else
4444 sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK;
4445
4446 sta_flags |= cpu_to_le32(
4447 (u32)ht_info->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
4448
4449 sta_flags |= cpu_to_le32(
4450 (u32)ht_info->mpdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
4451
4452 sta_flags &= (~STA_FLG_FAT_EN_MSK);
4453 ht_info->tx_chan_width = IWL_CHANNEL_WIDTH_20MHZ;
4454 ht_info->chan_width_cap = IWL_CHANNEL_WIDTH_20MHZ;
4455
4456 if (iwl_is_fat_tx_allowed(priv, ht_info)) {
4457 sta_flags |= STA_FLG_FAT_EN_MSK;
4458 ht_info->chan_width_cap = IWL_CHANNEL_WIDTH_40MHZ;
4459 if (ht_info->supported_chan_width == IWL_CHANNEL_WIDTH_40MHZ)
4460 ht_info->tx_chan_width = IWL_CHANNEL_WIDTH_40MHZ;
4461 }
4462 priv->current_channel_width = ht_info->tx_chan_width;
4463 priv->stations[index].sta.station_flags = sta_flags;
4464 done:
4465 return;
4466}
4467
4468#ifdef CONFIG_IWLWIFI_HT_AGG
4469
4470static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv,
4471 int sta_id, int tid, u16 ssn)
4472{
4473 unsigned long flags;
4474
4475 spin_lock_irqsave(&priv->sta_lock, flags);
4476 priv->stations[sta_id].sta.station_flags_msk = 0;
4477 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
4478 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
4479 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
4480 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4481 spin_unlock_irqrestore(&priv->sta_lock, flags);
4482
4483 iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4484}
4485
4486static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv,
4487 int sta_id, int tid)
4488{
4489 unsigned long flags;
4490
4491 spin_lock_irqsave(&priv->sta_lock, flags);
4492 priv->stations[sta_id].sta.station_flags_msk = 0;
4493 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
4494 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
4495 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4496 spin_unlock_irqrestore(&priv->sta_lock, flags);
4497
4498 iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4499}
4500
4501static const u16 default_tid_to_tx_fifo[] = {
4502 IWL_TX_FIFO_AC1,
4503 IWL_TX_FIFO_AC0,
4504 IWL_TX_FIFO_AC0,
4505 IWL_TX_FIFO_AC1,
4506 IWL_TX_FIFO_AC2,
4507 IWL_TX_FIFO_AC2,
4508 IWL_TX_FIFO_AC3,
4509 IWL_TX_FIFO_AC3,
4510 IWL_TX_FIFO_NONE,
4511 IWL_TX_FIFO_NONE,
4512 IWL_TX_FIFO_NONE,
4513 IWL_TX_FIFO_NONE,
4514 IWL_TX_FIFO_NONE,
4515 IWL_TX_FIFO_NONE,
4516 IWL_TX_FIFO_NONE,
4517 IWL_TX_FIFO_NONE,
4518 IWL_TX_FIFO_AC3
4519};
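/*
 * Illustrative sketch (not part of this patch): the table above pairs TIDs
 * the way 802.11e pairs user priorities into access categories (TIDs 1/2
 * share one FIFO, 0/3 another, 4/5 and 6/7 the higher-priority ones), with
 * TIDs 8-15 mapped to IWL_TX_FIFO_NONE.  A hypothetical bounds-checked
 * lookup helper, mirroring the open-coded checks in the agg start/stop
 * callbacks below, could look like:
 *
 *	static int iwl_tid_to_tx_fifo(u16 tid)
 *	{
 *		if (unlikely(tid >= ARRAY_SIZE(default_tid_to_tx_fifo)) ||
 *		    default_tid_to_tx_fifo[tid] == IWL_TX_FIFO_NONE)
 *			return -EINVAL;
 *		return default_tid_to_tx_fifo[tid];
 *	}
 */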
4520
4521static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
4522{
4523 int txq_id;
4524
4525 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
4526 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4527 return txq_id;
4528 return -1;
4529}
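/*
 * Illustrative note (not part of this patch): the matching deactivate used
 * by iwl4965_tx_queue_agg_disable() (iwl4965_txq_ctx_deactivate(), defined
 * elsewhere in the driver) presumably just clears the same bit, along the
 * lines of:
 *
 *	clear_bit(txq_id, &priv->txq_ctx_active_msk);
 */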
4530
4531int iwl_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, u16 tid,
4532 u16 *start_seq_num)
4533{
4534
4535 struct iwl_priv *priv = hw->priv;
4536 int sta_id;
4537 int tx_fifo;
4538 int txq_id;
4539 int ssn = -1;
4540 unsigned long flags;
4541 struct iwl_tid_data *tid_data;
4542
4543 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4544 tx_fifo = default_tid_to_tx_fifo[tid];
4545 else
4546 return -EINVAL;
4547
4548 IWL_WARNING("iwl-AGG iwl_mac_ht_tx_agg_start on da=" MAC_FMT
4549 " tid=%d\n", MAC_ARG(da), tid);
4550
4551 sta_id = iwl_hw_find_station(priv, da);
4552 if (sta_id == IWL_INVALID_STATION)
4553 return -ENXIO;
4554
4555 txq_id = iwl_txq_ctx_activate_free(priv);
4556 if (txq_id == -1)
4557 return -ENXIO;
4558
4559 spin_lock_irqsave(&priv->sta_lock, flags);
4560 tid_data = &priv->stations[sta_id].tid[tid];
4561 ssn = SEQ_TO_SN(tid_data->seq_number);
4562 tid_data->agg.txq_id = txq_id;
4563 spin_unlock_irqrestore(&priv->sta_lock, flags);
4564
4565 *start_seq_num = ssn;
4566 iwl4965_ba_status(priv, tid, BA_STATUS_ACTIVE);
4567 return iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4568 sta_id, tid, ssn);
4569}
4570
4571
4572int iwl_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid,
4573 int generator)
4574{
4575
4576 struct iwl_priv *priv = hw->priv;
4577 int tx_fifo_id, txq_id, sta_id, ssn = -1;
4578 struct iwl_tid_data *tid_data;
4579 int rc;
4580 if (!da) {
4581 IWL_ERROR("%s: da = NULL\n", __func__);
4582 return -EINVAL;
4583 }
4584
4585 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4586 tx_fifo_id = default_tid_to_tx_fifo[tid];
4587 else
4588 return -EINVAL;
4589
4590 sta_id = iwl_hw_find_station(priv, da);
4591
4592 if (sta_id == IWL_INVALID_STATION)
4593 return -ENXIO;
4594
4595 tid_data = &priv->stations[sta_id].tid[tid];
4596 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4597 txq_id = tid_data->agg.txq_id;
4598
4599 rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
4600 /* FIXME: need more safe way to handle error condition */
4601 if (rc)
4602 return rc;
4603
4604 iwl4965_ba_status(priv, tid, BA_STATUS_INITIATOR_DELBA);
4605 IWL_DEBUG_INFO("iwl_mac_ht_tx_agg_stop on da=" MAC_FMT " tid=%d\n",
4606 MAC_ARG(da), tid);
4607
4608 return 0;
4609}
4610
4611int iwl_mac_ht_rx_agg_start(struct ieee80211_hw *hw, u8 *da,
4612 u16 tid, u16 start_seq_num)
4613{
4614 struct iwl_priv *priv = hw->priv;
4615 int sta_id;
4616
4617 IWL_WARNING("iwl-AGG iwl_mac_ht_rx_agg_start on da=" MAC_FMT
4618 " tid=%d\n", MAC_ARG(da), tid);
4619 sta_id = iwl_hw_find_station(priv, da);
4620 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, start_seq_num);
4621 return 0;
4622}
4623
4624int iwl_mac_ht_rx_agg_stop(struct ieee80211_hw *hw, u8 *da,
4625 u16 tid, int generator)
4626{
4627 struct iwl_priv *priv = hw->priv;
4628 int sta_id;
4629
4630 IWL_WARNING("iwl-AGG iwl_mac_ht_rx_agg_stop on da=" MAC_FMT " tid=%d\n",
4631 MAC_ARG(da), tid);
4632 sta_id = iwl_hw_find_station(priv, da);
4633 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid);
4634 return 0;
4635}
4636
4637#endif /* CONFIG_IWLWIFI_HT_AGG */
4638#endif /* CONFIG_IWLWIFI_HT */
4639
4640/* Set up 4965-specific Rx frame reply handlers */
4641void iwl_hw_rx_handler_setup(struct iwl_priv *priv)
4642{
4643 /* Legacy Rx frames */
4644 priv->rx_handlers[REPLY_4965_RX] = iwl4965_rx_reply_rx;
4645
4646 /* High-throughput (HT) Rx frames */
4647 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4648 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
4649
4650 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4651 iwl4965_rx_missed_beacon_notif;
4652
4653#ifdef CONFIG_IWLWIFI_HT
4654#ifdef CONFIG_IWLWIFI_HT_AGG
4655 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
4656#endif /* CONFIG_IWLWIFI_HT_AGG */
4657#endif /* CONFIG_IWLWIFI_HT */
4658}
4659
4660void iwl_hw_setup_deferred_work(struct iwl_priv *priv)
4661{
4662 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4663 INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work);
4664#ifdef CONFIG_IWLWIFI_SENSITIVITY
4665 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4666#endif
4667#ifdef CONFIG_IWLWIFI_HT
4668#ifdef CONFIG_IWLWIFI_HT_AGG
4669 INIT_WORK(&priv->agg_work, iwl4965_bg_agg_work);
4670#endif /* CONFIG_IWLWIFI_HT_AGG */
4671#endif /* CONFIG_IWLWIFI_HT */
4672 init_timer(&priv->statistics_periodic);
4673 priv->statistics_periodic.data = (unsigned long)priv;
4674 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
4675}
4676
4677void iwl_hw_cancel_deferred_work(struct iwl_priv *priv)
4678{
4679 del_timer_sync(&priv->statistics_periodic);
4680
4681 cancel_delayed_work(&priv->init_alive_start);
4682}
4683
4684struct pci_device_id iwl_hw_card_ids[] = {
4685 {0x8086, 0x4229, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4686 {0x8086, 0x4230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4687 {0}
4688};
4689
4690int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv)
4691{
4692 u16 count;
4693 int rc;
4694
4695 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
4696 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
4697 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
4698 rc = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
4699 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
4700 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
4701 EEPROM_SEM_TIMEOUT);
4702 if (rc >= 0) {
4703			IWL_DEBUG_IO("Acquired semaphore after %d tries.\n",
4704 count+1);
4705 return rc;
4706 }
4707 }
4708
4709 return rc;
4710}
4711
4712inline void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
4713{
4714 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
4715 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
4716}
4717
4718
4719MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-4965.h
new file mode 100644
index 000000000000..4c700812b45b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.h
@@ -0,0 +1,341 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#ifndef __iwl_4965_h__
27#define __iwl_4965_h__
28
29struct iwl_priv;
30struct sta_ht_info;
31
32/*
33 * Forward declare iwl-4965.c functions for iwl-base.c
34 */
35extern int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv);
36extern void iwl_eeprom_release_semaphore(struct iwl_priv *priv);
37
38extern int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv,
39 struct iwl_tx_queue *txq,
40 u16 byte_cnt);
41extern void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr,
42 int is_ap);
43extern void iwl4965_set_rxon_ht(struct iwl_priv *priv,
44 struct sta_ht_info *ht_info);
45
46extern void iwl4965_set_rxon_chain(struct iwl_priv *priv);
47extern int iwl4965_tx_cmd(struct iwl_priv *priv, struct iwl_cmd *out_cmd,
48 u8 sta_id, dma_addr_t txcmd_phys,
49 struct ieee80211_hdr *hdr, u8 hdr_len,
50 struct ieee80211_tx_control *ctrl, void *sta_in);
51extern int iwl4965_init_hw_rates(struct iwl_priv *priv,
52 struct ieee80211_rate *rates);
53extern int iwl4965_alive_notify(struct iwl_priv *priv);
54extern void iwl4965_update_rate_scaling(struct iwl_priv *priv, u8 mode);
55extern void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index);
56
57extern void iwl4965_chain_noise_reset(struct iwl_priv *priv);
58extern void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags,
59 u8 force);
60extern int iwl4965_set_fat_chan_info(struct iwl_priv *priv, int phymode,
61 u16 channel,
62 const struct iwl_eeprom_channel *eeprom_ch,
63 u8 fat_extension_channel);
64extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
65
66#ifdef CONFIG_IWLWIFI_HT
67#ifdef CONFIG_IWLWIFI_HT_AGG
68extern int iwl_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da,
69 u16 tid, u16 *start_seq_num);
70extern int iwl_mac_ht_rx_agg_start(struct ieee80211_hw *hw, u8 *da,
71 u16 tid, u16 start_seq_num);
72extern int iwl_mac_ht_rx_agg_stop(struct ieee80211_hw *hw, u8 *da,
73 u16 tid, int generator);
74extern int iwl_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da,
75 u16 tid, int generator);
76extern void iwl4965_turn_off_agg(struct iwl_priv *priv, u8 tid);
77#endif /* CONFIG_IWLWIFI_HT_AGG */
78#endif /*CONFIG_IWLWIFI_HT */
79/* Structures, enum, and defines specific to the 4965 */
80
81#define IWL4965_KW_SIZE 0x1000 /*4k */
82
83struct iwl_kw {
84 dma_addr_t dma_addr;
85 void *v_addr;
86 size_t size;
87};
88
89#define TID_QUEUE_CELL_SPACING 50 /*mS */
90#define TID_QUEUE_MAX_SIZE 20
91#define TID_ROUND_VALUE 5 /* mS */
92#define TID_MAX_LOAD_COUNT 8
93
94#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
95#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
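/*
 * Illustrative note (not part of this patch): TIME_WRAP_AROUND(x, y) gives
 * the distance from x to y on an unsigned counter that may have wrapped.
 * For an unsigned 8-bit counter, for example, x = 250 and y = 5 yield
 * (u8)(0 - 250) + 5 = 6 + 5 = 11 ticks rather than a negative difference.
 */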
96
97#define TID_ALL_ENABLED 0x7f
98#define TID_ALL_SPECIFIED 0xff
99#define TID_AGG_TPT_THREHOLD 0x0
100
101#define IWL_CHANNEL_WIDTH_20MHZ 0
102#define IWL_CHANNEL_WIDTH_40MHZ 1
103
104#define IWL_MIMO_PS_STATIC 0
105#define IWL_MIMO_PS_NONE 3
106#define IWL_MIMO_PS_DYNAMIC 1
107#define IWL_MIMO_PS_INVALID 2
108
109#define IWL_OPERATION_MODE_AUTO 0
110#define IWL_OPERATION_MODE_HT_ONLY 1
111#define IWL_OPERATION_MODE_MIXED 2
112#define IWL_OPERATION_MODE_20MHZ 3
113
114#define IWL_EXT_CHANNEL_OFFSET_AUTO 0
115#define IWL_EXT_CHANNEL_OFFSET_ABOVE 1
116#define IWL_EXT_CHANNEL_OFFSET_ 2
117#define IWL_EXT_CHANNEL_OFFSET_BELOW 3
118#define IWL_EXT_CHANNEL_OFFSET_MAX 4
119
120#define NRG_NUM_PREV_STAT_L 20
121#define NUM_RX_CHAINS (3)
122
123#define TX_POWER_IWL_ILLEGAL_VDET -100000
124#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
125#define TX_POWER_IWL_CLOSED_LOOP_MIN_POWER 18
126#define TX_POWER_IWL_CLOSED_LOOP_MAX_POWER 34
127#define TX_POWER_IWL_VDET_SLOPE_BELOW_NOMINAL 17
128#define TX_POWER_IWL_VDET_SLOPE_ABOVE_NOMINAL 20
129#define TX_POWER_IWL_NOMINAL_POWER 26
130#define TX_POWER_IWL_CLOSED_LOOP_ITERATION_LIMIT 1
131#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V 7
132#define TX_POWER_IWL_DEGREES_PER_VDET_CODE 11
133#define IWL_TX_POWER_MAX_NUM_PA_MEASUREMENTS 1
134#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
135#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
136
137struct iwl_traffic_load {
138 unsigned long time_stamp;
139 u32 packet_count[TID_QUEUE_MAX_SIZE];
140 u8 queue_count;
141 u8 head;
142 u32 total;
143};
144
145#ifdef CONFIG_IWLWIFI_HT_AGG
146struct iwl_agg_control {
147 unsigned long next_retry;
148 u32 wait_for_agg_status;
149 u32 tid_retry;
150 u32 requested_ba;
151 u32 granted_ba;
152 u8 auto_agg;
153 u32 tid_traffic_load_threshold;
154 u32 ba_timeout;
155 struct iwl_traffic_load traffic_load[TID_MAX_LOAD_COUNT];
156};
157#endif /*CONFIG_IWLWIFI_HT_AGG */
158
159struct iwl_lq_mngr {
160#ifdef CONFIG_IWLWIFI_HT_AGG
161 struct iwl_agg_control agg_ctrl;
162#endif
163 spinlock_t lock;
164 s32 max_window_size;
165 s32 *expected_tpt;
166 u8 *next_higher_rate;
167 u8 *next_lower_rate;
168 unsigned long stamp;
169 unsigned long stamp_last;
170 u32 flush_time;
171 u32 tx_packets;
172 u8 lq_ready;
173};
174
175
176/* Sensitivity and chain noise calibration */
177#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1)
178#define INITIALIZATION_VALUE 0xFFFF
179#define CAL_NUM_OF_BEACONS 20
180#define MAXIMUM_ALLOWED_PATHLOSS 15
181
182/* Param table within SENSITIVITY_CMD */
183#define HD_MIN_ENERGY_CCK_DET_INDEX (0)
184#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
185#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
186#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
187#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
188#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
189#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
190#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
191#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
192#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
193#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
194
195#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0)
196#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1)
197
198#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
199
200#define MAX_FA_OFDM 50
201#define MIN_FA_OFDM 5
202#define MAX_FA_CCK 50
203#define MIN_FA_CCK 5
204
205#define NRG_MIN_CCK 97
206#define NRG_MAX_CCK 0
207
208#define AUTO_CORR_MIN_OFDM 85
209#define AUTO_CORR_MIN_OFDM_MRC 170
210#define AUTO_CORR_MIN_OFDM_X1 105
211#define AUTO_CORR_MIN_OFDM_MRC_X1 220
212#define AUTO_CORR_MAX_OFDM 120
213#define AUTO_CORR_MAX_OFDM_MRC 210
214#define AUTO_CORR_MAX_OFDM_X1 140
215#define AUTO_CORR_MAX_OFDM_MRC_X1 270
216#define AUTO_CORR_STEP_OFDM 1
217
218#define AUTO_CORR_MIN_CCK (125)
219#define AUTO_CORR_MAX_CCK (200)
220#define AUTO_CORR_MIN_CCK_MRC 200
221#define AUTO_CORR_MAX_CCK_MRC 400
222#define AUTO_CORR_STEP_CCK 3
223#define AUTO_CORR_MAX_TH_CCK 160
224
225#define NRG_ALG 0
226#define AUTO_CORR_ALG 1
227#define NRG_DIFF 2
228#define NRG_STEP_CCK 2
229#define NRG_MARGIN 8
230#define MAX_NUMBER_CCK_NO_FA 100
231
232#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
233
234#define CHAIN_A 0
235#define CHAIN_B 1
236#define CHAIN_C 2
237#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
238#define ALL_BAND_FILTER 0xFF00
239#define IN_BAND_FILTER 0xFF
240#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
241
242enum iwl_false_alarm_state {
243 IWL_FA_TOO_MANY = 0,
244 IWL_FA_TOO_FEW = 1,
245 IWL_FA_GOOD_RANGE = 2,
246};
247
248enum iwl_chain_noise_state {
249 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
250 IWL_CHAIN_NOISE_ACCUMULATE = 1,
251 IWL_CHAIN_NOISE_CALIBRATED = 2,
252};
253
254enum iwl_sensitivity_state {
255 IWL_SENS_CALIB_ALLOWED = 0,
256 IWL_SENS_CALIB_NEED_REINIT = 1,
257};
258
259enum iwl_calib_enabled_state {
260 IWL_CALIB_DISABLED = 0, /* must be 0 */
261 IWL_CALIB_ENABLED = 1,
262};
263
264struct statistics_general_data {
265 u32 beacon_silence_rssi_a;
266 u32 beacon_silence_rssi_b;
267 u32 beacon_silence_rssi_c;
268 u32 beacon_energy_a;
269 u32 beacon_energy_b;
270 u32 beacon_energy_c;
271};
272
273/* Sensitivity calib data */
274struct iwl_sensitivity_data {
275 u32 auto_corr_ofdm;
276 u32 auto_corr_ofdm_mrc;
277 u32 auto_corr_ofdm_x1;
278 u32 auto_corr_ofdm_mrc_x1;
279 u32 auto_corr_cck;
280 u32 auto_corr_cck_mrc;
281
282 u32 last_bad_plcp_cnt_ofdm;
283 u32 last_fa_cnt_ofdm;
284 u32 last_bad_plcp_cnt_cck;
285 u32 last_fa_cnt_cck;
286
287 u32 nrg_curr_state;
288 u32 nrg_prev_state;
289 u32 nrg_value[10];
290 u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
291 u32 nrg_silence_ref;
292 u32 nrg_energy_idx;
293 u32 nrg_silence_idx;
294 u32 nrg_th_cck;
295 s32 nrg_auto_corr_silence_diff;
296 u32 num_in_cck_no_fa;
297 u32 nrg_th_ofdm;
298
299 u8 state;
300};
301
302/* Chain noise (differential Rx gain) calib data */
303struct iwl_chain_noise_data {
304 u8 state;
305 u16 beacon_count;
306 u32 chain_noise_a;
307 u32 chain_noise_b;
308 u32 chain_noise_c;
309 u32 chain_signal_a;
310 u32 chain_signal_b;
311 u32 chain_signal_c;
312 u8 disconn_array[NUM_RX_CHAINS];
313 u8 delta_gain_code[NUM_RX_CHAINS];
314 u8 radio_write;
315};
316
317/* IWL4965 */
318#define RATE_MCS_CODE_MSK 0x7
319#define RATE_MCS_MIMO_POS 3
320#define RATE_MCS_MIMO_MSK 0x8
321#define RATE_MCS_HT_DUP_POS 5
322#define RATE_MCS_HT_DUP_MSK 0x20
323#define RATE_MCS_FLAGS_POS 8
324#define RATE_MCS_HT_POS 8
325#define RATE_MCS_HT_MSK 0x100
326#define RATE_MCS_CCK_POS 9
327#define RATE_MCS_CCK_MSK 0x200
328#define RATE_MCS_GF_POS 10
329#define RATE_MCS_GF_MSK 0x400
330
331#define RATE_MCS_FAT_POS 11
332#define RATE_MCS_FAT_MSK 0x800
333#define RATE_MCS_DUP_POS 12
334#define RATE_MCS_DUP_MSK 0x1000
335#define RATE_MCS_SGI_POS 13
336#define RATE_MCS_SGI_MSK 0x2000
337
338#define EEPROM_SEM_TIMEOUT 10
339#define EEPROM_SEM_RETRY_LIMIT 1000
340
341#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-channel.h b/drivers/net/wireless/iwlwifi/iwl-channel.h
new file mode 100644
index 000000000000..023c3f240cea
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-channel.h
@@ -0,0 +1,161 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#ifndef __iwl_channel_h__
27#define __iwl_channel_h__
28
29#define IWL_NUM_SCAN_RATES (2)
30
31struct iwl_channel_tgd_info {
32 u8 type;
33 s8 max_power;
34};
35
36struct iwl_channel_tgh_info {
37 s64 last_radar_time;
38};
39
40/* current Tx power values to use, one for each rate for each channel.
41 * requested power is limited by:
42 * -- regulatory EEPROM limits for this channel
43 * -- hardware capabilities (clip-powers)
44 * -- spectrum management
45 * -- user preference (e.g. iwconfig)
46 * when requested power is set, base power index must also be set. */
47struct iwl_channel_power_info {
48 struct iwl_tx_power tpc; /* actual radio and DSP gain settings */
49	s8 power_table_index;	/* actual (compensated) index into gain table */
50 s8 base_power_index; /* gain index for power at factory temp. */
51 s8 requested_power; /* power (dBm) requested for this chnl/rate */
52};
53
54/* current scan Tx power values to use, one for each scan rate for each
55 * channel. */
56struct iwl_scan_power_info {
57 struct iwl_tx_power tpc; /* actual radio and DSP gain settings */
58 s8 power_table_index; /* actual (compenst'd) index into gain table */
59 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
60};
61
62/* Channel unlock period is 15 seconds. If no beacon or probe response
63 * has been received within 15 seconds on a locked channel then the channel
64 * remains locked. */
65#define TX_UNLOCK_PERIOD 15
66
67/* CSA lock period is 15 seconds. If a CSA has been received on a channel in
68 * the last 15 seconds, the channel is locked */
69#define CSA_LOCK_PERIOD 15
70/*
71 * One for each channel, holds all channel setup data
72 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
73 * with one another!
74 */
75#define IWL4965_MAX_RATE (33)
76
77struct iwl_channel_info {
78 struct iwl_channel_tgd_info tgd;
79 struct iwl_channel_tgh_info tgh;
80 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
81 struct iwl_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for
82 * FAT channel */
83
84 u8 channel; /* channel number */
85 u8 flags; /* flags copied from EEPROM */
86 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
87 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */
88 s8 min_power; /* always 0 */
89 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
90
91 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
92 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
93 u8 phymode; /* MODE_IEEE80211{A,B,G} */
94
95 /* Radio/DSP gain settings for each "normal" data Tx rate.
96 * These include, in addition to RF and DSP gain, a few fields for
97 * remembering/modifying gain settings (indexes). */
98 struct iwl_channel_power_info power_info[IWL4965_MAX_RATE];
99
100#if IWL == 4965
101 /* FAT channel info */
102 s8 fat_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
103 s8 fat_curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */
104 s8 fat_min_power; /* always 0 */
105 s8 fat_scan_power; /* (dBm) eeprom, direct scans, any rate */
106 u8 fat_flags; /* flags copied from EEPROM */
107 u8 fat_extension_channel;
108#endif
109
110 /* Radio/DSP gain settings for each scan rate, for directed scans. */
111 struct iwl_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
112};
113
114struct iwl_clip_group {
115 /* maximum power level to prevent clipping for each rate, derived by
116 * us from this band's saturation power in EEPROM */
117 const s8 clip_powers[IWL_MAX_RATES];
118};
119
120static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
121{
122 if (ch_info == NULL)
123 return 0;
124 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
125}
126
127static inline int is_channel_narrow(const struct iwl_channel_info *ch_info)
128{
129 return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0;
130}
131
132static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
133{
134 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
135}
136
137static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info)
138{
139 return ch_info->phymode == MODE_IEEE80211A;
140}
141
142static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info)
143{
144 return ((ch_info->phymode == MODE_IEEE80211B) ||
145 (ch_info->phymode == MODE_IEEE80211G));
146}
147
148static inline int is_channel_passive(const struct iwl_channel_info *ch)
149{
150 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
151}
152
153static inline int is_channel_ibss(const struct iwl_channel_info *ch)
154{
155 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
156}
157
158extern const struct iwl_channel_info *iwl_get_channel_info(
159 const struct iwl_priv *priv, int phymode, u16 channel);
160
161#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
new file mode 100644
index 000000000000..9de8d7f6efa3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -0,0 +1,1734 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __iwl_commands_h__
65#define __iwl_commands_h__
66
67enum {
68 REPLY_ALIVE = 0x1,
69 REPLY_ERROR = 0x2,
70
71 /* RXON and QOS commands */
72 REPLY_RXON = 0x10,
73 REPLY_RXON_ASSOC = 0x11,
74 REPLY_QOS_PARAM = 0x13,
75 REPLY_RXON_TIMING = 0x14,
76
77 /* Multi-Station support */
78 REPLY_ADD_STA = 0x18,
79 REPLY_REMOVE_STA = 0x19, /* not used */
80 REPLY_REMOVE_ALL_STA = 0x1a, /* not used */
81
82 /* RX, TX, LEDs */
83#if IWL == 3945
84 REPLY_3945_RX = 0x1b, /* 3945 only */
85#endif
86 REPLY_TX = 0x1c,
87 REPLY_RATE_SCALE = 0x47, /* 3945 only */
88 REPLY_LEDS_CMD = 0x48,
89 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
90
91 /* 802.11h related */
92 RADAR_NOTIFICATION = 0x70, /* not used */
93 REPLY_QUIET_CMD = 0x71, /* not used */
94 REPLY_CHANNEL_SWITCH = 0x72,
95 CHANNEL_SWITCH_NOTIFICATION = 0x73,
96 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
97 SPECTRUM_MEASURE_NOTIFICATION = 0x75,
98
99 /* Power Management */
100 POWER_TABLE_CMD = 0x77,
101 PM_SLEEP_NOTIFICATION = 0x7A,
102 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
103
104 /* Scan commands and notifications */
105 REPLY_SCAN_CMD = 0x80,
106 REPLY_SCAN_ABORT_CMD = 0x81,
107 SCAN_START_NOTIFICATION = 0x82,
108 SCAN_RESULTS_NOTIFICATION = 0x83,
109 SCAN_COMPLETE_NOTIFICATION = 0x84,
110
111 /* IBSS/AP commands */
112 BEACON_NOTIFICATION = 0x90,
113 REPLY_TX_BEACON = 0x91,
114 WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */
115
116 /* Miscellaneous commands */
117 QUIET_NOTIFICATION = 0x96, /* not used */
118 REPLY_TX_PWR_TABLE_CMD = 0x97,
119 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
120
121 /* BT config command */
122 REPLY_BT_CONFIG = 0x9b,
123
124 /* 4965 Statistics */
125 REPLY_STATISTICS_CMD = 0x9c,
126 STATISTICS_NOTIFICATION = 0x9d,
127
128 /* RF-KILL commands and notifications */
129 REPLY_CARD_STATE_CMD = 0xa0,
130 CARD_STATE_NOTIFICATION = 0xa1,
131
132 /* Missed beacons notification */
133 MISSED_BEACONS_NOTIFICATION = 0xa2,
134
135#if IWL == 4965
136 REPLY_CT_KILL_CONFIG_CMD = 0xa4,
137 SENSITIVITY_CMD = 0xa8,
138 REPLY_PHY_CALIBRATION_CMD = 0xb0,
139 REPLY_RX_PHY_CMD = 0xc0,
140 REPLY_RX_MPDU_CMD = 0xc1,
141 REPLY_4965_RX = 0xc3,
142 REPLY_COMPRESSED_BA = 0xc5,
143#endif
144 REPLY_MAX = 0xff
145};
146
147/******************************************************************************
148 * (0)
149 * Header
150 *
151 *****************************************************************************/
152
153#define IWL_CMD_FAILED_MSK 0x40
154
155struct iwl_cmd_header {
156 u8 cmd;
157 u8 flags;
158 /* We have 15 LSB to use as we please (MSB indicates
159 * a frame Rx'd from the HW). We encode the following
160 * information into the sequence field:
161 *
162 * 0:7 index in fifo
163 * 8:13 fifo selection
164 * 14:14 bit indicating if this packet references the 'extra'
165 * storage at the end of the memory queue
166 * 15:15 (Rx indication)
167 *
168 */
169 __le16 sequence;
170
171 /* command data follows immediately */
172 u8 data[0];
173} __attribute__ ((packed));
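/*
 * Illustrative sketch (not part of this patch): given the bit layout
 * described above, packing and unpacking the sequence word would look
 * roughly like the following (these helper names are hypothetical, not
 * necessarily the driver's own):
 *
 *	#define SEQ_TO_INDEX(s)	((s) & 0x00ff)		// bits 0:7
 *	#define SEQ_TO_FIFO(s)	(((s) >> 8) & 0x3f)	// bits 8:13
 *	#define SEQ_HUGE_FRAME	(1 << 14)		// 'extra' storage bit
 *	#define SEQ_RX_FRAME	(1 << 15)		// set by HW on Rx'd frames
 */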
174
175/******************************************************************************
176 * (0a)
177 * Alive and Error Commands & Responses:
178 *
179 *****************************************************************************/
180
181#define UCODE_VALID_OK __constant_cpu_to_le32(0x1)
182#define INITIALIZE_SUBTYPE (9)
183
184/*
185 * REPLY_ALIVE = 0x1 (response only, not a command)
186 */
187struct iwl_alive_resp {
188 u8 ucode_minor;
189 u8 ucode_major;
190 __le16 reserved1;
191 u8 sw_rev[8];
192 u8 ver_type;
193 u8 ver_subtype;
194 __le16 reserved2;
195 __le32 log_event_table_ptr;
196 __le32 error_event_table_ptr;
197 __le32 timestamp;
198 __le32 is_valid;
199} __attribute__ ((packed));
200
201struct iwl_init_alive_resp {
202 u8 ucode_minor;
203 u8 ucode_major;
204 __le16 reserved1;
205 u8 sw_rev[8];
206 u8 ver_type;
207 u8 ver_subtype;
208 __le16 reserved2;
209 __le32 log_event_table_ptr;
210 __le32 error_event_table_ptr;
211 __le32 timestamp;
212 __le32 is_valid;
213
214#if IWL == 4965
215 /* calibration values from "initialize" uCode */
216 __le32 voltage; /* signed */
217 __le32 therm_r1[2]; /* signed 1st for normal, 2nd for FAT channel */
218 __le32 therm_r2[2]; /* signed */
219 __le32 therm_r3[2]; /* signed */
220 __le32 therm_r4[2]; /* signed */
221 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
222 * 2 Tx chains */
223#endif
224} __attribute__ ((packed));
225
226union tsf {
227 u8 byte[8];
228 __le16 word[4];
229 __le32 dw[2];
230};
231
232/*
233 * REPLY_ERROR = 0x2 (response only, not a command)
234 */
235struct iwl_error_resp {
236 __le32 error_type;
237 u8 cmd_id;
238 u8 reserved1;
239 __le16 bad_cmd_seq_num;
240#if IWL == 3945
241 __le16 reserved2;
242#endif
243 __le32 error_info;
244 union tsf timestamp;
245} __attribute__ ((packed));
246
247/******************************************************************************
248 * (1)
249 * RXON Commands & Responses:
250 *
251 *****************************************************************************/
252
253/*
254 * Rx config defines & structure
255 */
256/* rx_config device types */
257enum {
258 RXON_DEV_TYPE_AP = 1,
259 RXON_DEV_TYPE_ESS = 3,
260 RXON_DEV_TYPE_IBSS = 4,
261 RXON_DEV_TYPE_SNIFFER = 6,
262};
263
264/* rx_config flags */
265/* band & modulation selection */
266#define RXON_FLG_BAND_24G_MSK __constant_cpu_to_le32(1 << 0)
267#define RXON_FLG_CCK_MSK __constant_cpu_to_le32(1 << 1)
268/* auto detection enable */
269#define RXON_FLG_AUTO_DETECT_MSK __constant_cpu_to_le32(1 << 2)
270/* TGg protection when tx */
271#define RXON_FLG_TGG_PROTECT_MSK __constant_cpu_to_le32(1 << 3)
272/* cck short slot & preamble */
273#define RXON_FLG_SHORT_SLOT_MSK __constant_cpu_to_le32(1 << 4)
274#define RXON_FLG_SHORT_PREAMBLE_MSK __constant_cpu_to_le32(1 << 5)
275/* antenna selection */
276#define RXON_FLG_DIS_DIV_MSK __constant_cpu_to_le32(1 << 7)
277#define RXON_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0x0f00)
278#define RXON_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8)
279#define RXON_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9)
280/* radar detection enable */
281#define RXON_FLG_RADAR_DETECT_MSK __constant_cpu_to_le32(1 << 12)
282#define RXON_FLG_TGJ_NARROW_BAND_MSK __constant_cpu_to_le32(1 << 13)
283/* rx response to host with 8-byte TSF
284* (according to ON_AIR deassertion) */
285#define RXON_FLG_TSF2HOST_MSK __constant_cpu_to_le32(1 << 15)
286
287/* rx_config filter flags */
288/* accept all data frames */
289#define RXON_FILTER_PROMISC_MSK __constant_cpu_to_le32(1 << 0)
290/* pass control & management to host */
291#define RXON_FILTER_CTL2HOST_MSK __constant_cpu_to_le32(1 << 1)
292/* accept multi-cast */
293#define RXON_FILTER_ACCEPT_GRP_MSK __constant_cpu_to_le32(1 << 2)
294/* don't decrypt uni-cast frames */
295#define RXON_FILTER_DIS_DECRYPT_MSK __constant_cpu_to_le32(1 << 3)
296/* don't decrypt multi-cast frames */
297#define RXON_FILTER_DIS_GRP_DECRYPT_MSK __constant_cpu_to_le32(1 << 4)
298/* STA is associated */
299#define RXON_FILTER_ASSOC_MSK __constant_cpu_to_le32(1 << 5)
300/* transfer to host non bssid beacons in associated state */
301#define RXON_FILTER_BCON_AWARE_MSK __constant_cpu_to_le32(1 << 6)
302
303/*
304 * REPLY_RXON = 0x10 (command, has simple generic response)
305 */
306struct iwl_rxon_cmd {
307 u8 node_addr[6];
308 __le16 reserved1;
309 u8 bssid_addr[6];
310 __le16 reserved2;
311 u8 wlap_bssid_addr[6];
312 __le16 reserved3;
313 u8 dev_type;
314 u8 air_propagation;
315#if IWL == 3945
316 __le16 reserved4;
317#elif IWL == 4965
318 __le16 rx_chain;
319#endif
320 u8 ofdm_basic_rates;
321 u8 cck_basic_rates;
322 __le16 assoc_id;
323 __le32 flags;
324 __le32 filter_flags;
325 __le16 channel;
326#if IWL == 3945
327 __le16 reserved5;
328#elif IWL == 4965
329 u8 ofdm_ht_single_stream_basic_rates;
330 u8 ofdm_ht_dual_stream_basic_rates;
331#endif
332} __attribute__ ((packed));
333
334/*
335 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
336 */
337struct iwl_rxon_assoc_cmd {
338 __le32 flags;
339 __le32 filter_flags;
340 u8 ofdm_basic_rates;
341 u8 cck_basic_rates;
342#if IWL == 4965
343 u8 ofdm_ht_single_stream_basic_rates;
344 u8 ofdm_ht_dual_stream_basic_rates;
345 __le16 rx_chain_select_flags;
346#endif
347 __le16 reserved;
348} __attribute__ ((packed));
349
350/*
351 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
352 */
353struct iwl_rxon_time_cmd {
354 union tsf timestamp;
355 __le16 beacon_interval;
356 __le16 atim_window;
357 __le32 beacon_init_val;
358 __le16 listen_interval;
359 __le16 reserved;
360} __attribute__ ((packed));
361
362struct iwl_tx_power {
363 u8 tx_gain; /* gain for analog radio */
364 u8 dsp_atten; /* gain for DSP */
365} __attribute__ ((packed));
366
367#if IWL == 3945
368struct iwl_power_per_rate {
369 u8 rate; /* plcp */
370 struct iwl_tx_power tpc;
371 u8 reserved;
372} __attribute__ ((packed));
373
374#elif IWL == 4965
375#define POWER_TABLE_NUM_ENTRIES 33
376#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
377#define POWER_TABLE_CCK_ENTRY 32
378struct tx_power_dual_stream {
379 __le32 dw;
380} __attribute__ ((packed));
381
382struct iwl_tx_power_db {
383 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
384} __attribute__ ((packed));
385#endif
386
387/*
388 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
389 */
390struct iwl_channel_switch_cmd {
391 u8 band;
392 u8 expect_beacon;
393 __le16 channel;
394 __le32 rxon_flags;
395 __le32 rxon_filter_flags;
396 __le32 switch_time;
397#if IWL == 3945
398 struct iwl_power_per_rate power[IWL_MAX_RATES];
399#elif IWL == 4965
400 struct iwl_tx_power_db tx_power;
401#endif
402} __attribute__ ((packed));
403
404/*
405 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
406 */
407struct iwl_csa_notification {
408 __le16 band;
409 __le16 channel;
410 __le32 status; /* 0 - OK, 1 - fail */
411} __attribute__ ((packed));
412
413/******************************************************************************
414 * (2)
415 * Quality-of-Service (QOS) Commands & Responses:
416 *
417 *****************************************************************************/
418struct iwl_ac_qos {
419 __le16 cw_min;
420 __le16 cw_max;
421 u8 aifsn;
422 u8 reserved1;
423 __le16 edca_txop;
424} __attribute__ ((packed));
425
426/* QoS flags defines */
427#define QOS_PARAM_FLG_UPDATE_EDCA_MSK __constant_cpu_to_le32(0x01)
428#define QOS_PARAM_FLG_TGN_MSK __constant_cpu_to_le32(0x02)
429#define QOS_PARAM_FLG_TXOP_TYPE_MSK __constant_cpu_to_le32(0x10)
430
431/*
432 * TXFIFO Queue number defines
433 */
434/* number of Access categories (AC) (EDCA), queues 0..3 */
435#define AC_NUM 4
436
437/*
438 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
439 */
440struct iwl_qosparam_cmd {
441 __le32 qos_flags;
442 struct iwl_ac_qos ac[AC_NUM];
443} __attribute__ ((packed));
444
445/******************************************************************************
446 * (3)
447 * Add/Modify Stations Commands & Responses:
448 *
449 *****************************************************************************/
450/*
451 * Multi station support
452 */
453#define IWL_AP_ID 0
454#define IWL_MULTICAST_ID 1
455#define IWL_STA_ID 2
456
457#define IWL3945_BROADCAST_ID 24
458#define IWL3945_STATION_COUNT 25
459
460#define IWL4965_BROADCAST_ID 31
461#define IWL4965_STATION_COUNT 32
462
463#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
464#define IWL_INVALID_STATION 255
465
466#if IWL == 3945
467#define STA_FLG_TX_RATE_MSK		__constant_cpu_to_le32(1<<2)
468#endif
469#define STA_FLG_PWR_SAVE_MSK		__constant_cpu_to_le32(1<<8)
470
471#define STA_CONTROL_MODIFY_MSK 0x01
472
473/* key flags __le16*/
474#define STA_KEY_FLG_ENCRYPT_MSK __constant_cpu_to_le16(0x7)
475#define STA_KEY_FLG_NO_ENC __constant_cpu_to_le16(0x0)
476#define STA_KEY_FLG_WEP __constant_cpu_to_le16(0x1)
477#define STA_KEY_FLG_CCMP __constant_cpu_to_le16(0x2)
478#define STA_KEY_FLG_TKIP __constant_cpu_to_le16(0x3)
479
480#define STA_KEY_FLG_KEYID_POS 8
481#define STA_KEY_FLG_INVALID __constant_cpu_to_le16(0x0800)
482
483/* modify flags */
484#define STA_MODIFY_KEY_MASK 0x01
485#define STA_MODIFY_TID_DISABLE_TX 0x02
486#define STA_MODIFY_TX_RATE_MSK 0x04
487#define STA_MODIFY_ADDBA_TID_MSK 0x08
488#define STA_MODIFY_DELBA_TID_MSK 0x10
489#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
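/*
 * Illustrative note (not part of this patch): BUILD_RAxTID() packs a
 * station index and TID into a single byte, e.g. BUILD_RAxTID(2, 5) == 0x25.
 * The 4965 scheduler's queue-to-RA/TID translate table (see
 * iwl4965_tx_queue_set_q2ratid() in iwl-4965.c) is filled with these values.
 */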
490
491/*
492 * Antenna masks:
493 * bit14:15 01 B inactive, A active
494 * 10 B active, A inactive
495 * 11 Both active
496 */
497#define RATE_MCS_ANT_A_POS 14
498#define RATE_MCS_ANT_B_POS 15
499#define RATE_MCS_ANT_A_MSK 0x4000
500#define RATE_MCS_ANT_B_MSK 0x8000
501#define RATE_MCS_ANT_AB_MSK 0xc000
502
503struct iwl_keyinfo {
504 __le16 key_flags;
505 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
506 u8 reserved1;
507 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
508 __le16 reserved2;
509 u8 key[16]; /* 16-byte unicast decryption key */
510} __attribute__ ((packed));
511
512struct sta_id_modify {
513 u8 addr[ETH_ALEN];
514 __le16 reserved1;
515 u8 sta_id;
516 u8 modify_mask;
517 __le16 reserved2;
518} __attribute__ ((packed));
519
520/*
521 * REPLY_ADD_STA = 0x18 (command)
522 */
523struct iwl_addsta_cmd {
524 u8 mode;
525 u8 reserved[3];
526 struct sta_id_modify sta;
527 struct iwl_keyinfo key;
528 __le32 station_flags;
529 __le32 station_flags_msk;
530 __le16 tid_disable_tx;
531#if IWL == 3945
532 __le16 rate_n_flags;
533#else
534 __le16 reserved1;
535#endif
536 u8 add_immediate_ba_tid;
537 u8 remove_immediate_ba_tid;
538 __le16 add_immediate_ba_ssn;
539#if IWL == 4965
540 __le32 reserved2;
541#endif
542} __attribute__ ((packed));
543
544/*
545 * REPLY_ADD_STA = 0x18 (response)
546 */
547struct iwl_add_sta_resp {
548 u8 status;
549} __attribute__ ((packed));
550
551#define ADD_STA_SUCCESS_MSK 0x1
552
553/******************************************************************************
554 * (4)
555 * Rx Responses:
556 *
557 *****************************************************************************/
558
559struct iwl_rx_frame_stats {
560 u8 phy_count;
561 u8 id;
562 u8 rssi;
563 u8 agc;
564 __le16 sig_avg;
565 __le16 noise_diff;
566 u8 payload[0];
567} __attribute__ ((packed));
568
569struct iwl_rx_frame_hdr {
570 __le16 channel;
571 __le16 phy_flags;
572 u8 reserved1;
573 u8 rate;
574 __le16 len;
575 u8 payload[0];
576} __attribute__ ((packed));
577
578#define RX_RES_STATUS_NO_CRC32_ERROR __constant_cpu_to_le32(1 << 0)
579#define RX_RES_STATUS_NO_RXE_OVERFLOW __constant_cpu_to_le32(1 << 1)
580
581#define RX_RES_PHY_FLAGS_BAND_24_MSK __constant_cpu_to_le16(1 << 0)
582#define RX_RES_PHY_FLAGS_MOD_CCK_MSK __constant_cpu_to_le16(1 << 1)
583#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK __constant_cpu_to_le16(1 << 2)
584#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK __constant_cpu_to_le16(1 << 3)
585#define RX_RES_PHY_FLAGS_ANTENNA_MSK __constant_cpu_to_le16(0xf0)
586
587#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
588#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
589#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
590#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
591#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
592
593#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
594#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
595#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
596#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
597#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
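/*
 * Illustrative sketch (not part of this patch): an Rx path would typically
 * check these bits before trusting a decrypted frame, roughly
 * ('rx_end' stands for a pointer to the frame's iwl_rx_frame_end):
 *
 *	u32 status = le32_to_cpu(rx_end->status);
 *	if ((status & RX_RES_STATUS_SEC_TYPE_MSK) != RX_RES_STATUS_SEC_TYPE_NONE &&
 *	    (status & RX_RES_STATUS_DECRYPT_TYPE_MSK) != RX_RES_STATUS_DECRYPT_OK)
 *		// drop or flag the frame (e.g. BAD_ICV_MIC, BAD_KEY_TTAK)
 */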
598
599struct iwl_rx_frame_end {
600 __le32 status;
601 __le64 timestamp;
602 __le32 beacon_timestamp;
603} __attribute__ ((packed));
604
605/*
606 * REPLY_3945_RX = 0x1b (response only, not a command)
607 *
608 * NOTE: DO NOT dereference from casts to this structure
609 * It is provided only for calculating minimum data set size.
610 * The actual offsets of the hdr and end are dynamic based on
611 * stats.phy_count
612 */
613struct iwl_rx_frame {
614 struct iwl_rx_frame_stats stats;
615 struct iwl_rx_frame_hdr hdr;
616 struct iwl_rx_frame_end end;
617} __attribute__ ((packed));
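/*
 * Illustrative sketch (not part of this patch): since stats.phy_count bytes
 * of phy data sit between the fixed parts, the hdr and end of a received
 * frame have to be located at run time, roughly:
 *
 *	struct iwl_rx_frame *frame;	// points at the start of the Rx data
 *	struct iwl_rx_frame_hdr *rx_hdr =
 *		(void *)(frame->stats.payload + frame->stats.phy_count);
 *	struct iwl_rx_frame_end *rx_end =
 *		(void *)(rx_hdr->payload + le16_to_cpu(rx_hdr->len));
 */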
618
619/* Fixed (non-configurable) rx data from phy */
620#define RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
621#define RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
622#define IWL_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
623#define IWL_AGC_DB_POS (7)
624struct iwl4965_rx_non_cfg_phy {
625 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
626 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
627 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
628 u8 pad[0];
629} __attribute__ ((packed));
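/*
 * Illustrative sketch (not part of this patch): the AGC value in dB is
 * extracted from agc_info with the mask and position defined above
 * ('phy' stands for a pointer to this structure):
 *
 *	u16 agc = le16_to_cpu(phy->agc_info);
 *	u8 agc_db = (agc & IWL_AGC_DB_MASK) >> IWL_AGC_DB_POS;
 */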
630
631/*
632 * REPLY_4965_RX = 0xc3 (response only, not a command)
633 * Used only for legacy (non 11n) frames.
634 */
635#define RX_RES_PHY_CNT 14
636struct iwl4965_rx_phy_res {
637 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
638 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
639 u8 stat_id; /* configurable DSP phy data set ID */
640 u8 reserved1;
641 __le64 timestamp; /* TSF at on air rise */
642 __le32 beacon_time_stamp; /* beacon at on-air rise */
643 __le16 phy_flags; /* general phy flags: band, modulation, ... */
644 __le16 channel; /* channel number */
645	__le16 non_cfg_phy[RX_RES_PHY_CNT];	/* up to 14 phy entries */
646 __le32 reserved2;
647 __le32 rate_n_flags;
648 __le16 byte_count; /* frame's byte-count */
649 __le16 reserved3;
650} __attribute__ ((packed));
651
652struct iwl4965_rx_mpdu_res_start {
653 __le16 byte_count;
654 __le16 reserved;
655} __attribute__ ((packed));
656
657
658/******************************************************************************
659 * (5)
660 * Tx Commands & Responses:
661 *
662 *****************************************************************************/
663
664/* Tx flags */
665#define TX_CMD_FLG_RTS_MSK __constant_cpu_to_le32(1 << 1)
666#define TX_CMD_FLG_CTS_MSK __constant_cpu_to_le32(1 << 2)
667#define TX_CMD_FLG_ACK_MSK __constant_cpu_to_le32(1 << 3)
668#define TX_CMD_FLG_STA_RATE_MSK __constant_cpu_to_le32(1 << 4)
669#define TX_CMD_FLG_IMM_BA_RSP_MASK __constant_cpu_to_le32(1 << 6)
670#define TX_CMD_FLG_FULL_TXOP_PROT_MSK __constant_cpu_to_le32(1 << 7)
671#define TX_CMD_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0xf00)
672#define TX_CMD_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8)
673#define TX_CMD_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9)
674
675/* ucode ignores BT priority for this frame */
676#define TX_CMD_FLG_BT_DIS_MSK __constant_cpu_to_le32(1 << 12)
677
678/* ucode overrides sequence control */
679#define TX_CMD_FLG_SEQ_CTL_MSK __constant_cpu_to_le32(1 << 13)
680
681/* signals that this frame is not the last MPDU */
682#define TX_CMD_FLG_MORE_FRAG_MSK __constant_cpu_to_le32(1 << 14)
683
684/* calculate TSF in outgoing frame */
685#define TX_CMD_FLG_TSF_MSK __constant_cpu_to_le32(1 << 16)
686
687/* activate TX calibration. */
688#define TX_CMD_FLG_CALIB_MSK __constant_cpu_to_le32(1 << 17)
689
690/* signals that a 2-byte pad was inserted
691 after the MAC header */
692#define TX_CMD_FLG_MH_PAD_MSK __constant_cpu_to_le32(1 << 20)
693
694/* HCCA-AP - disable duration overwriting. */
695#define TX_CMD_FLG_DUR_MSK __constant_cpu_to_le32(1 << 25)
696
697/*
698 * TX command security control
699 */
700#define TX_CMD_SEC_WEP 0x01
701#define TX_CMD_SEC_CCM 0x02
702#define TX_CMD_SEC_TKIP 0x03
703#define TX_CMD_SEC_MSK 0x03
704#define TX_CMD_SEC_SHIFT 6
705#define TX_CMD_SEC_KEY128 0x08
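/*
 * Illustrative sketch (not part of this patch): sec_ctl in the Tx command
 * combines the cipher selection with a key index; for WEP this would look
 * roughly like ('key_idx' and 'key_len' are placeholders):
 *
 *	tx_cmd->sec_ctl = TX_CMD_SEC_WEP | (key_idx << TX_CMD_SEC_SHIFT);
 *	if (key_len == 13)
 *		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;	// 104-bit WEP key
 */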
706
707/*
708 * TX command Frame life time
709 */
710
711struct iwl_dram_scratch {
712 u8 try_cnt;
713 u8 bt_kill_cnt;
714 __le16 reserved;
715} __attribute__ ((packed));
716
717/*
718 * REPLY_TX = 0x1c (command)
719 */
720struct iwl_tx_cmd {
721 __le16 len;
722 __le16 next_frame_len;
723 __le32 tx_flags;
724#if IWL == 3945
725 u8 rate;
726 u8 sta_id;
727 u8 tid_tspec;
728#elif IWL == 4965
729 struct iwl_dram_scratch scratch;
730 __le32 rate_n_flags;
731 u8 sta_id;
732#endif
733 u8 sec_ctl;
734#if IWL == 4965
735 u8 initial_rate_index;
736 u8 reserved;
737#endif
738 u8 key[16];
739#if IWL == 3945
740 union {
741 u8 byte[8];
742 __le16 word[4];
743 __le32 dw[2];
744 } tkip_mic;
745 __le32 next_frame_info;
746#elif IWL == 4965
747 __le16 next_frame_flags;
748 __le16 reserved2;
749#endif
750 union {
751 __le32 life_time;
752 __le32 attempt;
753 } stop_time;
754#if IWL == 3945
755 u8 supp_rates[2];
756#elif IWL == 4965
757 __le32 dram_lsb_ptr;
758 u8 dram_msb_ptr;
759#endif
760 u8 rts_retry_limit; /*byte 50 */
761 u8 data_retry_limit; /*byte 51 */
762#if IWL == 4965
763 u8 tid_tspec;
764#endif
765 union {
766 __le16 pm_frame_timeout;
767 __le16 attempt_duration;
768 } timeout;
769 __le16 driver_txop;
770 u8 payload[0];
771 struct ieee80211_hdr hdr[0];
772} __attribute__ ((packed));
773
774/* TX command response is sent after *all* transmission attempts.
775 *
776 * NOTES:
777 *
778 * TX_STATUS_FAIL_NEXT_FRAG
779 *
780 * If the fragment flag in the MAC header for the frame being transmitted
781 * is set and there is insufficient time to transmit the next frame, the
782 * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
783 *
784 * TX_STATUS_FIFO_UNDERRUN
785 *
786 * Indicates the host did not provide bytes to the FIFO fast enough while
787 * a TX was in progress.
788 *
789 * TX_STATUS_FAIL_MGMNT_ABORT
790 *
791 * This status is only possible if the ABORT ON MGMT RX parameter was
792 * set to true with the TX command.
793 *
794 * If the MSB of the status parameter is set then an abort sequence is
795 * required. This sequence consists of the host activating the TX Abort
796 * control line, and then waiting for the TX Abort command response. This
797 * indicates that the device is no longer in a transmit state, and that the
798 * command FIFO has been cleared. The host must then deactivate the TX Abort
799 * control line. Receiving is still allowed in this case.
800 */
801enum {
802 TX_STATUS_SUCCESS = 0x01,
803 TX_STATUS_DIRECT_DONE = 0x02,
804 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
805 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
806 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
807 TX_STATUS_FAIL_MGMNT_ABORT = 0x85,
808 TX_STATUS_FAIL_NEXT_FRAG = 0x86,
809 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
810 TX_STATUS_FAIL_DEST_PS = 0x88,
811 TX_STATUS_FAIL_ABORTED = 0x89,
812 TX_STATUS_FAIL_BT_RETRY = 0x8a,
813 TX_STATUS_FAIL_STA_INVALID = 0x8b,
814 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
815 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
816 TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
817 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
818 TX_STATUS_FAIL_TX_LOCKED = 0x90,
819 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
820};
821
822#define TX_PACKET_MODE_REGULAR 0x0000
823#define TX_PACKET_MODE_BURST_SEQ 0x0100
824#define TX_PACKET_MODE_BURST_FIRST 0x0200
825
826enum {
827 TX_POWER_PA_NOT_ACTIVE = 0x0,
828};
829
830enum {
831 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
832 TX_STATUS_DELAY_MSK = 0x00000040,
833 TX_STATUS_ABORT_MSK = 0x00000080,
834 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
835 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
836 TX_RESERVED = 0x00780000, /* bits 19:22 */
837 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
838 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
839};
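A small sketch, not part of the original patch, of how the status word of a TX response might be decoded with the masks above; per the notes earlier in this section, a set MSB means the host must run the TX abort sequence:

static inline int iwl_example_tx_frame_ok(__le32 status)
{
	u32 s = le32_to_cpu(status) & TX_STATUS_MSK;

	return s == TX_STATUS_SUCCESS || s == TX_STATUS_DIRECT_DONE;
}

static inline int iwl_example_tx_abort_required(__le32 status)
{
	return !!(le32_to_cpu(status) & TX_ABORT_REQUIRED_MSK);
}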
840
841/* *******************************
842 * TX aggregation state
843 ******************************* */
844
845enum {
846 AGG_TX_STATE_TRANSMITTED = 0x00,
847 AGG_TX_STATE_UNDERRUN_MSK = 0x01,
848 AGG_TX_STATE_BT_PRIO_MSK = 0x02,
849 AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
850 AGG_TX_STATE_ABORT_MSK = 0x08,
851 AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
852 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
853 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40,
854 AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
855 AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
856 AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
857 AGG_TX_STATE_DUMP_TX_MSK = 0x200,
858 AGG_TX_STATE_DELAY_TX_MSK = 0x400
859};
860
861#define AGG_TX_STATE_LAST_SENT_MSK \
862(AGG_TX_STATE_LAST_SENT_TTL_MSK | \
863 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
864 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK)
865
866#define AGG_TX_STATE_TRY_CNT_POS 12
867#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
868
869#define AGG_TX_STATE_SEQ_NUM_POS 16
870#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
871
872/*
873 * REPLY_TX = 0x1c (response)
874 */
875#if IWL == 4965
876struct iwl_tx_resp {
877 u8 frame_count; /* 1 no aggregation, >1 aggregation */
878 u8 bt_kill_count;
879 u8 failure_rts;
880 u8 failure_frame;
881 __le32 rate_n_flags;
882 __le16 wireless_media_time;
883 __le16 reserved;
884 __le32 pa_power1;
885 __le32 pa_power2;
886 __le32 status; /* TX status (for aggregation status of 1st frame) */
887} __attribute__ ((packed));
888
889#elif IWL == 3945
890struct iwl_tx_resp {
891 u8 failure_rts;
892 u8 failure_frame;
893 u8 bt_kill_count;
894 u8 rate;
895 __le32 wireless_media_time;
896 __le32 status; /* TX status (for aggregation status of 1st frame) */
897} __attribute__ ((packed));
898#endif
899
900/*
901 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
902 */
903struct iwl_compressed_ba_resp {
904 __le32 sta_addr_lo32;
905 __le16 sta_addr_hi16;
906 __le16 reserved;
907 u8 sta_id;
908 u8 tid;
909 __le16 ba_seq_ctl;
910 __le32 ba_bitmap0;
911 __le32 ba_bitmap1;
912 __le16 scd_flow;
913 __le16 scd_ssn;
914} __attribute__ ((packed));
915
916/*
917 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
918 */
919struct iwl_txpowertable_cmd {
920 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
921 u8 reserved;
922 __le16 channel;
923#if IWL == 3945
924 struct iwl_power_per_rate power[IWL_MAX_RATES];
925#elif IWL == 4965
926 struct iwl_tx_power_db tx_power;
927#endif
928} __attribute__ ((packed));
929
930#if IWL == 3945
931struct iwl_rate_scaling_info {
932 __le16 rate_n_flags;
933 u8 try_cnt;
934 u8 next_rate_index;
935} __attribute__ ((packed));
936
937/**
938 * struct iwl_rate_scaling_cmd - Rate Scaling Command & Response
939 *
940 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
941 *
942 * NOTE: The table of rates passed to the uCode via the
943 * RATE_SCALE command sets up the corresponding order of
944 * rates used for all related commands, including rate
945 * masks, etc.
946 *
947 * For example, if you set 9MB (PLCP 0x0f) as the first
948 * rate in the rate table, the bit mask for that rate
949 * when passed through ofdm_basic_rates on the REPLY_RXON
950 * command would be bit 0 (1<<0)
951 */
952struct iwl_rate_scaling_cmd {
953 u8 table_id;
954 u8 reserved[3];
955 struct iwl_rate_scaling_info table[IWL_MAX_RATES];
956} __attribute__ ((packed));
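As an illustration of the NOTE above (the helper and its values are placeholders, not part of this patch): placing 9 Mbps (PLCP 0x0f) in entry 0 of the table means that bit 0 of ofdm_basic_rates in REPLY_RXON then refers to that rate.

static inline void iwl_example_rate_table_entry0(struct iwl_rate_scaling_cmd *cmd)
{
	cmd->table_id = 0;
	cmd->table[0].rate_n_flags = cpu_to_le16(0x0f);	/* 9 Mbps OFDM PLCP */
	cmd->table[0].try_cnt = 1;			/* placeholder */
	cmd->table[0].next_rate_index = 1;		/* fall back to entry 1 */
}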
957
958#elif IWL == 4965
959
960/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
961#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1<<0)
962
963#define LINK_QUAL_AC_NUM AC_NUM
964#define LINK_QUAL_MAX_RETRY_NUM 16
965
966#define LINK_QUAL_ANT_A_MSK (1<<0)
967#define LINK_QUAL_ANT_B_MSK (1<<1)
968#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
969
970struct iwl_link_qual_general_params {
971 u8 flags;
972 u8 mimo_delimiter;
973 u8 single_stream_ant_msk;
974 u8 dual_stream_ant_msk;
975 u8 start_rate_index[LINK_QUAL_AC_NUM];
976} __attribute__ ((packed));
977
978struct iwl_link_qual_agg_params {
979 __le16 agg_time_limit;
980 u8 agg_dis_start_th;
981 u8 agg_frame_cnt_limit;
982 __le32 reserved;
983} __attribute__ ((packed));
984
985/*
986 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
987 */
988struct iwl_link_quality_cmd {
989 u8 sta_id;
990 u8 reserved1;
991 __le16 control;
992 struct iwl_link_qual_general_params general_params;
993 struct iwl_link_qual_agg_params agg_params;
994 struct {
995 __le32 rate_n_flags;
996 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
997 __le32 reserved2;
998} __attribute__ ((packed));
999#endif
1000
1001/*
1002 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
1003 */
1004struct iwl_bt_cmd {
1005 u8 flags;
1006 u8 lead_time;
1007 u8 max_kill;
1008 u8 reserved;
1009 __le32 kill_ack_mask;
1010 __le32 kill_cts_mask;
1011} __attribute__ ((packed));
1012
1013/******************************************************************************
1014 * (6)
1015 * Spectrum Management (802.11h) Commands, Responses, Notifications:
1016 *
1017 *****************************************************************************/
1018
1019/*
1020 * Spectrum Management
1021 */
1022#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
1023 RXON_FILTER_CTL2HOST_MSK | \
1024 RXON_FILTER_ACCEPT_GRP_MSK | \
1025 RXON_FILTER_DIS_DECRYPT_MSK | \
1026 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
1027 RXON_FILTER_ASSOC_MSK | \
1028 RXON_FILTER_BCON_AWARE_MSK)
1029
1030struct iwl_measure_channel {
1031 __le32 duration; /* measurement duration in extended beacon
1032 * format */
1033 u8 channel; /* channel to measure */
1034 u8 type; /* see enum iwl_measure_type */
1035 __le16 reserved;
1036} __attribute__ ((packed));
1037
1038/*
1039 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
1040 */
1041struct iwl_spectrum_cmd {
1042 __le16 len; /* number of bytes starting from token */
1043 u8 token; /* token id */
1044 u8 id; /* measurement id -- 0 or 1 */
1045 u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */
1046 u8 periodic; /* 1 = periodic */
1047 __le16 path_loss_timeout;
1048 __le32 start_time; /* start time in extended beacon format */
1049 __le32 reserved2;
1050 __le32 flags; /* rxon flags */
1051 __le32 filter_flags; /* rxon filter flags */
1052 __le16 channel_count; /* minimum 1, maximum 10 */
1053 __le16 reserved3;
1054 struct iwl_measure_channel channels[10];
1055} __attribute__ ((packed));
1056
1057/*
1058 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
1059 */
1060struct iwl_spectrum_resp {
1061 u8 token;
1062 u8 id; /* id of the prior command replaced, or 0xff */
1063 __le16 status; /* 0 - command will be handled
1064 * 1 - cannot handle (conflicts with another
1065 * measurement) */
1066} __attribute__ ((packed));
1067
1068enum iwl_measurement_state {
1069 IWL_MEASUREMENT_START = 0,
1070 IWL_MEASUREMENT_STOP = 1,
1071};
1072
1073enum iwl_measurement_status {
1074 IWL_MEASUREMENT_OK = 0,
1075 IWL_MEASUREMENT_CONCURRENT = 1,
1076 IWL_MEASUREMENT_CSA_CONFLICT = 2,
1077 IWL_MEASUREMENT_TGH_CONFLICT = 3,
1078 /* 4-5 reserved */
1079 IWL_MEASUREMENT_STOPPED = 6,
1080 IWL_MEASUREMENT_TIMEOUT = 7,
1081 IWL_MEASUREMENT_PERIODIC_FAILED = 8,
1082};
1083
1084#define NUM_ELEMENTS_IN_HISTOGRAM 8
1085
1086struct iwl_measurement_histogram {
1087 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
1088 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
1089} __attribute__ ((packed));
1090
1091/* clear channel availability counters */
1092struct iwl_measurement_cca_counters {
1093 __le32 ofdm;
1094 __le32 cck;
1095} __attribute__ ((packed));
1096
1097enum iwl_measure_type {
1098 IWL_MEASURE_BASIC = (1 << 0),
1099 IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
1100 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
1101 IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
1102 IWL_MEASURE_FRAME = (1 << 4),
1103 /* bits 5:6 are reserved */
1104 IWL_MEASURE_IDLE = (1 << 7),
1105};
1106
1107/*
1108 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
1109 */
1110struct iwl_spectrum_notification {
1111 u8 id; /* measurement id -- 0 or 1 */
1112 u8 token;
1113 u8 channel_index; /* index in measurement channel list */
1114 u8 state; /* 0 - start, 1 - stop */
1115 __le32 start_time; /* lower 32-bits of TSF */
1116 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
1117 u8 channel;
1118 u8 type; /* see enum iwl_measurement_type */
1119 u8 reserved1;
1120	/* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only
1121	 *        valid if applicable to the measurement type requested. */
1122	__le32 cca_ofdm;	/* cca fraction time in 40 MHz clock periods */
1123	__le32 cca_cck;		/* cca fraction time in 44 MHz clock periods */
1124 __le32 cca_time; /* channel load time in usecs */
1125 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
1126 * unidentified */
1127 u8 reserved2[3];
1128 struct iwl_measurement_histogram histogram;
1129 __le32 stop_time; /* lower 32-bits of TSF */
1130 __le32 status; /* see iwl_measurement_status */
1131} __attribute__ ((packed));
1132
1133/******************************************************************************
1134 * (7)
1135 * Power Management Commands, Responses, Notifications:
1136 *
1137 *****************************************************************************/
1138
1139/**
1140 * struct iwl_powertable_cmd - Power Table Command
1141 * @flags: See below:
1142 *
1143 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
1144 *
1145 * PM allow:
1146 * bit 0 - '0' Driver does not allow power management
1147 *         '1' Driver allows PM (use rest of parameters)
1148 * uCode sends sleep notifications:
1149 * bit 1 - '0' Don't send sleep notification
1150 *         '1' Send sleep notification (SEND_PM_NOTIFICATION)
1151 * Sleep over DTIM
1152 * bit 2 - '0' PM has to wake up every DTIM
1153 *         '1' PM may sleep over DTIM until the listen interval.
1154 * PCI power managed
1155 * bit 3 - '0' (PCI_LINK_CTRL & 0x1)
1156 * '1' !(PCI_LINK_CTRL & 0x1)
1157 * Force sleep Modes
1158 * bit 31/30- '00' use both mac/xtal sleeps
1159 * '01' force Mac sleep
1160 * '10' force xtal sleep
1161 * '11' Illegal set
1162 *
1163 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
1164 * uCode assumes sleep over DTIM is allowed and we don't need to wake up
1165 * for every DTIM.
1166 */
1167#define IWL_POWER_VEC_SIZE 5
1168
1169
1170#if IWL == 3945
1171
1172#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le32(1<<0)
1173#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le32(1<<2)
1174#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le32(1<<3)
1175struct iwl_powertable_cmd {
1176 __le32 flags;
1177 __le32 rx_data_timeout;
1178 __le32 tx_data_timeout;
1179 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
1180} __attribute__((packed));
1181
1182#elif IWL == 4965
1183
1184#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1<<0)
1185#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1<<2)
1186#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1<<3)
1187
1188struct iwl_powertable_cmd {
1189 __le16 flags;
1190 u8 keep_alive_seconds;
1191 u8 debug_flags;
1192 __le32 rx_data_timeout;
1193 __le32 tx_data_timeout;
1194 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
1195 __le32 keep_alive_beacons;
1196} __attribute__ ((packed));
1197#endif
1198
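A hedged sketch of filling this command with the flag bits documented above (the helper and the timeout values are placeholders, not from the original patch):

static inline void iwl_example_fill_powertable(struct iwl_powertable_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |	/* bit 0 */
		     IWL_POWER_SLEEP_OVER_DTIM_MSK;	/* bit 2 */
	cmd->rx_data_timeout = cpu_to_le32(100 * 1024);	/* placeholder, usec */
	cmd->tx_data_timeout = cpu_to_le32(100 * 1024);
}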
1199/*
1200 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
1201 * 3945 and 4965 identical.
1202 */
1203struct iwl_sleep_notification {
1204 u8 pm_sleep_mode;
1205 u8 pm_wakeup_src;
1206 __le16 reserved;
1207 __le32 sleep_time;
1208 __le32 tsf_low;
1209 __le32 bcon_timer;
1210} __attribute__ ((packed));
1211
1212/* Sleep states. 3945 and 4965 identical. */
1213enum {
1214 IWL_PM_NO_SLEEP = 0,
1215 IWL_PM_SLP_MAC = 1,
1216 IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
1217 IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
1218 IWL_PM_SLP_PHY = 4,
1219 IWL_PM_SLP_REPENT = 5,
1220 IWL_PM_WAKEUP_BY_TIMER = 6,
1221 IWL_PM_WAKEUP_BY_DRIVER = 7,
1222 IWL_PM_WAKEUP_BY_RFKILL = 8,
1223 /* 3 reserved */
1224 IWL_PM_NUM_OF_MODES = 12,
1225};
1226
1227/*
1228 * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response)
1229 */
1230#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */
1231#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */
1232#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */
1233struct iwl_card_state_cmd {
1234 __le32 status; /* CARD_STATE_CMD_* request new power state */
1235} __attribute__ ((packed));
1236
1237/*
1238 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
1239 */
1240struct iwl_card_state_notif {
1241 __le32 flags;
1242} __attribute__ ((packed));
1243
1244#define HW_CARD_DISABLED 0x01
1245#define SW_CARD_DISABLED 0x02
1246#define RF_CARD_DISABLED 0x04
1247#define RXON_CARD_DISABLED 0x10
1248
1249struct iwl_ct_kill_config {
1250 __le32 reserved;
1251 __le32 critical_temperature_M;
1252 __le32 critical_temperature_R;
1253} __attribute__ ((packed));
1254
1255/******************************************************************************
1256 * (8)
1257 * Scan Commands, Responses, Notifications:
1258 *
1259 *****************************************************************************/
1260
1261struct iwl_scan_channel {
1262 /* type is defined as:
1263 * 0:0 active (0 - passive)
1264 * 1:4 SSID direct
1265 * If 1 is set then corresponding SSID IE is transmitted in probe
1266 * 5:7 reserved
1267 */
1268 u8 type;
1269 u8 channel;
1270 struct iwl_tx_power tpc;
1271 __le16 active_dwell;
1272 __le16 passive_dwell;
1273} __attribute__ ((packed));
1274
1275struct iwl_ssid_ie {
1276 u8 id;
1277 u8 len;
1278 u8 ssid[32];
1279} __attribute__ ((packed));
1280
1281#define PROBE_OPTION_MAX 0x4
1282#define TX_CMD_LIFE_TIME_INFINITE __constant_cpu_to_le32(0xFFFFFFFF)
1283#define IWL_GOOD_CRC_TH __constant_cpu_to_le16(1)
1284#define IWL_MAX_SCAN_SIZE 1024
1285
1286/*
1287 * REPLY_SCAN_CMD = 0x80 (command)
1288 */
1289struct iwl_scan_cmd {
1290 __le16 len;
1291 u8 reserved0;
1292 u8 channel_count;
1293 __le16 quiet_time; /* dwell only this long on quiet chnl
1294 * (active scan) */
1295 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
1296 __le16 good_CRC_th; /* passive -> active promotion threshold */
1297#if IWL == 3945
1298 __le16 reserved1;
1299#elif IWL == 4965
1300 __le16 rx_chain;
1301#endif
1302 __le32 max_out_time; /* max usec to be out of associated (service)
1303 * chnl */
1304 __le32 suspend_time; /* pause scan this long when returning to svc
1305 * chnl.
1306 * 3945 -- 31:24 # beacons, 19:0 additional usec,
1307 * 4965 -- 31:22 # beacons, 21:0 additional usec.
1308 */
1309 __le32 flags;
1310 __le32 filter_flags;
1311
1312 struct iwl_tx_cmd tx_cmd;
1313 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
1314
1315 u8 data[0];
1316 /*
1317 * The channels start after the probe request payload and are of type:
1318 *
1319 * struct iwl_scan_channel channels[0];
1320 *
1321	 * NOTE: Only one band of channels can be scanned per pass. You
1322	 *       cannot mix 2.4 GHz channels and 5.2 GHz channels, and must
1323	 *       request a scan multiple times (not concurrently).
1324 *
1325 */
1326} __attribute__ ((packed));
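Because the channel list follows the variable-length probe request in data[], the total command size is not sizeof(struct iwl_scan_cmd) alone. A sketch of the size computation (the helper name is illustrative, not part of this patch):

static inline size_t iwl_example_scan_cmd_size(size_t probe_req_len,
					       u8 channel_count)
{
	return sizeof(struct iwl_scan_cmd) + probe_req_len +
	       channel_count * sizeof(struct iwl_scan_channel);
}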
1327
1328/* If the scan can be aborted, the scan complete notification carries the abort status. */
1329#define CAN_ABORT_STATUS __constant_cpu_to_le32(0x1)
1330/* complete notification statuses */
1331#define ABORT_STATUS 0x2
1332
1333/*
1334 * REPLY_SCAN_CMD = 0x80 (response)
1335 */
1336struct iwl_scanreq_notification {
1337 __le32 status; /* 1: okay, 2: cannot fulfill request */
1338} __attribute__ ((packed));
1339
1340/*
1341 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
1342 */
1343struct iwl_scanstart_notification {
1344 __le32 tsf_low;
1345 __le32 tsf_high;
1346 __le32 beacon_timer;
1347 u8 channel;
1348 u8 band;
1349 u8 reserved[2];
1350 __le32 status;
1351} __attribute__ ((packed));
1352
1353#define SCAN_OWNER_STATUS 0x1
1354#define MEASURE_OWNER_STATUS 0x2
1355
1356#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
1357/*
1358 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
1359 */
1360struct iwl_scanresults_notification {
1361 u8 channel;
1362 u8 band;
1363 u8 reserved[2];
1364 __le32 tsf_low;
1365 __le32 tsf_high;
1366 __le32 statistics[NUMBER_OF_STATISTICS];
1367} __attribute__ ((packed));
1368
1369/*
1370 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
1371 */
1372struct iwl_scancomplete_notification {
1373 u8 scanned_channels;
1374 u8 status;
1375 u8 reserved;
1376 u8 last_channel;
1377 __le32 tsf_low;
1378 __le32 tsf_high;
1379} __attribute__ ((packed));
1380
1381
1382/******************************************************************************
1383 * (9)
1384 * IBSS/AP Commands and Notifications:
1385 *
1386 *****************************************************************************/
1387
1388/*
1389 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
1390 */
1391struct iwl_beacon_notif {
1392 struct iwl_tx_resp beacon_notify_hdr;
1393 __le32 low_tsf;
1394 __le32 high_tsf;
1395 __le32 ibss_mgr_status;
1396} __attribute__ ((packed));
1397
1398/*
1399 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
1400 */
1401struct iwl_tx_beacon_cmd {
1402 struct iwl_tx_cmd tx;
1403 __le16 tim_idx;
1404 u8 tim_size;
1405 u8 reserved1;
1406 struct ieee80211_hdr frame[0]; /* beacon frame */
1407} __attribute__ ((packed));
1408
1409/******************************************************************************
1410 * (10)
1411 * Statistics Commands and Notifications:
1412 *
1413 *****************************************************************************/
1414
1415#define IWL_TEMP_CONVERT 260
1416
1417#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
1418#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
1419#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
1420
1421/* Used for passing to driver number of successes and failures per rate */
1422struct rate_histogram {
1423 union {
1424 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
1425 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
1426 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
1427 } success;
1428 union {
1429 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
1430 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
1431 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
1432 } failed;
1433} __attribute__ ((packed));
1434
1435/* statistics command response */
1436
1437struct statistics_rx_phy {
1438 __le32 ina_cnt;
1439 __le32 fina_cnt;
1440 __le32 plcp_err;
1441 __le32 crc32_err;
1442 __le32 overrun_err;
1443 __le32 early_overrun_err;
1444 __le32 crc32_good;
1445 __le32 false_alarm_cnt;
1446 __le32 fina_sync_err_cnt;
1447 __le32 sfd_timeout;
1448 __le32 fina_timeout;
1449 __le32 unresponded_rts;
1450 __le32 rxe_frame_limit_overrun;
1451 __le32 sent_ack_cnt;
1452 __le32 sent_cts_cnt;
1453#if IWL == 4965
1454 __le32 sent_ba_rsp_cnt;
1455 __le32 dsp_self_kill;
1456 __le32 mh_format_err;
1457 __le32 re_acq_main_rssi_sum;
1458 __le32 reserved3;
1459#endif
1460} __attribute__ ((packed));
1461
1462#if IWL == 4965
1463struct statistics_rx_ht_phy {
1464 __le32 plcp_err;
1465 __le32 overrun_err;
1466 __le32 early_overrun_err;
1467 __le32 crc32_good;
1468 __le32 crc32_err;
1469 __le32 mh_format_err;
1470 __le32 agg_crc32_good;
1471 __le32 agg_mpdu_cnt;
1472 __le32 agg_cnt;
1473 __le32 reserved2;
1474} __attribute__ ((packed));
1475#endif
1476
1477struct statistics_rx_non_phy {
1478 __le32 bogus_cts; /* CTS received when not expecting CTS */
1479 __le32 bogus_ack; /* ACK received when not expecting ACK */
1480	__le32 non_bssid_frames;	/* number of frames whose BSSID does
1481					 * not match the STA's BSSID */
1482 __le32 filtered_frames; /* count frames that were dumped in the
1483 * filtering process */
1484 __le32 non_channel_beacons; /* beacons with our bss id but not on
1485 * our serving channel */
1486#if IWL == 4965
1487 __le32 channel_beacons; /* beacons with our bss id and in our
1488 * serving channel */
1489 __le32 num_missed_bcon; /* number of missed beacons */
1490 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
1491 * ADC was in saturation */
1492 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
1493 * for INA */
1494 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
1495 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
1496 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
1497 __le32 interference_data_flag; /* flag for interference data
1498 * availability. 1 when data is
1499 * available. */
1500 __le32 channel_load; /* counts RX Enable time */
1501 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
1502 * and CCK) counter */
1503 __le32 beacon_rssi_a;
1504 __le32 beacon_rssi_b;
1505 __le32 beacon_rssi_c;
1506 __le32 beacon_energy_a;
1507 __le32 beacon_energy_b;
1508 __le32 beacon_energy_c;
1509#endif
1510} __attribute__ ((packed));
1511
1512struct statistics_rx {
1513 struct statistics_rx_phy ofdm;
1514 struct statistics_rx_phy cck;
1515 struct statistics_rx_non_phy general;
1516#if IWL == 4965
1517 struct statistics_rx_ht_phy ofdm_ht;
1518#endif
1519} __attribute__ ((packed));
1520
1521#if IWL == 4965
1522struct statistics_tx_non_phy_agg {
1523 __le32 ba_timeout;
1524 __le32 ba_reschedule_frames;
1525 __le32 scd_query_agg_frame_cnt;
1526 __le32 scd_query_no_agg;
1527 __le32 scd_query_agg;
1528 __le32 scd_query_mismatch;
1529 __le32 frame_not_ready;
1530 __le32 underrun;
1531 __le32 bt_prio_kill;
1532 __le32 rx_ba_rsp_cnt;
1533 __le32 reserved2;
1534 __le32 reserved3;
1535} __attribute__ ((packed));
1536#endif
1537
1538struct statistics_tx {
1539 __le32 preamble_cnt;
1540 __le32 rx_detected_cnt;
1541 __le32 bt_prio_defer_cnt;
1542 __le32 bt_prio_kill_cnt;
1543 __le32 few_bytes_cnt;
1544 __le32 cts_timeout;
1545 __le32 ack_timeout;
1546 __le32 expected_ack_cnt;
1547 __le32 actual_ack_cnt;
1548#if IWL == 4965
1549 __le32 dump_msdu_cnt;
1550 __le32 burst_abort_next_frame_mismatch_cnt;
1551 __le32 burst_abort_missing_next_frame_cnt;
1552 __le32 cts_timeout_collision;
1553 __le32 ack_or_ba_timeout_collision;
1554 struct statistics_tx_non_phy_agg agg;
1555#endif
1556} __attribute__ ((packed));
1557
1558struct statistics_dbg {
1559 __le32 burst_check;
1560 __le32 burst_count;
1561 __le32 reserved[4];
1562} __attribute__ ((packed));
1563
1564struct statistics_div {
1565 __le32 tx_on_a;
1566 __le32 tx_on_b;
1567 __le32 exec_time;
1568 __le32 probe_time;
1569#if IWL == 4965
1570 __le32 reserved1;
1571 __le32 reserved2;
1572#endif
1573} __attribute__ ((packed));
1574
1575struct statistics_general {
1576 __le32 temperature;
1577#if IWL == 4965
1578 __le32 temperature_m;
1579#endif
1580 struct statistics_dbg dbg;
1581 __le32 sleep_time;
1582 __le32 slots_out;
1583 __le32 slots_idle;
1584 __le32 ttl_timestamp;
1585 struct statistics_div div;
1586#if IWL == 4965
1587 __le32 rx_enable_counter;
1588 __le32 reserved1;
1589 __le32 reserved2;
1590 __le32 reserved3;
1591#endif
1592} __attribute__ ((packed));
1593
1594/*
1595 * REPLY_STATISTICS_CMD = 0x9c,
1596 * 3945 and 4965 identical.
1597 *
1598 * This command triggers an immediate response containing uCode statistics.
1599 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
1600 *
1601 * If the CLEAR_STATS configuration flag is set, uCode will clear its
1602 * internal copy of the statistics (counters) after issuing the response.
1603 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
1604 *
1605 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
1606 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
1607 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
1608 */
1609#define IWL_STATS_CONF_CLEAR_STATS __constant_cpu_to_le32(0x1) /* see above */
1610#define IWL_STATS_CONF_DISABLE_NOTIF __constant_cpu_to_le32(0x2)/* see above */
1611struct iwl_statistics_cmd {
1612 __le32 configuration_flags; /* IWL_STATS_CONF_* */
1613} __attribute__ ((packed));
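A minimal sketch (not part of the patch) of a one-shot statistics request that also clears the uCode counters and suppresses the per-beacon notifications, using the flags above:

static inline void iwl_example_statistics_request(struct iwl_statistics_cmd *cmd)
{
	cmd->configuration_flags = IWL_STATS_CONF_CLEAR_STATS |
				   IWL_STATS_CONF_DISABLE_NOTIF;
}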
1614
1615/*
1616 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
1617 *
1618 * By default, uCode issues this notification after receiving a beacon
1619 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
1620 * REPLY_STATISTICS_CMD 0x9c, above.
1621 *
1622 * Statistics counters continue to increment beacon after beacon, but are
1623 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
1624 * 0x9c with CLEAR_STATS bit set (see above).
1625 *
1626 * uCode also issues this notification during scans. uCode clears statistics
1627 * appropriately so that each notification contains statistics for only the
1628 * one channel that has just been scanned.
1629 */
1630#define STATISTICS_REPLY_FLG_BAND_24G_MSK __constant_cpu_to_le32(0x2)
1631#define STATISTICS_REPLY_FLG_FAT_MODE_MSK __constant_cpu_to_le32(0x8)
1632struct iwl_notif_statistics {
1633 __le32 flag;
1634 struct statistics_rx rx;
1635 struct statistics_tx tx;
1636 struct statistics_general general;
1637} __attribute__ ((packed));
1638
1639
1640/*
1641 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
1642 */
1643/* If the uCode misses CONSECUTIVE_MISSED_BCONS_TH beacons in a row,
1644 * this notification will be sent. */
1645#define CONSECUTIVE_MISSED_BCONS_TH 20
1646
1647struct iwl_missed_beacon_notif {
1648 __le32 consequtive_missed_beacons;
1649 __le32 total_missed_becons;
1650 __le32 num_expected_beacons;
1651 __le32 num_recvd_beacons;
1652} __attribute__ ((packed));
1653
1654/******************************************************************************
1655 * (11)
1656 * Rx Calibration Commands:
1657 *
1658 *****************************************************************************/
1659
1660#define PHY_CALIBRATE_DIFF_GAIN_CMD (7)
1661#define HD_TABLE_SIZE (11)
1662
1663struct iwl_sensitivity_cmd {
1664 __le16 control;
1665 __le16 table[HD_TABLE_SIZE];
1666} __attribute__ ((packed));
1667
1668struct iwl_calibration_cmd {
1669 u8 opCode;
1670 u8 flags;
1671 __le16 reserved;
1672 s8 diff_gain_a;
1673 s8 diff_gain_b;
1674 s8 diff_gain_c;
1675 u8 reserved1;
1676} __attribute__ ((packed));
1677
1678/******************************************************************************
1679 * (12)
1680 * Miscellaneous Commands:
1681 *
1682 *****************************************************************************/
1683
1684/*
1685 * LEDs Command & Response
1686 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
1687 *
1688 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
1689 * this command turns it on or off, or sets up a periodic blinking cycle.
1690 */
1691struct iwl_led_cmd {
1692 __le32 interval; /* "interval" in uSec */
1693 u8 id; /* 1: Activity, 2: Link, 3: Tech */
1694 u8 off; /* # intervals off while blinking;
1695 * "0", with >0 "on" value, turns LED on */
1696 u8 on; /* # intervals on while blinking;
1697 * "0", regardless of "off", turns LED off */
1698 u8 reserved;
1699} __attribute__ ((packed));
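An illustrative blink pattern, assuming the interval/on/off semantics documented above; the values are placeholders, not from the original patch:

static inline void iwl_example_led_blink(struct iwl_led_cmd *led)
{
	led->interval = cpu_to_le32(1000);	/* 1000 usec per interval */
	led->id = 1;				/* Activity LED */
	led->on = 25;				/* 25 intervals on ... */
	led->off = 75;				/* ... then 75 intervals off */
	led->reserved = 0;
}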
1700
1701/******************************************************************************
1702 * (13)
1703 * Union of all expected notifications/responses:
1704 *
1705 *****************************************************************************/
1706
1707struct iwl_rx_packet {
1708 __le32 len;
1709 struct iwl_cmd_header hdr;
1710 union {
1711 struct iwl_alive_resp alive_frame;
1712 struct iwl_rx_frame rx_frame;
1713 struct iwl_tx_resp tx_resp;
1714 struct iwl_spectrum_notification spectrum_notif;
1715 struct iwl_csa_notification csa_notif;
1716 struct iwl_error_resp err_resp;
1717 struct iwl_card_state_notif card_state_notif;
1718 struct iwl_beacon_notif beacon_status;
1719 struct iwl_add_sta_resp add_sta;
1720 struct iwl_sleep_notification sleep_notif;
1721 struct iwl_spectrum_resp spectrum;
1722 struct iwl_notif_statistics stats;
1723#if IWL == 4965
1724 struct iwl_compressed_ba_resp compressed_ba;
1725 struct iwl_missed_beacon_notif missed_beacon;
1726#endif
1727 __le32 status;
1728 u8 raw[0];
1729 } u;
1730} __attribute__ ((packed));
1731
1732#define IWL_RX_FRAME_SIZE (4 + sizeof(struct iwl_rx_frame))
1733
1734#endif /* __iwl_commands_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
new file mode 100644
index 000000000000..abd344c549aa
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -0,0 +1,149 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_debug_h__
30#define __iwl_debug_h__
31
32#ifdef CONFIG_IWLWIFI_DEBUG
33extern u32 iwl_debug_level;
34#define IWL_DEBUG(level, fmt, args...) \
35do { if (iwl_debug_level & (level)) \
36 printk(KERN_ERR DRV_NAME": %c %s " fmt, \
37 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
38
39#define IWL_DEBUG_LIMIT(level, fmt, args...) \
40do { if ((iwl_debug_level & (level)) && net_ratelimit()) \
41 printk(KERN_ERR DRV_NAME": %c %s " fmt, \
42 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
43#else
44static inline void IWL_DEBUG(int level, const char *fmt, ...)
45{
46}
47static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
48{
49}
50#endif /* CONFIG_IWLWIFI_DEBUG */
51
52/*
53 * To use the debug system:
54 *
55 * If you are defining a new debug classification, simply add it to the #define
56 * list here in the form of:
57 *
58 * #define IWL_DL_xxxx VALUE
59 *
60 * shifting value to the left one bit from the previous entry. xxxx should be
61 * the name of the classification (for example, WEP)
62 *
63 * You then need to either add an IWL_xxxx_DEBUG() macro definition for your
64 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
65 * to send output to that classification.
66 *
67 * To add your debug level to the list of levels seen when you perform
68 *
69 * % cat /proc/net/iwl/debug_level
70 *
71 * you simply need to add your entry to the iwl_debug_levels array.
72 *
73 * If you do not see debug_level in /proc/net/iwl then you do not have
74 * CONFIG_IWLWIFI_DEBUG defined in your kernel configuration
75 *
76 */
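A sketch of adding a new classification following the recipe above; IWL_DL_EXAMPLE, its bit position, and the convenience macro are hypothetical names used only to illustrate the pattern:

#define IWL_DL_EXAMPLE		(1<<4)	/* next free bit after IWL_DL_STATE */
#define IWL_DEBUG_EXAMPLE(f, a...)	IWL_DEBUG(IWL_DL_EXAMPLE, f, ## a)

/* then, in driver code:  IWL_DEBUG_EXAMPLE("value=%d\n", value); */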
77
78#define IWL_DL_INFO (1<<0)
79#define IWL_DL_MAC80211 (1<<1)
80#define IWL_DL_HOST_COMMAND (1<<2)
81#define IWL_DL_STATE (1<<3)
82
83#define IWL_DL_RADIO (1<<7)
84#define IWL_DL_POWER (1<<8)
85#define IWL_DL_TEMP (1<<9)
86
87#define IWL_DL_NOTIF (1<<10)
88#define IWL_DL_SCAN (1<<11)
89#define IWL_DL_ASSOC (1<<12)
90#define IWL_DL_DROP (1<<13)
91
92#define IWL_DL_TXPOWER (1<<14)
93
94#define IWL_DL_AP (1<<15)
95
96#define IWL_DL_FW (1<<16)
97#define IWL_DL_RF_KILL (1<<17)
98#define IWL_DL_FW_ERRORS (1<<18)
99
100#define IWL_DL_LED (1<<19)
101
102#define IWL_DL_RATE (1<<20)
103
104#define IWL_DL_CALIB (1<<21)
105#define IWL_DL_WEP (1<<22)
106#define IWL_DL_TX (1<<23)
107#define IWL_DL_RX (1<<24)
108#define IWL_DL_ISR (1<<25)
109#define IWL_DL_HT (1<<26)
110#define IWL_DL_IO (1<<27)
111#define IWL_DL_11H (1<<28)
112
113#define IWL_DL_STATS (1<<29)
114#define IWL_DL_TX_REPLY (1<<30)
115#define IWL_DL_QOS (1<<31)
116
117#define IWL_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
118#define IWL_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
119#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a)
120
121#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a)
122#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a)
123#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a)
124#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a)
125#define IWL_DEBUG_TX(f, a...) IWL_DEBUG(IWL_DL_TX, f, ## a)
126#define IWL_DEBUG_ISR(f, a...) IWL_DEBUG(IWL_DL_ISR, f, ## a)
127#define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a)
128#define IWL_DEBUG_WEP(f, a...) IWL_DEBUG(IWL_DL_WEP, f, ## a)
129#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HOST_COMMAND, f, ## a)
130#define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a)
131#define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a)
132#define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a)
133#define IWL_DEBUG_DROP(f, a...) IWL_DEBUG(IWL_DL_DROP, f, ## a)
134#define IWL_DEBUG_DROP_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_DROP, f, ## a)
135#define IWL_DEBUG_AP(f, a...) IWL_DEBUG(IWL_DL_AP, f, ## a)
136#define IWL_DEBUG_TXPOWER(f, a...) IWL_DEBUG(IWL_DL_TXPOWER, f, ## a)
137#define IWL_DEBUG_IO(f, a...) IWL_DEBUG(IWL_DL_IO, f, ## a)
138#define IWL_DEBUG_RATE(f, a...) IWL_DEBUG(IWL_DL_RATE, f, ## a)
139#define IWL_DEBUG_NOTIF(f, a...) IWL_DEBUG(IWL_DL_NOTIF, f, ## a)
140#define IWL_DEBUG_ASSOC(f, a...) IWL_DEBUG(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
141#define IWL_DEBUG_HT(f, a...) IWL_DEBUG(IWL_DL_HT, f, ## a)
142#define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a)
143#define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a)
144#define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a)
145#define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a)
146#define IWL_DEBUG_POWER(f, a...) IWL_DEBUG(IWL_DL_POWER, f, ## a)
147#define IWL_DEBUG_11H(f, a...) IWL_DEBUG(IWL_DL_11H, f, ## a)
148
149#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
new file mode 100644
index 000000000000..e473c97e3f4f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -0,0 +1,336 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __iwl_eeprom_h__
65#define __iwl_eeprom_h__
66
67/*
68 * This file defines EEPROM related constants, enums, and inline functions.
69 *
70 */
71
72#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
73#define IWL_EEPROM_ACCESS_DELAY 10 /* uSec */
74/* EEPROM field values */
75#define ANTENNA_SWITCH_NORMAL 0
76#define ANTENNA_SWITCH_INVERSE 1
77
78enum {
79 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
80 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
81 /* Bit 2 Reserved */
82 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
83 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
84 EEPROM_CHANNEL_WIDE = (1 << 5),
85 EEPROM_CHANNEL_NARROW = (1 << 6),
86 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
87};
88
92/* EEPROM field lengths */
93#define EEPROM_BOARD_PBA_NUMBER_LENGTH 11
94#define EEPROM_REGULATORY_SKU_ID_LENGTH 4
95#define EEPROM_REGULATORY_BAND1_CHANNELS_LENGTH 14
96#define EEPROM_REGULATORY_BAND2_CHANNELS_LENGTH 13
97#define EEPROM_REGULATORY_BAND3_CHANNELS_LENGTH 12
98#define EEPROM_REGULATORY_BAND4_CHANNELS_LENGTH 11
99#define EEPROM_REGULATORY_BAND5_CHANNELS_LENGTH 6
100
101#if IWL == 3945
102#define EEPROM_REGULATORY_CHANNELS_LENGTH ( \
103 EEPROM_REGULATORY_BAND1_CHANNELS_LENGTH + \
104 EEPROM_REGULATORY_BAND2_CHANNELS_LENGTH + \
105 EEPROM_REGULATORY_BAND3_CHANNELS_LENGTH + \
106 EEPROM_REGULATORY_BAND4_CHANNELS_LENGTH + \
107 EEPROM_REGULATORY_BAND5_CHANNELS_LENGTH)
108#elif IWL == 4965
109#define EEPROM_REGULATORY_BAND_24_FAT_CHANNELS_LENGTH 7
110#define EEPROM_REGULATORY_BAND_52_FAT_CHANNELS_LENGTH 11
111#define EEPROM_REGULATORY_CHANNELS_LENGTH ( \
112 EEPROM_REGULATORY_BAND1_CHANNELS_LENGTH + \
113 EEPROM_REGULATORY_BAND2_CHANNELS_LENGTH + \
114 EEPROM_REGULATORY_BAND3_CHANNELS_LENGTH + \
115 EEPROM_REGULATORY_BAND4_CHANNELS_LENGTH + \
116 EEPROM_REGULATORY_BAND5_CHANNELS_LENGTH + \
117 EEPROM_REGULATORY_BAND_24_FAT_CHANNELS_LENGTH + \
118 EEPROM_REGULATORY_BAND_52_FAT_CHANNELS_LENGTH)
119#endif
120
121#define EEPROM_REGULATORY_NUMBER_OF_BANDS 5
122
123/* SKU Capabilities */
124#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
125#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
126#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
127
128/* *regulatory* channel data from eeprom, one for each channel */
129struct iwl_eeprom_channel {
130 u8 flags; /* flags copied from EEPROM */
131 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
132} __attribute__ ((packed));
133
134/*
135 * Mapping of a Tx power level, at factory calibration temperature,
136 * to a radio/DSP gain table index.
137 * One for each of 5 "sample" power levels in each band.
138 * v_det is measured at the factory, using the 3945's built-in power amplifier
139 * (PA) output voltage detector. This same detector is used during Tx of
140 * long packets in normal operation to provide feedback as to proper output
141 * level.
142 * Data copied from EEPROM.
143 */
144struct iwl_eeprom_txpower_sample {
145 u8 gain_index; /* index into power (gain) setup table ... */
146 s8 power; /* ... for this pwr level for this chnl group */
147 u16 v_det; /* PA output voltage */
148} __attribute__ ((packed));
149
150/*
151 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
152 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
153 * Tx power setup code interpolates between the 5 "sample" power levels
154 * to determine the nominal setup for a requested power level.
155 * Data copied from EEPROM.
156 * DO NOT ALTER THIS STRUCTURE!!!
157 */
158struct iwl_eeprom_txpower_group {
159 struct iwl_eeprom_txpower_sample samples[5]; /* 5 power levels */
160 s32 a, b, c, d, e; /* coefficients for voltage->power
161 * formula (signed) */
162 s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
163 * frequency (signed) */
164 s8 saturation_power; /* highest power possible by h/w in this
165 * band */
166 u8 group_channel; /* "representative" channel # in this band */
167 s16 temperature; /* h/w temperature at factory calib this band
168 * (signed) */
169} __attribute__ ((packed));
170
171/*
172 * Temperature-based Tx-power compensation data, not band-specific.
173 * These coefficients are used to modify a/b/c/d/e coeffs based on
174 * difference between current temperature and factory calib temperature.
175 * Data copied from EEPROM.
176 */
177struct iwl_eeprom_temperature_corr {
178 u32 Ta;
179 u32 Tb;
180 u32 Tc;
181 u32 Td;
182 u32 Te;
183} __attribute__ ((packed));
184
185#if IWL == 4965
186#define EEPROM_TX_POWER_TX_CHAINS (2)
187#define EEPROM_TX_POWER_BANDS (8)
188#define EEPROM_TX_POWER_MEASUREMENTS (3)
189#define EEPROM_TX_POWER_VERSION (2)
190#define EEPROM_TX_POWER_VERSION_NEW (5)
191
192struct iwl_eeprom_calib_measure {
193 u8 temperature;
194 u8 gain_idx;
195 u8 actual_pow;
196 s8 pa_det;
197} __attribute__ ((packed));
198
199struct iwl_eeprom_calib_ch_info {
200 u8 ch_num;
201 struct iwl_eeprom_calib_measure measurements[EEPROM_TX_POWER_TX_CHAINS]
202 [EEPROM_TX_POWER_MEASUREMENTS];
203} __attribute__ ((packed));
204
205struct iwl_eeprom_calib_subband_info {
206 u8 ch_from;
207 u8 ch_to;
208 struct iwl_eeprom_calib_ch_info ch1;
209 struct iwl_eeprom_calib_ch_info ch2;
210} __attribute__ ((packed));
211
212struct iwl_eeprom_calib_info {
213 u8 saturation_power24;
214 u8 saturation_power52;
215 s16 voltage; /* signed */
216 struct iwl_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
217} __attribute__ ((packed));
218
219#endif
220
221struct iwl_eeprom {
222 u8 reserved0[16];
223#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
224 u16 device_id; /* abs.ofs: 16 */
225 u8 reserved1[2];
226#define EEPROM_PMC (2*0x0A) /* 2 bytes */
227 u16 pmc; /* abs.ofs: 20 */
228 u8 reserved2[20];
229#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
230 u8 mac_address[6]; /* abs.ofs: 42 */
231 u8 reserved3[58];
232#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
233 u16 board_revision; /* abs.ofs: 106 */
234 u8 reserved4[11];
235#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
236 u8 board_pba_number[9]; /* abs.ofs: 119 */
237 u8 reserved5[8];
238#define EEPROM_VERSION (2*0x44) /* 2 bytes */
239 u16 version; /* abs.ofs: 136 */
240#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */
241 u8 sku_cap; /* abs.ofs: 138 */
242#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 bytes */
243 u8 leds_mode; /* abs.ofs: 139 */
244#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
245 u16 oem_mode;
246#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
247 u16 wowlan_mode; /* abs.ofs: 142 */
248#define EEPROM_LEDS_TIME_INTERVAL (2*0x48) /* 2 bytes */
249 u16 leds_time_interval; /* abs.ofs: 144 */
250#define EEPROM_LEDS_OFF_TIME (2*0x49) /* 1 bytes */
251 u8 leds_off_time; /* abs.ofs: 146 */
252#define EEPROM_LEDS_ON_TIME (2*0x49+1) /* 1 bytes */
253 u8 leds_on_time; /* abs.ofs: 147 */
254#define EEPROM_ALMGOR_M_VERSION (2*0x4A) /* 1 bytes */
255 u8 almgor_m_version; /* abs.ofs: 148 */
256#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 bytes */
257 u8 antenna_switch_type; /* abs.ofs: 149 */
258#if IWL == 3945
259 u8 reserved6[42];
260#else
261 u8 reserved6[8];
262#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
263 u16 board_revision_4965; /* abs.ofs: 158 */
264 u8 reserved7[13];
265#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
266 u8 board_pba_number_4965[9]; /* abs.ofs: 173 */
267 u8 reserved8[10];
268#endif
269#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
270 u8 sku_id[4]; /* abs.ofs: 192 */
271#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
272 u16 band_1_count; /* abs.ofs: 196 */
273#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
274 struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */
275#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
276 u16 band_2_count; /* abs.ofs: 226 */
277#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
278 struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
279#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
280 u16 band_3_count; /* abs.ofs: 254 */
281#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
282 struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
283#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
284 u16 band_4_count; /* abs.ofs: 280 */
285#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
286 struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
287#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
288 u16 band_5_count; /* abs.ofs: 304 */
289#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
290 struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
291
292/* From here on out the EEPROM diverges between the 4965 and the 3945 */
293#if IWL == 3945
294
295 u8 reserved9[194];
296
297#define EEPROM_TXPOWER_CALIB_GROUP0 0x200
298#define EEPROM_TXPOWER_CALIB_GROUP1 0x240
299#define EEPROM_TXPOWER_CALIB_GROUP2 0x280
300#define EEPROM_TXPOWER_CALIB_GROUP3 0x2c0
301#define EEPROM_TXPOWER_CALIB_GROUP4 0x300
302#define IWL_NUM_TX_CALIB_GROUPS 5
303 struct iwl_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
304/* abs.ofs: 512 */
305#define EEPROM_CALIB_TEMPERATURE_CORRECT 0x340
306 struct iwl_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
307 u8 reserved16[172]; /* fill out to full 1024 byte block */
308
309/* 4965AGN adds fat channel support */
310#elif IWL == 4965
311
312 u8 reserved10[2];
313#define EEPROM_REGULATORY_BAND_24_FAT_CHANNELS (2*0xA0) /* 14 bytes */
314 struct iwl_eeprom_channel band_24_channels[7]; /* abs.ofs: 320 */
315 u8 reserved11[2];
316#define EEPROM_REGULATORY_BAND_52_FAT_CHANNELS (2*0xA8) /* 22 bytes */
317 struct iwl_eeprom_channel band_52_channels[11]; /* abs.ofs: 336 */
318 u8 reserved12[6];
319#define EEPROM_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
320 u16 calib_version; /* abs.ofs: 364 */
321 u8 reserved13[2];
322#define EEPROM_SATURATION_POWER_OFFSET (2*0xB8) /* 2 bytes */
323 u16 satruation_power; /* abs.ofs: 368 */
324 u8 reserved14[94];
325#define EEPROM_IWL_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
326 struct iwl_eeprom_calib_info calib_info; /* abs.ofs: 464 */
327
328 u8 reserved16[140]; /* fill out to full 1024 byte block */
329
330#endif
331
332} __attribute__ ((packed));
333
334#define IWL_EEPROM_IMAGE_SIZE 1024
335
336#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
new file mode 100644
index 000000000000..e2a8d95ad9cd
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -0,0 +1,255 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_helpers_h__
31#define __iwl_helpers_h__
32
33#include <linux/ctype.h>
34
35/*
36 * The structures defined by the hardware/uCode interface
37 * have bit-wise operations. For each bit-field there is
38 * a data symbol in the structure, the start bit position
39 * and the length of the bit-field.
40 *
41 * iwl_get_bits and iwl_set_bits will return or set the
42 * appropriate bits on a 32-bit value.
43 *
44 * IWL_GET_BITS and IWL_SET_BITS use symbol expansion to
45 * expand out to the appropriate call to iwl_get_bits
46 * and iwl_set_bits without having to reference all of the
47 * numerical constants and defines provided in the hardware
48 * definition
49 */
50
51/**
52 * iwl_get_bits - Extract a hardware bit-field value
53 * @src: source hardware value (__le32)
54 * @pos: bit-position (0-based) of first bit of value
55 * @len: length of bit-field
56 *
57 * iwl_get_bits will return the bit-field in cpu endian ordering.
58 *
59 * NOTE: If used from IWL_GET_BITS then pos and len are compile-constants and
60 * will collapse to minimal code by the compiler.
61 */
62static inline u32 iwl_get_bits(__le32 src, u8 pos, u8 len)
63{
64 u32 tmp = le32_to_cpu(src);
65
66 tmp >>= pos;
67 tmp &= (1UL << len) - 1;
68 return tmp;
69}
70
71/**
72 * iwl_set_bits - Set a hardware bit-field value
73 * @dst: Address of __le32 hardware value
74 * @pos: bit-position (0-based) of first bit of value
75 * @len: length of bit-field
76 * @val: cpu endian value to encode into the bit-field
77 *
78 * iwl_set_bits will encode val into dst, masked to be len bits long at bit
79 * position pos.
80 *
81 * NOTE: If used from IWL_SET_BITS then pos and len are compile-constants and
82 * will collapse to minimal code by the compiler.
83 */
84static inline void iwl_set_bits(__le32 *dst, u8 pos, u8 len, int val)
85{
86 u32 tmp = le32_to_cpu(*dst);
87
88 tmp &= ~(((1UL << len) - 1) << pos);
89 tmp |= (val & ((1UL << len) - 1)) << pos;
90 *dst = cpu_to_le32(tmp);
91}
92
93static inline void iwl_set_bits16(__le16 *dst, u8 pos, u8 len, int val)
94{
95 u16 tmp = le16_to_cpu(*dst);
96
97 tmp &= ~((1UL << (pos + len)) - (1UL << pos));
98 tmp |= (val & ((1UL << len) - 1)) << pos;
99 *dst = cpu_to_le16(tmp);
100}
101
102/*
103 * The bit-field definitions in iwl-xxxx-hw.h are in the form of:
104 *
105 * struct example {
106 * __le32 val1;
107 * #define IWL_name_POS 8
108 * #define IWL_name_LEN 4
109 * #define IWL_name_SYM val1
110 * };
111 *
112 * The IWL_SET_BITS and IWL_GET_BITS macros are provided to allow the driver
113 * to call:
114 *
115 * struct example bar;
116 * u32 val = IWL_GET_BITS(bar, name);
117 * val = val * 2;
118 * IWL_SET_BITS(bar, name, val);
119 *
120 * All cpu / host ordering, masking, and shifts are performed by the macros
121 * and iwl_{get,set}_bits.
122 *
123 */
124#define IWL_SET_BITS(s, sym, v) \
125 iwl_set_bits(&(s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \
126 IWL_ ## sym ## _LEN, (v))
127
128#define IWL_SET_BITS16(s, sym, v) \
129 iwl_set_bits16(&(s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \
130 IWL_ ## sym ## _LEN, (v))
131
132#define IWL_GET_BITS(s, sym) \
133 iwl_get_bits((s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \
134 IWL_ ## sym ## _LEN)
135
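A short sketch, not part of the patch, exercising the helpers above directly on a hypothetical 4-bit field that starts at bit 8 of a little-endian hardware word:

static inline void iwl_example_double_field(__le32 *hw_word)
{
	u32 val = iwl_get_bits(*hw_word, 8, 4);	/* bits 8..11, cpu order */

	iwl_set_bits(hw_word, 8, 4, val * 2);	/* write back, doubled */
}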
136
137#define KELVIN_TO_CELSIUS(x) ((x)-273)
138#define CELSIUS_TO_KELVIN(x) ((x)+273)
139
140#define IEEE80211_CHAN_W_RADAR_DETECT 0x00000010
141
142static inline struct ieee80211_conf *ieee80211_get_hw_conf(
143 struct ieee80211_hw *hw)
144{
145 return &hw->conf;
146}
147
148#define QOS_CONTROL_LEN 2
149
150#define IEEE80211_STYPE_BACK_REQ 0x0080
151#define IEEE80211_STYPE_BACK 0x0090
152
153
154static inline int ieee80211_is_management(u16 fc)
155{
156 return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT;
157}
158
159static inline int ieee80211_is_control(u16 fc)
160{
161 return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL;
162}
163
164static inline int ieee80211_is_data(u16 fc)
165{
166 return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA;
167}
168
169static inline int ieee80211_is_back_request(u16 fc)
170{
171 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) &&
172 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ);
173}
174
175static inline int ieee80211_is_probe_response(u16 fc)
176{
177 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
178 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP);
179}
180
181static inline int ieee80211_is_probe_request(u16 fc)
182{
183 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
184 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_REQ);
185}
186
187static inline int ieee80211_is_beacon(u16 fc)
188{
189 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
190 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON);
191}
192
193static inline int ieee80211_is_atim(u16 fc)
194{
195 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
196 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ATIM);
197}
198
199static inline int ieee80211_is_assoc_request(u16 fc)
200{
201 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
202 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ);
203}
204
205static inline int ieee80211_is_assoc_response(u16 fc)
206{
207 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
208 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_RESP);
209}
210
211static inline int ieee80211_is_auth(u16 fc)
212{
213 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
214 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_AUTH);
215}
216
217static inline int ieee80211_is_deauth(u16 fc)
218{
219 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
220 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH);
221}
222
223static inline int ieee80211_is_disassoc(u16 fc)
224{
225 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
226 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DISASSOC);
227}
228
229static inline int ieee80211_is_reassoc_request(u16 fc)
230{
231 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
232 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ);
233}
234
235static inline int ieee80211_is_reassoc_response(u16 fc)
236{
237 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
238 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_RESP);
239}
240
241static inline int iwl_check_bits(unsigned long field, unsigned long mask)
242{
243 return ((field & mask) == mask) ? 1 : 0;
244}
245
246static inline unsigned long elapsed_jiffies(unsigned long start,
247 unsigned long end)
248{
249 if (end >= start)
250 return end - start;
251
252 return end + (MAX_JIFFY_OFFSET - start) + 1;
253}
254
255#endif /* __iwl_helpers_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hw.h b/drivers/net/wireless/iwlwifi/iwl-hw.h
new file mode 100644
index 000000000000..1aa6fcd39a5e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-hw.h
@@ -0,0 +1,537 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwlwifi_hw_h__
64#define __iwlwifi_hw_h__
65
66/*
67 * This file defines hardware constants common to 3945 and 4965.
68 *
69 * Device-specific constants are defined in iwl-3945-hw.h and iwl-4965-hw.h,
70 * although this file also contains a few definitions whose .c
71 * implementation is shared by 3945 and 4965 and differs only in the
72 * value of a constant.
73 *
74 * uCode API constants are defined in iwl-commands.h.
75 *
76 * NOTE: DO NOT PUT OS IMPLEMENTATION-SPECIFIC DECLARATIONS HERE
77 *
78 * The iwl-*hw.h (and files they include) files should remain OS/driver
79 * implementation independent, declaring only the hardware interface.
80 */
81
82/* uCode queue management definitions */
83#define IWL_CMD_QUEUE_NUM 4
84#define IWL_CMD_FIFO_NUM 4
85#define IWL_BACK_QUEUE_FIRST_ID 7
86
87/* Tx rates */
88#define IWL_CCK_RATES 4
89#define IWL_OFDM_RATES 8
90
91#if IWL == 3945
92#define IWL_HT_RATES 0
93#elif IWL == 4965
94#define IWL_HT_RATES 16
95#endif
96
97#define IWL_MAX_RATES (IWL_CCK_RATES+IWL_OFDM_RATES+IWL_HT_RATES)
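/* For reference, this works out to 4 + 8 + 0 = 12 rates for 3945 builds
 * and 4 + 8 + 16 = 28 rates for 4965 builds. */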
98
99/* Time constants */
100#define SHORT_SLOT_TIME 9
101#define LONG_SLOT_TIME 20
102
103/* RSSI to dBm */
104#if IWL == 3945
105#define IWL_RSSI_OFFSET 95
106#elif IWL == 4965
107#define IWL_RSSI_OFFSET 44
108#endif
109
110#include "iwl-eeprom.h"
111#include "iwl-commands.h"
112
113#define PCI_LINK_CTRL 0x0F0
114#define PCI_POWER_SOURCE 0x0C8
115#define PCI_REG_WUM8 0x0E8
116#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
117
118/*=== CSR (control and status registers) ===*/
119#define CSR_BASE (0x000)
120
121#define CSR_SW_VER (CSR_BASE+0x000)
122#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
123#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
124#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
125#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
126#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
127#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
128#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
129#define CSR_GP_CNTRL (CSR_BASE+0x024)
130#define CSR_HW_REV (CSR_BASE+0x028)
131#define CSR_EEPROM_REG (CSR_BASE+0x02c)
132#define CSR_EEPROM_GP (CSR_BASE+0x030)
133#define CSR_GP_UCODE (CSR_BASE+0x044)
134#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
135#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
136#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
137#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
138#define CSR_LED_REG (CSR_BASE+0x094)
139#define CSR_DRAM_INT_TBL_CTL (CSR_BASE+0x0A0)
140#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
141#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
142#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
143
144/* HW I/F configuration */
145#define CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MB (0x00000100)
146#define CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MM (0x00000200)
147#define CSR_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
148#define CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
149#define CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
150#define CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
151#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
152
153/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
154 * acknowledged (reset) by host writing "1" to flagged bits. */
155#define CSR_INT_BIT_FH_RX (1<<31) /* Rx DMA, cmd responses, FH_INT[17:16] */
156#define CSR_INT_BIT_HW_ERR (1<<29) /* DMA hardware error FH_INT[31] */
157#define CSR_INT_BIT_DNLD (1<<28) /* uCode Download */
158#define CSR_INT_BIT_FH_TX (1<<27) /* Tx DMA FH_INT[1:0] */
159#define CSR_INT_BIT_MAC_CLK_ACTV (1<<26) /* NIC controller's clock toggled on/off */
160#define CSR_INT_BIT_SW_ERR (1<<25) /* uCode error */
161#define CSR_INT_BIT_RF_KILL (1<<7) /* HW RFKILL switch GP_CNTRL[27] toggled */
162#define CSR_INT_BIT_CT_KILL (1<<6) /* Critical temp (chip too hot) rfkill */
163#define CSR_INT_BIT_SW_RX (1<<3) /* Rx, command responses, 3945 */
164#define CSR_INT_BIT_WAKEUP (1<<1) /* NIC controller waking up (pwr mgmt) */
165#define CSR_INT_BIT_ALIVE (1<<0) /* uCode interrupts once it initializes */
166
167#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
168 CSR_INT_BIT_HW_ERR | \
169 CSR_INT_BIT_FH_TX | \
170 CSR_INT_BIT_SW_ERR | \
171 CSR_INT_BIT_RF_KILL | \
172 CSR_INT_BIT_SW_RX | \
173 CSR_INT_BIT_WAKEUP | \
174 CSR_INT_BIT_ALIVE)
175
176/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
177#define CSR_FH_INT_BIT_ERR (1<<31) /* Error */
178#define CSR_FH_INT_BIT_HI_PRIOR (1<<30) /* High priority Rx, bypass coalescing */
179#define CSR_FH_INT_BIT_RX_CHNL2 (1<<18) /* Rx channel 2 (3945 only) */
180#define CSR_FH_INT_BIT_RX_CHNL1 (1<<17) /* Rx channel 1 */
181#define CSR_FH_INT_BIT_RX_CHNL0 (1<<16) /* Rx channel 0 */
182#define CSR_FH_INT_BIT_TX_CHNL6 (1<<6) /* Tx channel 6 (3945 only) */
183#define CSR_FH_INT_BIT_TX_CHNL1 (1<<1) /* Tx channel 1 */
184#define CSR_FH_INT_BIT_TX_CHNL0 (1<<0) /* Tx channel 0 */
185
186#define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
187 CSR_FH_INT_BIT_RX_CHNL2 | \
188 CSR_FH_INT_BIT_RX_CHNL1 | \
189 CSR_FH_INT_BIT_RX_CHNL0)
190
191#define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL6 | \
192 CSR_FH_INT_BIT_TX_CHNL1 | \
193 CSR_FH_INT_BIT_TX_CHNL0 )
194
195
196/* RESET */
197#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
198#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
199#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
200#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
201#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
202
203/* GP (general purpose) CONTROL */
204#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
205#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
206#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
207#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
208
209#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
210
211#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
212#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
213#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
214
215
216/* EEPROM REG */
217#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
218#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
219
220/* EEPROM GP */
221#define CSR_EEPROM_GP_VALID_MSK (0x00000006)
222#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
223#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
224
225/* UCODE DRV GP */
226#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
227#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
228#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
229#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
230
231/* GPIO */
232#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
233#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
234#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC CSR_GPIO_IN_BIT_AUX_POWER
235
236/* GI Chicken Bits */
237#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
238#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
239
240/* CSR_ANA_PLL_CFG */
241#define CSR_ANA_PLL_CFG_SH (0x00880300)
242
243#define CSR_LED_REG_TRUN_ON (0x00000078)
244#define CSR_LED_REG_TRUN_OFF (0x00000038)
245#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
246
247/* DRAM_INT_TBL_CTRL */
248#define CSR_DRAM_INT_TBL_CTRL_EN (1<<31)
249#define CSR_DRAM_INT_TBL_CTRL_WRAP_CHK (1<<27)
250
251/*=== HBUS (Host-side Bus) ===*/
252#define HBUS_BASE (0x400)
253
254#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
255#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
256#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
257#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
258#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
259#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
260#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
261#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
262#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
263
264#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
265
266
267/* SCD (Scheduler) */
268#define SCD_BASE (CSR_BASE + 0x2E00)
269
270#define SCD_MODE_REG (SCD_BASE + 0x000)
271#define SCD_ARASTAT_REG (SCD_BASE + 0x004)
272#define SCD_TXFACT_REG (SCD_BASE + 0x010)
273#define SCD_TXF4MF_REG (SCD_BASE + 0x014)
274#define SCD_TXF5MF_REG (SCD_BASE + 0x020)
275#define SCD_SBYP_MODE_1_REG (SCD_BASE + 0x02C)
276#define SCD_SBYP_MODE_2_REG (SCD_BASE + 0x030)
277
278/*=== FH (data Flow Handler) ===*/
279#define FH_BASE (0x800)
280
281#define FH_CBCC_TABLE (FH_BASE+0x140)
282#define FH_TFDB_TABLE (FH_BASE+0x180)
283#define FH_RCSR_TABLE (FH_BASE+0x400)
284#define FH_RSSR_TABLE (FH_BASE+0x4c0)
285#define FH_TCSR_TABLE (FH_BASE+0x500)
286#define FH_TSSR_TABLE (FH_BASE+0x680)
287
288/* TFDB (Transmit Frame Buffer Descriptor) */
289#define FH_TFDB(_channel, buf) \
290 (FH_TFDB_TABLE+((_channel)*2+(buf))*0x28)
291#define ALM_FH_TFDB_CHNL_BUF_CTRL_REG(_channel) \
292 (FH_TFDB_TABLE + 0x50 * _channel)
293/* CBCC _channel is [0,2] */
294#define FH_CBCC(_channel) (FH_CBCC_TABLE+(_channel)*0x8)
295#define FH_CBCC_CTRL(_channel) (FH_CBCC(_channel)+0x00)
296#define FH_CBCC_BASE(_channel) (FH_CBCC(_channel)+0x04)
297
298/* RCSR _channel is [0,2] */
299#define FH_RCSR(_channel) (FH_RCSR_TABLE+(_channel)*0x40)
300#define FH_RCSR_CONFIG(_channel) (FH_RCSR(_channel)+0x00)
301#define FH_RCSR_RBD_BASE(_channel) (FH_RCSR(_channel)+0x04)
302#define FH_RCSR_WPTR(_channel) (FH_RCSR(_channel)+0x20)
303#define FH_RCSR_RPTR_ADDR(_channel) (FH_RCSR(_channel)+0x24)
304
305#if IWL == 3945
306#define FH_RSCSR_CHNL0_WPTR (FH_RCSR_WPTR(0))
307#elif IWL == 4965
308#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
309#endif
310
311/* RSSR */
312#define FH_RSSR_CTRL (FH_RSSR_TABLE+0x000)
313#define FH_RSSR_STATUS (FH_RSSR_TABLE+0x004)
314/* TCSR */
315#define FH_TCSR(_channel) (FH_TCSR_TABLE+(_channel)*0x20)
316#define FH_TCSR_CONFIG(_channel) (FH_TCSR(_channel)+0x00)
317#define FH_TCSR_CREDIT(_channel) (FH_TCSR(_channel)+0x04)
318#define FH_TCSR_BUFF_STTS(_channel) (FH_TCSR(_channel)+0x08)
319/* TSSR */
320#define FH_TSSR_CBB_BASE (FH_TSSR_TABLE+0x000)
321#define FH_TSSR_MSG_CONFIG (FH_TSSR_TABLE+0x008)
322#define FH_TSSR_TX_STATUS (FH_TSSR_TABLE+0x010)
323/* 18 - reserved */
324
325/* card static random access memory (SRAM) for processor data and instructions */
326#define RTC_INST_LOWER_BOUND (0x000000)
327#define RTC_DATA_LOWER_BOUND (0x800000)
328
329
330/* DBM */
331
332#define ALM_FH_SRVC_CHNL (6)
333
334#define ALM_FH_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
335#define ALM_FH_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
336
337#define ALM_FH_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
338
339#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
340
341#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
342
343#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
344
345#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
346
347#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
348
349#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
350#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
351
352#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
353#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
354
355#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
356
357#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
358
359#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
360#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
361
362#define ALM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
363
364#define ALM_FH_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
365
366#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
367#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
368
369#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
370
371#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
372#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
373
374#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
375#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
376
377#define ALM_TB_MAX_BYTES_COUNT (0xFFF0)
378
379#define ALM_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_channel) \
380 ((1LU << _channel) << 24)
381#define ALM_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_channel) \
382 ((1LU << _channel) << 16)
383
384#define ALM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_channel) \
385 (ALM_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_channel) | \
386 ALM_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_channel))
387#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
388#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
389
390#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
391
392#define TFD_QUEUE_MIN 0
393#define TFD_QUEUE_MAX 6
394#define TFD_QUEUE_SIZE_MAX (256)
395
396/* spectrum and channel data structures */
397#define IWL_NUM_SCAN_RATES (2)
398
399#define IWL_SCAN_FLAG_24GHZ (1<<0)
400#define IWL_SCAN_FLAG_52GHZ (1<<1)
401#define IWL_SCAN_FLAG_ACTIVE (1<<2)
402#define IWL_SCAN_FLAG_DIRECT (1<<3)
403
404#define IWL_MAX_CMD_SIZE 1024
405
406#define IWL_DEFAULT_TX_RETRY 15
407#define IWL_MAX_TX_RETRY 16
408
409/*********************************************/
410
411#define RFD_SIZE 4
412#define NUM_TFD_CHUNKS 4
413
414#define RX_QUEUE_SIZE 256
415#define RX_QUEUE_MASK 255
416#define RX_QUEUE_SIZE_LOG 8
417
418/* QoS definitions */
419
420#define CW_MIN_OFDM 15
421#define CW_MAX_OFDM 1023
422#define CW_MIN_CCK 31
423#define CW_MAX_CCK 1023
424
425#define QOS_TX0_CW_MIN_OFDM CW_MIN_OFDM
426#define QOS_TX1_CW_MIN_OFDM CW_MIN_OFDM
427#define QOS_TX2_CW_MIN_OFDM ((CW_MIN_OFDM + 1) / 2 - 1)
428#define QOS_TX3_CW_MIN_OFDM ((CW_MIN_OFDM + 1) / 4 - 1)
429
430#define QOS_TX0_CW_MIN_CCK CW_MIN_CCK
431#define QOS_TX1_CW_MIN_CCK CW_MIN_CCK
432#define QOS_TX2_CW_MIN_CCK ((CW_MIN_CCK + 1) / 2 - 1)
433#define QOS_TX3_CW_MIN_CCK ((CW_MIN_CCK + 1) / 4 - 1)
434
435#define QOS_TX0_CW_MAX_OFDM CW_MAX_OFDM
436#define QOS_TX1_CW_MAX_OFDM CW_MAX_OFDM
437#define QOS_TX2_CW_MAX_OFDM CW_MIN_OFDM
438#define QOS_TX3_CW_MAX_OFDM ((CW_MIN_OFDM + 1) / 2 - 1)
439
440#define QOS_TX0_CW_MAX_CCK CW_MAX_CCK
441#define QOS_TX1_CW_MAX_CCK CW_MAX_CCK
442#define QOS_TX2_CW_MAX_CCK CW_MIN_CCK
443#define QOS_TX3_CW_MAX_CCK ((CW_MIN_CCK + 1) / 2 - 1)
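/* Worked example: with CW_MIN_OFDM = 15 and CW_MIN_CCK = 31, the scaled
 * values above come out to cw_min 15/15/7/3 (OFDM) and 31/31/15/7 (CCK)
 * for TX0..TX3, i.e. the higher-numbered queues contend with progressively
 * smaller windows. */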
444
445#define QOS_TX0_AIFS 3
446#define QOS_TX1_AIFS 7
447#define QOS_TX2_AIFS 2
448#define QOS_TX3_AIFS 2
449
450#define QOS_TX0_ACM 0
451#define QOS_TX1_ACM 0
452#define QOS_TX2_ACM 0
453#define QOS_TX3_ACM 0
454
455#define QOS_TX0_TXOP_LIMIT_CCK 0
456#define QOS_TX1_TXOP_LIMIT_CCK 0
457#define QOS_TX2_TXOP_LIMIT_CCK 6016
458#define QOS_TX3_TXOP_LIMIT_CCK 3264
459
460#define QOS_TX0_TXOP_LIMIT_OFDM 0
461#define QOS_TX1_TXOP_LIMIT_OFDM 0
462#define QOS_TX2_TXOP_LIMIT_OFDM 3008
463#define QOS_TX3_TXOP_LIMIT_OFDM 1504
464
465#define DEF_TX0_CW_MIN_OFDM CW_MIN_OFDM
466#define DEF_TX1_CW_MIN_OFDM CW_MIN_OFDM
467#define DEF_TX2_CW_MIN_OFDM CW_MIN_OFDM
468#define DEF_TX3_CW_MIN_OFDM CW_MIN_OFDM
469
470#define DEF_TX0_CW_MIN_CCK CW_MIN_CCK
471#define DEF_TX1_CW_MIN_CCK CW_MIN_CCK
472#define DEF_TX2_CW_MIN_CCK CW_MIN_CCK
473#define DEF_TX3_CW_MIN_CCK CW_MIN_CCK
474
475#define DEF_TX0_CW_MAX_OFDM CW_MAX_OFDM
476#define DEF_TX1_CW_MAX_OFDM CW_MAX_OFDM
477#define DEF_TX2_CW_MAX_OFDM CW_MAX_OFDM
478#define DEF_TX3_CW_MAX_OFDM CW_MAX_OFDM
479
480#define DEF_TX0_CW_MAX_CCK CW_MAX_CCK
481#define DEF_TX1_CW_MAX_CCK CW_MAX_CCK
482#define DEF_TX2_CW_MAX_CCK CW_MAX_CCK
483#define DEF_TX3_CW_MAX_CCK CW_MAX_CCK
484
485#define DEF_TX0_AIFS (2)
486#define DEF_TX1_AIFS (2)
487#define DEF_TX2_AIFS (2)
488#define DEF_TX3_AIFS (2)
489
490#define DEF_TX0_ACM 0
491#define DEF_TX1_ACM 0
492#define DEF_TX2_ACM 0
493#define DEF_TX3_ACM 0
494
495#define DEF_TX0_TXOP_LIMIT_CCK 0
496#define DEF_TX1_TXOP_LIMIT_CCK 0
497#define DEF_TX2_TXOP_LIMIT_CCK 0
498#define DEF_TX3_TXOP_LIMIT_CCK 0
499
500#define DEF_TX0_TXOP_LIMIT_OFDM 0
501#define DEF_TX1_TXOP_LIMIT_OFDM 0
502#define DEF_TX2_TXOP_LIMIT_OFDM 0
503#define DEF_TX3_TXOP_LIMIT_OFDM 0
504
505#define QOS_QOS_SETS 3
506#define QOS_PARAM_SET_ACTIVE 0
507#define QOS_PARAM_SET_DEF_CCK 1
508#define QOS_PARAM_SET_DEF_OFDM 2
509
510#define CTRL_QOS_NO_ACK (0x0020)
511#define DCT_FLAG_EXT_QOS_ENABLED (0x10)
512
513#define U32_PAD(n) ((4-(n))&0x3)
514
515/*
516 * Generic queue structure
517 *
518 * Contains common data for Rx and Tx queues
519 */
520#define TFD_CTL_COUNT_SET(n) ((n) << 24)
521#define TFD_CTL_COUNT_GET(ctl) (((ctl) >> 24) & 7)
522#define TFD_CTL_PAD_SET(n) ((n) << 28)
523#define TFD_CTL_PAD_GET(ctl) ((ctl) >> 28)
524
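/* Illustrative sketch (values chosen arbitrarily): packing and unpacking a
 * TFD control word for 3 buffer chunks and 1 pad byte:
 *
 *	u32 ctl = TFD_CTL_COUNT_SET(3) | TFD_CTL_PAD_SET(1);
 *	(then TFD_CTL_COUNT_GET(ctl) == 3 and TFD_CTL_PAD_GET(ctl) == 1)
 */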
525#define TFD_TX_CMD_SLOTS 256
526#define TFD_CMD_SLOTS 32
527
528#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_cmd) - \
529 sizeof(struct iwl_cmd_meta))
530
531/*
532 * RX related structures and functions
533 */
534#define RX_FREE_BUFFERS 64
535#define RX_LOW_WATERMARK 8
536
537#endif /* __iwlwifi_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
new file mode 100644
index 000000000000..8a8b96fcf48d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -0,0 +1,470 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_io_h__
30#define __iwl_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-debug.h"
35
36/*
37 * IO, register, and NIC memory access functions
38 *
39 * NOTE on naming convention and macro usage for these
40 *
41 * A single _ prefix before an access function means that no state
42 * check or debug information is printed when that function is called.
43 *
44 * A double __ prefix before an access function means that state is checked
45 * (in the case of *restricted calls) and the current line number is printed
46 * in addition to any other debug output.
47 *
48 * The non-prefixed name is the #define that routes the caller into the
49 * double __ prefix version, passing along the caller's __LINE__.
50 *
51 * If you wish to call the function without any debug or state checking,
52 * you should use the single _ prefix version (as is used by dependent IO
53 * routines, for example _iwl_read_restricted calls the non-check version of
54 * _iwl_read32.)
55 *
56 * These declarations are *extremely* useful in quickly isolating code deltas
57 * which result in misconfiguration of the hardware I/O. In combination with
58 * git-bisect and the IO debug level you can quickly determine the specific
59 * commit which breaks the IO sequence to the hardware.
60 *
61 */
62
63#define _iwl_write32(iwl, ofs, val) writel((val), (iwl)->hw_base + (ofs))
64#ifdef CONFIG_IWLWIFI_DEBUG
65static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *iwl,
66 u32 ofs, u32 val)
67{
68 IWL_DEBUG_IO("write_direct32(0x%08X, 0x%08X) - %s %d\n",
69 (u32) (ofs), (u32) (val), f, l);
70 _iwl_write32(iwl, ofs, val);
71}
72#define iwl_write32(iwl, ofs, val) \
73 __iwl_write32(__FILE__, __LINE__, iwl, ofs, val)
74#else
75#define iwl_write32(iwl, ofs, val) _iwl_write32(iwl, ofs, val)
76#endif
77
78#define _iwl_read32(iwl, ofs) readl((iwl)->hw_base + (ofs))
79#ifdef CONFIG_IWLWIFI_DEBUG
80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *iwl, u32 ofs)
81{
82 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l);
83 return _iwl_read32(iwl, ofs);
84}
85#define iwl_read32(iwl, ofs) __iwl_read32(__FILE__, __LINE__, iwl, ofs)
86#else
87#define iwl_read32(p, o) _iwl_read32(p, o)
88#endif
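/* Illustrative sketch: callers use only the unprefixed names; with
 * CONFIG_IWLWIFI_DEBUG set they automatically log their own __FILE__ and
 * __LINE__.  CSR_HW_REV and CSR_INT (from iwl-hw.h) are just convenient
 * registers for the example.
 *
 *	u32 hw_rev = iwl_read32(priv, CSR_HW_REV);
 *	iwl_write32(priv, CSR_INT, 0xffffffff);	(ack all interrupt bits)
 */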
89
90static inline int _iwl_poll_bit(struct iwl_priv *priv, u32 addr,
91 u32 bits, u32 mask, int timeout)
92{
93 int i = 0;
94
95 do {
96 if ((_iwl_read32(priv, addr) & mask) == (bits & mask))
97 return i;
98 mdelay(10);
99 i += 10;
100 } while (i < timeout);
101
102 return -ETIMEDOUT;
103}
104#ifdef CONFIG_IWLWIFI_DEBUG
105static inline int __iwl_poll_bit(const char *f, u32 l,
106 struct iwl_priv *priv, u32 addr,
107 u32 bits, u32 mask, int timeout)
108{
109 int rc = _iwl_poll_bit(priv, addr, bits, mask, timeout);
110 if (unlikely(rc == -ETIMEDOUT))
111 IWL_DEBUG_IO
112 ("poll_bit(0x%08X, 0x%08X, 0x%08X) - timedout - %s %d\n",
113 addr, bits, mask, f, l);
114 else
115 IWL_DEBUG_IO
116 ("poll_bit(0x%08X, 0x%08X, 0x%08X) = 0x%08X - %s %d\n",
117 addr, bits, mask, rc, f, l);
118 return rc;
119}
120#define iwl_poll_bit(iwl, addr, bits, mask, timeout) \
121 __iwl_poll_bit(__FILE__, __LINE__, iwl, addr, bits, mask, timeout)
122#else
123#define iwl_poll_bit(p, a, b, m, t) _iwl_poll_bit(p, a, b, m, t)
124#endif
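/* Illustrative sketch: poll for the MAC clock to come up.  The register and
 * flag come from iwl-hw.h; the timeout (in msec, 10 msec granularity) is an
 * arbitrary choice for the example.
 *
 *	int ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
 *			       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
 *			       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 50);
 *	if (ret < 0)
 *		IWL_ERROR("MAC clock not ready\n");
 */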
125
126static inline void _iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
127{
128 _iwl_write32(priv, reg, _iwl_read32(priv, reg) | mask);
129}
130#ifdef CONFIG_IWLWIFI_DEBUG
131static inline void __iwl_set_bit(const char *f, u32 l,
132 struct iwl_priv *priv, u32 reg, u32 mask)
133{
134 u32 val = _iwl_read32(priv, reg) | mask;
135 IWL_DEBUG_IO("set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
136 _iwl_write32(priv, reg, val);
137}
138#define iwl_set_bit(p, r, m) __iwl_set_bit(__FILE__, __LINE__, p, r, m)
139#else
140#define iwl_set_bit(p, r, m) _iwl_set_bit(p, r, m)
141#endif
142
143static inline void _iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
144{
145 _iwl_write32(priv, reg, _iwl_read32(priv, reg) & ~mask);
146}
147#ifdef CONFIG_IWLWIFI_DEBUG
148static inline void __iwl_clear_bit(const char *f, u32 l,
149 struct iwl_priv *priv, u32 reg, u32 mask)
150{
151 u32 val = _iwl_read32(priv, reg) & ~mask;
152 IWL_DEBUG_IO("clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
153 _iwl_write32(priv, reg, val);
154}
155#define iwl_clear_bit(p, r, m) __iwl_clear_bit(__FILE__, __LINE__, p, r, m)
156#else
157#define iwl_clear_bit(p, r, m) _iwl_clear_bit(p, r, m)
158#endif
159
160static inline int _iwl_grab_restricted_access(struct iwl_priv *priv)
161{
162 int rc;
163 u32 gp_ctl;
164
165#ifdef CONFIG_IWLWIFI_DEBUG
166 if (atomic_read(&priv->restrict_refcnt))
167 return 0;
168#endif
169 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
170 test_bit(STATUS_RF_KILL_SW, &priv->status)) {
171 IWL_WARNING("WARNING: Requesting MAC access during RFKILL "
172 "wakes up NIC\n");
173
174 /* 10 msec allows time for NIC to complete its data save */
175 gp_ctl = _iwl_read32(priv, CSR_GP_CNTRL);
176 if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
177 IWL_DEBUG_RF_KILL("Wait for complete power-down, "
178 "gpctl = 0x%08x\n", gp_ctl);
179 mdelay(10);
180 } else
181 IWL_DEBUG_RF_KILL("power-down complete, "
182 "gpctl = 0x%08x\n", gp_ctl);
183 }
184
185 /* this bit wakes up the NIC */
186 _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
187 rc = _iwl_poll_bit(priv, CSR_GP_CNTRL,
188 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
189 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
190 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 50);
191 if (rc < 0) {
192 IWL_ERROR("MAC is in deep sleep!\n");
193 return -EIO;
194 }
195
196#ifdef CONFIG_IWLWIFI_DEBUG
197 atomic_inc(&priv->restrict_refcnt);
198#endif
199 return 0;
200}
201
202#ifdef CONFIG_IWLWIFI_DEBUG
203static inline int __iwl_grab_restricted_access(const char *f, u32 l,
204 struct iwl_priv *priv)
205{
206 if (atomic_read(&priv->restrict_refcnt))
207 IWL_DEBUG_INFO("Grabbing access while already held at "
208 "line %d.\n", l);
209
210 IWL_DEBUG_IO("grabbing restricted access - %s %d\n", f, l);
211
212 return _iwl_grab_restricted_access(priv);
213}
214#define iwl_grab_restricted_access(priv) \
215 __iwl_grab_restricted_access(__FILE__, __LINE__, priv)
216#else
217#define iwl_grab_restricted_access(priv) \
218 _iwl_grab_restricted_access(priv)
219#endif
220
221static inline void _iwl_release_restricted_access(struct iwl_priv *priv)
222{
223#ifdef CONFIG_IWLWIFI_DEBUG
224 if (atomic_dec_and_test(&priv->restrict_refcnt))
225#endif
226 _iwl_clear_bit(priv, CSR_GP_CNTRL,
227 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
228}
229#ifdef CONFIG_IWLWIFI_DEBUG
230static inline void __iwl_release_restricted_access(const char *f, u32 l,
231 struct iwl_priv *priv)
232{
233 if (atomic_read(&priv->restrict_refcnt) <= 0)
234 IWL_ERROR("Release unheld restricted access at line %d.\n", l);
235
236 IWL_DEBUG_IO("releasing restricted access - %s %d\n", f, l);
237 _iwl_release_restricted_access(priv);
238}
239#define iwl_release_restricted_access(priv) \
240 __iwl_release_restricted_access(__FILE__, __LINE__, priv)
241#else
242#define iwl_release_restricted_access(priv) \
243 _iwl_release_restricted_access(priv)
244#endif
245
246static inline u32 _iwl_read_restricted(struct iwl_priv *priv, u32 reg)
247{
248 return _iwl_read32(priv, reg);
249}
250#ifdef CONFIG_IWLWIFI_DEBUG
251static inline u32 __iwl_read_restricted(const char *f, u32 l,
252 struct iwl_priv *priv, u32 reg)
253{
254 u32 value = _iwl_read_restricted(priv, reg);
255 if (!atomic_read(&priv->restrict_refcnt))
256 IWL_ERROR("Unrestricted access from %s %d\n", f, l);
257 IWL_DEBUG_IO("read_restricted(0x%4X) = 0x%08x - %s %d \n", reg, value,
258 f, l);
259 return value;
260}
261#define iwl_read_restricted(priv, reg) \
262 __iwl_read_restricted(__FILE__, __LINE__, priv, reg)
263#else
264#define iwl_read_restricted _iwl_read_restricted
265#endif
266
267static inline void _iwl_write_restricted(struct iwl_priv *priv,
268 u32 reg, u32 value)
269{
270 _iwl_write32(priv, reg, value);
271}
272#ifdef CONFIG_IWLWIFI_DEBUG
273static inline void __iwl_write_restricted(u32 line,
274 struct iwl_priv *priv, u32 reg, u32 value)
275{
276 if (!atomic_read(&priv->restrict_refcnt))
277 IWL_ERROR("Unrestricted access from line %d\n", line);
278 _iwl_write_restricted(priv, reg, value);
279}
280#define iwl_write_restricted(priv, reg, value) \
281 __iwl_write_restricted(__LINE__, priv, reg, value)
282#else
283#define iwl_write_restricted _iwl_write_restricted
284#endif
285
286static inline void iwl_write_buffer_restricted(struct iwl_priv *priv,
287 u32 reg, u32 len, u32 *values)
288{
289 u32 count = sizeof(u32);
290
291 if ((priv != NULL) && (values != NULL)) {
292 for (; 0 < len; len -= count, reg += count, values++)
293 _iwl_write_restricted(priv, reg, *values);
294 }
295}
296
297static inline int _iwl_poll_restricted_bit(struct iwl_priv *priv,
298 u32 addr, u32 mask, int timeout)
299{
300 int i = 0;
301
302 do {
303 if ((_iwl_read_restricted(priv, addr) & mask) == mask)
304 return i;
305 mdelay(10);
306 i += 10;
307 } while (i < timeout);
308
309 return -ETIMEDOUT;
310}
311
312#ifdef CONFIG_IWLWIFI_DEBUG
313static inline int __iwl_poll_restricted_bit(const char *f, u32 l,
314 struct iwl_priv *priv,
315 u32 addr, u32 mask, int timeout)
316{
317 int rc = _iwl_poll_restricted_bit(priv, addr, mask, timeout);
318
319 if (unlikely(rc == -ETIMEDOUT))
320 IWL_DEBUG_IO("poll_restricted_bit(0x%08X, 0x%08X) - "
321 "timedout - %s %d\n", addr, mask, f, l);
322 else
323 IWL_DEBUG_IO("poll_restricted_bit(0x%08X, 0x%08X) = 0x%08X "
324 "- %s %d\n", addr, mask, rc, f, l);
325 return rc;
326}
327#define iwl_poll_restricted_bit(iwl, addr, mask, timeout) \
328 __iwl_poll_restricted_bit(__FILE__, __LINE__, iwl, addr, mask, timeout)
329#else
330#define iwl_poll_restricted_bit _iwl_poll_restricted_bit
331#endif
332
333static inline u32 _iwl_read_restricted_reg(struct iwl_priv *priv, u32 reg)
334{
335 _iwl_write_restricted(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
336 return _iwl_read_restricted(priv, HBUS_TARG_PRPH_RDAT);
337}
338#ifdef CONFIG_IWLWIFI_DEBUG
339static inline u32 __iwl_read_restricted_reg(u32 line,
340 struct iwl_priv *priv, u32 reg)
341{
342 if (!atomic_read(&priv->restrict_refcnt))
343 IWL_ERROR("Unrestricted access from line %d\n", line);
344 return _iwl_read_restricted_reg(priv, reg);
345}
346
347#define iwl_read_restricted_reg(priv, reg) \
348 __iwl_read_restricted_reg(__LINE__, priv, reg)
349#else
350#define iwl_read_restricted_reg _iwl_read_restricted_reg
351#endif
352
353static inline void _iwl_write_restricted_reg(struct iwl_priv *priv,
354 u32 addr, u32 val)
355{
356 _iwl_write_restricted(priv, HBUS_TARG_PRPH_WADDR,
357 ((addr & 0x0000FFFF) | (3 << 24)));
358 _iwl_write_restricted(priv, HBUS_TARG_PRPH_WDAT, val);
359}
360#ifdef CONFIG_IWLWIFI_DEBUG
361static inline void __iwl_write_restricted_reg(u32 line,
362 struct iwl_priv *priv,
363 u32 addr, u32 val)
364{
365 if (!atomic_read(&priv->restrict_refcnt))
366 IWL_ERROR("Unrestricted access from line %d\n", line);
367 _iwl_write_restricted_reg(priv, addr, val);
368}
369
370#define iwl_write_restricted_reg(priv, addr, val) \
371 __iwl_write_restricted_reg(__LINE__, priv, addr, val)
372#else
373#define iwl_write_restricted_reg _iwl_write_restricted_reg
374#endif
375
376#define _iwl_set_bits_restricted_reg(priv, reg, mask) \
377 _iwl_write_restricted_reg(priv, reg, \
378 (_iwl_read_restricted_reg(priv, reg) | mask))
379#ifdef CONFIG_IWLWIFI_DEBUG
380static inline void __iwl_set_bits_restricted_reg(u32 line, struct iwl_priv
381 *priv, u32 reg, u32 mask)
382{
383 if (!atomic_read(&priv->restrict_refcnt))
384 IWL_ERROR("Unrestricted access from line %d\n", line);
385 _iwl_set_bits_restricted_reg(priv, reg, mask);
386}
387#define iwl_set_bits_restricted_reg(priv, reg, mask) \
388 __iwl_set_bits_restricted_reg(__LINE__, priv, reg, mask)
389#else
390#define iwl_set_bits_restricted_reg _iwl_set_bits_restricted_reg
391#endif
392
393#define _iwl_set_bits_mask_restricted_reg(priv, reg, bits, mask) \
394 _iwl_write_restricted_reg( \
395 priv, reg, ((_iwl_read_restricted_reg(priv, reg) & mask) | bits))
396#ifdef CONFIG_IWLWIFI_DEBUG
397static inline void __iwl_set_bits_mask_restricted_reg(u32 line,
398 struct iwl_priv *priv, u32 reg, u32 bits, u32 mask)
399{
400 if (!atomic_read(&priv->restrict_refcnt))
401 IWL_ERROR("Unrestricted access from line %d\n", line);
402 _iwl_set_bits_mask_restricted_reg(priv, reg, bits, mask);
403}
404
405#define iwl_set_bits_mask_restricted_reg(priv, reg, bits, mask) \
406 __iwl_set_bits_mask_restricted_reg(__LINE__, priv, reg, bits, mask)
407#else
408#define iwl_set_bits_mask_restricted_reg _iwl_set_bits_mask_restricted_reg
409#endif
410
411static inline void iwl_clear_bits_restricted_reg(struct iwl_priv
412 *priv, u32 reg, u32 mask)
413{
414 u32 val = _iwl_read_restricted_reg(priv, reg);
415 _iwl_write_restricted_reg(priv, reg, (val & ~mask));
416}
417
418static inline u32 iwl_read_restricted_mem(struct iwl_priv *priv, u32 addr)
419{
420 iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, addr);
421 return iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
422}
423
424static inline void iwl_write_restricted_mem(struct iwl_priv *priv, u32 addr,
425 u32 val)
426{
427 iwl_write_restricted(priv, HBUS_TARG_MEM_WADDR, addr);
428 iwl_write_restricted(priv, HBUS_TARG_MEM_WDAT, val);
429}
430
431static inline void iwl_write_restricted_mems(struct iwl_priv *priv, u32 addr,
432 u32 len, u32 *values)
433{
434 iwl_write_restricted(priv, HBUS_TARG_MEM_WADDR, addr);
435 for (; 0 < len; len -= sizeof(u32), values++)
436 iwl_write_restricted(priv, HBUS_TARG_MEM_WDAT, *values);
437}
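/* Illustrative sketch of the canonical access pattern: grab MAC access,
 * perform the restricted IO, then release.  The SRAM address is just the
 * start of data SRAM from iwl-hw.h.
 *
 *	if (!iwl_grab_restricted_access(priv)) {
 *		u32 word = iwl_read_restricted_mem(priv,
 *						   RTC_DATA_LOWER_BOUND);
 *		iwl_write_restricted_mem(priv, RTC_DATA_LOWER_BOUND, word);
 *		iwl_release_restricted_access(priv);
 *	}
 */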
438
439static inline void iwl_write_restricted_regs(struct iwl_priv *priv, u32 reg,
440 u32 len, u8 *values)
441{
442 u32 reg_offset = reg;
443 u32 alignment = reg & 0x3;
444
445 /* write any non-dword-aligned stuff at the beginning */
446 if (len < sizeof(u32)) {
447 if ((alignment + len) <= sizeof(u32)) {
448 u8 size;
449 u32 value = 0;
450 size = len - 1;
451 memcpy(&value, values, len);
452 reg_offset = (reg_offset & 0x0000FFFF);
453
454 _iwl_write_restricted(priv,
455 HBUS_TARG_PRPH_WADDR,
456 (reg_offset | (size << 24)));
457 _iwl_write_restricted(priv, HBUS_TARG_PRPH_WDAT,
458 value);
459 }
460
461 return;
462 }
463
464 /* now write all the dword-aligned stuff */
465 for (; reg_offset < (reg + len);
466 reg_offset += sizeof(u32), values += sizeof(u32))
467 _iwl_write_restricted_reg(priv, reg_offset, *((u32 *) values));
468}
469
470#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-priv.h b/drivers/net/wireless/iwlwifi/iwl-priv.h
new file mode 100644
index 000000000000..6b490d08fea9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-priv.h
@@ -0,0 +1,308 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_priv_h__
28#define __iwl_priv_h__
29
30#include <linux/workqueue.h>
31
32#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
33
34enum {
35 MEASUREMENT_READY = (1 << 0),
36 MEASUREMENT_ACTIVE = (1 << 1),
37};
38
39#endif
40
41struct iwl_priv {
42
43 /* ieee device used by generic ieee processing code */
44 struct ieee80211_hw *hw;
45 struct ieee80211_channel *ieee_channels;
46 struct ieee80211_rate *ieee_rates;
47
48 /* temporary frame storage list */
49 struct list_head free_frames;
50 int frames_count;
51
52 u8 phymode;
53 int alloc_rxb_skb;
54
55 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
56 struct iwl_rx_mem_buffer *rxb);
57
58 const struct ieee80211_hw_mode *modes;
59
60#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
61 /* spectrum measurement report caching */
62 struct iwl_spectrum_notification measure_report;
63 u8 measurement_status;
64#endif
65 /* ucode beacon time */
66 u32 ucode_beacon_time;
67
68 /* we allocate array of iwl_channel_info for NIC's valid channels.
69 * Access via channel # using indirect index array */
70 struct iwl_channel_info *channel_info; /* channel info array */
71 u8 channel_count; /* # of channels */
72
73 /* each calibration channel group in the EEPROM has a derived
74 * clip setting for each rate. */
75 const struct iwl_clip_group clip_groups[5];
76
77 /* thermal calibration */
78 s32 temperature; /* degrees Kelvin */
79 s32 last_temperature;
80
81 /* Scan related variables */
82 unsigned long last_scan_jiffies;
83 unsigned long scan_start;
84 unsigned long scan_pass_start;
85 unsigned long scan_start_tsf;
86 int scan_bands;
87 int one_direct_scan;
88 u8 direct_ssid_len;
89 u8 direct_ssid[IW_ESSID_MAX_SIZE];
90 struct iwl_scan_cmd *scan;
91 u8 only_active_channel;
92
93 /* spinlock */
94 spinlock_t lock; /* protect general shared data */
95 spinlock_t hcmd_lock; /* protect hcmd */
96 struct mutex mutex;
97
98 /* basic pci-network driver stuff */
99 struct pci_dev *pci_dev;
100
101 /* pci hardware address support */
102 void __iomem *hw_base;
103
104 /* uCode images, save to reload in case of failure */
105 struct fw_image_desc ucode_code; /* runtime inst */
106 struct fw_image_desc ucode_data; /* runtime data original */
107 struct fw_image_desc ucode_data_backup; /* runtime data save/restore */
108 struct fw_image_desc ucode_init; /* initialization inst */
109 struct fw_image_desc ucode_init_data; /* initialization data */
110 struct fw_image_desc ucode_boot; /* bootstrap inst */
111
112
113 struct iwl_rxon_time_cmd rxon_timing;
114
115 /* We declare this const so it can only be
116 * changed via explicit cast within the
117 * routines that actually update the physical
118 * hardware */
119 const struct iwl_rxon_cmd active_rxon;
120 struct iwl_rxon_cmd staging_rxon;
121
122 int error_recovering;
123 struct iwl_rxon_cmd recovery_rxon;
124
125 /* 1st responses from initialize and runtime uCode images.
126 * 4965's initialize alive response contains some calibration data. */
127 struct iwl_init_alive_resp card_alive_init;
128 struct iwl_alive_resp card_alive;
129
130#ifdef LED
131 /* LED related variables */
132 struct iwl_activity_blink activity;
133 unsigned long led_packets;
134 int led_state;
135#endif
136
137 u16 active_rate;
138 u16 active_rate_basic;
139
140 u8 call_post_assoc_from_beacon;
141 u8 assoc_station_added;
142#if IWL == 4965
143 u8 use_ant_b_for_management_frame; /* Tx antenna selection */
144 /* HT variables */
145 u8 is_dup;
146 u8 is_ht_enabled;
147 u8 channel_width; /* 0=20MHZ, 1=40MHZ */
148 u8 current_channel_width;
149 u8 valid_antenna; /* Bit mask of antennas actually connected */
150#ifdef CONFIG_IWLWIFI_SENSITIVITY
151 struct iwl_sensitivity_data sensitivity_data;
152 struct iwl_chain_noise_data chain_noise_data;
153 u8 start_calib;
154 __le16 sensitivity_tbl[HD_TABLE_SIZE];
155#endif /*CONFIG_IWLWIFI_SENSITIVITY*/
156
157#ifdef CONFIG_IWLWIFI_HT
158 struct sta_ht_info current_assoc_ht;
159#endif
160 u8 active_rate_ht[2];
161 u8 last_phy_res[100];
162
163 /* Rate scaling data */
164 struct iwl_lq_mngr lq_mngr;
165#endif
166
167 /* Rate scaling data */
168 s8 data_retry_limit;
169 u8 retry_rate;
170
171 wait_queue_head_t wait_command_queue;
172
173 int activity_timer_active;
174
175 /* Rx and Tx DMA processing queues */
176 struct iwl_rx_queue rxq;
177 struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES];
178#if IWL == 4965
179 unsigned long txq_ctx_active_msk;
180 struct iwl_kw kw; /* keep warm address */
181 u32 scd_base_addr; /* scheduler sram base address */
182#endif
183
184 unsigned long status;
185 u32 config;
186
187 int last_rx_rssi; /* From Rx packet statistics */
188 int last_rx_noise; /* From beacon statistics */
189
190 struct iwl_power_mgr power_data;
191
192 struct iwl_notif_statistics statistics;
193 unsigned long last_statistics_time;
194
195 /* context information */
196 u8 essid[IW_ESSID_MAX_SIZE];
197 u8 essid_len;
198 u16 rates_mask;
199
200 u32 power_mode;
201 u32 antenna;
202 u8 bssid[ETH_ALEN];
203 u16 rts_threshold;
204 u8 mac_addr[ETH_ALEN];
205
206 /*station table variables */
207 spinlock_t sta_lock;
208 int num_stations;
209 struct iwl_station_entry stations[IWL_STATION_COUNT];
210
211 /* Indication if ieee80211_ops->open has been called */
212 int is_open;
213
214 u8 mac80211_registered;
215 int is_abg;
216
217 u32 notif_missed_beacons;
218
219 /* Rx'd packet timing information */
220 u32 last_beacon_time;
221 u64 last_tsf;
222
223 /* Duplicate packet detection */
224 u16 last_seq_num;
225 u16 last_frag_num;
226 unsigned long last_packet_time;
227 struct list_head ibss_mac_hash[IWL_IBSS_MAC_HASH_SIZE];
228
229 /* eeprom */
230 struct iwl_eeprom eeprom;
231
232 int iw_mode;
233
234 struct sk_buff *ibss_beacon;
235
236 /* Last Rx'd beacon timestamp */
237 u32 timestamp0;
238 u32 timestamp1;
239 u16 beacon_int;
240 struct iwl_driver_hw_info hw_setting;
241 int interface_id;
242
243 /* Current association information needed to configure the
244 * hardware */
245 u16 assoc_id;
246 u16 assoc_capability;
247 u8 ps_mode;
248
249#ifdef CONFIG_IWLWIFI_QOS
250 struct iwl_qos_info qos_data;
251#endif /*CONFIG_IWLWIFI_QOS */
252
253 struct workqueue_struct *workqueue;
254
255 struct work_struct up;
256 struct work_struct restart;
257 struct work_struct calibrated_work;
258 struct work_struct scan_completed;
259 struct work_struct rx_replenish;
260 struct work_struct rf_kill;
261 struct work_struct abort_scan;
262 struct work_struct update_link_led;
263 struct work_struct auth_work;
264 struct work_struct report_work;
265 struct work_struct request_scan;
266 struct work_struct beacon_update;
267
268 struct tasklet_struct irq_tasklet;
269
270 struct delayed_work init_alive_start;
271 struct delayed_work alive_start;
272 struct delayed_work activity_timer;
273 struct delayed_work thermal_periodic;
274 struct delayed_work gather_stats;
275 struct delayed_work scan_check;
276 struct delayed_work post_associate;
277
278#define IWL_DEFAULT_TX_POWER 0x0F
279 s8 user_txpower_limit;
280 s8 max_channel_txpower_limit;
281 u32 cck_power_index_compensation;
282
283#ifdef CONFIG_PM
284 u32 pm_state[16];
285#endif
286
287#ifdef CONFIG_IWLWIFI_DEBUG
288 /* debugging info */
289 u32 framecnt_to_us;
290 atomic_t restrict_refcnt;
291#endif
292
293#if IWL == 4965
294 struct work_struct txpower_work;
295#ifdef CONFIG_IWLWIFI_SENSITIVITY
296 struct work_struct sensitivity_work;
297#endif
298 struct work_struct statistics_work;
299 struct timer_list statistics_periodic;
300
301#ifdef CONFIG_IWLWIFI_HT_AGG
302 struct work_struct agg_work;
303#endif
304
305#endif /* 4965 */
306}; /*iwl_priv */
307
308#endif /* __iwl_priv_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
new file mode 100644
index 000000000000..0df41148eadc
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -0,0 +1,229 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_prph_h__
64#define __iwl_prph_h__
65
66
67#define PRPH_BASE (0x00000)
68#define PRPH_END (0xFFFFF)
69
70/* APMG (power management) constants */
71#define APMG_BASE (PRPH_BASE + 0x3000)
72#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000)
73#define APMG_CLK_EN_REG (APMG_BASE + 0x0004)
74#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008)
75#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c)
76#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010)
77#define APMG_RFKILL_REG (APMG_BASE + 0x0014)
78#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c)
79#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020)
80
81#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
82#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
83
84#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
85
86#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
87
88#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
89#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
90#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x01000000)
91
92
93/**
94 * BSM (Bootstrap State Machine)
95 *
96 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
97 * in special SRAM that does not power down when the embedded control
98 * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
99 *
100 * When powering back up after sleeps (or during initial uCode load), the BSM
101 * internally loads the short bootstrap program from the special SRAM into the
102 * embedded processor's instruction SRAM, and starts the processor so it runs
103 * the bootstrap program.
104 *
105 * This bootstrap program loads (via PCI busmaster DMA) instructions and data
106 * images for a uCode program from host DRAM locations. The host driver
107 * indicates DRAM locations and sizes for instruction and data images via the
108 * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
109 * the new program starts automatically.
110 *
111 * The uCode used for open-source drivers includes two programs:
112 *
113 * 1) Initialization -- performs hardware calibration and sets up some
114 * internal data, then notifies host via "initialize alive" notification
115 * (struct iwl_init_alive_resp) that it has completed all of its work.
116 * After signal from host, it then loads and starts the runtime program.
117 * The initialization program must be used when initially setting up the
118 * NIC after loading the driver.
119 *
120 * 2) Runtime/Protocol -- performs all normal runtime operations. This
121 * notifies host via "alive" notification (struct iwl_alive_resp) that it
122 * is ready to be used.
123 *
124 * When initializing the NIC, the host driver does the following procedure:
125 *
126 * 1) Load bootstrap program (instructions only, no data image for bootstrap)
127 * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
128 *
129 * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
130 * images in host DRAM.
131 *
132 * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
133 * BSM_WR_MEM_SRC_REG = 0
134 * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
135 * BSM_WR_DWCOUNT_REG = # dwords in bootstrap instruction image
136 *
137 * 4) Load bootstrap into instruction SRAM:
138 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
139 *
140 * 5) Wait for load completion:
141 * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
142 *
143 * 6) Enable future boot loads whenever NIC's power management triggers it:
144 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
145 *
146 * 7) Start the NIC by removing all reset bits:
147 * CSR_RESET = 0
148 *
149 * The bootstrap uCode (already in instruction SRAM) loads initialization
150 * uCode. Initialization uCode performs data initialization, sends
151 * "initialize alive" notification to host, and waits for a signal from
152 * host to load runtime code.
153 *
154 * 8) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
155 * images in host DRAM. The last register loaded must be the instruction
156 * bytecount register ("1" in MSbit tells initialization uCode to load
157 * the runtime uCode):
158 * BSM_DRAM_INST_BYTECOUNT_REG = bytecount | BSM_DRAM_INST_LOAD
159 *
160 * 9) Wait for "alive" notification, then issue normal runtime commands.
161 *
162 * Data caching during power-downs:
163 *
164 * Just before the embedded controller powers down (e.g. for automatic
165 * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
166 * a current snapshot of the embedded processor's data SRAM into host DRAM.
167 * This caches the data while the embedded processor's memory is powered down.
168 * Location and size are controlled by BSM_DRAM_DATA_* registers.
169 *
170 * NOTE: Instruction SRAM does not need to be saved, since that doesn't
171 * change during operation; the original image (from uCode distribution
172 * file) can be used for reload.
173 *
174 * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
175 * at the BSM_DRAM_* registers, which now point to the runtime instruction
176 * image and the cached (modified) runtime data (*not* the initialization
177 * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
178 * uCode from where it left off before the power-down.
179 *
180 * NOTE: Initialization uCode does *not* run as part of the save/restore
181 * procedure.
182 *
183 * This save/restore method is mostly for autonomous power management during
184 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
185 * RFKILL should use complete restarts (with total re-initialization) of uCode,
186 * allowing total shutdown (including BSM memory).
187 *
188 * Note that, during normal operation, the host DRAM that held the initial
189 * startup data for the runtime code is now being used as a backup data cache
190 * for modified data! If you need to completely re-initialize the NIC, make
191 * sure that you use the runtime data image from the uCode distribution file,
192 * not the modified/saved runtime data. You may want to store a separate
193 * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
194 */
195
196/* BSM bit fields */
197#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
198#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
199#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
200
201/* BSM addresses */
202#define BSM_BASE (PRPH_BASE + 0x3400)
203#define BSM_END (PRPH_BASE + 0x3800)
204
205#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
206#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
207#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
208#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* dword count */
209#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
210
211/*
212 * Pointers and size regs for bootstrap load and data SRAM save/restore.
213 * NOTE: 3945 pointers use bits 31:0 of DRAM address.
214 * 4965 pointers use bits 35:4 of DRAM address.
215 */
216#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
217#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
218#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
219#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
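/*
 * Editorial example: on 4965 a 36-bit DRAM address such as 0x123456780 is
 * programmed as 0x12345678 (address >> 4), since only bits 35:4 fit in the
 * 32-bit register; on 3945 the full 32-bit address is written unchanged.
 */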
220
221/*
222 * BSM special memory, stays powered on during power-save sleeps.
223 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
224 */
225#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
226#define BSM_SRAM_SIZE (1024) /* bytes */
227
228
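/*
 * Editorial sketch (not part of this patch): the register writes behind
 * steps 3) through 6) of the bootstrap sequence documented above, once the
 * bootstrap image has already been copied to BSM_SRAM_LOWER_BOUND.  It
 * assumes the periphery-register helpers iwl_write_restricted_reg() and
 * iwl_read_restricted_reg() from iwl-io.h plus a struct iwl_priv definition,
 * none of which this header pulls in itself; @dst_addr would be
 * RTC_INST_LOWER_BOUND and @inst_dwords the bootstrap image size in dwords.
 */
static inline int iwl_bsm_boot_load_sketch(struct iwl_priv *priv,
					   u32 dst_addr, u32 inst_dwords)
{
	int i;

	/* Step 3: source offset in BSM SRAM, destination, and dword count */
	iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0);
	iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG, dst_addr);
	iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, inst_dwords);

	/* Step 4: start copying the bootstrap into instruction SRAM */
	iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
				 BSM_WR_CTRL_REG_BIT_START);

	/* Step 5: poll until the START bit clears (bounded wait) */
	for (i = 0; i < 100; i++) {
		if (!(iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG) &
		      BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* Step 6: re-arm the BSM for boots triggered by power management */
	iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
				 BSM_WR_CTRL_REG_BIT_START_EN);
	return 0;
}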
229#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
new file mode 100644
index 000000000000..b576ff24eb4f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -0,0 +1,91 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_spectrum_h__
30#define __iwl_spectrum_h__
31enum { /* ieee80211_basic_report.map */
32 IEEE80211_BASIC_MAP_BSS = (1 << 0),
33 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
34 IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
35 IEEE80211_BASIC_MAP_RADAR = (1 << 3),
36 IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
37 /* Bits 5-7 are reserved */
38
39};
40struct ieee80211_basic_report {
41 u8 channel;
42 __le64 start_time;
43 __le16 duration;
44 u8 map;
45} __attribute__ ((packed));
46
47enum { /* ieee80211_measurement_request.mode */
48 /* Bit 0 is reserved */
49 IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
50 IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
51 IEEE80211_MEASUREMENT_REPORT = (1 << 3),
52 /* Bits 4-7 are reserved */
53};
54
55enum {
56 IEEE80211_REPORT_BASIC = 0, /* required */
57 IEEE80211_REPORT_CCA = 1, /* optional */
58 IEEE80211_REPORT_RPI = 2, /* optional */
59 /* 3-255 reserved */
60};
61
62struct ieee80211_measurement_params {
63 u8 channel;
64 __le64 start_time;
65 __le16 duration;
66} __attribute__ ((packed));
67
68struct ieee80211_info_element {
69 u8 id;
70 u8 len;
71 u8 data[0];
72} __attribute__ ((packed));
73
74struct ieee80211_measurement_request {
75 struct ieee80211_info_element ie;
76 u8 token;
77 u8 mode;
78 u8 type;
79 struct ieee80211_measurement_params params[0];
80} __attribute__ ((packed));
81
82struct ieee80211_measurement_report {
83 struct ieee80211_info_element ie;
84 u8 token;
85 u8 mode;
86 u8 type;
87 union {
88 struct ieee80211_basic_report basic[0];
89 } u;
90} __attribute__ ((packed));
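/*
 * Editorial sketch (not part of this patch): populating a basic measurement
 * request with the structures above.  It assumes @req was allocated with
 * room for one ieee80211_measurement_params entry after it; the element ID
 * literal 38 (the 802.11h Measurement Request element) is written directly
 * rather than through a named constant, and the mode flags are left clear
 * purely for illustration.
 */
static inline void iwl_fill_basic_measure_req_sketch(
				struct ieee80211_measurement_request *req,
				u8 token, u8 channel,
				__le64 start_time, __le16 duration)
{
	req->ie.id = 38;	/* Measurement Request element ID */
	req->ie.len = 3 + sizeof(struct ieee80211_measurement_params);
	req->token = token;
	req->mode = 0;		/* see the mode flag enum above */
	req->type = IEEE80211_REPORT_BASIC;
	req->params[0].channel = channel;
	req->params[0].start_time = start_time;
	req->params[0].duration = duration;
}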
91#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
new file mode 100644
index 000000000000..474b6402040c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -0,0 +1,8732 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30/*
31 * NOTE: This file (iwl-base.c) is used to build for multiple hardware targets
32 * by defining IWL to either 3945 or 4965. The Makefile used when building
33 * the base targets will create base-3945.o and base-4965.o
34 *
35 * The eventual goal is to move as many of the #if IWL / #endif blocks out of
36 * this file and into the hardware specific implementation files (iwl-XXXX.c)
37 * and leave only the common (non #ifdef sprinkled) code in this file
38 */
39
40#include <linux/kernel.h>
41#include <linux/module.h>
42#include <linux/version.h>
43#include <linux/init.h>
44#include <linux/pci.h>
45#include <linux/dma-mapping.h>
46#include <linux/delay.h>
47#include <linux/skbuff.h>
48#include <linux/netdevice.h>
49#include <linux/wireless.h>
50#include <linux/firmware.h>
53#include <linux/etherdevice.h>
54#include <linux/if_arp.h>
55
56#include <net/ieee80211_radiotap.h>
57#include <net/mac80211.h>
58
59#include <asm/div64.h>
60
61#include "iwlwifi.h"
62#include "iwl-3945.h"
63#include "iwl-helpers.h"
64
65#ifdef CONFIG_IWLWIFI_DEBUG
66u32 iwl_debug_level;
67#endif
68
69/******************************************************************************
70 *
71 * module boilerplate
72 *
73 ******************************************************************************/
74
75/* module parameters */
76int iwl_param_disable_hw_scan;
77int iwl_param_debug;
78int iwl_param_disable; /* def: enable radio */
79int iwl_param_antenna; /* def: 0 = both antennas (use diversity) */
80int iwl_param_hwcrypto; /* def: using software encryption */
81int iwl_param_qos_enable = 1;
82int iwl_param_queues_num = IWL_MAX_NUM_QUEUES;
83
84/*
85 * module name, copyright, version, etc.
86 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
87 */
88
89#define DRV_DESCRIPTION \
90"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
91
92#ifdef CONFIG_IWLWIFI_DEBUG
93#define VD "d"
94#else
95#define VD
96#endif
97
98#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
99#define VS "s"
100#else
101#define VS
102#endif
103
104#define IWLWIFI_VERSION "0.1.15k" VD VS
105#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
106#define DRV_VERSION IWLWIFI_VERSION
107
108/* Change firmware file name, using "-" and incrementing number,
109 * *only* when uCode interface or architecture changes so that it
110 * is not compatible with earlier drivers.
111 * This number will also appear in the << 8 position of the 1st dword of the uCode file */
112#define IWL3945_UCODE_API "-1"
113
114MODULE_DESCRIPTION(DRV_DESCRIPTION);
115MODULE_VERSION(DRV_VERSION);
116MODULE_AUTHOR(DRV_COPYRIGHT);
117MODULE_LICENSE("GPL");
118
119__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
120{
121 u16 fc = le16_to_cpu(hdr->frame_control);
122 int hdr_len = ieee80211_get_hdrlen(fc);
123
124 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
125 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
126 return NULL;
127}
128
129static const struct ieee80211_hw_mode *iwl_get_hw_mode(
130 struct iwl_priv *priv, int mode)
131{
132 int i;
133
134 for (i = 0; i < 3; i++)
135 if (priv->modes[i].mode == mode)
136 return &priv->modes[i];
137
138 return NULL;
139}
140
141static int iwl_is_empty_essid(const char *essid, int essid_len)
142{
143 /* Single white space is for Linksys APs */
144 if (essid_len == 1 && essid[0] == ' ')
145 return 1;
146
147 /* Otherwise, if the entire essid is 0, we assume it is hidden */
148 while (essid_len) {
149 essid_len--;
150 if (essid[essid_len] != '\0')
151 return 0;
152 }
153
154 return 1;
155}
156
157static const char *iwl_escape_essid(const char *essid, u8 essid_len)
158{
159 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
160 const char *s = essid;
161 char *d = escaped;
162
163 if (iwl_is_empty_essid(essid, essid_len)) {
164 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
165 return escaped;
166 }
167
168 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
169 while (essid_len--) {
170 if (*s == '\0') {
171 *d++ = '\\';
172 *d++ = '0';
173 s++;
174 } else
175 *d++ = *s++;
176 }
177 *d = '\0';
178 return escaped;
179}
180
181static void iwl_print_hex_dump(int level, void *p, u32 len)
182{
183#ifdef CONFIG_IWLWIFI_DEBUG
184 if (!(iwl_debug_level & level))
185 return;
186
187 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
188 p, len, 1);
189#endif
190}
191
192/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
193 * DMA services
194 *
195 * Theory of operation
196 *
197 * A queue is a circular buffer with 'Read' and 'Write' pointers.
198 * Two empty entries are always kept in the buffer to protect against overflow.
199 *
200 * For a Tx queue there are low-mark and high-mark limits. If, after queuing
201 * a packet for Tx, the free space drops below the low mark, the Tx queue is
202 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and free space
203 * rises above the high mark, the Tx queue is resumed.
204 *
205 * The device operates with six queues: one receive queue in the device's
206 * sram, one transmit queue for sending commands to the device firmware,
207 * and four transmit queues for data.
208 ***************************************************/
209
210static int iwl_queue_space(const struct iwl_queue *q)
211{
212 int s = q->last_used - q->first_empty;
213
214 if (q->last_used > q->first_empty)
215 s -= q->n_bd;
216
217 if (s <= 0)
218 s += q->n_window;
219 /* keep some reserve to not confuse empty and full situations */
220 s -= 2;
221 if (s < 0)
222 s = 0;
223 return s;
224}
225
226/* XXX: n_bd must be power-of-two size */
227static inline int iwl_queue_inc_wrap(int index, int n_bd)
228{
229 return ++index & (n_bd - 1);
230}
231
232/* XXX: n_bd must be power-of-two size */
233static inline int iwl_queue_dec_wrap(int index, int n_bd)
234{
235 return --index & (n_bd - 1);
236}
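/*
 * Editorial note: with n_bd = 256, iwl_queue_inc_wrap(255, 256) == 0 and
 * iwl_queue_dec_wrap(0, 256) == 255; the mask only acts as a wrap-around
 * when n_bd is a power of two, hence the BUG_ON() in iwl_queue_init().
 */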
237
238static inline int x2_queue_used(const struct iwl_queue *q, int i)
239{
240 return q->first_empty > q->last_used ?
241 (i >= q->last_used && i < q->first_empty) :
242 !(i < q->last_used && i >= q->first_empty);
243}
244
245static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
246{
247 if (is_huge)
248 return q->n_window;
249
250 return index & (q->n_window - 1);
251}
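/*
 * Editorial note: with n_window = 8, get_cmd_index(q, 13, 0) == 5, while a
 * huge command always maps to index 8, i.e. the start of the extra
 * IWL_MAX_SCAN_SIZE region that iwl_tx_queue_init() allocates beyond the
 * normal n_window command slots.
 */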
252
253static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
254 int count, int slots_num, u32 id)
255{
256 q->n_bd = count;
257 q->n_window = slots_num;
258 q->id = id;
259
260 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
261 * and iwl_queue_dec_wrap are broken. */
262 BUG_ON(!is_power_of_2(count));
263
264 /* slots_num must be power-of-two size, otherwise
265 * get_cmd_index is broken. */
266 BUG_ON(!is_power_of_2(slots_num));
267
268 q->low_mark = q->n_window / 4;
269 if (q->low_mark < 4)
270 q->low_mark = 4;
271
272 q->high_mark = q->n_window / 8;
273 if (q->high_mark < 2)
274 q->high_mark = 2;
275
276 q->first_empty = q->last_used = 0;
277
278 return 0;
279}
280
281static int iwl_tx_queue_alloc(struct iwl_priv *priv,
282 struct iwl_tx_queue *txq, u32 id)
283{
284 struct pci_dev *dev = priv->pci_dev;
285
286 if (id != IWL_CMD_QUEUE_NUM) {
287 txq->txb = kmalloc(sizeof(txq->txb[0]) *
288 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
289 if (!txq->txb) {
290			IWL_ERROR("kmalloc for auxiliary BD "
291 "structures failed\n");
292 goto error;
293 }
294 } else
295 txq->txb = NULL;
296
297 txq->bd = pci_alloc_consistent(dev,
298 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
299 &txq->q.dma_addr);
300
301 if (!txq->bd) {
302 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
303 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
304 goto error;
305 }
306 txq->q.id = id;
307
308 return 0;
309
310 error:
311 if (txq->txb) {
312 kfree(txq->txb);
313 txq->txb = NULL;
314 }
315
316 return -ENOMEM;
317}
318
319int iwl_tx_queue_init(struct iwl_priv *priv,
320 struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
321{
322 struct pci_dev *dev = priv->pci_dev;
323 int len;
324 int rc = 0;
325
326	/* Allocate command space plus one big command for scan, since the
327	 * scan command is very large and the system will never have two
328	 * scans in flight at the same time */
329 len = sizeof(struct iwl_cmd) * slots_num;
330 if (txq_id == IWL_CMD_QUEUE_NUM)
331 len += IWL_MAX_SCAN_SIZE;
332 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
333 if (!txq->cmd)
334 return -ENOMEM;
335
336 rc = iwl_tx_queue_alloc(priv, txq, txq_id);
337 if (rc) {
338 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
339
340 return -ENOMEM;
341 }
342 txq->need_update = 0;
343
344 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
345 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
346 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
347 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
348
349 iwl_hw_tx_queue_init(priv, txq);
350
351 return 0;
352}
353
354/**
355 * iwl_tx_queue_free - Deallocate DMA queue.
356 * @txq: Transmit queue to deallocate.
357 *
358 * Empty queue by removing and destroying all BD's.
359 * Free all buffers. txq itself is not freed.
360 *
361 */
362void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
363{
364 struct iwl_queue *q = &txq->q;
365 struct pci_dev *dev = priv->pci_dev;
366 int len;
367
368 if (q->n_bd == 0)
369 return;
370
371 /* first, empty all BD's */
372 for (; q->first_empty != q->last_used;
373 q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd))
374 iwl_hw_txq_free_tfd(priv, txq);
375
376 len = sizeof(struct iwl_cmd) * q->n_window;
377 if (q->id == IWL_CMD_QUEUE_NUM)
378 len += IWL_MAX_SCAN_SIZE;
379
380 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
381
382 /* free buffers belonging to queue itself */
383 if (txq->q.n_bd)
384 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
385 txq->q.n_bd, txq->bd, txq->q.dma_addr);
386
387 if (txq->txb) {
388 kfree(txq->txb);
389 txq->txb = NULL;
390 }
391
392 /* 0 fill whole structure */
393 memset(txq, 0, sizeof(*txq));
394}
395
396const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
397
398/*************** STATION TABLE MANAGEMENT ****
399 *
400 * NOTE: This needs to be overhauled to better synchronize between
401 * how the iwl-4965.c is using iwl_hw_find_station vs. iwl-3945.c
402 *
403 * mac80211 should also be examined to determine if sta_info is duplicating
404 * the functionality provided here
405 */
406
407/**************************************************************/
408static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
409{
410 int index = IWL_INVALID_STATION;
411 int i;
412 unsigned long flags;
413
414 spin_lock_irqsave(&priv->sta_lock, flags);
415
416 if (is_ap)
417 index = IWL_AP_ID;
418 else if (is_broadcast_ether_addr(addr))
419 index = priv->hw_setting.bcast_sta_id;
420 else
421 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
422 if (priv->stations[i].used &&
423 !compare_ether_addr(priv->stations[i].sta.sta.addr,
424 addr)) {
425 index = i;
426 break;
427 }
428
429 if (unlikely(index == IWL_INVALID_STATION))
430 goto out;
431
432 if (priv->stations[index].used) {
433 priv->stations[index].used = 0;
434 priv->num_stations--;
435 }
436
437 BUG_ON(priv->num_stations < 0);
438
439out:
440 spin_unlock_irqrestore(&priv->sta_lock, flags);
441 return 0;
442}
443
444static void iwl_clear_stations_table(struct iwl_priv *priv)
445{
446 unsigned long flags;
447
448 spin_lock_irqsave(&priv->sta_lock, flags);
449
450 priv->num_stations = 0;
451 memset(priv->stations, 0, sizeof(priv->stations));
452
453 spin_unlock_irqrestore(&priv->sta_lock, flags);
454}
455
456
457u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
458{
459 int i;
460 int index = IWL_INVALID_STATION;
461 struct iwl_station_entry *station;
462 unsigned long flags_spin;
463
464 spin_lock_irqsave(&priv->sta_lock, flags_spin);
465 if (is_ap)
466 index = IWL_AP_ID;
467 else if (is_broadcast_ether_addr(addr))
468 index = priv->hw_setting.bcast_sta_id;
469 else
470 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
471 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
472 addr)) {
473 index = i;
474 break;
475 }
476
477 if (!priv->stations[i].used &&
478 index == IWL_INVALID_STATION)
479 index = i;
480 }
481
482	/* These two conditions have the same outcome, but keep them separate
483	   since they have different meanings */
484 if (unlikely(index == IWL_INVALID_STATION)) {
485 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
486 return index;
487 }
488
489 if (priv->stations[index].used &&
490 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
491 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
492 return index;
493 }
494
495 IWL_DEBUG_ASSOC("Add STA ID %d: " MAC_FMT "\n", index, MAC_ARG(addr));
496 station = &priv->stations[index];
497 station->used = 1;
498 priv->num_stations++;
499
500 memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
501 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
502 station->sta.mode = 0;
503 station->sta.sta.sta_id = index;
504 station->sta.station_flags = 0;
505
506 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
507 iwl_send_add_station(priv, &station->sta, flags);
508 return index;
509
510}
511
512/*************** DRIVER STATUS FUNCTIONS *****/
513
514static inline int iwl_is_ready(struct iwl_priv *priv)
515{
516 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
517 * set but EXIT_PENDING is not */
518 return test_bit(STATUS_READY, &priv->status) &&
519 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
520 !test_bit(STATUS_EXIT_PENDING, &priv->status);
521}
522
523static inline int iwl_is_alive(struct iwl_priv *priv)
524{
525 return test_bit(STATUS_ALIVE, &priv->status);
526}
527
528static inline int iwl_is_init(struct iwl_priv *priv)
529{
530 return test_bit(STATUS_INIT, &priv->status);
531}
532
533static inline int iwl_is_rfkill(struct iwl_priv *priv)
534{
535 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
536 test_bit(STATUS_RF_KILL_SW, &priv->status);
537}
538
539static inline int iwl_is_ready_rf(struct iwl_priv *priv)
540{
541
542 if (iwl_is_rfkill(priv))
543 return 0;
544
545 return iwl_is_ready(priv);
546}
547
548/*************** HOST COMMAND QUEUE FUNCTIONS *****/
549
550#define IWL_CMD(x) case x : return #x
551
552static const char *get_cmd_string(u8 cmd)
553{
554 switch (cmd) {
555 IWL_CMD(REPLY_ALIVE);
556 IWL_CMD(REPLY_ERROR);
557 IWL_CMD(REPLY_RXON);
558 IWL_CMD(REPLY_RXON_ASSOC);
559 IWL_CMD(REPLY_QOS_PARAM);
560 IWL_CMD(REPLY_RXON_TIMING);
561 IWL_CMD(REPLY_ADD_STA);
562 IWL_CMD(REPLY_REMOVE_STA);
563 IWL_CMD(REPLY_REMOVE_ALL_STA);
564 IWL_CMD(REPLY_3945_RX);
565 IWL_CMD(REPLY_TX);
566 IWL_CMD(REPLY_RATE_SCALE);
567 IWL_CMD(REPLY_LEDS_CMD);
568 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
569 IWL_CMD(RADAR_NOTIFICATION);
570 IWL_CMD(REPLY_QUIET_CMD);
571 IWL_CMD(REPLY_CHANNEL_SWITCH);
572 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
573 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
574 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
575 IWL_CMD(POWER_TABLE_CMD);
576 IWL_CMD(PM_SLEEP_NOTIFICATION);
577 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
578 IWL_CMD(REPLY_SCAN_CMD);
579 IWL_CMD(REPLY_SCAN_ABORT_CMD);
580 IWL_CMD(SCAN_START_NOTIFICATION);
581 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
582 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
583 IWL_CMD(BEACON_NOTIFICATION);
584 IWL_CMD(REPLY_TX_BEACON);
585 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
586 IWL_CMD(QUIET_NOTIFICATION);
587 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
588 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
589 IWL_CMD(REPLY_BT_CONFIG);
590 IWL_CMD(REPLY_STATISTICS_CMD);
591 IWL_CMD(STATISTICS_NOTIFICATION);
592 IWL_CMD(REPLY_CARD_STATE_CMD);
593 IWL_CMD(CARD_STATE_NOTIFICATION);
594 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
595 default:
596 return "UNKNOWN";
597
598 }
599}
600
601#define HOST_COMPLETE_TIMEOUT (HZ / 2)
602
603/**
604 * iwl_enqueue_hcmd - enqueue a uCode command
605 * @priv: device private data pointer
606 * @cmd: a pointer to the ucode command structure
607 *
608 * The function returns a value < 0 to indicate that the operation
609 * failed. On success, it returns the index of the command in the
610 * command queue.
611 */
612static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
613{
614 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
615 struct iwl_queue *q = &txq->q;
616 struct iwl_tfd_frame *tfd;
617 u32 *control_flags;
618 struct iwl_cmd *out_cmd;
619 u32 idx;
620 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
621 dma_addr_t phys_addr;
622 int pad;
623 u16 count;
624 int ret;
625 unsigned long flags;
626
627 /* If any of the command structures end up being larger than
628	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
629 * we will need to increase the size of the TFD entries */
630 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
631 !(cmd->meta.flags & CMD_SIZE_HUGE));
632
633 if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
634 IWL_ERROR("No space for Tx\n");
635 return -ENOSPC;
636 }
637
638 spin_lock_irqsave(&priv->hcmd_lock, flags);
639
640 tfd = &txq->bd[q->first_empty];
641 memset(tfd, 0, sizeof(*tfd));
642
643 control_flags = (u32 *) tfd;
644
645 idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE);
646 out_cmd = &txq->cmd[idx];
647
648 out_cmd->hdr.cmd = cmd->id;
649 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
650 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
651
652 /* At this point, the out_cmd now has all of the incoming cmd
653 * information */
654
655 out_cmd->hdr.flags = 0;
656 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
657 INDEX_TO_SEQ(q->first_empty));
658 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
659 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
660
661 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
662 offsetof(struct iwl_cmd, hdr);
663 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
664
665 pad = U32_PAD(cmd->len);
666 count = TFD_CTL_COUNT_GET(*control_flags);
667 *control_flags = TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad);
668
669 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
670 "%d bytes at %d[%d]:%d\n",
671 get_cmd_string(out_cmd->hdr.cmd),
672 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
673 fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM);
674
675 txq->need_update = 1;
676 q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
677 ret = iwl_tx_queue_update_write_ptr(priv, txq);
678
679 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
680 return ret ? ret : idx;
681}
682
683int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
684{
685 int ret;
686
687 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
688
689 /* An asynchronous command can not expect an SKB to be set. */
690 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
691
692 /* An asynchronous command MUST have a callback. */
693 BUG_ON(!cmd->meta.u.callback);
694
695 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
696 return -EBUSY;
697
698 ret = iwl_enqueue_hcmd(priv, cmd);
699 if (ret < 0) {
700 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
701 get_cmd_string(cmd->id), ret);
702 return ret;
703 }
704 return 0;
705}
706
707int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
708{
709 int cmd_idx;
710 int ret;
711 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
712
713 BUG_ON(cmd->meta.flags & CMD_ASYNC);
714
715 /* A synchronous command can not have a callback set. */
716 BUG_ON(cmd->meta.u.callback != NULL);
717
718 if (atomic_xchg(&entry, 1)) {
719 IWL_ERROR("Error sending %s: Already sending a host command\n",
720 get_cmd_string(cmd->id));
721 return -EBUSY;
722 }
723
724 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
725
726 if (cmd->meta.flags & CMD_WANT_SKB)
727 cmd->meta.source = &cmd->meta;
728
729 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
730 if (cmd_idx < 0) {
731 ret = cmd_idx;
732 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
733 get_cmd_string(cmd->id), ret);
734 goto out;
735 }
736
737 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
738 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
739 HOST_COMPLETE_TIMEOUT);
740 if (!ret) {
741 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
742 IWL_ERROR("Error sending %s: time out after %dms.\n",
743 get_cmd_string(cmd->id),
744 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
745
746 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
747 ret = -ETIMEDOUT;
748 goto cancel;
749 }
750 }
751
752 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
753 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
754 get_cmd_string(cmd->id));
755 ret = -ECANCELED;
756 goto fail;
757 }
758 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
759 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
760 get_cmd_string(cmd->id));
761 ret = -EIO;
762 goto fail;
763 }
764 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
765 IWL_ERROR("Error: Response NULL in '%s'\n",
766 get_cmd_string(cmd->id));
767 ret = -EIO;
768 goto out;
769 }
770
771 ret = 0;
772 goto out;
773
774cancel:
775 if (cmd->meta.flags & CMD_WANT_SKB) {
776 struct iwl_cmd *qcmd;
777
778		/* Cancel the CMD_WANT_SKB flag for the cmd in the
779		 * TX cmd queue. Otherwise, if the response comes
780		 * in later, it could write through an invalid
781		 * address (cmd->meta.source). */
782 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
783 qcmd->meta.flags &= ~CMD_WANT_SKB;
784 }
785fail:
786 if (cmd->meta.u.skb) {
787 dev_kfree_skb_any(cmd->meta.u.skb);
788 cmd->meta.u.skb = NULL;
789 }
790out:
791 atomic_set(&entry, 0);
792 return ret;
793}
794
795int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
796{
797 /* A command can not be asynchronous AND expect an SKB to be set. */
798 BUG_ON((cmd->meta.flags & CMD_ASYNC) &&
799 (cmd->meta.flags & CMD_WANT_SKB));
800
801 if (cmd->meta.flags & CMD_ASYNC)
802 return iwl_send_cmd_async(priv, cmd);
803
804 return iwl_send_cmd_sync(priv, cmd);
805}
806
807int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
808{
809 struct iwl_host_cmd cmd = {
810 .id = id,
811 .len = len,
812 .data = data,
813 };
814
815 return iwl_send_cmd_sync(priv, &cmd);
816}
817
818static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val)
819{
820 struct iwl_host_cmd cmd = {
821 .id = id,
822 .len = sizeof(val),
823 .data = &val,
824 };
825
826 return iwl_send_cmd_sync(priv, &cmd);
827}
828
829int iwl_send_statistics_request(struct iwl_priv *priv)
830{
831 return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
832}
833
834/**
835 * iwl_rxon_add_station - add station into station table.
836 *
837 * There is only one AP station with id = IWL_AP_ID.
838 * NOTE: mutex must be held before calling this function
839*/
840static int iwl_rxon_add_station(struct iwl_priv *priv,
841 const u8 *addr, int is_ap)
842{
843 u8 rc;
844
845 /* Remove this station if it happens to already exist */
846 iwl_remove_station(priv, addr, is_ap);
847
848 rc = iwl_add_station(priv, addr, is_ap, 0);
849
850 return rc;
851}
852
853/**
854 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
855 * @phymode: MODE_IEEE80211A selects the 5.2 GHz band; all else selects 2.4 GHz
856 * @channel: Any channel valid for the requested phymode
857 *
858 * In addition to setting the staging RXON, priv->phymode is also set.
859 *
860 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
861 * in the staging RXON flag structure based on the phymode
862 */
863static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel)
864{
865 if (!iwl_get_channel_info(priv, phymode, channel)) {
866 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
867 channel, phymode);
868 return -EINVAL;
869 }
870
871 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
872 (priv->phymode == phymode))
873 return 0;
874
875 priv->staging_rxon.channel = cpu_to_le16(channel);
876 if (phymode == MODE_IEEE80211A)
877 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
878 else
879 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
880
881 priv->phymode = phymode;
882
883 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
884
885 return 0;
886}
887
888/**
889 * iwl_check_rxon_cmd - check that the RXON structure is valid
890 *
891 * NOTE: This is really only useful during development and can eventually
892 * be #ifdef'd out once the driver is stable and folks aren't actively
893 * making changes
894 */
895static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
896{
897 int error = 0;
898 int counter = 1;
899
900 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
901 error |= le32_to_cpu(rxon->flags &
902 (RXON_FLG_TGJ_NARROW_BAND_MSK |
903 RXON_FLG_RADAR_DETECT_MSK));
904 if (error)
905 IWL_WARNING("check 24G fields %d | %d\n",
906 counter++, error);
907 } else {
908 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
909 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
910 if (error)
911 IWL_WARNING("check 52 fields %d | %d\n",
912 counter++, error);
913 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
914 if (error)
915 IWL_WARNING("check 52 CCK %d | %d\n",
916 counter++, error);
917 }
918 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
919 if (error)
920 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
921
922 /* make sure basic rates 6Mbps and 1Mbps are supported */
923 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
924 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
925 if (error)
926 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
927
928 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
929 if (error)
930 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
931
932 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
933 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
934 if (error)
935 IWL_WARNING("check CCK and short slot %d | %d\n",
936 counter++, error);
937
938 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
939 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
940 if (error)
941 IWL_WARNING("check CCK & auto detect %d | %d\n",
942 counter++, error);
943
944 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
945 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
946 if (error)
947 IWL_WARNING("check TGG and auto detect %d | %d\n",
948 counter++, error);
949
950 if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
951 error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
952 RXON_FLG_ANT_A_MSK)) == 0);
953 if (error)
954 IWL_WARNING("check antenna %d %d\n", counter++, error);
955
956 if (error)
957 IWL_WARNING("Tuning to channel %d\n",
958 le16_to_cpu(rxon->channel));
959
960 if (error) {
961 IWL_ERROR("Not a valid iwl_rxon_assoc_cmd field values\n");
962 return -1;
963 }
964 return 0;
965}
966
967/**
968 * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit
969 * @priv: staging_rxon is compared to active_rxon
970 *
971 * If the RXON structure is changing sufficiently to require a new
972 * tune, or to clear and reset the RXON_FILTER_ASSOC_MSK, then return 1
973 * to indicate that a new tune is required.
974 */
975static int iwl_full_rxon_required(struct iwl_priv *priv)
976{
977
978 /* These items are only settable from the full RXON command */
979 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
980 compare_ether_addr(priv->staging_rxon.bssid_addr,
981 priv->active_rxon.bssid_addr) ||
982 compare_ether_addr(priv->staging_rxon.node_addr,
983 priv->active_rxon.node_addr) ||
984 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
985 priv->active_rxon.wlap_bssid_addr) ||
986 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
987 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
988 (priv->staging_rxon.air_propagation !=
989 priv->active_rxon.air_propagation) ||
990 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
991 return 1;
992
993 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
994 * be updated with the RXON_ASSOC command -- however only some
995 * flag transitions are allowed using RXON_ASSOC */
996
997 /* Check if we are not switching bands */
998 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
999 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1000 return 1;
1001
1002 /* Check if we are switching association toggle */
1003 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1004 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1005 return 1;
1006
1007 return 0;
1008}
1009
1010static int iwl_send_rxon_assoc(struct iwl_priv *priv)
1011{
1012 int rc = 0;
1013 struct iwl_rx_packet *res = NULL;
1014 struct iwl_rxon_assoc_cmd rxon_assoc;
1015 struct iwl_host_cmd cmd = {
1016 .id = REPLY_RXON_ASSOC,
1017 .len = sizeof(rxon_assoc),
1018 .meta.flags = CMD_WANT_SKB,
1019 .data = &rxon_assoc,
1020 };
1021 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1022 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1023
1024 if ((rxon1->flags == rxon2->flags) &&
1025 (rxon1->filter_flags == rxon2->filter_flags) &&
1026 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1027 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1028 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1029 return 0;
1030 }
1031
1032 rxon_assoc.flags = priv->staging_rxon.flags;
1033 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1034 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1035 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1036 rxon_assoc.reserved = 0;
1037
1038 rc = iwl_send_cmd_sync(priv, &cmd);
1039 if (rc)
1040 return rc;
1041
1042 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1043 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1044 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1045 rc = -EIO;
1046 }
1047
1048 priv->alloc_rxb_skb--;
1049 dev_kfree_skb_any(cmd.meta.u.skb);
1050
1051 return rc;
1052}
1053
1054/**
1055 * iwl_commit_rxon - commit staging_rxon to hardware
1056 *
1057 * The RXON command in staging_rxon is committed to the hardware and
1058 * the active_rxon structure is updated with the new data. This
1059 * function correctly transitions out of the RXON_ASSOC_MSK state if
1060 * a HW tune is required based on the RXON structure changes.
1061 */
1062static int iwl_commit_rxon(struct iwl_priv *priv)
1063{
1064 /* cast away the const for active_rxon in this function */
1065 struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1066 int rc = 0;
1067
1068 if (!iwl_is_alive(priv))
1069 return -1;
1070
1071 /* always get timestamp with Rx frame */
1072 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1073
1074 /* select antenna */
1075 priv->staging_rxon.flags &=
1076 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1077 priv->staging_rxon.flags |= iwl3945_get_antenna_flags(priv);
1078
1079 rc = iwl_check_rxon_cmd(&priv->staging_rxon);
1080 if (rc) {
1081 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
1082 return -EINVAL;
1083 }
1084
1085 /* If we don't need to send a full RXON, we can use
1086 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1087 * and other flags for the current radio configuration. */
1088 if (!iwl_full_rxon_required(priv)) {
1089 rc = iwl_send_rxon_assoc(priv);
1090 if (rc) {
1091 IWL_ERROR("Error setting RXON_ASSOC "
1092 "configuration (%d).\n", rc);
1093 return rc;
1094 }
1095
1096 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1097
1098 return 0;
1099 }
1100
1101 /* If we are currently associated and the new config requires
1102 * an RXON_ASSOC and the new config wants the associated mask enabled,
1103	 * we must clear the associated bit from the active configuration
1104 * before we apply the new config */
1105 if (iwl_is_associated(priv) &&
1106 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1107 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1108 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1109
1110 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1111 sizeof(struct iwl_rxon_cmd),
1112 &priv->active_rxon);
1113
1114 /* If the mask clearing failed then we set
1115 * active_rxon back to what it was previously */
1116 if (rc) {
1117 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1118 IWL_ERROR("Error clearing ASSOC_MSK on current "
1119 "configuration (%d).\n", rc);
1120 return rc;
1121 }
1122
1123 /* The RXON bit toggling will have cleared out the
1124 * station table in the uCode, so blank it in the driver
1125 * as well */
1126 iwl_clear_stations_table(priv);
1127 } else if (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) {
1128 /* When switching from non-associated to associated, the
1129 * uCode clears out the station table; so clear it in the
1130 * driver as well */
1131 iwl_clear_stations_table(priv);
1132 }
1133
1134 IWL_DEBUG_INFO("Sending RXON\n"
1135 "* with%s RXON_FILTER_ASSOC_MSK\n"
1136 "* channel = %d\n"
1137 "* bssid = " MAC_FMT "\n",
1138 ((priv->staging_rxon.filter_flags &
1139 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1140 le16_to_cpu(priv->staging_rxon.channel),
1141 MAC_ARG(priv->staging_rxon.bssid_addr));
1142
1143 /* Apply the new configuration */
1144 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1145 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
1146 if (rc) {
1147 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1148 return rc;
1149 }
1150
1151 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1152
1153 /* If we issue a new RXON command which required a tune then we must
1154 * send a new TXPOWER command or we won't be able to Tx any frames */
1155 rc = iwl_hw_reg_send_txpower(priv);
1156 if (rc) {
1157 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1158 return rc;
1159 }
1160
1161 /* Add the broadcast address so we can send broadcast frames */
1162 if (iwl_rxon_add_station(priv, BROADCAST_ADDR, 0) ==
1163 IWL_INVALID_STATION) {
1164 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1165 return -EIO;
1166 }
1167
1168 /* If we have set the ASSOC_MSK and we are in BSS mode then
1169 * add the IWL_AP_ID to the station rate table */
1170 if (iwl_is_associated(priv) &&
1171 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
1172 if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1173 == IWL_INVALID_STATION) {
1174 IWL_ERROR("Error adding AP address for transmit.\n");
1175 return -EIO;
1176 }
1177
1178 /* Init the hardware's rate fallback order based on the
1179 * phymode */
1180 rc = iwl3945_init_hw_rate_table(priv);
1181 if (rc) {
1182 IWL_ERROR("Error setting HW rate table: %02X\n", rc);
1183 return -EIO;
1184 }
1185
1186 return 0;
1187}
1188
1189static int iwl_send_bt_config(struct iwl_priv *priv)
1190{
1191 struct iwl_bt_cmd bt_cmd = {
1192 .flags = 3,
1193 .lead_time = 0xAA,
1194 .max_kill = 1,
1195 .kill_ack_mask = 0,
1196 .kill_cts_mask = 0,
1197 };
1198
1199 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1200 sizeof(struct iwl_bt_cmd), &bt_cmd);
1201}
1202
1203static int iwl_send_scan_abort(struct iwl_priv *priv)
1204{
1205 int rc = 0;
1206 struct iwl_rx_packet *res;
1207 struct iwl_host_cmd cmd = {
1208 .id = REPLY_SCAN_ABORT_CMD,
1209 .meta.flags = CMD_WANT_SKB,
1210 };
1211
1212 /* If there isn't a scan actively going on in the hardware
1213 * then we are in between scan bands and not actually
1214 * actively scanning, so don't send the abort command */
1215 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1216 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1217 return 0;
1218 }
1219
1220 rc = iwl_send_cmd_sync(priv, &cmd);
1221 if (rc) {
1222 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1223 return rc;
1224 }
1225
1226 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1227 if (res->u.status != CAN_ABORT_STATUS) {
1228 /* The scan abort will return 1 for success or
1229 * 2 for "failure". A failure condition can be
1230 * due to simply not being in an active scan which
1231	 * can occur if we send the scan abort before the
1232	 * microcode has notified us that a scan is
1233 * completed. */
1234 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1235 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1236 clear_bit(STATUS_SCAN_HW, &priv->status);
1237 }
1238
1239 dev_kfree_skb_any(cmd.meta.u.skb);
1240
1241 return rc;
1242}
1243
1244static int iwl_card_state_sync_callback(struct iwl_priv *priv,
1245 struct iwl_cmd *cmd,
1246 struct sk_buff *skb)
1247{
1248 return 1;
1249}
1250
1251/*
1252 * CARD_STATE_CMD
1253 *
1254 * Use: Sets the internal card state to enable, disable, or halt
1255 *
1256 * When in the 'enable' state the card operates as normal.
1257 * When in the 'disable' state, the card enters into a low power mode.
1258 * When in the 'halt' state, the card is shut down and must be fully
1259 * restarted to come back on.
1260 */
1261static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1262{
1263 struct iwl_host_cmd cmd = {
1264 .id = REPLY_CARD_STATE_CMD,
1265 .len = sizeof(u32),
1266 .data = &flags,
1267 .meta.flags = meta_flag,
1268 };
1269
1270 if (meta_flag & CMD_ASYNC)
1271 cmd.meta.u.callback = iwl_card_state_sync_callback;
1272
1273 return iwl_send_cmd(priv, &cmd);
1274}
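/*
 * Editorial example: a synchronous request to drop the card into its low
 * power 'disable' state would look roughly like
 *
 *	iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
 *
 * where CARD_STATE_CMD_DISABLE stands in for whatever flag value
 * iwl-commands.h defines for the 'disable' state (the name is used here
 * only for illustration).
 */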
1275
1276static int iwl_add_sta_sync_callback(struct iwl_priv *priv,
1277 struct iwl_cmd *cmd, struct sk_buff *skb)
1278{
1279 struct iwl_rx_packet *res = NULL;
1280
1281 if (!skb) {
1282 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1283 return 1;
1284 }
1285
1286 res = (struct iwl_rx_packet *)skb->data;
1287 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1288 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1289 res->hdr.flags);
1290 return 1;
1291 }
1292
1293 switch (res->u.add_sta.status) {
1294 case ADD_STA_SUCCESS_MSK:
1295 break;
1296 default:
1297 break;
1298 }
1299
1300 /* We didn't cache the SKB; let the caller free it */
1301 return 1;
1302}
1303
1304int iwl_send_add_station(struct iwl_priv *priv,
1305 struct iwl_addsta_cmd *sta, u8 flags)
1306{
1307 struct iwl_rx_packet *res = NULL;
1308 int rc = 0;
1309 struct iwl_host_cmd cmd = {
1310 .id = REPLY_ADD_STA,
1311 .len = sizeof(struct iwl_addsta_cmd),
1312 .meta.flags = flags,
1313 .data = sta,
1314 };
1315
1316 if (flags & CMD_ASYNC)
1317 cmd.meta.u.callback = iwl_add_sta_sync_callback;
1318 else
1319 cmd.meta.flags |= CMD_WANT_SKB;
1320
1321 rc = iwl_send_cmd(priv, &cmd);
1322
1323 if (rc || (flags & CMD_ASYNC))
1324 return rc;
1325
1326 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1327 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1328 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1329 res->hdr.flags);
1330 rc = -EIO;
1331 }
1332
1333 if (rc == 0) {
1334 switch (res->u.add_sta.status) {
1335 case ADD_STA_SUCCESS_MSK:
1336 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1337 break;
1338 default:
1339 rc = -EIO;
1340 IWL_WARNING("REPLY_ADD_STA failed\n");
1341 break;
1342 }
1343 }
1344
1345 priv->alloc_rxb_skb--;
1346 dev_kfree_skb_any(cmd.meta.u.skb);
1347
1348 return rc;
1349}
1350
1351static int iwl_update_sta_key_info(struct iwl_priv *priv,
1352 struct ieee80211_key_conf *keyconf,
1353 u8 sta_id)
1354{
1355 unsigned long flags;
1356 __le16 key_flags = 0;
1357
1358 switch (keyconf->alg) {
1359 case ALG_CCMP:
1360 key_flags |= STA_KEY_FLG_CCMP;
1361 key_flags |= cpu_to_le16(
1362 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1363 key_flags &= ~STA_KEY_FLG_INVALID;
1364 break;
1365 case ALG_TKIP:
1366 case ALG_WEP:
1367 return -EINVAL;
1368 default:
1369 return -EINVAL;
1370 }
1371 spin_lock_irqsave(&priv->sta_lock, flags);
1372 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1373 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1374 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1375 keyconf->keylen);
1376
1377 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1378 keyconf->keylen);
1379 priv->stations[sta_id].sta.key.key_flags = key_flags;
1380 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1381 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1382
1383 spin_unlock_irqrestore(&priv->sta_lock, flags);
1384
1385 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1386 iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1387 return 0;
1388}
1389
1390static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
1391{
1392 unsigned long flags;
1393
1394 spin_lock_irqsave(&priv->sta_lock, flags);
1395 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
1396 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo));
1397 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1398 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1399 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1400 spin_unlock_irqrestore(&priv->sta_lock, flags);
1401
1402 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1403 iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1404 return 0;
1405}
1406
1407static void iwl_clear_free_frames(struct iwl_priv *priv)
1408{
1409 struct list_head *element;
1410
1411 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1412 priv->frames_count);
1413
1414 while (!list_empty(&priv->free_frames)) {
1415 element = priv->free_frames.next;
1416 list_del(element);
1417 kfree(list_entry(element, struct iwl_frame, list));
1418 priv->frames_count--;
1419 }
1420
1421 if (priv->frames_count) {
1422 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1423 priv->frames_count);
1424 priv->frames_count = 0;
1425 }
1426}
1427
1428static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
1429{
1430 struct iwl_frame *frame;
1431 struct list_head *element;
1432 if (list_empty(&priv->free_frames)) {
1433 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1434 if (!frame) {
1435 IWL_ERROR("Could not allocate frame!\n");
1436 return NULL;
1437 }
1438
1439 priv->frames_count++;
1440 return frame;
1441 }
1442
1443 element = priv->free_frames.next;
1444 list_del(element);
1445 return list_entry(element, struct iwl_frame, list);
1446}
1447
1448static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
1449{
1450 memset(frame, 0, sizeof(*frame));
1451 list_add(&frame->list, &priv->free_frames);
1452}
1453
1454unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
1455 struct ieee80211_hdr *hdr,
1456 const u8 *dest, int left)
1457{
1458
1459 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
1460 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1461 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1462 return 0;
1463
1464 if (priv->ibss_beacon->len > left)
1465 return 0;
1466
1467 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1468
1469 return priv->ibss_beacon->len;
1470}
1471
1472static int iwl_rate_index_from_plcp(int plcp)
1473{
1474 int i = 0;
1475
1476 for (i = 0; i < IWL_RATE_COUNT; i++)
1477 if (iwl_rates[i].plcp == plcp)
1478 return i;
1479 return -1;
1480}
1481
1482static u8 iwl_rate_get_lowest_plcp(int rate_mask)
1483{
1484 u8 i;
1485
1486 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1487 i = iwl_rates[i].next_ieee) {
1488 if (rate_mask & (1 << i))
1489 return iwl_rates[i].plcp;
1490 }
1491
1492 return IWL_RATE_INVALID;
1493}
1494
1495static int iwl_send_beacon_cmd(struct iwl_priv *priv)
1496{
1497 struct iwl_frame *frame;
1498 unsigned int frame_size;
1499 int rc;
1500 u8 rate;
1501
1502 frame = iwl_get_free_frame(priv);
1503
1504 if (!frame) {
1505 IWL_ERROR("Could not obtain free frame buffer for beacon "
1506 "command.\n");
1507 return -ENOMEM;
1508 }
1509
1510 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1511 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic &
1512 0xFF0);
1513 if (rate == IWL_INVALID_RATE)
1514 rate = IWL_RATE_6M_PLCP;
1515 } else {
1516 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1517 if (rate == IWL_INVALID_RATE)
1518 rate = IWL_RATE_1M_PLCP;
1519 }
1520
1521 frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate);
1522
1523 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1524 &frame->u.cmd[0]);
1525
1526 iwl_free_frame(priv, frame);
1527
1528 return rc;
1529}
1530
1531/******************************************************************************
1532 *
1533 * EEPROM related functions
1534 *
1535 ******************************************************************************/
1536
1537static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
1538{
1539 memcpy(mac, priv->eeprom.mac_address, 6);
1540}
1541
1542/**
1543 * iwl_eeprom_init - read EEPROM contents
1544 *
1545 * Load the EEPROM from adapter into priv->eeprom
1546 *
1547 * NOTE: This routine uses the non-debug IO access functions.
1548 */
1549int iwl_eeprom_init(struct iwl_priv *priv)
1550{
1551 u16 *e = (u16 *)&priv->eeprom;
1552 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
1553 u32 r;
1554 int sz = sizeof(priv->eeprom);
1555 int rc;
1556 int i;
1557 u16 addr;
1558
1559	/* The EEPROM structure has several padding buffers within it,
1560	 * and adding new EEPROM maps is subject to programmer errors
1561 * which may be very difficult to identify without explicitly
1562 * checking the resulting size of the eeprom map. */
1563 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1564
1565 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1566 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1567 return -ENOENT;
1568 }
1569
1570 rc = iwl_eeprom_aqcuire_semaphore(priv);
1571 if (rc < 0) {
1572		IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1573 return -ENOENT;
1574 }
1575
1576 /* eeprom is an array of 16bit values */
1577 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1578 _iwl_write32(priv, CSR_EEPROM_REG, addr << 1);
1579 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1580
1581 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1582 i += IWL_EEPROM_ACCESS_DELAY) {
1583 r = _iwl_read_restricted(priv, CSR_EEPROM_REG);
1584 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1585 break;
1586 udelay(IWL_EEPROM_ACCESS_DELAY);
1587 }
1588
1589 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1590 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1591 return -ETIMEDOUT;
1592 }
1593 e[addr / 2] = le16_to_cpu(r >> 16);
1594 }
1595
1596 return 0;
1597}
1598
1599/******************************************************************************
1600 *
1601 * Misc. internal state and helper functions
1602 *
1603 ******************************************************************************/
1604#ifdef CONFIG_IWLWIFI_DEBUG
1605
1606/**
1607 * iwl_report_frame - dump frame to syslog during debug sessions
1608 *
1609 * hack this function to show different aspects of received frames,
1610 * including selective frame dumps.
1611 * group100 parameter selects whether to show 1 out of 100 good frames.
1612 *
1613 * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type
1614 * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats)
1615 * is 3945-specific and gives bad output for 4965. Need to split the
1616 * functionality, keep common stuff here.
1617 */
1618void iwl_report_frame(struct iwl_priv *priv,
1619 struct iwl_rx_packet *pkt,
1620 struct ieee80211_hdr *header, int group100)
1621{
1622 u32 to_us;
1623 u32 print_summary = 0;
1624 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1625 u32 hundred = 0;
1626 u32 dataframe = 0;
1627 u16 fc;
1628 u16 seq_ctl;
1629 u16 channel;
1630 u16 phy_flags;
1631 int rate_sym;
1632 u16 length;
1633 u16 status;
1634 u16 bcn_tmr;
1635 u32 tsf_low;
1636 u64 tsf;
1637 u8 rssi;
1638 u8 agc;
1639 u16 sig_avg;
1640 u16 noise_diff;
1641 struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1642 struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1643 struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt);
1644 u8 *data = IWL_RX_DATA(pkt);
1645
1646 /* MAC header */
1647 fc = le16_to_cpu(header->frame_control);
1648 seq_ctl = le16_to_cpu(header->seq_ctrl);
1649
1650 /* metadata */
1651 channel = le16_to_cpu(rx_hdr->channel);
1652 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1653 rate_sym = rx_hdr->rate;
1654 length = le16_to_cpu(rx_hdr->len);
1655
1656 /* end-of-frame status and timestamp */
1657 status = le32_to_cpu(rx_end->status);
1658 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1659 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1660 tsf = le64_to_cpu(rx_end->timestamp);
1661
1662 /* signal statistics */
1663 rssi = rx_stats->rssi;
1664 agc = rx_stats->agc;
1665 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1666 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1667
1668 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1669
1670 /* if data frame is to us and all is good,
1671 * (optionally) print summary for only 1 out of every 100 */
1672 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1673 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1674 dataframe = 1;
1675 if (!group100)
1676 print_summary = 1; /* print each frame */
1677 else if (priv->framecnt_to_us < 100) {
1678 priv->framecnt_to_us++;
1679 print_summary = 0;
1680 } else {
1681 priv->framecnt_to_us = 0;
1682 print_summary = 1;
1683 hundred = 1;
1684 }
1685 } else {
1686 /* print summary for all other frames */
1687 print_summary = 1;
1688 }
1689
1690 if (print_summary) {
1691 char *title;
1692 u32 rate;
1693
1694 if (hundred)
1695 title = "100Frames";
1696 else if (fc & IEEE80211_FCTL_RETRY)
1697 title = "Retry";
1698 else if (ieee80211_is_assoc_response(fc))
1699 title = "AscRsp";
1700 else if (ieee80211_is_reassoc_response(fc))
1701 title = "RasRsp";
1702 else if (ieee80211_is_probe_response(fc)) {
1703 title = "PrbRsp";
1704 print_dump = 1; /* dump frame contents */
1705 } else if (ieee80211_is_beacon(fc)) {
1706 title = "Beacon";
1707 print_dump = 1; /* dump frame contents */
1708 } else if (ieee80211_is_atim(fc))
1709 title = "ATIM";
1710 else if (ieee80211_is_auth(fc))
1711 title = "Auth";
1712 else if (ieee80211_is_deauth(fc))
1713 title = "DeAuth";
1714 else if (ieee80211_is_disassoc(fc))
1715 title = "DisAssoc";
1716 else
1717 title = "Frame";
1718
1719 rate = iwl_rate_index_from_plcp(rate_sym);
1720 if (rate == -1)
1721 rate = 0;
1722 else
1723 rate = iwl_rates[rate].ieee / 2;
1724
1725 /* print frame summary.
1726 * MAC addresses show just the last byte (for brevity),
1727 * but you can hack it to show more, if you'd like to. */
1728 if (dataframe)
1729 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1730 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1731 title, fc, header->addr1[5],
1732 length, rssi, channel, rate);
1733 else {
1734 /* src/dst addresses assume managed mode */
1735 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1736 "src=0x%02x, rssi=%u, tim=%lu usec, "
1737 "phy=0x%02x, chnl=%d\n",
1738 title, fc, header->addr1[5],
1739 header->addr3[5], rssi,
1740 tsf_low - priv->scan_start_tsf,
1741 phy_flags, channel);
1742 }
1743 }
1744 if (print_dump)
1745 iwl_print_hex_dump(IWL_DL_RX, data, length);
1746}
1747#endif
1748
1749static void iwl_unset_hw_setting(struct iwl_priv *priv)
1750{
1751 if (priv->hw_setting.shared_virt)
1752 pci_free_consistent(priv->pci_dev,
1753 sizeof(struct iwl_shared),
1754 priv->hw_setting.shared_virt,
1755 priv->hw_setting.shared_phys);
1756}
1757
1758/**
1759 * iwl_supported_rate_to_ie - fill in the supported rates in an IE field
1760 *
1761 * return: a bitmask with a bit set for each supported rate inserted into the IE
1762 */
1763static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1764 u16 basic_rate, int max_count)
1765{
1766 u16 ret_rates = 0, bit;
1767 int i;
1768 u8 *rates;
1769
1770 rates = &(ie[1]);
1771
1772 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1773 if (bit & supported_rate) {
1774 ret_rates |= bit;
1775 rates[*ie] = iwl_rates[i].ieee |
1776 ((bit & basic_rate) ? 0x80 : 0x00);
1777 *ie = *ie + 1;
1778 if (*ie >= max_count)
1779 break;
1780 }
1781 }
1782
1783 return ret_rates;
1784}
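/*
 * Illustrative example (not compiled; the exact bytes depend on the
 * iwl_rates[] table): with the CCK rates first in iwl_rates[], a
 * supported_rate mask of 0x000F and a basic_rate mask of 0x0003 would
 * leave the IE buffer as
 *
 *   ie[0] = 4                              (IE length / rates written)
 *   ie[1..4] = { 0x82, 0x84, 0x0b, 0x16 }  (1M*, 2M*, 5.5M, 11M; 0x80 = basic)
 */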
1785
1786/**
1787 * iwl_fill_probe_req - fill in all required fields and IE for probe request
1788 */
1789static u16 iwl_fill_probe_req(struct iwl_priv *priv,
1790 struct ieee80211_mgmt *frame,
1791 int left, int is_direct)
1792{
1793 int len = 0;
1794 u8 *pos = NULL;
1795 u16 ret_rates;
1796
1797 /* Make sure there is enough space for the probe request,
1798 * two mandatory IEs and the data */
1799 left -= 24;
1800 if (left < 0)
1801 return 0;
1802 len += 24;
1803
1804 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1805 memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN);
1806 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1807 memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN);
1808 frame->seq_ctrl = 0;
1809
1810 /* fill in our indirect SSID IE */
1811 /* ...next IE... */
1812
1813 left -= 2;
1814 if (left < 0)
1815 return 0;
1816 len += 2;
1817 pos = &(frame->u.probe_req.variable[0]);
1818 *pos++ = WLAN_EID_SSID;
1819 *pos++ = 0;
1820
1821 /* fill in our direct SSID IE... */
1822 if (is_direct) {
1823 /* ...next IE... */
1824 left -= 2 + priv->essid_len;
1825 if (left < 0)
1826 return 0;
1827 /* ... fill it in... */
1828 *pos++ = WLAN_EID_SSID;
1829 *pos++ = priv->essid_len;
1830 memcpy(pos, priv->essid, priv->essid_len);
1831 pos += priv->essid_len;
1832 len += 2 + priv->essid_len;
1833 }
1834
1835 /* fill in supported rate */
1836 /* ...next IE... */
1837 left -= 2;
1838 if (left < 0)
1839 return 0;
1840 /* ... fill it in... */
1841 *pos++ = WLAN_EID_SUPP_RATES;
1842 *pos = 0;
1843 ret_rates = priv->active_rate = priv->rates_mask;
1844 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1845
1846 iwl_supported_rate_to_ie(pos, priv->active_rate,
1847 priv->active_rate_basic, left);
1848 len += 2 + *pos;
1849 pos += (*pos) + 1;
1850 ret_rates = ~ret_rates & priv->active_rate;
1851
1852 if (ret_rates == 0)
1853 goto fill_end;
1854
1855 /* fill in supported extended rate */
1856 /* ...next IE... */
1857 left -= 2;
1858 if (left < 0)
1859 return 0;
1860 /* ... fill it in... */
1861 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1862 *pos = 0;
1863 iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left);
1864 if (*pos > 0)
1865 len += 2 + *pos;
1866
1867 fill_end:
1868 return (u16)len;
1869}
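/*
 * Resulting frame layout (for reference):
 *
 *   [ 24-byte 802.11 management header                    ]
 *   [ SSID IE, broadcast (zero length)                    ]
 *   [ SSID IE with priv->essid    - only when is_direct   ]
 *   [ Supported Rates IE                                  ]
 *   [ Extended Supported Rates IE - only if rates remain  ]
 *
 * The return value is the total number of bytes written, or 0 if 'left'
 * runs out at any step.
 */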
1870
1871/*
1872 * QoS support
1873 */
1874#ifdef CONFIG_IWLWIFI_QOS
1875static int iwl_send_qos_params_command(struct iwl_priv *priv,
1876 struct iwl_qosparam_cmd *qos)
1877{
1878
1879 return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1880 sizeof(struct iwl_qosparam_cmd), qos);
1881}
1882
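/*
 * Illustrative summary of the defaults programmed below (derived from the
 * code; assumes QoS is active and the default cw_min/cw_max of 15/1023):
 *
 *   AC   cw_min  cw_max  aifsn  edca_txop (usec)
 *   0      15     1023     3        0
 *   1      15     1023     7        0
 *   2       7     1023     2     3008
 *   3       3      511     2     1504
 *
 * In legacy operation cw_min is bumped to 31 and the TXOPs for AC2/AC3
 * become 6016/3264.  When QoS is inactive, ACs 1-3 are simply given the
 * same parameters as AC0 (aifsn 2, txop 0).
 */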
1883static void iwl_reset_qos(struct iwl_priv *priv)
1884{
1885 u16 cw_min = 15;
1886 u16 cw_max = 1023;
1887 u8 aifs = 2;
1888 u8 is_legacy = 0;
1889 unsigned long flags;
1890 int i;
1891
1892 spin_lock_irqsave(&priv->lock, flags);
1893 priv->qos_data.qos_active = 0;
1894
1895 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
1896 if (priv->qos_data.qos_enable)
1897 priv->qos_data.qos_active = 1;
1898 if (!(priv->active_rate & 0xfff0)) {
1899 cw_min = 31;
1900 is_legacy = 1;
1901 }
1902 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
1903 if (priv->qos_data.qos_enable)
1904 priv->qos_data.qos_active = 1;
1905 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
1906 cw_min = 31;
1907 is_legacy = 1;
1908 }
1909
1910 if (priv->qos_data.qos_active)
1911 aifs = 3;
1912
1913 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
1914 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
1915 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
1916 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
1917 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
1918
1919 if (priv->qos_data.qos_active) {
1920 i = 1;
1921 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
1922 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
1923 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
1924 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1925 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1926
1927 i = 2;
1928 priv->qos_data.def_qos_parm.ac[i].cw_min =
1929 cpu_to_le16((cw_min + 1) / 2 - 1);
1930 priv->qos_data.def_qos_parm.ac[i].cw_max =
1931 cpu_to_le16(cw_max);
1932 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1933 if (is_legacy)
1934 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1935 cpu_to_le16(6016);
1936 else
1937 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1938 cpu_to_le16(3008);
1939 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1940
1941 i = 3;
1942 priv->qos_data.def_qos_parm.ac[i].cw_min =
1943 cpu_to_le16((cw_min + 1) / 4 - 1);
1944 priv->qos_data.def_qos_parm.ac[i].cw_max =
1945 cpu_to_le16((cw_max + 1) / 2 - 1);
1946 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1947 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1948 if (is_legacy)
1949 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1950 cpu_to_le16(3264);
1951 else
1952 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1953 cpu_to_le16(1504);
1954 } else {
1955 for (i = 1; i < 4; i++) {
1956 priv->qos_data.def_qos_parm.ac[i].cw_min =
1957 cpu_to_le16(cw_min);
1958 priv->qos_data.def_qos_parm.ac[i].cw_max =
1959 cpu_to_le16(cw_max);
1960 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
1961 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1962 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1963 }
1964 }
1965 IWL_DEBUG_QOS("set QoS to default \n");
1966
1967 spin_unlock_irqrestore(&priv->lock, flags);
1968}
1969
1970static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
1971{
1972 unsigned long flags;
1973
1974 if (priv == NULL)
1975 return;
1976
1977 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1978 return;
1979
1980 if (!priv->qos_data.qos_enable)
1981 return;
1982
1983 spin_lock_irqsave(&priv->lock, flags);
1984 priv->qos_data.def_qos_parm.qos_flags = 0;
1985
1986 if (priv->qos_data.qos_cap.q_AP.queue_request &&
1987 !priv->qos_data.qos_cap.q_AP.txop_request)
1988 priv->qos_data.def_qos_parm.qos_flags |=
1989 QOS_PARAM_FLG_TXOP_TYPE_MSK;
1990
1991 if (priv->qos_data.qos_active)
1992 priv->qos_data.def_qos_parm.qos_flags |=
1993 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
1994
1995 spin_unlock_irqrestore(&priv->lock, flags);
1996
1997 if (force || iwl_is_associated(priv)) {
1998 IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n",
1999 priv->qos_data.qos_active);
2000
2001 iwl_send_qos_params_command(priv,
2002 &(priv->qos_data.def_qos_parm));
2003 }
2004}
2005
2006#endif /* CONFIG_IWLWIFI_QOS */
2007/*
2008 * Power management (not Tx power!) functions
2009 */
2010#define MSEC_TO_USEC 1024
2011
2012#define NOSLP __constant_cpu_to_le32(0)
2013#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK
2014#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2015#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2016 __constant_cpu_to_le32(X1), \
2017 __constant_cpu_to_le32(X2), \
2018 __constant_cpu_to_le32(X3), \
2019 __constant_cpu_to_le32(X4)}
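/*
 * Note: MSEC_TO_USEC is 1024 rather than 1000, so SLP_TIMEOUT(T) really
 * expresses T in 802.11 time units (1 TU = 1024 usec); e.g. SLP_TIMEOUT(200)
 * programs 200 * 1024 = 204800 usec, roughly 200 ms.
 */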
2020
2021
2022/* default power management (not Tx power) table values */
2023/* for tim 0-10 */
2024static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
2025 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2026 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2027 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2028 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2029 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2030 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2031};
2032
2033/* for tim > 10 */
2034static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
2035 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2036 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2037 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2038 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2039 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2040 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2041 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2042 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2043 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2044 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2045};
2046
2047int iwl_power_init_handle(struct iwl_priv *priv)
2048{
2049 int rc = 0, i;
2050 struct iwl_power_mgr *pow_data;
2051 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
2052 u16 pci_pm;
2053
2054 IWL_DEBUG_POWER("Initialize power \n");
2055
2056 pow_data = &(priv->power_data);
2057
2058 memset(pow_data, 0, sizeof(*pow_data));
2059
2060 pow_data->active_index = IWL_POWER_RANGE_0;
2061 pow_data->dtim_val = 0xffff;
2062
2063 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2064 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2065
2066 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2067 if (rc != 0)
2068 return 0;
2069 else {
2070 struct iwl_powertable_cmd *cmd;
2071
2072 IWL_DEBUG_POWER("adjust power command flags\n");
2073
2074 for (i = 0; i < IWL_POWER_AC; i++) {
2075 cmd = &pow_data->pwr_range_0[i].cmd;
2076
2077 if (pci_pm & 0x1)
2078 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2079 else
2080 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2081 }
2082 }
2083 return rc;
2084}
2085
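/*
 * Worked example of the sleep-interval clamping below (illustrative): with a
 * DTIM period of 3 beacons and a largest table entry of 10, sleeping over
 * DTIM gives max_sleep = (10 / 3) * 3 = 9, so any vector entry above 9 is
 * clamped to 9; when not sleeping over DTIM, every entry is clamped to the
 * DTIM period itself.
 */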
2086static int iwl_update_power_cmd(struct iwl_priv *priv,
2087 struct iwl_powertable_cmd *cmd, u32 mode)
2088{
2089 int rc = 0, i;
2090 u8 skip;
2091 u32 max_sleep = 0;
2092 struct iwl_power_vec_entry *range;
2093 u8 period = 0;
2094 struct iwl_power_mgr *pow_data;
2095
2096 if (mode > IWL_POWER_INDEX_5) {
2097 IWL_DEBUG_POWER("Error: invalid power mode\n");
2098 return -1;
2099 }
2100 pow_data = &(priv->power_data);
2101
2102 if (pow_data->active_index == IWL_POWER_RANGE_0)
2103 range = &pow_data->pwr_range_0[0];
2104 else
2105 range = &pow_data->pwr_range_1[1];
2106
2107 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd));
2108
2109#ifdef IWL_MAC80211_DISABLE
2110 if (priv->assoc_network != NULL) {
2111 unsigned long flags;
2112
2113 period = priv->assoc_network->tim.tim_period;
2114 }
2115#endif /*IWL_MAC80211_DISABLE */
2116 skip = range[mode].no_dtim;
2117
2118 if (period == 0) {
2119 period = 1;
2120 skip = 0;
2121 }
2122
2123 if (skip == 0) {
2124 max_sleep = period;
2125 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2126 } else {
2127 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2128 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2129 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2130 }
2131
2132 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2133 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2134 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2135 }
2136
2137 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2138 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2139 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2140 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2141 le32_to_cpu(cmd->sleep_interval[0]),
2142 le32_to_cpu(cmd->sleep_interval[1]),
2143 le32_to_cpu(cmd->sleep_interval[2]),
2144 le32_to_cpu(cmd->sleep_interval[3]),
2145 le32_to_cpu(cmd->sleep_interval[4]));
2146
2147 return rc;
2148}
2149
2150static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode)
2151{
2152 u32 final_mode = mode;
2153 int rc;
2154 struct iwl_powertable_cmd cmd;
2155
2156 /* If on battery, set to power index 3;
2157 * if plugged into AC power, set to CAM ("continuously aware mode");
2158 * otherwise use the requested level as-is */
2159 switch (mode) {
2160 case IWL_POWER_BATTERY:
2161 final_mode = IWL_POWER_INDEX_3;
2162 break;
2163 case IWL_POWER_AC:
2164 final_mode = IWL_POWER_MODE_CAM;
2165 break;
2166 default:
2167 final_mode = mode;
2168 break;
2169 }
2170
2171 iwl_update_power_cmd(priv, &cmd, final_mode);
2172
2173 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
2174
2175 if (final_mode == IWL_POWER_MODE_CAM)
2176 clear_bit(STATUS_POWER_PMI, &priv->status);
2177 else
2178 set_bit(STATUS_POWER_PMI, &priv->status);
2179
2180 return rc;
2181}
2182
2183int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
2184{
2185 /* Filter incoming packets to determine if they are targeted toward
2186 * this network, discarding packets coming from ourselves */
2187 switch (priv->iw_mode) {
2188 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2189 /* packets from our adapter are dropped (echo) */
2190 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2191 return 0;
2192 /* {broad,multi}cast packets to our IBSS go through */
2193 if (is_multicast_ether_addr(header->addr1))
2194 return !compare_ether_addr(header->addr3, priv->bssid);
2195 /* packets to our adapter go through */
2196 return !compare_ether_addr(header->addr1, priv->mac_addr);
2197 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2198 /* packets from our adapter are dropped (echo) */
2199 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2200 return 0;
2201 /* {broad,multi}cast packets to our BSS go through */
2202 if (is_multicast_ether_addr(header->addr1))
2203 return !compare_ether_addr(header->addr2, priv->bssid);
2204 /* packets to our adapter go through */
2205 return !compare_ether_addr(header->addr1, priv->mac_addr);
2206 }
2207
2208 return 1;
2209}
2210
2211#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2212
2213const char *iwl_get_tx_fail_reason(u32 status)
2214{
2215 switch (status & TX_STATUS_MSK) {
2216 case TX_STATUS_SUCCESS:
2217 return "SUCCESS";
2218 TX_STATUS_ENTRY(SHORT_LIMIT);
2219 TX_STATUS_ENTRY(LONG_LIMIT);
2220 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2221 TX_STATUS_ENTRY(MGMNT_ABORT);
2222 TX_STATUS_ENTRY(NEXT_FRAG);
2223 TX_STATUS_ENTRY(LIFE_EXPIRE);
2224 TX_STATUS_ENTRY(DEST_PS);
2225 TX_STATUS_ENTRY(ABORTED);
2226 TX_STATUS_ENTRY(BT_RETRY);
2227 TX_STATUS_ENTRY(STA_INVALID);
2228 TX_STATUS_ENTRY(FRAG_DROPPED);
2229 TX_STATUS_ENTRY(TID_DISABLE);
2230 TX_STATUS_ENTRY(FRAME_FLUSHED);
2231 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2232 TX_STATUS_ENTRY(TX_LOCKED);
2233 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2234 }
2235
2236 return "UNKNOWN";
2237}
2238
2239/**
2240 * iwl_scan_cancel - Cancel any currently executing HW scan
2241 *
2242 * NOTE: priv->mutex is not required before calling this function
2243 */
2244static int iwl_scan_cancel(struct iwl_priv *priv)
2245{
2246 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2247 clear_bit(STATUS_SCANNING, &priv->status);
2248 return 0;
2249 }
2250
2251 if (test_bit(STATUS_SCANNING, &priv->status)) {
2252 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2253 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2254 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2255 queue_work(priv->workqueue, &priv->abort_scan);
2256
2257 } else
2258 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2259
2260 return test_bit(STATUS_SCANNING, &priv->status);
2261 }
2262
2263 return 0;
2264}
2265
2266/**
2267 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
2268 * @ms: amount of time to wait (in milliseconds) for scan to abort
2269 *
2270 * NOTE: priv->mutex must be held before calling this function
2271 */
2272static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
2273{
2274 unsigned long now = jiffies;
2275 int ret;
2276
2277 ret = iwl_scan_cancel(priv);
2278 if (ret && ms) {
2279 mutex_unlock(&priv->mutex);
2280 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2281 test_bit(STATUS_SCANNING, &priv->status))
2282 msleep(1);
2283 mutex_lock(&priv->mutex);
2284
2285 return test_bit(STATUS_SCANNING, &priv->status);
2286 }
2287
2288 return ret;
2289}
2290
2291static void iwl_sequence_reset(struct iwl_priv *priv)
2292{
2293 /* Reset ieee stats */
2294
2295 /* We don't reset the net_device_stats (ieee->stats) on
2296 * re-association */
2297
2298 priv->last_seq_num = -1;
2299 priv->last_frag_num = -1;
2300 priv->last_packet_time = 0;
2301
2302 iwl_scan_cancel(priv);
2303}
2304
2305#define MAX_UCODE_BEACON_INTERVAL 1024
2306#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2307
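/*
 * Worked example (illustrative): the uCode cannot take beacon intervals above
 * MAX_UCODE_BEACON_INTERVAL, so beacon_val = 1500 TU gives
 * beacon_factor = (1500 + 1024) / 1024 = 2 and a programmed interval of
 * 1500 / 2 = 750 TU; values below 1024 TU pass through unchanged.
 */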
2308static __le16 iwl_adjust_beacon_interval(u16 beacon_val)
2309{
2310 u16 new_val = 0;
2311 u16 beacon_factor = 0;
2312
2313 beacon_factor =
2314 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2315 / MAX_UCODE_BEACON_INTERVAL;
2316 new_val = beacon_val / beacon_factor;
2317
2318 return cpu_to_le16(new_val);
2319}
2320
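/*
 * Worked example for beacon_init_val (illustrative): with a 100 TU beacon
 * interval, interval_tm_unit = 100 * 1024 = 102400 usec; if the current TSF
 * modulo that interval is 30000 usec, the initial beacon timer is programmed
 * to 102400 - 30000 = 72400 usec, i.e. the time left until the next beacon.
 */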
2321static void iwl_setup_rxon_timing(struct iwl_priv *priv)
2322{
2323 u64 interval_tm_unit;
2324 u64 tsf, result;
2325 unsigned long flags;
2326 struct ieee80211_conf *conf = NULL;
2327 u16 beacon_int = 0;
2328
2329 conf = ieee80211_get_hw_conf(priv->hw);
2330
2331 spin_lock_irqsave(&priv->lock, flags);
2332 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2333 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2334
2335 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2336
2337 tsf = priv->timestamp1;
2338 tsf = ((tsf << 32) | priv->timestamp0);
2339
2340 beacon_int = priv->beacon_int;
2341 spin_unlock_irqrestore(&priv->lock, flags);
2342
2343 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2344 if (beacon_int == 0) {
2345 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2346 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2347 } else {
2348 priv->rxon_timing.beacon_interval =
2349 cpu_to_le16(beacon_int);
2350 priv->rxon_timing.beacon_interval =
2351 iwl_adjust_beacon_interval(
2352 le16_to_cpu(priv->rxon_timing.beacon_interval));
2353 }
2354
2355 priv->rxon_timing.atim_window = 0;
2356 } else {
2357 priv->rxon_timing.beacon_interval =
2358 iwl_adjust_beacon_interval(conf->beacon_int);
2359 /* TODO: we need to get atim_window from upper stack
2360 * for now we set it to 0 */
2361 priv->rxon_timing.atim_window = 0;
2362 }
2363
2364 interval_tm_unit =
2365 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2366 result = do_div(tsf, interval_tm_unit);
2367 priv->rxon_timing.beacon_init_val =
2368 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2369
2370 IWL_DEBUG_ASSOC
2371 ("beacon interval %d beacon timer %d atim window %d\n",
2372 le16_to_cpu(priv->rxon_timing.beacon_interval),
2373 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2374 le16_to_cpu(priv->rxon_timing.atim_window));
2375}
2376
2377static int iwl_scan_initiate(struct iwl_priv *priv)
2378{
2379 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2380 IWL_ERROR("APs don't scan.\n");
2381 return 0;
2382 }
2383
2384 if (!iwl_is_ready_rf(priv)) {
2385 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2386 return -EIO;
2387 }
2388
2389 if (test_bit(STATUS_SCANNING, &priv->status)) {
2390 IWL_DEBUG_SCAN("Scan already in progress.\n");
2391 return -EAGAIN;
2392 }
2393
2394 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2395 IWL_DEBUG_SCAN("Scan request while abort pending. "
2396 "Queuing.\n");
2397 return -EAGAIN;
2398 }
2399
2400 IWL_DEBUG_INFO("Starting scan...\n");
2401 priv->scan_bands = 2;
2402 set_bit(STATUS_SCANNING, &priv->status);
2403 priv->scan_start = jiffies;
2404 priv->scan_pass_start = priv->scan_start;
2405
2406 queue_work(priv->workqueue, &priv->request_scan);
2407
2408 return 0;
2409}
2410
2411static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
2412{
2413 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
2414
2415 if (hw_decrypt)
2416 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2417 else
2418 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2419
2420 return 0;
2421}
2422
2423static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode)
2424{
2425 if (phymode == MODE_IEEE80211A) {
2426 priv->staging_rxon.flags &=
2427 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2428 | RXON_FLG_CCK_MSK);
2429 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2430 } else {
2431 /* Copied from iwl_bg_post_associate() */
2432 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2433 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2434 else
2435 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2436
2437 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2438 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2439
2440 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2441 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2442 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2443 }
2444}
2445
2446/*
2447 * initialize the rxon structure with default values from the eeprom
2448 */
2449static void iwl_connection_init_rx_config(struct iwl_priv *priv)
2450{
2451 const struct iwl_channel_info *ch_info;
2452
2453 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2454
2455 switch (priv->iw_mode) {
2456 case IEEE80211_IF_TYPE_AP:
2457 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2458 break;
2459
2460 case IEEE80211_IF_TYPE_STA:
2461 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2462 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2463 break;
2464
2465 case IEEE80211_IF_TYPE_IBSS:
2466 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2467 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2468 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2469 RXON_FILTER_ACCEPT_GRP_MSK;
2470 break;
2471
2472 case IEEE80211_IF_TYPE_MNTR:
2473 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2474 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2475 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2476 break;
2477 }
2478
2479#if 0
2480 /* TODO: Figure out when short_preamble would be set and cache it
2481 * from that */
2482 if (!hw_to_local(priv->hw)->short_preamble)
2483 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2484 else
2485 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2486#endif
2487
2488 ch_info = iwl_get_channel_info(priv, priv->phymode,
2489 le16_to_cpu(priv->staging_rxon.channel));
2490
2491 if (!ch_info)
2492 ch_info = &priv->channel_info[0];
2493
2494 /*
2495 * in some cases the A-band channels are all non-IBSS;
2496 * in that case force a B/G channel
2497 */
2498 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2499 !(is_channel_ibss(ch_info)))
2500 ch_info = &priv->channel_info[0];
2501
2502 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2503 if (is_channel_a_band(ch_info))
2504 priv->phymode = MODE_IEEE80211A;
2505 else
2506 priv->phymode = MODE_IEEE80211G;
2507
2508 iwl_set_flags_for_phymode(priv, priv->phymode);
2509
2510 priv->staging_rxon.ofdm_basic_rates =
2511 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2512 priv->staging_rxon.cck_basic_rates =
2513 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2514}
2515
2516static int iwl_set_mode(struct iwl_priv *priv, int mode)
2517{
2518 if (!iwl_is_ready_rf(priv))
2519 return -EAGAIN;
2520
2521 if (mode == IEEE80211_IF_TYPE_IBSS) {
2522 const struct iwl_channel_info *ch_info;
2523
2524 ch_info = iwl_get_channel_info(priv,
2525 priv->phymode,
2526 le16_to_cpu(priv->staging_rxon.channel));
2527
2528 if (!ch_info || !is_channel_ibss(ch_info)) {
2529 IWL_ERROR("channel %d not IBSS channel\n",
2530 le16_to_cpu(priv->staging_rxon.channel));
2531 return -EINVAL;
2532 }
2533 }
2534
2535 cancel_delayed_work(&priv->scan_check);
2536 if (iwl_scan_cancel_timeout(priv, 100)) {
2537 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2538 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2539 return -EAGAIN;
2540 }
2541
2542 priv->iw_mode = mode;
2543
2544 iwl_connection_init_rx_config(priv);
2545 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2546
2547 iwl_clear_stations_table(priv);
2548
2549 iwl_commit_rxon(priv);
2550
2551 return 0;
2552}
2553
2554static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2555 struct ieee80211_tx_control *ctl,
2556 struct iwl_cmd *cmd,
2557 struct sk_buff *skb_frag,
2558 int last_frag)
2559{
2560 struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
2561
2562 switch (keyinfo->alg) {
2563 case ALG_CCMP:
2564 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2565 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2566 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2567 break;
2568
2569 case ALG_TKIP:
2570#if 0
2571 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2572
2573 if (last_frag)
2574 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2575 8);
2576 else
2577 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2578#endif
2579 break;
2580
2581 case ALG_WEP:
2582 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2583 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2584
2585 if (keyinfo->keylen == 13)
2586 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2587
2588 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2589
2590 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2591 "with key %d\n", ctl->key_idx);
2592 break;
2593
2594 case ALG_NONE:
2595 IWL_DEBUG_TX("Tx packet in the clear (encrypt requested).\n");
2596 break;
2597
2598 default:
2599 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2600 break;
2601 }
2602}
2603
2604/*
2605 * build the basic portion (flags and timings) of the REPLY_TX host command
2606 */
2607static void iwl_build_tx_cmd_basic(struct iwl_priv *priv,
2608 struct iwl_cmd *cmd,
2609 struct ieee80211_tx_control *ctrl,
2610 struct ieee80211_hdr *hdr,
2611 int is_unicast, u8 std_id)
2612{
2613 __le16 *qc;
2614 u16 fc = le16_to_cpu(hdr->frame_control);
2615 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2616
2617 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2618 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2619 tx_flags |= TX_CMD_FLG_ACK_MSK;
2620 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2621 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2622 if (ieee80211_is_probe_response(fc) &&
2623 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2624 tx_flags |= TX_CMD_FLG_TSF_MSK;
2625 } else {
2626 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2627 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2628 }
2629
2630 cmd->cmd.tx.sta_id = std_id;
2631 if (ieee80211_get_morefrag(hdr))
2632 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2633
2634 qc = ieee80211_get_qos_ctrl(hdr);
2635 if (qc) {
2636 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2637 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2638 } else
2639 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2640
2641 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2642 tx_flags |= TX_CMD_FLG_RTS_MSK;
2643 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2644 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2645 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2646 tx_flags |= TX_CMD_FLG_CTS_MSK;
2647 }
2648
2649 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2650 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2651
2652 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2653 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2654 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2655 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2656 cmd->cmd.tx.timeout.pm_frame_timeout =
2657 cpu_to_le16(3);
2658 else
2659 cmd->cmd.tx.timeout.pm_frame_timeout =
2660 cpu_to_le16(2);
2661 } else
2662 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2663
2664 cmd->cmd.tx.driver_txop = 0;
2665 cmd->cmd.tx.tx_flags = tx_flags;
2666 cmd->cmd.tx.next_frame_len = 0;
2667}
2668
2669static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2670{
2671 int sta_id;
2672 u16 fc = le16_to_cpu(hdr->frame_control);
2673
2674 /* If this frame is broadcast or not a data frame, then use the broadcast
2675 * station id */
2676 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2677 is_multicast_ether_addr(hdr->addr1))
2678 return priv->hw_setting.bcast_sta_id;
2679
2680 switch (priv->iw_mode) {
2681
2682 /* If this frame is part of a BSS network (we're a station), then
2683 * we use the AP's station id */
2684 case IEEE80211_IF_TYPE_STA:
2685 return IWL_AP_ID;
2686
2687 /* If we are an AP, then find the station, or use BCAST */
2688 case IEEE80211_IF_TYPE_AP:
2689 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2690 if (sta_id != IWL_INVALID_STATION)
2691 return sta_id;
2692 return priv->hw_setting.bcast_sta_id;
2693
2694 /* If this frame is part of an IBSS network, then we use the
2695 * target-specific station id */
2696 case IEEE80211_IF_TYPE_IBSS:
2697 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2698 if (sta_id != IWL_INVALID_STATION)
2699 return sta_id;
2700
2701 sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
2702
2703 if (sta_id != IWL_INVALID_STATION)
2704 return sta_id;
2705
2706 IWL_DEBUG_DROP("Station " MAC_FMT " not in station map. "
2707 "Defaulting to broadcast...\n",
2708 MAC_ARG(hdr->addr1));
2709 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2710 return priv->hw_setting.bcast_sta_id;
2711
2712 default:
2713 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
2714 return priv->hw_setting.bcast_sta_id;
2715 }
2716}
2717
2718/*
2719 * start REPLY_TX command process
2720 */
2721static int iwl_tx_skb(struct iwl_priv *priv,
2722 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2723{
2724 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2725 struct iwl_tfd_frame *tfd;
2726 u32 *control_flags;
2727 int txq_id = ctl->queue;
2728 struct iwl_tx_queue *txq = NULL;
2729 struct iwl_queue *q = NULL;
2730 dma_addr_t phys_addr;
2731 dma_addr_t txcmd_phys;
2732 struct iwl_cmd *out_cmd = NULL;
2733 u16 len, idx, len_org;
2734 u8 id, hdr_len, unicast;
2735 u8 sta_id;
2736 u16 seq_number = 0;
2737 u16 fc;
2738 __le16 *qc;
2739 u8 wait_write_ptr = 0;
2740 unsigned long flags;
2741 int rc;
2742
2743 spin_lock_irqsave(&priv->lock, flags);
2744 if (iwl_is_rfkill(priv)) {
2745 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2746 goto drop_unlock;
2747 }
2748
2749 if (!priv->interface_id) {
2750 IWL_DEBUG_DROP("Dropping - !priv->interface_id\n");
2751 goto drop_unlock;
2752 }
2753
2754 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2755 IWL_ERROR("ERROR: No TX rate available.\n");
2756 goto drop_unlock;
2757 }
2758
2759 unicast = !is_multicast_ether_addr(hdr->addr1);
2760 id = 0;
2761
2762 fc = le16_to_cpu(hdr->frame_control);
2763
2764#ifdef CONFIG_IWLWIFI_DEBUG
2765 if (ieee80211_is_auth(fc))
2766 IWL_DEBUG_TX("Sending AUTH frame\n");
2767 else if (ieee80211_is_assoc_request(fc))
2768 IWL_DEBUG_TX("Sending ASSOC frame\n");
2769 else if (ieee80211_is_reassoc_request(fc))
2770 IWL_DEBUG_TX("Sending REASSOC frame\n");
2771#endif
2772
2773 if (!iwl_is_associated(priv) &&
2774 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
2775 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2776 goto drop_unlock;
2777 }
2778
2779 spin_unlock_irqrestore(&priv->lock, flags);
2780
2781 hdr_len = ieee80211_get_hdrlen(fc);
2782 sta_id = iwl_get_sta_id(priv, hdr);
2783 if (sta_id == IWL_INVALID_STATION) {
2784 IWL_DEBUG_DROP("Dropping - INVALID STATION: " MAC_FMT "\n",
2785 MAC_ARG(hdr->addr1));
2786 goto drop;
2787 }
2788
2789 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2790
2791 qc = ieee80211_get_qos_ctrl(hdr);
2792 if (qc) {
2793 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2794 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2795 IEEE80211_SCTL_SEQ;
2796 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2797 (hdr->seq_ctrl &
2798 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2799 seq_number += 0x10;
2800 }
2801 txq = &priv->txq[txq_id];
2802 q = &txq->q;
2803
2804 spin_lock_irqsave(&priv->lock, flags);
2805
2806 tfd = &txq->bd[q->first_empty];
2807 memset(tfd, 0, sizeof(*tfd));
2808 control_flags = (u32 *) tfd;
2809 idx = get_cmd_index(q, q->first_empty, 0);
2810
2811 memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info));
2812 txq->txb[q->first_empty].skb[0] = skb;
2813 memcpy(&(txq->txb[q->first_empty].status.control),
2814 ctl, sizeof(struct ieee80211_tx_control));
2815 out_cmd = &txq->cmd[idx];
2816 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2817 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2818 out_cmd->hdr.cmd = REPLY_TX;
2819 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2820 INDEX_TO_SEQ(q->first_empty)));
2821 /* copy frags header */
2822 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2823
2824 /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */
2825 len = priv->hw_setting.tx_cmd_len +
2826 sizeof(struct iwl_cmd_header) + hdr_len;
2827
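	/* Round the Tx command length up to a dword boundary; len_org is
	 * reused as a flag recording whether padding was added */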
2828 len_org = len;
2829 len = (len + 3) & ~3;
2830
2831 if (len_org != len)
2832 len_org = 1;
2833 else
2834 len_org = 0;
2835
2836 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2837 offsetof(struct iwl_cmd, hdr);
2838
2839 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2840
2841 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
2842 iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
2843
2844 /* 802.11 null functions have no payload... */
2845 len = skb->len - hdr_len;
2846 if (len) {
2847 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2848 len, PCI_DMA_TODEVICE);
2849 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2850 }
2851
2852 /* If there is no payload, then only one TFD is used */
2853 if (!len)
2854 *control_flags = TFD_CTL_COUNT_SET(1);
2855 else
2856 *control_flags = TFD_CTL_COUNT_SET(2) |
2857 TFD_CTL_PAD_SET(U32_PAD(len));
2858
2859 len = (u16)skb->len;
2860 out_cmd->cmd.tx.len = cpu_to_le16(len);
2861
2862 /* TODO need this for burst mode later on */
2863 iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
2864
2865 /* set is_hcca to 0; it probably will never be implemented */
2866 iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
2867
2868 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2869 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
2870
2871 if (!ieee80211_get_morefrag(hdr)) {
2872 txq->need_update = 1;
2873 if (qc) {
2874 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2875 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2876 }
2877 } else {
2878 wait_write_ptr = 1;
2879 txq->need_update = 0;
2880 }
2881
2882 iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
2883 sizeof(out_cmd->cmd.tx));
2884
2885 iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2886 ieee80211_get_hdrlen(fc));
2887
2888 q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
2889 rc = iwl_tx_queue_update_write_ptr(priv, txq);
2890 spin_unlock_irqrestore(&priv->lock, flags);
2891
2892 if (rc)
2893 return rc;
2894
2895 if ((iwl_queue_space(q) < q->high_mark)
2896 && priv->mac80211_registered) {
2897 if (wait_write_ptr) {
2898 spin_lock_irqsave(&priv->lock, flags);
2899 txq->need_update = 1;
2900 iwl_tx_queue_update_write_ptr(priv, txq);
2901 spin_unlock_irqrestore(&priv->lock, flags);
2902 }
2903
2904 ieee80211_stop_queue(priv->hw, ctl->queue);
2905 }
2906
2907 return 0;
2908
2909drop_unlock:
2910 spin_unlock_irqrestore(&priv->lock, flags);
2911drop:
2912 return -1;
2913}
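/*
 * Note on the TFD built above: it carries one buffer when the frame has no
 * payload (the Tx host command, which holds the copied 802.11 header), or
 * two buffers when it does (the Tx command plus the skb payload mapped for
 * DMA).
 */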
2914
2915static void iwl_set_rate(struct iwl_priv *priv)
2916{
2917 const struct ieee80211_hw_mode *hw = NULL;
2918 struct ieee80211_rate *rate;
2919 int i;
2920
2921 hw = iwl_get_hw_mode(priv, priv->phymode);
2922
2923 priv->active_rate = 0;
2924 priv->active_rate_basic = 0;
2925
2926 IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
2927 hw->mode == MODE_IEEE80211A ?
2928 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
2929
2930 for (i = 0; i < hw->num_rates; i++) {
2931 rate = &(hw->rates[i]);
2932 if ((rate->val < IWL_RATE_COUNT) &&
2933 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
2934 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
2935 rate->val, iwl_rates[rate->val].plcp,
2936 (rate->flags & IEEE80211_RATE_BASIC) ?
2937 "*" : "");
2938 priv->active_rate |= (1 << rate->val);
2939 if (rate->flags & IEEE80211_RATE_BASIC)
2940 priv->active_rate_basic |= (1 << rate->val);
2941 } else
2942 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
2943 rate->val, iwl_rates[rate->val].plcp);
2944 }
2945
2946 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
2947 priv->active_rate, priv->active_rate_basic);
2948
2949 /*
2950 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
2951 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
2952 * OFDM
2953 */
2954 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
2955 priv->staging_rxon.cck_basic_rates =
2956 ((priv->active_rate_basic &
2957 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2958 else
2959 priv->staging_rxon.cck_basic_rates =
2960 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2961
2962 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
2963 priv->staging_rxon.ofdm_basic_rates =
2964 ((priv->active_rate_basic &
2965 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2966 IWL_FIRST_OFDM_RATE) & 0xFF;
2967 else
2968 priv->staging_rxon.ofdm_basic_rates =
2969 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2970}
2971
2972static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
2973{
2974 unsigned long flags;
2975
2976 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2977 return;
2978
2979 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2980 disable_radio ? "OFF" : "ON");
2981
2982 if (disable_radio) {
2983 iwl_scan_cancel(priv);
2984 /* FIXME: This is a workaround for AP */
2985 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
2986 spin_lock_irqsave(&priv->lock, flags);
2987 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2988 CSR_UCODE_SW_BIT_RFKILL);
2989 spin_unlock_irqrestore(&priv->lock, flags);
2990 iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
2991 set_bit(STATUS_RF_KILL_SW, &priv->status);
2992 }
2993 return;
2994 }
2995
2996 spin_lock_irqsave(&priv->lock, flags);
2997 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2998
2999 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3000 spin_unlock_irqrestore(&priv->lock, flags);
3001
3002 /* wake up ucode */
3003 msleep(10);
3004
3005 spin_lock_irqsave(&priv->lock, flags);
3006 iwl_read32(priv, CSR_UCODE_DRV_GP1);
3007 if (!iwl_grab_restricted_access(priv))
3008 iwl_release_restricted_access(priv);
3009 spin_unlock_irqrestore(&priv->lock, flags);
3010
3011 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3012 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3013 "disabled by HW switch\n");
3014 return;
3015 }
3016
3017 queue_work(priv->workqueue, &priv->restart);
3018 return;
3019}
3020
3021void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
3022 u32 decrypt_res, struct ieee80211_rx_status *stats)
3023{
3024 u16 fc =
3025 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3026
3027 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3028 return;
3029
3030 if (!(fc & IEEE80211_FCTL_PROTECTED))
3031 return;
3032
3033 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3034 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3035 case RX_RES_STATUS_SEC_TYPE_TKIP:
3036 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3037 RX_RES_STATUS_BAD_ICV_MIC)
3038 stats->flag |= RX_FLAG_MMIC_ERROR;
3039 case RX_RES_STATUS_SEC_TYPE_WEP:
3040 case RX_RES_STATUS_SEC_TYPE_CCMP:
3041 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3042 RX_RES_STATUS_DECRYPT_OK) {
3043 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
3044 stats->flag |= RX_FLAG_DECRYPTED;
3045 }
3046 break;
3047
3048 default:
3049 break;
3050 }
3051}
3052
3053void iwl_handle_data_packet_monitor(struct iwl_priv *priv,
3054 struct iwl_rx_mem_buffer *rxb,
3055 void *data, short len,
3056 struct ieee80211_rx_status *stats,
3057 u16 phy_flags)
3058{
3059 struct iwl_rt_rx_hdr *iwl_rt;
3060
3061 /* First cache any information we need before we overwrite
3062 * the information provided in the skb from the hardware */
3063 s8 signal = stats->ssi;
3064 s8 noise = 0;
3065 int rate = stats->rate;
3066 u64 tsf = stats->mactime;
3067 __le16 phy_flags_hw = cpu_to_le16(phy_flags);
3068
3069 /* Drop frames too large to fit behind the radiotap header */
3070 if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) {
3071 IWL_DEBUG_DROP("Dropping too large packet in monitor\n");
3072 return;
3073 }
3074
3075 /* copy the frame data to write after where the radiotap header goes */
3076 iwl_rt = (void *)rxb->skb->data;
3077 memmove(iwl_rt->payload, data, len);
3078
3079 iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3080 iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */
3081
3082 /* total header + data */
3083 iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt));
3084
3085 /* Set the size of the skb to the size of the frame */
3086 skb_put(rxb->skb, sizeof(*iwl_rt) + len);
3087
3088 /* Big bitfield of all the fields we provide in radiotap */
3089 iwl_rt->rt_hdr.it_present =
3090 cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3091 (1 << IEEE80211_RADIOTAP_FLAGS) |
3092 (1 << IEEE80211_RADIOTAP_RATE) |
3093 (1 << IEEE80211_RADIOTAP_CHANNEL) |
3094 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3095 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3096 (1 << IEEE80211_RADIOTAP_ANTENNA));
3097
3098 /* Zero the flags, we'll add to them as we go */
3099 iwl_rt->rt_flags = 0;
3100
3101 iwl_rt->rt_tsf = cpu_to_le64(tsf);
3102
3103 /* Convert to dBm */
3104 iwl_rt->rt_dbmsignal = signal;
3105 iwl_rt->rt_dbmnoise = noise;
3106
3107 /* Convert the channel frequency and set the flags */
3108 iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq);
3109 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
3110 iwl_rt->rt_chbitmask =
3111 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
3112 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
3113 iwl_rt->rt_chbitmask =
3114 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
3115 else /* 802.11g */
3116 iwl_rt->rt_chbitmask =
3117 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));
3118
3119 rate = iwl_rate_index_from_plcp(rate);
3120 if (rate == -1)
3121 iwl_rt->rt_rate = 0;
3122 else
3123 iwl_rt->rt_rate = iwl_rates[rate].ieee;
3124
3125 /* antenna number */
3126 iwl_rt->rt_antenna =
3127 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
3128
3129 /* set the preamble flag if we have it */
3130 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
3131 iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3132
3133 IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
3134
3135 stats->flag |= RX_FLAG_RADIOTAP;
3136 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3137 rxb->skb = NULL;
3138}
3139
3140
3141#define IWL_PACKET_RETRY_TIME HZ
3142
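/*
 * A frame is treated as a retransmission, and dropped, when it carries the
 * same sequence number as the previous frame from that source, arrives within
 * IWL_PACKET_RETRY_TIME (one second), and repeats a fragment number (or skips
 * ahead out of order).  In IBSS mode the per-source state lives in a small
 * hash table keyed on the last byte of the sender's MAC address.
 */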
3143int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
3144{
3145 u16 sc = le16_to_cpu(header->seq_ctrl);
3146 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3147 u16 frag = sc & IEEE80211_SCTL_FRAG;
3148 u16 *last_seq, *last_frag;
3149 unsigned long *last_time;
3150
3151 switch (priv->iw_mode) {
3152 case IEEE80211_IF_TYPE_IBSS:{
3153 struct list_head *p;
3154 struct iwl_ibss_seq *entry = NULL;
3155 u8 *mac = header->addr2;
3156 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3157
3158 __list_for_each(p, &priv->ibss_mac_hash[index]) {
3159 entry =
3160 list_entry(p, struct iwl_ibss_seq, list);
3161 if (!compare_ether_addr(entry->mac, mac))
3162 break;
3163 }
3164 if (p == &priv->ibss_mac_hash[index]) {
3165 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3166 if (!entry) {
3167 IWL_ERROR
3168 ("Cannot malloc new mac entry\n");
3169 return 0;
3170 }
3171 memcpy(entry->mac, mac, ETH_ALEN);
3172 entry->seq_num = seq;
3173 entry->frag_num = frag;
3174 entry->packet_time = jiffies;
3175 list_add(&entry->list,
3176 &priv->ibss_mac_hash[index]);
3177 return 0;
3178 }
3179 last_seq = &entry->seq_num;
3180 last_frag = &entry->frag_num;
3181 last_time = &entry->packet_time;
3182 break;
3183 }
3184 case IEEE80211_IF_TYPE_STA:
3185 last_seq = &priv->last_seq_num;
3186 last_frag = &priv->last_frag_num;
3187 last_time = &priv->last_packet_time;
3188 break;
3189 default:
3190 return 0;
3191 }
3192 if ((*last_seq == seq) &&
3193 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3194 if (*last_frag == frag)
3195 goto drop;
3196 if (*last_frag + 1 != frag)
3197 /* out-of-order fragment */
3198 goto drop;
3199 } else
3200 *last_seq = seq;
3201
3202 *last_frag = frag;
3203 *last_time = jiffies;
3204 return 0;
3205
3206 drop:
3207 return 1;
3208}
3209
3210#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
3211
3212#include "iwl-spectrum.h"
3213
3214#define BEACON_TIME_MASK_LOW 0x00FFFFFF
3215#define BEACON_TIME_MASK_HIGH 0xFF000000
3216#define TIME_UNIT 1024
3217
3218/*
3219 * extended beacon time format:
3220 * time in usec is packed into a 32-bit value in 8:24 format;
3221 * the high byte is the beacon count,
3222 * the low 3 bytes are the time in usec within one beacon interval
3223 */
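/*
 * Worked example (illustrative): with a 100 TU beacon interval (102400 usec),
 * a time of 250000 usec packs as quot = 2, rem = 45200 (0xB090), i.e.
 * 0x0200B090: two full beacon intervals plus 45200 usec into the third.
 */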
3224
3225static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
3226{
3227 u32 quot;
3228 u32 rem;
3229 u32 interval = beacon_interval * 1024;
3230
3231 if (!interval || !usec)
3232 return 0;
3233
3234 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3235 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3236
3237 return (quot << 24) + rem;
3238}
3239
3240 /* base is usually the value we get from the ucode with each received frame;
3241 * it mirrors the HW timer counter, which counts down
3242 */
3243
3244static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
3245{
3246 u32 base_low = base & BEACON_TIME_MASK_LOW;
3247 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3248 u32 interval = beacon_interval * TIME_UNIT;
3249 u32 res = (base & BEACON_TIME_MASK_HIGH) +
3250 (addon & BEACON_TIME_MASK_HIGH);
3251
3252 if (base_low > addon_low)
3253 res += base_low - addon_low;
3254 else if (base_low < addon_low) {
3255 res += interval + base_low - addon_low;
3256 res += (1 << 24);
3257 } else
3258 res += (1 << 24);
3259
3260 return cpu_to_le32(res);
3261}
3262
3263static int iwl_get_measurement(struct iwl_priv *priv,
3264 struct ieee80211_measurement_params *params,
3265 u8 type)
3266{
3267 struct iwl_spectrum_cmd spectrum;
3268 struct iwl_rx_packet *res;
3269 struct iwl_host_cmd cmd = {
3270 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3271 .data = (void *)&spectrum,
3272 .meta.flags = CMD_WANT_SKB,
3273 };
3274 u32 add_time = le64_to_cpu(params->start_time);
3275 int rc;
3276 int spectrum_resp_status;
3277 int duration = le16_to_cpu(params->duration);
3278
3279 if (iwl_is_associated(priv))
3280 add_time =
3281 iwl_usecs_to_beacons(
3282 le64_to_cpu(params->start_time) - priv->last_tsf,
3283 le16_to_cpu(priv->rxon_timing.beacon_interval));
3284
3285 memset(&spectrum, 0, sizeof(spectrum));
3286
3287 spectrum.channel_count = cpu_to_le16(1);
3288 spectrum.flags =
3289 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3290 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3291 cmd.len = sizeof(spectrum);
3292 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3293
3294 if (iwl_is_associated(priv))
3295 spectrum.start_time =
3296 iwl_add_beacon_time(priv->last_beacon_time,
3297 add_time,
3298 le16_to_cpu(priv->rxon_timing.beacon_interval));
3299 else
3300 spectrum.start_time = 0;
3301
3302 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3303 spectrum.channels[0].channel = params->channel;
3304 spectrum.channels[0].type = type;
3305 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3306 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3307 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3308
3309 rc = iwl_send_cmd_sync(priv, &cmd);
3310 if (rc)
3311 return rc;
3312
3313 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
3314 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3315 IWL_ERROR("Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n");
3316 rc = -EIO;
3317 }
3318
3319 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3320 switch (spectrum_resp_status) {
3321 case 0: /* Command will be handled */
3322 if (res->u.spectrum.id != 0xff) {
3323 IWL_DEBUG_INFO
3324 ("Replaced existing measurement: %d\n",
3325 res->u.spectrum.id);
3326 priv->measurement_status &= ~MEASUREMENT_READY;
3327 }
3328 priv->measurement_status |= MEASUREMENT_ACTIVE;
3329 rc = 0;
3330 break;
3331
3332 case 1: /* Command will not be handled */
3333 rc = -EAGAIN;
3334 break;
3335 }
3336
3337 dev_kfree_skb_any(cmd.meta.u.skb);
3338
3339 return rc;
3340}
3341#endif
3342
3343static void iwl_txstatus_to_ieee(struct iwl_priv *priv,
3344 struct iwl_tx_info *tx_sta)
3345{
3346
3347 tx_sta->status.ack_signal = 0;
3348 tx_sta->status.excessive_retries = 0;
3349 tx_sta->status.queue_length = 0;
3350 tx_sta->status.queue_number = 0;
3351
3352 if (in_interrupt())
3353 ieee80211_tx_status_irqsafe(priv->hw,
3354 tx_sta->skb[0], &(tx_sta->status));
3355 else
3356 ieee80211_tx_status(priv->hw,
3357 tx_sta->skb[0], &(tx_sta->status));
3358
3359 tx_sta->skb[0] = NULL;
3360}
3361
3362/**
3363 * iwl_tx_queue_reclaim - Reclaim Tx queue entries no longer used by the NIC.
3364 *
3365 * When the FW advances the 'R' index, all entries between the old and
3366 * new 'R' index need to be reclaimed. As a result, some free space
3367 * forms. If there is enough free space (> low mark), wake the Tx queue.
3368 */
3369int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
3370{
3371 struct iwl_tx_queue *txq = &priv->txq[txq_id];
3372 struct iwl_queue *q = &txq->q;
3373 int nfreed = 0;
3374
3375 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3376 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3377 "is out of range [0-%d] %d %d.\n", txq_id,
3378 index, q->n_bd, q->first_empty, q->last_used);
3379 return 0;
3380 }
3381
3382 for (index = iwl_queue_inc_wrap(index, q->n_bd);
3383 q->last_used != index;
3384 q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) {
3385 if (txq_id != IWL_CMD_QUEUE_NUM) {
3386 iwl_txstatus_to_ieee(priv,
3387 &(txq->txb[txq->q.last_used]));
3388 iwl_hw_txq_free_tfd(priv, txq);
3389 } else if (nfreed > 1) {
3390 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
3391 q->first_empty, q->last_used);
3392 queue_work(priv->workqueue, &priv->restart);
3393 }
3394 nfreed++;
3395 }
3396
3397 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
3398 (txq_id != IWL_CMD_QUEUE_NUM) &&
3399 priv->mac80211_registered)
3400 ieee80211_wake_queue(priv->hw, txq_id);
3401
3402
3403 return nfreed;
3404}
3405
3406static int iwl_is_tx_success(u32 status)
3407{
3408 return (status & 0xFF) == 0x1;
3409}
3410
3411/******************************************************************************
3412 *
3413 * Generic RX handler implementations
3414 *
3415 ******************************************************************************/
3416static void iwl_rx_reply_tx(struct iwl_priv *priv,
3417 struct iwl_rx_mem_buffer *rxb)
3418{
3419 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3420 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3421 int txq_id = SEQ_TO_QUEUE(sequence);
3422 int index = SEQ_TO_INDEX(sequence);
3423 struct iwl_tx_queue *txq = &priv->txq[txq_id];
3424 struct ieee80211_tx_status *tx_status;
3425 struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
3426 u32 status = le32_to_cpu(tx_resp->status);
3427
3428 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3429 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3430 "is out of range [0-%d] %d %d\n", txq_id,
3431 index, txq->q.n_bd, txq->q.first_empty,
3432 txq->q.last_used);
3433 return;
3434 }
3435
3436 tx_status = &(txq->txb[txq->q.last_used].status);
3437
3438 tx_status->retry_count = tx_resp->failure_frame;
3439 tx_status->queue_number = status;
3440 tx_status->queue_length = tx_resp->bt_kill_count;
3441 tx_status->queue_length |= tx_resp->failure_rts;
3442
3443 tx_status->flags =
3444 iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
3445
3446 tx_status->control.tx_rate = iwl_rate_index_from_plcp(tx_resp->rate);
3447
3448 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
3449 txq_id, iwl_get_tx_fail_reason(status), status,
3450 tx_resp->rate, tx_resp->failure_frame);
3451
3452 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3453 if (index != -1)
3454 iwl_tx_queue_reclaim(priv, txq_id, index);
3455
3456 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3457 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3458}
3459
3460
3461static void iwl_rx_reply_alive(struct iwl_priv *priv,
3462 struct iwl_rx_mem_buffer *rxb)
3463{
3464 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3465 struct iwl_alive_resp *palive;
3466 struct delayed_work *pwork;
3467
3468 palive = &pkt->u.alive_frame;
3469
3470 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3471 "0x%01X 0x%01X\n",
3472 palive->is_valid, palive->ver_type,
3473 palive->ver_subtype);
3474
3475 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3476 IWL_DEBUG_INFO("Initialization Alive received.\n");
3477 memcpy(&priv->card_alive_init,
3478 &pkt->u.alive_frame,
3479 sizeof(struct iwl_init_alive_resp));
3480 pwork = &priv->init_alive_start;
3481 } else {
3482 IWL_DEBUG_INFO("Runtime Alive received.\n");
3483 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3484 sizeof(struct iwl_alive_resp));
3485 pwork = &priv->alive_start;
3486 iwl_disable_events(priv);
3487 }
3488
3489 /* We delay the ALIVE response by 5ms to
3490 * give the HW RF Kill time to activate... */
3491 if (palive->is_valid == UCODE_VALID_OK)
3492 queue_delayed_work(priv->workqueue, pwork,
3493 msecs_to_jiffies(5));
3494 else
3495 IWL_WARNING("uCode did not respond OK.\n");
3496}
3497
3498static void iwl_rx_reply_add_sta(struct iwl_priv *priv,
3499 struct iwl_rx_mem_buffer *rxb)
3500{
3501 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3502
3503 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3504 return;
3505}
3506
3507static void iwl_rx_reply_error(struct iwl_priv *priv,
3508 struct iwl_rx_mem_buffer *rxb)
3509{
3510 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3511
3512 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3513 "seq 0x%04X ser 0x%08X\n",
3514 le32_to_cpu(pkt->u.err_resp.error_type),
3515 get_cmd_string(pkt->u.err_resp.cmd_id),
3516 pkt->u.err_resp.cmd_id,
3517 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3518 le32_to_cpu(pkt->u.err_resp.error_info));
3519}
3520
3522
3523static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
3524{
3525 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3526 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
3527 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
3528 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3529 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3530 rxon->channel = csa->channel;
3531 priv->staging_rxon.channel = csa->channel;
3532}
3533
3534static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
3535 struct iwl_rx_mem_buffer *rxb)
3536{
3537#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
3538 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3539 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
3540
3541 if (!report->state) {
3542 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3543 "Spectrum Measure Notification: Start\n");
3544 return;
3545 }
3546
3547 memcpy(&priv->measure_report, report, sizeof(*report));
3548 priv->measurement_status |= MEASUREMENT_READY;
3549#endif
3550}
3551
3552static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
3553 struct iwl_rx_mem_buffer *rxb)
3554{
3555#ifdef CONFIG_IWLWIFI_DEBUG
3556 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3557 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
3558 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3559 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3560#endif
3561}
3562
3563static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3564 struct iwl_rx_mem_buffer *rxb)
3565{
3566 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3567 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3568 "notification for %s:\n",
3569 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3570 iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
3571}
3572
3573static void iwl_bg_beacon_update(struct work_struct *work)
3574{
3575 struct iwl_priv *priv =
3576 container_of(work, struct iwl_priv, beacon_update);
3577 struct sk_buff *beacon;
3578
3579 /* Pull the updated AP beacon from mac80211; this will fail if not in AP mode */
3580 beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL);
3581
3582 if (!beacon) {
3583 IWL_ERROR("update beacon failed\n");
3584 return;
3585 }
3586
3587 mutex_lock(&priv->mutex);
 3588	/* A new beacon skb is allocated every time; dispose of the previous one. */
3589 if (priv->ibss_beacon)
3590 dev_kfree_skb(priv->ibss_beacon);
3591
3592 priv->ibss_beacon = beacon;
3593 mutex_unlock(&priv->mutex);
3594
3595 iwl_send_beacon_cmd(priv);
3596}
3597
3598static void iwl_rx_beacon_notif(struct iwl_priv *priv,
3599 struct iwl_rx_mem_buffer *rxb)
3600{
3601#ifdef CONFIG_IWLWIFI_DEBUG
3602 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3603 struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status);
3604 u8 rate = beacon->beacon_notify_hdr.rate;
3605
3606 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3607 "tsf %d %d rate %d\n",
3608 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3609 beacon->beacon_notify_hdr.failure_frame,
3610 le32_to_cpu(beacon->ibss_mgr_status),
3611 le32_to_cpu(beacon->high_tsf),
3612 le32_to_cpu(beacon->low_tsf), rate);
3613#endif
3614
3615 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
3616 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3617 queue_work(priv->workqueue, &priv->beacon_update);
3618}
3619
3620/* Service response to REPLY_SCAN_CMD (0x80) */
3621static void iwl_rx_reply_scan(struct iwl_priv *priv,
3622 struct iwl_rx_mem_buffer *rxb)
3623{
3624#ifdef CONFIG_IWLWIFI_DEBUG
3625 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3626 struct iwl_scanreq_notification *notif =
3627 (struct iwl_scanreq_notification *)pkt->u.raw;
3628
3629 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3630#endif
3631}
3632
3633/* Service SCAN_START_NOTIFICATION (0x82) */
3634static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
3635 struct iwl_rx_mem_buffer *rxb)
3636{
3637 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3638 struct iwl_scanstart_notification *notif =
3639 (struct iwl_scanstart_notification *)pkt->u.raw;
3640 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3641 IWL_DEBUG_SCAN("Scan start: "
3642 "%d [802.11%s] "
3643 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3644 notif->channel,
3645 notif->band ? "bg" : "a",
3646 notif->tsf_high,
3647 notif->tsf_low, notif->status, notif->beacon_timer);
3648}
3649
3650/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3651static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
3652 struct iwl_rx_mem_buffer *rxb)
3653{
3654 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3655 struct iwl_scanresults_notification *notif =
3656 (struct iwl_scanresults_notification *)pkt->u.raw;
3657
3658 IWL_DEBUG_SCAN("Scan ch.res: "
3659 "%d [802.11%s] "
3660 "(TSF: 0x%08X:%08X) - %d "
3661 "elapsed=%lu usec (%dms since last)\n",
3662 notif->channel,
3663 notif->band ? "bg" : "a",
3664 le32_to_cpu(notif->tsf_high),
3665 le32_to_cpu(notif->tsf_low),
3666 le32_to_cpu(notif->statistics[0]),
3667 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3668 jiffies_to_msecs(elapsed_jiffies
3669 (priv->last_scan_jiffies, jiffies)));
3670
3671 priv->last_scan_jiffies = jiffies;
3672}
3673
3674/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
3675static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
3676 struct iwl_rx_mem_buffer *rxb)
3677{
3678 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3679 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
3680
3681 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3682 scan_notif->scanned_channels,
3683 scan_notif->tsf_low,
3684 scan_notif->tsf_high, scan_notif->status);
3685
3686 /* The HW is no longer scanning */
3687 clear_bit(STATUS_SCAN_HW, &priv->status);
3688
3689 /* The scan completion notification came in, so kill that timer... */
3690 cancel_delayed_work(&priv->scan_check);
3691
3692 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
3693 (priv->scan_bands == 2) ? "2.4" : "5.2",
3694 jiffies_to_msecs(elapsed_jiffies
3695 (priv->scan_pass_start, jiffies)));
3696
3697 /* Remove this scanned band from the list
3698 * of pending bands to scan */
3699 priv->scan_bands--;
3700
3701 /* If a request to abort was given, or the scan did not succeed
3702 * then we reset the scan state machine and terminate,
3703 * re-queuing another scan if one has been requested */
3704 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3705 IWL_DEBUG_INFO("Aborted scan completed.\n");
3706 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
3707 } else {
3708 /* If there are more bands on this scan pass reschedule */
3709 if (priv->scan_bands > 0)
3710 goto reschedule;
3711 }
3712
3713 priv->last_scan_jiffies = jiffies;
3714 IWL_DEBUG_INFO("Setting scan to off\n");
3715
3716 clear_bit(STATUS_SCANNING, &priv->status);
3717
3718 IWL_DEBUG_INFO("Scan took %dms\n",
3719 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
3720
3721 queue_work(priv->workqueue, &priv->scan_completed);
3722
3723 return;
3724
3725reschedule:
3726 priv->scan_pass_start = jiffies;
3727 queue_work(priv->workqueue, &priv->request_scan);
3728}
3729
3730/* Handle notification from uCode that card's power state is changing
3731 * due to software, hardware, or critical temperature RFKILL */
3732static void iwl_rx_card_state_notif(struct iwl_priv *priv,
3733 struct iwl_rx_mem_buffer *rxb)
3734{
3735 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3736 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
3737 unsigned long status = priv->status;
3738
3739 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
3740 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
3741 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
3742
3743 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
3744 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3745
3746 if (flags & HW_CARD_DISABLED)
3747 set_bit(STATUS_RF_KILL_HW, &priv->status);
3748 else
3749 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3750
3751
3752 if (flags & SW_CARD_DISABLED)
3753 set_bit(STATUS_RF_KILL_SW, &priv->status);
3754 else
3755 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3756
3757 iwl_scan_cancel(priv);
3758
3759 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
3760 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
3761 (test_bit(STATUS_RF_KILL_SW, &status) !=
3762 test_bit(STATUS_RF_KILL_SW, &priv->status)))
3763 queue_work(priv->workqueue, &priv->rf_kill);
3764 else
3765 wake_up_interruptible(&priv->wait_command_queue);
3766}
3767
3768/**
3769 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
3770 *
 3771 * Set up the RX handlers for each of the reply types sent from the uCode
3772 * to the host.
3773 *
 3774 * This function chains into the hardware-specific files so they can set up
 3775 * any hardware-specific handlers as well.
3776 */
3777static void iwl_setup_rx_handlers(struct iwl_priv *priv)
3778{
3779 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
3780 priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta;
3781 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
3782 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
3783 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
3784 iwl_rx_spectrum_measure_notif;
3785 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
3786 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
3787 iwl_rx_pm_debug_statistics_notif;
3788 priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
3789
 3790	/* NOTE: iwl_hw_rx_statistics differs depending on whether
 3791	 * the build is for the 3945 or the 4965.  See the
3792 * corresponding implementation in iwl-XXXX.c
3793 *
3794 * The same handler is used for both the REPLY to a
3795 * discrete statistics request from the host as well as
3796 * for the periodic statistics notification from the uCode
3797 */
3798 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics;
3799 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics;
3800
3801 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
3802 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
3803 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
3804 iwl_rx_scan_results_notif;
3805 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
3806 iwl_rx_scan_complete_notif;
3807 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
3808 priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx;
3809
3810 /* Setup hardware specific Rx handlers */
3811 iwl_hw_rx_handler_setup(priv);
3812}
3813
3814/**
3815 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3816 * @rxb: Rx buffer to reclaim
3817 *
 3818 * If an Rx buffer has an async callback associated with it, the callback
 3819 * will be executed. The attached skb (if present) will only be freed
 3820 * if the callback returns 1 (see the illustrative sketch after this function).
3821 */
3822static void iwl_tx_cmd_complete(struct iwl_priv *priv,
3823 struct iwl_rx_mem_buffer *rxb)
3824{
3825 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3826 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3827 int txq_id = SEQ_TO_QUEUE(sequence);
3828 int index = SEQ_TO_INDEX(sequence);
3829 int huge = sequence & SEQ_HUGE_FRAME;
3830 int cmd_index;
3831 struct iwl_cmd *cmd;
3832
3833 /* If a Tx command is being handled and it isn't in the actual
 3834	 * command queue, then a command routing bug has been introduced
3835 * in the queue management code. */
3836 if (txq_id != IWL_CMD_QUEUE_NUM)
3837 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
3838 txq_id, pkt->hdr.cmd);
3839 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3840
3841 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3842 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3843
3844 /* Input error checking is done when commands are added to queue. */
3845 if (cmd->meta.flags & CMD_WANT_SKB) {
3846 cmd->meta.source->u.skb = rxb->skb;
3847 rxb->skb = NULL;
3848 } else if (cmd->meta.u.callback &&
3849 !cmd->meta.u.callback(priv, cmd, rxb->skb))
3850 rxb->skb = NULL;
3851
3852 iwl_tx_queue_reclaim(priv, txq_id, index);
3853
3854 if (!(cmd->meta.flags & CMD_ASYNC)) {
3855 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3856 wake_up_interruptible(&priv->wait_command_queue);
3857 }
3858}
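/*
 * Illustrative sketch, not part of the driver: the callback convention
 * handled above.  A CMD_ASYNC host command may attach a completion callback
 * in cmd->meta.u.callback; iwl_tx_cmd_complete() invokes it with the
 * response skb and frees that skb only if the callback returns 1.  Assuming
 * the callback takes (priv, cmd, skb) as invoked above, a minimal handler
 * that logs the response status and lets the skb be freed might look like:
 *
 *	static int example_async_callback(struct iwl_priv *priv,
 *					  struct iwl_cmd *cmd,
 *					  struct sk_buff *skb)
 *	{
 *		struct iwl_rx_packet *res = (struct iwl_rx_packet *)skb->data;
 *
 *		IWL_DEBUG_RX("cmd 0x%02x done, status 0x%02x\n",
 *			     res->hdr.cmd, res->u.status);
 *		return 1;
 *	}
 *
 * Returning 1 lets iwl_rx_handle() free the skb afterwards; returning 0
 * instead marks the skb as consumed (rxb->skb is set to NULL above), so
 * iwl_rx_handle() will not free it.
 */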
3859
3860/************************** RX-FUNCTIONS ****************************/
3861/*
3862 * Rx theory of operation
3863 *
3864 * The host allocates 32 DMA target addresses and passes the host address
3865 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3866 * 0 to 31
3867 *
3868 * Rx Queue Indexes
3869 * The host/firmware share two index registers for managing the Rx buffers.
3870 *
3871 * The READ index maps to the first position that the firmware may be writing
3872 * to -- the driver can read up to (but not including) this position and get
3873 * good data.
3874 * The READ index is managed by the firmware once the card is enabled.
3875 *
3876 * The WRITE index maps to the last position the driver has read from -- the
3877 * position preceding WRITE is the last slot the firmware can place a packet.
3878 *
3879 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3880 * WRITE = READ.
3881 *
3882 * During initialization the host sets up the READ queue position to the first
3883 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3884 *
3885 * When the firmware places a packet in a buffer it will advance the READ index
3886 * and fire the RX interrupt. The driver can then query the READ index and
3887 * process as many packets as possible, moving the WRITE index forward as it
3888 * resets the Rx queue buffers with new memory.
3889 *
3890 * The management in the driver is as follows:
3891 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
3892 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 3893 * to replenish the iwl->rxq->rx_free.
3894 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
3895 * iwl->rxq is replenished and the READ INDEX is updated (updating the
3896 * 'processed' and 'read' driver indexes as well)
3897 * + A received packet is processed and handed to the kernel network stack,
3898 * detached from the iwl->rxq. The driver 'processed' index is updated.
3899 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
3900 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
3901 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
3902 * were enough free buffers and RX_STALLED is set it is cleared.
3903 *
3904 *
3905 * Driver sequence:
3906 *
3907 * iwl_rx_queue_alloc() Allocates rx_free
3908 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
3909 * iwl_rx_queue_restock
3910 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
3911 * queue, updates firmware pointers, and updates
3912 * the WRITE index. If insufficient rx_free buffers
3913 * are available, schedules iwl_rx_replenish
3914 *
3915 * -- enable interrupts --
3916 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
3917 * READ INDEX, detaching the SKB from the pool.
3918 * Moves the packet buffer from queue to rx_used.
3919 * Calls iwl_rx_queue_restock to refill any empty
3920 * slots.
3921 * ...
3922 *
3923 */
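/*
 * Worked example of the index arithmetic described above, assuming the
 * usual 256-entry ring (RX_QUEUE_SIZE == 256):
 *
 *   read == 5,  write == 250:  s = 5 - 250 = -245; s += 256 -> 11;
 *                              s -= 2 -> 9 slots may be restocked.
 *   write == read - 1 (the "empty" case above):  s = 1; s -= 2 -> -1,
 *                              clamped to 0, so nothing can be restocked.
 *
 * The two slots held back by the "s -= 2" in iwl_rx_queue_space() below
 * keep a completely full ring from looking identical to an empty one.
 */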
3924
3925/**
3926 * iwl_rx_queue_space - Return number of free slots available in queue.
3927 */
3928static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
3929{
3930 int s = q->read - q->write;
3931 if (s <= 0)
3932 s += RX_QUEUE_SIZE;
 3933	/* keep two slots free so a full queue is not confused with an empty one */
3934 s -= 2;
3935 if (s < 0)
3936 s = 0;
3937 return s;
3938}
3939
3940/**
3941 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
3942 *
3943 * NOTE: This function has 3945 and 4965 specific code sections
3944 * but is declared in base due to the majority of the
3945 * implementation being the same (only a numeric constant is
3946 * different)
3947 *
3948 */
3949int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
3950{
3951 u32 reg = 0;
3952 int rc = 0;
3953 unsigned long flags;
3954
3955 spin_lock_irqsave(&q->lock, flags);
3956
3957 if (q->need_update == 0)
3958 goto exit_unlock;
3959
3960 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3961 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
3962
3963 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3964 iwl_set_bit(priv, CSR_GP_CNTRL,
3965 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3966 goto exit_unlock;
3967 }
3968
3969 rc = iwl_grab_restricted_access(priv);
3970 if (rc)
3971 goto exit_unlock;
3972
3973 iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR,
3974 q->write & ~0x7);
3975 iwl_release_restricted_access(priv);
3976 } else
3977 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
3978
3979
3980 q->need_update = 0;
3981
3982 exit_unlock:
3983 spin_unlock_irqrestore(&q->lock, flags);
3984 return rc;
3985}
3986
3987/**
3988 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer.
3989 *
3990 * NOTE: This function has 3945 and 4965 specific code paths in it.
3991 */
3992static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
3993 dma_addr_t dma_addr)
3994{
3995 return cpu_to_le32((u32)dma_addr);
3996}
3997
3998/**
3999 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
4000 *
4001 * If there are slots in the RX queue that need to be restocked,
4002 * and we have free pre-allocated buffers, fill the ranks as much
4003 * as we can pulling from rx_free.
4004 *
4005 * This moves the 'write' index forward to catch up with 'processed', and
4006 * also updates the memory address in the firmware to reference the new
4007 * target buffer.
4008 */
4009int iwl_rx_queue_restock(struct iwl_priv *priv)
4010{
4011 struct iwl_rx_queue *rxq = &priv->rxq;
4012 struct list_head *element;
4013 struct iwl_rx_mem_buffer *rxb;
4014 unsigned long flags;
4015 int write, rc;
4016
4017 spin_lock_irqsave(&rxq->lock, flags);
4018 write = rxq->write & ~0x7;
4019 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
4020 element = rxq->rx_free.next;
4021 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4022 list_del(element);
4023 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
4024 rxq->queue[rxq->write] = rxb;
4025 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4026 rxq->free_count--;
4027 }
4028 spin_unlock_irqrestore(&rxq->lock, flags);
4029 /* If the pre-allocated buffer pool is dropping low, schedule to
4030 * refill it */
4031 if (rxq->free_count <= RX_LOW_WATERMARK)
4032 queue_work(priv->workqueue, &priv->rx_replenish);
4033
4034
4035 /* If we've added more space for the firmware to place data, tell it */
4036 if ((write != (rxq->write & ~0x7))
4037 || (abs(rxq->write - rxq->read) > 7)) {
4038 spin_lock_irqsave(&rxq->lock, flags);
4039 rxq->need_update = 1;
4040 spin_unlock_irqrestore(&rxq->lock, flags);
4041 rc = iwl_rx_queue_update_write_ptr(priv, rxq);
4042 if (rc)
4043 return rc;
4044 }
4045
4046 return 0;
4047}
4048
4049/**
 4050 * iwl_rx_replenish - Move all used packets from rx_used to rx_free
4051 *
4052 * When moving to rx_free an SKB is allocated for the slot.
4053 *
4054 * Also restock the Rx queue via iwl_rx_queue_restock.
 4055 * This is called as a scheduled work item (except during initialization)
4056 */
4057void iwl_rx_replenish(void *data)
4058{
4059 struct iwl_priv *priv = data;
4060 struct iwl_rx_queue *rxq = &priv->rxq;
4061 struct list_head *element;
4062 struct iwl_rx_mem_buffer *rxb;
4063 unsigned long flags;
4064 spin_lock_irqsave(&rxq->lock, flags);
4065 while (!list_empty(&rxq->rx_used)) {
4066 element = rxq->rx_used.next;
4067 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4068 rxb->skb =
4069 alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
4070 if (!rxb->skb) {
4071 if (net_ratelimit())
4072 printk(KERN_CRIT DRV_NAME
4073 ": Can not allocate SKB buffers\n");
4074 /* We don't reschedule replenish work here -- we will
4075 * call the restock method and if it still needs
4076 * more buffers it will schedule replenish */
4077 break;
4078 }
4079 priv->alloc_rxb_skb++;
4080 list_del(element);
4081 rxb->dma_addr =
4082 pci_map_single(priv->pci_dev, rxb->skb->data,
4083 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4084 list_add_tail(&rxb->list, &rxq->rx_free);
4085 rxq->free_count++;
4086 }
4087 spin_unlock_irqrestore(&rxq->lock, flags);
4088
4089 spin_lock_irqsave(&priv->lock, flags);
4090 iwl_rx_queue_restock(priv);
4091 spin_unlock_irqrestore(&priv->lock, flags);
4092}
4093
4094/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 4095 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 4096 * This free routine walks the list of POOL entries; if the SKB is set to
 4097 * non-NULL, it is unmapped and freed.
4098 */
4099void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
4100{
4101 int i;
4102 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4103 if (rxq->pool[i].skb != NULL) {
4104 pci_unmap_single(priv->pci_dev,
4105 rxq->pool[i].dma_addr,
4106 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4107 dev_kfree_skb(rxq->pool[i].skb);
4108 }
4109 }
4110
4111 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4112 rxq->dma_addr);
4113 rxq->bd = NULL;
4114}
4115
4116int iwl_rx_queue_alloc(struct iwl_priv *priv)
4117{
4118 struct iwl_rx_queue *rxq = &priv->rxq;
4119 struct pci_dev *dev = priv->pci_dev;
4120 int i;
4121
4122 spin_lock_init(&rxq->lock);
4123 INIT_LIST_HEAD(&rxq->rx_free);
4124 INIT_LIST_HEAD(&rxq->rx_used);
4125 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4126 if (!rxq->bd)
4127 return -ENOMEM;
4128 /* Fill the rx_used queue with _all_ of the Rx buffers */
4129 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4130 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4131 /* Set us so that we have processed and used all buffers, but have
4132 * not restocked the Rx queue with fresh buffers */
4133 rxq->read = rxq->write = 0;
4134 rxq->free_count = 0;
4135 rxq->need_update = 0;
4136 return 0;
4137}
4138
4139void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
4140{
4141 unsigned long flags;
4142 int i;
4143 spin_lock_irqsave(&rxq->lock, flags);
4144 INIT_LIST_HEAD(&rxq->rx_free);
4145 INIT_LIST_HEAD(&rxq->rx_used);
4146 /* Fill the rx_used queue with _all_ of the Rx buffers */
4147 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4148 /* In the reset function, these buffers may have been allocated
4149 * to an SKB, so we need to unmap and free potential storage */
4150 if (rxq->pool[i].skb != NULL) {
4151 pci_unmap_single(priv->pci_dev,
4152 rxq->pool[i].dma_addr,
4153 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4154 priv->alloc_rxb_skb--;
4155 dev_kfree_skb(rxq->pool[i].skb);
4156 rxq->pool[i].skb = NULL;
4157 }
4158 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4159 }
4160
4161 /* Set us so that we have processed and used all buffers, but have
4162 * not restocked the Rx queue with fresh buffers */
4163 rxq->read = rxq->write = 0;
4164 rxq->free_count = 0;
4165 spin_unlock_irqrestore(&rxq->lock, flags);
4166}
4167
4168/* Convert linear signal-to-noise ratio into dB */
4169static u8 ratio2dB[100] = {
4170/* 0 1 2 3 4 5 6 7 8 9 */
4171 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4172 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4173 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4174 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4175 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4176 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4177 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4178 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4179 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4180 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
4181};
4182
4183/* Calculates a relative dB value from a ratio of linear
4184 * (i.e. not dB) signal levels.
4185 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
4186int iwl_calc_db_from_ratio(int sig_ratio)
4187{
4188 /* Anything above 1000:1 just report as 60 dB */
4189 if (sig_ratio > 1000)
4190 return 60;
4191
4192 /* Above 100:1, divide by 10 and use table,
4193 * add 20 dB to make up for divide by 10 */
4194 if (sig_ratio > 100)
4195 return (20 + (int)ratio2dB[sig_ratio/10]);
4196
4197 /* We shouldn't see this */
4198 if (sig_ratio < 1)
4199 return 0;
4200
4201 /* Use table for ratios 1:1 - 99:1 */
4202 return (int)ratio2dB[sig_ratio];
4203}
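/*
 * Worked examples for the conversion above (dB = 20 * log10(ratio) for
 * voltage ratios):
 *
 *   ratio   10:1  ->  ratio2dB[10] = 20 dB    (20 * log10(10)  = 20.0)
 *   ratio   99:1  ->  ratio2dB[99] = 40 dB    (20 * log10(99)  = 39.9)
 *   ratio  500:1  ->  divide by 10 and add 20: 20 + ratio2dB[50]
 *                     = 20 + 34 = 54 dB       (20 * log10(500) = 54.0)
 *   ratios above 1000:1 are simply reported as 60 dB.
 */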
4204
4205#define PERFECT_RSSI (-20) /* dBm */
4206#define WORST_RSSI (-95) /* dBm */
4207#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4208
4209/* Calculate an indication of rx signal quality (a percentage, not dBm!).
4210 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4211 * about formulas used below. */
4212int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
4213{
4214 int sig_qual;
4215 int degradation = PERFECT_RSSI - rssi_dbm;
4216
4217 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4218 * as indicator; formula is (signal dbm - noise dbm).
4219 * SNR at or above 40 is a great signal (100%).
4220 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4221 * Weakest usable signal is usually 10 - 15 dB SNR. */
4222 if (noise_dbm) {
4223 if (rssi_dbm - noise_dbm >= 40)
4224 return 100;
4225 else if (rssi_dbm < noise_dbm)
4226 return 0;
4227 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4228
4229 /* Else use just the signal level.
4230 * This formula is a least squares fit of data points collected and
4231 * compared with a reference system that had a percentage (%) display
4232 * for signal quality. */
4233 } else
4234 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4235 (15 * RSSI_RANGE + 62 * degradation)) /
4236 (RSSI_RANGE * RSSI_RANGE);
4237
4238 if (sig_qual > 100)
4239 sig_qual = 100;
4240 else if (sig_qual < 1)
4241 sig_qual = 0;
4242
4243 return sig_qual;
4244}
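/*
 * Worked examples for the quality calculation above:
 *
 *   With a noise measurement, SNR scales linearly to a percentage:
 *     rssi -60 dBm, noise -90 dBm  ->  SNR 30 dB  ->  (30 * 5) / 2 = 75%.
 *     SNR of 40 dB or better reports 100%; rssi below the noise floor, 0%.
 *
 *   Without a noise measurement, only the signal level is used, e.g.
 *     rssi -50 dBm  ->  degradation = -20 - (-50) = 30, so
 *     (100 * 75 * 75 - 30 * (15 * 75 + 62 * 30)) / (75 * 75) = 84%
 *     (RSSI_RANGE is -20 - (-95) = 75).
 */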
4245
4246/**
4247 * iwl_rx_handle - Main entry function for receiving responses from the uCode
4248 *
4249 * Uses the priv->rx_handlers callback function array to invoke
4250 * the appropriate handlers, including command responses,
4251 * frame-received notifications, and other notifications.
4252 */
4253static void iwl_rx_handle(struct iwl_priv *priv)
4254{
4255 struct iwl_rx_mem_buffer *rxb;
4256 struct iwl_rx_packet *pkt;
4257 struct iwl_rx_queue *rxq = &priv->rxq;
4258 u32 r, i;
4259 int reclaim;
4260 unsigned long flags;
4261
4262 r = iwl_hw_get_rx_read(priv);
4263 i = rxq->read;
4264
4265 /* Rx interrupt, but nothing sent from uCode */
4266 if (i == r)
4267 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4268
4269 while (i != r) {
4270 rxb = rxq->queue[i];
4271
4272 /* If an RXB doesn't have a queue slot associated with it
4273 * then a bug has been introduced in the queue refilling
4274 * routines -- catch it here */
4275 BUG_ON(rxb == NULL);
4276
4277 rxq->queue[i] = NULL;
4278
4279 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4280 IWL_RX_BUF_SIZE,
4281 PCI_DMA_FROMDEVICE);
4282 pkt = (struct iwl_rx_packet *)rxb->skb->data;
4283
4284 /* Reclaim a command buffer only if this packet is a response
4285 * to a (driver-originated) command.
4286 * If the packet (e.g. Rx frame) originated from uCode,
4287 * there is no command buffer to reclaim.
4288 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4289 * but apparently a few don't get set; catch them here. */
4290 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4291 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4292 (pkt->hdr.cmd != REPLY_TX);
4293
4294 /* Based on type of command response or notification,
4295 * handle those that need handling via function in
4296 * rx_handlers table. See iwl_setup_rx_handlers() */
4297 if (priv->rx_handlers[pkt->hdr.cmd]) {
4298 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4299 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4300 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4301 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4302 } else {
4303 /* No handling needed */
4304 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4305 "r %d i %d No handler needed for %s, 0x%02x\n",
4306 r, i, get_cmd_string(pkt->hdr.cmd),
4307 pkt->hdr.cmd);
4308 }
4309
4310 if (reclaim) {
4311 /* Invoke any callbacks, transfer the skb to caller,
4312 * and fire off the (possibly) blocking iwl_send_cmd()
4313 * as we reclaim the driver command queue */
4314 if (rxb && rxb->skb)
4315 iwl_tx_cmd_complete(priv, rxb);
4316 else
4317 IWL_WARNING("Claim null rxb?\n");
4318 }
4319
4320 /* For now we just don't re-use anything. We can tweak this
4321 * later to try and re-use notification packets and SKBs that
4322 * fail to Rx correctly */
4323 if (rxb->skb != NULL) {
4324 priv->alloc_rxb_skb--;
4325 dev_kfree_skb_any(rxb->skb);
4326 rxb->skb = NULL;
4327 }
4328
4329 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
4330 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4331 spin_lock_irqsave(&rxq->lock, flags);
4332 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4333 spin_unlock_irqrestore(&rxq->lock, flags);
4334 i = (i + 1) & RX_QUEUE_MASK;
4335 }
4336
 4337	/* Update the driver's read index to the first unprocessed entry */
4338 priv->rxq.read = i;
4339 iwl_rx_queue_restock(priv);
4340}
4341
4342int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv,
4343 struct iwl_tx_queue *txq)
4344{
4345 u32 reg = 0;
4346 int rc = 0;
4347 int txq_id = txq->q.id;
4348
4349 if (txq->need_update == 0)
4350 return rc;
4351
4352 /* if we're trying to save power */
4353 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4354 /* wake up nic if it's powered down ...
4355 * uCode will wake up, and interrupt us again, so next
4356 * time we'll skip this part. */
4357 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4358
4359 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4360 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
4361 iwl_set_bit(priv, CSR_GP_CNTRL,
4362 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4363 return rc;
4364 }
4365
4366 /* restore this queue's parameters in nic hardware. */
4367 rc = iwl_grab_restricted_access(priv);
4368 if (rc)
4369 return rc;
4370 iwl_write_restricted(priv, HBUS_TARG_WRPTR,
4371 txq->q.first_empty | (txq_id << 8));
4372 iwl_release_restricted_access(priv);
4373
4374 /* else not in power-save mode, uCode will never sleep when we're
4375 * trying to tx (during RFKILL, we're not trying to tx). */
4376 } else
4377 iwl_write32(priv, HBUS_TARG_WRPTR,
4378 txq->q.first_empty | (txq_id << 8));
4379
4380 txq->need_update = 0;
4381
4382 return rc;
4383}
4384
4385#ifdef CONFIG_IWLWIFI_DEBUG
4386static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon)
4387{
4388 IWL_DEBUG_RADIO("RX CONFIG:\n");
4389 iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4390 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4391 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4392 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4393 le32_to_cpu(rxon->filter_flags));
4394 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4395 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4396 rxon->ofdm_basic_rates);
4397 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
4398 IWL_DEBUG_RADIO("u8[6] node_addr: " MAC_FMT "\n",
4399 MAC_ARG(rxon->node_addr));
4400 IWL_DEBUG_RADIO("u8[6] bssid_addr: " MAC_FMT "\n",
4401 MAC_ARG(rxon->bssid_addr));
4402 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4403}
4404#endif
4405
4406static void iwl_enable_interrupts(struct iwl_priv *priv)
4407{
4408 IWL_DEBUG_ISR("Enabling interrupts\n");
4409 set_bit(STATUS_INT_ENABLED, &priv->status);
4410 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
4411}
4412
4413static inline void iwl_disable_interrupts(struct iwl_priv *priv)
4414{
4415 clear_bit(STATUS_INT_ENABLED, &priv->status);
4416
4417 /* disable interrupts from uCode/NIC to host */
4418 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
4419
4420 /* acknowledge/clear/reset any interrupts still pending
4421 * from uCode or flow handler (Rx/Tx DMA) */
4422 iwl_write32(priv, CSR_INT, 0xffffffff);
4423 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
4424 IWL_DEBUG_ISR("Disabled interrupts\n");
4425}
4426
4427static const char *desc_lookup(int i)
4428{
4429 switch (i) {
4430 case 1:
4431 return "FAIL";
4432 case 2:
4433 return "BAD_PARAM";
4434 case 3:
4435 return "BAD_CHECKSUM";
4436 case 4:
4437 return "NMI_INTERRUPT";
4438 case 5:
4439 return "SYSASSERT";
4440 case 6:
4441 return "FATAL_ERROR";
4442 }
4443
4444 return "UNKNOWN";
4445}
4446
4447#define ERROR_START_OFFSET (1 * sizeof(u32))
4448#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4449
4450static void iwl_dump_nic_error_log(struct iwl_priv *priv)
4451{
4452 u32 i;
4453 u32 desc, time, count, base, data1;
4454 u32 blink1, blink2, ilink1, ilink2;
4455 int rc;
4456
4457 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4458
4459 if (!iwl_hw_valid_rtc_data_addr(base)) {
4460 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4461 return;
4462 }
4463
4464 rc = iwl_grab_restricted_access(priv);
4465 if (rc) {
4466 IWL_WARNING("Can not read from adapter at this time.\n");
4467 return;
4468 }
4469
4470 count = iwl_read_restricted_mem(priv, base);
4471
4472 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4473 IWL_ERROR("Start IWL Error Log Dump:\n");
4474 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4475 priv->status, priv->config, count);
4476 }
4477
4478 IWL_ERROR("Desc Time asrtPC blink2 "
4479 "ilink1 nmiPC Line\n");
4480 for (i = ERROR_START_OFFSET;
4481 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
4482 i += ERROR_ELEM_SIZE) {
4483 desc = iwl_read_restricted_mem(priv, base + i);
4484 time =
4485 iwl_read_restricted_mem(priv, base + i + 1 * sizeof(u32));
4486 blink1 =
4487 iwl_read_restricted_mem(priv, base + i + 2 * sizeof(u32));
4488 blink2 =
4489 iwl_read_restricted_mem(priv, base + i + 3 * sizeof(u32));
4490 ilink1 =
4491 iwl_read_restricted_mem(priv, base + i + 4 * sizeof(u32));
4492 ilink2 =
4493 iwl_read_restricted_mem(priv, base + i + 5 * sizeof(u32));
4494 data1 =
4495 iwl_read_restricted_mem(priv, base + i + 6 * sizeof(u32));
4496
4497 IWL_ERROR
4498 ("%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
4499 desc_lookup(desc), desc, time, blink1, blink2,
4500 ilink1, ilink2, data1);
4501 }
4502
4503 iwl_release_restricted_access(priv);
4504
4505}
4506
4507#define EVENT_START_OFFSET (4 * sizeof(u32))
4508
4509/**
4510 * iwl_print_event_log - Dump error event log to syslog
4511 *
4512 * NOTE: Must be called with iwl_grab_restricted_access() already obtained!
4513 */
4514static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
4515 u32 num_events, u32 mode)
4516{
4517 u32 i;
4518 u32 base; /* SRAM byte address of event log header */
4519 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4520 u32 ptr; /* SRAM byte address of log data */
4521 u32 ev, time, data; /* event log data */
4522
4523 if (num_events == 0)
4524 return;
4525
4526 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4527
4528 if (mode == 0)
4529 event_size = 2 * sizeof(u32);
4530 else
4531 event_size = 3 * sizeof(u32);
4532
4533 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4534
4535 /* "time" is actually "data" for mode 0 (no timestamp).
4536 * place event id # at far right for easier visual parsing. */
4537 for (i = 0; i < num_events; i++) {
4538 ev = iwl_read_restricted_mem(priv, ptr);
4539 ptr += sizeof(u32);
4540 time = iwl_read_restricted_mem(priv, ptr);
4541 ptr += sizeof(u32);
4542 if (mode == 0)
4543 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4544 else {
4545 data = iwl_read_restricted_mem(priv, ptr);
4546 ptr += sizeof(u32);
4547 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4548 }
4549 }
4550}
4551
4552static void iwl_dump_nic_event_log(struct iwl_priv *priv)
4553{
4554 int rc;
4555 u32 base; /* SRAM byte address of event log header */
4556 u32 capacity; /* event log capacity in # entries */
4557 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4558 u32 num_wraps; /* # times uCode wrapped to top of log */
4559 u32 next_entry; /* index of next entry to be written by uCode */
4560 u32 size; /* # entries that we'll print */
4561
4562 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4563 if (!iwl_hw_valid_rtc_data_addr(base)) {
4564 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4565 return;
4566 }
4567
4568 rc = iwl_grab_restricted_access(priv);
4569 if (rc) {
4570 IWL_WARNING("Can not read from adapter at this time.\n");
4571 return;
4572 }
4573
4574 /* event log header */
4575 capacity = iwl_read_restricted_mem(priv, base);
4576 mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32)));
4577 num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32)));
4578 next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32)));
4579
4580 size = num_wraps ? capacity : next_entry;
4581
4582 /* bail out if nothing in log */
4583 if (size == 0) {
4584 IWL_ERROR("Start IPW Event Log Dump: nothing in log\n");
4585 iwl_release_restricted_access(priv);
4586 return;
4587 }
4588
4589 IWL_ERROR("Start IPW Event Log Dump: display count %d, wraps %d\n",
4590 size, num_wraps);
4591
4592 /* if uCode has wrapped back to top of log, start at the oldest entry,
 4593	 * i.e. the next one that uCode would fill. */
4594 if (num_wraps)
4595 iwl_print_event_log(priv, next_entry,
4596 capacity - next_entry, mode);
4597
4598 /* (then/else) start at top of log */
4599 iwl_print_event_log(priv, 0, next_entry, mode);
4600
4601 iwl_release_restricted_access(priv);
4602}
4603
4604/**
4605 * iwl_irq_handle_error - called for HW or SW error interrupt from card
4606 */
4607static void iwl_irq_handle_error(struct iwl_priv *priv)
4608{
4609 /* Set the FW error flag -- cleared on iwl_down */
4610 set_bit(STATUS_FW_ERROR, &priv->status);
4611
4612 /* Cancel currently queued command. */
4613 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4614
4615#ifdef CONFIG_IWLWIFI_DEBUG
4616 if (iwl_debug_level & IWL_DL_FW_ERRORS) {
4617 iwl_dump_nic_error_log(priv);
4618 iwl_dump_nic_event_log(priv);
4619 iwl_print_rx_config_cmd(&priv->staging_rxon);
4620 }
4621#endif
4622
4623 wake_up_interruptible(&priv->wait_command_queue);
4624
4625 /* Keep the restart process from trying to send host
 4626	 * commands by clearing the READY status bit */
4627 clear_bit(STATUS_READY, &priv->status);
4628
4629 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4630 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
4631 "Restarting adapter due to uCode error.\n");
4632
4633 if (iwl_is_associated(priv)) {
4634 memcpy(&priv->recovery_rxon, &priv->active_rxon,
4635 sizeof(priv->recovery_rxon));
4636 priv->error_recovering = 1;
4637 }
4638 queue_work(priv->workqueue, &priv->restart);
4639 }
4640}
4641
4642static void iwl_error_recovery(struct iwl_priv *priv)
4643{
4644 unsigned long flags;
4645
4646 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
4647 sizeof(priv->staging_rxon));
4648 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4649 iwl_commit_rxon(priv);
4650
4651 iwl_rxon_add_station(priv, priv->bssid, 1);
4652
4653 spin_lock_irqsave(&priv->lock, flags);
4654 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
4655 priv->error_recovering = 0;
4656 spin_unlock_irqrestore(&priv->lock, flags);
4657}
4658
4659static void iwl_irq_tasklet(struct iwl_priv *priv)
4660{
4661 u32 inta, handled = 0;
4662 u32 inta_fh;
4663 unsigned long flags;
4664#ifdef CONFIG_IWLWIFI_DEBUG
4665 u32 inta_mask;
4666#endif
4667
4668 spin_lock_irqsave(&priv->lock, flags);
4669
4670 /* Ack/clear/reset pending uCode interrupts.
4671 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4672 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
4673 inta = iwl_read32(priv, CSR_INT);
4674 iwl_write32(priv, CSR_INT, inta);
4675
4676 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4677 * Any new interrupts that happen after this, either while we're
4678 * in this tasklet, or later, will show up in next ISR/tasklet. */
4679 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4680 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
4681
4682#ifdef CONFIG_IWLWIFI_DEBUG
4683 if (iwl_debug_level & IWL_DL_ISR) {
4684 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
4685 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4686 inta, inta_mask, inta_fh);
4687 }
4688#endif
4689
4690 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
4691 * atomic, make sure that inta covers all the interrupts that
4692 * we've discovered, even if FH interrupt came in just after
4693 * reading CSR_INT. */
4694 if (inta_fh & CSR_FH_INT_RX_MASK)
4695 inta |= CSR_INT_BIT_FH_RX;
4696 if (inta_fh & CSR_FH_INT_TX_MASK)
4697 inta |= CSR_INT_BIT_FH_TX;
4698
4699 /* Now service all interrupt bits discovered above. */
4700 if (inta & CSR_INT_BIT_HW_ERR) {
4701 IWL_ERROR("Microcode HW error detected. Restarting.\n");
4702
4703 /* Tell the device to stop sending interrupts */
4704 iwl_disable_interrupts(priv);
4705
4706 iwl_irq_handle_error(priv);
4707
4708 handled |= CSR_INT_BIT_HW_ERR;
4709
4710 spin_unlock_irqrestore(&priv->lock, flags);
4711
4712 return;
4713 }
4714
4715#ifdef CONFIG_IWLWIFI_DEBUG
4716 if (iwl_debug_level & (IWL_DL_ISR)) {
4717 /* NIC fires this, but we don't use it, redundant with WAKEUP */
4718 if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
4719 IWL_DEBUG_ISR("Microcode started or stopped.\n");
4720
4721 /* Alive notification via Rx interrupt will do the real work */
4722 if (inta & CSR_INT_BIT_ALIVE)
4723 IWL_DEBUG_ISR("Alive interrupt\n");
4724 }
4725#endif
4726 /* Safely ignore these bits for debug checks below */
4727 inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);
4728
4729 /* HW RF KILL switch toggled (4965 only) */
4730 if (inta & CSR_INT_BIT_RF_KILL) {
4731 int hw_rf_kill = 0;
4732 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
4733 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4734 hw_rf_kill = 1;
4735
4736 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
4737 "RF_KILL bit toggled to %s.\n",
4738 hw_rf_kill ? "disable radio":"enable radio");
4739
4740 /* Queue restart only if RF_KILL switch was set to "kill"
4741 * when we loaded driver, and is now set to "enable".
4742 * After we're Alive, RF_KILL gets handled by
4743 * iwl_rx_card_state_notif() */
4744 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status))
4745 queue_work(priv->workqueue, &priv->restart);
4746
4747 handled |= CSR_INT_BIT_RF_KILL;
4748 }
4749
4750 /* Chip got too hot and stopped itself (4965 only) */
4751 if (inta & CSR_INT_BIT_CT_KILL) {
4752 IWL_ERROR("Microcode CT kill error detected.\n");
4753 handled |= CSR_INT_BIT_CT_KILL;
4754 }
4755
4756 /* Error detected by uCode */
4757 if (inta & CSR_INT_BIT_SW_ERR) {
4758 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
4759 inta);
4760 iwl_irq_handle_error(priv);
4761 handled |= CSR_INT_BIT_SW_ERR;
4762 }
4763
4764 /* uCode wakes up after power-down sleep */
4765 if (inta & CSR_INT_BIT_WAKEUP) {
4766 IWL_DEBUG_ISR("Wakeup interrupt\n");
4767 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
4768 iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]);
4769 iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]);
4770 iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]);
4771 iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]);
4772 iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]);
4773 iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]);
4774
4775 handled |= CSR_INT_BIT_WAKEUP;
4776 }
4777
4778 /* All uCode command responses, including Tx command responses,
4779 * Rx "responses" (frame-received notification), and other
 4780	 * notifications from uCode come through here. */
4781 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
4782 iwl_rx_handle(priv);
4783 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4784 }
4785
4786 if (inta & CSR_INT_BIT_FH_TX) {
4787 IWL_DEBUG_ISR("Tx interrupt\n");
4788
4789 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
4790 if (!iwl_grab_restricted_access(priv)) {
4791 iwl_write_restricted(priv,
4792 FH_TCSR_CREDIT
4793 (ALM_FH_SRVC_CHNL), 0x0);
4794 iwl_release_restricted_access(priv);
4795 }
4796 handled |= CSR_INT_BIT_FH_TX;
4797 }
4798
4799 if (inta & ~handled)
4800 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
4801
4802 if (inta & ~CSR_INI_SET_MASK) {
4803 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
4804 inta & ~CSR_INI_SET_MASK);
4805 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
4806 }
4807
4808 /* Re-enable all interrupts */
4809 iwl_enable_interrupts(priv);
4810
4811#ifdef CONFIG_IWLWIFI_DEBUG
4812 if (iwl_debug_level & (IWL_DL_ISR)) {
4813 inta = iwl_read32(priv, CSR_INT);
4814 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4815 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4816 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
4817 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
4818 }
4819#endif
4820 spin_unlock_irqrestore(&priv->lock, flags);
4821}
4822
4823static irqreturn_t iwl_isr(int irq, void *data)
4824{
4825 struct iwl_priv *priv = data;
4826 u32 inta, inta_mask;
4827 u32 inta_fh;
4828 if (!priv)
4829 return IRQ_NONE;
4830
4831 spin_lock(&priv->lock);
4832
4833 /* Disable (but don't clear!) interrupts here to avoid
4834 * back-to-back ISRs and sporadic interrupts from our NIC.
4835 * If we have something to service, the tasklet will re-enable ints.
4836 * If we *don't* have something, we'll re-enable before leaving here. */
4837 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
4838 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
4839
4840 /* Discover which interrupts are active/pending */
4841 inta = iwl_read32(priv, CSR_INT);
4842 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4843
4844 /* Ignore interrupt if there's nothing in NIC to service.
4845 * This may be due to IRQ shared with another device,
4846 * or due to sporadic interrupts thrown from our NIC. */
4847 if (!inta && !inta_fh) {
4848 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
4849 goto none;
4850 }
4851
4852 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
4853 /* Hardware disappeared */
 4854		IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
4855 goto none;
4856 }
4857
4858 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4859 inta, inta_mask, inta_fh);
4860
4861 /* iwl_irq_tasklet() will service interrupts and re-enable them */
4862 tasklet_schedule(&priv->irq_tasklet);
4863 spin_unlock(&priv->lock);
4864
4865 return IRQ_HANDLED;
4866
4867 none:
4868 /* re-enable interrupts here since we don't have anything to service. */
4869 iwl_enable_interrupts(priv);
4870 spin_unlock(&priv->lock);
4871 return IRQ_NONE;
4872}
4873
4874/************************** EEPROM BANDS ****************************
4875 *
4876 * The iwl_eeprom_band definitions below provide the mapping from the
4877 * EEPROM contents to the specific channel number supported for each
4878 * band.
4879 *
4880 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
4881 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
4882 * The specific geography and calibration information for that channel
4883 * is contained in the eeprom map itself.
4884 *
4885 * During init, we copy the eeprom information and channel map
4886 * information into priv->channel_info_24/52 and priv->channel_map_24/52
4887 *
4888 * channel_map_24/52 provides the index in the channel_info array for a
4889 * given channel. We have to have two separate maps as there is channel
4890 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
4891 * band_2
4892 *
4893 * A value of 0xff stored in the channel_map indicates that the channel
4894 * is not supported by the hardware at all.
4895 *
4896 * A value of 0xfe in the channel_map indicates that the channel is not
4897 * valid for Tx with the current hardware. This means that
4898 * while the system can tune and receive on a given channel, it may not
4899 * be able to associate or transmit any frames on that
4900 * channel. There is no corresponding channel information for that
4901 * entry.
4902 *
4903 *********************************************************************/
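/*
 * Worked example of the EEPROM-to-channel mapping described above, using
 * the band tables that follow:
 *
 *   iwl_eeprom_band_1[13]  ->  channel  14 (2.4 GHz)
 *   iwl_eeprom_band_3[4]   ->  channel  42 (5.2 GHz, as noted above)
 *   iwl_eeprom_band_4[0]   ->  channel 100 (5500-5700MHz range)
 *
 * The regulatory data for each entry lives at the same index in
 * priv->eeprom.band_N_channels[], which iwl_init_band_reference() below
 * pairs with these channel-number tables.
 */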
4904
4905/* 2.4 GHz */
4906static const u8 iwl_eeprom_band_1[14] = {
4907 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4908};
4909
4910/* 5.2 GHz bands */
4911static const u8 iwl_eeprom_band_2[] = {
4912 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4913};
4914
4915static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */
4916 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4917};
4918
4919static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
4920 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4921};
4922
4923static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
4924 145, 149, 153, 157, 161, 165
4925};
4926
4927static void iwl_init_band_reference(const struct iwl_priv *priv, int band,
4928 int *eeprom_ch_count,
4929 const struct iwl_eeprom_channel
4930 **eeprom_ch_info,
4931 const u8 **eeprom_ch_index)
4932{
4933 switch (band) {
4934 case 1: /* 2.4GHz band */
4935 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
4936 *eeprom_ch_info = priv->eeprom.band_1_channels;
4937 *eeprom_ch_index = iwl_eeprom_band_1;
4938 break;
4939 case 2: /* 5.2GHz band */
4940 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
4941 *eeprom_ch_info = priv->eeprom.band_2_channels;
4942 *eeprom_ch_index = iwl_eeprom_band_2;
4943 break;
4944 case 3: /* 5.2GHz band */
4945 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
4946 *eeprom_ch_info = priv->eeprom.band_3_channels;
4947 *eeprom_ch_index = iwl_eeprom_band_3;
4948 break;
4949 case 4: /* 5.2GHz band */
4950 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
4951 *eeprom_ch_info = priv->eeprom.band_4_channels;
4952 *eeprom_ch_index = iwl_eeprom_band_4;
4953 break;
4954 case 5: /* 5.2GHz band */
4955 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
4956 *eeprom_ch_info = priv->eeprom.band_5_channels;
4957 *eeprom_ch_index = iwl_eeprom_band_5;
4958 break;
4959 default:
4960 BUG();
4961 return;
4962 }
4963}
4964
4965const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
4966 int phymode, u16 channel)
4967{
4968 int i;
4969
4970 switch (phymode) {
4971 case MODE_IEEE80211A:
4972 for (i = 14; i < priv->channel_count; i++) {
4973 if (priv->channel_info[i].channel == channel)
4974 return &priv->channel_info[i];
4975 }
4976 break;
4977
4978 case MODE_IEEE80211B:
4979 case MODE_IEEE80211G:
4980 if (channel >= 1 && channel <= 14)
4981 return &priv->channel_info[channel - 1];
4982 break;
4983
4984 }
4985
4986 return NULL;
4987}
4988
4989#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
4990 ? # x " " : "")
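/* For example, CHECK_AND_PRINT(IBSS) expands to
 *   ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_IBSS) ? "IBSS " : "")
 * so each set flag contributes its name (plus a trailing space) to the
 * debug line printed below, and cleared flags contribute nothing. */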
4991
4992static int iwl_init_channel_map(struct iwl_priv *priv)
4993{
4994 int eeprom_ch_count = 0;
4995 const u8 *eeprom_ch_index = NULL;
4996 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
4997 int band, ch;
4998 struct iwl_channel_info *ch_info;
4999
5000 if (priv->channel_count) {
5001 IWL_DEBUG_INFO("Channel map already initialized.\n");
5002 return 0;
5003 }
5004
5005 if (priv->eeprom.version < 0x2f) {
5006 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5007 priv->eeprom.version);
5008 return -EINVAL;
5009 }
5010
5011 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5012
5013 priv->channel_count =
5014 ARRAY_SIZE(iwl_eeprom_band_1) +
5015 ARRAY_SIZE(iwl_eeprom_band_2) +
5016 ARRAY_SIZE(iwl_eeprom_band_3) +
5017 ARRAY_SIZE(iwl_eeprom_band_4) +
5018 ARRAY_SIZE(iwl_eeprom_band_5);
5019
5020 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5021
5022 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
5023 priv->channel_count, GFP_KERNEL);
5024 if (!priv->channel_info) {
5025 IWL_ERROR("Could not allocate channel_info\n");
5026 priv->channel_count = 0;
5027 return -ENOMEM;
5028 }
5029
5030 ch_info = priv->channel_info;
5031
 5032	/* Loop through the 5 EEPROM bands, adding them in order to the
 5033	 * channel map we maintain (which contains additional information beyond
 5034	 * what is in the EEPROM) */
5035 for (band = 1; band <= 5; band++) {
5036
5037 iwl_init_band_reference(priv, band, &eeprom_ch_count,
5038 &eeprom_ch_info, &eeprom_ch_index);
5039
5040 /* Loop through each band adding each of the channels */
5041 for (ch = 0; ch < eeprom_ch_count; ch++) {
5042 ch_info->channel = eeprom_ch_index[ch];
5043 ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5044 MODE_IEEE80211A;
5045
5046 /* permanently store EEPROM's channel regulatory flags
5047 * and max power in channel info database. */
5048 ch_info->eeprom = eeprom_ch_info[ch];
5049
5050 /* Copy the run-time flags so they are there even on
5051 * invalid channels */
5052 ch_info->flags = eeprom_ch_info[ch].flags;
5053
5054 if (!(is_channel_valid(ch_info))) {
5055 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5056 "No traffic\n",
5057 ch_info->channel,
5058 ch_info->flags,
5059 is_channel_a_band(ch_info) ?
5060 "5.2" : "2.4");
5061 ch_info++;
5062 continue;
5063 }
5064
5065 /* Initialize regulatory-based run-time data */
5066 ch_info->max_power_avg = ch_info->curr_txpow =
5067 eeprom_ch_info[ch].max_power_avg;
5068 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5069 ch_info->min_power = 0;
5070
5071 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5072 " %ddBm): Ad-Hoc %ssupported\n",
5073 ch_info->channel,
5074 is_channel_a_band(ch_info) ?
5075 "5.2" : "2.4",
5076 CHECK_AND_PRINT(IBSS),
5077 CHECK_AND_PRINT(ACTIVE),
5078 CHECK_AND_PRINT(RADAR),
5079 CHECK_AND_PRINT(WIDE),
5080 CHECK_AND_PRINT(NARROW),
5081 CHECK_AND_PRINT(DFS),
5082 eeprom_ch_info[ch].flags,
5083 eeprom_ch_info[ch].max_power_avg,
5084 ((eeprom_ch_info[ch].
5085 flags & EEPROM_CHANNEL_IBSS)
5086 && !(eeprom_ch_info[ch].
5087 flags & EEPROM_CHANNEL_RADAR))
5088 ? "" : "not ");
5089
5090 /* Set the user_txpower_limit to the highest power
5091 * supported by any channel */
5092 if (eeprom_ch_info[ch].max_power_avg >
5093 priv->user_txpower_limit)
5094 priv->user_txpower_limit =
5095 eeprom_ch_info[ch].max_power_avg;
5096
5097 ch_info++;
5098 }
5099 }
5100
5101 if (iwl3945_txpower_set_from_eeprom(priv))
5102 return -EIO;
5103
5104 return 0;
5105}
5106
5107/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5108 * sending probe req. This should be set long enough to hear probe responses
5109 * from more than one AP. */
5110#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
5111#define IWL_ACTIVE_DWELL_TIME_52 (10)
5112
5113/* For faster active scanning, scan will move to the next channel if fewer than
5114 * PLCP_QUIET_THRESH packets are heard on this channel within
5115 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
5116 * time if it's a quiet channel (nothing responded to our probe, and there's
5117 * no other traffic).
5118 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5119#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
5120#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
5121
5122/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5123 * Must be set longer than active dwell time.
5124 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5125#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
5126#define IWL_PASSIVE_DWELL_TIME_52 (10)
5127#define IWL_PASSIVE_DWELL_BASE (100)
5128#define IWL_CHANNEL_TUNE_TIME 5
5129
5130static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode)
5131{
5132 if (phymode == MODE_IEEE80211A)
5133 return IWL_ACTIVE_DWELL_TIME_52;
5134 else
5135 return IWL_ACTIVE_DWELL_TIME_24;
5136}
5137
5138static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode)
5139{
5140 u16 active = iwl_get_active_dwell_time(priv, phymode);
5141 u16 passive = (phymode != MODE_IEEE80211A) ?
5142 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5143 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5144
5145 if (iwl_is_associated(priv)) {
5146 /* If we're associated, we clamp the maximum passive
5147 * dwell time to be 98% of the beacon interval (minus
5148 * 2 * channel tune time) */
5149 passive = priv->beacon_int;
5150 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5151 passive = IWL_PASSIVE_DWELL_BASE;
5152 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5153 }
5154
5155 if (passive <= active)
5156 passive = active + 1;
5157
5158 return passive;
5159}
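/*
 * Worked example for the dwell times above, on 2.4 GHz (all values msec,
 * as noted in the defines):
 *
 *   Not associated:  passive = 100 + 20 = 120, active = 20.
 *   Associated, beacon interval 100:
 *     (100 * 98) / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 98 - 10 = 88,
 *     still comfortably longer than the 20 msec active dwell
 *     (larger beacon intervals are first clamped to IWL_PASSIVE_DWELL_BASE).
 */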
5160
5161static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode,
5162 u8 is_active, u8 direct_mask,
5163 struct iwl_scan_channel *scan_ch)
5164{
5165 const struct ieee80211_channel *channels = NULL;
5166 const struct ieee80211_hw_mode *hw_mode;
5167 const struct iwl_channel_info *ch_info;
5168 u16 passive_dwell = 0;
5169 u16 active_dwell = 0;
5170 int added, i;
5171
5172 hw_mode = iwl_get_hw_mode(priv, phymode);
5173 if (!hw_mode)
5174 return 0;
5175
5176 channels = hw_mode->channels;
5177
5178 active_dwell = iwl_get_active_dwell_time(priv, phymode);
5179 passive_dwell = iwl_get_passive_dwell_time(priv, phymode);
5180
5181 for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5182 if (channels[i].chan ==
5183 le16_to_cpu(priv->active_rxon.channel)) {
5184 if (iwl_is_associated(priv)) {
5185 IWL_DEBUG_SCAN
5186 ("Skipping current channel %d\n",
5187 le16_to_cpu(priv->active_rxon.channel));
5188 continue;
5189 }
5190 } else if (priv->only_active_channel)
5191 continue;
5192
5193 scan_ch->channel = channels[i].chan;
5194
5195 ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel);
5196 if (!is_channel_valid(ch_info)) {
5197 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5198 scan_ch->channel);
5199 continue;
5200 }
5201
5202 if (!is_active || is_channel_passive(ch_info) ||
5203 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5204 scan_ch->type = 0; /* passive */
5205 else
5206 scan_ch->type = 1; /* active */
5207
5208 if (scan_ch->type & 1)
5209 scan_ch->type |= (direct_mask << 1);
5210
5211 if (is_channel_narrow(ch_info))
5212 scan_ch->type |= (1 << 7);
5213
5214 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5215 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5216
5217 /* Set power levels to defaults */
5218 scan_ch->tpc.dsp_atten = 110;
5219 /* scan_pwr_info->tpc.dsp_atten; */
5220
5221 /*scan_pwr_info->tpc.tx_gain; */
5222 if (phymode == MODE_IEEE80211A)
5223 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5224 else {
5225 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5226 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
5227 * power level
5228 scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3;
5229 */
5230 }
5231
5232 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5233 scan_ch->channel,
5234 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5235 (scan_ch->type & 1) ?
5236 active_dwell : passive_dwell);
5237
5238 scan_ch++;
5239 added++;
5240 }
5241
5242 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
5243 return added;
5244}
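
/* Summary of the scan_ch->type encoding built in the loop above (derived
 * from the code itself, for reference only):
 *
 *   bit 0     - 1 = active scan, 0 = passive scan
 *   bits 1..n - direct-scan SSID mask (direct_mask << 1), only set
 *               when bit 0 (active) is set
 *   bit 7     - channel is "narrow" (is_channel_narrow())
 *
 * active_dwell/passive_dwell are in msec, from the helpers above. */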
5245
5246static void iwl_reset_channel_flag(struct iwl_priv *priv)
5247{
5248 int i, j;
5249 for (i = 0; i < 3; i++) {
5250 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5251 for (j = 0; j < hw_mode->num_channels; j++)
5252 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5253 }
5254}
5255
5256static void iwl_init_hw_rates(struct iwl_priv *priv,
5257 struct ieee80211_rate *rates)
5258{
5259 int i;
5260
5261 for (i = 0; i < IWL_RATE_COUNT; i++) {
5262 rates[i].rate = iwl_rates[i].ieee * 5;
5263 rates[i].val = i; /* Rate scaling will work on indexes */
5264 rates[i].val2 = i;
5265 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5266 /* Only OFDM have the bits-per-symbol set */
5267 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5268 rates[i].flags |= IEEE80211_RATE_OFDM;
5269 else {
5270 /*
5271 * If CCK 1M then set rate flag to CCK else CCK_2
5272 * which is CCK | PREAMBLE2
5273 */
5274 rates[i].flags |= (iwl_rates[i].plcp == 10) ?
5275 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5276 }
5277
5278 /* Set up which ones are basic rates... */
5279 if (IWL_BASIC_RATES_MASK & (1 << i))
5280 rates[i].flags |= IEEE80211_RATE_BASIC;
5281 }
5282}
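
/* Example of the rate table this builds (illustrative; iwl_rates[] is
 * defined elsewhere in the driver): iwl_rates[i].ieee is the 802.11
 * supported-rate value in 500 kbps units, so multiplying by 5 yields the
 * 100 kbps units mac80211 uses for ieee80211_rate.rate, e.g.
 *
 *   1 Mbps CCK  :  ieee =   2  ->  rates[i].rate =  10
 *   54 Mbps OFDM:  ieee = 108  ->  rates[i].rate = 540
 */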
5283
5284/**
5285 * iwl_init_geos - Initialize mac80211's geo/channel info based on EEPROM
5286 */
5287static int iwl_init_geos(struct iwl_priv *priv)
5288{
5289 struct iwl_channel_info *ch;
5290 struct ieee80211_hw_mode *modes;
5291 struct ieee80211_channel *channels;
5292 struct ieee80211_channel *geo_ch;
5293 struct ieee80211_rate *rates;
5294 int i = 0;
5295 enum {
5296 A = 0,
5297 B = 1,
5298 G = 2,
5299 };
5300 int mode_count = 3;
5301
5302 if (priv->modes) {
5303 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5304 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5305 return 0;
5306 }
5307
5308 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5309 GFP_KERNEL);
5310 if (!modes)
5311 return -ENOMEM;
5312
5313 channels = kzalloc(sizeof(struct ieee80211_channel) *
5314 priv->channel_count, GFP_KERNEL);
5315 if (!channels) {
5316 kfree(modes);
5317 return -ENOMEM;
5318 }
5319
5320 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5321 GFP_KERNEL);
5322 if (!rates) {
5323 kfree(modes);
5324 kfree(channels);
5325 return -ENOMEM;
5326 }
5327
5328 /* 0 = 802.11a
5329 * 1 = 802.11b
5330 * 2 = 802.11g
5331 */
5332
5333 /* 5.2GHz channels start after the 2.4GHz channels */
5334 modes[A].mode = MODE_IEEE80211A;
5335 modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
5336 modes[A].rates = rates;
5337 modes[A].num_rates = 8; /* just OFDM */
5338 modes[A].num_channels = 0;
5339
5340 modes[B].mode = MODE_IEEE80211B;
5341 modes[B].channels = channels;
5342 modes[B].rates = &rates[8];
5343 modes[B].num_rates = 4; /* just CCK */
5344 modes[B].num_channels = 0;
5345
5346 modes[G].mode = MODE_IEEE80211G;
5347 modes[G].channels = channels;
5348 modes[G].rates = rates;
5349 modes[G].num_rates = 12; /* OFDM & CCK */
5350 modes[G].num_channels = 0;
5351
5352 priv->ieee_channels = channels;
5353 priv->ieee_rates = rates;
5354
5355 iwl_init_hw_rates(priv, rates);
5356
5357 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5358 ch = &priv->channel_info[i];
5359
5360 if (!is_channel_valid(ch)) {
5361 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5362 "skipping.\n",
5363 ch->channel, is_channel_a_band(ch) ?
5364 "5.2" : "2.4");
5365 continue;
5366 }
5367
5368 if (is_channel_a_band(ch))
5369 geo_ch = &modes[A].channels[modes[A].num_channels++];
5370 else {
5371 geo_ch = &modes[B].channels[modes[B].num_channels++];
5372 modes[G].num_channels++;
5373 }
5374
5375 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5376 geo_ch->chan = ch->channel;
5377 geo_ch->power_level = ch->max_power_avg;
5378 geo_ch->antenna_max = 0xff;
5379
5380 if (is_channel_valid(ch)) {
5381 geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5382 if (ch->flags & EEPROM_CHANNEL_IBSS)
5383 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5384
5385 if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5386 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5387
5388 if (ch->flags & EEPROM_CHANNEL_RADAR)
5389 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5390
5391 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5392 priv->max_channel_txpower_limit =
5393 ch->max_power_avg;
5394 }
5395
5396 geo_ch->val = geo_ch->flag;
5397 }
5398
5399 if ((modes[A].num_channels == 0) && priv->is_abg) {
5400 printk(KERN_INFO DRV_NAME
5401 ": Incorrectly detected BG card as ABG. Please send "
5402 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5403 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5404 priv->is_abg = 0;
5405 }
5406
5407 printk(KERN_INFO DRV_NAME
5408 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5409 modes[G].num_channels, modes[A].num_channels);
5410
5411 /*
5412 * NOTE: We register these in order of preference -- the
5413 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5414 * a phymode based on rates or AP capabilities, but seems to
5415 * configure it purely on whether the channel being configured
5416 * is supported by a mode -- and the first match is taken
5417 */
5418
5419 if (modes[G].num_channels)
5420 ieee80211_register_hwmode(priv->hw, &modes[G]);
5421 if (modes[B].num_channels)
5422 ieee80211_register_hwmode(priv->hw, &modes[B]);
5423 if (modes[A].num_channels)
5424 ieee80211_register_hwmode(priv->hw, &modes[A]);
5425
5426 priv->modes = modes;
5427 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5428
5429 return 0;
5430}
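
/* Channel array layout set up above (for reference): the B and G modes
 * share channels[0..], whose 2.4 GHz entries are filled in by the loop,
 * while the A mode points past the first EEPROM band, i.e. at
 * &channels[ARRAY_SIZE(iwl_eeprom_band_1)], where the 5.2 GHz entries
 * land.  A 2.4 GHz channel therefore bumps both modes[B].num_channels and
 * modes[G].num_channels, since those two modes alias the same array. */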
5431
5432/******************************************************************************
5433 *
5434 * uCode download functions
5435 *
5436 ******************************************************************************/
5437
5438static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
5439{
5440 if (priv->ucode_code.v_addr != NULL) {
5441 pci_free_consistent(priv->pci_dev,
5442 priv->ucode_code.len,
5443 priv->ucode_code.v_addr,
5444 priv->ucode_code.p_addr);
5445 priv->ucode_code.v_addr = NULL;
5446 }
5447 if (priv->ucode_data.v_addr != NULL) {
5448 pci_free_consistent(priv->pci_dev,
5449 priv->ucode_data.len,
5450 priv->ucode_data.v_addr,
5451 priv->ucode_data.p_addr);
5452 priv->ucode_data.v_addr = NULL;
5453 }
5454 if (priv->ucode_data_backup.v_addr != NULL) {
5455 pci_free_consistent(priv->pci_dev,
5456 priv->ucode_data_backup.len,
5457 priv->ucode_data_backup.v_addr,
5458 priv->ucode_data_backup.p_addr);
5459 priv->ucode_data_backup.v_addr = NULL;
5460 }
5461 if (priv->ucode_init.v_addr != NULL) {
5462 pci_free_consistent(priv->pci_dev,
5463 priv->ucode_init.len,
5464 priv->ucode_init.v_addr,
5465 priv->ucode_init.p_addr);
5466 priv->ucode_init.v_addr = NULL;
5467 }
5468 if (priv->ucode_init_data.v_addr != NULL) {
5469 pci_free_consistent(priv->pci_dev,
5470 priv->ucode_init_data.len,
5471 priv->ucode_init_data.v_addr,
5472 priv->ucode_init_data.p_addr);
5473 priv->ucode_init_data.v_addr = NULL;
5474 }
5475 if (priv->ucode_boot.v_addr != NULL) {
5476 pci_free_consistent(priv->pci_dev,
5477 priv->ucode_boot.len,
5478 priv->ucode_boot.v_addr,
5479 priv->ucode_boot.p_addr);
5480 priv->ucode_boot.v_addr = NULL;
5481 }
5482}
5483
5484/**
5485 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
5486 * looking at all data.
5487 */
5488static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 * image, u32 len)
5489{
5490 u32 val;
5491 u32 save_len = len;
5492 int rc = 0;
5493 u32 errcnt;
5494
5495 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5496
5497 rc = iwl_grab_restricted_access(priv);
5498 if (rc)
5499 return rc;
5500
5501 iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
5502
5503 errcnt = 0;
5504 for (; len > 0; len -= sizeof(u32), image++) {
5505 /* read data comes through single port, auto-incr addr */
5506 /* NOTE: Use the debugless read so we don't flood kernel log
5507 * if IWL_DL_IO is set */
5508 val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
5509 if (val != le32_to_cpu(*image)) {
5510 IWL_ERROR("uCode INST section is invalid at "
5511 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5512 save_len - len, val, le32_to_cpu(*image));
5513 rc = -EIO;
5514 errcnt++;
5515 if (errcnt >= 20)
5516 break;
5517 }
5518 }
5519
5520 iwl_release_restricted_access(priv);
5521
5522 if (!errcnt)
5523 IWL_DEBUG_INFO
5524 ("ucode image in INSTRUCTION memory is good\n");
5525
5526 return rc;
5527}
5528
5529
5530/**
5531 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
5532 * using sample data 100 bytes apart. If these sample points are good,
5533 * it's a pretty good bet that everything between them is good, too.
5534 */
5535static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
5536{
5537 u32 val;
5538 int rc = 0;
5539 u32 errcnt = 0;
5540 u32 i;
5541
5542 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5543
5544 rc = iwl_grab_restricted_access(priv);
5545 if (rc)
5546 return rc;
5547
5548 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
5549 /* read data comes through single port, auto-incr addr */
5550 /* NOTE: Use the debugless read so we don't flood kernel log
5551 * if IWL_DL_IO is set */
5552 iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR,
5553 i + RTC_INST_LOWER_BOUND);
5554 val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
5555 if (val != le32_to_cpu(*image)) {
5556#if 0 /* Enable this if you want to see details */
5557 IWL_ERROR("uCode INST section is invalid at "
5558 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5559 i, val, *image);
5560#endif
5561 rc = -EIO;
5562 errcnt++;
5563 if (errcnt >= 3)
5564 break;
5565 }
5566 }
5567
5568 iwl_release_restricted_access(priv);
5569
5570 return rc;
5571}
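
/* Rough cost comparison of the two verification paths (illustrative
 * arithmetic only): for a ~96 KiB instruction image,
 *   iwl_verify_inst_sparse() reads 96 * 1024 / 100 ~=   983 words,
 *   iwl_verify_inst_full()   reads 96 * 1024 / 4    == 24576 words,
 * which is why iwl_verify_ucode() below tries the sparse check on each
 * image and uses the full check only to dump details after a failure. */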
5572
5573
5574/**
5575 * iwl_verify_ucode - determine which instruction image is in SRAM,
5576 * and verify its contents
5577 */
5578static int iwl_verify_ucode(struct iwl_priv *priv)
5579{
5580 __le32 *image;
5581 u32 len;
5582 int rc = 0;
5583
5584 /* Try bootstrap */
5585 image = (__le32 *)priv->ucode_boot.v_addr;
5586 len = priv->ucode_boot.len;
5587 rc = iwl_verify_inst_sparse(priv, image, len);
5588 if (rc == 0) {
5589 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
5590 return 0;
5591 }
5592
5593 /* Try initialize */
5594 image = (__le32 *)priv->ucode_init.v_addr;
5595 len = priv->ucode_init.len;
5596 rc = iwl_verify_inst_sparse(priv, image, len);
5597 if (rc == 0) {
5598 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
5599 return 0;
5600 }
5601
5602 /* Try runtime/protocol */
5603 image = (__le32 *)priv->ucode_code.v_addr;
5604 len = priv->ucode_code.len;
5605 rc = iwl_verify_inst_sparse(priv, image, len);
5606 if (rc == 0) {
5607 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
5608 return 0;
5609 }
5610
5611 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
5612
5613 /* Show first several data entries in instruction SRAM.
5614 * Selection of bootstrap image is arbitrary. */
5615 image = (__le32 *)priv->ucode_boot.v_addr;
5616 len = priv->ucode_boot.len;
5617 rc = iwl_verify_inst_full(priv, image, len);
5618
5619 return rc;
5620}
5621
5622
5623/* check contents of special bootstrap uCode SRAM */
5624static int iwl_verify_bsm(struct iwl_priv *priv)
5625{
5626 __le32 *image = priv->ucode_boot.v_addr;
5627 u32 len = priv->ucode_boot.len;
5628 u32 reg;
5629 u32 val;
5630
5631 IWL_DEBUG_INFO("Begin verify bsm\n");
5632
5633 /* verify BSM SRAM contents */
5634 val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG);
5635 for (reg = BSM_SRAM_LOWER_BOUND;
5636 reg < BSM_SRAM_LOWER_BOUND + len;
5637 reg += sizeof(u32), image++) {
5638 val = iwl_read_restricted_reg(priv, reg);
5639 if (val != le32_to_cpu(*image)) {
5640 IWL_ERROR("BSM uCode verification failed at "
5641 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
5642 BSM_SRAM_LOWER_BOUND,
5643 reg - BSM_SRAM_LOWER_BOUND, len,
5644 val, le32_to_cpu(*image));
5645 return -EIO;
5646 }
5647 }
5648
5649 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
5650
5651 return 0;
5652}
5653
5654/**
5655 * iwl_load_bsm - Load bootstrap instructions
5656 *
5657 * BSM operation:
5658 *
5659 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
5660 * in special SRAM that does not power down during RFKILL. When powering back
5661 * up after power-saving sleeps (or during initial uCode load), the BSM loads
5662 * the bootstrap program into the on-board processor, and starts it.
5663 *
5664 * The bootstrap program loads (via DMA) instructions and data for a new
5665 * program from host DRAM locations indicated by the host driver in the
5666 * BSM_DRAM_* registers. Once the new program is loaded, it starts
5667 * automatically.
5668 *
5669 * When initializing the NIC, the host driver points the BSM to the
5670 * "initialize" uCode image. This uCode sets up some internal data, then
5671 * notifies host via "initialize alive" that it is complete.
5672 *
5673 * The host then replaces the BSM_DRAM_* pointer values to point to the
5674 * normal runtime uCode instructions and a backup uCode data cache buffer
5675 * (filled initially with starting data values for the on-board processor),
5676 * then triggers the "initialize" uCode to load and launch the runtime uCode,
5677 * which begins normal operation.
5678 *
5679 * When doing a power-save shutdown, runtime uCode saves data SRAM into
5680 * the backup data cache in DRAM before SRAM is powered down.
5681 *
5682 * When powering back up, the BSM loads the bootstrap program. This reloads
5683 * the runtime uCode instructions and the backup data cache into SRAM,
5684 * and re-launches the runtime uCode from where it left off.
5685 */
5686static int iwl_load_bsm(struct iwl_priv *priv)
5687{
5688 __le32 *image = priv->ucode_boot.v_addr;
5689 u32 len = priv->ucode_boot.len;
5690 dma_addr_t pinst;
5691 dma_addr_t pdata;
5692 u32 inst_len;
5693 u32 data_len;
5694 int rc;
5695 int i;
5696 u32 done;
5697 u32 reg_offset;
5698
5699 IWL_DEBUG_INFO("Begin load bsm\n");
5700
5701 /* make sure bootstrap program is no larger than BSM's SRAM size */
5702 if (len > IWL_MAX_BSM_SIZE)
5703 return -EINVAL;
5704
5705 /* Tell bootstrap uCode where to find the "Initialize" uCode
5706 * in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965.
5707 * NOTE: iwl_initialize_alive_start() will replace these values,
5708 * after the "initialize" uCode has run, to point to
5709 * runtime/protocol instructions and backup data cache. */
5710 pinst = priv->ucode_init.p_addr;
5711 pdata = priv->ucode_init_data.p_addr;
5712 inst_len = priv->ucode_init.len;
5713 data_len = priv->ucode_init_data.len;
5714
5715 rc = iwl_grab_restricted_access(priv);
5716 if (rc)
5717 return rc;
5718
5719 iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst);
5720 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5721 iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
5722 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
5723
5724 /* Fill BSM memory with bootstrap instructions */
5725 for (reg_offset = BSM_SRAM_LOWER_BOUND;
5726 reg_offset < BSM_SRAM_LOWER_BOUND + len;
5727 reg_offset += sizeof(u32), image++)
5728 _iwl_write_restricted_reg(priv, reg_offset,
5729 le32_to_cpu(*image));
5730
5731 rc = iwl_verify_bsm(priv);
5732 if (rc) {
5733 iwl_release_restricted_access(priv);
5734 return rc;
5735 }
5736
5737 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
5738 iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0);
5739 iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG,
5740 RTC_INST_LOWER_BOUND);
5741 iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
5742
5743 /* Load bootstrap code into instruction SRAM now,
5744 * to prepare to load "initialize" uCode */
5745 iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
5746 BSM_WR_CTRL_REG_BIT_START);
5747
5748 /* Wait for load of bootstrap uCode to finish */
5749 for (i = 0; i < 100; i++) {
5750 done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG);
5751 if (!(done & BSM_WR_CTRL_REG_BIT_START))
5752 break;
5753 udelay(10);
5754 }
5755 if (i < 100)
5756 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
5757 else {
5758 IWL_ERROR("BSM write did not complete!\n");
5759 return -EIO;
5760 }
5761
5762 /* Enable future boot loads whenever power management unit triggers it
5763 * (e.g. when powering back up after power-save shutdown) */
5764 iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
5765 BSM_WR_CTRL_REG_BIT_START_EN);
5766
5767 iwl_release_restricted_access(priv);
5768
5769 return 0;
5770}
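
/* End-to-end boot sequence using the BSM (summary of the flow implemented
 * in this file; see iwl_set_ucode_ptrs() and the alive handlers below):
 *
 *   1. iwl_read_ucode()       - copy all images from the .ucode file into
 *                               DMA-able host buffers
 *   2. iwl_load_bsm()         - point the BSM DRAM registers at the
 *                               "initialize" image and write the bootstrap
 *                               program into BSM SRAM
 *   3. iwl_nic_start()        - release reset; the bootstrap program DMAs
 *                               and starts the "initialize" uCode
 *   4. iwl_init_alive_start() - on "initialize" alive, repoint the BSM
 *                               DRAM registers at the runtime image via
 *                               iwl_set_ucode_ptrs()
 *   5. iwl_alive_start()      - on runtime alive, bring up the rest of
 *                               the driver
 */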
5771
5772static void iwl_nic_start(struct iwl_priv *priv)
5773{
5774 /* Remove all resets to allow NIC to operate */
5775 iwl_write32(priv, CSR_RESET, 0);
5776}
5777
5778/**
5779 * iwl_read_ucode - Read uCode images from disk file.
5780 *
5781 * Copy into buffers for card to fetch via bus-mastering
5782 */
5783static int iwl_read_ucode(struct iwl_priv *priv)
5784{
5785 struct iwl_ucode *ucode;
5786 int rc = 0;
5787 const struct firmware *ucode_raw;
5788 /* firmware file name contains uCode/driver compatibility version */
5789 const char *name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode";
5790 u8 *src;
5791 size_t len;
5792 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
5793
5794 /* Ask kernel firmware_class module to get the boot firmware off disk.
5795 * request_firmware() is synchronous, file is in memory on return. */
5796 rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
5797 if (rc < 0) {
5798 IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc);
5799 goto error;
5800 }
5801
5802 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
5803 name, ucode_raw->size);
5804
5805 /* Make sure that we got at least our header! */
5806 if (ucode_raw->size < sizeof(*ucode)) {
5807 IWL_ERROR("File size way too small!\n");
5808 rc = -EINVAL;
5809 goto err_release;
5810 }
5811
5812 /* Data from ucode file: header followed by uCode images */
5813 ucode = (void *)ucode_raw->data;
5814
5815 ver = le32_to_cpu(ucode->ver);
5816 inst_size = le32_to_cpu(ucode->inst_size);
5817 data_size = le32_to_cpu(ucode->data_size);
5818 init_size = le32_to_cpu(ucode->init_size);
5819 init_data_size = le32_to_cpu(ucode->init_data_size);
5820 boot_size = le32_to_cpu(ucode->boot_size);
5821
5822 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
5823 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
5824 inst_size);
5825 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
5826 data_size);
5827 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
5828 init_size);
5829 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
5830 init_data_size);
5831 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
5832 boot_size);
5833
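	/* Layout of the .ucode file, as implied by the header fields and the
	 * memcpy offsets below (offsets relative to ucode->data[]):
	 *
	 *   runtime inst   at 0                                   (inst_size bytes)
	 *   runtime data   at inst_size                           (data_size bytes)
	 *   init inst      at inst_size + data_size               (init_size bytes)
	 *   init data      at inst_size + data_size + init_size   (init_data_size bytes)
	 *   bootstrap inst at inst_size + data_size + init_size
	 *                     + init_data_size                    (boot_size bytes)
	 */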
5834 /* Verify size of file vs. image size info in file's header */
5835 if (ucode_raw->size < sizeof(*ucode) +
5836 inst_size + data_size + init_size +
5837 init_data_size + boot_size) {
5838
5839 IWL_DEBUG_INFO("uCode file size %d too small\n",
5840 (int)ucode_raw->size);
5841 rc = -EINVAL;
5842 goto err_release;
5843 }
5844
5845 /* Verify that uCode images will fit in card's SRAM */
5846 if (inst_size > IWL_MAX_INST_SIZE) {
5847 IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n",
5848 (int)inst_size);
5849 rc = -EINVAL;
5850 goto err_release;
5851 }
5852
5853 if (data_size > IWL_MAX_DATA_SIZE) {
5854 IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n",
5855 (int)data_size);
5856 rc = -EINVAL;
5857 goto err_release;
5858 }
5859 if (init_size > IWL_MAX_INST_SIZE) {
5860 IWL_DEBUG_INFO
5861 ("uCode init instr len %d too large to fit in card\n",
5862 (int)init_size);
5863 rc = -EINVAL;
5864 goto err_release;
5865 }
5866 if (init_data_size > IWL_MAX_DATA_SIZE) {
5867 IWL_DEBUG_INFO
5868 ("uCode init data len %d too large to fit in card\n",
5869 (int)init_data_size);
5870 rc = -EINVAL;
5871 goto err_release;
5872 }
5873 if (boot_size > IWL_MAX_BSM_SIZE) {
5874 IWL_DEBUG_INFO
5875 ("uCode boot instr len %d too large to fit in bsm\n",
5876 (int)boot_size);
5877 rc = -EINVAL;
5878 goto err_release;
5879 }
5880
5881 /* Allocate ucode buffers for card's bus-master loading ... */
5882
5883 /* Runtime instructions and 2 copies of data:
5884 * 1) unmodified from disk
5885 * 2) backup cache for save/restore during power-downs */
5886 priv->ucode_code.len = inst_size;
5887 priv->ucode_code.v_addr =
5888 pci_alloc_consistent(priv->pci_dev,
5889 priv->ucode_code.len,
5890 &(priv->ucode_code.p_addr));
5891
5892 priv->ucode_data.len = data_size;
5893 priv->ucode_data.v_addr =
5894 pci_alloc_consistent(priv->pci_dev,
5895 priv->ucode_data.len,
5896 &(priv->ucode_data.p_addr));
5897
5898 priv->ucode_data_backup.len = data_size;
5899 priv->ucode_data_backup.v_addr =
5900 pci_alloc_consistent(priv->pci_dev,
5901 priv->ucode_data_backup.len,
5902 &(priv->ucode_data_backup.p_addr));
5903
5904
5905 /* Initialization instructions and data */
5906 priv->ucode_init.len = init_size;
5907 priv->ucode_init.v_addr =
5908 pci_alloc_consistent(priv->pci_dev,
5909 priv->ucode_init.len,
5910 &(priv->ucode_init.p_addr));
5911
5912 priv->ucode_init_data.len = init_data_size;
5913 priv->ucode_init_data.v_addr =
5914 pci_alloc_consistent(priv->pci_dev,
5915 priv->ucode_init_data.len,
5916 &(priv->ucode_init_data.p_addr));
5917
5918 /* Bootstrap (instructions only, no data) */
5919 priv->ucode_boot.len = boot_size;
5920 priv->ucode_boot.v_addr =
5921 pci_alloc_consistent(priv->pci_dev,
5922 priv->ucode_boot.len,
5923 &(priv->ucode_boot.p_addr));
5924
5925 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
5926 !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr ||
5927 !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr)
5928 goto err_pci_alloc;
5929
5930 /* Copy images into buffers for card's bus-master reads ... */
5931
5932 /* Runtime instructions (first block of data in file) */
5933 src = &ucode->data[0];
5934 len = priv->ucode_code.len;
5935 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n",
5936 (int)len);
5937 memcpy(priv->ucode_code.v_addr, src, len);
5938 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
5939 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
5940
5941 /* Runtime data (2nd block)
5942 * NOTE: Copy into backup buffer will be done in iwl_up() */
5943 src = &ucode->data[inst_size];
5944 len = priv->ucode_data.len;
5945 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n",
5946 (int)len);
5947 memcpy(priv->ucode_data.v_addr, src, len);
5948 memcpy(priv->ucode_data_backup.v_addr, src, len);
5949
5950 /* Initialization instructions (3rd block) */
5951 if (init_size) {
5952 src = &ucode->data[inst_size + data_size];
5953 len = priv->ucode_init.len;
5954 IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n",
5955 (int)len);
5956 memcpy(priv->ucode_init.v_addr, src, len);
5957 }
5958
5959 /* Initialization data (4th block) */
5960 if (init_data_size) {
5961 src = &ucode->data[inst_size + data_size + init_size];
5962 len = priv->ucode_init_data.len;
5963 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
5964 (int)len);
5965 memcpy(priv->ucode_init_data.v_addr, src, len);
5966 }
5967
5968 /* Bootstrap instructions (5th block) */
5969 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
5970 len = priv->ucode_boot.len;
5971 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
5972 (int)len);
5973 memcpy(priv->ucode_boot.v_addr, src, len);
5974
5975 /* We have our copies now, allow the OS to release its copy */
5976 release_firmware(ucode_raw);
5977 return 0;
5978
5979 err_pci_alloc:
5980 IWL_ERROR("failed to allocate pci memory\n");
5981 rc = -ENOMEM;
5982 iwl_dealloc_ucode_pci(priv);
5983
5984 err_release:
5985 release_firmware(ucode_raw);
5986
5987 error:
5988 return rc;
5989}
5990
5991
5992/**
5993 * iwl_set_ucode_ptrs - Set uCode address location
5994 *
5995 * Tell initialization uCode where to find runtime uCode.
5996 *
5997 * BSM registers initially contain pointers to initialization uCode.
5998 * We need to replace them to load runtime uCode inst and data,
5999 * and to save runtime data when powering down.
6000 */
6001static int iwl_set_ucode_ptrs(struct iwl_priv *priv)
6002{
6003 dma_addr_t pinst;
6004 dma_addr_t pdata;
6005 int rc = 0;
6006 unsigned long flags;
6007
6008 /* bits 31:0 for 3945 */
6009 pinst = priv->ucode_code.p_addr;
6010 pdata = priv->ucode_data_backup.p_addr;
6011
6012 spin_lock_irqsave(&priv->lock, flags);
6013 rc = iwl_grab_restricted_access(priv);
6014 if (rc) {
6015 spin_unlock_irqrestore(&priv->lock, flags);
6016 return rc;
6017 }
6018
6019 /* Tell bootstrap uCode where to find image to load */
6020 iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst);
6021 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6022 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
6023 priv->ucode_data.len);
6024
6025 /* Inst bytecount must be last to set up, bit 31 signals uCode
6026 * that all new ptr/size info is in place */
6027 iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG,
6028 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6029
6030 iwl_release_restricted_access(priv);
6031
6032 spin_unlock_irqrestore(&priv->lock, flags);
6033
6034 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
6035
6036 return rc;
6037}
6038
6039/**
6040 * iwl_init_alive_start - Called after REPLY_ALIVE notification received
6041 *
6042 * Called after REPLY_ALIVE notification received from "initialize" uCode.
6043 *
6044 * The 4965 "initialize" ALIVE reply contains calibration data for:
6045 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
6046 * (3945 does not contain this data).
6047 *
6048 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6049*/
6050static void iwl_init_alive_start(struct iwl_priv *priv)
6051{
6052 /* Check alive response for "valid" sign from uCode */
6053 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
6054 /* We had an error bringing up the hardware, so take it
6055 * all the way back down so we can try again */
6056 IWL_DEBUG_INFO("Initialize Alive failed.\n");
6057 goto restart;
6058 }
6059
6060 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
6061 * This is a paranoid check, because we would not have gotten the
6062 * "initialize" alive if code weren't properly loaded. */
6063 if (iwl_verify_ucode(priv)) {
6064 /* Runtime instruction load was bad;
6065 * take it all the way back down so we can try again */
6066 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
6067 goto restart;
6068 }
6069
6070 /* Send pointers to protocol/runtime uCode image ... init code will
6071 * load and launch runtime uCode, which will send us another "Alive"
6072 * notification. */
6073 IWL_DEBUG_INFO("Initialization Alive received.\n");
6074 if (iwl_set_ucode_ptrs(priv)) {
6075 /* Runtime instruction load won't happen;
6076 * take it all the way back down so we can try again */
6077 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
6078 goto restart;
6079 }
6080 return;
6081
6082 restart:
6083 queue_work(priv->workqueue, &priv->restart);
6084}
6085
6086
6087/**
6088 * iwl_alive_start - called after REPLY_ALIVE notification received
6089 * from protocol/runtime uCode (initialization uCode's
6090 * Alive gets handled by iwl_init_alive_start()).
6091 */
6092static void iwl_alive_start(struct iwl_priv *priv)
6093{
6094 int rc = 0;
6095 int thermal_spin = 0;
6096 u32 rfkill;
6097
6098 IWL_DEBUG_INFO("Runtime Alive received.\n");
6099
6100 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
6101 /* We had an error bringing up the hardware, so take it
6102 * all the way back down so we can try again */
6103 IWL_DEBUG_INFO("Alive failed.\n");
6104 goto restart;
6105 }
6106
6107 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
6108 * This is a paranoid check, because we would not have gotten the
6109 * "runtime" alive if code weren't properly loaded. */
6110 if (iwl_verify_ucode(priv)) {
6111 /* Runtime instruction load was bad;
6112 * take it all the way back down so we can try again */
6113 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
6114 goto restart;
6115 }
6116
6117 iwl_clear_stations_table(priv);
6118
6119 rc = iwl_grab_restricted_access(priv);
6120 if (rc) {
6121 IWL_WARNING("Can not read rfkill status from adapter\n");
6122 return;
6123 }
6124
6125 rfkill = iwl_read_restricted_reg(priv, APMG_RFKILL_REG);
6126 IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill);
6127 iwl_release_restricted_access(priv);
6128
6129 if (rfkill & 0x1) {
6130 clear_bit(STATUS_RF_KILL_HW, &priv->status);
6131 /* if rfkill is not on, then wait for thermal
6132 * sensor in adapter to kick in */
6133 while (iwl_hw_get_temperature(priv) == 0) {
6134 thermal_spin++;
6135 udelay(10);
6136 }
6137
6138 if (thermal_spin)
6139 IWL_DEBUG_INFO("Thermal calibration took %dus\n",
6140 thermal_spin * 10);
6141 } else
6142 set_bit(STATUS_RF_KILL_HW, &priv->status);
6143
6144 /* After the ALIVE response, we can process host commands */
6145 set_bit(STATUS_ALIVE, &priv->status);
6146
6147 /* Clear out the uCode error bit if it is set */
6148 clear_bit(STATUS_FW_ERROR, &priv->status);
6149
6150 rc = iwl_init_channel_map(priv);
6151 if (rc) {
6152 IWL_ERROR("initializing regulatory failed: %d\n", rc);
6153 return;
6154 }
6155
6156 iwl_init_geos(priv);
6157
6158 if (iwl_is_rfkill(priv))
6159 return;
6160
6161 if (!priv->mac80211_registered) {
6162 /* Unlock so any user space entry points can call back into
6163 * the driver without a deadlock... */
6164 mutex_unlock(&priv->mutex);
6165 iwl_rate_control_register(priv->hw);
6166 rc = ieee80211_register_hw(priv->hw);
6167 priv->hw->conf.beacon_int = 100;
6168 mutex_lock(&priv->mutex);
6169
6170 if (rc) {
6171 IWL_ERROR("Failed to register network "
6172 "device (error %d)\n", rc);
6173 return;
6174 }
6175
6176 priv->mac80211_registered = 1;
6177
6178 iwl_reset_channel_flag(priv);
6179 } else
6180 ieee80211_start_queues(priv->hw);
6181
6182 priv->active_rate = priv->rates_mask;
6183 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
6184
6185 iwl_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
6186
6187 if (iwl_is_associated(priv)) {
6188 struct iwl_rxon_cmd *active_rxon =
6189 (struct iwl_rxon_cmd *)(&priv->active_rxon);
6190
6191 memcpy(&priv->staging_rxon, &priv->active_rxon,
6192 sizeof(priv->staging_rxon));
6193 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6194 } else {
6195 /* Initialize our rx_config data */
6196 iwl_connection_init_rx_config(priv);
6197 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
6198 }
6199
6200 /* Configure BT coexistence */
6201 iwl_send_bt_config(priv);
6202
6203 /* Configure the adapter for unassociated operation */
6204 iwl_commit_rxon(priv);
6205
6206 /* At this point, the NIC is initialized and operational */
6207 priv->notif_missed_beacons = 0;
6208 set_bit(STATUS_READY, &priv->status);
6209
6210 iwl3945_reg_txpower_periodic(priv);
6211
6212 IWL_DEBUG_INFO("ALIVE processing complete.\n");
6213
6214 if (priv->error_recovering)
6215 iwl_error_recovery(priv);
6216
6217 return;
6218
6219 restart:
6220 queue_work(priv->workqueue, &priv->restart);
6221}
6222
6223static void iwl_cancel_deferred_work(struct iwl_priv *priv);
6224
6225static void __iwl_down(struct iwl_priv *priv)
6226{
6227 unsigned long flags;
6228 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
6229 struct ieee80211_conf *conf = NULL;
6230
6231 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
6232
6233 conf = ieee80211_get_hw_conf(priv->hw);
6234
6235 if (!exit_pending)
6236 set_bit(STATUS_EXIT_PENDING, &priv->status);
6237
6238 iwl_clear_stations_table(priv);
6239
6240 /* Unblock any waiting calls */
6241 wake_up_interruptible_all(&priv->wait_command_queue);
6242
6243 iwl_cancel_deferred_work(priv);
6244
6245 /* Wipe out the EXIT_PENDING status bit if we are not actually
6246 * exiting the module */
6247 if (!exit_pending)
6248 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6249
6250 /* stop and reset the on-board processor */
6251 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
6252
6253 /* tell the device to stop sending interrupts */
6254 iwl_disable_interrupts(priv);
6255
6256 if (priv->mac80211_registered)
6257 ieee80211_stop_queues(priv->hw);
6258
6259 /* If we have not previously called iwl_init() then
6260 * clear all bits but the RF Kill and SUSPEND bits and return */
6261 if (!iwl_is_init(priv)) {
6262 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6263 STATUS_RF_KILL_HW |
6264 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6265 STATUS_RF_KILL_SW |
6266 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6267 STATUS_IN_SUSPEND;
6268 goto exit;
6269 }
6270
6271 /* ...otherwise clear out all the status bits but the RF Kill and
6272 * SUSPEND bits and continue taking the NIC down. */
6273 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6274 STATUS_RF_KILL_HW |
6275 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6276 STATUS_RF_KILL_SW |
6277 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6278 STATUS_IN_SUSPEND |
6279 test_bit(STATUS_FW_ERROR, &priv->status) <<
6280 STATUS_FW_ERROR;
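	/* The mask above keeps only the RF-kill (HW and SW), IN_SUSPEND and
	 * FW_ERROR bits of priv->status: each test_bit() evaluates to 0 or 1
	 * and is shifted back to its own bit position, so the &= clears every
	 * other status bit in a single step. */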
6281
6282 spin_lock_irqsave(&priv->lock, flags);
6283 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
6284 spin_unlock_irqrestore(&priv->lock, flags);
6285
6286 iwl_hw_txq_ctx_stop(priv);
6287 iwl_hw_rxq_stop(priv);
6288
6289 spin_lock_irqsave(&priv->lock, flags);
6290 if (!iwl_grab_restricted_access(priv)) {
6291 iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG,
6292 APMG_CLK_VAL_DMA_CLK_RQT);
6293 iwl_release_restricted_access(priv);
6294 }
6295 spin_unlock_irqrestore(&priv->lock, flags);
6296
6297 udelay(5);
6298
6299 iwl_hw_nic_stop_master(priv);
6300 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
6301 iwl_hw_nic_reset(priv);
6302
6303 exit:
6304 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
6305
6306 if (priv->ibss_beacon)
6307 dev_kfree_skb(priv->ibss_beacon);
6308 priv->ibss_beacon = NULL;
6309
6310 /* clear out any free frames */
6311 iwl_clear_free_frames(priv);
6312}
6313
6314static void iwl_down(struct iwl_priv *priv)
6315{
6316 mutex_lock(&priv->mutex);
6317 __iwl_down(priv);
6318 mutex_unlock(&priv->mutex);
6319}
6320
6321#define MAX_HW_RESTARTS 5
6322
6323static int __iwl_up(struct iwl_priv *priv)
6324{
6325 int rc, i;
6326
6327 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6328 IWL_WARNING("Exit pending; will not bring the NIC up\n");
6329 return -EIO;
6330 }
6331
6332 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6333 IWL_WARNING("Radio disabled by SW RF kill (module "
6334 "parameter)\n");
6335 return 0;
6336 }
6337
6338 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
6339
6340 rc = iwl_hw_nic_init(priv);
6341 if (rc) {
6342 IWL_ERROR("Unable to int nic\n");
6343 return rc;
6344 }
6345
6346 /* make sure rfkill handshake bits are cleared */
6347 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6348 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
6349 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6350
6351 /* clear (again), then enable host interrupts */
6352 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
6353 iwl_enable_interrupts(priv);
6354
6355 /* really make sure rfkill handshake bits are cleared */
6356 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6357 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6358
6359 /* Copy original ucode data image from disk into backup cache.
6360 * This will be used to initialize the on-board processor's
6361 * data SRAM for a clean start when the runtime program first loads. */
6362 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
6363 priv->ucode_data.len);
6364
6365 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6366
6367 iwl_clear_stations_table(priv);
6368
6369 /* load bootstrap state machine,
6370 * load bootstrap program into processor's memory,
6371 * prepare to load the "initialize" uCode */
6372 rc = iwl_load_bsm(priv);
6373
6374 if (rc) {
6375 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
6376 continue;
6377 }
6378
6379 /* start card; "initialize" will load runtime ucode */
6380 iwl_nic_start(priv);
6381
6382 /* MAC Address location in EEPROM same for 3945/4965 */
6383 get_eeprom_mac(priv, priv->mac_addr);
6384 IWL_DEBUG_INFO("MAC address: " MAC_FMT "\n",
6385 MAC_ARG(priv->mac_addr));
6386
6387 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
6388
6389 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
6390
6391 return 0;
6392 }
6393
6394 set_bit(STATUS_EXIT_PENDING, &priv->status);
6395 __iwl_down(priv);
6396
6397 /* We tried to restart and configure the device for as long as our
6398 * patience could withstand */
6399 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
6400 return -EIO;
6401}
6402
6403
6404/*****************************************************************************
6405 *
6406 * Workqueue callbacks
6407 *
6408 *****************************************************************************/
6409
6410static void iwl_bg_init_alive_start(struct work_struct *data)
6411{
6412 struct iwl_priv *priv =
6413 container_of(data, struct iwl_priv, init_alive_start.work);
6414
6415 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6416 return;
6417
6418 mutex_lock(&priv->mutex);
6419 iwl_init_alive_start(priv);
6420 mutex_unlock(&priv->mutex);
6421}
6422
6423static void iwl_bg_alive_start(struct work_struct *data)
6424{
6425 struct iwl_priv *priv =
6426 container_of(data, struct iwl_priv, alive_start.work);
6427
6428 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6429 return;
6430
6431 mutex_lock(&priv->mutex);
6432 iwl_alive_start(priv);
6433 mutex_unlock(&priv->mutex);
6434}
6435
6436static void iwl_bg_rf_kill(struct work_struct *work)
6437{
6438 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
6439
6440 wake_up_interruptible(&priv->wait_command_queue);
6441
6442 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6443 return;
6444
6445 mutex_lock(&priv->mutex);
6446
6447 if (!iwl_is_rfkill(priv)) {
6448 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6449 "HW and/or SW RF Kill no longer active, restarting "
6450 "device\n");
6451 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6452 queue_work(priv->workqueue, &priv->restart);
6453 } else {
6454
6455 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6456 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6457 "disabled by SW switch\n");
6458 else
6459 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6460 "Kill switch must be turned off for "
6461 "wireless networking to work.\n");
6462 }
6463 mutex_unlock(&priv->mutex);
6464}
6465
6466#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6467
6468static void iwl_bg_scan_check(struct work_struct *data)
6469{
6470 struct iwl_priv *priv =
6471 container_of(data, struct iwl_priv, scan_check.work);
6472
6473 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6474 return;
6475
6476 mutex_lock(&priv->mutex);
6477 if (test_bit(STATUS_SCANNING, &priv->status) ||
6478 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6479 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6480 "Scan completion watchdog resetting adapter (%dms)\n",
6481 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
6482 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6483 queue_work(priv->workqueue, &priv->restart);
6484 }
6485 mutex_unlock(&priv->mutex);
6486}
6487
6488static void iwl_bg_request_scan(struct work_struct *data)
6489{
6490 struct iwl_priv *priv =
6491 container_of(data, struct iwl_priv, request_scan);
6492 struct iwl_host_cmd cmd = {
6493 .id = REPLY_SCAN_CMD,
6494 .len = sizeof(struct iwl_scan_cmd),
6495 .meta.flags = CMD_SIZE_HUGE,
6496 };
6497 int rc = 0;
6498 struct iwl_scan_cmd *scan;
6499 struct ieee80211_conf *conf = NULL;
6500 u8 direct_mask;
6501 int phymode;
6502
6503 conf = ieee80211_get_hw_conf(priv->hw);
6504
6505 mutex_lock(&priv->mutex);
6506
6507 if (!iwl_is_ready(priv)) {
6508 IWL_WARNING("request scan called when driver not ready.\n");
6509 goto done;
6510 }
6511
6512 /* Make sure the scan wasn't cancelled before this queued work
6513 * was given the chance to run... */
6514 if (!test_bit(STATUS_SCANNING, &priv->status))
6515 goto done;
6516
6517 /* This should never be called or scheduled if there is currently
6518 * a scan active in the hardware. */
6519 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6520 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
6521 "Ignoring second request.\n");
6522 rc = -EIO;
6523 goto done;
6524 }
6525
6526 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6527 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
6528 goto done;
6529 }
6530
6531 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6532 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6533 goto done;
6534 }
6535
6536 if (iwl_is_rfkill(priv)) {
6537 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6538 goto done;
6539 }
6540
6541 if (!test_bit(STATUS_READY, &priv->status)) {
6542 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
6543 goto done;
6544 }
6545
6546 if (!priv->scan_bands) {
6547 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
6548 goto done;
6549 }
6550
6551 if (!priv->scan) {
6552 priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
6553 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
6554 if (!priv->scan) {
6555 rc = -ENOMEM;
6556 goto done;
6557 }
6558 }
6559 scan = priv->scan;
6560 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
6561
6562 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
6563 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
6564
6565 if (iwl_is_associated(priv)) {
6566 u16 interval = 0;
6567 u32 extra;
6568 u32 suspend_time = 100;
6569 u32 scan_suspend_time = 100;
6570 unsigned long flags;
6571
6572 IWL_DEBUG_INFO("Scanning while associated...\n");
6573
6574 spin_lock_irqsave(&priv->lock, flags);
6575 interval = priv->beacon_int;
6576 spin_unlock_irqrestore(&priv->lock, flags);
6577
6578 scan->suspend_time = 0;
6579 scan->max_out_time = cpu_to_le32(600 * 1024);
6580 if (!interval)
6581 interval = suspend_time;
6582 /*
6583 * suspend time format:
6584 * 0-19: beacon interval in usec (time before exec.)
6585 * 20-23: 0
6586 * 24-31: number of beacons (suspend between channels)
6587 */
6588
6589 extra = (suspend_time / interval) << 24;
6590 scan_suspend_time = 0xFF0FFFFF &
6591 (extra | ((suspend_time % interval) * 1024));
6592
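		/* Worked example of the encoding above (illustrative; assumes
		 * beacon_int is in TU, i.e. 1024 usec units):
		 *   interval = 100, suspend_time = 100:
		 *     extra    = (100 / 100) << 24   = 0x01000000
		 *     low bits = (100 % 100) * 1024  = 0
		 *     scan_suspend_time              = 0x01000000
		 *     -> suspend for one full beacon between channels
		 *   interval = 102, suspend_time = 100:
		 *     extra    = 0
		 *     low bits = 100 * 1024          = 0x19000
		 *     scan_suspend_time              = 0x00019000
		 *     -> suspend ~100 TU worth of usec, less than one beacon
		 */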
6593 scan->suspend_time = cpu_to_le32(scan_suspend_time);
6594 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
6595 scan_suspend_time, interval);
6596 }
6597
6598 /* We should add the ability for user to lock to PASSIVE ONLY */
6599 if (priv->one_direct_scan) {
6600 IWL_DEBUG_SCAN
6601 ("Kicking off one direct scan for '%s'\n",
6602 iwl_escape_essid(priv->direct_ssid,
6603 priv->direct_ssid_len));
6604 scan->direct_scan[0].id = WLAN_EID_SSID;
6605 scan->direct_scan[0].len = priv->direct_ssid_len;
6606 memcpy(scan->direct_scan[0].ssid,
6607 priv->direct_ssid, priv->direct_ssid_len);
6608 direct_mask = 1;
6609 } else if (!iwl_is_associated(priv)) {
6610 scan->direct_scan[0].id = WLAN_EID_SSID;
6611 scan->direct_scan[0].len = priv->essid_len;
6612 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
6613 direct_mask = 1;
6614 } else
6615 direct_mask = 0;
6616
6617 /* We don't build a direct scan probe request; the uCode will do
6618 * that based on the direct_mask added to each channel entry */
6619 scan->tx_cmd.len = cpu_to_le16(
6620 iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
6621 IWL_MAX_SCAN_SIZE - sizeof(scan), 0));
6622 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
6623 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
6624 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
6625
6626 /* flags + rate selection */
6627
6628 switch (priv->scan_bands) {
6629 case 2:
6630 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
6631 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
6632 scan->good_CRC_th = 0;
6633 phymode = MODE_IEEE80211G;
6634 break;
6635
6636 case 1:
6637 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
6638 scan->good_CRC_th = IWL_GOOD_CRC_TH;
6639 phymode = MODE_IEEE80211A;
6640 break;
6641
6642 default:
6643 IWL_WARNING("Invalid scan band count\n");
6644 goto done;
6645 }
6646
6647 /* select Rx antennas */
6648 scan->flags |= iwl3945_get_antenna_flags(priv);
6649
6650 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
6651 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
6652
6653 if (direct_mask)
6654 IWL_DEBUG_SCAN
6655 ("Initiating direct scan for %s.\n",
6656 iwl_escape_essid(priv->essid, priv->essid_len));
6657 else
6658 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
6659
6660 scan->channel_count =
6661 iwl_get_channels_for_scan(
6662 priv, phymode, 1, /* active */
6663 direct_mask,
6664 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6665
6666 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
6667 scan->channel_count * sizeof(struct iwl_scan_channel);
6668 cmd.data = scan;
6669 scan->len = cpu_to_le16(cmd.len);
6670
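	/* Resulting command buffer layout (as assembled above):
	 *
	 *   struct iwl_scan_cmd             fixed header (cmd.len starts here)
	 *   scan->data[0 .. tx_cmd.len)     probe request template built by
	 *                                   iwl_fill_probe_req()
	 *   channel_count entries of        struct iwl_scan_channel
	 *
	 * scan->len / cmd.len cover the whole variable-length blob, which is
	 * sent below as a single CMD_SIZE_HUGE host command. */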
6671 set_bit(STATUS_SCAN_HW, &priv->status);
6672 rc = iwl_send_cmd_sync(priv, &cmd);
6673 if (rc)
6674 goto done;
6675
6676 queue_delayed_work(priv->workqueue, &priv->scan_check,
6677 IWL_SCAN_CHECK_WATCHDOG);
6678
6679 mutex_unlock(&priv->mutex);
6680 return;
6681
6682 done:
6683 /* inform mac80211 that the scan was aborted */
6684 queue_work(priv->workqueue, &priv->scan_completed);
6685 mutex_unlock(&priv->mutex);
6686}
6687
6688static void iwl_bg_up(struct work_struct *data)
6689{
6690 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
6691
6692 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6693 return;
6694
6695 mutex_lock(&priv->mutex);
6696 __iwl_up(priv);
6697 mutex_unlock(&priv->mutex);
6698}
6699
6700static void iwl_bg_restart(struct work_struct *data)
6701{
6702 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
6703
6704 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6705 return;
6706
6707 iwl_down(priv);
6708 queue_work(priv->workqueue, &priv->up);
6709}
6710
6711static void iwl_bg_rx_replenish(struct work_struct *data)
6712{
6713 struct iwl_priv *priv =
6714 container_of(data, struct iwl_priv, rx_replenish);
6715
6716 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6717 return;
6718
6719 mutex_lock(&priv->mutex);
6720 iwl_rx_replenish(priv);
6721 mutex_unlock(&priv->mutex);
6722}
6723
6724static void iwl_bg_post_associate(struct work_struct *data)
6725{
6726 struct iwl_priv *priv = container_of(data, struct iwl_priv,
6727 post_associate.work);
6728
6729 int rc = 0;
6730 struct ieee80211_conf *conf = NULL;
6731
6732 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
6733 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
6734 return;
6735 }
6736
6737
6738 IWL_DEBUG_ASSOC("Associated as %d to: " MAC_FMT "\n",
6739 priv->assoc_id, MAC_ARG(priv->active_rxon.bssid_addr));
6740
6741 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6742 return;
6743
6744 mutex_lock(&priv->mutex);
6745
6746 conf = ieee80211_get_hw_conf(priv->hw);
6747
6748 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6749 iwl_commit_rxon(priv);
6750
6751 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
6752 iwl_setup_rxon_timing(priv);
6753 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
6754 sizeof(priv->rxon_timing), &priv->rxon_timing);
6755 if (rc)
6756 IWL_WARNING("REPLY_RXON_TIMING failed - "
6757 "Attempting to continue.\n");
6758
6759 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
6760
6761 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
6762
6763 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
6764 priv->assoc_id, priv->beacon_int);
6765
6766 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
6767 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6768 else
6769 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
6770
6771 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
6772 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
6773 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
6774 else
6775 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6776
6777 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
6778 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6779
6780 }
6781
6782 iwl_commit_rxon(priv);
6783
6784 switch (priv->iw_mode) {
6785 case IEEE80211_IF_TYPE_STA:
6786 iwl_rate_scale_init(priv->hw, IWL_AP_ID);
6787 break;
6788
6789 case IEEE80211_IF_TYPE_IBSS:
6790
6791 /* clear out the station table */
6792 iwl_clear_stations_table(priv);
6793
6794 iwl_rxon_add_station(priv, BROADCAST_ADDR, 0);
6795 iwl_rxon_add_station(priv, priv->bssid, 0);
6796 iwl3945_sync_sta(priv, IWL_STA_ID,
6797 (priv->phymode == MODE_IEEE80211A)?
6798 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
6799 CMD_ASYNC);
6800 iwl_rate_scale_init(priv->hw, IWL_STA_ID);
6801 iwl_send_beacon_cmd(priv);
6802
6803 break;
6804
6805 default:
6806 IWL_ERROR("%s Should not be called in %d mode\n",
6807 __FUNCTION__, priv->iw_mode);
6808 break;
6809 }
6810
6811 iwl_sequence_reset(priv);
6812
6813#ifdef CONFIG_IWLWIFI_QOS
6814 iwl_activate_qos(priv, 0);
6815#endif /* CONFIG_IWLWIFI_QOS */
6816 mutex_unlock(&priv->mutex);
6817}
6818
6819static void iwl_bg_abort_scan(struct work_struct *work)
6820{
6821 struct iwl_priv *priv = container_of(work, struct iwl_priv,
6822 abort_scan);
6823
6824 if (!iwl_is_ready(priv))
6825 return;
6826
6827 mutex_lock(&priv->mutex);
6828
6829 set_bit(STATUS_SCAN_ABORTING, &priv->status);
6830 iwl_send_scan_abort(priv);
6831
6832 mutex_unlock(&priv->mutex);
6833}
6834
6835static void iwl_bg_scan_completed(struct work_struct *work)
6836{
6837 struct iwl_priv *priv =
6838 container_of(work, struct iwl_priv, scan_completed);
6839
6840 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete\n");
6841
6842 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6843 return;
6844
6845 ieee80211_scan_completed(priv->hw);
6846
6847 /* Since setting the TXPOWER may have been deferred while
6848 * performing the scan, fire one off */
6849 mutex_lock(&priv->mutex);
6850 iwl_hw_reg_send_txpower(priv);
6851 mutex_unlock(&priv->mutex);
6852}
6853
6854/*****************************************************************************
6855 *
6856 * mac80211 entry point functions
6857 *
6858 *****************************************************************************/
6859
6860static int iwl_mac_open(struct ieee80211_hw *hw)
6861{
6862 struct iwl_priv *priv = hw->priv;
6863
6864 IWL_DEBUG_MAC80211("enter\n");
6865
6866 /* we should be verifying the device is ready to be opened */
6867 mutex_lock(&priv->mutex);
6868
6869 priv->is_open = 1;
6870
6871 if (!iwl_is_rfkill(priv))
6872 ieee80211_start_queues(priv->hw);
6873
6874 mutex_unlock(&priv->mutex);
6875 IWL_DEBUG_MAC80211("leave\n");
6876 return 0;
6877}
6878
6879static int iwl_mac_stop(struct ieee80211_hw *hw)
6880{
6881 struct iwl_priv *priv = hw->priv;
6882
6883 IWL_DEBUG_MAC80211("enter\n");
6884 priv->is_open = 0;
6885 /*netif_stop_queue(dev); */
6886 flush_workqueue(priv->workqueue);
6887 IWL_DEBUG_MAC80211("leave\n");
6888
6889 return 0;
6890}
6891
6892static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
6893 struct ieee80211_tx_control *ctl)
6894{
6895 struct iwl_priv *priv = hw->priv;
6896
6897 IWL_DEBUG_MAC80211("enter\n");
6898
6899 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
6900 IWL_DEBUG_MAC80211("leave - monitor\n");
6901 return -1;
6902 }
6903
6904 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6905 ctl->tx_rate);
6906
6907 if (iwl_tx_skb(priv, skb, ctl))
6908 dev_kfree_skb_any(skb);
6909
6910 IWL_DEBUG_MAC80211("leave\n");
6911 return 0;
6912}
6913
6914static int iwl_mac_add_interface(struct ieee80211_hw *hw,
6915 struct ieee80211_if_init_conf *conf)
6916{
6917 struct iwl_priv *priv = hw->priv;
6918 unsigned long flags;
6919
6920 IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type);
6921 if (conf->mac_addr)
6922 IWL_DEBUG_MAC80211("enter: MAC " MAC_FMT "\n",
6923 MAC_ARG(conf->mac_addr));
6924
6925 if (priv->interface_id) {
6926 IWL_DEBUG_MAC80211("leave - interface_id != 0\n");
6927 return 0;
6928 }
6929
6930 spin_lock_irqsave(&priv->lock, flags);
6931 priv->interface_id = conf->if_id;
6932
6933 spin_unlock_irqrestore(&priv->lock, flags);
6934
6935 mutex_lock(&priv->mutex);
6936 iwl_set_mode(priv, conf->type);
6937
6938 IWL_DEBUG_MAC80211("leave\n");
6939 mutex_unlock(&priv->mutex);
6940
6941 return 0;
6942}
6943
6944/**
6945 * iwl_mac_config - mac80211 config callback
6946 *
6947 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
6948 * be set inappropriately and the driver currently sets the hardware up to
6949 * use it whenever needed.
6950 */
6951static int iwl_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
6952{
6953 struct iwl_priv *priv = hw->priv;
6954 const struct iwl_channel_info *ch_info;
6955 unsigned long flags;
6956
6957 mutex_lock(&priv->mutex);
6958 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel);
6959
6960 if (!iwl_is_ready(priv)) {
6961 IWL_DEBUG_MAC80211("leave - not ready\n");
6962 mutex_unlock(&priv->mutex);
6963 return -EIO;
6964 }
6965
6966 /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only
6967 * what is exposed through include/ declarations */
6968 if (unlikely(!iwl_param_disable_hw_scan &&
6969 test_bit(STATUS_SCANNING, &priv->status))) {
6970 IWL_DEBUG_MAC80211("leave - scanning\n");
6971 mutex_unlock(&priv->mutex);
6972 return 0;
6973 }
6974
6975 spin_lock_irqsave(&priv->lock, flags);
6976
6977 ch_info = iwl_get_channel_info(priv, conf->phymode, conf->channel);
6978 if (!is_channel_valid(ch_info)) {
6979 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
6980 conf->channel, conf->phymode);
6981 IWL_DEBUG_MAC80211("leave - invalid channel\n");
6982 spin_unlock_irqrestore(&priv->lock, flags);
6983 mutex_unlock(&priv->mutex);
6984 return -EINVAL;
6985 }
6986
6987 iwl_set_rxon_channel(priv, conf->phymode, conf->channel);
6988
6989 iwl_set_flags_for_phymode(priv, conf->phymode);
6990
6991 /* The list of supported rates and rate mask can be different
6992 * for each phymode; since the phymode may have changed, reset
6993 * the rate mask to what mac80211 lists */
6994 iwl_set_rate(priv);
6995
6996 spin_unlock_irqrestore(&priv->lock, flags);
6997
6998#ifdef IEEE80211_CONF_CHANNEL_SWITCH
6999 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
7000 iwl_hw_channel_switch(priv, conf->channel);
7001 mutex_unlock(&priv->mutex);
7002 return 0;
7003 }
7004#endif
7005
7006 iwl_radio_kill_sw(priv, !conf->radio_enabled);
7007
7008 if (!conf->radio_enabled) {
7009 IWL_DEBUG_MAC80211("leave - radio disabled\n");
7010 mutex_unlock(&priv->mutex);
7011 return 0;
7012 }
7013
7014 if (iwl_is_rfkill(priv)) {
7015 IWL_DEBUG_MAC80211("leave - RF kill\n");
7016 mutex_unlock(&priv->mutex);
7017 return -EIO;
7018 }
7019
7020 iwl_set_rate(priv);
7021
7022 if (memcmp(&priv->active_rxon,
7023 &priv->staging_rxon, sizeof(priv->staging_rxon)))
7024 iwl_commit_rxon(priv);
7025 else
7026 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
7027
7028 IWL_DEBUG_MAC80211("leave\n");
7029
7030 mutex_unlock(&priv->mutex);
7031
7032 return 0;
7033}
7034
7035static void iwl_config_ap(struct iwl_priv *priv)
7036{
7037 int rc = 0;
7038
7039 if (priv->status & STATUS_EXIT_PENDING)
7040 return;
7041
7042 /* The following should be done only at AP bring up */
7043 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
7044
7045 /* RXON - unassoc (to set timing command) */
7046 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7047 iwl_commit_rxon(priv);
7048
7049 /* RXON Timing */
7050 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
7051 iwl_setup_rxon_timing(priv);
7052 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
7053 sizeof(priv->rxon_timing), &priv->rxon_timing);
7054 if (rc)
7055 IWL_WARNING("REPLY_RXON_TIMING failed - "
7056 "Attempting to continue.\n");
7057
7058 /* FIXME: what should be the assoc_id for AP? */
7059 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7060 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7061 priv->staging_rxon.flags |=
7062 RXON_FLG_SHORT_PREAMBLE_MSK;
7063 else
7064 priv->staging_rxon.flags &=
7065 ~RXON_FLG_SHORT_PREAMBLE_MSK;
7066
7067 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7068 if (priv->assoc_capability &
7069 WLAN_CAPABILITY_SHORT_SLOT_TIME)
7070 priv->staging_rxon.flags |=
7071 RXON_FLG_SHORT_SLOT_MSK;
7072 else
7073 priv->staging_rxon.flags &=
7074 ~RXON_FLG_SHORT_SLOT_MSK;
7075
7076 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7077 priv->staging_rxon.flags &=
7078 ~RXON_FLG_SHORT_SLOT_MSK;
7079 }
7080 /* restore RXON assoc */
7081 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7082 iwl_commit_rxon(priv);
7083 iwl_rxon_add_station(priv, BROADCAST_ADDR, 0);
7084 iwl_send_beacon_cmd(priv);
7085 } else
7086 iwl_send_beacon_cmd(priv);
7087
7088 /* FIXME - we need to add code here to detect a totally new
7089 * configuration, reset the AP, unassoc, rxon timing, assoc,
7090 * clear sta table, add BCAST sta... */
7091}
7092
7093static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id,
7094 struct ieee80211_if_conf *conf)
7095{
7096 struct iwl_priv *priv = hw->priv;
7097 unsigned long flags;
7098 int rc;
7099
7100 if (conf == NULL)
7101 return -EIO;
7102
7103 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
7104 (!conf->beacon || !conf->ssid_len)) {
7105 IWL_DEBUG_MAC80211
7106 ("Leaving in AP mode because HostAPD is not ready.\n");
7107 return 0;
7108 }
7109
7110 mutex_lock(&priv->mutex);
7111
7112 IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id);
7113 if (conf->bssid)
7114 IWL_DEBUG_MAC80211("bssid: " MAC_FMT "\n",
7115 MAC_ARG(conf->bssid));
7116
7117 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7118 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
7119 IWL_DEBUG_MAC80211("leave - scanning\n");
7120 mutex_unlock(&priv->mutex);
7121 return 0;
7122 }
7123
7124 if (priv->interface_id != if_id) {
7125 IWL_DEBUG_MAC80211("leave - interface_id != if_id\n");
7126 mutex_unlock(&priv->mutex);
7127 return 0;
7128 }
7129
7130 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7131 if (!conf->bssid) {
7132 conf->bssid = priv->mac_addr;
7133 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
7134 IWL_DEBUG_MAC80211("bssid was set to: " MAC_FMT "\n",
7135 MAC_ARG(conf->bssid));
7136 }
7137 if (priv->ibss_beacon)
7138 dev_kfree_skb(priv->ibss_beacon);
7139
7140 priv->ibss_beacon = conf->beacon;
7141 }
7142
7143 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
7144 !is_multicast_ether_addr(conf->bssid)) {
7145 /* If there is currently a HW scan going on in the background
7146 * then we need to cancel it else the RXON below will fail. */
7147 if (iwl_scan_cancel_timeout(priv, 100)) {
7148 IWL_WARNING("Aborted scan still in progress "
7149 "after 100ms\n");
7150 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
7151 mutex_unlock(&priv->mutex);
7152 return -EAGAIN;
7153 }
7154 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
7155
7156 /* TODO: Audit driver for usage of these members and see
7157 * if mac80211 deprecates them (priv->bssid looks like it
7158 * shouldn't be there, but I haven't scanned the IBSS code
7159 * to verify) - jpk */
7160 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
7161
7162 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
7163 iwl_config_ap(priv);
7164 else {
7165 priv->staging_rxon.filter_flags |=
7166 RXON_FILTER_ASSOC_MSK;
7167 rc = iwl_commit_rxon(priv);
7168 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
7169 iwl_rxon_add_station(
7170 priv, priv->active_rxon.bssid_addr, 1);
7171 }
7172
7173 } else {
7174 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7175 iwl_commit_rxon(priv);
7176 }
7177
7178 spin_lock_irqsave(&priv->lock, flags);
7179 if (!conf->ssid_len)
7180 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7181 else
7182 memcpy(priv->essid, conf->ssid, conf->ssid_len);
7183
7184 priv->essid_len = conf->ssid_len;
7185 spin_unlock_irqrestore(&priv->lock, flags);
7186
7187 IWL_DEBUG_MAC80211("leave\n");
7188 mutex_unlock(&priv->mutex);
7189
7190 return 0;
7191}
7192
7193static void iwl_mac_remove_interface(struct ieee80211_hw *hw,
7194 struct ieee80211_if_init_conf *conf)
7195{
7196 struct iwl_priv *priv = hw->priv;
7197
7198 IWL_DEBUG_MAC80211("enter\n");
7199
7200 mutex_lock(&priv->mutex);
7201 if (priv->interface_id == conf->if_id) {
7202 priv->interface_id = 0;
7203 memset(priv->bssid, 0, ETH_ALEN);
7204 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7205 priv->essid_len = 0;
7206 }
7207 mutex_unlock(&priv->mutex);
7208
7209 IWL_DEBUG_MAC80211("leave\n");
7210
7211}
7212
7213#define IWL_DELAY_NEXT_SCAN (HZ*2)
7214static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
7215{
7216 int rc = 0;
7217 unsigned long flags;
7218 struct iwl_priv *priv = hw->priv;
7219
7220 IWL_DEBUG_MAC80211("enter\n");
7221
7222 spin_lock_irqsave(&priv->lock, flags);
7223
7224 if (!iwl_is_ready_rf(priv)) {
7225 rc = -EIO;
7226 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
7227 goto out_unlock;
7228 }
7229
7230 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */
7231 rc = -EIO;
7232 IWL_ERROR("ERROR: APs don't scan\n");
7233 goto out_unlock;
7234 }
7235
7236 /* if we just finished scan ask for delay */
7237 if (priv->last_scan_jiffies &&
7238 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
7239 jiffies)) {
7240 rc = -EAGAIN;
7241 goto out_unlock;
7242 }
7243 if (len) {
7244 IWL_DEBUG_SCAN("direct scan for "
7245			       "%s [%d]\n",
7246 iwl_escape_essid(ssid, len), (int)len);
7247
7248 priv->one_direct_scan = 1;
7249 priv->direct_ssid_len = (u8)
7250 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
7251 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
7252 }
7253
7254 rc = iwl_scan_initiate(priv);
7255
7256 IWL_DEBUG_MAC80211("leave\n");
7257
7258out_unlock:
7259 spin_unlock_irqrestore(&priv->lock, flags);
7260
7261 return rc;
7262}
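/*
 * Editorial sketch (not part of the original patch): the back-off above
 * refuses a new scan with -EAGAIN when less than IWL_DELAY_NEXT_SCAN
 * (HZ * 2, i.e. two seconds) has passed since the previous scan finished.
 * Plain integers stand in for jiffies below, so counter wrap (which the
 * kernel's time_after() handles) is ignored; names and values are
 * illustrative only.
 */
#include <stdio.h>

#define DELAY_NEXT_SCAN 2000		/* pretend HZ == 1000: two seconds */

static int scan_allowed(long last_scan, long now)
{
	/* mirrors: last_scan_jiffies && time_after(last + delay, now) -> refuse */
	if (last_scan && last_scan + DELAY_NEXT_SCAN > now)
		return 0;
	return 1;
}

int main(void)
{
	long last = 10000;

	printf("1s after last scan: %s\n",
	       scan_allowed(last, last + 1000) ? "allowed" : "-EAGAIN");
	printf("3s after last scan: %s\n",
	       scan_allowed(last, last + 3000) ? "allowed" : "-EAGAIN");
	return 0;
}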
7263
7264static int iwl_mac_set_key(struct ieee80211_hw *hw, set_key_cmd cmd,
7265 const u8 *local_addr, const u8 *addr,
7266 struct ieee80211_key_conf *key)
7267{
7268 struct iwl_priv *priv = hw->priv;
7269 int rc = 0;
7270 u8 sta_id;
7271
7272 IWL_DEBUG_MAC80211("enter\n");
7273
7274 if (!iwl_param_hwcrypto) {
7275 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7276 return -EOPNOTSUPP;
7277 }
7278
7279 if (is_zero_ether_addr(addr))
7280 /* only support pairwise keys */
7281 return -EOPNOTSUPP;
7282
7283 sta_id = iwl_hw_find_station(priv, addr);
7284 if (sta_id == IWL_INVALID_STATION) {
7285 IWL_DEBUG_MAC80211("leave - " MAC_FMT " not in station map.\n",
7286 MAC_ARG(addr));
7287 return -EINVAL;
7288 }
7289
7290 mutex_lock(&priv->mutex);
7291
7292 switch (cmd) {
7293 case SET_KEY:
7294 rc = iwl_update_sta_key_info(priv, key, sta_id);
7295 if (!rc) {
7296 iwl_set_rxon_hwcrypto(priv, 1);
7297 iwl_commit_rxon(priv);
7298 key->hw_key_idx = sta_id;
7299 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
7300 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7301 }
7302 break;
7303 case DISABLE_KEY:
7304 rc = iwl_clear_sta_key_info(priv, sta_id);
7305 if (!rc) {
7306 iwl_set_rxon_hwcrypto(priv, 0);
7307 iwl_commit_rxon(priv);
7308 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7309 }
7310 break;
7311 default:
7312 rc = -EINVAL;
7313 }
7314
7315 IWL_DEBUG_MAC80211("leave\n");
7316 mutex_unlock(&priv->mutex);
7317
7318 return rc;
7319}
7320
7321static int iwl_mac_conf_tx(struct ieee80211_hw *hw, int queue,
7322 const struct ieee80211_tx_queue_params *params)
7323{
7324 struct iwl_priv *priv = hw->priv;
7325#ifdef CONFIG_IWLWIFI_QOS
7326 unsigned long flags;
7327 int q;
7328#endif /* CONFIG_IWLWIFI_QOS */
7329
7330 IWL_DEBUG_MAC80211("enter\n");
7331
7332 if (!iwl_is_ready_rf(priv)) {
7333 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7334 return -EIO;
7335 }
7336
7337 if (queue >= AC_NUM) {
7338 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
7339 return 0;
7340 }
7341
7342#ifdef CONFIG_IWLWIFI_QOS
7343 if (!priv->qos_data.qos_enable) {
7344 priv->qos_data.qos_active = 0;
7345 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
7346 return 0;
7347 }
7348 q = AC_NUM - 1 - queue;
7349
7350 spin_lock_irqsave(&priv->lock, flags);
7351
7352 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
7353 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
7354 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
7355 priv->qos_data.def_qos_parm.ac[q].edca_txop =
7356 cpu_to_le16((params->burst_time * 100));
7357
7358 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
7359 priv->qos_data.qos_active = 1;
7360
7361 spin_unlock_irqrestore(&priv->lock, flags);
7362
7363 mutex_lock(&priv->mutex);
7364 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
7365 iwl_activate_qos(priv, 1);
7366 else if (priv->assoc_id && iwl_is_associated(priv))
7367 iwl_activate_qos(priv, 0);
7368
7369 mutex_unlock(&priv->mutex);
7370
7371#endif /* CONFIG_IWLWIFI_QOS */
7372
7373 IWL_DEBUG_MAC80211("leave\n");
7374 return 0;
7375}
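/*
 * Editorial sketch (not part of the original patch): iwl_mac_conf_tx() above
 * writes the EDCA parameters for mac80211 queue `queue` into the firmware
 * slot `AC_NUM - 1 - queue`, i.e. the index is simply mirrored.  AC_NUM is
 * assumed to be 4 here, matching the four data queues this driver advertises
 * via hw->queues; the loop just prints the resulting mapping.
 */
#include <stdio.h>

#define AC_NUM 4	/* assumption for illustration */

int main(void)
{
	for (int queue = 0; queue < AC_NUM; queue++)
		printf("mac80211 queue %d -> def_qos_parm.ac[%d]\n",
		       queue, AC_NUM - 1 - queue);
	return 0;
}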
7376
7377static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
7378 struct ieee80211_tx_queue_stats *stats)
7379{
7380 struct iwl_priv *priv = hw->priv;
7381 int i, avail;
7382 struct iwl_tx_queue *txq;
7383 struct iwl_queue *q;
7384 unsigned long flags;
7385
7386 IWL_DEBUG_MAC80211("enter\n");
7387
7388 if (!iwl_is_ready_rf(priv)) {
7389 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7390 return -EIO;
7391 }
7392
7393 spin_lock_irqsave(&priv->lock, flags);
7394
7395 for (i = 0; i < AC_NUM; i++) {
7396 txq = &priv->txq[i];
7397 q = &txq->q;
7398 avail = iwl_queue_space(q);
7399
7400 stats->data[i].len = q->n_window - avail;
7401 stats->data[i].limit = q->n_window - q->high_mark;
7402 stats->data[i].count = q->n_window;
7403
7404 }
7405 spin_unlock_irqrestore(&priv->lock, flags);
7406
7407 IWL_DEBUG_MAC80211("leave\n");
7408
7409 return 0;
7410}
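/*
 * Editorial sketch (not part of the original patch): how the per-queue
 * numbers filled in by iwl_mac_get_tx_stats() above relate to the queue
 * bookkeeping: `len` is the number of occupied window slots, `limit` is the
 * occupancy at which the queue nears its high-water mark, and `count` is the
 * window size.  The figures below are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	int n_window = 64;	/* hypothetical usable slots in the window */
	int high_mark = 8;	/* hypothetical high-water mark */
	int avail = 40;		/* free space, as iwl_queue_space() would report */

	printf("len   = %d\n", n_window - avail);
	printf("limit = %d\n", n_window - high_mark);
	printf("count = %d\n", n_window);
	return 0;
}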
7411
7412static int iwl_mac_get_stats(struct ieee80211_hw *hw,
7413 struct ieee80211_low_level_stats *stats)
7414{
7415 IWL_DEBUG_MAC80211("enter\n");
7416 IWL_DEBUG_MAC80211("leave\n");
7417
7418 return 0;
7419}
7420
7421static u64 iwl_mac_get_tsf(struct ieee80211_hw *hw)
7422{
7423 IWL_DEBUG_MAC80211("enter\n");
7424 IWL_DEBUG_MAC80211("leave\n");
7425
7426 return 0;
7427}
7428
7429static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
7430{
7431 struct iwl_priv *priv = hw->priv;
7432 unsigned long flags;
7433
7434 mutex_lock(&priv->mutex);
7435 IWL_DEBUG_MAC80211("enter\n");
7436
7437#ifdef CONFIG_IWLWIFI_QOS
7438 iwl_reset_qos(priv);
7439#endif
7440 cancel_delayed_work(&priv->post_associate);
7441
7442 spin_lock_irqsave(&priv->lock, flags);
7443 priv->assoc_id = 0;
7444 priv->assoc_capability = 0;
7445 priv->call_post_assoc_from_beacon = 0;
7446
7447	/* new association: get rid of the IBSS beacon skb */
7448 if (priv->ibss_beacon)
7449 dev_kfree_skb(priv->ibss_beacon);
7450
7451 priv->ibss_beacon = NULL;
7452
7453 priv->beacon_int = priv->hw->conf.beacon_int;
7454 priv->timestamp1 = 0;
7455 priv->timestamp0 = 0;
7456	if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
7457 priv->beacon_int = 0;
7458
7459 spin_unlock_irqrestore(&priv->lock, flags);
7460
7461 /* Per mac80211.h: This is only used in IBSS mode... */
7462 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
7463 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
7464 mutex_unlock(&priv->mutex);
7465 return;
7466 }
7467
7468 if (!iwl_is_ready_rf(priv)) {
7469 IWL_DEBUG_MAC80211("leave - not ready\n");
7470 mutex_unlock(&priv->mutex);
7471 return;
7472 }
7473
7474 priv->only_active_channel = 0;
7475
7476 iwl_set_rate(priv);
7477
7478 mutex_unlock(&priv->mutex);
7479
7480 IWL_DEBUG_MAC80211("leave\n");
7481
7482}
7483
7484static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
7485 struct ieee80211_tx_control *control)
7486{
7487 struct iwl_priv *priv = hw->priv;
7488 unsigned long flags;
7489
7490 mutex_lock(&priv->mutex);
7491 IWL_DEBUG_MAC80211("enter\n");
7492
7493 if (!iwl_is_ready_rf(priv)) {
7494 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7495 mutex_unlock(&priv->mutex);
7496 return -EIO;
7497 }
7498
7499 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
7500 IWL_DEBUG_MAC80211("leave - not IBSS\n");
7501 mutex_unlock(&priv->mutex);
7502 return -EIO;
7503 }
7504
7505 spin_lock_irqsave(&priv->lock, flags);
7506
7507 if (priv->ibss_beacon)
7508 dev_kfree_skb(priv->ibss_beacon);
7509
7510 priv->ibss_beacon = skb;
7511
7512 priv->assoc_id = 0;
7513
7514 IWL_DEBUG_MAC80211("leave\n");
7515 spin_unlock_irqrestore(&priv->lock, flags);
7516
7517#ifdef CONFIG_IWLWIFI_QOS
7518 iwl_reset_qos(priv);
7519#endif
7520
7521 queue_work(priv->workqueue, &priv->post_associate.work);
7522
7523 mutex_unlock(&priv->mutex);
7524
7525 return 0;
7526}
7527
7528/*****************************************************************************
7529 *
7530 * sysfs attributes
7531 *
7532 *****************************************************************************/
7533
7534#ifdef CONFIG_IWLWIFI_DEBUG
7535
7536/*
7537 * The following adds a new attribute to the sysfs representation
7538 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
7539 * used for controlling the debug level.
7540 *
7541 * See the level definitions in iwl for details.
7542 */
7543
7544static ssize_t show_debug_level(struct device_driver *d, char *buf)
7545{
7546 return sprintf(buf, "0x%08X\n", iwl_debug_level);
7547}
7548static ssize_t store_debug_level(struct device_driver *d,
7549 const char *buf, size_t count)
7550{
7551 char *p = (char *)buf;
7552 u32 val;
7553
7554 val = simple_strtoul(p, &p, 0);
7555 if (p == buf)
7556 printk(KERN_INFO DRV_NAME
7557 ": %s is not in hex or decimal form.\n", buf);
7558 else
7559 iwl_debug_level = val;
7560
7561 return strnlen(buf, count);
7562}
7563
7564static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
7565 show_debug_level, store_debug_level);
7566
7567#endif /* CONFIG_IWLWIFI_DEBUG */
7568
7569static ssize_t show_rf_kill(struct device *d,
7570 struct device_attribute *attr, char *buf)
7571{
7572 /*
7573 * 0 - RF kill not enabled
7574 * 1 - SW based RF kill active (sysfs)
7575 * 2 - HW based RF kill active
7576 * 3 - Both HW and SW based RF kill active
7577 */
7578 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7579 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
7580 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
7581
7582 return sprintf(buf, "%i\n", val);
7583}
7584
7585static ssize_t store_rf_kill(struct device *d,
7586 struct device_attribute *attr,
7587 const char *buf, size_t count)
7588{
7589 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7590
7591 mutex_lock(&priv->mutex);
7592 iwl_radio_kill_sw(priv, buf[0] == '1');
7593 mutex_unlock(&priv->mutex);
7594
7595 return count;
7596}
7597
7598static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
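/*
 * Editorial sketch (not part of the original patch): a minimal user-space
 * reader for the rf_kill attribute defined above.  Per the comment in
 * show_rf_kill(), the value is a bitmask: bit 0 = software RF kill active,
 * bit 1 = hardware RF kill active.  The sysfs path is hypothetical; use the
 * real PCI device directory of the adapter.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/rf_kill";
	FILE *f = fopen(path, "r");
	int val = 0;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%i", &val) != 1)
		val = -1;
	fclose(f);

	printf("SW RF kill: %s, HW RF kill: %s\n",
	       (val & 0x1) ? "active" : "off",
	       (val & 0x2) ? "active" : "off");
	return 0;
}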
7599
7600static ssize_t show_temperature(struct device *d,
7601 struct device_attribute *attr, char *buf)
7602{
7603 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7604
7605 if (!iwl_is_alive(priv))
7606 return -EAGAIN;
7607
7608 return sprintf(buf, "%d\n", iwl_hw_get_temperature(priv));
7609}
7610
7611static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
7612
7613static ssize_t show_rs_window(struct device *d,
7614 struct device_attribute *attr,
7615 char *buf)
7616{
7617 struct iwl_priv *priv = d->driver_data;
7618 return iwl_fill_rs_info(priv->hw, buf, IWL_AP_ID);
7619}
7620static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
7621
7622static ssize_t show_tx_power(struct device *d,
7623 struct device_attribute *attr, char *buf)
7624{
7625 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7626 return sprintf(buf, "%d\n", priv->user_txpower_limit);
7627}
7628
7629static ssize_t store_tx_power(struct device *d,
7630 struct device_attribute *attr,
7631 const char *buf, size_t count)
7632{
7633 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7634 char *p = (char *)buf;
7635 u32 val;
7636
7637 val = simple_strtoul(p, &p, 10);
7638 if (p == buf)
7639 printk(KERN_INFO DRV_NAME
7640 ": %s is not in decimal form.\n", buf);
7641 else
7642 iwl_hw_reg_set_txpower(priv, val);
7643
7644 return count;
7645}
7646
7647static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
7648
7649static ssize_t show_flags(struct device *d,
7650 struct device_attribute *attr, char *buf)
7651{
7652 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7653
7654	return sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags));
7655}
7656
7657static ssize_t store_flags(struct device *d,
7658 struct device_attribute *attr,
7659 const char *buf, size_t count)
7660{
7661 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7662 u32 flags = simple_strtoul(buf, NULL, 0);
7663
7664 mutex_lock(&priv->mutex);
7665 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
7666 /* Cancel any currently running scans... */
7667 if (iwl_scan_cancel_timeout(priv, 100))
7668 IWL_WARNING("Could not cancel scan.\n");
7669 else {
7670 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
7671 flags);
7672 priv->staging_rxon.flags = cpu_to_le32(flags);
7673 iwl_commit_rxon(priv);
7674 }
7675 }
7676 mutex_unlock(&priv->mutex);
7677
7678 return count;
7679}
7680
7681static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
7682
7683static ssize_t show_filter_flags(struct device *d,
7684 struct device_attribute *attr, char *buf)
7685{
7686 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7687
7688 return sprintf(buf, "0x%04X\n",
7689 le32_to_cpu(priv->active_rxon.filter_flags));
7690}
7691
7692static ssize_t store_filter_flags(struct device *d,
7693 struct device_attribute *attr,
7694 const char *buf, size_t count)
7695{
7696 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7697 u32 filter_flags = simple_strtoul(buf, NULL, 0);
7698
7699 mutex_lock(&priv->mutex);
7700 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
7701 /* Cancel any currently running scans... */
7702 if (iwl_scan_cancel_timeout(priv, 100))
7703 IWL_WARNING("Could not cancel scan.\n");
7704 else {
7705 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
7706 "0x%04X\n", filter_flags);
7707 priv->staging_rxon.filter_flags =
7708 cpu_to_le32(filter_flags);
7709 iwl_commit_rxon(priv);
7710 }
7711 }
7712 mutex_unlock(&priv->mutex);
7713
7714 return count;
7715}
7716
7717static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
7718 store_filter_flags);
7719
7720static ssize_t show_tune(struct device *d,
7721 struct device_attribute *attr, char *buf)
7722{
7723 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7724
7725 return sprintf(buf, "0x%04X\n",
7726 (priv->phymode << 8) |
7727 le16_to_cpu(priv->active_rxon.channel));
7728}
7729
7730static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode);
7731
7732static ssize_t store_tune(struct device *d,
7733 struct device_attribute *attr,
7734 const char *buf, size_t count)
7735{
7736 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7737 char *p = (char *)buf;
7738 u16 tune = simple_strtoul(p, &p, 0);
7739 u8 phymode = (tune >> 8) & 0xff;
7740 u16 channel = tune & 0xff;
7741
7742 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
7743
7744 mutex_lock(&priv->mutex);
7745 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
7746 (priv->phymode != phymode)) {
7747 const struct iwl_channel_info *ch_info;
7748
7749 ch_info = iwl_get_channel_info(priv, phymode, channel);
7750 if (!ch_info) {
7751 IWL_WARNING("Requested invalid phymode/channel "
7752 "combination: %d %d\n", phymode, channel);
7753 mutex_unlock(&priv->mutex);
7754 return -EINVAL;
7755 }
7756
7757 /* Cancel any currently running scans... */
7758 if (iwl_scan_cancel_timeout(priv, 100))
7759 IWL_WARNING("Could not cancel scan.\n");
7760 else {
7761 IWL_DEBUG_INFO("Committing phymode and "
7762 "rxon.channel = %d %d\n",
7763 phymode, channel);
7764
7765 iwl_set_rxon_channel(priv, phymode, channel);
7766 iwl_set_flags_for_phymode(priv, phymode);
7767
7768 iwl_set_rate(priv);
7769 iwl_commit_rxon(priv);
7770 }
7771 }
7772 mutex_unlock(&priv->mutex);
7773
7774 return count;
7775}
7776
7777static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
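/*
 * Editorial sketch (not part of the original patch): the "tune" attribute
 * above encodes the phymode in the high byte and the channel number in the
 * low byte of a single 16-bit word.  The helpers below only demonstrate that
 * packing; the names and the phymode value are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t tune_pack(uint8_t phymode, uint8_t channel)
{
	return (uint16_t)(phymode << 8) | channel;
}

static void tune_unpack(uint16_t tune, uint8_t *phymode, uint8_t *channel)
{
	*phymode = (tune >> 8) & 0xff;
	*channel = tune & 0xff;
}

int main(void)
{
	uint8_t m, c;

	tune_unpack(tune_pack(2 /* hypothetical phymode */, 6), &m, &c);
	printf("phymode %u, channel %u -> 0x%04x\n", m, c, tune_pack(m, c));
	return 0;
}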
7778
7779#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
7780
7781static ssize_t show_measurement(struct device *d,
7782 struct device_attribute *attr, char *buf)
7783{
7784 struct iwl_priv *priv = dev_get_drvdata(d);
7785 struct iwl_spectrum_notification measure_report;
7786 u32 size = sizeof(measure_report), len = 0, ofs = 0;
7787	u8 *data = (u8 *)&measure_report;
7788 unsigned long flags;
7789
7790 spin_lock_irqsave(&priv->lock, flags);
7791 if (!(priv->measurement_status & MEASUREMENT_READY)) {
7792 spin_unlock_irqrestore(&priv->lock, flags);
7793 return 0;
7794 }
7795 memcpy(&measure_report, &priv->measure_report, size);
7796 priv->measurement_status = 0;
7797 spin_unlock_irqrestore(&priv->lock, flags);
7798
7799 while (size && (PAGE_SIZE - len)) {
7800 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
7801 PAGE_SIZE - len, 1);
7802 len = strlen(buf);
7803 if (PAGE_SIZE - len)
7804 buf[len++] = '\n';
7805
7806 ofs += 16;
7807 size -= min(size, 16U);
7808 }
7809
7810 return len;
7811}
7812
7813static ssize_t store_measurement(struct device *d,
7814 struct device_attribute *attr,
7815 const char *buf, size_t count)
7816{
7817 struct iwl_priv *priv = dev_get_drvdata(d);
7818 struct ieee80211_measurement_params params = {
7819 .channel = le16_to_cpu(priv->active_rxon.channel),
7820 .start_time = cpu_to_le64(priv->last_tsf),
7821 .duration = cpu_to_le16(1),
7822 };
7823 u8 type = IWL_MEASURE_BASIC;
7824 u8 buffer[32];
7825 u8 channel;
7826
7827 if (count) {
7828 char *p = buffer;
7829		strncpy(buffer, buf, min(sizeof(buffer) - 1, count));
		buffer[sizeof(buffer) - 1] = '\0';
7830 channel = simple_strtoul(p, NULL, 0);
7831 if (channel)
7832 params.channel = channel;
7833
7834 p = buffer;
7835 while (*p && *p != ' ')
7836 p++;
7837 if (*p)
7838 type = simple_strtoul(p + 1, NULL, 0);
7839 }
7840
7841 IWL_DEBUG_INFO("Invoking measurement of type %d on "
7842 "channel %d (for '%s')\n", type, params.channel, buf);
7843 iwl_get_measurement(priv, &params, type);
7844
7845 return count;
7846}
7847
7848static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
7849 show_measurement, store_measurement);
7850#endif /* CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT */
7851
7852static ssize_t show_rate(struct device *d,
7853 struct device_attribute *attr, char *buf)
7854{
7855 struct iwl_priv *priv = dev_get_drvdata(d);
7856 unsigned long flags;
7857 int i;
7858
7859 spin_lock_irqsave(&priv->sta_lock, flags);
7860 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
7861 i = priv->stations[IWL_AP_ID].current_rate.s.rate;
7862 else
7863 i = priv->stations[IWL_STA_ID].current_rate.s.rate;
7864 spin_unlock_irqrestore(&priv->sta_lock, flags);
7865
7866 i = iwl_rate_index_from_plcp(i);
7867 if (i == -1)
7868 return sprintf(buf, "0\n");
7869
7870 return sprintf(buf, "%d%s\n",
7871 (iwl_rates[i].ieee >> 1),
7872 (iwl_rates[i].ieee & 0x1) ? ".5" : "");
7873}
7874
7875static DEVICE_ATTR(rate, S_IRUSR, show_rate, NULL);
7876
7877static ssize_t store_retry_rate(struct device *d,
7878 struct device_attribute *attr,
7879 const char *buf, size_t count)
7880{
7881 struct iwl_priv *priv = dev_get_drvdata(d);
7882
7883 priv->retry_rate = simple_strtoul(buf, NULL, 0);
7884 if (priv->retry_rate <= 0)
7885 priv->retry_rate = 1;
7886
7887 return count;
7888}
7889
7890static ssize_t show_retry_rate(struct device *d,
7891 struct device_attribute *attr, char *buf)
7892{
7893 struct iwl_priv *priv = dev_get_drvdata(d);
7894	return sprintf(buf, "%d\n", priv->retry_rate);
7895}
7896
7897static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
7898 store_retry_rate);
7899
7900static ssize_t store_power_level(struct device *d,
7901 struct device_attribute *attr,
7902 const char *buf, size_t count)
7903{
7904 struct iwl_priv *priv = dev_get_drvdata(d);
7905 int rc;
7906 int mode;
7907
7908 mode = simple_strtoul(buf, NULL, 0);
7909 mutex_lock(&priv->mutex);
7910
7911 if (!iwl_is_ready(priv)) {
7912 rc = -EAGAIN;
7913 goto out;
7914 }
7915
7916 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
7917 mode = IWL_POWER_AC;
7918 else
7919 mode |= IWL_POWER_ENABLED;
7920
7921 if (mode != priv->power_mode) {
7922 rc = iwl_send_power_mode(priv, IWL_POWER_LEVEL(mode));
7923 if (rc) {
7924 IWL_DEBUG_MAC80211("failed setting power mode.\n");
7925 goto out;
7926 }
7927 priv->power_mode = mode;
7928 }
7929
7930 rc = count;
7931
7932 out:
7933 mutex_unlock(&priv->mutex);
7934 return rc;
7935}
7936
7937#define MAX_WX_STRING 80
7938
7939/* Values are in microseconds */
7940static const s32 timeout_duration[] = {
7941 350000,
7942 250000,
7943 75000,
7944 37000,
7945 25000,
7946};
7947static const s32 period_duration[] = {
7948 400000,
7949 700000,
7950 1000000,
7951 1000000,
7952 1000000
7953};
7954
7955static ssize_t show_power_level(struct device *d,
7956 struct device_attribute *attr, char *buf)
7957{
7958 struct iwl_priv *priv = dev_get_drvdata(d);
7959 int level = IWL_POWER_LEVEL(priv->power_mode);
7960 char *p = buf;
7961
7962 p += sprintf(p, "%d ", level);
7963 switch (level) {
7964 case IWL_POWER_MODE_CAM:
7965 case IWL_POWER_AC:
7966 p += sprintf(p, "(AC)");
7967 break;
7968 case IWL_POWER_BATTERY:
7969 p += sprintf(p, "(BATTERY)");
7970 break;
7971 default:
7972 p += sprintf(p,
7973 "(Timeout %dms, Period %dms)",
7974 timeout_duration[level - 1] / 1000,
7975 period_duration[level - 1] / 1000);
7976 }
7977
7978 if (!(priv->power_mode & IWL_POWER_ENABLED))
7979 p += sprintf(p, " OFF\n");
7980 else
7981 p += sprintf(p, " \n");
7982
7983 return (p - buf + 1);
7984
7985}
7986
7987static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
7988 store_power_level);
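/*
 * Editorial sketch (not part of the original patch): for the intermediate
 * numbered power levels, show_power_level() above reports the timeout and
 * period as timeout_duration[level - 1] and period_duration[level - 1],
 * converted from microseconds to milliseconds.  The tables below are copied
 * from the arrays defined above; the loop just prints the resulting lines.
 */
#include <stdio.h>

static const long timeout_us[] = { 350000, 250000, 75000, 37000, 25000 };
static const long period_us[]  = { 400000, 700000, 1000000, 1000000, 1000000 };

int main(void)
{
	for (int level = 1; level <= 5; level++)
		printf("level %d: (Timeout %ldms, Period %ldms)\n", level,
		       timeout_us[level - 1] / 1000,
		       period_us[level - 1] / 1000);
	return 0;
}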
7989
7990static ssize_t show_channels(struct device *d,
7991 struct device_attribute *attr, char *buf)
7992{
7993 struct iwl_priv *priv = dev_get_drvdata(d);
7994 int len = 0, i;
7995 struct ieee80211_channel *channels = NULL;
7996 const struct ieee80211_hw_mode *hw_mode = NULL;
7997 int count = 0;
7998
7999 if (!iwl_is_ready(priv))
8000 return -EAGAIN;
8001
8002 hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211G);
8003 if (!hw_mode)
8004 hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211B);
8005 if (hw_mode) {
8006 channels = hw_mode->channels;
8007 count = hw_mode->num_channels;
8008 }
8009
8010 len +=
8011 sprintf(&buf[len],
8012 "Displaying %d channels in 2.4GHz band "
8013 "(802.11bg):\n", count);
8014
8015 for (i = 0; i < count; i++)
8016 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8017 channels[i].chan,
8018 channels[i].power_level,
8019 channels[i].
8020 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8021 " (IEEE 802.11h required)" : "",
8022 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8023 || (channels[i].
8024 flag &
8025 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8026 ", IBSS",
8027 channels[i].
8028 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8029 "active/passive" : "passive only");
8030
8031 hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211A);
8032 if (hw_mode) {
8033 channels = hw_mode->channels;
8034 count = hw_mode->num_channels;
8035 } else {
8036 channels = NULL;
8037 count = 0;
8038 }
8039
8040 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8041 "(802.11a):\n", count);
8042
8043 for (i = 0; i < count; i++)
8044 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8045 channels[i].chan,
8046 channels[i].power_level,
8047 channels[i].
8048 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8049 " (IEEE 802.11h required)" : "",
8050 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8051 || (channels[i].
8052 flag &
8053 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8054 ", IBSS",
8055 channels[i].
8056 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8057 "active/passive" : "passive only");
8058
8059 return len;
8060}
8061
8062static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8063
8064static ssize_t show_statistics(struct device *d,
8065 struct device_attribute *attr, char *buf)
8066{
8067 struct iwl_priv *priv = dev_get_drvdata(d);
8068 u32 size = sizeof(struct iwl_notif_statistics);
8069 u32 len = 0, ofs = 0;
8070	u8 *data = (u8 *)&priv->statistics;
8071 int rc = 0;
8072
8073 if (!iwl_is_alive(priv))
8074 return -EAGAIN;
8075
8076 mutex_lock(&priv->mutex);
8077 rc = iwl_send_statistics_request(priv);
8078 mutex_unlock(&priv->mutex);
8079
8080 if (rc) {
8081 len = sprintf(buf,
8082 "Error sending statistics request: 0x%08X\n", rc);
8083 return len;
8084 }
8085
8086 while (size && (PAGE_SIZE - len)) {
8087 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8088 PAGE_SIZE - len, 1);
8089 len = strlen(buf);
8090 if (PAGE_SIZE - len)
8091 buf[len++] = '\n';
8092
8093 ofs += 16;
8094 size -= min(size, 16U);
8095 }
8096
8097 return len;
8098}
8099
8100static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
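/*
 * Editorial sketch (not part of the original patch): a user-space analogue
 * of the dump loops in show_measurement() and show_statistics() above --
 * render a binary structure as hex, 16 bytes per line, without ever writing
 * past the end of the output buffer.  snprintf() stands in for the kernel's
 * hex_dump_to_buffer(); the exact byte grouping differs slightly.
 */
#include <stdio.h>
#include <string.h>

static size_t hex_lines(const unsigned char *data, size_t size,
			char *out, size_t out_sz)
{
	size_t len = 0, ofs = 0;

	while (size && len + 1 < out_sz) {
		size_t chunk = size < 16 ? size : 16;

		for (size_t i = 0; i < chunk && len + 4 < out_sz; i++)
			len += snprintf(out + len, out_sz - len, "%02x ",
					data[ofs + i]);
		if (len + 1 < out_sz)
			out[len++] = '\n';
		ofs += chunk;
		size -= chunk;
	}
	if (out_sz)
		out[len] = '\0';
	return len;
}

int main(void)
{
	unsigned char sample[40];
	char buf[256];

	memset(sample, 0xab, sizeof(sample));
	hex_lines(sample, sizeof(sample), buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}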
8101
8102static ssize_t show_antenna(struct device *d,
8103 struct device_attribute *attr, char *buf)
8104{
8105 struct iwl_priv *priv = dev_get_drvdata(d);
8106
8107 if (!iwl_is_alive(priv))
8108 return -EAGAIN;
8109
8110 return sprintf(buf, "%d\n", priv->antenna);
8111}
8112
8113static ssize_t store_antenna(struct device *d,
8114 struct device_attribute *attr,
8115 const char *buf, size_t count)
8116{
8117 int ant;
8118 struct iwl_priv *priv = dev_get_drvdata(d);
8119
8120 if (count == 0)
8121 return 0;
8122
8123 if (sscanf(buf, "%1i", &ant) != 1) {
8124 IWL_DEBUG_INFO("not in hex or decimal form.\n");
8125 return count;
8126 }
8127
8128 if ((ant >= 0) && (ant <= 2)) {
8129 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
8130 priv->antenna = (enum iwl_antenna)ant;
8131 } else
8132 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
8133
8134
8135 return count;
8136}
8137
8138static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
8139
8140static ssize_t show_status(struct device *d,
8141 struct device_attribute *attr, char *buf)
8142{
8143 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8144 if (!iwl_is_alive(priv))
8145 return -EAGAIN;
8146 return sprintf(buf, "0x%08x\n", (int)priv->status);
8147}
8148
8149static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
8150
8151static ssize_t dump_error_log(struct device *d,
8152 struct device_attribute *attr,
8153 const char *buf, size_t count)
8154{
8155 char *p = (char *)buf;
8156
8157 if (p[0] == '1')
8158 iwl_dump_nic_error_log((struct iwl_priv *)d->driver_data);
8159
8160 return strnlen(buf, count);
8161}
8162
8163static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
8164
8165static ssize_t dump_event_log(struct device *d,
8166 struct device_attribute *attr,
8167 const char *buf, size_t count)
8168{
8169 char *p = (char *)buf;
8170
8171 if (p[0] == '1')
8172 iwl_dump_nic_event_log((struct iwl_priv *)d->driver_data);
8173
8174 return strnlen(buf, count);
8175}
8176
8177static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
8178
8179/*****************************************************************************
8180 *
8181 * driver setup and teardown
8182 *
8183 *****************************************************************************/
8184
8185static void iwl_setup_deferred_work(struct iwl_priv *priv)
8186{
8187 priv->workqueue = create_workqueue(DRV_NAME);
8188
8189 init_waitqueue_head(&priv->wait_command_queue);
8190
8191 INIT_WORK(&priv->up, iwl_bg_up);
8192 INIT_WORK(&priv->restart, iwl_bg_restart);
8193 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
8194 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
8195 INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
8196 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
8197 INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
8198 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
8199 INIT_DELAYED_WORK(&priv->post_associate, iwl_bg_post_associate);
8200 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
8201 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
8202 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
8203
8204 iwl_hw_setup_deferred_work(priv);
8205
8206 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
8207 iwl_irq_tasklet, (unsigned long)priv);
8208}
8209
8210static void iwl_cancel_deferred_work(struct iwl_priv *priv)
8211{
8212 iwl_hw_cancel_deferred_work(priv);
8213
8214 cancel_delayed_work(&priv->scan_check);
8215 cancel_delayed_work(&priv->alive_start);
8216 cancel_delayed_work(&priv->post_associate);
8217 cancel_work_sync(&priv->beacon_update);
8218}
8219
8220static struct attribute *iwl_sysfs_entries[] = {
8221 &dev_attr_antenna.attr,
8222 &dev_attr_channels.attr,
8223 &dev_attr_dump_errors.attr,
8224 &dev_attr_dump_events.attr,
8225 &dev_attr_flags.attr,
8226 &dev_attr_filter_flags.attr,
8227#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
8228 &dev_attr_measurement.attr,
8229#endif
8230 &dev_attr_power_level.attr,
8231 &dev_attr_rate.attr,
8232 &dev_attr_retry_rate.attr,
8233 &dev_attr_rf_kill.attr,
8234 &dev_attr_rs_window.attr,
8235 &dev_attr_statistics.attr,
8236 &dev_attr_status.attr,
8237 &dev_attr_temperature.attr,
8238 &dev_attr_tune.attr,
8239 &dev_attr_tx_power.attr,
8240
8241 NULL
8242};
8243
8244static struct attribute_group iwl_attribute_group = {
8245 .name = NULL, /* put in device directory */
8246 .attrs = iwl_sysfs_entries,
8247};
8248
8249static struct ieee80211_ops iwl_hw_ops = {
8250 .tx = iwl_mac_tx,
8251 .open = iwl_mac_open,
8252 .stop = iwl_mac_stop,
8253 .add_interface = iwl_mac_add_interface,
8254 .remove_interface = iwl_mac_remove_interface,
8255 .config = iwl_mac_config,
8256 .config_interface = iwl_mac_config_interface,
8257 .set_key = iwl_mac_set_key,
8258 .get_stats = iwl_mac_get_stats,
8259 .get_tx_stats = iwl_mac_get_tx_stats,
8260 .conf_tx = iwl_mac_conf_tx,
8261 .get_tsf = iwl_mac_get_tsf,
8262 .reset_tsf = iwl_mac_reset_tsf,
8263 .beacon_update = iwl_mac_beacon_update,
8264 .hw_scan = iwl_mac_hw_scan
8265};
8266
8267static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8268{
8269 int err = 0;
8270 u32 pci_id;
8271 struct iwl_priv *priv;
8272 struct ieee80211_hw *hw;
8273 int i;
8274
8275 if (iwl_param_disable_hw_scan) {
8276 IWL_DEBUG_INFO("Disabling hw_scan\n");
8277 iwl_hw_ops.hw_scan = NULL;
8278 }
8279
8280 if ((iwl_param_queues_num > IWL_MAX_NUM_QUEUES) ||
8281 (iwl_param_queues_num < IWL_MIN_NUM_QUEUES)) {
8282 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
8283 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
8284 err = -EINVAL;
8285 goto out;
8286 }
8287
8288 /* mac80211 allocates memory for this device instance, including
8289 * space for this driver's private structure */
8290 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_hw_ops);
8291 if (hw == NULL) {
8292 IWL_ERROR("Can not allocate network device\n");
8293 err = -ENOMEM;
8294 goto out;
8295 }
8296 SET_IEEE80211_DEV(hw, &pdev->dev);
8297
8298 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
8299 priv = hw->priv;
8300 priv->hw = hw;
8301
8302 priv->pci_dev = pdev;
8303 priv->antenna = (enum iwl_antenna)iwl_param_antenna;
8304#ifdef CONFIG_IWLWIFI_DEBUG
8305 iwl_debug_level = iwl_param_debug;
8306 atomic_set(&priv->restrict_refcnt, 0);
8307#endif
8308 priv->retry_rate = 1;
8309
8310 priv->ibss_beacon = NULL;
8311
8312 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
8313 * the range of signal quality values that we'll provide.
8314 * Negative values for level/noise indicate that we'll provide dBm.
8315 * For WE, at least, non-0 values here *enable* display of values
8316 * in app (iwconfig). */
8317 hw->max_rssi = -20; /* signal level, negative indicates dBm */
8318 hw->max_noise = -20; /* noise level, negative indicates dBm */
8319 hw->max_signal = 100; /* link quality indication (%) */
8320
8321 /* Tell mac80211 our Tx characteristics */
8322 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
8323
8324 hw->queues = 4;
8325
8326 spin_lock_init(&priv->lock);
8327 spin_lock_init(&priv->power_data.lock);
8328 spin_lock_init(&priv->sta_lock);
8329 spin_lock_init(&priv->hcmd_lock);
8330
8331 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
8332 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
8333
8334 INIT_LIST_HEAD(&priv->free_frames);
8335
8336 mutex_init(&priv->mutex);
8337 if (pci_enable_device(pdev)) {
8338 err = -ENODEV;
8339 goto out_ieee80211_free_hw;
8340 }
8341
8342 pci_set_master(pdev);
8343
8344 iwl_clear_stations_table(priv);
8345
8346 priv->data_retry_limit = -1;
8347 priv->ieee_channels = NULL;
8348 priv->ieee_rates = NULL;
8349 priv->phymode = -1;
8350
8351 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
8352 if (!err)
8353 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
8354 if (err) {
8355 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
8356 goto out_pci_disable_device;
8357 }
8358
8359 pci_set_drvdata(pdev, priv);
8360 err = pci_request_regions(pdev, DRV_NAME);
8361 if (err)
8362 goto out_pci_disable_device;
8363 /* We disable the RETRY_TIMEOUT register (0x41) to keep
8364 * PCI Tx retries from interfering with C3 CPU state */
8365 pci_write_config_byte(pdev, 0x41, 0x00);
8366 priv->hw_base = pci_iomap(pdev, 0, 0);
8367 if (!priv->hw_base) {
8368 err = -ENODEV;
8369 goto out_pci_release_regions;
8370 }
8371
8372 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
8373 (unsigned long long) pci_resource_len(pdev, 0));
8374 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
8375
8376 /* Initialize module parameter values here */
8377
8378 if (iwl_param_disable) {
8379 set_bit(STATUS_RF_KILL_SW, &priv->status);
8380 IWL_DEBUG_INFO("Radio disabled.\n");
8381 }
8382
8383 priv->iw_mode = IEEE80211_IF_TYPE_STA;
8384
8385 pci_id =
8386 (priv->pci_dev->device << 16) | priv->pci_dev->subsystem_device;
8387
8388 switch (pci_id) {
8389 case 0x42221005: /* 0x4222 0x8086 0x1005 is BG SKU */
8390 case 0x42221034: /* 0x4222 0x8086 0x1034 is BG SKU */
8391 case 0x42271014: /* 0x4227 0x8086 0x1014 is BG SKU */
8392 case 0x42221044: /* 0x4222 0x8086 0x1044 is BG SKU */
8393 priv->is_abg = 0;
8394 break;
8395
8396 /*
8397	 * The rest are assumed to be ABG SKUs -- if this is not the
8398	 * case then the card will get the wrong 'Detected'
8399	 * line in the kernel log; however, the code that
8400	 * initializes the GEO table will detect no A-band
8401	 * channels and clear the is_abg flag.
8402 */
8403 default:
8404 priv->is_abg = 1;
8405 break;
8406 }
8407
8408 printk(KERN_INFO DRV_NAME
8409 ": Detected Intel PRO/Wireless 3945%sBG Network Connection\n",
8410 priv->is_abg ? "A" : "");
8411
8412 /* Device-specific setup */
8413 if (iwl_hw_set_hw_setting(priv)) {
8414 IWL_ERROR("failed to set hw settings\n");
8416 goto out_iounmap;
8417 }
8418
8419#ifdef CONFIG_IWLWIFI_QOS
8420 if (iwl_param_qos_enable)
8421 priv->qos_data.qos_enable = 1;
8422
8423 iwl_reset_qos(priv);
8424
8425 priv->qos_data.qos_active = 0;
8426 priv->qos_data.qos_cap.val = 0;
8427#endif /* CONFIG_IWLWIFI_QOS */
8428
8429 iwl_set_rxon_channel(priv, MODE_IEEE80211G, 6);
8430 iwl_setup_deferred_work(priv);
8431 iwl_setup_rx_handlers(priv);
8432
8433 priv->rates_mask = IWL_RATES_MASK;
8434 /* If power management is turned on, default to AC mode */
8435 priv->power_mode = IWL_POWER_AC;
8436 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
8437
8438 pci_enable_msi(pdev);
8439
8440 err = request_irq(pdev->irq, iwl_isr, IRQF_SHARED, DRV_NAME, priv);
8441 if (err) {
8442 IWL_ERROR("Error allocating IRQ %d\n", pdev->irq);
8443 goto out_disable_msi;
8444 }
8445
8446 mutex_lock(&priv->mutex);
8447
8448 err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group);
8449 if (err) {
8450 IWL_ERROR("failed to create sysfs device attributes\n");
8451 mutex_unlock(&priv->mutex);
8452 goto out_release_irq;
8453 }
8454
8455 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
8456 * ucode filename and max sizes are card-specific. */
8457 err = iwl_read_ucode(priv);
8458 if (err) {
8459 IWL_ERROR("Could not read microcode: %d\n", err);
8460 mutex_unlock(&priv->mutex);
8461 goto out_pci_alloc;
8462 }
8463
8464 mutex_unlock(&priv->mutex);
8465
8466	IWL_DEBUG_INFO("Queueing UP work.\n");
8467
8468 queue_work(priv->workqueue, &priv->up);
8469
8470 return 0;
8471
8472 out_pci_alloc:
8473 iwl_dealloc_ucode_pci(priv);
8474
8475 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
8476
8477 out_release_irq:
8478 free_irq(pdev->irq, priv);
8479
8480 out_disable_msi:
8481 pci_disable_msi(pdev);
8482 destroy_workqueue(priv->workqueue);
8483 priv->workqueue = NULL;
8484 iwl_unset_hw_setting(priv);
8485
8486 out_iounmap:
8487 pci_iounmap(pdev, priv->hw_base);
8488 out_pci_release_regions:
8489 pci_release_regions(pdev);
8490 out_pci_disable_device:
8491 pci_disable_device(pdev);
8492 pci_set_drvdata(pdev, NULL);
8493 out_ieee80211_free_hw:
8494 ieee80211_free_hw(priv->hw);
8495 out:
8496 return err;
8497}
8498
8499static void iwl_pci_remove(struct pci_dev *pdev)
8500{
8501 struct iwl_priv *priv = pci_get_drvdata(pdev);
8502 struct list_head *p, *q;
8503 int i;
8504
8505 if (!priv)
8506 return;
8507
8508 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
8509
8510 mutex_lock(&priv->mutex);
8511 set_bit(STATUS_EXIT_PENDING, &priv->status);
8512 __iwl_down(priv);
8513 mutex_unlock(&priv->mutex);
8514
8515 /* Free MAC hash list for ADHOC */
8516 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
8517 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
8518 list_del(p);
8519 kfree(list_entry(p, struct iwl_ibss_seq, list));
8520 }
8521 }
8522
8523 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
8524
8525 iwl_dealloc_ucode_pci(priv);
8526
8527 if (priv->rxq.bd)
8528 iwl_rx_queue_free(priv, &priv->rxq);
8529 iwl_hw_txq_ctx_free(priv);
8530
8531 iwl_unset_hw_setting(priv);
8532 iwl_clear_stations_table(priv);
8533
8534 if (priv->mac80211_registered) {
8535 ieee80211_unregister_hw(priv->hw);
8536 iwl_rate_control_unregister(priv->hw);
8537 }
8538
8539 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
8540 * priv->workqueue... so we can't take down the workqueue
8541 * until now... */
8542 destroy_workqueue(priv->workqueue);
8543 priv->workqueue = NULL;
8544
8545 free_irq(pdev->irq, priv);
8546 pci_disable_msi(pdev);
8547 pci_iounmap(pdev, priv->hw_base);
8548 pci_release_regions(pdev);
8549 pci_disable_device(pdev);
8550 pci_set_drvdata(pdev, NULL);
8551
8552 kfree(priv->channel_info);
8553
8554 kfree(priv->ieee_channels);
8555 kfree(priv->ieee_rates);
8556
8557 if (priv->ibss_beacon)
8558 dev_kfree_skb(priv->ibss_beacon);
8559
8560 ieee80211_free_hw(priv->hw);
8561}
8562
8563#ifdef CONFIG_PM
8564
8565static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
8566{
8567 struct iwl_priv *priv = pci_get_drvdata(pdev);
8568
8569 mutex_lock(&priv->mutex);
8570
8571 set_bit(STATUS_IN_SUSPEND, &priv->status);
8572
8573 /* Take down the device; powers it off, etc. */
8574 __iwl_down(priv);
8575
8576 if (priv->mac80211_registered)
8577 ieee80211_stop_queues(priv->hw);
8578
8579 pci_save_state(pdev);
8580 pci_disable_device(pdev);
8581 pci_set_power_state(pdev, PCI_D3hot);
8582
8583 mutex_unlock(&priv->mutex);
8584
8585 return 0;
8586}
8587
8588static void iwl_resume(struct iwl_priv *priv)
8589{
8590 unsigned long flags;
8591
8592	/* The following is a temporary workaround due to the
8593 * suspend / resume not fully initializing the NIC correctly.
8594 * Without all of the following, resume will not attempt to take
8595 * down the NIC (it shouldn't really need to) and will just try
8596 * and bring the NIC back up. However that fails during the
8597 * ucode verification process. This then causes iwl_down to be
8598 * called *after* iwl_hw_nic_init() has succeeded -- which
8599 * then lets the next init sequence succeed. So, we've
8600 * replicated all of that NIC init code here... */
8601
8602 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
8603
8604 iwl_hw_nic_init(priv);
8605
8606 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
8607 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
8608 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
8609 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
8610 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
8611 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
8612
8613 /* tell the device to stop sending interrupts */
8614 iwl_disable_interrupts(priv);
8615
8616 spin_lock_irqsave(&priv->lock, flags);
8617 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
8618
8619 if (!iwl_grab_restricted_access(priv)) {
8620 iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG,
8621 APMG_CLK_VAL_DMA_CLK_RQT);
8622 iwl_release_restricted_access(priv);
8623 }
8624 spin_unlock_irqrestore(&priv->lock, flags);
8625
8626 udelay(5);
8627
8628 iwl_hw_nic_reset(priv);
8629
8630 /* Bring the device back up */
8631 clear_bit(STATUS_IN_SUSPEND, &priv->status);
8632 queue_work(priv->workqueue, &priv->up);
8633}
8634
8635static int iwl_pci_resume(struct pci_dev *pdev)
8636{
8637 struct iwl_priv *priv = pci_get_drvdata(pdev);
8638 int err;
8639
8640	printk(KERN_INFO DRV_NAME ": Coming out of suspend...\n");
8641
8642 mutex_lock(&priv->mutex);
8643
8644 pci_set_power_state(pdev, PCI_D0);
8645 err = pci_enable_device(pdev);
8646 pci_restore_state(pdev);
8647
8648 /*
8649 * Suspend/Resume resets the PCI configuration space, so we have to
8650 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
8651 * from interfering with C3 CPU state. pci_restore_state won't help
8652 * here since it only restores the first 64 bytes pci config header.
8653	 * here since it only restores the first 64 bytes of the PCI config header.
8654 pci_write_config_byte(pdev, 0x41, 0x00);
8655
8656 iwl_resume(priv);
8657 mutex_unlock(&priv->mutex);
8658
8659 return 0;
8660}
8661
8662#endif /* CONFIG_PM */
8663
8664/*****************************************************************************
8665 *
8666 * driver and module entry point
8667 *
8668 *****************************************************************************/
8669
8670static struct pci_driver iwl_driver = {
8671 .name = DRV_NAME,
8672 .id_table = iwl_hw_card_ids,
8673 .probe = iwl_pci_probe,
8674 .remove = __devexit_p(iwl_pci_remove),
8675#ifdef CONFIG_PM
8676 .suspend = iwl_pci_suspend,
8677 .resume = iwl_pci_resume,
8678#endif
8679};
8680
8681static int __init iwl_init(void)
8682{
8683
8684 int ret;
8685 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
8686 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
8687 ret = pci_register_driver(&iwl_driver);
8688 if (ret) {
8689 IWL_ERROR("Unable to initialize PCI module\n");
8690 return ret;
8691 }
8692#ifdef CONFIG_IWLWIFI_DEBUG
8693 ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level);
8694 if (ret) {
8695 IWL_ERROR("Unable to create driver sysfs file\n");
8696 pci_unregister_driver(&iwl_driver);
8697 return ret;
8698 }
8699#endif
8700
8701 return ret;
8702}
8703
8704static void __exit iwl_exit(void)
8705{
8706#ifdef CONFIG_IWLWIFI_DEBUG
8707 driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level);
8708#endif
8709 pci_unregister_driver(&iwl_driver);
8710}
8711
8712module_param_named(antenna, iwl_param_antenna, int, 0444);
8713MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
8714module_param_named(disable, iwl_param_disable, int, 0444);
8715MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
8716module_param_named(hwcrypto, iwl_param_hwcrypto, int, 0444);
8717MODULE_PARM_DESC(hwcrypto,
8718		 "use hardware crypto engine (default 0 [software])");
8719module_param_named(debug, iwl_param_debug, int, 0444);
8720MODULE_PARM_DESC(debug, "debug output mask");
8721module_param_named(disable_hw_scan, iwl_param_disable_hw_scan, int, 0444);
8722MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
8723
8724module_param_named(queues_num, iwl_param_queues_num, int, 0444);
8725MODULE_PARM_DESC(queues_num, "number of hw queues.");
8726
8727/* QoS */
8728module_param_named(qos_enable, iwl_param_qos_enable, int, 0444);
8729MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
8730
8731module_exit(iwl_exit);
8732module_init(iwl_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
new file mode 100644
index 000000000000..b79dabc8c01c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -0,0 +1,9323 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30/*
31 * NOTE: This file (iwl-base.c) is used to build multiple hardware targets
32 * by defining IWL to either 3945 or 4965. The Makefile used when building
33 * the base targets will create base-3945.o and base-4965.o
34 *
35 * The eventual goal is to move as many of the #if IWL / #endif blocks out of
36 * this file and into the hardware specific implementation files (iwl-XXXX.c)
37 * and leave only the common (non #ifdef sprinkled) code in this file
38 */
39
40#include <linux/kernel.h>
41#include <linux/module.h>
42#include <linux/version.h>
43#include <linux/init.h>
44#include <linux/pci.h>
45#include <linux/dma-mapping.h>
46#include <linux/delay.h>
47#include <linux/skbuff.h>
48#include <linux/netdevice.h>
49#include <linux/wireless.h>
50#include <linux/firmware.h>
53#include <linux/etherdevice.h>
54#include <linux/if_arp.h>
55
56#include <net/ieee80211_radiotap.h>
57#include <net/mac80211.h>
58
59#include <asm/div64.h>
60
61#include "iwlwifi.h"
62#include "iwl-4965.h"
63#include "iwl-helpers.h"
64
65#ifdef CONFIG_IWLWIFI_DEBUG
66u32 iwl_debug_level;
67#endif
68
69/******************************************************************************
70 *
71 * module boiler plate
72 *
73 ******************************************************************************/
74
75/* module parameters */
76int iwl_param_disable_hw_scan;
77int iwl_param_debug;
78int iwl_param_disable; /* def: enable radio */
79int iwl_param_antenna; /* def: 0 = both antennas (use diversity) */
80int iwl_param_hwcrypto; /* def: using software encryption */
81int iwl_param_qos_enable = 1;
82int iwl_param_queues_num = IWL_MAX_NUM_QUEUES;
83
84/*
85 * module name, copyright, version, etc.
86 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
87 */
88
89#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
90
91#ifdef CONFIG_IWLWIFI_DEBUG
92#define VD "d"
93#else
94#define VD
95#endif
96
97#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
98#define VS "s"
99#else
100#define VS
101#endif
102
103#define IWLWIFI_VERSION "0.1.15k" VD VS
104#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
105#define DRV_VERSION IWLWIFI_VERSION
106
107/* Change firmware file name, using "-" and incrementing number,
108 * *only* when uCode interface or architecture changes so that it
109 * is not compatible with earlier drivers.
110 * This number also appears at the <<8 position of the first dword of the uCode file. */
111#define IWL4965_UCODE_API "-1"
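/*
 * Editorial sketch (not part of the original patch): per the comment above,
 * the uCode API number ("-1" -> 1) is also encoded in the first dword of the
 * uCode file, shifted left by 8.  Assuming an 8-bit field at that position
 * (the width is an assumption made for illustration only), it can be pulled
 * back out as shown below.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t first_dword = 0x00000100;	/* hypothetical: API version 1 at <<8 */
	uint32_t api = (first_dword >> 8) & 0xff;

	printf("uCode API version: %u\n", api);
	return 0;
}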
112
113MODULE_DESCRIPTION(DRV_DESCRIPTION);
114MODULE_VERSION(DRV_VERSION);
115MODULE_AUTHOR(DRV_COPYRIGHT);
116MODULE_LICENSE("GPL");
117
118__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
119{
120 u16 fc = le16_to_cpu(hdr->frame_control);
121 int hdr_len = ieee80211_get_hdrlen(fc);
122
123 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
124 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
125 return NULL;
126}
127
128static const struct ieee80211_hw_mode *iwl_get_hw_mode(
129 struct iwl_priv *priv, int mode)
130{
131 int i;
132
133 for (i = 0; i < 3; i++)
134 if (priv->modes[i].mode == mode)
135 return &priv->modes[i];
136
137 return NULL;
138}
139
140static int iwl_is_empty_essid(const char *essid, int essid_len)
141{
142 /* Single white space is for Linksys APs */
143 if (essid_len == 1 && essid[0] == ' ')
144 return 1;
145
146 /* Otherwise, if the entire essid is 0, we assume it is hidden */
147 while (essid_len) {
148 essid_len--;
149 if (essid[essid_len] != '\0')
150 return 0;
151 }
152
153 return 1;
154}
155
156static const char *iwl_escape_essid(const char *essid, u8 essid_len)
157{
158 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
159 const char *s = essid;
160 char *d = escaped;
161
162 if (iwl_is_empty_essid(essid, essid_len)) {
163 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
164 return escaped;
165 }
166
167 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
168 while (essid_len--) {
169 if (*s == '\0') {
170 *d++ = '\\';
171 *d++ = '0';
172 s++;
173 } else
174 *d++ = *s++;
175 }
176 *d = '\0';
177 return escaped;
178}
179
180static void iwl_print_hex_dump(int level, void *p, u32 len)
181{
182#ifdef CONFIG_IWLWIFI_DEBUG
183 if (!(iwl_debug_level & level))
184 return;
185
186 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
187 p, len, 1);
188#endif
189}
190
191/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
192 * DMA services
193 *
194 * Theory of operation
195 *
196 * A queue is a circular buffer with 'Read' and 'Write' pointers.
197 * Two empty entries are always kept in the buffer to protect from overflow.
198 *
199 * For the Tx queue, there are low mark and high mark limits. If, after queuing
200 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
201 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
202 * the Tx queue is resumed.
203 *
204 * The device operates with six queues: one receive queue in the device's
205 * sram, one transmit queue for sending commands to the device firmware,
206 * and four transmit queues for data.
207 ***************************************************/
208
209static int iwl_queue_space(const struct iwl_queue *q)
210{
211 int s = q->last_used - q->first_empty;
212
213 if (q->last_used > q->first_empty)
214 s -= q->n_bd;
215
216 if (s <= 0)
217 s += q->n_window;
218 /* keep some reserve to not confuse empty and full situations */
219 s -= 2;
220 if (s < 0)
221 s = 0;
222 return s;
223}
224
225/* XXX: n_bd must be power-of-two size */
226static inline int iwl_queue_inc_wrap(int index, int n_bd)
227{
228 return ++index & (n_bd - 1);
229}
230
231/* XXX: n_bd must be power-of-two size */
232static inline int iwl_queue_dec_wrap(int index, int n_bd)
233{
234 return --index & (n_bd - 1);
235}
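/*
 * Editorial sketch (not part of the original patch): for a power-of-two
 * n_bd, masking with (n_bd - 1) behaves exactly like taking the index
 * modulo n_bd, which is why the wrap helpers above require power-of-two
 * queue sizes (see the BUG_ON checks in iwl_queue_init() below).
 */
#include <assert.h>
#include <stdio.h>

static int inc_wrap(int index, int n_bd) { return ++index & (n_bd - 1); }
static int dec_wrap(int index, int n_bd) { return --index & (n_bd - 1); }

int main(void)
{
	assert(inc_wrap(255, 256) == 0);	/* wraps forward to the start */
	assert(dec_wrap(0, 256) == 255);	/* wraps backward to the end */
	assert(inc_wrap(10, 256) == 11);
	printf("masking with (n_bd - 1) == modulo for power-of-two sizes\n");
	return 0;
}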
236
237static inline int x2_queue_used(const struct iwl_queue *q, int i)
238{
239 return q->first_empty > q->last_used ?
240 (i >= q->last_used && i < q->first_empty) :
241 !(i < q->last_used && i >= q->first_empty);
242}
243
244static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
245{
246 if (is_huge)
247 return q->n_window;
248
249 return index & (q->n_window - 1);
250}
251
252static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
253 int count, int slots_num, u32 id)
254{
255 q->n_bd = count;
256 q->n_window = slots_num;
257 q->id = id;
258
259 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
260 * and iwl_queue_dec_wrap are broken. */
261 BUG_ON(!is_power_of_2(count));
262
263 /* slots_num must be power-of-two size, otherwise
264 * get_cmd_index is broken. */
265 BUG_ON(!is_power_of_2(slots_num));
266
267 q->low_mark = q->n_window / 4;
268 if (q->low_mark < 4)
269 q->low_mark = 4;
270
271 q->high_mark = q->n_window / 8;
272 if (q->high_mark < 2)
273 q->high_mark = 2;
274
275 q->first_empty = q->last_used = 0;
276
277 return 0;
278}
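/*
 * Editorial sketch (not part of the original patch): the low/high watermarks
 * chosen by iwl_queue_init() above, computed stand-alone.  Per the
 * theory-of-operation comment earlier in this file, the Tx queue is stopped
 * when free space falls below the low mark and resumed once it rises above
 * the high mark on reclaim.
 */
#include <stdio.h>

int main(void)
{
	for (int n_window = 4; n_window <= 64; n_window *= 2) {
		int low = n_window / 4;
		int high = n_window / 8;

		if (low < 4)
			low = 4;
		if (high < 2)
			high = 2;
		printf("n_window %2d: low_mark %2d, high_mark %2d\n",
		       n_window, low, high);
	}
	return 0;
}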
279
280static int iwl_tx_queue_alloc(struct iwl_priv *priv,
281 struct iwl_tx_queue *txq, u32 id)
282{
283 struct pci_dev *dev = priv->pci_dev;
284
285 if (id != IWL_CMD_QUEUE_NUM) {
286 txq->txb = kmalloc(sizeof(txq->txb[0]) *
287 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
288 if (!txq->txb) {
289			IWL_ERROR("kmalloc for auxiliary BD "
290 "structures failed\n");
291 goto error;
292 }
293 } else
294 txq->txb = NULL;
295
296 txq->bd = pci_alloc_consistent(dev,
297 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
298 &txq->q.dma_addr);
299
300 if (!txq->bd) {
301 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
302 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
303 goto error;
304 }
305 txq->q.id = id;
306
307 return 0;
308
309 error:
310 if (txq->txb) {
311 kfree(txq->txb);
312 txq->txb = NULL;
313 }
314
315 return -ENOMEM;
316}
317
318int iwl_tx_queue_init(struct iwl_priv *priv,
319 struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
320{
321 struct pci_dev *dev = priv->pci_dev;
322 int len;
323 int rc = 0;
324
325 /* Allocate command space plus one big command for scan, since the
326 * scan command is very large and the system will never have two
327 * scans in flight at the same time. */
328 len = sizeof(struct iwl_cmd) * slots_num;
329 if (txq_id == IWL_CMD_QUEUE_NUM)
330 len += IWL_MAX_SCAN_SIZE;
331 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
332 if (!txq->cmd)
333 return -ENOMEM;
334
335 rc = iwl_tx_queue_alloc(priv, txq, txq_id);
336 if (rc) {
337 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
338
339 return -ENOMEM;
340 }
341 txq->need_update = 0;
342
343 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
344 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
345 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
346 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
347
348 iwl_hw_tx_queue_init(priv, txq);
349
350 return 0;
351}
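/*
 * Usage sketch (the queue number and slot count below are hypothetical;
 * the real values are chosen by the hardware-specific setup code):
 *
 *   struct iwl_tx_queue *txq = &priv->txq[2];
 *
 *   rc = iwl_tx_queue_init(priv, txq, 64, 2);
 *   if (rc)
 *       return rc;                     (DMA allocation failed)
 *   ...
 *   iwl_tx_queue_free(priv, txq);      (tear the queue down again)
 */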
352
353/**
354 * iwl_tx_queue_free - Deallocate DMA queue.
355 * @txq: Transmit queue to deallocate.
356 *
357 * Empty queue by removing and destroying all BD's.
358 * Free all buffers. txq itself is not freed.
359 *
360 */
361void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
362{
363 struct iwl_queue *q = &txq->q;
364 struct pci_dev *dev = priv->pci_dev;
365 int len;
366
367 if (q->n_bd == 0)
368 return;
369
370 /* first, empty all BD's */
371 for (; q->first_empty != q->last_used;
372 q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd))
373 iwl_hw_txq_free_tfd(priv, txq);
374
375 len = sizeof(struct iwl_cmd) * q->n_window;
376 if (q->id == IWL_CMD_QUEUE_NUM)
377 len += IWL_MAX_SCAN_SIZE;
378
379 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
380
381 /* free buffers belonging to queue itself */
382 if (txq->q.n_bd)
383 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
384 txq->q.n_bd, txq->bd, txq->q.dma_addr);
385
386 if (txq->txb) {
387 kfree(txq->txb);
388 txq->txb = NULL;
389 }
390
391 /* 0 fill whole structure */
392 memset(txq, 0, sizeof(*txq));
393}
394
395const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
396
397/*************** STATION TABLE MANAGEMENT ****
398 *
399 * NOTE: This needs to be overhauled to better synchronize between
400 * how the iwl-4965.c is using iwl_hw_find_station vs. iwl-3945.c
401 *
402 * mac80211 should also be examined to determine if sta_info is duplicating
403 * the functionality provided here
404 */
405
406/**************************************************************/
407
408static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
409{
410 int index = IWL_INVALID_STATION;
411 int i;
412 unsigned long flags;
413
414 spin_lock_irqsave(&priv->sta_lock, flags);
415
416 if (is_ap)
417 index = IWL_AP_ID;
418 else if (is_broadcast_ether_addr(addr))
419 index = priv->hw_setting.bcast_sta_id;
420 else
421 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
422 if (priv->stations[i].used &&
423 !compare_ether_addr(priv->stations[i].sta.sta.addr,
424 addr)) {
425 index = i;
426 break;
427 }
428
429 if (unlikely(index == IWL_INVALID_STATION))
430 goto out;
431
432 if (priv->stations[index].used) {
433 priv->stations[index].used = 0;
434 priv->num_stations--;
435 }
436
437 BUG_ON(priv->num_stations < 0);
438
439out:
440 spin_unlock_irqrestore(&priv->sta_lock, flags);
441 return 0;
442}
443
444static void iwl_clear_stations_table(struct iwl_priv *priv)
445{
446 unsigned long flags;
447
448 spin_lock_irqsave(&priv->sta_lock, flags);
449
450 priv->num_stations = 0;
451 memset(priv->stations, 0, sizeof(priv->stations));
452
453 spin_unlock_irqrestore(&priv->sta_lock, flags);
454}
455
456u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
457{
458 int i;
459 int index = IWL_INVALID_STATION;
460 struct iwl_station_entry *station;
461 unsigned long flags_spin;
462
463 spin_lock_irqsave(&priv->sta_lock, flags_spin);
464 if (is_ap)
465 index = IWL_AP_ID;
466 else if (is_broadcast_ether_addr(addr))
467 index = priv->hw_setting.bcast_sta_id;
468 else
469 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
470 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
471 addr)) {
472 index = i;
473 break;
474 }
475
476 if (!priv->stations[i].used &&
477 index == IWL_INVALID_STATION)
478 index = i;
479 }
480
481
482 /* These two conditions have the same outcome, but keep them separate
483 since they have different meanings */
484 if (unlikely(index == IWL_INVALID_STATION)) {
485 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
486 return index;
487 }
488
489 if (priv->stations[index].used &&
490 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
491 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
492 return index;
493 }
494
495
496 IWL_DEBUG_ASSOC("Add STA ID %d: " MAC_FMT "\n", index, MAC_ARG(addr));
497 station = &priv->stations[index];
498 station->used = 1;
499 priv->num_stations++;
500
501 memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
502 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
503 station->sta.mode = 0;
504 station->sta.sta.sta_id = index;
505 station->sta.station_flags = 0;
506
507#ifdef CONFIG_IWLWIFI_HT
508 /* BCAST station and IBSS stations do not work in HT mode */
509 if (index != priv->hw_setting.bcast_sta_id &&
510 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
511 iwl4965_set_ht_add_station(priv, index);
512#endif /*CONFIG_IWLWIFI_HT*/
513
514 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
515 iwl_send_add_station(priv, &station->sta, flags);
516 return index;
517
518}
519
520/*************** DRIVER STATUS FUNCTIONS *****/
521
522static inline int iwl_is_ready(struct iwl_priv *priv)
523{
524 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
525 * set but EXIT_PENDING is not */
526 return test_bit(STATUS_READY, &priv->status) &&
527 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
528 !test_bit(STATUS_EXIT_PENDING, &priv->status);
529}
530
531static inline int iwl_is_alive(struct iwl_priv *priv)
532{
533 return test_bit(STATUS_ALIVE, &priv->status);
534}
535
536static inline int iwl_is_init(struct iwl_priv *priv)
537{
538 return test_bit(STATUS_INIT, &priv->status);
539}
540
541static inline int iwl_is_rfkill(struct iwl_priv *priv)
542{
543 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
544 test_bit(STATUS_RF_KILL_SW, &priv->status);
545}
546
547static inline int iwl_is_ready_rf(struct iwl_priv *priv)
548{
549
550 if (iwl_is_rfkill(priv))
551 return 0;
552
553 return iwl_is_ready(priv);
554}
555
556/*************** HOST COMMAND QUEUE FUNCTIONS *****/
557
558#define IWL_CMD(x) case x : return #x
559
560static const char *get_cmd_string(u8 cmd)
561{
562 switch (cmd) {
563 IWL_CMD(REPLY_ALIVE);
564 IWL_CMD(REPLY_ERROR);
565 IWL_CMD(REPLY_RXON);
566 IWL_CMD(REPLY_RXON_ASSOC);
567 IWL_CMD(REPLY_QOS_PARAM);
568 IWL_CMD(REPLY_RXON_TIMING);
569 IWL_CMD(REPLY_ADD_STA);
570 IWL_CMD(REPLY_REMOVE_STA);
571 IWL_CMD(REPLY_REMOVE_ALL_STA);
572 IWL_CMD(REPLY_TX);
573 IWL_CMD(REPLY_RATE_SCALE);
574 IWL_CMD(REPLY_LEDS_CMD);
575 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
576 IWL_CMD(RADAR_NOTIFICATION);
577 IWL_CMD(REPLY_QUIET_CMD);
578 IWL_CMD(REPLY_CHANNEL_SWITCH);
579 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
580 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
581 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
582 IWL_CMD(POWER_TABLE_CMD);
583 IWL_CMD(PM_SLEEP_NOTIFICATION);
584 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
585 IWL_CMD(REPLY_SCAN_CMD);
586 IWL_CMD(REPLY_SCAN_ABORT_CMD);
587 IWL_CMD(SCAN_START_NOTIFICATION);
588 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
589 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
590 IWL_CMD(BEACON_NOTIFICATION);
591 IWL_CMD(REPLY_TX_BEACON);
592 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
593 IWL_CMD(QUIET_NOTIFICATION);
594 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
595 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
596 IWL_CMD(REPLY_BT_CONFIG);
597 IWL_CMD(REPLY_STATISTICS_CMD);
598 IWL_CMD(STATISTICS_NOTIFICATION);
599 IWL_CMD(REPLY_CARD_STATE_CMD);
600 IWL_CMD(CARD_STATE_NOTIFICATION);
601 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
602 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
603 IWL_CMD(SENSITIVITY_CMD);
604 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
605 IWL_CMD(REPLY_RX_PHY_CMD);
606 IWL_CMD(REPLY_RX_MPDU_CMD);
607 IWL_CMD(REPLY_4965_RX);
608 IWL_CMD(REPLY_COMPRESSED_BA);
609 default:
610 return "UNKNOWN";
611
612 }
613}
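/*
 * For reference, each IWL_CMD(x) entry above expands (via the macro
 * defined just before this function) to
 *
 *   case x: return #x;
 *
 * so, for example, IWL_CMD(REPLY_TX) becomes:  case REPLY_TX: return "REPLY_TX";
 */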
614
615#define HOST_COMPLETE_TIMEOUT (HZ / 2)
616
617/**
618 * iwl_enqueue_hcmd - enqueue a uCode command
619 * @priv: device private data pointer
620 * @cmd: a pointer to the uCode command structure
621 *
622 * The function returns a value < 0 to indicate that the operation
623 * failed. On success, it returns the index of the command in the
624 * command queue.
625 */
626static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
627{
628 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
629 struct iwl_queue *q = &txq->q;
630 struct iwl_tfd_frame *tfd;
631 u32 *control_flags;
632 struct iwl_cmd *out_cmd;
633 u32 idx;
634 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
635 dma_addr_t phys_addr;
636 int ret;
637 unsigned long flags;
638
639 /* If any of the command structures end up being larger than
640 * the TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
641 * we will need to increase the size of the TFD entries */
642 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
643 !(cmd->meta.flags & CMD_SIZE_HUGE));
644
645 if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
646 IWL_ERROR("No space for Tx\n");
647 return -ENOSPC;
648 }
649
650 spin_lock_irqsave(&priv->hcmd_lock, flags);
651
652 tfd = &txq->bd[q->first_empty];
653 memset(tfd, 0, sizeof(*tfd));
654
655 control_flags = (u32 *) tfd;
656
657 idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE);
658 out_cmd = &txq->cmd[idx];
659
660 out_cmd->hdr.cmd = cmd->id;
661 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
662 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
663
664 /* At this point, the out_cmd now has all of the incoming cmd
665 * information */
666
667 out_cmd->hdr.flags = 0;
668 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
669 INDEX_TO_SEQ(q->first_empty));
670 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
671 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
672
673 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
674 offsetof(struct iwl_cmd, hdr);
675 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
676
677 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
678 "%d bytes at %d[%d]:%d\n",
679 get_cmd_string(out_cmd->hdr.cmd),
680 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
681 fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM);
682
683 txq->need_update = 1;
684 ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);
685 q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
686 iwl_tx_queue_update_write_ptr(priv, txq);
687
688 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
689 return ret ? ret : idx;
690}
691
692int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
693{
694 int ret;
695
696 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
697
698 /* An asynchronous command can not expect an SKB to be set. */
699 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
700
701 /* An asynchronous command MUST have a callback. */
702 BUG_ON(!cmd->meta.u.callback);
703
704 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
705 return -EBUSY;
706
707 ret = iwl_enqueue_hcmd(priv, cmd);
708 if (ret < 0) {
709 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
710 get_cmd_string(cmd->id), ret);
711 return ret;
712 }
713 return 0;
714}
715
716int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
717{
718 int cmd_idx;
719 int ret;
720 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
721
722 BUG_ON(cmd->meta.flags & CMD_ASYNC);
723
724 /* A synchronous command can not have a callback set. */
725 BUG_ON(cmd->meta.u.callback != NULL);
726
727 if (atomic_xchg(&entry, 1)) {
728 IWL_ERROR("Error sending %s: Already sending a host command\n",
729 get_cmd_string(cmd->id));
730 return -EBUSY;
731 }
732
733 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
734
735 if (cmd->meta.flags & CMD_WANT_SKB)
736 cmd->meta.source = &cmd->meta;
737
738 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
739 if (cmd_idx < 0) {
740 ret = cmd_idx;
741 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
742 get_cmd_string(cmd->id), ret);
743 goto out;
744 }
745
746 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
747 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
748 HOST_COMPLETE_TIMEOUT);
749 if (!ret) {
750 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
751 IWL_ERROR("Error sending %s: time out after %dms.\n",
752 get_cmd_string(cmd->id),
753 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
754
755 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
756 ret = -ETIMEDOUT;
757 goto cancel;
758 }
759 }
760
761 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
762 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
763 get_cmd_string(cmd->id));
764 ret = -ECANCELED;
765 goto fail;
766 }
767 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
768 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
769 get_cmd_string(cmd->id));
770 ret = -EIO;
771 goto fail;
772 }
773 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
774 IWL_ERROR("Error: Response NULL in '%s'\n",
775 get_cmd_string(cmd->id));
776 ret = -EIO;
777 goto out;
778 }
779
780 ret = 0;
781 goto out;
782
783cancel:
784 if (cmd->meta.flags & CMD_WANT_SKB) {
785 struct iwl_cmd *qcmd;
786
787 /* Cancel the CMD_WANT_SKB flag for the cmd in the
788 * TX cmd queue. Otherwise, if the response comes
789 * in later, it could set an invalid
790 * address (cmd->meta.source). */
791 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
792 qcmd->meta.flags &= ~CMD_WANT_SKB;
793 }
794fail:
795 if (cmd->meta.u.skb) {
796 dev_kfree_skb_any(cmd->meta.u.skb);
797 cmd->meta.u.skb = NULL;
798 }
799out:
800 atomic_set(&entry, 0);
801 return ret;
802}
803
804int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
805{
806 /* A command can not be asynchronous AND expect an SKB to be set. */
807 BUG_ON((cmd->meta.flags & CMD_ASYNC) &&
808 (cmd->meta.flags & CMD_WANT_SKB));
809
810 if (cmd->meta.flags & CMD_ASYNC)
811 return iwl_send_cmd_async(priv, cmd);
812
813 return iwl_send_cmd_sync(priv, cmd);
814}
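/*
 * Usage sketch for the host command helpers above. The command ID,
 * payload and callback name are placeholders; the SKB handling for a
 * synchronous CMD_WANT_SKB command follows the pattern used by
 * iwl_send_rxon_assoc() and iwl_send_add_station() below.
 *
 *   struct iwl_host_cmd cmd = {
 *       .id = REPLY_BT_CONFIG,
 *       .len = sizeof(bt_cmd),
 *       .data = &bt_cmd,
 *       .meta.flags = CMD_WANT_SKB,
 *   };
 *
 *   rc = iwl_send_cmd_sync(priv, &cmd);
 *   if (!rc) {
 *       ... inspect (struct iwl_rx_packet *)cmd.meta.u.skb->data ...
 *       priv->alloc_rxb_skb--;
 *       dev_kfree_skb_any(cmd.meta.u.skb);
 *   }
 *
 * Asynchronous variant: no CMD_WANT_SKB, but a callback is required.
 *
 *   cmd.meta.flags = CMD_ASYNC;
 *   cmd.meta.u.callback = my_callback;
 *   rc = iwl_send_cmd_async(priv, &cmd);
 */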
815
816int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
817{
818 struct iwl_host_cmd cmd = {
819 .id = id,
820 .len = len,
821 .data = data,
822 };
823
824 return iwl_send_cmd_sync(priv, &cmd);
825}
826
827static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val)
828{
829 struct iwl_host_cmd cmd = {
830 .id = id,
831 .len = sizeof(val),
832 .data = &val,
833 };
834
835 return iwl_send_cmd_sync(priv, &cmd);
836}
837
838int iwl_send_statistics_request(struct iwl_priv *priv)
839{
840 return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
841}
842
843/**
844 * iwl_rxon_add_station - add a station into the station table.
845 *
846 * There is only one AP station, with id == IWL_AP_ID.
847 * NOTE: the mutex must be held before calling this function.
848 */
849static int iwl_rxon_add_station(struct iwl_priv *priv,
850 const u8 *addr, int is_ap)
851{
852 u8 rc;
853
854 /* Remove this station if it happens to already exist */
855 iwl_remove_station(priv, addr, is_ap);
856
857 rc = iwl_add_station(priv, addr, is_ap, 0);
858
859 iwl4965_add_station(priv, addr, is_ap);
860
861 return rc;
862}
863
864/**
865 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
866 * @phymode: MODE_IEEE80211A selects the 5.2 GHz band; anything else selects 2.4 GHz
867 * @channel: Any channel valid for the requested phymode
868 *
869 * In addition to setting the staging RXON, priv->phymode is also set.
870 *
871 * NOTE: Does not commit to the hardware; it sets the appropriate bit fields
872 * in the staging RXON flags structure based on the phymode.
873 */
874static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel)
875{
876 if (!iwl_get_channel_info(priv, phymode, channel)) {
877 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
878 channel, phymode);
879 return -EINVAL;
880 }
881
882 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
883 (priv->phymode == phymode))
884 return 0;
885
886 priv->staging_rxon.channel = cpu_to_le16(channel);
887 if (phymode == MODE_IEEE80211A)
888 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
889 else
890 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
891
892 priv->phymode = phymode;
893
894 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
895
896 return 0;
897}
898
899/**
900 * iwl_check_rxon_cmd - verify that the RXON structure is valid
901 *
902 * NOTE: This is really only useful during development and can eventually
903 * be #ifdef'd out once the driver is stable and folks aren't actively
904 * making changes
905 */
906static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
907{
908 int error = 0;
909 int counter = 1;
910
911 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
912 error |= le32_to_cpu(rxon->flags &
913 (RXON_FLG_TGJ_NARROW_BAND_MSK |
914 RXON_FLG_RADAR_DETECT_MSK));
915 if (error)
916 IWL_WARNING("check 24G fields %d | %d\n",
917 counter++, error);
918 } else {
919 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
920 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
921 if (error)
922 IWL_WARNING("check 52 fields %d | %d\n",
923 counter++, error);
924 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
925 if (error)
926 IWL_WARNING("check 52 CCK %d | %d\n",
927 counter++, error);
928 }
929 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
930 if (error)
931 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
932
933 /* make sure basic rates 6Mbps and 1Mbps are supported */
934 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
935 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
936 if (error)
937 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
938
939 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
940 if (error)
941 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
942
943 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
944 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
945 if (error)
946 IWL_WARNING("check CCK and short slot %d | %d\n",
947 counter++, error);
948
949 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
950 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
951 if (error)
952 IWL_WARNING("check CCK & auto detect %d | %d\n",
953 counter++, error);
954
955 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
956 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
957 if (error)
958 IWL_WARNING("check TGG and auto detect %d | %d\n",
959 counter++, error);
960
961 if (error)
962 IWL_WARNING("Tuning to channel %d\n",
963 le16_to_cpu(rxon->channel));
964
965 if (error) {
966 IWL_ERROR("Invalid iwl_rxon_assoc_cmd field values\n");
967 return -1;
968 }
969 return 0;
970}
971
972/**
973 * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit
974 * @priv: staging_rxon is compared to active_rxon
975 *
976 * If the RXON structure is changing sufficiently to require a new
977 * tune, or to clear and reset the RXON_FILTER_ASSOC_MSK, then return 1
978 * to indicate a new tune is required.
979 */
980static int iwl_full_rxon_required(struct iwl_priv *priv)
981{
982
983 /* These items are only settable from the full RXON command */
984 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
985 compare_ether_addr(priv->staging_rxon.bssid_addr,
986 priv->active_rxon.bssid_addr) ||
987 compare_ether_addr(priv->staging_rxon.node_addr,
988 priv->active_rxon.node_addr) ||
989 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
990 priv->active_rxon.wlap_bssid_addr) ||
991 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
992 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
993 (priv->staging_rxon.air_propagation !=
994 priv->active_rxon.air_propagation) ||
995 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
996 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
997 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
998 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
999 (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
1000 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
1001 return 1;
1002
1003 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
1004 * be updated with the RXON_ASSOC command -- however only some
1005 * flag transitions are allowed using RXON_ASSOC */
1006
1007 /* Check if we are not switching bands */
1008 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
1009 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1010 return 1;
1011
1012 /* Check if we are switching association toggle */
1013 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1014 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1015 return 1;
1016
1017 return 0;
1018}
1019
1020static int iwl_send_rxon_assoc(struct iwl_priv *priv)
1021{
1022 int rc = 0;
1023 struct iwl_rx_packet *res = NULL;
1024 struct iwl_rxon_assoc_cmd rxon_assoc;
1025 struct iwl_host_cmd cmd = {
1026 .id = REPLY_RXON_ASSOC,
1027 .len = sizeof(rxon_assoc),
1028 .meta.flags = CMD_WANT_SKB,
1029 .data = &rxon_assoc,
1030 };
1031 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1032 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1033
1034 if ((rxon1->flags == rxon2->flags) &&
1035 (rxon1->filter_flags == rxon2->filter_flags) &&
1036 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1037 (rxon1->ofdm_ht_single_stream_basic_rates ==
1038 rxon2->ofdm_ht_single_stream_basic_rates) &&
1039 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1040 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1041 (rxon1->rx_chain == rxon2->rx_chain) &&
1042 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1043 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1044 return 0;
1045 }
1046
1047 rxon_assoc.flags = priv->staging_rxon.flags;
1048 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1049 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1050 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1051 rxon_assoc.reserved = 0;
1052 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1053 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1054 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1055 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1056 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1057
1058 rc = iwl_send_cmd_sync(priv, &cmd);
1059 if (rc)
1060 return rc;
1061
1062 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1063 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1064 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1065 rc = -EIO;
1066 }
1067
1068 priv->alloc_rxb_skb--;
1069 dev_kfree_skb_any(cmd.meta.u.skb);
1070
1071 return rc;
1072}
1073
1074/**
1075 * iwl_commit_rxon - commit staging_rxon to hardware
1076 *
1077 * The RXON command in staging_rxon is committed to the hardware and
1078 * the active_rxon structure is updated with the new data. This
1079 * function correctly transitions out of the RXON_ASSOC_MSK state if
1080 * a HW tune is required based on the RXON structure changes.
1081 */
1082static int iwl_commit_rxon(struct iwl_priv *priv)
1083{
1084 /* cast away the const for active_rxon in this function */
1085 struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1086 int rc = 0;
1087
1088 if (!iwl_is_alive(priv))
1089 return -1;
1090
1091 /* always get timestamp with Rx frame */
1092 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1093
1094 rc = iwl_check_rxon_cmd(&priv->staging_rxon);
1095 if (rc) {
1096 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
1097 return -EINVAL;
1098 }
1099
1100 /* If we don't need to send a full RXON, we can use
1101 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1102 * and other flags for the current radio configuration. */
1103 if (!iwl_full_rxon_required(priv)) {
1104 rc = iwl_send_rxon_assoc(priv);
1105 if (rc) {
1106 IWL_ERROR("Error setting RXON_ASSOC "
1107 "configuration (%d).\n", rc);
1108 return rc;
1109 }
1110
1111 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1112
1113 return 0;
1114 }
1115
1116 /* station table will be cleared */
1117 priv->assoc_station_added = 0;
1118
1119#ifdef CONFIG_IWLWIFI_SENSITIVITY
1120 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1121 if (!priv->error_recovering)
1122 priv->start_calib = 0;
1123
1124 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1125#endif /* CONFIG_IWLWIFI_SENSITIVITY */
1126
1127 /* If we are currently associated and the new config requires
1128 * a full RXON and wants the associated mask enabled, then
1129 * we must clear the associated bit from the active configuration
1130 * before we apply the new config */
1131 if (iwl_is_associated(priv) &&
1132 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1133 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1134 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1135
1136 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1137 sizeof(struct iwl_rxon_cmd),
1138 &priv->active_rxon);
1139
1140 /* If the mask clearing failed then we set
1141 * active_rxon back to what it was previously */
1142 if (rc) {
1143 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1144 IWL_ERROR("Error clearing ASSOC_MSK on current "
1145 "configuration (%d).\n", rc);
1146 return rc;
1147 }
1148
1149 /* The RXON bit toggling will have cleared out the
1150 * station table in the uCode, so blank it in the driver
1151 * as well */
1152 iwl_clear_stations_table(priv);
1153 } else if (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) {
1154 /* When switching from non-associated to associated, the
1155 * uCode clears out the station table; so clear it in the
1156 * driver as well */
1157 iwl_clear_stations_table(priv);
1158 }
1159
1160 IWL_DEBUG_INFO("Sending RXON\n"
1161 "* with%s RXON_FILTER_ASSOC_MSK\n"
1162 "* channel = %d\n"
1163 "* bssid = " MAC_FMT "\n",
1164 ((priv->staging_rxon.filter_flags &
1165 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1166 le16_to_cpu(priv->staging_rxon.channel),
1167 MAC_ARG(priv->staging_rxon.bssid_addr));
1168
1169 /* Apply the new configuration */
1170 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1171 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
1172 if (rc) {
1173 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1174 return rc;
1175 }
1176
1177#ifdef CONFIG_IWLWIFI_SENSITIVITY
1178 if (!priv->error_recovering)
1179 priv->start_calib = 0;
1180
1181 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1182 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1183#endif /* CONFIG_IWLWIFI_SENSITIVITY */
1184
1185 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1186
1187 /* If we issue a new RXON command which required a tune then we must
1188 * send a new TXPOWER command or we won't be able to Tx any frames */
1189 rc = iwl_hw_reg_send_txpower(priv);
1190 if (rc) {
1191 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1192 return rc;
1193 }
1194
1195 /* Add the broadcast address so we can send broadcast frames */
1196 if (iwl_rxon_add_station(priv, BROADCAST_ADDR, 0) ==
1197 IWL_INVALID_STATION) {
1198 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1199 return -EIO;
1200 }
1201
1202 /* If we have set the ASSOC_MSK and we are in BSS mode then
1203 * add the IWL_AP_ID to the station rate table */
1204 if (iwl_is_associated(priv) &&
1205 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
1206 if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1207 == IWL_INVALID_STATION) {
1208 IWL_ERROR("Error adding AP address for transmit.\n");
1209 return -EIO;
1210 }
1211 priv->assoc_station_added = 1;
1212 }
1213
1214 return 0;
1215}
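/*
 * Typical calling pattern (sketch): callers edit priv->staging_rxon and
 * then commit it, for example to toggle association:
 *
 *   priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
 *   iwl_commit_rxon(priv);
 *
 * iwl_commit_rxon() itself decides whether the lightweight RXON_ASSOC
 * path is sufficient or whether a full RXON (with TX power update and
 * station re-add) is required.
 */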
1216
1217static int iwl_send_bt_config(struct iwl_priv *priv)
1218{
1219 struct iwl_bt_cmd bt_cmd = {
1220 .flags = 3,
1221 .lead_time = 0xAA,
1222 .max_kill = 1,
1223 .kill_ack_mask = 0,
1224 .kill_cts_mask = 0,
1225 };
1226
1227 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1228 sizeof(struct iwl_bt_cmd), &bt_cmd);
1229}
1230
1231static int iwl_send_scan_abort(struct iwl_priv *priv)
1232{
1233 int rc = 0;
1234 struct iwl_rx_packet *res;
1235 struct iwl_host_cmd cmd = {
1236 .id = REPLY_SCAN_ABORT_CMD,
1237 .meta.flags = CMD_WANT_SKB,
1238 };
1239
1240 /* If there isn't a scan actively going on in the hardware
1241 * then we are in between scan bands and not actually
1242 * scanning, so don't send the abort command */
1243 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1244 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1245 return 0;
1246 }
1247
1248 rc = iwl_send_cmd_sync(priv, &cmd);
1249 if (rc) {
1250 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1251 return rc;
1252 }
1253
1254 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1255 if (res->u.status != CAN_ABORT_STATUS) {
1256 /* The scan abort will return 1 for success or
1257 * 2 for "failure". A failure condition can be
1258 * due to simply not being in an active scan which
1259 * can occur if we send the scan abort before
1260 * the microcode has notified us that a scan is
1261 * completed. */
1262 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1263 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1264 clear_bit(STATUS_SCAN_HW, &priv->status);
1265 }
1266
1267 dev_kfree_skb_any(cmd.meta.u.skb);
1268
1269 return rc;
1270}
1271
1272static int iwl_card_state_sync_callback(struct iwl_priv *priv,
1273 struct iwl_cmd *cmd,
1274 struct sk_buff *skb)
1275{
1276 return 1;
1277}
1278
1279/*
1280 * CARD_STATE_CMD
1281 *
1282 * Use: Sets the internal card state to enable, disable, or halt
1283 *
1284 * When in the 'enable' state the card operates as normal.
1285 * When in the 'disable' state, the card enters into a low power mode.
1286 * When in the 'halt' state, the card is shut down and must be fully
1287 * restarted to come back on.
1288 */
1289static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1290{
1291 struct iwl_host_cmd cmd = {
1292 .id = REPLY_CARD_STATE_CMD,
1293 .len = sizeof(u32),
1294 .data = &flags,
1295 .meta.flags = meta_flag,
1296 };
1297
1298 if (meta_flag & CMD_ASYNC)
1299 cmd.meta.u.callback = iwl_card_state_sync_callback;
1300
1301 return iwl_send_cmd(priv, &cmd);
1302}
1303
1304static int iwl_add_sta_sync_callback(struct iwl_priv *priv,
1305 struct iwl_cmd *cmd, struct sk_buff *skb)
1306{
1307 struct iwl_rx_packet *res = NULL;
1308
1309 if (!skb) {
1310 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1311 return 1;
1312 }
1313
1314 res = (struct iwl_rx_packet *)skb->data;
1315 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1316 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1317 res->hdr.flags);
1318 return 1;
1319 }
1320
1321 switch (res->u.add_sta.status) {
1322 case ADD_STA_SUCCESS_MSK:
1323 break;
1324 default:
1325 break;
1326 }
1327
1328 /* We didn't cache the SKB; let the caller free it */
1329 return 1;
1330}
1331
1332int iwl_send_add_station(struct iwl_priv *priv,
1333 struct iwl_addsta_cmd *sta, u8 flags)
1334{
1335 struct iwl_rx_packet *res = NULL;
1336 int rc = 0;
1337 struct iwl_host_cmd cmd = {
1338 .id = REPLY_ADD_STA,
1339 .len = sizeof(struct iwl_addsta_cmd),
1340 .meta.flags = flags,
1341 .data = sta,
1342 };
1343
1344 if (flags & CMD_ASYNC)
1345 cmd.meta.u.callback = iwl_add_sta_sync_callback;
1346 else
1347 cmd.meta.flags |= CMD_WANT_SKB;
1348
1349 rc = iwl_send_cmd(priv, &cmd);
1350
1351 if (rc || (flags & CMD_ASYNC))
1352 return rc;
1353
1354 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1355 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1356 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1357 res->hdr.flags);
1358 rc = -EIO;
1359 }
1360
1361 if (rc == 0) {
1362 switch (res->u.add_sta.status) {
1363 case ADD_STA_SUCCESS_MSK:
1364 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1365 break;
1366 default:
1367 rc = -EIO;
1368 IWL_WARNING("REPLY_ADD_STA failed\n");
1369 break;
1370 }
1371 }
1372
1373 priv->alloc_rxb_skb--;
1374 dev_kfree_skb_any(cmd.meta.u.skb);
1375
1376 return rc;
1377}
1378
1379static int iwl_update_sta_key_info(struct iwl_priv *priv,
1380 struct ieee80211_key_conf *keyconf,
1381 u8 sta_id)
1382{
1383 unsigned long flags;
1384 __le16 key_flags = 0;
1385
1386 switch (keyconf->alg) {
1387 case ALG_CCMP:
1388 key_flags |= STA_KEY_FLG_CCMP;
1389 key_flags |= cpu_to_le16(
1390 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1391 key_flags &= ~STA_KEY_FLG_INVALID;
1392 break;
1393 case ALG_TKIP:
1394 case ALG_WEP:
1395 return -EINVAL;
1396 default:
1397 return -EINVAL;
1398 }
1399 spin_lock_irqsave(&priv->sta_lock, flags);
1400 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1401 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1402 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1403 keyconf->keylen);
1404
1405 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1406 keyconf->keylen);
1407 priv->stations[sta_id].sta.key.key_flags = key_flags;
1408 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1409 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1410
1411 spin_unlock_irqrestore(&priv->sta_lock, flags);
1412
1413 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1414 iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1415 return 0;
1416}
1417
1418static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
1419{
1420 unsigned long flags;
1421
1422 spin_lock_irqsave(&priv->sta_lock, flags);
1423 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
1424 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo));
1425 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1426 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1427 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1428 spin_unlock_irqrestore(&priv->sta_lock, flags);
1429
1430 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1431 iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1432 return 0;
1433}
1434
1435static void iwl_clear_free_frames(struct iwl_priv *priv)
1436{
1437 struct list_head *element;
1438
1439 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1440 priv->frames_count);
1441
1442 while (!list_empty(&priv->free_frames)) {
1443 element = priv->free_frames.next;
1444 list_del(element);
1445 kfree(list_entry(element, struct iwl_frame, list));
1446 priv->frames_count--;
1447 }
1448
1449 if (priv->frames_count) {
1450 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1451 priv->frames_count);
1452 priv->frames_count = 0;
1453 }
1454}
1455
1456static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
1457{
1458 struct iwl_frame *frame;
1459 struct list_head *element;
1460 if (list_empty(&priv->free_frames)) {
1461 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1462 if (!frame) {
1463 IWL_ERROR("Could not allocate frame!\n");
1464 return NULL;
1465 }
1466
1467 priv->frames_count++;
1468 return frame;
1469 }
1470
1471 element = priv->free_frames.next;
1472 list_del(element);
1473 return list_entry(element, struct iwl_frame, list);
1474}
1475
1476static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
1477{
1478 memset(frame, 0, sizeof(*frame));
1479 list_add(&frame->list, &priv->free_frames);
1480}
1481
1482unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
1483 struct ieee80211_hdr *hdr,
1484 const u8 *dest, int left)
1485{
1486
1487 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
1488 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1489 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1490 return 0;
1491
1492 if (priv->ibss_beacon->len > left)
1493 return 0;
1494
1495 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1496
1497 return priv->ibss_beacon->len;
1498}
1499
1500int iwl_rate_index_from_plcp(int plcp)
1501{
1502 int i = 0;
1503
1504 if (plcp & RATE_MCS_HT_MSK) {
1505 i = (plcp & 0xff);
1506
1507 if (i >= IWL_RATE_MIMO_6M_PLCP)
1508 i = i - IWL_RATE_MIMO_6M_PLCP;
1509
1510 i += IWL_FIRST_OFDM_RATE;
1511 /* skip 9M not supported in ht*/
1512 if (i >= IWL_RATE_9M_INDEX)
1513 i += 1;
1514 if ((i >= IWL_FIRST_OFDM_RATE) &&
1515 (i <= IWL_LAST_OFDM_RATE))
1516 return i;
1517 } else {
1518 for (i = 0; i < ARRAY_SIZE(iwl_rates); i++)
1519 if (iwl_rates[i].plcp == (plcp & 0xFF))
1520 return i;
1521 }
1522 return -1;
1523}
1524
1525static u8 iwl_rate_get_lowest_plcp(int rate_mask)
1526{
1527 u8 i;
1528
1529 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1530 i = iwl_rates[i].next_ieee) {
1531 if (rate_mask & (1 << i))
1532 return iwl_rates[i].plcp;
1533 }
1534
1535 return IWL_RATE_INVALID;
1536}
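/*
 * Example (mask values are illustrative): iwl_send_beacon_cmd() below
 * passes "active_rate_basic & 0xF" on 2.4 GHz, i.e. only the CCK rates.
 * The walk starting at IWL_RATE_1M_INDEX then returns the 1 Mbps PLCP
 * as soon as that bit is found set; if no bit in the mask matches, the
 * invalid-rate marker is returned and the caller falls back to a fixed
 * default (IWL_RATE_1M_PLCP or IWL_RATE_6M_PLCP).
 */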
1537
1538static int iwl_send_beacon_cmd(struct iwl_priv *priv)
1539{
1540 struct iwl_frame *frame;
1541 unsigned int frame_size;
1542 int rc;
1543 u8 rate;
1544
1545 frame = iwl_get_free_frame(priv);
1546
1547 if (!frame) {
1548 IWL_ERROR("Could not obtain free frame buffer for beacon "
1549 "command.\n");
1550 return -ENOMEM;
1551 }
1552
1553 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1554 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic &
1555 0xFF0);
1556 if (rate == IWL_INVALID_RATE)
1557 rate = IWL_RATE_6M_PLCP;
1558 } else {
1559 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1560 if (rate == IWL_INVALID_RATE)
1561 rate = IWL_RATE_1M_PLCP;
1562 }
1563
1564 frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate);
1565
1566 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1567 &frame->u.cmd[0]);
1568
1569 iwl_free_frame(priv, frame);
1570
1571 return rc;
1572}
1573
1574/******************************************************************************
1575 *
1576 * EEPROM related functions
1577 *
1578 ******************************************************************************/
1579
1580static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
1581{
1582 memcpy(mac, priv->eeprom.mac_address, 6);
1583}
1584
1585/**
1586 * iwl_eeprom_init - read EEPROM contents
1587 *
1588 * Load the EEPROM from adapter into priv->eeprom
1589 *
1590 * NOTE: This routine uses the non-debug IO access functions.
1591 */
1592int iwl_eeprom_init(struct iwl_priv *priv)
1593{
1594 u16 *e = (u16 *)&priv->eeprom;
1595 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
1596 u32 r;
1597 int sz = sizeof(priv->eeprom);
1598 int rc;
1599 int i;
1600 u16 addr;
1601
1602 /* The EEPROM structure has several padding buffers within it,
1603 * and adding new EEPROM maps is subject to programmer errors
1604 * which may be very difficult to identify without explicitly
1605 * checking the resulting size of the EEPROM map. */
1606 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1607
1608 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1609 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1610 return -ENOENT;
1611 }
1612
1613 rc = iwl_eeprom_aqcuire_semaphore(priv);
1614 if (rc < 0) {
1615 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1616 return -ENOENT;
1617 }
1618
1619 /* eeprom is an array of 16bit values */
1620 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1621 _iwl_write32(priv, CSR_EEPROM_REG, addr << 1);
1622 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1623
1624 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1625 i += IWL_EEPROM_ACCESS_DELAY) {
1626 r = _iwl_read_restricted(priv, CSR_EEPROM_REG);
1627 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1628 break;
1629 udelay(IWL_EEPROM_ACCESS_DELAY);
1630 }
1631
1632 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1633 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1634 rc = -ETIMEDOUT;
1635 goto done;
1636 }
1637 e[addr / 2] = le16_to_cpu(r >> 16);
1638 }
1639 rc = 0;
1640
1641done:
1642 iwl_eeprom_release_semaphore(priv);
1643 return rc;
1644}
1645
1646/******************************************************************************
1647 *
1648 * Misc. internal state and helper functions
1649 *
1650 ******************************************************************************/
1651#ifdef CONFIG_IWLWIFI_DEBUG
1652
1653/**
1654 * iwl_report_frame - dump frame to syslog during debug sessions
1655 *
1656 * hack this function to show different aspects of received frames,
1657 * including selective frame dumps.
1658 * group100 parameter selects whether to show 1 out of 100 good frames.
1659 *
1660 * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type
1661 * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats)
1662 * is 3945-specific and gives bad output for 4965. Need to split the
1663 * functionality, keep common stuff here.
1664 */
1665void iwl_report_frame(struct iwl_priv *priv,
1666 struct iwl_rx_packet *pkt,
1667 struct ieee80211_hdr *header, int group100)
1668{
1669 u32 to_us;
1670 u32 print_summary = 0;
1671 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1672 u32 hundred = 0;
1673 u32 dataframe = 0;
1674 u16 fc;
1675 u16 seq_ctl;
1676 u16 channel;
1677 u16 phy_flags;
1678 int rate_sym;
1679 u16 length;
1680 u16 status;
1681 u16 bcn_tmr;
1682 u32 tsf_low;
1683 u64 tsf;
1684 u8 rssi;
1685 u8 agc;
1686 u16 sig_avg;
1687 u16 noise_diff;
1688 struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1689 struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1690 struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt);
1691 u8 *data = IWL_RX_DATA(pkt);
1692
1693 /* MAC header */
1694 fc = le16_to_cpu(header->frame_control);
1695 seq_ctl = le16_to_cpu(header->seq_ctrl);
1696
1697 /* metadata */
1698 channel = le16_to_cpu(rx_hdr->channel);
1699 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1700 rate_sym = rx_hdr->rate;
1701 length = le16_to_cpu(rx_hdr->len);
1702
1703 /* end-of-frame status and timestamp */
1704 status = le32_to_cpu(rx_end->status);
1705 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1706 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1707 tsf = le64_to_cpu(rx_end->timestamp);
1708
1709 /* signal statistics */
1710 rssi = rx_stats->rssi;
1711 agc = rx_stats->agc;
1712 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1713 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1714
1715 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1716
1717 /* if data frame is to us and all is good,
1718 * (optionally) print summary for only 1 out of every 100 */
1719 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1720 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1721 dataframe = 1;
1722 if (!group100)
1723 print_summary = 1; /* print each frame */
1724 else if (priv->framecnt_to_us < 100) {
1725 priv->framecnt_to_us++;
1726 print_summary = 0;
1727 } else {
1728 priv->framecnt_to_us = 0;
1729 print_summary = 1;
1730 hundred = 1;
1731 }
1732 } else {
1733 /* print summary for all other frames */
1734 print_summary = 1;
1735 }
1736
1737 if (print_summary) {
1738 char *title;
1739 u32 rate;
1740
1741 if (hundred)
1742 title = "100Frames";
1743 else if (fc & IEEE80211_FCTL_RETRY)
1744 title = "Retry";
1745 else if (ieee80211_is_assoc_response(fc))
1746 title = "AscRsp";
1747 else if (ieee80211_is_reassoc_response(fc))
1748 title = "RasRsp";
1749 else if (ieee80211_is_probe_response(fc)) {
1750 title = "PrbRsp";
1751 print_dump = 1; /* dump frame contents */
1752 } else if (ieee80211_is_beacon(fc)) {
1753 title = "Beacon";
1754 print_dump = 1; /* dump frame contents */
1755 } else if (ieee80211_is_atim(fc))
1756 title = "ATIM";
1757 else if (ieee80211_is_auth(fc))
1758 title = "Auth";
1759 else if (ieee80211_is_deauth(fc))
1760 title = "DeAuth";
1761 else if (ieee80211_is_disassoc(fc))
1762 title = "DisAssoc";
1763 else
1764 title = "Frame";
1765
1766 rate = iwl_rate_index_from_plcp(rate_sym);
1767 if (rate == -1)
1768 rate = 0;
1769 else
1770 rate = iwl_rates[rate].ieee / 2;
1771
1772 /* print frame summary.
1773 * MAC addresses show just the last byte (for brevity),
1774 * but you can hack it to show more, if you'd like to. */
1775 if (dataframe)
1776 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1777 "len=%u, rssi=%d, chnl=%d, rate=%u\n",
1778 title, fc, header->addr1[5],
1779 length, rssi, channel, rate);
1780 else {
1781 /* src/dst addresses assume managed mode */
1782 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1783 "src=0x%02x, rssi=%u, tim=%lu usec, "
1784 "phy=0x%02x, chnl=%d\n",
1785 title, fc, header->addr1[5],
1786 header->addr3[5], rssi,
1787 tsf_low - priv->scan_start_tsf,
1788 phy_flags, channel);
1789 }
1790 }
1791 if (print_dump)
1792 iwl_print_hex_dump(IWL_DL_RX, data, length);
1793}
1794#endif
1795
1796static void iwl_unset_hw_setting(struct iwl_priv *priv)
1797{
1798 if (priv->hw_setting.shared_virt)
1799 pci_free_consistent(priv->pci_dev,
1800 sizeof(struct iwl_shared),
1801 priv->hw_setting.shared_virt,
1802 priv->hw_setting.shared_phys);
1803}
1804
1805/**
1806 * iwl_supported_rate_to_ie - fill in the supported rates in an IE field
1807 *
1808 * Returns a bitmask with a bit set for each supported rate inserted in the IE
1809 */
1810static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1811 u16 basic_rate, int max_count)
1812{
1813 u16 ret_rates = 0, bit;
1814 int i;
1815 u8 *rates;
1816
1817 rates = &(ie[1]);
1818
1819 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1820 if (bit & supported_rate) {
1821 ret_rates |= bit;
1822 rates[*ie] = iwl_rates[i].ieee |
1823 ((bit & basic_rate) ? 0x80 : 0x00);
1824 *ie = *ie + 1;
1825 if (*ie >= max_count)
1826 break;
1827 }
1828 }
1829
1830 return ret_rates;
1831}
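/*
 * Encoding produced above, for illustration: ie[0] holds the running
 * count of rate octets and ie[1..] the octets themselves. Each octet is
 * the IEEE rate value (iwl_rates[].ieee, in 500 kbps units) with bit
 * 0x80 set when the rate is also a basic rate, so a basic 1 Mbps rate
 * is written as 0x82 and a non-basic 6 Mbps rate as 0x0c.
 */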
1832
1833#ifdef CONFIG_IWLWIFI_HT
1834static void iwl_set_ht_capab(struct ieee80211_hw *hw,
1835 struct ieee80211_ht_capability *ht_cap,
1836 u8 use_wide_chan);
1837#endif
1838
1839/**
1840 * iwl_fill_probe_req - fill in all required fields and IE for probe request
1841 */
1842static u16 iwl_fill_probe_req(struct iwl_priv *priv,
1843 struct ieee80211_mgmt *frame,
1844 int left, int is_direct)
1845{
1846 int len = 0;
1847 u8 *pos = NULL;
1848 u16 ret_rates;
1849
1850 /* Make sure there is enough space for the probe request,
1851 * two mandatory IEs and the data */
1852 left -= 24;
1853 if (left < 0)
1854 return 0;
1855 len += 24;
1856
1857 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1858 memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN);
1859 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1860 memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN);
1861 frame->seq_ctrl = 0;
1862
1863 /* fill in our indirect SSID IE */
1864 /* ...next IE... */
1865
1866 left -= 2;
1867 if (left < 0)
1868 return 0;
1869 len += 2;
1870 pos = &(frame->u.probe_req.variable[0]);
1871 *pos++ = WLAN_EID_SSID;
1872 *pos++ = 0;
1873
1874 /* fill in our direct SSID IE... */
1875 if (is_direct) {
1876 /* ...next IE... */
1877 left -= 2 + priv->essid_len;
1878 if (left < 0)
1879 return 0;
1880 /* ... fill it in... */
1881 *pos++ = WLAN_EID_SSID;
1882 *pos++ = priv->essid_len;
1883 memcpy(pos, priv->essid, priv->essid_len);
1884 pos += priv->essid_len;
1885 len += 2 + priv->essid_len;
1886 }
1887
1888 /* fill in supported rate */
1889 /* ...next IE... */
1890 left -= 2;
1891 if (left < 0)
1892 return 0;
1893 /* ... fill it in... */
1894 *pos++ = WLAN_EID_SUPP_RATES;
1895 *pos = 0;
1896 ret_rates = priv->active_rate = priv->rates_mask;
1897 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1898
1899 iwl_supported_rate_to_ie(pos, priv->active_rate,
1900 priv->active_rate_basic, left);
1901 len += 2 + *pos;
1902 pos += (*pos) + 1;
1903 ret_rates = ~ret_rates & priv->active_rate;
1904
1905 if (ret_rates == 0)
1906 goto fill_end;
1907
1908 /* fill in supported extended rate */
1909 /* ...next IE... */
1910 left -= 2;
1911 if (left < 0)
1912 return 0;
1913 /* ... fill it in... */
1914 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1915 *pos = 0;
1916 iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left);
1917 if (*pos > 0)
1918 len += 2 + *pos;
1919
1920#ifdef CONFIG_IWLWIFI_HT
1921 if (is_direct && priv->is_ht_enabled) {
1922 u8 use_wide_chan = 1;
1923
1924 if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
1925 use_wide_chan = 0;
1926 pos += (*pos) + 1;
1927 *pos++ = WLAN_EID_HT_CAPABILITY;
1928 *pos++ = sizeof(struct ieee80211_ht_capability);
1929 iwl_set_ht_capab(NULL, (struct ieee80211_ht_capability *)pos,
1930 use_wide_chan);
1931 len += 2 + sizeof(struct ieee80211_ht_capability);
1932 }
1933#endif /*CONFIG_IWLWIFI_HT */
1934
1935 fill_end:
1936 return (u16)len;
1937}
1938
1939/*
1940 * QoS support
1941*/
1942#ifdef CONFIG_IWLWIFI_QOS
1943static int iwl_send_qos_params_command(struct iwl_priv *priv,
1944 struct iwl_qosparam_cmd *qos)
1945{
1946
1947 return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1948 sizeof(struct iwl_qosparam_cmd), qos);
1949}
1950
1951static void iwl_reset_qos(struct iwl_priv *priv)
1952{
1953 u16 cw_min = 15;
1954 u16 cw_max = 1023;
1955 u8 aifs = 2;
1956 u8 is_legacy = 0;
1957 unsigned long flags;
1958 int i;
1959
1960 spin_lock_irqsave(&priv->lock, flags);
1961 priv->qos_data.qos_active = 0;
1962
1963 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
1964 if (priv->qos_data.qos_enable)
1965 priv->qos_data.qos_active = 1;
1966 if (!(priv->active_rate & 0xfff0)) {
1967 cw_min = 31;
1968 is_legacy = 1;
1969 }
1970 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
1971 if (priv->qos_data.qos_enable)
1972 priv->qos_data.qos_active = 1;
1973 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
1974 cw_min = 31;
1975 is_legacy = 1;
1976 }
1977
1978 if (priv->qos_data.qos_active)
1979 aifs = 3;
1980
1981 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
1982 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
1983 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
1984 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
1985 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
1986
1987 if (priv->qos_data.qos_active) {
1988 i = 1;
1989 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
1990 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
1991 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
1992 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1993 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1994
1995 i = 2;
1996 priv->qos_data.def_qos_parm.ac[i].cw_min =
1997 cpu_to_le16((cw_min + 1) / 2 - 1);
1998 priv->qos_data.def_qos_parm.ac[i].cw_max =
1999 cpu_to_le16(cw_max);
2000 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2001 if (is_legacy)
2002 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2003 cpu_to_le16(6016);
2004 else
2005 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2006 cpu_to_le16(3008);
2007 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2008
2009 i = 3;
2010 priv->qos_data.def_qos_parm.ac[i].cw_min =
2011 cpu_to_le16((cw_min + 1) / 4 - 1);
2012 priv->qos_data.def_qos_parm.ac[i].cw_max =
2013 cpu_to_le16((cw_max + 1) / 2 - 1);
2014 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2015 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2016 if (is_legacy)
2017 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2018 cpu_to_le16(3264);
2019 else
2020 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2021 cpu_to_le16(1504);
2022 } else {
2023 for (i = 1; i < 4; i++) {
2024 priv->qos_data.def_qos_parm.ac[i].cw_min =
2025 cpu_to_le16(cw_min);
2026 priv->qos_data.def_qos_parm.ac[i].cw_max =
2027 cpu_to_le16(cw_max);
2028 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2029 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2030 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2031 }
2032 }
2033 IWL_DEBUG_QOS("set QoS to default \n");
2034
2035 spin_unlock_irqrestore(&priv->lock, flags);
2036}
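/*
 * For reference, the defaults programmed above (cw_min = 15,
 * cw_max = 1023, QoS active, non-legacy) work out to:
 *
 *   AC   cw_min  cw_max  aifsn  edca_txop
 *   0      15     1023     3        0
 *   1      15     1023     7        0
 *   2       7     1023     2     3008
 *   3       3      511     2     1504
 *
 * In the legacy case cw_min starts at 31 and the AC2/AC3 TXOP values
 * become 6016 and 3264.
 */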
2037
2038static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
2039{
2040 unsigned long flags;
2041
2042 if (priv == NULL)
2043 return;
2044
2045 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2046 return;
2047
2048 if (!priv->qos_data.qos_enable)
2049 return;
2050
2051 spin_lock_irqsave(&priv->lock, flags);
2052 priv->qos_data.def_qos_parm.qos_flags = 0;
2053
2054 if (priv->qos_data.qos_cap.q_AP.queue_request &&
2055 !priv->qos_data.qos_cap.q_AP.txop_request)
2056 priv->qos_data.def_qos_parm.qos_flags |=
2057 QOS_PARAM_FLG_TXOP_TYPE_MSK;
2058
2059 if (priv->qos_data.qos_active)
2060 priv->qos_data.def_qos_parm.qos_flags |=
2061 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2062
2063 spin_unlock_irqrestore(&priv->lock, flags);
2064
2065 if (force || iwl_is_associated(priv)) {
2066 IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n",
2067 priv->qos_data.qos_active);
2068
2069 iwl_send_qos_params_command(priv,
2070 &(priv->qos_data.def_qos_parm));
2071 }
2072}
2073
2074#endif /* CONFIG_IWLWIFI_QOS */
2075/*
2076 * Power management (not Tx power!) functions
2077 */
2078#define MSEC_TO_USEC 1024
2079
2080#define NOSLP __constant_cpu_to_le16(0), 0, 0
2081#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
2082#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2083#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2084 __constant_cpu_to_le32(X1), \
2085 __constant_cpu_to_le32(X2), \
2086 __constant_cpu_to_le32(X3), \
2087 __constant_cpu_to_le32(X4)}
2088
2089
2090/* default power management (not Tx power) table values */
2091/* for tim 0-10 */
2092static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
2093 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2094 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2095 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2096 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2097 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2098 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2099};
2100
2101/* for tim > 10 */
2102static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
2103 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2104 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2105 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2106 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2107 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2108 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2109 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2110 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2111 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2112 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2113};
2114
2115int iwl_power_init_handle(struct iwl_priv *priv)
2116{
2117 int rc = 0, i;
2118 struct iwl_power_mgr *pow_data;
2119 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
2120 u16 pci_pm;
2121
2122 IWL_DEBUG_POWER("Initialize power \n");
2123
2124 pow_data = &(priv->power_data);
2125
2126 memset(pow_data, 0, sizeof(*pow_data));
2127
2128 pow_data->active_index = IWL_POWER_RANGE_0;
2129 pow_data->dtim_val = 0xffff;
2130
2131 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2132 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2133
2134 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2135 if (rc != 0)
2136 return 0;
2137 else {
2138 struct iwl_powertable_cmd *cmd;
2139
2140 IWL_DEBUG_POWER("adjust power command flags\n");
2141
2142 for (i = 0; i < IWL_POWER_AC; i++) {
2143 cmd = &pow_data->pwr_range_0[i].cmd;
2144
2145 if (pci_pm & 0x1)
2146 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2147 else
2148 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2149 }
2150 }
2151 return rc;
2152}
2153
2154static int iwl_update_power_cmd(struct iwl_priv *priv,
2155 struct iwl_powertable_cmd *cmd, u32 mode)
2156{
2157 int rc = 0, i;
2158 u8 skip;
2159 u32 max_sleep = 0;
2160 struct iwl_power_vec_entry *range;
2161 u8 period = 0;
2162 struct iwl_power_mgr *pow_data;
2163
2164 if (mode > IWL_POWER_INDEX_5) {
2165 IWL_DEBUG_POWER("Error: invalid power mode\n");
2166 return -1;
2167 }
2168 pow_data = &(priv->power_data);
2169
2170 if (pow_data->active_index == IWL_POWER_RANGE_0)
2171 range = &pow_data->pwr_range_0[0];
2172 else
2173 range = &pow_data->pwr_range_1[0];
2174
2175 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd));
2176
2177#ifdef IWL_MAC80211_DISABLE
2178 if (priv->assoc_network != NULL) {
2179 unsigned long flags;
2180
2181 period = priv->assoc_network->tim.tim_period;
2182 }
2183#endif /*IWL_MAC80211_DISABLE */
2184 skip = range[mode].no_dtim;
2185
2186 if (period == 0) {
2187 period = 1;
2188 skip = 0;
2189 }
2190
2191 if (skip == 0) {
2192 max_sleep = period;
2193 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2194 } else {
2195 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2196 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2197 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2198 }
2199
2200 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2201 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2202 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2203 }
2204
2205 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2206 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2207 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2208 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2209 le32_to_cpu(cmd->sleep_interval[0]),
2210 le32_to_cpu(cmd->sleep_interval[1]),
2211 le32_to_cpu(cmd->sleep_interval[2]),
2212 le32_to_cpu(cmd->sleep_interval[3]),
2213 le32_to_cpu(cmd->sleep_interval[4]));
2214
2215 return rc;
2216}
2217
2218static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode)
2219{
2220 u32 final_mode = mode;
2221 int rc;
2222 struct iwl_powertable_cmd cmd;
2223
2224	/* If on battery, set to 3,
2225	 * if plugged into AC power, set to CAM ("continuously aware mode"),
2226	 * else user level */
2227 switch (mode) {
2228 case IWL_POWER_BATTERY:
2229 final_mode = IWL_POWER_INDEX_3;
2230 break;
2231 case IWL_POWER_AC:
2232 final_mode = IWL_POWER_MODE_CAM;
2233 break;
2234 default:
2235 final_mode = mode;
2236 break;
2237 }
2238
2239 cmd.keep_alive_beacons = 0;
2240
2241 iwl_update_power_cmd(priv, &cmd, final_mode);
2242
2243 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
2244
2245 if (final_mode == IWL_POWER_MODE_CAM)
2246 clear_bit(STATUS_POWER_PMI, &priv->status);
2247 else
2248 set_bit(STATUS_POWER_PMI, &priv->status);
2249
2250 return rc;
2251}
2252
2253int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
2254{
2255 /* Filter incoming packets to determine if they are targeted toward
2256 * this network, discarding packets coming from ourselves */
2257 switch (priv->iw_mode) {
2258 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2259 /* packets from our adapter are dropped (echo) */
2260 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2261 return 0;
2262 /* {broad,multi}cast packets to our IBSS go through */
2263 if (is_multicast_ether_addr(header->addr1))
2264 return !compare_ether_addr(header->addr3, priv->bssid);
2265 /* packets to our adapter go through */
2266 return !compare_ether_addr(header->addr1, priv->mac_addr);
2267 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2268 /* packets from our adapter are dropped (echo) */
2269 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2270 return 0;
2271 /* {broad,multi}cast packets to our BSS go through */
2272 if (is_multicast_ether_addr(header->addr1))
2273 return !compare_ether_addr(header->addr2, priv->bssid);
2274 /* packets to our adapter go through */
2275 return !compare_ether_addr(header->addr1, priv->mac_addr);
2276 }
2277
2278 return 1;
2279}
2280
2281#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2282
2283const char *iwl_get_tx_fail_reason(u32 status)
2284{
2285 switch (status & TX_STATUS_MSK) {
2286 case TX_STATUS_SUCCESS:
2287 return "SUCCESS";
2288 TX_STATUS_ENTRY(SHORT_LIMIT);
2289 TX_STATUS_ENTRY(LONG_LIMIT);
2290 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2291 TX_STATUS_ENTRY(MGMNT_ABORT);
2292 TX_STATUS_ENTRY(NEXT_FRAG);
2293 TX_STATUS_ENTRY(LIFE_EXPIRE);
2294 TX_STATUS_ENTRY(DEST_PS);
2295 TX_STATUS_ENTRY(ABORTED);
2296 TX_STATUS_ENTRY(BT_RETRY);
2297 TX_STATUS_ENTRY(STA_INVALID);
2298 TX_STATUS_ENTRY(FRAG_DROPPED);
2299 TX_STATUS_ENTRY(TID_DISABLE);
2300 TX_STATUS_ENTRY(FRAME_FLUSHED);
2301 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2302 TX_STATUS_ENTRY(TX_LOCKED);
2303 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2304 }
2305
2306 return "UNKNOWN";
2307}
2308
2309/**
2310 * iwl_scan_cancel - Cancel any currently executing HW scan
2311 *
2312 * NOTE: priv->mutex is not required before calling this function
2313 */
2314static int iwl_scan_cancel(struct iwl_priv *priv)
2315{
2316 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2317 clear_bit(STATUS_SCANNING, &priv->status);
2318 return 0;
2319 }
2320
2321 if (test_bit(STATUS_SCANNING, &priv->status)) {
2322 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2323 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2324 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2325 queue_work(priv->workqueue, &priv->abort_scan);
2326
2327 } else
2328 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2329
2330 return test_bit(STATUS_SCANNING, &priv->status);
2331 }
2332
2333 return 0;
2334}
2335
2336/**
2337 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
2338 * @ms: amount of time to wait (in milliseconds) for scan to abort
2339 *
2340 * NOTE: priv->mutex must be held before calling this function
2341 */
2342static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
2343{
2344 unsigned long now = jiffies;
2345 int ret;
2346
2347 ret = iwl_scan_cancel(priv);
2348 if (ret && ms) {
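		/*
		 * Temporarily drop the mutex while we poll for the scan to
		 * stop, so that whatever the abort path needs to do under
		 * priv->mutex can proceed.
		 */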
2349 mutex_unlock(&priv->mutex);
2350 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2351 test_bit(STATUS_SCANNING, &priv->status))
2352 msleep(1);
2353 mutex_lock(&priv->mutex);
2354
2355 return test_bit(STATUS_SCANNING, &priv->status);
2356 }
2357
2358 return ret;
2359}
2360
2361static void iwl_sequence_reset(struct iwl_priv *priv)
2362{
2363 /* Reset ieee stats */
2364
2365 /* We don't reset the net_device_stats (ieee->stats) on
2366 * re-association */
2367
2368 priv->last_seq_num = -1;
2369 priv->last_frag_num = -1;
2370 priv->last_packet_time = 0;
2371
2372 iwl_scan_cancel(priv);
2373}
2374
2375#define MAX_UCODE_BEACON_INTERVAL 4096
2376#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2377
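/*
 * Scale the beacon interval down by an integer factor so that the value
 * handed to the uCode never exceeds MAX_UCODE_BEACON_INTERVAL.  For example,
 * a 5000 TU interval becomes 2500 TU, while a typical 100 TU interval passes
 * through unchanged.
 */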
2378static __le16 iwl_adjust_beacon_interval(u16 beacon_val)
2379{
2380 u16 new_val = 0;
2381 u16 beacon_factor = 0;
2382
2383 beacon_factor =
2384 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2385 / MAX_UCODE_BEACON_INTERVAL;
2386 new_val = beacon_val / beacon_factor;
2387
2388 return cpu_to_le16(new_val);
2389}
2390
2391static void iwl_setup_rxon_timing(struct iwl_priv *priv)
2392{
2393 u64 interval_tm_unit;
2394 u64 tsf, result;
2395 unsigned long flags;
2396 struct ieee80211_conf *conf = NULL;
2397 u16 beacon_int = 0;
2398
2399 conf = ieee80211_get_hw_conf(priv->hw);
2400
2401 spin_lock_irqsave(&priv->lock, flags);
2402 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2403 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2404
2405 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2406
2407 tsf = priv->timestamp1;
2408 tsf = ((tsf << 32) | priv->timestamp0);
2409
2410 beacon_int = priv->beacon_int;
2411 spin_unlock_irqrestore(&priv->lock, flags);
2412
2413 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2414 if (beacon_int == 0) {
2415 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2416 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2417 } else {
2418 priv->rxon_timing.beacon_interval =
2419 cpu_to_le16(beacon_int);
2420 priv->rxon_timing.beacon_interval =
2421 iwl_adjust_beacon_interval(
2422 le16_to_cpu(priv->rxon_timing.beacon_interval));
2423 }
2424
2425 priv->rxon_timing.atim_window = 0;
2426 } else {
2427 priv->rxon_timing.beacon_interval =
2428 iwl_adjust_beacon_interval(conf->beacon_int);
2429		/* TODO: we need to get atim_window from the upper stack;
2430		 * for now we set it to 0 */
2431 priv->rxon_timing.atim_window = 0;
2432 }
2433
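	/*
	 * beacon_init_val is the time (in usec) remaining from the cached TSF
	 * until the next expected beacon: reduce the TSF modulo the beacon
	 * interval and subtract the remainder from one full interval.
	 */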
2434 interval_tm_unit =
2435 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2436 result = do_div(tsf, interval_tm_unit);
2437 priv->rxon_timing.beacon_init_val =
2438 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2439
2440 IWL_DEBUG_ASSOC
2441		("beacon interval %d beacon timer %d atim window %d\n",
2442 le16_to_cpu(priv->rxon_timing.beacon_interval),
2443 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2444 le16_to_cpu(priv->rxon_timing.atim_window));
2445}
2446
2447static int iwl_scan_initiate(struct iwl_priv *priv)
2448{
2449 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2450 IWL_ERROR("APs don't scan.\n");
2451 return 0;
2452 }
2453
2454 if (!iwl_is_ready_rf(priv)) {
2455 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2456 return -EIO;
2457 }
2458
2459 if (test_bit(STATUS_SCANNING, &priv->status)) {
2460 IWL_DEBUG_SCAN("Scan already in progress.\n");
2461 return -EAGAIN;
2462 }
2463
2464 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2465 IWL_DEBUG_SCAN("Scan request while abort pending. "
2466 "Queuing.\n");
2467 return -EAGAIN;
2468 }
2469
2470 IWL_DEBUG_INFO("Starting scan...\n");
2471 priv->scan_bands = 2;
2472 set_bit(STATUS_SCANNING, &priv->status);
2473 priv->scan_start = jiffies;
2474 priv->scan_pass_start = priv->scan_start;
2475
2476 queue_work(priv->workqueue, &priv->request_scan);
2477
2478 return 0;
2479}
2480
2481static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
2482{
2483 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
2484
2485 if (hw_decrypt)
2486 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2487 else
2488 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2489
2490 return 0;
2491}
2492
2493static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode)
2494{
2495 if (phymode == MODE_IEEE80211A) {
2496 priv->staging_rxon.flags &=
2497 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2498 | RXON_FLG_CCK_MSK);
2499 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2500 } else {
2501 /* Copied from iwl_bg_post_associate() */
2502 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2503 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2504 else
2505 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2506
2507 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2508 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2509
2510 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2511 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2512 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2513 }
2514}
2515
2516/*
2517 * initialize the rxon structure with default values from the eeprom
2518 */
2519static void iwl_connection_init_rx_config(struct iwl_priv *priv)
2520{
2521 const struct iwl_channel_info *ch_info;
2522
2523 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2524
2525 switch (priv->iw_mode) {
2526 case IEEE80211_IF_TYPE_AP:
2527 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2528 break;
2529
2530 case IEEE80211_IF_TYPE_STA:
2531 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2532 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2533 break;
2534
2535 case IEEE80211_IF_TYPE_IBSS:
2536 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2537 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2538 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2539 RXON_FILTER_ACCEPT_GRP_MSK;
2540 break;
2541
2542 case IEEE80211_IF_TYPE_MNTR:
2543 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2544 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2545 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2546 break;
2547 }
2548
2549#if 0
2550 /* TODO: Figure out when short_preamble would be set and cache from
2551 * that */
2552 if (!hw_to_local(priv->hw)->short_preamble)
2553 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2554 else
2555 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2556#endif
2557
2558 ch_info = iwl_get_channel_info(priv, priv->phymode,
2559 le16_to_cpu(priv->staging_rxon.channel));
2560
2561 if (!ch_info)
2562 ch_info = &priv->channel_info[0];
2563
2564	/*
2565	 * In some cases the A-band channels are all non-IBSS;
2566	 * in that case force a B/G channel.
2567	 */
2568 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2569 !(is_channel_ibss(ch_info)))
2570 ch_info = &priv->channel_info[0];
2571
2572 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2573 if (is_channel_a_band(ch_info))
2574 priv->phymode = MODE_IEEE80211A;
2575 else
2576 priv->phymode = MODE_IEEE80211G;
2577
2578 iwl_set_flags_for_phymode(priv, priv->phymode);
2579
2580 priv->staging_rxon.ofdm_basic_rates =
2581 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2582 priv->staging_rxon.cck_basic_rates =
2583 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2584
2585 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2586 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2587 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2588 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2589 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2590 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2591 iwl4965_set_rxon_chain(priv);
2592}
2593
2594static int iwl_set_mode(struct iwl_priv *priv, int mode)
2595{
2596 if (!iwl_is_ready_rf(priv))
2597 return -EAGAIN;
2598
2599 if (mode == IEEE80211_IF_TYPE_IBSS) {
2600 const struct iwl_channel_info *ch_info;
2601
2602 ch_info = iwl_get_channel_info(priv,
2603 priv->phymode,
2604 le16_to_cpu(priv->staging_rxon.channel));
2605
2606 if (!ch_info || !is_channel_ibss(ch_info)) {
2607 IWL_ERROR("channel %d not IBSS channel\n",
2608 le16_to_cpu(priv->staging_rxon.channel));
2609 return -EINVAL;
2610 }
2611 }
2612
2613 cancel_delayed_work(&priv->scan_check);
2614 if (iwl_scan_cancel_timeout(priv, 100)) {
2615 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2616 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2617 return -EAGAIN;
2618 }
2619
2620 priv->iw_mode = mode;
2621
2622 iwl_connection_init_rx_config(priv);
2623 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2624
2625 iwl_clear_stations_table(priv);
2626
2627 iwl_commit_rxon(priv);
2628
2629 return 0;
2630}
2631
2632static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2633 struct ieee80211_tx_control *ctl,
2634 struct iwl_cmd *cmd,
2635 struct sk_buff *skb_frag,
2636 int last_frag)
2637{
2638 struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
2639
2640 switch (keyinfo->alg) {
2641 case ALG_CCMP:
2642 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2643 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2644 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2645 break;
2646
2647 case ALG_TKIP:
2648#if 0
2649 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2650
2651 if (last_frag)
2652 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2653 8);
2654 else
2655 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2656#endif
2657 break;
2658
2659 case ALG_WEP:
2660 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2661 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2662
2663 if (keyinfo->keylen == 13)
2664 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2665
2666 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2667
2668 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2669 "with key %d\n", ctl->key_idx);
2670 break;
2671
2672 case ALG_NONE:
2673 IWL_DEBUG_TX("Tx packet in the clear (encrypt requested).\n");
2674 break;
2675
2676 default:
2677 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2678 break;
2679 }
2680}
2681
2682/*
2683 * Build the common (non-rate, non-security) fields of a REPLY_TX command.
2684 */
2685static void iwl_build_tx_cmd_basic(struct iwl_priv *priv,
2686 struct iwl_cmd *cmd,
2687 struct ieee80211_tx_control *ctrl,
2688 struct ieee80211_hdr *hdr,
2689 int is_unicast, u8 std_id)
2690{
2691 __le16 *qc;
2692 u16 fc = le16_to_cpu(hdr->frame_control);
2693 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2694
2695 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2696 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2697 tx_flags |= TX_CMD_FLG_ACK_MSK;
2698 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2699 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2700 if (ieee80211_is_probe_response(fc) &&
2701 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2702 tx_flags |= TX_CMD_FLG_TSF_MSK;
2703 } else {
2704 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2705 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2706 }
2707
2708 cmd->cmd.tx.sta_id = std_id;
2709 if (ieee80211_get_morefrag(hdr))
2710 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2711
2712 qc = ieee80211_get_qos_ctrl(hdr);
2713 if (qc) {
2714 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2715 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2716 } else
2717 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2718
2719 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2720 tx_flags |= TX_CMD_FLG_RTS_MSK;
2721 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2722 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2723 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2724 tx_flags |= TX_CMD_FLG_CTS_MSK;
2725 }
2726
2727 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2728 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2729
2730 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2731 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2732 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2733 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2734 cmd->cmd.tx.timeout.pm_frame_timeout =
2735 cpu_to_le16(3);
2736 else
2737 cmd->cmd.tx.timeout.pm_frame_timeout =
2738 cpu_to_le16(2);
2739 } else
2740 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2741
2742 cmd->cmd.tx.driver_txop = 0;
2743 cmd->cmd.tx.tx_flags = tx_flags;
2744 cmd->cmd.tx.next_frame_len = 0;
2745}
2746
2747static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2748{
2749 int sta_id;
2750 u16 fc = le16_to_cpu(hdr->frame_control);
2751
2752 /* If this frame is broadcast or not data then use the broadcast
2753 * station id */
2754 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2755 is_multicast_ether_addr(hdr->addr1))
2756 return priv->hw_setting.bcast_sta_id;
2757
2758 switch (priv->iw_mode) {
2759
2760 /* If this frame is part of a BSS network (we're a station), then
2761 * we use the AP's station id */
2762 case IEEE80211_IF_TYPE_STA:
2763 return IWL_AP_ID;
2764
2765 /* If we are an AP, then find the station, or use BCAST */
2766 case IEEE80211_IF_TYPE_AP:
2767 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2768 if (sta_id != IWL_INVALID_STATION)
2769 return sta_id;
2770 return priv->hw_setting.bcast_sta_id;
2771
2772	/* If this frame is part of an IBSS network, then we use the
2773	 * target-specific station id */
2774 case IEEE80211_IF_TYPE_IBSS:
2775 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2776 if (sta_id != IWL_INVALID_STATION)
2777 return sta_id;
2778
2779 sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
2780
2781 if (sta_id != IWL_INVALID_STATION)
2782 return sta_id;
2783
2784 IWL_DEBUG_DROP("Station " MAC_FMT " not in station map. "
2785 "Defaulting to broadcast...\n",
2786 MAC_ARG(hdr->addr1));
2787 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2788 return priv->hw_setting.bcast_sta_id;
2789
2790 default:
2791		IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
2792 return priv->hw_setting.bcast_sta_id;
2793 }
2794}
2795
2796/*
2797 * start REPLY_TX command process
2798 */
2799static int iwl_tx_skb(struct iwl_priv *priv,
2800 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2801{
2802 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2803 struct iwl_tfd_frame *tfd;
2804 u32 *control_flags;
2805 int txq_id = ctl->queue;
2806 struct iwl_tx_queue *txq = NULL;
2807 struct iwl_queue *q = NULL;
2808 dma_addr_t phys_addr;
2809 dma_addr_t txcmd_phys;
2810 struct iwl_cmd *out_cmd = NULL;
2811 u16 len, idx, len_org;
2812 u8 id, hdr_len, unicast;
2813 u8 sta_id;
2814 u16 seq_number = 0;
2815 u16 fc;
2816 __le16 *qc;
2817 u8 wait_write_ptr = 0;
2818 unsigned long flags;
2819 int rc;
2820
2821 spin_lock_irqsave(&priv->lock, flags);
2822 if (iwl_is_rfkill(priv)) {
2823 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2824 goto drop_unlock;
2825 }
2826
2827 if (!priv->interface_id) {
2828 IWL_DEBUG_DROP("Dropping - !priv->interface_id\n");
2829 goto drop_unlock;
2830 }
2831
2832 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2833 IWL_ERROR("ERROR: No TX rate available.\n");
2834 goto drop_unlock;
2835 }
2836
2837 unicast = !is_multicast_ether_addr(hdr->addr1);
2838 id = 0;
2839
2840 fc = le16_to_cpu(hdr->frame_control);
2841
2842#ifdef CONFIG_IWLWIFI_DEBUG
2843 if (ieee80211_is_auth(fc))
2844 IWL_DEBUG_TX("Sending AUTH frame\n");
2845 else if (ieee80211_is_assoc_request(fc))
2846 IWL_DEBUG_TX("Sending ASSOC frame\n");
2847 else if (ieee80211_is_reassoc_request(fc))
2848 IWL_DEBUG_TX("Sending REASSOC frame\n");
2849#endif
2850
2851 if (!iwl_is_associated(priv) &&
2852 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
2853 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2854 goto drop_unlock;
2855 }
2856
2857 spin_unlock_irqrestore(&priv->lock, flags);
2858
2859 hdr_len = ieee80211_get_hdrlen(fc);
2860 sta_id = iwl_get_sta_id(priv, hdr);
2861 if (sta_id == IWL_INVALID_STATION) {
2862 IWL_DEBUG_DROP("Dropping - INVALID STATION: " MAC_FMT "\n",
2863 MAC_ARG(hdr->addr1));
2864 goto drop;
2865 }
2866
2867 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2868
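	/*
	 * For QoS data frames the driver keeps a per-station, per-TID
	 * sequence number and stamps it into the 802.11 header, preserving
	 * the fragment bits supplied by mac80211.
	 */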
2869 qc = ieee80211_get_qos_ctrl(hdr);
2870 if (qc) {
2871 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2872 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2873 IEEE80211_SCTL_SEQ;
2874 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2875 (hdr->seq_ctrl &
2876 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2877 seq_number += 0x10;
2878#ifdef CONFIG_IWLWIFI_HT
2879#ifdef CONFIG_IWLWIFI_HT_AGG
2880 /* aggregation is on for this <sta,tid> */
2881 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
2882 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
2883#endif /* CONFIG_IWLWIFI_HT_AGG */
2884#endif /* CONFIG_IWLWIFI_HT */
2885 }
2886 txq = &priv->txq[txq_id];
2887 q = &txq->q;
2888
2889 spin_lock_irqsave(&priv->lock, flags);
2890
2891 tfd = &txq->bd[q->first_empty];
2892 memset(tfd, 0, sizeof(*tfd));
2893 control_flags = (u32 *) tfd;
2894 idx = get_cmd_index(q, q->first_empty, 0);
2895
2896 memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info));
2897 txq->txb[q->first_empty].skb[0] = skb;
2898 memcpy(&(txq->txb[q->first_empty].status.control),
2899 ctl, sizeof(struct ieee80211_tx_control));
2900 out_cmd = &txq->cmd[idx];
2901 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2902 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2903 out_cmd->hdr.cmd = REPLY_TX;
2904 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2905 INDEX_TO_SEQ(q->first_empty)));
2906	/* copy the MAC header into the Tx command */
2907 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2908
2909 /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */
2910 len = priv->hw_setting.tx_cmd_len +
2911 sizeof(struct iwl_cmd_header) + hdr_len;
2912
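	/*
	 * Round the Tx command length up to a 4-byte boundary for DMA and
	 * remember (len_org is reused as a flag) whether padding was added,
	 * so TX_CMD_FLG_MH_PAD_MSK can be set further down.
	 */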
2913 len_org = len;
2914 len = (len + 3) & ~3;
2915
2916 if (len_org != len)
2917 len_org = 1;
2918 else
2919 len_org = 0;
2920
2921 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2922 offsetof(struct iwl_cmd, hdr);
2923
2924 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2925
2926 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
2927 iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
2928
2929 /* 802.11 null functions have no payload... */
2930 len = skb->len - hdr_len;
2931 if (len) {
2932 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2933 len, PCI_DMA_TODEVICE);
2934 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2935 }
2936
2937 if (len_org)
2938 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2939
2940 len = (u16)skb->len;
2941 out_cmd->cmd.tx.len = cpu_to_le16(len);
2942
2943 /* TODO need this for burst mode later on */
2944 iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
2945
2946 /* set is_hcca to 0; it probably will never be implemented */
2947 iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
2948
2949 iwl4965_tx_cmd(priv, out_cmd, sta_id, txcmd_phys,
2950 hdr, hdr_len, ctl, NULL);
2951
2952 if (!ieee80211_get_morefrag(hdr)) {
2953 txq->need_update = 1;
2954 if (qc) {
2955 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2956 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2957 }
2958 } else {
2959 wait_write_ptr = 1;
2960 txq->need_update = 0;
2961 }
2962
2963 iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
2964 sizeof(out_cmd->cmd.tx));
2965
2966 iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2967 ieee80211_get_hdrlen(fc));
2968
2969 iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
2970
2971 q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
2972 rc = iwl_tx_queue_update_write_ptr(priv, txq);
2973 spin_unlock_irqrestore(&priv->lock, flags);
2974
2975 if (rc)
2976 return rc;
2977
2978 if ((iwl_queue_space(q) < q->high_mark)
2979 && priv->mac80211_registered) {
2980 if (wait_write_ptr) {
2981 spin_lock_irqsave(&priv->lock, flags);
2982 txq->need_update = 1;
2983 iwl_tx_queue_update_write_ptr(priv, txq);
2984 spin_unlock_irqrestore(&priv->lock, flags);
2985 }
2986
2987 ieee80211_stop_queue(priv->hw, ctl->queue);
2988 }
2989
2990 return 0;
2991
2992drop_unlock:
2993 spin_unlock_irqrestore(&priv->lock, flags);
2994drop:
2995 return -1;
2996}
2997
2998static void iwl_set_rate(struct iwl_priv *priv)
2999{
3000 const struct ieee80211_hw_mode *hw = NULL;
3001 struct ieee80211_rate *rate;
3002 int i;
3003
3004 hw = iwl_get_hw_mode(priv, priv->phymode);
3005
3006 priv->active_rate = 0;
3007 priv->active_rate_basic = 0;
3008
3009 IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
3010 hw->mode == MODE_IEEE80211A ?
3011 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
3012
3013 for (i = 0; i < hw->num_rates; i++) {
3014 rate = &(hw->rates[i]);
3015 if ((rate->val < IWL_RATE_COUNT) &&
3016 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3017 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
3018 rate->val, iwl_rates[rate->val].plcp,
3019 (rate->flags & IEEE80211_RATE_BASIC) ?
3020 "*" : "");
3021 priv->active_rate |= (1 << rate->val);
3022 if (rate->flags & IEEE80211_RATE_BASIC)
3023 priv->active_rate_basic |= (1 << rate->val);
3024 } else
3025 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
3026 rate->val, iwl_rates[rate->val].plcp);
3027 }
3028
3029 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
3030 priv->active_rate, priv->active_rate_basic);
3031
3032 /*
3033	 * If basic rates were reported by mac80211, use them; otherwise
3034	 * fall back to the defaults of all CCK rates plus 6, 12 and 24
3035	 * Mbps for OFDM.
3036 */
3037 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
3038 priv->staging_rxon.cck_basic_rates =
3039 ((priv->active_rate_basic &
3040 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
3041 else
3042 priv->staging_rxon.cck_basic_rates =
3043 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
3044
3045 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
3046 priv->staging_rxon.ofdm_basic_rates =
3047 ((priv->active_rate_basic &
3048 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
3049 IWL_FIRST_OFDM_RATE) & 0xFF;
3050 else
3051 priv->staging_rxon.ofdm_basic_rates =
3052 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
3053}
3054
3055static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
3056{
3057 unsigned long flags;
3058
3059 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
3060 return;
3061
3062 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
3063 disable_radio ? "OFF" : "ON");
3064
3065 if (disable_radio) {
3066 iwl_scan_cancel(priv);
3067 /* FIXME: This is a workaround for AP */
3068 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3069 spin_lock_irqsave(&priv->lock, flags);
3070 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
3071 CSR_UCODE_SW_BIT_RFKILL);
3072 spin_unlock_irqrestore(&priv->lock, flags);
3073 iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
3074 set_bit(STATUS_RF_KILL_SW, &priv->status);
3075 }
3076 return;
3077 }
3078
3079 spin_lock_irqsave(&priv->lock, flags);
3080 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3081
3082 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3083 spin_unlock_irqrestore(&priv->lock, flags);
3084
3085 /* wake up ucode */
3086 msleep(10);
3087
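	/*
	 * Reading CSR_UCODE_DRV_GP1 and cycling restricted access once,
	 * together with the msleep above, gives the uCode time to wake
	 * before we check the HW RF-kill switch below.
	 */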
3088 spin_lock_irqsave(&priv->lock, flags);
3089 iwl_read32(priv, CSR_UCODE_DRV_GP1);
3090 if (!iwl_grab_restricted_access(priv))
3091 iwl_release_restricted_access(priv);
3092 spin_unlock_irqrestore(&priv->lock, flags);
3093
3094 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3095 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3096 "disabled by HW switch\n");
3097 return;
3098 }
3099
3100 queue_work(priv->workqueue, &priv->restart);
3101 return;
3102}
3103
3104void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
3105 u32 decrypt_res, struct ieee80211_rx_status *stats)
3106{
3107 u16 fc =
3108 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3109
3110 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3111 return;
3112
3113 if (!(fc & IEEE80211_FCTL_PROTECTED))
3114 return;
3115
3116 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3117 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3118 case RX_RES_STATUS_SEC_TYPE_TKIP:
3119 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3120 RX_RES_STATUS_BAD_ICV_MIC)
3121 stats->flag |= RX_FLAG_MMIC_ERROR;
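		/* fall through - also record whether decryption succeeded */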
3122 case RX_RES_STATUS_SEC_TYPE_WEP:
3123 case RX_RES_STATUS_SEC_TYPE_CCMP:
3124 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3125 RX_RES_STATUS_DECRYPT_OK) {
3126			IWL_DEBUG_RX("hw decrypt successful\n");
3127 stats->flag |= RX_FLAG_DECRYPTED;
3128 }
3129 break;
3130
3131 default:
3132 break;
3133 }
3134}
3135
3136void iwl_handle_data_packet_monitor(struct iwl_priv *priv,
3137 struct iwl_rx_mem_buffer *rxb,
3138 void *data, short len,
3139 struct ieee80211_rx_status *stats,
3140 u16 phy_flags)
3141{
3142 struct iwl_rt_rx_hdr *iwl_rt;
3143
3144 /* First cache any information we need before we overwrite
3145 * the information provided in the skb from the hardware */
3146 s8 signal = stats->ssi;
3147 s8 noise = 0;
3148 int rate = stats->rate;
3149 u64 tsf = stats->mactime;
3150 __le16 phy_flags_hw = cpu_to_le16(phy_flags);
3151
3152	/* Drop frames too large to fit in the rx buffer behind the radiotap header */
3153 if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) {
3154 IWL_DEBUG_DROP("Dropping too large packet in monitor\n");
3155 return;
3156 }
3157
3158	/* copy the frame data to where it will sit after the radiotap header */
3159 iwl_rt = (void *)rxb->skb->data;
3160 memmove(iwl_rt->payload, data, len);
3161
3162 iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3163 iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */
3164
3165	/* length of the full radiotap header; the frame data is not included */
3166 iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt));
3167
3168 /* Set the size of the skb to the size of the frame */
3169 skb_put(rxb->skb, sizeof(*iwl_rt) + len);
3170
3171 /* Big bitfield of all the fields we provide in radiotap */
3172 iwl_rt->rt_hdr.it_present =
3173 cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3174 (1 << IEEE80211_RADIOTAP_FLAGS) |
3175 (1 << IEEE80211_RADIOTAP_RATE) |
3176 (1 << IEEE80211_RADIOTAP_CHANNEL) |
3177 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3178 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3179 (1 << IEEE80211_RADIOTAP_ANTENNA));
3180
3181 /* Zero the flags, we'll add to them as we go */
3182 iwl_rt->rt_flags = 0;
3183
3184 iwl_rt->rt_tsf = cpu_to_le64(tsf);
3185
3186 /* Convert to dBm */
3187 iwl_rt->rt_dbmsignal = signal;
3188 iwl_rt->rt_dbmnoise = noise;
3189
3190 /* Convert the channel frequency and set the flags */
3191 iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq);
3192 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
3193 iwl_rt->rt_chbitmask =
3194 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
3195 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
3196 iwl_rt->rt_chbitmask =
3197 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
3198 else /* 802.11g */
3199 iwl_rt->rt_chbitmask =
3200 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));
3201
3202 rate = iwl_rate_index_from_plcp(rate);
3203 if (rate == -1)
3204 iwl_rt->rt_rate = 0;
3205 else
3206 iwl_rt->rt_rate = iwl_rates[rate].ieee;
3207
3208 /* antenna number */
3209 iwl_rt->rt_antenna =
3210 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
3211
3212 /* set the preamble flag if we have it */
3213 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
3214 iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3215
3216 IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
3217
3218 stats->flag |= RX_FLAG_RADIOTAP;
3219 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3220 rxb->skb = NULL;
3221}
3222
3223
3224#define IWL_PACKET_RETRY_TIME HZ
3225
3226int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
3227{
3228 u16 sc = le16_to_cpu(header->seq_ctrl);
3229 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3230 u16 frag = sc & IEEE80211_SCTL_FRAG;
3231 u16 *last_seq, *last_frag;
3232 unsigned long *last_time;
3233
3234 switch (priv->iw_mode) {
3235 case IEEE80211_IF_TYPE_IBSS:{
3236 struct list_head *p;
3237 struct iwl_ibss_seq *entry = NULL;
3238 u8 *mac = header->addr2;
3239 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3240
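			/*
			 * In IBSS mode duplicates are tracked per transmitter:
			 * the low byte of the source MAC indexes a small hash
			 * of iwl_ibss_seq entries, each remembering the last
			 * seq/frag/time seen from that peer.
			 */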
3241 __list_for_each(p, &priv->ibss_mac_hash[index]) {
3242 entry =
3243 list_entry(p, struct iwl_ibss_seq, list);
3244 if (!compare_ether_addr(entry->mac, mac))
3245 break;
3246 }
3247 if (p == &priv->ibss_mac_hash[index]) {
3248 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3249 if (!entry) {
3250 IWL_ERROR
3251 ("Cannot malloc new mac entry\n");
3252 return 0;
3253 }
3254 memcpy(entry->mac, mac, ETH_ALEN);
3255 entry->seq_num = seq;
3256 entry->frag_num = frag;
3257 entry->packet_time = jiffies;
3258 list_add(&entry->list,
3259 &priv->ibss_mac_hash[index]);
3260 return 0;
3261 }
3262 last_seq = &entry->seq_num;
3263 last_frag = &entry->frag_num;
3264 last_time = &entry->packet_time;
3265 break;
3266 }
3267 case IEEE80211_IF_TYPE_STA:
3268 last_seq = &priv->last_seq_num;
3269 last_frag = &priv->last_frag_num;
3270 last_time = &priv->last_packet_time;
3271 break;
3272 default:
3273 return 0;
3274 }
3275 if ((*last_seq == seq) &&
3276 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3277 if (*last_frag == frag)
3278 goto drop;
3279 if (*last_frag + 1 != frag)
3280 /* out-of-order fragment */
3281 goto drop;
3282 } else
3283 *last_seq = seq;
3284
3285 *last_frag = frag;
3286 *last_time = jiffies;
3287 return 0;
3288
3289 drop:
3290 return 1;
3291}
3292
3293#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
3294
3295#include "iwl-spectrum.h"
3296
3297#define BEACON_TIME_MASK_LOW 0x00FFFFFF
3298#define BEACON_TIME_MASK_HIGH 0xFF000000
3299#define TIME_UNIT 1024
3300
3301/*
3302 * Extended beacon time format:
3303 * a time in usec is converted into a 32-bit value in 8:24 format, where
3304 * the high byte is the number of beacon intervals elapsed and the low
3305 * 3 bytes are the time in usec within the current beacon interval.
3306 */
3307
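/*
 * Worked example: with a 100 TU (102400 usec) beacon interval, 250000 usec
 * converts to 2 full beacon intervals (high byte) plus 45200 usec in the
 * low 24 bits.
 */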
3308static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
3309{
3310 u32 quot;
3311 u32 rem;
3312 u32 interval = beacon_interval * 1024;
3313
3314 if (!interval || !usec)
3315 return 0;
3316
3317 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3318 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3319
3320 return (quot << 24) + rem;
3321}
3322
3323/* 'base' is usually the value the uCode reports with each received frame;
3324 * it mirrors the HW timer counter, which counts down.
3325 */
3326
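/*
 * Combine two 8:24 beacon-time values: the beacon counts (high bytes) are
 * added directly, while the low 24-bit parts are combined as a difference
 * (the HW reference counts down, see above); when that difference would
 * wrap, one beacon interval is added back and the beacon count is bumped
 * by one (1 << 24).
 */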
3327static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
3328{
3329 u32 base_low = base & BEACON_TIME_MASK_LOW;
3330 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3331 u32 interval = beacon_interval * TIME_UNIT;
3332 u32 res = (base & BEACON_TIME_MASK_HIGH) +
3333 (addon & BEACON_TIME_MASK_HIGH);
3334
3335 if (base_low > addon_low)
3336 res += base_low - addon_low;
3337 else if (base_low < addon_low) {
3338 res += interval + base_low - addon_low;
3339 res += (1 << 24);
3340 } else
3341 res += (1 << 24);
3342
3343 return cpu_to_le32(res);
3344}
3345
3346static int iwl_get_measurement(struct iwl_priv *priv,
3347 struct ieee80211_measurement_params *params,
3348 u8 type)
3349{
3350 struct iwl_spectrum_cmd spectrum;
3351 struct iwl_rx_packet *res;
3352 struct iwl_host_cmd cmd = {
3353 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3354 .data = (void *)&spectrum,
3355 .meta.flags = CMD_WANT_SKB,
3356 };
3357 u32 add_time = le64_to_cpu(params->start_time);
3358 int rc;
3359 int spectrum_resp_status;
3360 int duration = le16_to_cpu(params->duration);
3361
3362 if (iwl_is_associated(priv))
3363 add_time =
3364 iwl_usecs_to_beacons(
3365 le64_to_cpu(params->start_time) - priv->last_tsf,
3366 le16_to_cpu(priv->rxon_timing.beacon_interval));
3367
3368 memset(&spectrum, 0, sizeof(spectrum));
3369
3370 spectrum.channel_count = cpu_to_le16(1);
3371 spectrum.flags =
3372 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3373 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3374 cmd.len = sizeof(spectrum);
3375 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3376
3377 if (iwl_is_associated(priv))
3378 spectrum.start_time =
3379 iwl_add_beacon_time(priv->last_beacon_time,
3380 add_time,
3381 le16_to_cpu(priv->rxon_timing.beacon_interval));
3382 else
3383 spectrum.start_time = 0;
3384
3385 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3386 spectrum.channels[0].channel = params->channel;
3387 spectrum.channels[0].type = type;
3388 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3389 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3390 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3391
3392 rc = iwl_send_cmd_sync(priv, &cmd);
3393 if (rc)
3394 return rc;
3395
3396 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
3397 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3398 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
3399 rc = -EIO;
3400 }
3401
3402 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3403 switch (spectrum_resp_status) {
3404 case 0: /* Command will be handled */
3405 if (res->u.spectrum.id != 0xff) {
3406 IWL_DEBUG_INFO
3407 ("Replaced existing measurement: %d\n",
3408 res->u.spectrum.id);
3409 priv->measurement_status &= ~MEASUREMENT_READY;
3410 }
3411 priv->measurement_status |= MEASUREMENT_ACTIVE;
3412 rc = 0;
3413 break;
3414
3415 case 1: /* Command will not be handled */
3416 rc = -EAGAIN;
3417 break;
3418 }
3419
3420 dev_kfree_skb_any(cmd.meta.u.skb);
3421
3422 return rc;
3423}
3424#endif
3425
3426static void iwl_txstatus_to_ieee(struct iwl_priv *priv,
3427 struct iwl_tx_info *tx_sta)
3428{
3429
3430 tx_sta->status.ack_signal = 0;
3431 tx_sta->status.excessive_retries = 0;
3432 tx_sta->status.queue_length = 0;
3433 tx_sta->status.queue_number = 0;
3434
3435 if (in_interrupt())
3436 ieee80211_tx_status_irqsafe(priv->hw,
3437 tx_sta->skb[0], &(tx_sta->status));
3438 else
3439 ieee80211_tx_status(priv->hw,
3440 tx_sta->skb[0], &(tx_sta->status));
3441
3442 tx_sta->skb[0] = NULL;
3443}
3444
3445/**
3446 * iwl_tx_queue_reclaim - Reclaim Tx queue entries no longer used by the NIC
3447 *
3448 * When the firmware advances its 'R' index, all entries between the old and
3449 * new 'R' index need to be reclaimed.  As a result, some free space forms.
3450 * If there is enough free space (> low mark), wake the Tx queue.
3451 */
3452int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
3453{
3454 struct iwl_tx_queue *txq = &priv->txq[txq_id];
3455 struct iwl_queue *q = &txq->q;
3456 int nfreed = 0;
3457
3458 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3459 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3460 "is out of range [0-%d] %d %d.\n", txq_id,
3461 index, q->n_bd, q->first_empty, q->last_used);
3462 return 0;
3463 }
3464
3465 for (index = iwl_queue_inc_wrap(index, q->n_bd);
3466 q->last_used != index;
3467 q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) {
3468 if (txq_id != IWL_CMD_QUEUE_NUM) {
3469 iwl_txstatus_to_ieee(priv,
3470 &(txq->txb[txq->q.last_used]));
3471 iwl_hw_txq_free_tfd(priv, txq);
3472 } else if (nfreed > 1) {
3473 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
3474 q->first_empty, q->last_used);
3475 queue_work(priv->workqueue, &priv->restart);
3476 }
3477 nfreed++;
3478 }
3479
3480 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
3481 (txq_id != IWL_CMD_QUEUE_NUM) &&
3482 priv->mac80211_registered)
3483 ieee80211_wake_queue(priv->hw, txq_id);
3484
3485
3486 return nfreed;
3487}
3488
3489static int iwl_is_tx_success(u32 status)
3490{
3491 status &= TX_STATUS_MSK;
3492 return (status == TX_STATUS_SUCCESS)
3493 || (status == TX_STATUS_DIRECT_DONE);
3494}
3495
3496/******************************************************************************
3497 *
3498 * Generic RX handler implementations
3499 *
3500 ******************************************************************************/
3501#ifdef CONFIG_IWLWIFI_HT
3502#ifdef CONFIG_IWLWIFI_HT_AGG
3503
3504static inline int iwl_get_ra_sta_id(struct iwl_priv *priv,
3505 struct ieee80211_hdr *hdr)
3506{
3507 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
3508 return IWL_AP_ID;
3509 else {
3510 u8 *da = ieee80211_get_DA(hdr);
3511 return iwl_hw_find_station(priv, da);
3512 }
3513}
3514
3515static struct ieee80211_hdr *iwl_tx_queue_get_hdr(
3516 struct iwl_priv *priv, int txq_id, int idx)
3517{
3518 if (priv->txq[txq_id].txb[idx].skb[0])
3519 return (struct ieee80211_hdr *)priv->txq[txq_id].
3520 txb[idx].skb[0]->data;
3521 return NULL;
3522}
3523
3524static inline u32 iwl_get_scd_ssn(struct iwl_tx_resp *tx_resp)
3525{
3526 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
3527 tx_resp->frame_count);
3528 return le32_to_cpu(*scd_ssn) & MAX_SN;
3529
3530}
3531static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
3532 struct iwl_ht_agg *agg,
3533 struct iwl_tx_resp *tx_resp,
3534 u16 start_idx)
3535{
3536 u32 status;
3537 __le32 *frame_status = &tx_resp->status;
3538 struct ieee80211_tx_status *tx_status = NULL;
3539 struct ieee80211_hdr *hdr = NULL;
3540 int i, sh;
3541 int txq_id, idx;
3542 u16 seq;
3543
3544 if (agg->wait_for_ba)
3545		IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
3546
3547 agg->frame_count = tx_resp->frame_count;
3548 agg->start_idx = start_idx;
3549 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3550 agg->bitmap0 = agg->bitmap1 = 0;
3551
3552 if (agg->frame_count == 1) {
3553		struct iwl_tx_queue *txq;
3554 status = le32_to_cpu(frame_status[0]);
3555
3556 txq_id = agg->txq_id;
3557 txq = &priv->txq[txq_id];
3558 /* FIXME: code repetition */
3559 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
3560 agg->frame_count, agg->start_idx);
3561
3562 tx_status = &(priv->txq[txq_id].txb[txq->q.last_used].status);
3563 tx_status->retry_count = tx_resp->failure_frame;
3564 tx_status->queue_number = status & 0xff;
3565 tx_status->queue_length = tx_resp->bt_kill_count;
3566 tx_status->queue_length |= tx_resp->failure_rts;
3567
3568 tx_status->flags = iwl_is_tx_success(status)?
3569 IEEE80211_TX_STATUS_ACK : 0;
3570 tx_status->control.tx_rate =
3571 iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags);
3572 /* FIXME: code repetition end */
3573
3574 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3575 status & 0xff, tx_resp->failure_frame);
3576 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
3577 iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags));
3578
3579 agg->wait_for_ba = 0;
3580 } else {
3581 u64 bitmap = 0;
3582 int start = agg->start_idx;
3583
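		/*
		 * Walk each frame's status in this aggregate and build a
		 * 64-bit bitmap of the TFD indexes (relative to 'start')
		 * belonging to the burst; entries flagged few-bytes or abort
		 * are skipped.  A non-empty bitmap leaves wait_for_ba set,
		 * i.e. a block-ack is still expected for these frames.
		 */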
3584 for (i = 0; i < agg->frame_count; i++) {
3585 u16 sc;
3586 status = le32_to_cpu(frame_status[i]);
3587 seq = status >> 16;
3588 idx = SEQ_TO_INDEX(seq);
3589 txq_id = SEQ_TO_QUEUE(seq);
3590
3591 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3592 AGG_TX_STATE_ABORT_MSK))
3593 continue;
3594
3595 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3596 agg->frame_count, txq_id, idx);
3597
3598 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
3599
3600 sc = le16_to_cpu(hdr->seq_ctrl);
3601 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3602 IWL_ERROR("BUG_ON idx doesn't match seq control"
3603 " idx=%d, seq_idx=%d, seq=%d\n",
3604 idx, SEQ_TO_SN(sc),
3605 hdr->seq_ctrl);
3606 return -1;
3607 }
3608
3609 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
3610 i, idx, SEQ_TO_SN(sc));
3611
3612 sh = idx - start;
3613 if (sh > 64) {
3614 sh = (start - idx) + 0xff;
3615 bitmap = bitmap << sh;
3616 sh = 0;
3617 start = idx;
3618 } else if (sh < -64)
3619 sh = 0xff - (start - idx);
3620 else if (sh < 0) {
3621 sh = start - idx;
3622 start = idx;
3623 bitmap = bitmap << sh;
3624 sh = 0;
3625 }
3626 bitmap |= (1 << sh);
3627 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3628 start, (u32)(bitmap & 0xFFFFFFFF));
3629 }
3630
3631 agg->bitmap0 = bitmap & 0xFFFFFFFF;
3632 agg->bitmap1 = bitmap >> 32;
3633 agg->start_idx = start;
3634 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3635 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n",
3636 agg->frame_count, agg->start_idx,
3637 agg->bitmap0);
3638
3639 if (bitmap)
3640 agg->wait_for_ba = 1;
3641 }
3642 return 0;
3643}
3644#endif
3645#endif
3646
3647static void iwl_rx_reply_tx(struct iwl_priv *priv,
3648 struct iwl_rx_mem_buffer *rxb)
3649{
3650 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3651 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3652 int txq_id = SEQ_TO_QUEUE(sequence);
3653 int index = SEQ_TO_INDEX(sequence);
3654 struct iwl_tx_queue *txq = &priv->txq[txq_id];
3655 struct ieee80211_tx_status *tx_status;
3656 struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
3657 u32 status = le32_to_cpu(tx_resp->status);
3658#ifdef CONFIG_IWLWIFI_HT
3659#ifdef CONFIG_IWLWIFI_HT_AGG
3660 int tid, sta_id;
3661#endif
3662#endif
3663
3664 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3665 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3666 "is out of range [0-%d] %d %d\n", txq_id,
3667 index, txq->q.n_bd, txq->q.first_empty,
3668 txq->q.last_used);
3669 return;
3670 }
3671
3672#ifdef CONFIG_IWLWIFI_HT
3673#ifdef CONFIG_IWLWIFI_HT_AGG
3674 if (txq->sched_retry) {
3675 const u32 scd_ssn = iwl_get_scd_ssn(tx_resp);
3676 struct ieee80211_hdr *hdr =
3677 iwl_tx_queue_get_hdr(priv, txq_id, index);
3678 struct iwl_ht_agg *agg = NULL;
3679 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3680
3681 if (qc == NULL) {
3682 IWL_ERROR("BUG_ON qc is null!!!!\n");
3683 return;
3684 }
3685
3686 tid = le16_to_cpu(*qc) & 0xf;
3687
3688 sta_id = iwl_get_ra_sta_id(priv, hdr);
3689 if (unlikely(sta_id == IWL_INVALID_STATION)) {
3690			IWL_ERROR("Station is not known\n");
3691 return;
3692 }
3693
3694 agg = &priv->stations[sta_id].tid[tid].agg;
3695
3696 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index);
3697
3698 if ((tx_resp->frame_count == 1) &&
3699 !iwl_is_tx_success(status)) {
3700 /* TODO: send BAR */
3701 }
3702
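		/*
		 * scd_ssn tells us how far the aggregation scheduler has
		 * advanced; reclaim every TFD up to, but not including,
		 * that index.
		 */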
3703 if ((txq->q.last_used != (scd_ssn & 0xff))) {
3704 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
3705 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3706 "%d index %d\n", scd_ssn , index);
3707 iwl_tx_queue_reclaim(priv, txq_id, index);
3708 }
3709 } else {
3710#endif /* CONFIG_IWLWIFI_HT_AGG */
3711#endif /* CONFIG_IWLWIFI_HT */
3712 tx_status = &(txq->txb[txq->q.last_used].status);
3713
3714 tx_status->retry_count = tx_resp->failure_frame;
3715 tx_status->queue_number = status;
3716 tx_status->queue_length = tx_resp->bt_kill_count;
3717 tx_status->queue_length |= tx_resp->failure_rts;
3718
3719 tx_status->flags =
3720 iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
3721
3722 tx_status->control.tx_rate =
3723 iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags);
3724
3725 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
3726 "retries %d\n", txq_id, iwl_get_tx_fail_reason(status),
3727 status, le32_to_cpu(tx_resp->rate_n_flags),
3728 tx_resp->failure_frame);
3729
3730 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3731 if (index != -1)
3732 iwl_tx_queue_reclaim(priv, txq_id, index);
3733#ifdef CONFIG_IWLWIFI_HT
3734#ifdef CONFIG_IWLWIFI_HT_AGG
3735 }
3736#endif /* CONFIG_IWLWIFI_HT_AGG */
3737#endif /* CONFIG_IWLWIFI_HT */
3738
3739 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3740 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3741}
3742
3743
3744static void iwl_rx_reply_alive(struct iwl_priv *priv,
3745 struct iwl_rx_mem_buffer *rxb)
3746{
3747 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3748 struct iwl_alive_resp *palive;
3749 struct delayed_work *pwork;
3750
3751 palive = &pkt->u.alive_frame;
3752
3753 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3754 "0x%01X 0x%01X\n",
3755 palive->is_valid, palive->ver_type,
3756 palive->ver_subtype);
3757
3758 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3759 IWL_DEBUG_INFO("Initialization Alive received.\n");
3760 memcpy(&priv->card_alive_init,
3761 &pkt->u.alive_frame,
3762 sizeof(struct iwl_init_alive_resp));
3763 pwork = &priv->init_alive_start;
3764 } else {
3765 IWL_DEBUG_INFO("Runtime Alive received.\n");
3766 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3767 sizeof(struct iwl_alive_resp));
3768 pwork = &priv->alive_start;
3769 }
3770
3771 /* We delay the ALIVE response by 5ms to
3772 * give the HW RF Kill time to activate... */
3773 if (palive->is_valid == UCODE_VALID_OK)
3774 queue_delayed_work(priv->workqueue, pwork,
3775 msecs_to_jiffies(5));
3776 else
3777 IWL_WARNING("uCode did not respond OK.\n");
3778}
3779
3780static void iwl_rx_reply_add_sta(struct iwl_priv *priv,
3781 struct iwl_rx_mem_buffer *rxb)
3782{
3783 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3784
3785 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3786 return;
3787}
3788
3789static void iwl_rx_reply_error(struct iwl_priv *priv,
3790 struct iwl_rx_mem_buffer *rxb)
3791{
3792 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3793
3794 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3795 "seq 0x%04X ser 0x%08X\n",
3796 le32_to_cpu(pkt->u.err_resp.error_type),
3797 get_cmd_string(pkt->u.err_resp.cmd_id),
3798 pkt->u.err_resp.cmd_id,
3799 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3800 le32_to_cpu(pkt->u.err_resp.error_info));
3801}
3802
3803#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3804
3805static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
3806{
3807 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3808 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
3809 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
3810 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3811 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3812 rxon->channel = csa->channel;
3813 priv->staging_rxon.channel = csa->channel;
3814}
3815
3816static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
3817 struct iwl_rx_mem_buffer *rxb)
3818{
3819#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
3820 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3821 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
3822
3823 if (!report->state) {
3824 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3825 "Spectrum Measure Notification: Start\n");
3826 return;
3827 }
3828
3829 memcpy(&priv->measure_report, report, sizeof(*report));
3830 priv->measurement_status |= MEASUREMENT_READY;
3831#endif
3832}
3833
3834static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
3835 struct iwl_rx_mem_buffer *rxb)
3836{
3837#ifdef CONFIG_IWLWIFI_DEBUG
3838 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3839 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
3840 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3841 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3842#endif
3843}
3844
3845static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3846 struct iwl_rx_mem_buffer *rxb)
3847{
3848 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3849 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3850 "notification for %s:\n",
3851 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3852 iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
3853}
3854
3855static void iwl_bg_beacon_update(struct work_struct *work)
3856{
3857 struct iwl_priv *priv =
3858 container_of(work, struct iwl_priv, beacon_update);
3859 struct sk_buff *beacon;
3860
3861 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
3862 beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL);
3863
3864 if (!beacon) {
3865 IWL_ERROR("update beacon failed\n");
3866 return;
3867 }
3868
3869 mutex_lock(&priv->mutex);
3870 /* new beacon skb is allocated every time; dispose previous.*/
3871 if (priv->ibss_beacon)
3872 dev_kfree_skb(priv->ibss_beacon);
3873
3874 priv->ibss_beacon = beacon;
3875 mutex_unlock(&priv->mutex);
3876
3877 iwl_send_beacon_cmd(priv);
3878}
3879
3880static void iwl_rx_beacon_notif(struct iwl_priv *priv,
3881 struct iwl_rx_mem_buffer *rxb)
3882{
3883#ifdef CONFIG_IWLWIFI_DEBUG
3884 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3885 struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status);
3886 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
3887
3888 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3889 "tsf %d %d rate %d\n",
3890 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3891 beacon->beacon_notify_hdr.failure_frame,
3892 le32_to_cpu(beacon->ibss_mgr_status),
3893 le32_to_cpu(beacon->high_tsf),
3894 le32_to_cpu(beacon->low_tsf), rate);
3895#endif
3896
3897 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
3898 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3899 queue_work(priv->workqueue, &priv->beacon_update);
3900}
3901
3902/* Service response to REPLY_SCAN_CMD (0x80) */
3903static void iwl_rx_reply_scan(struct iwl_priv *priv,
3904 struct iwl_rx_mem_buffer *rxb)
3905{
3906#ifdef CONFIG_IWLWIFI_DEBUG
3907 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3908 struct iwl_scanreq_notification *notif =
3909 (struct iwl_scanreq_notification *)pkt->u.raw;
3910
3911 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3912#endif
3913}
3914
3915/* Service SCAN_START_NOTIFICATION (0x82) */
3916static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
3917 struct iwl_rx_mem_buffer *rxb)
3918{
3919 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3920 struct iwl_scanstart_notification *notif =
3921 (struct iwl_scanstart_notification *)pkt->u.raw;
3922 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3923 IWL_DEBUG_SCAN("Scan start: "
3924 "%d [802.11%s] "
3925 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3926 notif->channel,
3927 notif->band ? "bg" : "a",
3928 notif->tsf_high,
3929 notif->tsf_low, notif->status, notif->beacon_timer);
3930}
3931
3932/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3933static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
3934 struct iwl_rx_mem_buffer *rxb)
3935{
3936 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3937 struct iwl_scanresults_notification *notif =
3938 (struct iwl_scanresults_notification *)pkt->u.raw;
3939
3940 IWL_DEBUG_SCAN("Scan ch.res: "
3941 "%d [802.11%s] "
3942 "(TSF: 0x%08X:%08X) - %d "
3943 "elapsed=%lu usec (%dms since last)\n",
3944 notif->channel,
3945 notif->band ? "bg" : "a",
3946 le32_to_cpu(notif->tsf_high),
3947 le32_to_cpu(notif->tsf_low),
3948 le32_to_cpu(notif->statistics[0]),
3949 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3950 jiffies_to_msecs(elapsed_jiffies
3951 (priv->last_scan_jiffies, jiffies)));
3952
3953 priv->last_scan_jiffies = jiffies;
3954}
3955
3956/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
3957static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
3958 struct iwl_rx_mem_buffer *rxb)
3959{
3960 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3961 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
3962
3963 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3964 scan_notif->scanned_channels,
3965 scan_notif->tsf_low,
3966 scan_notif->tsf_high, scan_notif->status);
3967
3968 /* The HW is no longer scanning */
3969 clear_bit(STATUS_SCAN_HW, &priv->status);
3970
3971 /* The scan completion notification came in, so kill that timer... */
3972 cancel_delayed_work(&priv->scan_check);
3973
3974 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
3975 (priv->scan_bands == 2) ? "2.4" : "5.2",
3976 jiffies_to_msecs(elapsed_jiffies
3977 (priv->scan_pass_start, jiffies)));
3978
3979 /* Remove this scanned band from the list
3980 * of pending bands to scan */
3981 priv->scan_bands--;
3982
3983 /* If a request to abort was given, or the scan did not succeed
3984 * then we reset the scan state machine and terminate,
3985 * re-queuing another scan if one has been requested */
3986 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3987 IWL_DEBUG_INFO("Aborted scan completed.\n");
3988 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
3989 } else {
3990 /* If there are more bands on this scan pass reschedule */
3991 if (priv->scan_bands > 0)
3992 goto reschedule;
3993 }
3994
3995 priv->last_scan_jiffies = jiffies;
3996 IWL_DEBUG_INFO("Setting scan to off\n");
3997
3998 clear_bit(STATUS_SCANNING, &priv->status);
3999
4000 IWL_DEBUG_INFO("Scan took %dms\n",
4001 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
4002
4003 queue_work(priv->workqueue, &priv->scan_completed);
4004
4005 return;
4006
4007reschedule:
4008 priv->scan_pass_start = jiffies;
4009 queue_work(priv->workqueue, &priv->request_scan);
4010}
4011
4012/* Handle notification from uCode that card's power state is changing
4013 * due to software, hardware, or critical temperature RFKILL */
4014static void iwl_rx_card_state_notif(struct iwl_priv *priv,
4015 struct iwl_rx_mem_buffer *rxb)
4016{
4017 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4018 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
4019 unsigned long status = priv->status;
4020
4021 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
4022 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
4023 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
4024
4025 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
4026 RF_CARD_DISABLED)) {
4027
4028 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
4029 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4030
4031 if (!iwl_grab_restricted_access(priv)) {
4032 iwl_write_restricted(
4033 priv, HBUS_TARG_MBX_C,
4034 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4035
4036 iwl_release_restricted_access(priv);
4037 }
4038
4039 if (!(flags & RXON_CARD_DISABLED)) {
4040 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
4041 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4042 if (!iwl_grab_restricted_access(priv)) {
4043 iwl_write_restricted(
4044 priv, HBUS_TARG_MBX_C,
4045 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4046
4047 iwl_release_restricted_access(priv);
4048 }
4049 }
4050
4051 if (flags & RF_CARD_DISABLED) {
4052 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
4053 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
4054 iwl_read32(priv, CSR_UCODE_DRV_GP1);
4055 if (!iwl_grab_restricted_access(priv))
4056 iwl_release_restricted_access(priv);
4057 }
4058 }
4059
4060 if (flags & HW_CARD_DISABLED)
4061 set_bit(STATUS_RF_KILL_HW, &priv->status);
4062 else
4063 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4064
4065
4066 if (flags & SW_CARD_DISABLED)
4067 set_bit(STATUS_RF_KILL_SW, &priv->status);
4068 else
4069 clear_bit(STATUS_RF_KILL_SW, &priv->status);
4070
4071 if (!(flags & RXON_CARD_DISABLED))
4072 iwl_scan_cancel(priv);
4073
4074 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
4075 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
4076 (test_bit(STATUS_RF_KILL_SW, &status) !=
4077 test_bit(STATUS_RF_KILL_SW, &priv->status)))
4078 queue_work(priv->workqueue, &priv->rf_kill);
4079 else
4080 wake_up_interruptible(&priv->wait_command_queue);
4081}
4082
4083/**
4084 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
4085 *
4086 * Set up the RX handlers for each of the reply types sent from the uCode
4087 * to the host.
4088 *
4089 * This function chains into the hardware-specific files for them to set up
4090 * any hardware-specific handlers as well.
4091 */
4092static void iwl_setup_rx_handlers(struct iwl_priv *priv)
4093{
4094 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
4095 priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta;
4096 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
4097 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
4098 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
4099 iwl_rx_spectrum_measure_notif;
4100 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
4101 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
4102 iwl_rx_pm_debug_statistics_notif;
4103 priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
4104
4105 /* NOTE: iwl_rx_statistics is different based on whether
4106 * the build is for the 3945 or the 4965. See the
4107 * corresponding implementation in iwl-XXXX.c
4108 *
4109 * The same handler is used for both the REPLY to a
4110 * discrete statistics request from the host as well as
4111 * for the periodic statistics notification from the uCode
4112 */
4113 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics;
4114 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics;
4115
4116 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
4117 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
4118 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
4119 iwl_rx_scan_results_notif;
4120 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
4121 iwl_rx_scan_complete_notif;
4122 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
4123 priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx;
4124
4125 /* Setup hardware specific Rx handlers */
4126 iwl_hw_rx_handler_setup(priv);
4127}
4128
4129/**
4130 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
4131 * @rxb: Rx buffer to reclaim
4132 *
4133 * If an Rx buffer has an async callback associated with it the callback
4134 * will be executed. The attached skb (if present) will only be freed
4135 * if the callback returns 1
4136 */
4137static void iwl_tx_cmd_complete(struct iwl_priv *priv,
4138 struct iwl_rx_mem_buffer *rxb)
4139{
4140 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
4141 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4142 int txq_id = SEQ_TO_QUEUE(sequence);
4143 int index = SEQ_TO_INDEX(sequence);
4144 int huge = sequence & SEQ_HUGE_FRAME;
4145 int cmd_index;
4146 struct iwl_cmd *cmd;
4147
4148	/* If a Tx command is being handled and it isn't in the actual
4149	 * command queue then a command routing bug has been introduced
4150	 * in the queue management code. */
4151 if (txq_id != IWL_CMD_QUEUE_NUM)
4152 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
4153 txq_id, pkt->hdr.cmd);
4154 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
4155
4156 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
4157 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
4158
4159 /* Input error checking is done when commands are added to queue. */
4160 if (cmd->meta.flags & CMD_WANT_SKB) {
4161 cmd->meta.source->u.skb = rxb->skb;
4162 rxb->skb = NULL;
4163 } else if (cmd->meta.u.callback &&
4164 !cmd->meta.u.callback(priv, cmd, rxb->skb))
4165 rxb->skb = NULL;
4166
4167 iwl_tx_queue_reclaim(priv, txq_id, index);
4168
4169 if (!(cmd->meta.flags & CMD_ASYNC)) {
4170 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4171 wake_up_interruptible(&priv->wait_command_queue);
4172 }
4173}
4174
4175/************************** RX-FUNCTIONS ****************************/
4176/*
4177 * Rx theory of operation
4178 *
4179 * The host allocates 32 DMA target addresses and passes the host address
4180 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4181 * 0 to 31
4182 *
4183 * Rx Queue Indexes
4184 * The host/firmware share two index registers for managing the Rx buffers.
4185 *
4186 * The READ index maps to the first position that the firmware may be writing
4187 * to -- the driver can read up to (but not including) this position and get
4188 * good data.
4189 * The READ index is managed by the firmware once the card is enabled.
4190 *
4191 * The WRITE index maps to the last position the driver has read from -- the
4192 * position preceding WRITE is the last slot into which the firmware can place a packet.
4193 *
4194 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4195 * WRITE = READ.
4196 *
4197 * During initialization the host sets up the READ queue position to the first
4198 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4199 *
4200 * When the firmware places a packet in a buffer it will advance the READ index
4201 * and fire the RX interrupt. The driver can then query the READ index and
4202 * process as many packets as possible, moving the WRITE index forward as it
4203 * resets the Rx queue buffers with new memory.
4204 *
4205 * The management in the driver is as follows:
4206 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
4207 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4208 * to replenish the iwl->rxq->rx_free.
4209 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
4210 * iwl->rxq is replenished and the READ INDEX is updated (updating the
4211 * 'processed' and 'read' driver indexes as well)
4212 * + A received packet is processed and handed to the kernel network stack,
4213 * detached from the iwl->rxq. The driver 'processed' index is updated.
4214 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
4215 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
4216 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
4217 * were enough free buffers and RX_STALLED is set it is cleared.
4218 *
4219 *
4220 * Driver sequence:
4221 *
4222 * iwl_rx_queue_alloc() Allocates rx_free
4223 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
4224 * iwl_rx_queue_restock
4225 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
4226 * queue, updates firmware pointers, and updates
4227 * the WRITE index. If insufficient rx_free buffers
4228 * are available, schedules iwl_rx_replenish
4229 *
4230 * -- enable interrupts --
4231 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
4232 * READ INDEX, detaching the SKB from the pool.
4233 * Moves the packet buffer from queue to rx_used.
4234 * Calls iwl_rx_queue_restock to refill any empty
4235 * slots.
4236 * ...
4237 *
4238 */
4239
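/* Worked example of the restock-space arithmetic used below (illustrative
 * only; assumes RX_QUEUE_SIZE == 256):
 *   read = 8,  write = 10  ->  (8 - 10 + 256) - 2 = 252 slots restockable
 *   read = 10, write = 8   ->  (10 - 8)       - 2 =   0 slots restockable
 * The two-slot margin keeps a full queue from looking identical to an
 * empty one, matching the WRITE == READ / WRITE == READ - 1 rule above.
 */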
4240/**
4241 * iwl_rx_queue_space - Return number of free slots available in queue.
4242 */
4243static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
4244{
4245 int s = q->read - q->write;
4246 if (s <= 0)
4247 s += RX_QUEUE_SIZE;
4248	/* keep some headroom so a full queue cannot be mistaken for an empty one */
4249 s -= 2;
4250 if (s < 0)
4251 s = 0;
4252 return s;
4253}
4254
4255/**
4256 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
4257 *
4258 * NOTE: This function has 3945 and 4965 specific code sections
4259 * but is declared in base due to the majority of the
4260 * implementation being the same (only a numeric constant is
4261 * different)
4262 *
4263 */
4264int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
4265{
4266 u32 reg = 0;
4267 int rc = 0;
4268 unsigned long flags;
4269
4270 spin_lock_irqsave(&q->lock, flags);
4271
4272 if (q->need_update == 0)
4273 goto exit_unlock;
4274
4275 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4276 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4277
4278 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4279 iwl_set_bit(priv, CSR_GP_CNTRL,
4280 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4281 goto exit_unlock;
4282 }
4283
4284 rc = iwl_grab_restricted_access(priv);
4285 if (rc)
4286 goto exit_unlock;
4287
4288 iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR,
4289 q->write & ~0x7);
4290 iwl_release_restricted_access(priv);
4291 } else
4292 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
4293
4294
4295 q->need_update = 0;
4296
4297 exit_unlock:
4298 spin_unlock_irqrestore(&q->lock, flags);
4299 return rc;
4300}
4301
4302/**
4303 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer.
4304 *
4305 * NOTE: This function has 3945 and 4965 specific code paths in it.
4306 */
4307static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
4308 dma_addr_t dma_addr)
4309{
4310 return cpu_to_le32((u32)(dma_addr >> 8));
4311}
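/* For illustration: a DMA address of 0x12345678 becomes the read buffer
 * descriptor value 0x00123456 after the shift above, i.e. the uCode is
 * handed the physical address in units of 256 bytes.  (The "units of 256
 * bytes" reading is inferred from the >> 8, not from documentation.)
 */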
4312
4313
4314/**
4315 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
4316 *
4317 * If there are slots in the RX queue that need to be restocked,
4318 * and we have free pre-allocated buffers, fill the ranks as much
4319 * as we can pulling from rx_free.
4320 *
4321 * This moves the 'write' index forward to catch up with 'processed', and
4322 * also updates the memory address in the firmware to reference the new
4323 * target buffer.
4324 */
4325int iwl_rx_queue_restock(struct iwl_priv *priv)
4326{
4327 struct iwl_rx_queue *rxq = &priv->rxq;
4328 struct list_head *element;
4329 struct iwl_rx_mem_buffer *rxb;
4330 unsigned long flags;
4331 int write, rc;
4332
4333 spin_lock_irqsave(&rxq->lock, flags);
4334 write = rxq->write & ~0x7;
4335 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
4336 element = rxq->rx_free.next;
4337 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4338 list_del(element);
4339 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
4340 rxq->queue[rxq->write] = rxb;
4341 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4342 rxq->free_count--;
4343 }
4344 spin_unlock_irqrestore(&rxq->lock, flags);
4345 /* If the pre-allocated buffer pool is dropping low, schedule to
4346 * refill it */
4347 if (rxq->free_count <= RX_LOW_WATERMARK)
4348 queue_work(priv->workqueue, &priv->rx_replenish);
4349
4350
4351 /* If we've added more space for the firmware to place data, tell it */
4352 if ((write != (rxq->write & ~0x7))
4353 || (abs(rxq->write - rxq->read) > 7)) {
4354 spin_lock_irqsave(&rxq->lock, flags);
4355 rxq->need_update = 1;
4356 spin_unlock_irqrestore(&rxq->lock, flags);
4357 rc = iwl_rx_queue_update_write_ptr(priv, rxq);
4358 if (rc)
4359 return rc;
4360 }
4361
4362 return 0;
4363}
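/* Note on the "& ~0x7" masking above (inferred from the code, not from
 * documentation): the write pointer handed to the firmware in
 * iwl_rx_queue_update_write_ptr() is always rounded down to a multiple
 * of 8, so the device is only told about new buffers when an 8-descriptor
 * boundary is crossed or the read/write gap grows beyond 7 entries.
 */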
4364
4365/**
4366 * iwl_rx_replenish - Move all used packets from rx_used to rx_free
4367 *
4368 * When moving to rx_free an SKB is allocated for the slot.
4369 *
4370 * Also restock the Rx queue via iwl_rx_queue_restock.
4371 * This is called as a scheduled work item (except during initialization)
4372 */
4373void iwl_rx_replenish(void *data)
4374{
4375 struct iwl_priv *priv = data;
4376 struct iwl_rx_queue *rxq = &priv->rxq;
4377 struct list_head *element;
4378 struct iwl_rx_mem_buffer *rxb;
4379 unsigned long flags;
4380 spin_lock_irqsave(&rxq->lock, flags);
4381 while (!list_empty(&rxq->rx_used)) {
4382 element = rxq->rx_used.next;
4383 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4384 rxb->skb =
4385 alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
4386 if (!rxb->skb) {
4387 if (net_ratelimit())
4388 printk(KERN_CRIT DRV_NAME
4389 ": Can not allocate SKB buffers\n");
4390 /* We don't reschedule replenish work here -- we will
4391 * call the restock method and if it still needs
4392 * more buffers it will schedule replenish */
4393 break;
4394 }
4395 priv->alloc_rxb_skb++;
4396 list_del(element);
4397 rxb->dma_addr =
4398 pci_map_single(priv->pci_dev, rxb->skb->data,
4399 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4400 list_add_tail(&rxb->list, &rxq->rx_free);
4401 rxq->free_count++;
4402 }
4403 spin_unlock_irqrestore(&rxq->lock, flags);
4404
4405 spin_lock_irqsave(&priv->lock, flags);
4406 iwl_rx_queue_restock(priv);
4407 spin_unlock_irqrestore(&priv->lock, flags);
4408}
4409
4410/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4411 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4412 * This free routine walks the list of POOL entries and if SKB is set to
4413 * non-NULL it is unmapped and freed.
4414 */
4415void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
4416{
4417 int i;
4418 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4419 if (rxq->pool[i].skb != NULL) {
4420 pci_unmap_single(priv->pci_dev,
4421 rxq->pool[i].dma_addr,
4422 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4423 dev_kfree_skb(rxq->pool[i].skb);
4424 }
4425 }
4426
4427 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4428 rxq->dma_addr);
4429 rxq->bd = NULL;
4430}
4431
4432int iwl_rx_queue_alloc(struct iwl_priv *priv)
4433{
4434 struct iwl_rx_queue *rxq = &priv->rxq;
4435 struct pci_dev *dev = priv->pci_dev;
4436 int i;
4437
4438 spin_lock_init(&rxq->lock);
4439 INIT_LIST_HEAD(&rxq->rx_free);
4440 INIT_LIST_HEAD(&rxq->rx_used);
4441 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4442 if (!rxq->bd)
4443 return -ENOMEM;
4444 /* Fill the rx_used queue with _all_ of the Rx buffers */
4445 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4446 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4447 /* Set us so that we have processed and used all buffers, but have
4448 * not restocked the Rx queue with fresh buffers */
4449 rxq->read = rxq->write = 0;
4450 rxq->free_count = 0;
4451 rxq->need_update = 0;
4452 return 0;
4453}
4454
4455void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
4456{
4457 unsigned long flags;
4458 int i;
4459 spin_lock_irqsave(&rxq->lock, flags);
4460 INIT_LIST_HEAD(&rxq->rx_free);
4461 INIT_LIST_HEAD(&rxq->rx_used);
4462 /* Fill the rx_used queue with _all_ of the Rx buffers */
4463 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4464 /* In the reset function, these buffers may have been allocated
4465 * to an SKB, so we need to unmap and free potential storage */
4466 if (rxq->pool[i].skb != NULL) {
4467 pci_unmap_single(priv->pci_dev,
4468 rxq->pool[i].dma_addr,
4469 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4470 priv->alloc_rxb_skb--;
4471 dev_kfree_skb(rxq->pool[i].skb);
4472 rxq->pool[i].skb = NULL;
4473 }
4474 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4475 }
4476
4477 /* Set us so that we have processed and used all buffers, but have
4478 * not restocked the Rx queue with fresh buffers */
4479 rxq->read = rxq->write = 0;
4480 rxq->free_count = 0;
4481 spin_unlock_irqrestore(&rxq->lock, flags);
4482}
4483
4484/* Convert linear signal-to-noise ratio into dB */
4485static u8 ratio2dB[100] = {
4486/* 0 1 2 3 4 5 6 7 8 9 */
4487 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4488 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4489 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4490 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4491 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4492 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4493 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4494 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4495 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4496 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
4497};
4498
4499/* Calculates a relative dB value from a ratio of linear
4500 * (i.e. not dB) signal levels.
4501 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
4502int iwl_calc_db_from_ratio(int sig_ratio)
4503{
4504 /* Anything above 1000:1 just report as 60 dB */
4505 if (sig_ratio > 1000)
4506 return 60;
4507
4508 /* Above 100:1, divide by 10 and use table,
4509 * add 20 dB to make up for divide by 10 */
4510 if (sig_ratio > 100)
4511 return (20 + (int)ratio2dB[sig_ratio/10]);
4512
4513 /* We shouldn't see this */
4514 if (sig_ratio < 1)
4515 return 0;
4516
4517 /* Use table for ratios 1:1 - 99:1 */
4518 return (int)ratio2dB[sig_ratio];
4519}
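/* Worked examples for the conversion above (values taken straight from
 * the ratio2dB[] table):
 *   sig_ratio = 30    ->  ratio2dB[30]      = 29 dB
 *   sig_ratio = 250   ->  20 + ratio2dB[25] = 20 + 28 = 48 dB
 *   sig_ratio = 2000  ->  clamped           = 60 dB
 */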
4520
4521#define PERFECT_RSSI (-20) /* dBm */
4522#define WORST_RSSI (-95) /* dBm */
4523#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4524
4525/* Calculate an indication of rx signal quality (a percentage, not dBm!).
4526 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4527 * about formulas used below. */
4528int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
4529{
4530 int sig_qual;
4531 int degradation = PERFECT_RSSI - rssi_dbm;
4532
4533 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4534 * as indicator; formula is (signal dbm - noise dbm).
4535 * SNR at or above 40 is a great signal (100%).
4536 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4537 * Weakest usable signal is usually 10 - 15 dB SNR. */
4538 if (noise_dbm) {
4539 if (rssi_dbm - noise_dbm >= 40)
4540 return 100;
4541 else if (rssi_dbm < noise_dbm)
4542 return 0;
4543 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4544
4545 /* Else use just the signal level.
4546 * This formula is a least squares fit of data points collected and
4547 * compared with a reference system that had a percentage (%) display
4548 * for signal quality. */
4549 } else
4550 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4551 (15 * RSSI_RANGE + 62 * degradation)) /
4552 (RSSI_RANGE * RSSI_RANGE);
4553
4554 if (sig_qual > 100)
4555 sig_qual = 100;
4556 else if (sig_qual < 1)
4557 sig_qual = 0;
4558
4559 return sig_qual;
4560}
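/* Worked examples for the quality indication above (integer arithmetic,
 * RSSI_RANGE is 75 with the PERFECT/WORST values defined above):
 *   rssi = -60 dBm, noise = -85 dBm -> SNR = 25 dB -> (25 * 5) / 2 = 62 %
 *   rssi = -60 dBm, noise unknown   -> degradation = 40, so
 *     (100 * 75 * 75 - 40 * (15 * 75 + 62 * 40)) / (75 * 75) = 74 %
 */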
4561
4562/**
4563 * iwl_rx_handle - Main entry function for receiving responses from the uCode
4564 *
4565 * Uses the priv->rx_handlers callback function array to invoke
4566 * the appropriate handlers, including command responses,
4567 * frame-received notifications, and other notifications.
4568 */
4569static void iwl_rx_handle(struct iwl_priv *priv)
4570{
4571 struct iwl_rx_mem_buffer *rxb;
4572 struct iwl_rx_packet *pkt;
4573 struct iwl_rx_queue *rxq = &priv->rxq;
4574 u32 r, i;
4575 int reclaim;
4576 unsigned long flags;
4577
4578 r = iwl_hw_get_rx_read(priv);
4579 i = rxq->read;
4580
4581 /* Rx interrupt, but nothing sent from uCode */
4582 if (i == r)
4583 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4584
4585 while (i != r) {
4586 rxb = rxq->queue[i];
4587
4588 /* If an RXB doesn't have a queue slot associated with it
4589 * then a bug has been introduced in the queue refilling
4590 * routines -- catch it here */
4591 BUG_ON(rxb == NULL);
4592
4593 rxq->queue[i] = NULL;
4594
4595 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4596 IWL_RX_BUF_SIZE,
4597 PCI_DMA_FROMDEVICE);
4598 pkt = (struct iwl_rx_packet *)rxb->skb->data;
4599
4600 /* Reclaim a command buffer only if this packet is a response
4601 * to a (driver-originated) command.
4602 * If the packet (e.g. Rx frame) originated from uCode,
4603 * there is no command buffer to reclaim.
4604 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4605 * but apparently a few don't get set; catch them here. */
4606 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4607 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4608 (pkt->hdr.cmd != REPLY_4965_RX) &&
4609 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4610 (pkt->hdr.cmd != REPLY_TX);
4611
4612 /* Based on type of command response or notification,
4613 * handle those that need handling via function in
4614 * rx_handlers table. See iwl_setup_rx_handlers() */
4615 if (priv->rx_handlers[pkt->hdr.cmd]) {
4616 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4617 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4618 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4619 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4620 } else {
4621 /* No handling needed */
4622 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4623 "r %d i %d No handler needed for %s, 0x%02x\n",
4624 r, i, get_cmd_string(pkt->hdr.cmd),
4625 pkt->hdr.cmd);
4626 }
4627
4628 if (reclaim) {
4629 /* Invoke any callbacks, transfer the skb to caller,
4630 * and fire off the (possibly) blocking iwl_send_cmd()
4631 * as we reclaim the driver command queue */
4632 if (rxb && rxb->skb)
4633 iwl_tx_cmd_complete(priv, rxb);
4634 else
4635 IWL_WARNING("Claim null rxb?\n");
4636 }
4637
4638 /* For now we just don't re-use anything. We can tweak this
4639 * later to try and re-use notification packets and SKBs that
4640 * fail to Rx correctly */
4641 if (rxb->skb != NULL) {
4642 priv->alloc_rxb_skb--;
4643 dev_kfree_skb_any(rxb->skb);
4644 rxb->skb = NULL;
4645 }
4646
4647 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
4648 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4649 spin_lock_irqsave(&rxq->lock, flags);
4650 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4651 spin_unlock_irqrestore(&rxq->lock, flags);
4652 i = (i + 1) & RX_QUEUE_MASK;
4653 }
4654
4655 /* Backtrack one entry */
4656 priv->rxq.read = i;
4657 iwl_rx_queue_restock(priv);
4658}
4659
4660int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv,
4661 struct iwl_tx_queue *txq)
4662{
4663 u32 reg = 0;
4664 int rc = 0;
4665 int txq_id = txq->q.id;
4666
4667 if (txq->need_update == 0)
4668 return rc;
4669
4670 /* if we're trying to save power */
4671 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4672 /* wake up nic if it's powered down ...
4673 * uCode will wake up, and interrupt us again, so next
4674 * time we'll skip this part. */
4675 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4676
4677 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4678 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
4679 iwl_set_bit(priv, CSR_GP_CNTRL,
4680 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4681 return rc;
4682 }
4683
4684 /* restore this queue's parameters in nic hardware. */
4685 rc = iwl_grab_restricted_access(priv);
4686 if (rc)
4687 return rc;
4688 iwl_write_restricted(priv, HBUS_TARG_WRPTR,
4689 txq->q.first_empty | (txq_id << 8));
4690 iwl_release_restricted_access(priv);
4691
4692 /* else not in power-save mode, uCode will never sleep when we're
4693 * trying to tx (during RFKILL, we're not trying to tx). */
4694 } else
4695 iwl_write32(priv, HBUS_TARG_WRPTR,
4696 txq->q.first_empty | (txq_id << 8));
4697
4698 txq->need_update = 0;
4699
4700 return rc;
4701}
4702
4703#ifdef CONFIG_IWLWIFI_DEBUG
4704static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon)
4705{
4706 IWL_DEBUG_RADIO("RX CONFIG:\n");
4707 iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4708 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4709 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4710 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4711 le32_to_cpu(rxon->filter_flags));
4712 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4713 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4714 rxon->ofdm_basic_rates);
4715 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
4716 IWL_DEBUG_RADIO("u8[6] node_addr: " MAC_FMT "\n",
4717 MAC_ARG(rxon->node_addr));
4718 IWL_DEBUG_RADIO("u8[6] bssid_addr: " MAC_FMT "\n",
4719 MAC_ARG(rxon->bssid_addr));
4720 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4721}
4722#endif
4723
4724static void iwl_enable_interrupts(struct iwl_priv *priv)
4725{
4726 IWL_DEBUG_ISR("Enabling interrupts\n");
4727 set_bit(STATUS_INT_ENABLED, &priv->status);
4728 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
4729}
4730
4731static inline void iwl_disable_interrupts(struct iwl_priv *priv)
4732{
4733 clear_bit(STATUS_INT_ENABLED, &priv->status);
4734
4735 /* disable interrupts from uCode/NIC to host */
4736 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
4737
4738 /* acknowledge/clear/reset any interrupts still pending
4739 * from uCode or flow handler (Rx/Tx DMA) */
4740 iwl_write32(priv, CSR_INT, 0xffffffff);
4741 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
4742 IWL_DEBUG_ISR("Disabled interrupts\n");
4743}
4744
4745static const char *desc_lookup(int i)
4746{
4747 switch (i) {
4748 case 1:
4749 return "FAIL";
4750 case 2:
4751 return "BAD_PARAM";
4752 case 3:
4753 return "BAD_CHECKSUM";
4754 case 4:
4755 return "NMI_INTERRUPT";
4756 case 5:
4757 return "SYSASSERT";
4758 case 6:
4759 return "FATAL_ERROR";
4760 }
4761
4762 return "UNKNOWN";
4763}
4764
4765#define ERROR_START_OFFSET (1 * sizeof(u32))
4766#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4767
4768static void iwl_dump_nic_error_log(struct iwl_priv *priv)
4769{
4770 u32 data2, line;
4771 u32 desc, time, count, base, data1;
4772 u32 blink1, blink2, ilink1, ilink2;
4773 int rc;
4774
4775 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4776
4777 if (!iwl_hw_valid_rtc_data_addr(base)) {
4778 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4779 return;
4780 }
4781
4782 rc = iwl_grab_restricted_access(priv);
4783 if (rc) {
4784 IWL_WARNING("Can not read from adapter at this time.\n");
4785 return;
4786 }
4787
4788 count = iwl_read_restricted_mem(priv, base);
4789
4790 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4791 IWL_ERROR("Start IWL Error Log Dump:\n");
4792 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4793 priv->status, priv->config, count);
4794 }
4795
4796 desc = iwl_read_restricted_mem(priv, base + 1 * sizeof(u32));
4797 blink1 = iwl_read_restricted_mem(priv, base + 3 * sizeof(u32));
4798 blink2 = iwl_read_restricted_mem(priv, base + 4 * sizeof(u32));
4799 ilink1 = iwl_read_restricted_mem(priv, base + 5 * sizeof(u32));
4800 ilink2 = iwl_read_restricted_mem(priv, base + 6 * sizeof(u32));
4801 data1 = iwl_read_restricted_mem(priv, base + 7 * sizeof(u32));
4802 data2 = iwl_read_restricted_mem(priv, base + 8 * sizeof(u32));
4803 line = iwl_read_restricted_mem(priv, base + 9 * sizeof(u32));
4804 time = iwl_read_restricted_mem(priv, base + 11 * sizeof(u32));
4805
4806 IWL_ERROR("Desc Time "
4807 "data1 data2 line\n");
4808 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4809 desc_lookup(desc), desc, time, data1, data2, line);
4810 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4811 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4812 ilink1, ilink2);
4813
4814 iwl_release_restricted_access(priv);
4815}
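/* For reference, the SRAM layout read above (offsets in u32 words from the
 * error_event_table_ptr base; restated from the reads themselves, not from
 * a published uCode interface description):
 *   +0 count, +1 desc, +3 blink1, +4 blink2, +5 ilink1, +6 ilink2,
 *   +7 data1, +8 data2, +9 line, +11 time
 */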
4816
4817#define EVENT_START_OFFSET (4 * sizeof(u32))
4818
4819/**
4820 * iwl_print_event_log - Dump a range of the uCode event log to syslog
4821 *
4822 * NOTE: Must be called with iwl_grab_restricted_access() already obtained!
4823 */
4824static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
4825 u32 num_events, u32 mode)
4826{
4827 u32 i;
4828 u32 base; /* SRAM byte address of event log header */
4829 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4830 u32 ptr; /* SRAM byte address of log data */
4831 u32 ev, time, data; /* event log data */
4832
4833 if (num_events == 0)
4834 return;
4835
4836 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4837
4838 if (mode == 0)
4839 event_size = 2 * sizeof(u32);
4840 else
4841 event_size = 3 * sizeof(u32);
4842
4843 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4844
4845 /* "time" is actually "data" for mode 0 (no timestamp).
4846 * place event id # at far right for easier visual parsing. */
4847 for (i = 0; i < num_events; i++) {
4848 ev = iwl_read_restricted_mem(priv, ptr);
4849 ptr += sizeof(u32);
4850 time = iwl_read_restricted_mem(priv, ptr);
4851 ptr += sizeof(u32);
4852 if (mode == 0)
4853 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4854 else {
4855 data = iwl_read_restricted_mem(priv, ptr);
4856 ptr += sizeof(u32);
4857 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4858 }
4859 }
4860}
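/* Event log layout implied by the reads above: a 4-word header (capacity,
 * mode, num_wraps, next_entry -- see iwl_dump_nic_event_log below),
 * followed by fixed-size entries of two u32s (event id, data) in mode 0,
 * or three u32s (event id, timestamp, data) in any other mode.
 */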
4861
4862static void iwl_dump_nic_event_log(struct iwl_priv *priv)
4863{
4864 int rc;
4865 u32 base; /* SRAM byte address of event log header */
4866 u32 capacity; /* event log capacity in # entries */
4867 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4868 u32 num_wraps; /* # times uCode wrapped to top of log */
4869 u32 next_entry; /* index of next entry to be written by uCode */
4870 u32 size; /* # entries that we'll print */
4871
4872 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4873 if (!iwl_hw_valid_rtc_data_addr(base)) {
4874 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4875 return;
4876 }
4877
4878 rc = iwl_grab_restricted_access(priv);
4879 if (rc) {
4880 IWL_WARNING("Can not read from adapter at this time.\n");
4881 return;
4882 }
4883
4884 /* event log header */
4885 capacity = iwl_read_restricted_mem(priv, base);
4886 mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32)));
4887 num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32)));
4888 next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32)));
4889
4890 size = num_wraps ? capacity : next_entry;
4891
4892 /* bail out if nothing in log */
4893 if (size == 0) {
4894 IWL_ERROR("Start IPW Event Log Dump: nothing in log\n");
4895 iwl_release_restricted_access(priv);
4896 return;
4897 }
4898
4899 IWL_ERROR("Start IPW Event Log Dump: display count %d, wraps %d\n",
4900 size, num_wraps);
4901
4902 /* if uCode has wrapped back to top of log, start at the oldest entry,
4903	 * i.e. the next one that uCode would fill. */
4904 if (num_wraps)
4905 iwl_print_event_log(priv, next_entry,
4906 capacity - next_entry, mode);
4907
4908 /* (then/else) start at top of log */
4909 iwl_print_event_log(priv, 0, next_entry, mode);
4910
4911 iwl_release_restricted_access(priv);
4912}
4913
4914/**
4915 * iwl_irq_handle_error - called for HW or SW error interrupt from card
4916 */
4917static void iwl_irq_handle_error(struct iwl_priv *priv)
4918{
4919 /* Set the FW error flag -- cleared on iwl_down */
4920 set_bit(STATUS_FW_ERROR, &priv->status);
4921
4922 /* Cancel currently queued command. */
4923 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4924
4925#ifdef CONFIG_IWLWIFI_DEBUG
4926 if (iwl_debug_level & IWL_DL_FW_ERRORS) {
4927 iwl_dump_nic_error_log(priv);
4928 iwl_dump_nic_event_log(priv);
4929 iwl_print_rx_config_cmd(&priv->staging_rxon);
4930 }
4931#endif
4932
4933 wake_up_interruptible(&priv->wait_command_queue);
4934
4935 /* Keep the restart process from trying to send host
4936	 * commands by clearing the READY status bit */
4937 clear_bit(STATUS_READY, &priv->status);
4938
4939 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4940 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
4941 "Restarting adapter due to uCode error.\n");
4942
4943 if (iwl_is_associated(priv)) {
4944 memcpy(&priv->recovery_rxon, &priv->active_rxon,
4945 sizeof(priv->recovery_rxon));
4946 priv->error_recovering = 1;
4947 }
4948 queue_work(priv->workqueue, &priv->restart);
4949 }
4950}
4951
4952static void iwl_error_recovery(struct iwl_priv *priv)
4953{
4954 unsigned long flags;
4955
4956 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
4957 sizeof(priv->staging_rxon));
4958 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4959 iwl_commit_rxon(priv);
4960
4961 iwl_rxon_add_station(priv, priv->bssid, 1);
4962
4963 spin_lock_irqsave(&priv->lock, flags);
4964 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
4965 priv->error_recovering = 0;
4966 spin_unlock_irqrestore(&priv->lock, flags);
4967}
4968
4969static void iwl_irq_tasklet(struct iwl_priv *priv)
4970{
4971 u32 inta, handled = 0;
4972 u32 inta_fh;
4973 unsigned long flags;
4974#ifdef CONFIG_IWLWIFI_DEBUG
4975 u32 inta_mask;
4976#endif
4977
4978 spin_lock_irqsave(&priv->lock, flags);
4979
4980 /* Ack/clear/reset pending uCode interrupts.
4981 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4982 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
4983 inta = iwl_read32(priv, CSR_INT);
4984 iwl_write32(priv, CSR_INT, inta);
4985
4986 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4987 * Any new interrupts that happen after this, either while we're
4988 * in this tasklet, or later, will show up in next ISR/tasklet. */
4989 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4990 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
4991
4992#ifdef CONFIG_IWLWIFI_DEBUG
4993 if (iwl_debug_level & IWL_DL_ISR) {
4994 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
4995 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4996 inta, inta_mask, inta_fh);
4997 }
4998#endif
4999
5000 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
5001 * atomic, make sure that inta covers all the interrupts that
5002 * we've discovered, even if FH interrupt came in just after
5003 * reading CSR_INT. */
5004 if (inta_fh & CSR_FH_INT_RX_MASK)
5005 inta |= CSR_INT_BIT_FH_RX;
5006 if (inta_fh & CSR_FH_INT_TX_MASK)
5007 inta |= CSR_INT_BIT_FH_TX;
5008
5009 /* Now service all interrupt bits discovered above. */
5010 if (inta & CSR_INT_BIT_HW_ERR) {
5011 IWL_ERROR("Microcode HW error detected. Restarting.\n");
5012
5013 /* Tell the device to stop sending interrupts */
5014 iwl_disable_interrupts(priv);
5015
5016 iwl_irq_handle_error(priv);
5017
5018 handled |= CSR_INT_BIT_HW_ERR;
5019
5020 spin_unlock_irqrestore(&priv->lock, flags);
5021
5022 return;
5023 }
5024
5025#ifdef CONFIG_IWLWIFI_DEBUG
5026 if (iwl_debug_level & (IWL_DL_ISR)) {
5027 /* NIC fires this, but we don't use it, redundant with WAKEUP */
5028 if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
5029 IWL_DEBUG_ISR("Microcode started or stopped.\n");
5030
5031 /* Alive notification via Rx interrupt will do the real work */
5032 if (inta & CSR_INT_BIT_ALIVE)
5033 IWL_DEBUG_ISR("Alive interrupt\n");
5034 }
5035#endif
5036 /* Safely ignore these bits for debug checks below */
5037 inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);
5038
5039 /* HW RF KILL switch toggled (4965 only) */
5040 if (inta & CSR_INT_BIT_RF_KILL) {
5041 int hw_rf_kill = 0;
5042 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
5043 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5044 hw_rf_kill = 1;
5045
5046 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
5047 "RF_KILL bit toggled to %s.\n",
5048 hw_rf_kill ? "disable radio":"enable radio");
5049
5050 /* Queue restart only if RF_KILL switch was set to "kill"
5051	 * when we loaded the driver, and is now set to "enable".
5052 * After we're Alive, RF_KILL gets handled by
5053 * iwl_rx_card_state_notif() */
5054 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status))
5055 queue_work(priv->workqueue, &priv->restart);
5056
5057 handled |= CSR_INT_BIT_RF_KILL;
5058 }
5059
5060 /* Chip got too hot and stopped itself (4965 only) */
5061 if (inta & CSR_INT_BIT_CT_KILL) {
5062 IWL_ERROR("Microcode CT kill error detected.\n");
5063 handled |= CSR_INT_BIT_CT_KILL;
5064 }
5065
5066 /* Error detected by uCode */
5067 if (inta & CSR_INT_BIT_SW_ERR) {
5068 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
5069 inta);
5070 iwl_irq_handle_error(priv);
5071 handled |= CSR_INT_BIT_SW_ERR;
5072 }
5073
5074 /* uCode wakes up after power-down sleep */
5075 if (inta & CSR_INT_BIT_WAKEUP) {
5076 IWL_DEBUG_ISR("Wakeup interrupt\n");
5077 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
5078 iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]);
5079 iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]);
5080 iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]);
5081 iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]);
5082 iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]);
5083 iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]);
5084
5085 handled |= CSR_INT_BIT_WAKEUP;
5086 }
5087
5088 /* All uCode command responses, including Tx command responses,
5089 * Rx "responses" (frame-received notification), and other
5090	 * notifications from uCode come through here */
5091 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
5092 iwl_rx_handle(priv);
5093 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
5094 }
5095
5096 if (inta & CSR_INT_BIT_FH_TX) {
5097 IWL_DEBUG_ISR("Tx interrupt\n");
5098 handled |= CSR_INT_BIT_FH_TX;
5099 }
5100
5101 if (inta & ~handled)
5102 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
5103
5104 if (inta & ~CSR_INI_SET_MASK) {
5105 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
5106 inta & ~CSR_INI_SET_MASK);
5107 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
5108 }
5109
5110 /* Re-enable all interrupts */
5111 iwl_enable_interrupts(priv);
5112
5113#ifdef CONFIG_IWLWIFI_DEBUG
5114 if (iwl_debug_level & (IWL_DL_ISR)) {
5115 inta = iwl_read32(priv, CSR_INT);
5116 inta_mask = iwl_read32(priv, CSR_INT_MASK);
5117 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
5118 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
5119 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
5120 }
5121#endif
5122 spin_unlock_irqrestore(&priv->lock, flags);
5123}
5124
5125static irqreturn_t iwl_isr(int irq, void *data)
5126{
5127 struct iwl_priv *priv = data;
5128 u32 inta, inta_mask;
5129 u32 inta_fh;
5130 if (!priv)
5131 return IRQ_NONE;
5132
5133 spin_lock(&priv->lock);
5134
5135 /* Disable (but don't clear!) interrupts here to avoid
5136 * back-to-back ISRs and sporadic interrupts from our NIC.
5137 * If we have something to service, the tasklet will re-enable ints.
5138 * If we *don't* have something, we'll re-enable before leaving here. */
5139 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
5140 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
5141
5142 /* Discover which interrupts are active/pending */
5143 inta = iwl_read32(priv, CSR_INT);
5144 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
5145
5146 /* Ignore interrupt if there's nothing in NIC to service.
5147 * This may be due to IRQ shared with another device,
5148 * or due to sporadic interrupts thrown from our NIC. */
5149 if (!inta && !inta_fh) {
5150 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5151 goto none;
5152 }
5153
5154 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
5155 /* Hardware disappeared */
5156 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
5157 goto none;
5158 }
5159
5160 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5161 inta, inta_mask, inta_fh);
5162
5163 /* iwl_irq_tasklet() will service interrupts and re-enable them */
5164 tasklet_schedule(&priv->irq_tasklet);
5165 spin_unlock(&priv->lock);
5166
5167 return IRQ_HANDLED;
5168
5169 none:
5170 /* re-enable interrupts here since we don't have anything to service. */
5171 iwl_enable_interrupts(priv);
5172 spin_unlock(&priv->lock);
5173 return IRQ_NONE;
5174}
5175
5176/************************** EEPROM BANDS ****************************
5177 *
5178 * The iwl_eeprom_band definitions below provide the mapping from the
5179 * EEPROM contents to the specific channel number supported for each
5180 * band.
5181 *
5182 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
5183 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
5184 * The specific geography and calibration information for that channel
5185 * is contained in the eeprom map itself.
5186 *
5187 * During init, we copy the eeprom information and channel map
5188 * information into priv->channel_info_24/52 and priv->channel_map_24/52
5189 *
5190 * channel_map_24/52 provides the index in the channel_info array for a
5191 * given channel. We have to have two separate maps as there is channel
5192 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
5193 * band_2
5194 *
5195 * A value of 0xff stored in the channel_map indicates that the channel
5196 * is not supported by the hardware at all.
5197 *
5198 * A value of 0xfe in the channel_map indicates that the channel is not
5199 * valid for Tx with the current hardware. This means that
5200 * while the system can tune and receive on a given channel, it may not
5201 * be able to associate or transmit any frames on that
5202 * channel. There is no corresponding channel information for that
5203 * entry.
5204 *
5205 *********************************************************************/
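/* Illustrative sketch of the channel_map lookup described above.  The field
 * names (channel_map_24/52, channel_info_24/52) are taken from the comment;
 * iwl_get_channel_info() further below walks channel_info directly instead,
 * so treat this purely as a reading aid for the 0xff/0xfe sentinel values:
 *
 *	u8 idx = priv->channel_map_24[channel];
 *	if (idx == 0xff)
 *		;	// channel not supported by the hardware at all
 *	else if (idx == 0xfe)
 *		;	// can tune/receive, but not valid for Tx/association
 *	else
 *		ch_info = &priv->channel_info_24[idx];
 */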
5206
5207/* 2.4 GHz */
5208static const u8 iwl_eeprom_band_1[14] = {
5209 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
5210};
5211
5212/* 5.2 GHz bands */
5213static const u8 iwl_eeprom_band_2[] = {
5214 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
5215};
5216
5217static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */
5218 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
5219};
5220
5221static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
5222 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
5223};
5224
5225static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
5226 145, 149, 153, 157, 161, 165
5227};
5228
5229static u8 iwl_eeprom_band_6[] = { /* 2.4 FAT channel */
5230 1, 2, 3, 4, 5, 6, 7
5231};
5232
5233static u8 iwl_eeprom_band_7[] = { /* 5.2 FAT channel */
5234 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
5235};
5236
5237static void iwl_init_band_reference(const struct iwl_priv *priv, int band,
5238 int *eeprom_ch_count,
5239 const struct iwl_eeprom_channel
5240 **eeprom_ch_info,
5241 const u8 **eeprom_ch_index)
5242{
5243 switch (band) {
5244 case 1: /* 2.4GHz band */
5245 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
5246 *eeprom_ch_info = priv->eeprom.band_1_channels;
5247 *eeprom_ch_index = iwl_eeprom_band_1;
5248 break;
5249 case 2: /* 5.2GHz band */
5250 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
5251 *eeprom_ch_info = priv->eeprom.band_2_channels;
5252 *eeprom_ch_index = iwl_eeprom_band_2;
5253 break;
5254 case 3: /* 5.2GHz band */
5255 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
5256 *eeprom_ch_info = priv->eeprom.band_3_channels;
5257 *eeprom_ch_index = iwl_eeprom_band_3;
5258 break;
5259 case 4: /* 5.2GHz band */
5260 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
5261 *eeprom_ch_info = priv->eeprom.band_4_channels;
5262 *eeprom_ch_index = iwl_eeprom_band_4;
5263 break;
5264 case 5: /* 5.2GHz band */
5265 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
5266 *eeprom_ch_info = priv->eeprom.band_5_channels;
5267 *eeprom_ch_index = iwl_eeprom_band_5;
5268 break;
5269 case 6:
5270 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
5271 *eeprom_ch_info = priv->eeprom.band_24_channels;
5272 *eeprom_ch_index = iwl_eeprom_band_6;
5273 break;
5274 case 7:
5275 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
5276 *eeprom_ch_info = priv->eeprom.band_52_channels;
5277 *eeprom_ch_index = iwl_eeprom_band_7;
5278 break;
5279 default:
5280 BUG();
5281 return;
5282 }
5283}
5284
5285const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
5286 int phymode, u16 channel)
5287{
5288 int i;
5289
5290 switch (phymode) {
5291 case MODE_IEEE80211A:
5292 for (i = 14; i < priv->channel_count; i++) {
5293 if (priv->channel_info[i].channel == channel)
5294 return &priv->channel_info[i];
5295 }
5296 break;
5297
5298 case MODE_IEEE80211B:
5299 case MODE_IEEE80211G:
5300 if (channel >= 1 && channel <= 14)
5301 return &priv->channel_info[channel - 1];
5302 break;
5303
5304 }
5305
5306 return NULL;
5307}
5308
5309#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
5310 ? # x " " : "")
5311
5312static int iwl_init_channel_map(struct iwl_priv *priv)
5313{
5314 int eeprom_ch_count = 0;
5315 const u8 *eeprom_ch_index = NULL;
5316 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
5317 int band, ch;
5318 struct iwl_channel_info *ch_info;
5319
5320 if (priv->channel_count) {
5321 IWL_DEBUG_INFO("Channel map already initialized.\n");
5322 return 0;
5323 }
5324
5325 if (priv->eeprom.version < 0x2f) {
5326 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5327 priv->eeprom.version);
5328 return -EINVAL;
5329 }
5330
5331 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5332
5333 priv->channel_count =
5334 ARRAY_SIZE(iwl_eeprom_band_1) +
5335 ARRAY_SIZE(iwl_eeprom_band_2) +
5336 ARRAY_SIZE(iwl_eeprom_band_3) +
5337 ARRAY_SIZE(iwl_eeprom_band_4) +
5338 ARRAY_SIZE(iwl_eeprom_band_5);
5339
5340 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5341
5342 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
5343 priv->channel_count, GFP_KERNEL);
5344 if (!priv->channel_info) {
5345 IWL_ERROR("Could not allocate channel_info\n");
5346 priv->channel_count = 0;
5347 return -ENOMEM;
5348 }
5349
5350 ch_info = priv->channel_info;
5351
5352 /* Loop through the 5 EEPROM bands adding them in order to the
5353	 * channel map we maintain (which contains more information than
5354	 * just what is in the EEPROM) */
5355 for (band = 1; band <= 5; band++) {
5356
5357 iwl_init_band_reference(priv, band, &eeprom_ch_count,
5358 &eeprom_ch_info, &eeprom_ch_index);
5359
5360 /* Loop through each band adding each of the channels */
5361 for (ch = 0; ch < eeprom_ch_count; ch++) {
5362 ch_info->channel = eeprom_ch_index[ch];
5363 ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5364 MODE_IEEE80211A;
5365
5366 /* permanently store EEPROM's channel regulatory flags
5367 * and max power in channel info database. */
5368 ch_info->eeprom = eeprom_ch_info[ch];
5369
5370 /* Copy the run-time flags so they are there even on
5371 * invalid channels */
5372 ch_info->flags = eeprom_ch_info[ch].flags;
5373
5374 if (!(is_channel_valid(ch_info))) {
5375 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5376 "No traffic\n",
5377 ch_info->channel,
5378 ch_info->flags,
5379 is_channel_a_band(ch_info) ?
5380 "5.2" : "2.4");
5381 ch_info++;
5382 continue;
5383 }
5384
5385 /* Initialize regulatory-based run-time data */
5386 ch_info->max_power_avg = ch_info->curr_txpow =
5387 eeprom_ch_info[ch].max_power_avg;
5388 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5389 ch_info->min_power = 0;
5390
5391 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5392 " %ddBm): Ad-Hoc %ssupported\n",
5393 ch_info->channel,
5394 is_channel_a_band(ch_info) ?
5395 "5.2" : "2.4",
5396 CHECK_AND_PRINT(IBSS),
5397 CHECK_AND_PRINT(ACTIVE),
5398 CHECK_AND_PRINT(RADAR),
5399 CHECK_AND_PRINT(WIDE),
5400 CHECK_AND_PRINT(NARROW),
5401 CHECK_AND_PRINT(DFS),
5402 eeprom_ch_info[ch].flags,
5403 eeprom_ch_info[ch].max_power_avg,
5404 ((eeprom_ch_info[ch].
5405 flags & EEPROM_CHANNEL_IBSS)
5406 && !(eeprom_ch_info[ch].
5407 flags & EEPROM_CHANNEL_RADAR))
5408 ? "" : "not ");
5409
5410 /* Set the user_txpower_limit to the highest power
5411 * supported by any channel */
5412 if (eeprom_ch_info[ch].max_power_avg >
5413 priv->user_txpower_limit)
5414 priv->user_txpower_limit =
5415 eeprom_ch_info[ch].max_power_avg;
5416
5417 ch_info++;
5418 }
5419 }
5420
5421 for (band = 6; band <= 7; band++) {
5422 int phymode;
5423 u8 fat_extension_chan;
5424
5425 iwl_init_band_reference(priv, band, &eeprom_ch_count,
5426 &eeprom_ch_info, &eeprom_ch_index);
5427
5428 phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A;
5429 /* Loop through each band adding each of the channels */
5430 for (ch = 0; ch < eeprom_ch_count; ch++) {
5431
5432 if ((band == 6) &&
5433 ((eeprom_ch_index[ch] == 5) ||
5434 (eeprom_ch_index[ch] == 6) ||
5435 (eeprom_ch_index[ch] == 7)))
5436 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
5437 else
5438 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
5439
5440 iwl4965_set_fat_chan_info(priv, phymode,
5441 eeprom_ch_index[ch],
5442 &(eeprom_ch_info[ch]),
5443 fat_extension_chan);
5444
5445 iwl4965_set_fat_chan_info(priv, phymode,
5446 (eeprom_ch_index[ch] + 4),
5447 &(eeprom_ch_info[ch]),
5448 HT_IE_EXT_CHANNEL_BELOW);
5449 }
5450 }
5451
5452 return 0;
5453}
5454
5455/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5456 * sending probe req. This should be set long enough to hear probe responses
5457 * from more than one AP. */
5458#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
5459#define IWL_ACTIVE_DWELL_TIME_52 (10)
5460
5461/* For faster active scanning, scan will move to the next channel if fewer than
5462 * PLCP_QUIET_THRESH packets are heard on this channel within
5463 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
5464 * time if it's a quiet channel (nothing responded to our probe, and there's
5465 * no other traffic).
5466 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5467#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
5468#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
5469
5470/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5471 * Must be set longer than active dwell time.
5472 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5473#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
5474#define IWL_PASSIVE_DWELL_TIME_52 (10)
5475#define IWL_PASSIVE_DWELL_BASE (100)
5476#define IWL_CHANNEL_TUNE_TIME 5
5477
5478static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode)
5479{
5480 if (phymode == MODE_IEEE80211A)
5481 return IWL_ACTIVE_DWELL_TIME_52;
5482 else
5483 return IWL_ACTIVE_DWELL_TIME_24;
5484}
5485
5486static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode)
5487{
5488 u16 active = iwl_get_active_dwell_time(priv, phymode);
5489 u16 passive = (phymode != MODE_IEEE80211A) ?
5490 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5491 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5492
5493 if (iwl_is_associated(priv)) {
5494 /* If we're associated, we clamp the maximum passive
5495 * dwell time to be 98% of the beacon interval (minus
5496 * 2 * channel tune time) */
5497 passive = priv->beacon_int;
5498 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5499 passive = IWL_PASSIVE_DWELL_BASE;
5500 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5501 }
5502
5503 if (passive <= active)
5504 passive = active + 1;
5505
5506 return passive;
5507}
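/* Worked example for the associated case above: with a typical beacon
 * interval of 100, the value is left at 100 (it is only replaced by
 * IWL_PASSIVE_DWELL_BASE when larger than 100 or zero), then scaled:
 *   (100 * 98) / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 98 - 10 = 88
 * so an associated passive scan dwells roughly 88 msec per channel.
 */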
5508
5509static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode,
5510 u8 is_active, u8 direct_mask,
5511 struct iwl_scan_channel *scan_ch)
5512{
5513 const struct ieee80211_channel *channels = NULL;
5514 const struct ieee80211_hw_mode *hw_mode;
5515 const struct iwl_channel_info *ch_info;
5516 u16 passive_dwell = 0;
5517 u16 active_dwell = 0;
5518 int added, i;
5519
5520 hw_mode = iwl_get_hw_mode(priv, phymode);
5521 if (!hw_mode)
5522 return 0;
5523
5524 channels = hw_mode->channels;
5525
5526 active_dwell = iwl_get_active_dwell_time(priv, phymode);
5527 passive_dwell = iwl_get_passive_dwell_time(priv, phymode);
5528
5529 for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5530 if (channels[i].chan ==
5531 le16_to_cpu(priv->active_rxon.channel)) {
5532 if (iwl_is_associated(priv)) {
5533 IWL_DEBUG_SCAN
5534 ("Skipping current channel %d\n",
5535 le16_to_cpu(priv->active_rxon.channel));
5536 continue;
5537 }
5538 } else if (priv->only_active_channel)
5539 continue;
5540
5541 scan_ch->channel = channels[i].chan;
5542
5543 ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel);
5544 if (!is_channel_valid(ch_info)) {
5545 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5546 scan_ch->channel);
5547 continue;
5548 }
5549
5550 if (!is_active || is_channel_passive(ch_info) ||
5551 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5552 scan_ch->type = 0; /* passive */
5553 else
5554 scan_ch->type = 1; /* active */
5555
5556 if (scan_ch->type & 1)
5557 scan_ch->type |= (direct_mask << 1);
5558
5559 if (is_channel_narrow(ch_info))
5560 scan_ch->type |= (1 << 7);
5561
5562 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5563 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5564
5565 /* Set power levels to defaults */
5566 scan_ch->tpc.dsp_atten = 110;
5567 /* scan_pwr_info->tpc.dsp_atten; */
5568
5569 /*scan_pwr_info->tpc.tx_gain; */
5570 if (phymode == MODE_IEEE80211A)
5571 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5572 else {
5573 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5574 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
5575 * power level
5576 scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3;
5577 */
5578 }
5579
5580 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5581 scan_ch->channel,
5582 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5583 (scan_ch->type & 1) ?
5584 active_dwell : passive_dwell);
5585
5586 scan_ch++;
5587 added++;
5588 }
5589
5590 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
5591 return added;
5592}
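/* Summary of the scan_ch->type encoding built above (restated from the
 * code in this function rather than from firmware documentation):
 *   bit 0      - 1 = active scan, 0 = passive
 *   bits 1..n  - direct_mask shifted up by one (direct-probe SSID bits)
 *   bit 7      - set for "narrow" channels
 */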
5593
5594static void iwl_reset_channel_flag(struct iwl_priv *priv)
5595{
5596 int i, j;
5597 for (i = 0; i < 3; i++) {
5598 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5599 for (j = 0; j < hw_mode->num_channels; j++)
5600 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5601 }
5602}
5603
5604static void iwl_init_hw_rates(struct iwl_priv *priv,
5605 struct ieee80211_rate *rates)
5606{
5607 int i;
5608
5609 for (i = 0; i < IWL_RATE_COUNT; i++) {
5610 rates[i].rate = iwl_rates[i].ieee * 5;
5611 rates[i].val = i; /* Rate scaling will work on indexes */
5612 rates[i].val2 = i;
5613 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5614		/* Only OFDM rates have the bits-per-symbol set */
5615 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5616 rates[i].flags |= IEEE80211_RATE_OFDM;
5617 else {
5618 /*
5619 * If CCK 1M then set rate flag to CCK else CCK_2
5620 * which is CCK | PREAMBLE2
5621 */
5622 rates[i].flags |= (iwl_rates[i].plcp == 10) ?
5623 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5624 }
5625
5626 /* Set up which ones are basic rates... */
5627 if (IWL_BASIC_RATES_MASK & (1 << i))
5628 rates[i].flags |= IEEE80211_RATE_BASIC;
5629 }
5630
5631 iwl4965_init_hw_rates(priv, rates);
5632}
5633
5634/**
5635 * iwl_init_geos - Initialize mac80211's geo/channel info based on EEPROM data
5636 */
5637static int iwl_init_geos(struct iwl_priv *priv)
5638{
5639 struct iwl_channel_info *ch;
5640 struct ieee80211_hw_mode *modes;
5641 struct ieee80211_channel *channels;
5642 struct ieee80211_channel *geo_ch;
5643 struct ieee80211_rate *rates;
5644 int i = 0;
5645 enum {
5646 A = 0,
5647 B = 1,
5648 G = 2,
5649 A_11N = 3,
5650 G_11N = 4,
5651 };
5652 int mode_count = 5;
5653
5654 if (priv->modes) {
5655 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5656 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5657 return 0;
5658 }
5659
5660 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5661 GFP_KERNEL);
5662 if (!modes)
5663 return -ENOMEM;
5664
5665 channels = kzalloc(sizeof(struct ieee80211_channel) *
5666 priv->channel_count, GFP_KERNEL);
5667 if (!channels) {
5668 kfree(modes);
5669 return -ENOMEM;
5670 }
5671
5672 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5673 GFP_KERNEL);
5674 if (!rates) {
5675 kfree(modes);
5676 kfree(channels);
5677 return -ENOMEM;
5678 }
5679
5680 /* 0 = 802.11a
5681 * 1 = 802.11b
5682 * 2 = 802.11g (3 and 4 are the 802.11n variants of a and g)
5683 */
5684
5685 /* 5.2GHz channels start after the 2.4GHz channels */
5686 modes[A].mode = MODE_IEEE80211A;
5687 modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
5688 modes[A].rates = rates;
5689 modes[A].num_rates = 8; /* just OFDM */
5690 modes[A].rates = &rates[4];
5691 modes[A].num_channels = 0;
5692
5693 modes[B].mode = MODE_IEEE80211B;
5694 modes[B].channels = channels;
5695 modes[B].rates = rates;
5696 modes[B].num_rates = 4; /* just CCK */
5697 modes[B].num_channels = 0;
5698
5699 modes[G].mode = MODE_IEEE80211G;
5700 modes[G].channels = channels;
5701 modes[G].rates = rates;
5702 modes[G].num_rates = 12; /* OFDM & CCK */
5703 modes[G].num_channels = 0;
5704
5705 modes[G_11N].mode = MODE_IEEE80211G;
5706 modes[G_11N].channels = channels;
5707 modes[G_11N].num_rates = 13; /* OFDM & CCK */
5708 modes[G_11N].rates = rates;
5709 modes[G_11N].num_channels = 0;
5710
5711 modes[A_11N].mode = MODE_IEEE80211A;
5712 modes[A_11N].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
5713 modes[A_11N].rates = &rates[4];
5714 modes[A_11N].num_rates = 9; /* just OFDM */
5715 modes[A_11N].num_channels = 0;
5716
5717 priv->ieee_channels = channels;
5718 priv->ieee_rates = rates;
5719
5720 iwl_init_hw_rates(priv, rates);
5721
5722 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5723 ch = &priv->channel_info[i];
5724
5725 if (!is_channel_valid(ch)) {
5726 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5727 "skipping.\n",
5728 ch->channel, is_channel_a_band(ch) ?
5729 "5.2" : "2.4");
5730 continue;
5731 }
5732
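/* Place the channel in the 5.2GHz (A) or 2.4GHz (B/G) slice of the shared
 * channels[] array; the 11n modes reuse the same lists, so only their
 * channel counters are bumped here. */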
5733 if (is_channel_a_band(ch)) {
5734 geo_ch = &modes[A].channels[modes[A].num_channels++];
5735 modes[A_11N].num_channels++;
5736 } else {
5737 geo_ch = &modes[B].channels[modes[B].num_channels++];
5738 modes[G].num_channels++;
5739 modes[G_11N].num_channels++;
5740 }
5741
5742 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5743 geo_ch->chan = ch->channel;
5744 geo_ch->power_level = ch->max_power_avg;
5745 geo_ch->antenna_max = 0xff;
5746
5747 if (is_channel_valid(ch)) {
5748 geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5749 if (ch->flags & EEPROM_CHANNEL_IBSS)
5750 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5751
5752 if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5753 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5754
5755 if (ch->flags & EEPROM_CHANNEL_RADAR)
5756 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5757
5758 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5759 priv->max_channel_txpower_limit =
5760 ch->max_power_avg;
5761 }
5762
5763 geo_ch->val = geo_ch->flag;
5764 }
5765
5766 if ((modes[A].num_channels == 0) && priv->is_abg) {
5767 printk(KERN_INFO DRV_NAME
5768 ": Incorrectly detected BG card as ABG. Please send "
5769 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5770 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5771 priv->is_abg = 0;
5772 }
5773
5774 printk(KERN_INFO DRV_NAME
5775 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5776 modes[G].num_channels, modes[A].num_channels);
5777
5778 /*
5779 * NOTE: We register these in order of preference -- the
5780 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5781 * a phymode based on rates or AP capabilities, but seems to
5782 * configure it purely on whether the channel being configured
5783 * is supported by a mode -- and the first match is taken.
5784 */
5785
5786 if (modes[G].num_channels)
5787 ieee80211_register_hwmode(priv->hw, &modes[G]);
5788 if (modes[B].num_channels)
5789 ieee80211_register_hwmode(priv->hw, &modes[B]);
5790 if (modes[A].num_channels)
5791 ieee80211_register_hwmode(priv->hw, &modes[A]);
5792
5793 priv->modes = modes;
5794 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5795
5796 return 0;
5797}
5798
5799/******************************************************************************
5800 *
5801 * uCode download functions
5802 *
5803 ******************************************************************************/
5804
5805static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
5806{
5807 if (priv->ucode_code.v_addr != NULL) {
5808 pci_free_consistent(priv->pci_dev,
5809 priv->ucode_code.len,
5810 priv->ucode_code.v_addr,
5811 priv->ucode_code.p_addr);
5812 priv->ucode_code.v_addr = NULL;
5813 }
5814 if (priv->ucode_data.v_addr != NULL) {
5815 pci_free_consistent(priv->pci_dev,
5816 priv->ucode_data.len,
5817 priv->ucode_data.v_addr,
5818 priv->ucode_data.p_addr);
5819 priv->ucode_data.v_addr = NULL;
5820 }
5821 if (priv->ucode_data_backup.v_addr != NULL) {
5822 pci_free_consistent(priv->pci_dev,
5823 priv->ucode_data_backup.len,
5824 priv->ucode_data_backup.v_addr,
5825 priv->ucode_data_backup.p_addr);
5826 priv->ucode_data_backup.v_addr = NULL;
5827 }
5828 if (priv->ucode_init.v_addr != NULL) {
5829 pci_free_consistent(priv->pci_dev,
5830 priv->ucode_init.len,
5831 priv->ucode_init.v_addr,
5832 priv->ucode_init.p_addr);
5833 priv->ucode_init.v_addr = NULL;
5834 }
5835 if (priv->ucode_init_data.v_addr != NULL) {
5836 pci_free_consistent(priv->pci_dev,
5837 priv->ucode_init_data.len,
5838 priv->ucode_init_data.v_addr,
5839 priv->ucode_init_data.p_addr);
5840 priv->ucode_init_data.v_addr = NULL;
5841 }
5842 if (priv->ucode_boot.v_addr != NULL) {
5843 pci_free_consistent(priv->pci_dev,
5844 priv->ucode_boot.len,
5845 priv->ucode_boot.v_addr,
5846 priv->ucode_boot.p_addr);
5847 priv->ucode_boot.v_addr = NULL;
5848 }
5849}
5850
5851/**
5852 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
5853 * looking at all data.
5854 */
5855static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 * image, u32 len)
5856{
5857 u32 val;
5858 u32 save_len = len;
5859 int rc = 0;
5860 u32 errcnt;
5861
5862 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5863
5864 rc = iwl_grab_restricted_access(priv);
5865 if (rc)
5866 return rc;
5867
5868 iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
5869
5870 errcnt = 0;
5871 for (; len > 0; len -= sizeof(u32), image++) {
5872 /* read data comes through single port, auto-incr addr */
5873 /* NOTE: Use the debugless read so we don't flood kernel log
5874 * if IWL_DL_IO is set */
5875 val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
5876 if (val != le32_to_cpu(*image)) {
5877 IWL_ERROR("uCode INST section is invalid at "
5878 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5879 save_len - len, val, le32_to_cpu(*image));
5880 rc = -EIO;
5881 errcnt++;
5882 if (errcnt >= 20)
5883 break;
5884 }
5885 }
5886
5887 iwl_release_restricted_access(priv);
5888
5889 if (!errcnt)
5890 IWL_DEBUG_INFO
5891 ("ucode image in INSTRUCTION memory is good\n");
5892
5893 return rc;
5894}
5895
5896
5897/**
5898 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
5899 * using sample data 100 bytes apart. If these sample points are good,
5900 * it's a pretty good bet that everything between them is good, too.
5901 */
5902static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
5903{
5904 u32 val;
5905 int rc = 0;
5906 u32 errcnt = 0;
5907 u32 i;
5908
5909 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5910
5911 rc = iwl_grab_restricted_access(priv);
5912 if (rc)
5913 return rc;
5914
5915 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
5916 /* read data comes through single port, auto-incr addr */
5917 /* NOTE: Use the debugless read so we don't flood kernel log
5918 * if IWL_DL_IO is set */
5919 iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR,
5920 i + RTC_INST_LOWER_BOUND);
5921 val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
5922 if (val != le32_to_cpu(*image)) {
5923#if 0 /* Enable this if you want to see details */
5924 IWL_ERROR("uCode INST section is invalid at "
5925 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5926 i, val, *image);
5927#endif
5928 rc = -EIO;
5929 errcnt++;
5930 if (errcnt >= 3)
5931 break;
5932 }
5933 }
5934
5935 iwl_release_restricted_access(priv);
5936
5937 return rc;
5938}
5939
5940
5941/**
5942 * iwl_verify_ucode - determine which instruction image is in SRAM,
5943 * and verify its contents
5944 */
5945static int iwl_verify_ucode(struct iwl_priv *priv)
5946{
5947 __le32 *image;
5948 u32 len;
5949 int rc = 0;
5950
5951 /* Try bootstrap */
5952 image = (__le32 *)priv->ucode_boot.v_addr;
5953 len = priv->ucode_boot.len;
5954 rc = iwl_verify_inst_sparse(priv, image, len);
5955 if (rc == 0) {
5956 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
5957 return 0;
5958 }
5959
5960 /* Try initialize */
5961 image = (__le32 *)priv->ucode_init.v_addr;
5962 len = priv->ucode_init.len;
5963 rc = iwl_verify_inst_sparse(priv, image, len);
5964 if (rc == 0) {
5965 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
5966 return 0;
5967 }
5968
5969 /* Try runtime/protocol */
5970 image = (__le32 *)priv->ucode_code.v_addr;
5971 len = priv->ucode_code.len;
5972 rc = iwl_verify_inst_sparse(priv, image, len);
5973 if (rc == 0) {
5974 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
5975 return 0;
5976 }
5977
5978 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
5979
5980 /* Show first several data entries in instruction SRAM.
5981 * Selection of bootstrap image is arbitrary. */
5982 image = (__le32 *)priv->ucode_boot.v_addr;
5983 len = priv->ucode_boot.len;
5984 rc = iwl_verify_inst_full(priv, image, len);
5985
5986 return rc;
5987}
5988
5989
5990/* check contents of special bootstrap uCode SRAM */
5991static int iwl_verify_bsm(struct iwl_priv *priv)
5992{
5993 __le32 *image = priv->ucode_boot.v_addr;
5994 u32 len = priv->ucode_boot.len;
5995 u32 reg;
5996 u32 val;
5997
5998 IWL_DEBUG_INFO("Begin verify bsm\n");
5999
6000 /* verify BSM SRAM contents */
6001 val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG);
6002 for (reg = BSM_SRAM_LOWER_BOUND;
6003 reg < BSM_SRAM_LOWER_BOUND + len;
6004 reg += sizeof(u32), image++) {
6005 val = iwl_read_restricted_reg(priv, reg);
6006 if (val != le32_to_cpu(*image)) {
6007 IWL_ERROR("BSM uCode verification failed at "
6008 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
6009 BSM_SRAM_LOWER_BOUND,
6010 reg - BSM_SRAM_LOWER_BOUND, len,
6011 val, le32_to_cpu(*image));
6012 return -EIO;
6013 }
6014 }
6015
6016 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
6017
6018 return 0;
6019}
6020
6021/**
6022 * iwl_load_bsm - Load bootstrap instructions
6023 *
6024 * BSM operation:
6025 *
6026 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
6027 * in special SRAM that does not power down during RFKILL. When powering back
6028 * up after power-saving sleeps (or during initial uCode load), the BSM loads
6029 * the bootstrap program into the on-board processor, and starts it.
6030 *
6031 * The bootstrap program loads (via DMA) instructions and data for a new
6032 * program from host DRAM locations indicated by the host driver in the
6033 * BSM_DRAM_* registers. Once the new program is loaded, it starts
6034 * automatically.
6035 *
6036 * When initializing the NIC, the host driver points the BSM to the
6037 * "initialize" uCode image. This uCode sets up some internal data, then
6038 * notifies host via "initialize alive" that it is complete.
6039 *
6040 * The host then replaces the BSM_DRAM_* pointer values to point to the
6041 * normal runtime uCode instructions and a backup uCode data cache buffer
6042 * (filled initially with starting data values for the on-board processor),
6043 * then triggers the "initialize" uCode to load and launch the runtime uCode,
6044 * which begins normal operation.
6045 *
6046 * When doing a power-save shutdown, runtime uCode saves data SRAM into
6047 * the backup data cache in DRAM before SRAM is powered down.
6048 *
6049 * When powering back up, the BSM loads the bootstrap program. This reloads
6050 * the runtime uCode instructions and the backup data cache into SRAM,
6051 * and re-launches the runtime uCode from where it left off.
6052 */
6053static int iwl_load_bsm(struct iwl_priv *priv)
6054{
6055 __le32 *image = priv->ucode_boot.v_addr;
6056 u32 len = priv->ucode_boot.len;
6057 dma_addr_t pinst;
6058 dma_addr_t pdata;
6059 u32 inst_len;
6060 u32 data_len;
6061 int rc;
6062 int i;
6063 u32 done;
6064 u32 reg_offset;
6065
6066 IWL_DEBUG_INFO("Begin load bsm\n");
6067
6068 /* make sure bootstrap program is no larger than BSM's SRAM size */
6069 if (len > IWL_MAX_BSM_SIZE)
6070 return -EINVAL;
6071
6072 /* Tell bootstrap uCode where to find the "Initialize" uCode
6073 * in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965.
6074 * NOTE: iwl_initialize_alive_start() will replace these values,
6075 * after the "initialize" uCode has run, to point to
6076 * runtime/protocol instructions and backup data cache. */
6077 pinst = priv->ucode_init.p_addr >> 4;
6078 pdata = priv->ucode_init_data.p_addr >> 4;
6079 inst_len = priv->ucode_init.len;
6080 data_len = priv->ucode_init_data.len;
6081
6082 rc = iwl_grab_restricted_access(priv);
6083 if (rc)
6084 return rc;
6085
6086 iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst);
6087 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6088 iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
6089 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
6090
6091 /* Fill BSM memory with bootstrap instructions */
6092 for (reg_offset = BSM_SRAM_LOWER_BOUND;
6093 reg_offset < BSM_SRAM_LOWER_BOUND + len;
6094 reg_offset += sizeof(u32), image++)
6095 _iwl_write_restricted_reg(priv, reg_offset,
6096 le32_to_cpu(*image));
6097
6098 rc = iwl_verify_bsm(priv);
6099 if (rc) {
6100 iwl_release_restricted_access(priv);
6101 return rc;
6102 }
6103
6104 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
6105 iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0);
6106 iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG,
6107 RTC_INST_LOWER_BOUND);
6108 iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
6109
6110 /* Load bootstrap code into instruction SRAM now,
6111 * to prepare to load "initialize" uCode */
6112 iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
6113 BSM_WR_CTRL_REG_BIT_START);
6114
6115 /* Wait for load of bootstrap uCode to finish */
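/* (up to 100 polls x 10 usec = roughly 1 msec) */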
6116 for (i = 0; i < 100; i++) {
6117 done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG);
6118 if (!(done & BSM_WR_CTRL_REG_BIT_START))
6119 break;
6120 udelay(10);
6121 }
6122 if (i < 100) {
6123 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
6124 } else {
6125 IWL_ERROR("BSM write did not complete!\n");
6126 return -EIO;
6127 }
6128
6129 /* Enable future boot loads whenever power management unit triggers it
6130 * (e.g. when powering back up after power-save shutdown) */
6131 iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
6132 BSM_WR_CTRL_REG_BIT_START_EN);
6133
6134 iwl_release_restricted_access(priv);
6135
6136 return 0;
6137}
6138
6139static void iwl_nic_start(struct iwl_priv *priv)
6140{
6141 /* Remove all resets to allow NIC to operate */
6142 iwl_write32(priv, CSR_RESET, 0);
6143}
6144
6145/**
6146 * iwl_read_ucode - Read uCode images from disk file.
6147 *
6148 * Copy into buffers for card to fetch via bus-mastering
6149 */
6150static int iwl_read_ucode(struct iwl_priv *priv)
6151{
6152 struct iwl_ucode *ucode;
6153 int rc = 0;
6154 const struct firmware *ucode_raw;
6155 const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode";
6156 u8 *src;
6157 size_t len;
6158 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
6159
6160 /* Ask kernel firmware_class module to get the boot firmware off disk.
6161 * request_firmware() is synchronous, file is in memory on return. */
6162 rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
6163 if (rc < 0) {
6164 IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc);
6165 goto error;
6166 }
6167
6168 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
6169 name, ucode_raw->size);
6170
6171 /* Make sure that we got at least our header! */
6172 if (ucode_raw->size < sizeof(*ucode)) {
6173 IWL_ERROR("File size way too small!\n");
6174 rc = -EINVAL;
6175 goto err_release;
6176 }
6177
6178 /* Data from ucode file: header followed by uCode images */
6179 ucode = (void *)ucode_raw->data;
6180
6181 ver = le32_to_cpu(ucode->ver);
6182 inst_size = le32_to_cpu(ucode->inst_size);
6183 data_size = le32_to_cpu(ucode->data_size);
6184 init_size = le32_to_cpu(ucode->init_size);
6185 init_data_size = le32_to_cpu(ucode->init_data_size);
6186 boot_size = le32_to_cpu(ucode->boot_size);
6187
6188 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
6189 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
6190 inst_size);
6191 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
6192 data_size);
6193 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
6194 init_size);
6195 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
6196 init_data_size);
6197 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
6198 boot_size);
6199
6200 /* Verify size of file vs. image size info in file's header */
6201 if (ucode_raw->size < sizeof(*ucode) +
6202 inst_size + data_size + init_size +
6203 init_data_size + boot_size) {
6204
6205 IWL_DEBUG_INFO("uCode file size %d too small\n",
6206 (int)ucode_raw->size);
6207 rc = -EINVAL;
6208 goto err_release;
6209 }
6210
6211 /* Verify that uCode images will fit in card's SRAM */
6212 if (inst_size > IWL_MAX_INST_SIZE) {
6213 IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n",
6214 (int)inst_size);
6215 rc = -EINVAL;
6216 goto err_release;
6217 }
6218
6219 if (data_size > IWL_MAX_DATA_SIZE) {
6220 IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n",
6221 (int)data_size);
6222 rc = -EINVAL;
6223 goto err_release;
6224 }
6225 if (init_size > IWL_MAX_INST_SIZE) {
6226 IWL_DEBUG_INFO
6227 ("uCode init instr len %d too large to fit in card\n",
6228 (int)init_size);
6229 rc = -EINVAL;
6230 goto err_release;
6231 }
6232 if (init_data_size > IWL_MAX_DATA_SIZE) {
6233 IWL_DEBUG_INFO
6234 ("uCode init data len %d too large to fit in card\n",
6235 (int)init_data_size);
6236 rc = -EINVAL;
6237 goto err_release;
6238 }
6239 if (boot_size > IWL_MAX_BSM_SIZE) {
6240 IWL_DEBUG_INFO
6241 ("uCode boot instr len %d too large to fit in bsm\n",
6242 (int)boot_size);
6243 rc = -EINVAL;
6244 goto err_release;
6245 }
6246
6247 /* Allocate ucode buffers for card's bus-master loading ... */
6248
6249 /* Runtime instructions and 2 copies of data:
6250 * 1) unmodified from disk
6251 * 2) backup cache for save/restore during power-downs */
6252 priv->ucode_code.len = inst_size;
6253 priv->ucode_code.v_addr =
6254 pci_alloc_consistent(priv->pci_dev,
6255 priv->ucode_code.len,
6256 &(priv->ucode_code.p_addr));
6257
6258 priv->ucode_data.len = data_size;
6259 priv->ucode_data.v_addr =
6260 pci_alloc_consistent(priv->pci_dev,
6261 priv->ucode_data.len,
6262 &(priv->ucode_data.p_addr));
6263
6264 priv->ucode_data_backup.len = data_size;
6265 priv->ucode_data_backup.v_addr =
6266 pci_alloc_consistent(priv->pci_dev,
6267 priv->ucode_data_backup.len,
6268 &(priv->ucode_data_backup.p_addr));
6269
6270
6271 /* Initialization instructions and data */
6272 priv->ucode_init.len = init_size;
6273 priv->ucode_init.v_addr =
6274 pci_alloc_consistent(priv->pci_dev,
6275 priv->ucode_init.len,
6276 &(priv->ucode_init.p_addr));
6277
6278 priv->ucode_init_data.len = init_data_size;
6279 priv->ucode_init_data.v_addr =
6280 pci_alloc_consistent(priv->pci_dev,
6281 priv->ucode_init_data.len,
6282 &(priv->ucode_init_data.p_addr));
6283
6284 /* Bootstrap (instructions only, no data) */
6285 priv->ucode_boot.len = boot_size;
6286 priv->ucode_boot.v_addr =
6287 pci_alloc_consistent(priv->pci_dev,
6288 priv->ucode_boot.len,
6289 &(priv->ucode_boot.p_addr));
6290
6291 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
6292 !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr ||
6293 !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr)
6294 goto err_pci_alloc;
6295
6296 /* Copy images into buffers for card's bus-master reads ... */
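/* The image blocks follow the file header back to back, in this order:
 * runtime inst, runtime data, init inst, init data, boot inst
 * (hence the offsets used below). */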
6297
6298 /* Runtime instructions (first block of data in file) */
6299 src = &ucode->data[0];
6300 len = priv->ucode_code.len;
6301 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n",
6302 (int)len);
6303 memcpy(priv->ucode_code.v_addr, src, len);
6304 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
6305 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
6306
6307 /* Runtime data (2nd block)
6308 * NOTE: Copy into backup buffer will be done in iwl_up() */
6309 src = &ucode->data[inst_size];
6310 len = priv->ucode_data.len;
6311 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n",
6312 (int)len);
6313 memcpy(priv->ucode_data.v_addr, src, len);
6314 memcpy(priv->ucode_data_backup.v_addr, src, len);
6315
6316 /* Initialization instructions (3rd block) */
6317 if (init_size) {
6318 src = &ucode->data[inst_size + data_size];
6319 len = priv->ucode_init.len;
6320 IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n",
6321 (int)len);
6322 memcpy(priv->ucode_init.v_addr, src, len);
6323 }
6324
6325 /* Initialization data (4th block) */
6326 if (init_data_size) {
6327 src = &ucode->data[inst_size + data_size + init_size];
6328 len = priv->ucode_init_data.len;
6329 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
6330 (int)len);
6331 memcpy(priv->ucode_init_data.v_addr, src, len);
6332 }
6333
6334 /* Bootstrap instructions (5th block) */
6335 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
6336 len = priv->ucode_boot.len;
6337 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
6338 (int)len);
6339 memcpy(priv->ucode_boot.v_addr, src, len);
6340
6341 /* We have our copies now, allow OS to release its copy */
6342 release_firmware(ucode_raw);
6343 return 0;
6344
6345 err_pci_alloc:
6346 IWL_ERROR("failed to allocate pci memory\n");
6347 rc = -ENOMEM;
6348 iwl_dealloc_ucode_pci(priv);
6349
6350 err_release:
6351 release_firmware(ucode_raw);
6352
6353 error:
6354 return rc;
6355}
6356
6357
6358/**
6359 * iwl_set_ucode_ptrs - Set uCode address location
6360 *
6361 * Tell initialization uCode where to find runtime uCode.
6362 *
6363 * BSM registers initially contain pointers to initialization uCode.
6364 * We need to replace them to load runtime uCode inst and data,
6365 * and to save runtime data when powering down.
6366 */
6367static int iwl_set_ucode_ptrs(struct iwl_priv *priv)
6368{
6369 dma_addr_t pinst;
6370 dma_addr_t pdata;
6371 int rc = 0;
6372 unsigned long flags;
6373
6374 /* bits 35:4 for 4965 */
6375 pinst = priv->ucode_code.p_addr >> 4;
6376 pdata = priv->ucode_data_backup.p_addr >> 4;
6377
6378 spin_lock_irqsave(&priv->lock, flags);
6379 rc = iwl_grab_restricted_access(priv);
6380 if (rc) {
6381 spin_unlock_irqrestore(&priv->lock, flags);
6382 return rc;
6383 }
6384
6385 /* Tell bootstrap uCode where to find image to load */
6386 iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst);
6387 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6388 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
6389 priv->ucode_data.len);
6390
6391 /* Inst byte count must be set up last; bit 31 signals uCode
6392 * that all new ptr/size info is in place */
6393 iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG,
6394 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6395
6396 iwl_release_restricted_access(priv);
6397
6398 spin_unlock_irqrestore(&priv->lock, flags);
6399
6400 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
6401
6402 return rc;
6403}
6404
6405/**
6406 * iwl_init_alive_start - Called after REPLY_ALIVE notification received
6407 *
6408 * Called after REPLY_ALIVE notification received from "initialize" uCode.
6409 *
6410 * The 4965 "initialize" ALIVE reply contains calibration data for:
6411 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
6412 * (3945 does not contain this data).
6413 *
6414 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6415*/
6416static void iwl_init_alive_start(struct iwl_priv *priv)
6417{
6418 /* Check alive response for "valid" sign from uCode */
6419 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
6420 /* We had an error bringing up the hardware, so take it
6421 * all the way back down so we can try again */
6422 IWL_DEBUG_INFO("Initialize Alive failed.\n");
6423 goto restart;
6424 }
6425
6426 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
6427 * This is a paranoid check, because we would not have gotten the
6428 * "initialize" alive if code weren't properly loaded. */
6429 if (iwl_verify_ucode(priv)) {
6430 /* "initialize" instruction load was bad;
6431 * take it all the way back down so we can try again */
6432 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
6433 goto restart;
6434 }
6435
6436 /* Calculate temperature */
6437 priv->temperature = iwl4965_get_temperature(priv);
6438
6439 /* Send pointers to protocol/runtime uCode image ... init code will
6440 * load and launch runtime uCode, which will send us another "Alive"
6441 * notification. */
6442 IWL_DEBUG_INFO("Initialization Alive received.\n");
6443 if (iwl_set_ucode_ptrs(priv)) {
6444 /* Runtime instruction load won't happen;
6445 * take it all the way back down so we can try again */
6446 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
6447 goto restart;
6448 }
6449 return;
6450
6451 restart:
6452 queue_work(priv->workqueue, &priv->restart);
6453}
6454
6455
6456/**
6457 * iwl_alive_start - called after REPLY_ALIVE notification received
6458 * from protocol/runtime uCode (initialization uCode's
6459 * Alive gets handled by iwl_init_alive_start()).
6460 */
6461static void iwl_alive_start(struct iwl_priv *priv)
6462{
6463 int rc = 0;
6464
6465 IWL_DEBUG_INFO("Runtime Alive received.\n");
6466
6467 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
6468 /* We had an error bringing up the hardware, so take it
6469 * all the way back down so we can try again */
6470 IWL_DEBUG_INFO("Alive failed.\n");
6471 goto restart;
6472 }
6473
6474 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
6475 * This is a paranoid check, because we would not have gotten the
6476 * "runtime" alive if code weren't properly loaded. */
6477 if (iwl_verify_ucode(priv)) {
6478 /* Runtime instruction load was bad;
6479 * take it all the way back down so we can try again */
6480 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
6481 goto restart;
6482 }
6483
6484 iwl_clear_stations_table(priv);
6485
6486 rc = iwl4965_alive_notify(priv);
6487 if (rc) {
6488 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
6489 rc);
6490 goto restart;
6491 }
6492
6493 /* After the ALIVE response, we can process host commands */
6494 set_bit(STATUS_ALIVE, &priv->status);
6495
6496 /* Clear out the uCode error bit if it is set */
6497 clear_bit(STATUS_FW_ERROR, &priv->status);
6498
6499 rc = iwl_init_channel_map(priv);
6500 if (rc) {
6501 IWL_ERROR("initializing regulatory failed: %d\n", rc);
6502 return;
6503 }
6504
6505 iwl_init_geos(priv);
6506
6507 if (iwl_is_rfkill(priv))
6508 return;
6509
6510 if (!priv->mac80211_registered) {
6511 /* Unlock so any user space entry points can call back into
6512 * the driver without a deadlock... */
6513 mutex_unlock(&priv->mutex);
6514 iwl_rate_control_register(priv->hw);
6515 rc = ieee80211_register_hw(priv->hw);
6516 priv->hw->conf.beacon_int = 100;
6517 mutex_lock(&priv->mutex);
6518
6519 if (rc) {
6520 IWL_ERROR("Failed to register network "
6521 "device (error %d)\n", rc);
6522 return;
6523 }
6524
6525 priv->mac80211_registered = 1;
6526
6527 iwl_reset_channel_flag(priv);
6528 } else
6529 ieee80211_start_queues(priv->hw);
6530
6531 priv->active_rate = priv->rates_mask;
6532 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
6533
6534 iwl_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
6535
6536 if (iwl_is_associated(priv)) {
6537 struct iwl_rxon_cmd *active_rxon =
6538 (struct iwl_rxon_cmd *)(&priv->active_rxon);
6539
6540 memcpy(&priv->staging_rxon, &priv->active_rxon,
6541 sizeof(priv->staging_rxon));
6542 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6543 } else {
6544 /* Initialize our rx_config data */
6545 iwl_connection_init_rx_config(priv);
6546 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
6547 }
6548
6549 /* Configure BT coexistence */
6550 iwl_send_bt_config(priv);
6551
6552 /* Configure the adapter for unassociated operation */
6553 iwl_commit_rxon(priv);
6554
6555 /* At this point, the NIC is initialized and operational */
6556 priv->notif_missed_beacons = 0;
6557 set_bit(STATUS_READY, &priv->status);
6558
6559 iwl4965_rf_kill_ct_config(priv);
6560 IWL_DEBUG_INFO("ALIVE processing complete.\n");
6561
6562 if (priv->error_recovering)
6563 iwl_error_recovery(priv);
6564
6565 return;
6566
6567 restart:
6568 queue_work(priv->workqueue, &priv->restart);
6569}
6570
6571static void iwl_cancel_deferred_work(struct iwl_priv *priv);
6572
6573static void __iwl_down(struct iwl_priv *priv)
6574{
6575 unsigned long flags;
6576 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
6577 struct ieee80211_conf *conf = NULL;
6578
6579 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
6580
6581 conf = ieee80211_get_hw_conf(priv->hw);
6582
6583 if (!exit_pending)
6584 set_bit(STATUS_EXIT_PENDING, &priv->status);
6585
6586 iwl_clear_stations_table(priv);
6587
6588 /* Unblock any waiting calls */
6589 wake_up_interruptible_all(&priv->wait_command_queue);
6590
6591 iwl_cancel_deferred_work(priv);
6592
6593 /* Wipe out the EXIT_PENDING status bit if we are not actually
6594 * exiting the module */
6595 if (!exit_pending)
6596 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6597
6598 /* stop and reset the on-board processor */
6599 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
6600
6601 /* tell the device to stop sending interrupts */
6602 iwl_disable_interrupts(priv);
6603
6604 if (priv->mac80211_registered)
6605 ieee80211_stop_queues(priv->hw);
6606
6607 /* If we have not previously called iwl_init() then
6608 * clear all bits but the RF Kill and SUSPEND bits and return */
6609 if (!iwl_is_init(priv)) {
6610 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6611 STATUS_RF_KILL_HW |
6612 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6613 STATUS_RF_KILL_SW |
6614 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6615 STATUS_IN_SUSPEND;
6616 goto exit;
6617 }
6618
6619 /* ...otherwise clear out all the status bits but the RF Kill and
6620 * SUSPEND bits and continue taking the NIC down. */
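/* test_bit() returns 0 or 1, so shifting each result back by its bit
 * index rebuilds the status word with only the listed bits preserved. */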
6621 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6622 STATUS_RF_KILL_HW |
6623 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6624 STATUS_RF_KILL_SW |
6625 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6626 STATUS_IN_SUSPEND |
6627 test_bit(STATUS_FW_ERROR, &priv->status) <<
6628 STATUS_FW_ERROR;
6629
6630 spin_lock_irqsave(&priv->lock, flags);
6631 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
6632 spin_unlock_irqrestore(&priv->lock, flags);
6633
6634 iwl_hw_txq_ctx_stop(priv);
6635 iwl_hw_rxq_stop(priv);
6636
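/* Writing the DMA clock request bit to the clock-disable register drops
 * the DMA clock request now that the tx/rx queues are stopped. */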
6637 spin_lock_irqsave(&priv->lock, flags);
6638 if (!iwl_grab_restricted_access(priv)) {
6639 iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG,
6640 APMG_CLK_VAL_DMA_CLK_RQT);
6641 iwl_release_restricted_access(priv);
6642 }
6643 spin_unlock_irqrestore(&priv->lock, flags);
6644
6645 udelay(5);
6646
6647 iwl_hw_nic_stop_master(priv);
6648 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
6649 iwl_hw_nic_reset(priv);
6650
6651 exit:
6652 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
6653
6654 if (priv->ibss_beacon)
6655 dev_kfree_skb(priv->ibss_beacon);
6656 priv->ibss_beacon = NULL;
6657
6658 /* clear out any free frames */
6659 iwl_clear_free_frames(priv);
6660}
6661
6662static void iwl_down(struct iwl_priv *priv)
6663{
6664 mutex_lock(&priv->mutex);
6665 __iwl_down(priv);
6666 mutex_unlock(&priv->mutex);
6667}
6668
6669#define MAX_HW_RESTARTS 5
6670
6671static int __iwl_up(struct iwl_priv *priv)
6672{
6673 int rc, i;
6674 u32 hw_rf_kill = 0;
6675
6676 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6677 IWL_WARNING("Exit pending; will not bring the NIC up\n");
6678 return -EIO;
6679 }
6680
6681 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6682 IWL_WARNING("Radio disabled by SW RF kill (module "
6683 "parameter)\n");
6684 return 0;
6685 }
6686
6687 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
6688
6689 rc = iwl_hw_nic_init(priv);
6690 if (rc) {
6691 IWL_ERROR("Unable to init nic\n");
6692 return rc;
6693 }
6694
6695 /* make sure rfkill handshake bits are cleared */
6696 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6697 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
6698 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6699
6700 /* clear (again), then enable host interrupts */
6701 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
6702 iwl_enable_interrupts(priv);
6703
6704 /* really make sure rfkill handshake bits are cleared */
6705 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6706 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6707
6708 /* Copy original ucode data image from disk into backup cache.
6709 * This will be used to initialize the on-board processor's
6710 * data SRAM for a clean start when the runtime program first loads. */
6711 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
6712 priv->ucode_data.len);
6713
6714 /* If platform's RF_KILL switch is set to KILL,
6715 * wait for BIT_INT_RF_KILL interrupt before loading uCode
6716 * and getting things started */
6717 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
6718 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
6719 hw_rf_kill = 1;
6720
6721 if (test_bit(STATUS_RF_KILL_HW, &priv->status) || hw_rf_kill) {
6722 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
6723 return 0;
6724 }
6725
6726 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6727
6728 iwl_clear_stations_table(priv);
6729
6730 /* load bootstrap state machine,
6731 * load bootstrap program into processor's memory,
6732 * prepare to load the "initialize" uCode */
6733 rc = iwl_load_bsm(priv);
6734
6735 if (rc) {
6736 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
6737 continue;
6738 }
6739
6740 /* start card; "initialize" will load runtime ucode */
6741 iwl_nic_start(priv);
6742
6743 /* MAC Address location in EEPROM same for 3945/4965 */
6744 get_eeprom_mac(priv, priv->mac_addr);
6745 IWL_DEBUG_INFO("MAC address: " MAC_FMT "\n",
6746 MAC_ARG(priv->mac_addr));
6747
6748 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
6749
6750 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
6751
6752 return 0;
6753 }
6754
6755 set_bit(STATUS_EXIT_PENDING, &priv->status);
6756 __iwl_down(priv);
6757
6758 /* tried to restart and configure the device for as long as our
6759 * patience could withstand */
6760 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
6761 return -EIO;
6762}
6763
6764
6765/*****************************************************************************
6766 *
6767 * Workqueue callbacks
6768 *
6769 *****************************************************************************/
6770
6771static void iwl_bg_init_alive_start(struct work_struct *data)
6772{
6773 struct iwl_priv *priv =
6774 container_of(data, struct iwl_priv, init_alive_start.work);
6775
6776 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6777 return;
6778
6779 mutex_lock(&priv->mutex);
6780 iwl_init_alive_start(priv);
6781 mutex_unlock(&priv->mutex);
6782}
6783
6784static void iwl_bg_alive_start(struct work_struct *data)
6785{
6786 struct iwl_priv *priv =
6787 container_of(data, struct iwl_priv, alive_start.work);
6788
6789 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6790 return;
6791
6792 mutex_lock(&priv->mutex);
6793 iwl_alive_start(priv);
6794 mutex_unlock(&priv->mutex);
6795}
6796
6797static void iwl_bg_rf_kill(struct work_struct *work)
6798{
6799 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
6800
6801 wake_up_interruptible(&priv->wait_command_queue);
6802
6803 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6804 return;
6805
6806 mutex_lock(&priv->mutex);
6807
6808 if (!iwl_is_rfkill(priv)) {
6809 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6810 "HW and/or SW RF Kill no longer active, restarting "
6811 "device\n");
6812 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6813 queue_work(priv->workqueue, &priv->restart);
6814 } else {
6815
6816 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6817 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6818 "disabled by SW switch\n");
6819 else
6820 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6821 "Kill switch must be turned off for "
6822 "wireless networking to work.\n");
6823 }
6824 mutex_unlock(&priv->mutex);
6825}
6826
6827#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6828
6829static void iwl_bg_scan_check(struct work_struct *data)
6830{
6831 struct iwl_priv *priv =
6832 container_of(data, struct iwl_priv, scan_check.work);
6833
6834 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6835 return;
6836
6837 mutex_lock(&priv->mutex);
6838 if (test_bit(STATUS_SCANNING, &priv->status) ||
6839 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6840 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6841 "Scan completion watchdog resetting adapter (%dms)\n",
6842 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
6843 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6844 queue_work(priv->workqueue, &priv->restart);
6845 }
6846 mutex_unlock(&priv->mutex);
6847}
6848
6849static void iwl_bg_request_scan(struct work_struct *data)
6850{
6851 struct iwl_priv *priv =
6852 container_of(data, struct iwl_priv, request_scan);
6853 struct iwl_host_cmd cmd = {
6854 .id = REPLY_SCAN_CMD,
6855 .len = sizeof(struct iwl_scan_cmd),
6856 .meta.flags = CMD_SIZE_HUGE,
6857 };
6858 int rc = 0;
6859 struct iwl_scan_cmd *scan;
6860 struct ieee80211_conf *conf = NULL;
6861 u8 direct_mask;
6862 int phymode;
6863
6864 conf = ieee80211_get_hw_conf(priv->hw);
6865
6866 mutex_lock(&priv->mutex);
6867
6868 if (!iwl_is_ready(priv)) {
6869 IWL_WARNING("request scan called when driver not ready.\n");
6870 goto done;
6871 }
6872
6873 /* Make sure the scan wasn't cancelled before this queued work
6874 * was given the chance to run... */
6875 if (!test_bit(STATUS_SCANNING, &priv->status))
6876 goto done;
6877
6878 /* This should never be called or scheduled if there is currently
6879 * a scan active in the hardware. */
6880 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6881 IWL_DEBUG_INFO("Multiple concurrent scan requests. "
6882 "Ignoring second request.\n");
6883 rc = -EIO;
6884 goto done;
6885 }
6886
6887 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6888 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
6889 goto done;
6890 }
6891
6892 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6893 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6894 goto done;
6895 }
6896
6897 if (iwl_is_rfkill(priv)) {
6898 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6899 goto done;
6900 }
6901
6902 if (!test_bit(STATUS_READY, &priv->status)) {
6903 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
6904 goto done;
6905 }
6906
6907 if (!priv->scan_bands) {
6908 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
6909 goto done;
6910 }
6911
6912 if (!priv->scan) {
6913 priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
6914 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
6915 if (!priv->scan) {
6916 rc = -ENOMEM;
6917 goto done;
6918 }
6919 }
6920 scan = priv->scan;
6921 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
6922
6923 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
6924 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
6925
6926 if (iwl_is_associated(priv)) {
6927 u16 interval = 0;
6928 u32 extra;
6929 u32 suspend_time = 100;
6930 u32 scan_suspend_time = 100;
6931 unsigned long flags;
6932
6933 IWL_DEBUG_INFO("Scanning while associated...\n");
6934
6935 spin_lock_irqsave(&priv->lock, flags);
6936 interval = priv->beacon_int;
6937 spin_unlock_irqrestore(&priv->lock, flags);
6938
6939 scan->suspend_time = 0;
6940 scan->max_out_time = cpu_to_le32(600 * 1024);
6941 if (!interval)
6942 interval = suspend_time;
6943
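/* Encode suspend_time for the uCode: whole beacon intervals in the upper
 * bits (<< 22) and the remainder converted to usec (1 TU = 1024 usec) in
 * the lower bits -- a best-effort reading of the firmware format. */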
6944 extra = (suspend_time / interval) << 22;
6945 scan_suspend_time = (extra |
6946 ((suspend_time % interval) * 1024));
6947 scan->suspend_time = cpu_to_le32(scan_suspend_time);
6948 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
6949 scan_suspend_time, interval);
6950 }
6951
6952 /* We should add the ability for the user to lock to PASSIVE ONLY */
6953 if (priv->one_direct_scan) {
6954 IWL_DEBUG_SCAN
6955 ("Kicking off one direct scan for '%s'\n",
6956 iwl_escape_essid(priv->direct_ssid,
6957 priv->direct_ssid_len));
6958 scan->direct_scan[0].id = WLAN_EID_SSID;
6959 scan->direct_scan[0].len = priv->direct_ssid_len;
6960 memcpy(scan->direct_scan[0].ssid,
6961 priv->direct_ssid, priv->direct_ssid_len);
6962 direct_mask = 1;
6963 } else if (!iwl_is_associated(priv)) {
6964 scan->direct_scan[0].id = WLAN_EID_SSID;
6965 scan->direct_scan[0].len = priv->essid_len;
6966 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
6967 direct_mask = 1;
6968 } else
6969 direct_mask = 0;
6970
6971 /* We don't build a direct scan probe request; the uCode will do
6972 * that based on the direct_mask added to each channel entry */
6973 scan->tx_cmd.len = cpu_to_le16(
6974 iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
6975 IWL_MAX_SCAN_SIZE - sizeof(scan), 0));
6976 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
6977 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
6978 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
6979
6980 /* flags + rate selection */
6981
6982 scan->tx_cmd.tx_flags |= cpu_to_le32(0x200);
6983
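/* scan_bands 2 selects 2.4GHz (scan at 1M CCK), 1 selects 5.2GHz (scan at
 * 6M OFDM); each case picks that band's lowest rate for the probe tx. */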
6984 switch (priv->scan_bands) {
6985 case 2:
6986 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
6987 scan->tx_cmd.rate_n_flags =
6988 iwl_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
6989 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
6990
6991 scan->good_CRC_th = 0;
6992 phymode = MODE_IEEE80211G;
6993 break;
6994
6995 case 1:
6996 scan->tx_cmd.rate_n_flags =
6997 iwl_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
6998 RATE_MCS_ANT_B_MSK);
6999 scan->good_CRC_th = IWL_GOOD_CRC_TH;
7000 phymode = MODE_IEEE80211A;
7001 break;
7002
7003 default:
7004 IWL_WARNING("Invalid scan band count\n");
7005 goto done;
7006 }
7007
7008 /* select Rx chains */
7009
7010 /* Force use of chains B and C (0x6) for scan Rx.
7011 * Avoid A (0x1) because of its off-channel reception on A-band.
7012 * MIMO is not used here, but value is required to make uCode happy. */
7013 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
7014 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
7015 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
7016 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
7017
7018 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
7019 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
7020
7021 if (direct_mask)
7022 IWL_DEBUG_SCAN
7023 ("Initiating direct scan for %s.\n",
7024 iwl_escape_essid(priv->essid, priv->essid_len));
7025 else
7026 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
7027
7028 scan->channel_count =
7029 iwl_get_channels_for_scan(
7030 priv, phymode, 1, /* active */
7031 direct_mask,
7032 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
7033
7034 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
7035 scan->channel_count * sizeof(struct iwl_scan_channel);
7036 cmd.data = scan;
7037 scan->len = cpu_to_le16(cmd.len);
7038
7039 set_bit(STATUS_SCAN_HW, &priv->status);
7040 rc = iwl_send_cmd_sync(priv, &cmd);
7041 if (rc)
7042 goto done;
7043
7044 queue_delayed_work(priv->workqueue, &priv->scan_check,
7045 IWL_SCAN_CHECK_WATCHDOG);
7046
7047 mutex_unlock(&priv->mutex);
7048 return;
7049
7050 done:
7051 /* inform mac80211 that the scan was aborted */
7052 queue_work(priv->workqueue, &priv->scan_completed);
7053 mutex_unlock(&priv->mutex);
7054}
7055
7056static void iwl_bg_up(struct work_struct *data)
7057{
7058 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
7059
7060 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7061 return;
7062
7063 mutex_lock(&priv->mutex);
7064 __iwl_up(priv);
7065 mutex_unlock(&priv->mutex);
7066}
7067
7068static void iwl_bg_restart(struct work_struct *data)
7069{
7070 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
7071
7072 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7073 return;
7074
7075 iwl_down(priv);
7076 queue_work(priv->workqueue, &priv->up);
7077}
7078
7079static void iwl_bg_rx_replenish(struct work_struct *data)
7080{
7081 struct iwl_priv *priv =
7082 container_of(data, struct iwl_priv, rx_replenish);
7083
7084 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7085 return;
7086
7087 mutex_lock(&priv->mutex);
7088 iwl_rx_replenish(priv);
7089 mutex_unlock(&priv->mutex);
7090}
7091
7092static void iwl_bg_post_associate(struct work_struct *data)
7093{
7094 struct iwl_priv *priv = container_of(data, struct iwl_priv,
7095 post_associate.work);
7096
7097 int rc = 0;
7098 struct ieee80211_conf *conf = NULL;
7099
7100 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7101 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
7102 return;
7103 }
7104
7105 IWL_DEBUG_ASSOC("Associated as %d to: " MAC_FMT "\n",
7106 priv->assoc_id, MAC_ARG(priv->active_rxon.bssid_addr));
7107
7108
7109 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7110 return;
7111
7112 mutex_lock(&priv->mutex);
7113
7114 conf = ieee80211_get_hw_conf(priv->hw);
7115
7116 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7117 iwl_commit_rxon(priv);
7118
7119 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
7120 iwl_setup_rxon_timing(priv);
7121 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
7122 sizeof(priv->rxon_timing), &priv->rxon_timing);
7123 if (rc)
7124 IWL_WARNING("REPLY_RXON_TIMING failed - "
7125 "Attempting to continue.\n");
7126
7127 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7128
7129#ifdef CONFIG_IWLWIFI_HT
7130 if (priv->is_ht_enabled && priv->current_assoc_ht.is_ht)
7131 iwl4965_set_rxon_ht(priv, &priv->current_assoc_ht);
7132 else {
7133 priv->active_rate_ht[0] = 0;
7134 priv->active_rate_ht[1] = 0;
7135 priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ;
7136 }
7137#endif /* CONFIG_IWLWIFI_HT*/
7138 iwl4965_set_rxon_chain(priv);
7139 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7140
7141 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
7142 priv->assoc_id, priv->beacon_int);
7143
7144 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7145 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7146 else
7147 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7148
7149 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7150 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
7151 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
7152 else
7153 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7154
7155 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7156 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7157
7158 }
7159
7160 iwl_commit_rxon(priv);
7161
7162 switch (priv->iw_mode) {
7163 case IEEE80211_IF_TYPE_STA:
7164 iwl_rate_scale_init(priv->hw, IWL_AP_ID);
7165 break;
7166
7167 case IEEE80211_IF_TYPE_IBSS:
7168
7169 /* clear out the station table */
7170 iwl_clear_stations_table(priv);
7171
7172 iwl_rxon_add_station(priv, BROADCAST_ADDR, 0);
7173 iwl_rxon_add_station(priv, priv->bssid, 0);
7174 iwl_rate_scale_init(priv->hw, IWL_STA_ID);
7175 iwl_send_beacon_cmd(priv);
7176
7177 break;
7178
7179 default:
7180 IWL_ERROR("%s Should not be called in %d mode\n",
7181 __FUNCTION__, priv->iw_mode);
7182 break;
7183 }
7184
7185 iwl_sequence_reset(priv);
7186
7187#ifdef CONFIG_IWLWIFI_SENSITIVITY
7188 /* Enable Rx differential gain and sensitivity calibrations */
7189 iwl4965_chain_noise_reset(priv);
7190 priv->start_calib = 1;
7191#endif /* CONFIG_IWLWIFI_SENSITIVITY */
7192
7193 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7194 priv->assoc_station_added = 1;
7195
7196#ifdef CONFIG_IWLWIFI_QOS
7197 iwl_activate_qos(priv, 0);
7198#endif /* CONFIG_IWLWIFI_QOS */
7199 mutex_unlock(&priv->mutex);
7200}
7201
7202static void iwl_bg_abort_scan(struct work_struct *work)
7203{
7204 struct iwl_priv *priv = container_of(work, struct iwl_priv,
7205 abort_scan);
7206
7207 if (!iwl_is_ready(priv))
7208 return;
7209
7210 mutex_lock(&priv->mutex);
7211
7212 set_bit(STATUS_SCAN_ABORTING, &priv->status);
7213 iwl_send_scan_abort(priv);
7214
7215 mutex_unlock(&priv->mutex);
7216}
7217
7218static void iwl_bg_scan_completed(struct work_struct *work)
7219{
7220 struct iwl_priv *priv =
7221 container_of(work, struct iwl_priv, scan_completed);
7222
7223 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete\n");
7224
7225 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7226 return;
7227
7228 ieee80211_scan_completed(priv->hw);
7229
7230 /* Since setting the TXPOWER may have been deferred while
7231 * performing the scan, fire one off */
7232 mutex_lock(&priv->mutex);
7233 iwl_hw_reg_send_txpower(priv);
7234 mutex_unlock(&priv->mutex);
7235}
7236
7237/*****************************************************************************
7238 *
7239 * mac80211 entry point functions
7240 *
7241 *****************************************************************************/
7242
7243static int iwl_mac_open(struct ieee80211_hw *hw)
7244{
7245 struct iwl_priv *priv = hw->priv;
7246
7247 IWL_DEBUG_MAC80211("enter\n");
7248
7249 /* we should be verifying the device is ready to be opened */
7250 mutex_lock(&priv->mutex);
7251
7252 priv->is_open = 1;
7253
7254 if (!iwl_is_rfkill(priv))
7255 ieee80211_start_queues(priv->hw);
7256
7257 mutex_unlock(&priv->mutex);
7258 IWL_DEBUG_MAC80211("leave\n");
7259 return 0;
7260}
7261
7262static int iwl_mac_stop(struct ieee80211_hw *hw)
7263{
7264 struct iwl_priv *priv = hw->priv;
7265
7266 IWL_DEBUG_MAC80211("enter\n");
7267 priv->is_open = 0;
7268 /*netif_stop_queue(dev); */
7269 flush_workqueue(priv->workqueue);
7270 IWL_DEBUG_MAC80211("leave\n");
7271
7272 return 0;
7273}
7274
7275static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
7276 struct ieee80211_tx_control *ctl)
7277{
7278 struct iwl_priv *priv = hw->priv;
7279
7280 IWL_DEBUG_MAC80211("enter\n");
7281
7282 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
7283 IWL_DEBUG_MAC80211("leave - monitor\n");
7284 return -1;
7285 }
7286
7287 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
7288 ctl->tx_rate);
7289
7290 if (iwl_tx_skb(priv, skb, ctl))
7291 dev_kfree_skb_any(skb);
7292
7293 IWL_DEBUG_MAC80211("leave\n");
7294 return 0;
7295}
7296
7297static int iwl_mac_add_interface(struct ieee80211_hw *hw,
7298 struct ieee80211_if_init_conf *conf)
7299{
7300 struct iwl_priv *priv = hw->priv;
7301 unsigned long flags;
7302
7303 IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type);
7304 if (conf->mac_addr)
7305 IWL_DEBUG_MAC80211("enter: MAC " MAC_FMT "\n",
7306 MAC_ARG(conf->mac_addr));
7307
7308 if (priv->interface_id) {
7309 IWL_DEBUG_MAC80211("leave - interface_id != 0\n");
7310 return 0;
7311 }
7312
7313 spin_lock_irqsave(&priv->lock, flags);
7314 priv->interface_id = conf->if_id;
7315
7316 spin_unlock_irqrestore(&priv->lock, flags);
7317
7318 mutex_lock(&priv->mutex);
7319 iwl_set_mode(priv, conf->type);
7320
7321 IWL_DEBUG_MAC80211("leave\n");
7322 mutex_unlock(&priv->mutex);
7323
7324 return 0;
7325}
7326
7327/**
7328 * iwl_mac_config - mac80211 config callback
7329 *
7330 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
7331 * be set inappropriately and the driver currently sets the hardware up to
7332 * use it whenever needed.
7333 */
7334static int iwl_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
7335{
7336 struct iwl_priv *priv = hw->priv;
7337 const struct iwl_channel_info *ch_info;
7338 unsigned long flags;
7339
7340 mutex_lock(&priv->mutex);
7341 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel);
7342
7343 if (!iwl_is_ready(priv)) {
7344 IWL_DEBUG_MAC80211("leave - not ready\n");
7345 mutex_unlock(&priv->mutex);
7346 return -EIO;
7347 }
7348
7349 /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only
7350 * what is exposed through include/ declarations */
7351 if (unlikely(!iwl_param_disable_hw_scan &&
7352 test_bit(STATUS_SCANNING, &priv->status))) {
7353 IWL_DEBUG_MAC80211("leave - scanning\n");
7354 mutex_unlock(&priv->mutex);
7355 return 0;
7356 }
7357
7358 spin_lock_irqsave(&priv->lock, flags);
7359
7360 ch_info = iwl_get_channel_info(priv, conf->phymode, conf->channel);
7361 if (!is_channel_valid(ch_info)) {
7362 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
7363 conf->channel, conf->phymode);
7364 IWL_DEBUG_MAC80211("leave - invalid channel\n");
7365 spin_unlock_irqrestore(&priv->lock, flags);
7366 mutex_unlock(&priv->mutex);
7367 return -EINVAL;
7368 }
7369
7370#ifdef CONFIG_IWLWIFI_HT
7371 /* if we are switching from ht to 2.4, clear the flags
7372 * of any ht related info, since 2.4 does not
7373 * support ht */
7374 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel)
7375#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7376 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
7377#endif
7378 )
7379 priv->staging_rxon.flags = 0;
7380#endif /* CONFIG_IWLWIFI_HT */
7381
7382 iwl_set_rxon_channel(priv, conf->phymode, conf->channel);
7383
7384 iwl_set_flags_for_phymode(priv, conf->phymode);
7385
7386 /* The list of supported rates and rate mask can be different
7387 * for each phymode; since the phymode may have changed, reset
7388 * the rate mask to what mac80211 lists */
7389 iwl_set_rate(priv);
7390
7391 spin_unlock_irqrestore(&priv->lock, flags);
7392
7393#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7394 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
7395 iwl_hw_channel_switch(priv, conf->channel);
7396 mutex_unlock(&priv->mutex);
7397 return 0;
7398 }
7399#endif
7400
7401 iwl_radio_kill_sw(priv, !conf->radio_enabled);
7402
7403 if (!conf->radio_enabled) {
7404 IWL_DEBUG_MAC80211("leave - radio disabled\n");
7405 mutex_unlock(&priv->mutex);
7406 return 0;
7407 }
7408
7409 if (iwl_is_rfkill(priv)) {
7410 IWL_DEBUG_MAC80211("leave - RF kill\n");
7411 mutex_unlock(&priv->mutex);
7412 return -EIO;
7413 }
7414
7415 iwl_set_rate(priv);
7416
7417 if (memcmp(&priv->active_rxon,
7418 &priv->staging_rxon, sizeof(priv->staging_rxon)))
7419 iwl_commit_rxon(priv);
7420 else
7421 IWL_DEBUG_INFO("Not re-sending the same RXON configuration.\n");
7422
7423 IWL_DEBUG_MAC80211("leave\n");
7424
7425 mutex_unlock(&priv->mutex);
7426
7427 return 0;
7428}
7429
7430static void iwl_config_ap(struct iwl_priv *priv)
7431{
7432 int rc = 0;
7433
7434 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7435 return;
7436
7437 /* The following should be done only at AP bring up */
7438 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
7439
7440 /* RXON - unassoc (to set timing command) */
7441 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7442 iwl_commit_rxon(priv);
7443
7444 /* RXON Timing */
7445 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
7446 iwl_setup_rxon_timing(priv);
7447 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
7448 sizeof(priv->rxon_timing), &priv->rxon_timing);
7449 if (rc)
7450 IWL_WARNING("REPLY_RXON_TIMING failed - "
7451 "Attempting to continue.\n");
7452
7453 iwl4965_set_rxon_chain(priv);
7454
7455 /* FIXME: what should be the assoc_id for AP? */
7456 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7457 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7458 priv->staging_rxon.flags |=
7459 RXON_FLG_SHORT_PREAMBLE_MSK;
7460 else
7461 priv->staging_rxon.flags &=
7462 ~RXON_FLG_SHORT_PREAMBLE_MSK;
7463
7464 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7465 if (priv->assoc_capability &
7466 WLAN_CAPABILITY_SHORT_SLOT_TIME)
7467 priv->staging_rxon.flags |=
7468 RXON_FLG_SHORT_SLOT_MSK;
7469 else
7470 priv->staging_rxon.flags &=
7471 ~RXON_FLG_SHORT_SLOT_MSK;
7472
7473 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7474 priv->staging_rxon.flags &=
7475 ~RXON_FLG_SHORT_SLOT_MSK;
7476 }
7477 /* restore RXON assoc */
7478 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7479 iwl_commit_rxon(priv);
7480#ifdef CONFIG_IWLWIFI_QOS
7481 iwl_activate_qos(priv, 1);
7482#endif
7483 iwl_rxon_add_station(priv, BROADCAST_ADDR, 0);
7484 iwl_send_beacon_cmd(priv);
7485 } else
7486 iwl_send_beacon_cmd(priv);
7487
7488 /* FIXME - we need to add code here to detect a totally new
7489 * configuration, reset the AP, unassoc, rxon timing, assoc,
7490 * clear sta table, add BCAST sta... */
7491}
7492
7493static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id,
7494 struct ieee80211_if_conf *conf)
7495{
7496 struct iwl_priv *priv = hw->priv;
7497 unsigned long flags;
7498 int rc;
7499
7500 if (conf == NULL)
7501 return -EIO;
7502
7503 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
7504 (!conf->beacon || !conf->ssid_len)) {
7505 IWL_DEBUG_MAC80211
7506 ("Leaving in AP mode because HostAPD is not ready.\n");
7507 return 0;
7508 }
7509
7510 mutex_lock(&priv->mutex);
7511
7512 IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id);
7513 if (conf->bssid)
7514 IWL_DEBUG_MAC80211("bssid: " MAC_FMT "\n",
7515 MAC_ARG(conf->bssid));
7516
7517 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7518 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
7519 IWL_DEBUG_MAC80211("leave - scanning\n");
7520 mutex_unlock(&priv->mutex);
7521 return 0;
7522 }
7523
7524 if (priv->interface_id != if_id) {
7525 IWL_DEBUG_MAC80211("leave - interface_id != if_id\n");
7526 mutex_unlock(&priv->mutex);
7527 return 0;
7528 }
7529
7530 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7531 if (!conf->bssid) {
7532 conf->bssid = priv->mac_addr;
7533 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
7534 IWL_DEBUG_MAC80211("bssid was set to: " MAC_FMT "\n",
7535 MAC_ARG(conf->bssid));
7536 }
7537 if (priv->ibss_beacon)
7538 dev_kfree_skb(priv->ibss_beacon);
7539
7540 priv->ibss_beacon = conf->beacon;
7541 }
7542
7543 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
7544 !is_multicast_ether_addr(conf->bssid)) {
7545 /* If there is currently a HW scan going on in the background
7546 * then we need to cancel it else the RXON below will fail. */
7547 if (iwl_scan_cancel_timeout(priv, 100)) {
7548 IWL_WARNING("Aborted scan still in progress "
7549 "after 100ms\n");
7550 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
7551 mutex_unlock(&priv->mutex);
7552 return -EAGAIN;
7553 }
7554 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
7555
7556 /* TODO: Audit driver for usage of these members and see
7557 * if mac80211 deprecates them (priv->bssid looks like it
7558 * shouldn't be there, but I haven't scanned the IBSS code
7559 * to verify) - jpk */
7560 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
7561
7562 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
7563 iwl_config_ap(priv);
7564 else {
7565 priv->staging_rxon.filter_flags |=
7566 RXON_FILTER_ASSOC_MSK;
7567 rc = iwl_commit_rxon(priv);
7568 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
7569 iwl_rxon_add_station(
7570 priv, priv->active_rxon.bssid_addr, 1);
7571 }
7572
7573 } else {
7574 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7575 iwl_commit_rxon(priv);
7576 }
7577
7578 spin_lock_irqsave(&priv->lock, flags);
7579 if (!conf->ssid_len)
7580 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7581 else
7582 memcpy(priv->essid, conf->ssid, conf->ssid_len);
7583
7584 priv->essid_len = conf->ssid_len;
7585 spin_unlock_irqrestore(&priv->lock, flags);
7586
7587 IWL_DEBUG_MAC80211("leave\n");
7588 mutex_unlock(&priv->mutex);
7589
7590 return 0;
7591}
7592
7593static void iwl_mac_remove_interface(struct ieee80211_hw *hw,
7594 struct ieee80211_if_init_conf *conf)
7595{
7596 struct iwl_priv *priv = hw->priv;
7597
7598 IWL_DEBUG_MAC80211("enter\n");
7599
7600 mutex_lock(&priv->mutex);
7601 if (priv->interface_id == conf->if_id) {
7602 priv->interface_id = 0;
7603 memset(priv->bssid, 0, ETH_ALEN);
7604 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7605 priv->essid_len = 0;
7606 }
7607 mutex_unlock(&priv->mutex);
7608
7609 IWL_DEBUG_MAC80211("leave\n");
7610
7611}
7612
7613#define IWL_DELAY_NEXT_SCAN (HZ*2)
7614static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
7615{
7616 int rc = 0;
7617 unsigned long flags;
7618 struct iwl_priv *priv = hw->priv;
7619
7620 IWL_DEBUG_MAC80211("enter\n");
7621
7622 spin_lock_irqsave(&priv->lock, flags);
7623
7624 if (!iwl_is_ready_rf(priv)) {
7625 rc = -EIO;
7626 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
7627 goto out_unlock;
7628 }
7629
7630 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */
7631 rc = -EIO;
7632 IWL_ERROR("ERROR: APs don't scan\n");
7633 goto out_unlock;
7634 }
7635
7636 /* if we just finished scan ask for delay */
7637 if (priv->last_scan_jiffies &&
7638 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
7639 jiffies)) {
7640 rc = -EAGAIN;
7641 goto out_unlock;
7642 }
7643 if (len) {
7644 IWL_DEBUG_SCAN("direct scan for "
 7645			       "%s [%d]\n",
7646 iwl_escape_essid(ssid, len), (int)len);
7647
7648 priv->one_direct_scan = 1;
7649 priv->direct_ssid_len = (u8)
7650 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
7651 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
7652 }
7653
7654 rc = iwl_scan_initiate(priv);
7655
7656 IWL_DEBUG_MAC80211("leave\n");
7657
7658out_unlock:
7659 spin_unlock_irqrestore(&priv->lock, flags);
7660
7661 return rc;
7662}
7663
7664static int iwl_mac_set_key(struct ieee80211_hw *hw, set_key_cmd cmd,
7665 const u8 *local_addr, const u8 *addr,
7666 struct ieee80211_key_conf *key)
7667{
7668 struct iwl_priv *priv = hw->priv;
7669 int rc = 0;
7670 u8 sta_id;
7671
7672 IWL_DEBUG_MAC80211("enter\n");
7673
7674 if (!iwl_param_hwcrypto) {
7675 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7676 return -EOPNOTSUPP;
7677 }
7678
7679 if (is_zero_ether_addr(addr))
7680 /* only support pairwise keys */
7681 return -EOPNOTSUPP;
7682
7683 sta_id = iwl_hw_find_station(priv, addr);
7684 if (sta_id == IWL_INVALID_STATION) {
7685 IWL_DEBUG_MAC80211("leave - " MAC_FMT " not in station map.\n",
7686 MAC_ARG(addr));
7687 return -EINVAL;
7688 }
7689
7690 mutex_lock(&priv->mutex);
7691
7692 switch (cmd) {
7693 case SET_KEY:
7694 rc = iwl_update_sta_key_info(priv, key, sta_id);
7695 if (!rc) {
7696 iwl_set_rxon_hwcrypto(priv, 1);
7697 iwl_commit_rxon(priv);
7698 key->hw_key_idx = sta_id;
7699 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
7700 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7701 }
7702 break;
7703 case DISABLE_KEY:
7704 rc = iwl_clear_sta_key_info(priv, sta_id);
7705 if (!rc) {
7706 iwl_set_rxon_hwcrypto(priv, 0);
7707 iwl_commit_rxon(priv);
7708 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7709 }
7710 break;
7711 default:
7712 rc = -EINVAL;
7713 }
7714
7715 IWL_DEBUG_MAC80211("leave\n");
7716 mutex_unlock(&priv->mutex);
7717
7718 return rc;
7719}
7720
7721static int iwl_mac_conf_tx(struct ieee80211_hw *hw, int queue,
7722 const struct ieee80211_tx_queue_params *params)
7723{
7724 struct iwl_priv *priv = hw->priv;
7725#ifdef CONFIG_IWLWIFI_QOS
7726 unsigned long flags;
7727 int q;
 7728#endif /* CONFIG_IWLWIFI_QOS */
7729
7730 IWL_DEBUG_MAC80211("enter\n");
7731
7732 if (!iwl_is_ready_rf(priv)) {
7733 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7734 return -EIO;
7735 }
7736
7737 if (queue >= AC_NUM) {
7738 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
7739 return 0;
7740 }
7741
7742#ifdef CONFIG_IWLWIFI_QOS
7743 if (!priv->qos_data.qos_enable) {
7744 priv->qos_data.qos_active = 0;
7745 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
7746 return 0;
7747 }
7748 q = AC_NUM - 1 - queue;
7749
7750 spin_lock_irqsave(&priv->lock, flags);
7751
7752 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
7753 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
7754 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
7755 priv->qos_data.def_qos_parm.ac[q].edca_txop =
7756 cpu_to_le16((params->burst_time * 100));
7757
7758 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
7759 priv->qos_data.qos_active = 1;
7760
7761 spin_unlock_irqrestore(&priv->lock, flags);
7762
7763 mutex_lock(&priv->mutex);
7764 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
7765 iwl_activate_qos(priv, 1);
7766 else if (priv->assoc_id && iwl_is_associated(priv))
7767 iwl_activate_qos(priv, 0);
7768
7769 mutex_unlock(&priv->mutex);
7770
7771#endif /*CONFIG_IWLWIFI_QOS */
7772
7773 IWL_DEBUG_MAC80211("leave\n");
7774 return 0;
7775}
7776
7777static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
7778 struct ieee80211_tx_queue_stats *stats)
7779{
7780 struct iwl_priv *priv = hw->priv;
7781 int i, avail;
7782 struct iwl_tx_queue *txq;
7783 struct iwl_queue *q;
7784 unsigned long flags;
7785
7786 IWL_DEBUG_MAC80211("enter\n");
7787
7788 if (!iwl_is_ready_rf(priv)) {
7789 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7790 return -EIO;
7791 }
7792
7793 spin_lock_irqsave(&priv->lock, flags);
7794
7795 for (i = 0; i < AC_NUM; i++) {
7796 txq = &priv->txq[i];
7797 q = &txq->q;
7798 avail = iwl_queue_space(q);
7799
7800 stats->data[i].len = q->n_window - avail;
7801 stats->data[i].limit = q->n_window - q->high_mark;
7802 stats->data[i].count = q->n_window;
7803
7804 }
7805 spin_unlock_irqrestore(&priv->lock, flags);
7806
7807 IWL_DEBUG_MAC80211("leave\n");
7808
7809 return 0;
7810}
7811
7812static int iwl_mac_get_stats(struct ieee80211_hw *hw,
7813 struct ieee80211_low_level_stats *stats)
7814{
7815 IWL_DEBUG_MAC80211("enter\n");
7816 IWL_DEBUG_MAC80211("leave\n");
7817
7818 return 0;
7819}
7820
7821static u64 iwl_mac_get_tsf(struct ieee80211_hw *hw)
7822{
7823 IWL_DEBUG_MAC80211("enter\n");
7824 IWL_DEBUG_MAC80211("leave\n");
7825
7826 return 0;
7827}
7828
7829static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
7830{
7831 struct iwl_priv *priv = hw->priv;
7832 unsigned long flags;
7833
7834 mutex_lock(&priv->mutex);
7835 IWL_DEBUG_MAC80211("enter\n");
7836
7837 priv->lq_mngr.lq_ready = 0;
7838#ifdef CONFIG_IWLWIFI_HT
7839 spin_lock_irqsave(&priv->lock, flags);
7840 memset(&priv->current_assoc_ht, 0, sizeof(struct sta_ht_info));
7841 spin_unlock_irqrestore(&priv->lock, flags);
7842#ifdef CONFIG_IWLWIFI_HT_AGG
7843/* if (priv->lq_mngr.agg_ctrl.granted_ba)
7844 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/
7845
7846 memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl_agg_control));
7847 priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10;
7848 priv->lq_mngr.agg_ctrl.ba_timeout = 5000;
7849 priv->lq_mngr.agg_ctrl.auto_agg = 1;
7850
7851 if (priv->lq_mngr.agg_ctrl.auto_agg)
7852 priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED;
7853#endif /*CONFIG_IWLWIFI_HT_AGG */
7854#endif /* CONFIG_IWLWIFI_HT */
7855
7856#ifdef CONFIG_IWLWIFI_QOS
7857 iwl_reset_qos(priv);
7858#endif
7859
7860 cancel_delayed_work(&priv->post_associate);
7861
7862 spin_lock_irqsave(&priv->lock, flags);
7863 priv->assoc_id = 0;
7864 priv->assoc_capability = 0;
7865 priv->call_post_assoc_from_beacon = 0;
7866 priv->assoc_station_added = 0;
7867
 7868	/* new association: get rid of the old IBSS beacon skb */
7869 if (priv->ibss_beacon)
7870 dev_kfree_skb(priv->ibss_beacon);
7871
7872 priv->ibss_beacon = NULL;
7873
7874 priv->beacon_int = priv->hw->conf.beacon_int;
7875 priv->timestamp1 = 0;
7876 priv->timestamp0 = 0;
 7877	if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
7878 priv->beacon_int = 0;
7879
7880 spin_unlock_irqrestore(&priv->lock, flags);
7881
7882 /* Per mac80211.h: This is only used in IBSS mode... */
7883 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
7884 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
7885 mutex_unlock(&priv->mutex);
7886 return;
7887 }
7888
7889 if (!iwl_is_ready_rf(priv)) {
7890 IWL_DEBUG_MAC80211("leave - not ready\n");
7891 mutex_unlock(&priv->mutex);
7892 return;
7893 }
7894
7895 priv->only_active_channel = 0;
7896
7897 iwl_set_rate(priv);
7898
7899 mutex_unlock(&priv->mutex);
7900
7901 IWL_DEBUG_MAC80211("leave\n");
7902
7903}
7904
7905static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
7906 struct ieee80211_tx_control *control)
7907{
7908 struct iwl_priv *priv = hw->priv;
7909 unsigned long flags;
7910
7911 mutex_lock(&priv->mutex);
7912 IWL_DEBUG_MAC80211("enter\n");
7913
7914 if (!iwl_is_ready_rf(priv)) {
7915 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7916 mutex_unlock(&priv->mutex);
7917 return -EIO;
7918 }
7919
7920 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
7921 IWL_DEBUG_MAC80211("leave - not IBSS\n");
7922 mutex_unlock(&priv->mutex);
7923 return -EIO;
7924 }
7925
7926 spin_lock_irqsave(&priv->lock, flags);
7927
7928 if (priv->ibss_beacon)
7929 dev_kfree_skb(priv->ibss_beacon);
7930
7931 priv->ibss_beacon = skb;
7932
7933 priv->assoc_id = 0;
7934
7935 IWL_DEBUG_MAC80211("leave\n");
7936 spin_unlock_irqrestore(&priv->lock, flags);
7937
7938#ifdef CONFIG_IWLWIFI_QOS
7939 iwl_reset_qos(priv);
7940#endif
7941
7942 queue_work(priv->workqueue, &priv->post_associate.work);
7943
7944 mutex_unlock(&priv->mutex);
7945
7946 return 0;
7947}
7948
7949#ifdef CONFIG_IWLWIFI_HT
7950union ht_cap_info {
7951 struct {
7952 u16 advanced_coding_cap :1;
7953 u16 supported_chan_width_set :1;
7954 u16 mimo_power_save_mode :2;
7955 u16 green_field :1;
7956 u16 short_GI20 :1;
7957 u16 short_GI40 :1;
7958 u16 tx_stbc :1;
7959 u16 rx_stbc :1;
7960 u16 beam_forming :1;
7961 u16 delayed_ba :1;
7962 u16 maximal_amsdu_size :1;
7963 u16 cck_mode_at_40MHz :1;
7964 u16 psmp_support :1;
7965 u16 stbc_ctrl_frame_support :1;
7966 u16 sig_txop_protection_support :1;
7967 };
7968 u16 val;
7969} __attribute__ ((packed));
7970
 7971union ht_param_info {
7972 struct {
7973 u8 max_rx_ampdu_factor :2;
7974 u8 mpdu_density :3;
7975 u8 reserved :3;
7976 };
7977 u8 val;
7978} __attribute__ ((packed));
7979
 7980union ht_extra_param_info {
7981 struct {
7982 u8 ext_chan_offset :2;
7983 u8 tx_chan_width :1;
7984 u8 rifs_mode :1;
7985 u8 controlled_access_only :1;
7986 u8 service_interval_granularity :3;
7987 };
7988 u8 val;
7989} __attribute__ ((packed));
7990
 7991union ht_operation_mode {
7992 struct {
7993 u16 op_mode :2;
7994 u16 non_GF :1;
7995 u16 reserved :13;
7996 };
7997 u16 val;
7998} __attribute__ ((packed));
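
/*
 * Illustrative sketch only (not part of the driver): the overlay unions
 * above let the code below treat the packed HT IE fields either as a raw
 * value or as named subfields.  For example, a hypothetical helper could
 * test the short-GI-at-20MHz bit of a capabilities_info field like this.
 * Note that C bitfield layout is compiler- and endian-dependent, which is
 * why these unions are kept private to this file.
 */
static inline int ht_cap_has_short_gi20(u16 capabilities_info)
{
	union ht_cap_info cap;

	cap.val = capabilities_info;	/* raw 16-bit view */
	return cap.short_GI20;		/* named bitfield view of the same bits */
}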
7999
8000
8001static int sta_ht_info_init(struct ieee80211_ht_capability *ht_cap,
8002 struct ieee80211_ht_additional_info *ht_extra,
8003 struct sta_ht_info *ht_info_ap,
8004 struct sta_ht_info *ht_info)
8005{
8006 union ht_cap_info cap;
8007 union ht_operation_mode op_mode;
8008 union ht_param_info param_info;
 8009	union ht_extra_param_info extra_param_info;
8010
8011 IWL_DEBUG_MAC80211("enter: \n");
8012
8013 if (!ht_info) {
8014 IWL_DEBUG_MAC80211("leave: ht_info is NULL\n");
8015 return -1;
8016 }
8017
8018 if (ht_cap) {
8019 cap.val = (u16) le16_to_cpu(ht_cap->capabilities_info);
8020 param_info.val = ht_cap->mac_ht_params_info;
8021 ht_info->is_ht = 1;
8022 if (cap.short_GI20)
8023 ht_info->sgf |= 0x1;
8024 if (cap.short_GI40)
8025 ht_info->sgf |= 0x2;
8026 ht_info->is_green_field = cap.green_field;
8027 ht_info->max_amsdu_size = cap.maximal_amsdu_size;
8028 ht_info->supported_chan_width = cap.supported_chan_width_set;
8029 ht_info->tx_mimo_ps_mode = cap.mimo_power_save_mode;
8030 memcpy(ht_info->supp_rates, ht_cap->supported_mcs_set, 16);
8031
8032 ht_info->ampdu_factor = param_info.max_rx_ampdu_factor;
8033 ht_info->mpdu_density = param_info.mpdu_density;
8034
8035 IWL_DEBUG_MAC80211("SISO mask 0x%X MIMO mask 0x%X \n",
8036 ht_cap->supported_mcs_set[0],
8037 ht_cap->supported_mcs_set[1]);
8038
8039 if (ht_info_ap) {
8040 ht_info->control_channel = ht_info_ap->control_channel;
8041 ht_info->extension_chan_offset =
8042 ht_info_ap->extension_chan_offset;
8043 ht_info->tx_chan_width = ht_info_ap->tx_chan_width;
8044 ht_info->operating_mode = ht_info_ap->operating_mode;
8045 }
8046
8047 if (ht_extra) {
8048 extra_param_info.val = ht_extra->ht_param;
8049 ht_info->control_channel = ht_extra->control_chan;
8050 ht_info->extension_chan_offset =
8051 extra_param_info.ext_chan_offset;
8052 ht_info->tx_chan_width = extra_param_info.tx_chan_width;
8053 op_mode.val = (u16)
8054 le16_to_cpu(ht_extra->operation_mode);
8055 ht_info->operating_mode = op_mode.op_mode;
8056 IWL_DEBUG_MAC80211("control channel %d\n",
8057 ht_extra->control_chan);
8058 }
8059 } else
8060 ht_info->is_ht = 0;
8061
8062 IWL_DEBUG_MAC80211("leave\n");
8063 return 0;
8064}
8065
8066static int iwl_mac_conf_ht(struct ieee80211_hw *hw,
8067 struct ieee80211_ht_capability *ht_cap,
8068 struct ieee80211_ht_additional_info *ht_extra)
8069{
8070 struct iwl_priv *priv = hw->priv;
8071 int rs;
8072
8073 IWL_DEBUG_MAC80211("enter: \n");
8074
8075 rs = sta_ht_info_init(ht_cap, ht_extra, NULL, &priv->current_assoc_ht);
8076 iwl4965_set_rxon_chain(priv);
8077
8078 if (priv && priv->assoc_id &&
8079 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
8080 unsigned long flags;
8081
8082 spin_lock_irqsave(&priv->lock, flags);
8083 if (priv->beacon_int)
8084 queue_work(priv->workqueue, &priv->post_associate.work);
8085 else
8086 priv->call_post_assoc_from_beacon = 1;
8087 spin_unlock_irqrestore(&priv->lock, flags);
8088 }
8089
8090 IWL_DEBUG_MAC80211("leave: control channel %d\n",
8091 ht_extra->control_chan);
8092 return rs;
8093
8094}
8095
8096static void iwl_set_ht_capab(struct ieee80211_hw *hw,
8097 struct ieee80211_ht_capability *ht_cap,
8098 u8 use_wide_chan)
8099{
8100 union ht_cap_info cap;
8101 union ht_param_info param_info;
8102
8103 memset(&cap, 0, sizeof(union ht_cap_info));
8104 memset(&param_info, 0, sizeof(union ht_param_info));
8105
8106 cap.maximal_amsdu_size = HT_IE_MAX_AMSDU_SIZE_4K;
8107 cap.green_field = 1;
8108 cap.short_GI20 = 1;
8109 cap.short_GI40 = 1;
8110 cap.supported_chan_width_set = use_wide_chan;
8111 cap.mimo_power_save_mode = 0x3;
8112
8113 param_info.max_rx_ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
8114 param_info.mpdu_density = CFG_HT_MPDU_DENSITY_DEF;
8115 ht_cap->capabilities_info = (__le16) cpu_to_le16(cap.val);
8116 ht_cap->mac_ht_params_info = (u8) param_info.val;
8117
8118 ht_cap->supported_mcs_set[0] = 0xff;
8119 ht_cap->supported_mcs_set[1] = 0xff;
8120 ht_cap->supported_mcs_set[4] =
8121 (cap.supported_chan_width_set) ? 0x1: 0x0;
8122}
8123
8124static void iwl_mac_get_ht_capab(struct ieee80211_hw *hw,
8125 struct ieee80211_ht_capability *ht_cap)
8126{
8127 u8 use_wide_channel = 1;
8128 struct iwl_priv *priv = hw->priv;
8129
8130 IWL_DEBUG_MAC80211("enter: \n");
8131 if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
8132 use_wide_channel = 0;
8133
 8134	/* no FAT (40 MHz) tx allowed on 2.4 GHz */
8135 if (priv->phymode != MODE_IEEE80211A)
8136 use_wide_channel = 0;
8137
8138 iwl_set_ht_capab(hw, ht_cap, use_wide_channel);
8139 IWL_DEBUG_MAC80211("leave: \n");
8140}
8141#endif /*CONFIG_IWLWIFI_HT*/
8142
8143/*****************************************************************************
8144 *
8145 * sysfs attributes
8146 *
8147 *****************************************************************************/
8148
8149#ifdef CONFIG_IWLWIFI_DEBUG
8150
 8151/*
 8152 * The following adds a new attribute to the sysfs representation
 8153 * of this driver (i.e. a new file in /sys/bus/pci/drivers/<DRV_NAME>/)
 8154 * used for controlling the debug level.
 8155 *
 8156 * See the debug level definitions in iwl-debug.h for details.
 8157 */
8158
8159static ssize_t show_debug_level(struct device_driver *d, char *buf)
8160{
8161 return sprintf(buf, "0x%08X\n", iwl_debug_level);
8162}
8163static ssize_t store_debug_level(struct device_driver *d,
8164 const char *buf, size_t count)
8165{
8166 char *p = (char *)buf;
8167 u32 val;
8168
8169 val = simple_strtoul(p, &p, 0);
8170 if (p == buf)
8171 printk(KERN_INFO DRV_NAME
8172 ": %s is not in hex or decimal form.\n", buf);
8173 else
8174 iwl_debug_level = val;
8175
8176 return strnlen(buf, count);
8177}
8178
8179static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
8180 show_debug_level, store_debug_level);
8181
8182#endif /* CONFIG_IWLWIFI_DEBUG */
8183
8184static ssize_t show_rf_kill(struct device *d,
8185 struct device_attribute *attr, char *buf)
8186{
8187 /*
8188 * 0 - RF kill not enabled
8189 * 1 - SW based RF kill active (sysfs)
8190 * 2 - HW based RF kill active
8191 * 3 - Both HW and SW based RF kill active
8192 */
8193 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8194 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
8195 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
8196
8197 return sprintf(buf, "%i\n", val);
8198}
8199
8200static ssize_t store_rf_kill(struct device *d,
8201 struct device_attribute *attr,
8202 const char *buf, size_t count)
8203{
8204 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8205
8206 mutex_lock(&priv->mutex);
8207 iwl_radio_kill_sw(priv, buf[0] == '1');
8208 mutex_unlock(&priv->mutex);
8209
8210 return count;
8211}
8212
8213static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
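
/*
 * Hypothetical user-space sketch (not driver code): reading the rf_kill
 * attribute and decoding the 0-3 value documented in show_rf_kill() above.
 * The attribute lives in the adapter's sysfs device directory; the PCI
 * address below is only an example.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/rf_kill";
	FILE *f = fopen(path, "r");
	int val;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &val) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("SW RF kill: %s, HW RF kill: %s\n",
	       (val & 0x1) ? "active" : "off",
	       (val & 0x2) ? "active" : "off");
	return 0;
}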
8214
8215static ssize_t show_temperature(struct device *d,
8216 struct device_attribute *attr, char *buf)
8217{
8218 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8219
8220 if (!iwl_is_alive(priv))
8221 return -EAGAIN;
8222
8223 return sprintf(buf, "%d\n", iwl_hw_get_temperature(priv));
8224}
8225
8226static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
8227
8228static ssize_t show_rs_window(struct device *d,
8229 struct device_attribute *attr,
8230 char *buf)
8231{
8232 struct iwl_priv *priv = d->driver_data;
8233 return iwl_fill_rs_info(priv->hw, buf, IWL_AP_ID);
8234}
8235static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
8236
8237static ssize_t show_tx_power(struct device *d,
8238 struct device_attribute *attr, char *buf)
8239{
8240 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8241 return sprintf(buf, "%d\n", priv->user_txpower_limit);
8242}
8243
8244static ssize_t store_tx_power(struct device *d,
8245 struct device_attribute *attr,
8246 const char *buf, size_t count)
8247{
8248 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8249 char *p = (char *)buf;
8250 u32 val;
8251
8252 val = simple_strtoul(p, &p, 10);
8253 if (p == buf)
8254 printk(KERN_INFO DRV_NAME
8255 ": %s is not in decimal form.\n", buf);
8256 else
8257 iwl_hw_reg_set_txpower(priv, val);
8258
8259 return count;
8260}
8261
8262static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
8263
8264static ssize_t show_flags(struct device *d,
8265 struct device_attribute *attr, char *buf)
8266{
8267 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8268
8269 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
8270}
8271
8272static ssize_t store_flags(struct device *d,
8273 struct device_attribute *attr,
8274 const char *buf, size_t count)
8275{
8276 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8277 u32 flags = simple_strtoul(buf, NULL, 0);
8278
8279 mutex_lock(&priv->mutex);
8280 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
8281 /* Cancel any currently running scans... */
8282 if (iwl_scan_cancel_timeout(priv, 100))
8283 IWL_WARNING("Could not cancel scan.\n");
8284 else {
8285 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
8286 flags);
8287 priv->staging_rxon.flags = cpu_to_le32(flags);
8288 iwl_commit_rxon(priv);
8289 }
8290 }
8291 mutex_unlock(&priv->mutex);
8292
8293 return count;
8294}
8295
8296static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
8297
8298static ssize_t show_filter_flags(struct device *d,
8299 struct device_attribute *attr, char *buf)
8300{
8301 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8302
8303 return sprintf(buf, "0x%04X\n",
8304 le32_to_cpu(priv->active_rxon.filter_flags));
8305}
8306
8307static ssize_t store_filter_flags(struct device *d,
8308 struct device_attribute *attr,
8309 const char *buf, size_t count)
8310{
8311 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8312 u32 filter_flags = simple_strtoul(buf, NULL, 0);
8313
8314 mutex_lock(&priv->mutex);
8315 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
8316 /* Cancel any currently running scans... */
8317 if (iwl_scan_cancel_timeout(priv, 100))
8318 IWL_WARNING("Could not cancel scan.\n");
8319 else {
8320 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
8321 "0x%04X\n", filter_flags);
8322 priv->staging_rxon.filter_flags =
8323 cpu_to_le32(filter_flags);
8324 iwl_commit_rxon(priv);
8325 }
8326 }
8327 mutex_unlock(&priv->mutex);
8328
8329 return count;
8330}
8331
8332static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
8333 store_filter_flags);
8334
8335static ssize_t show_tune(struct device *d,
8336 struct device_attribute *attr, char *buf)
8337{
8338 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8339
8340 return sprintf(buf, "0x%04X\n",
8341 (priv->phymode << 8) |
8342 le16_to_cpu(priv->active_rxon.channel));
8343}
8344
8345static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode);
8346
8347static ssize_t store_tune(struct device *d,
8348 struct device_attribute *attr,
8349 const char *buf, size_t count)
8350{
8351 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8352 char *p = (char *)buf;
8353 u16 tune = simple_strtoul(p, &p, 0);
8354 u8 phymode = (tune >> 8) & 0xff;
8355 u16 channel = tune & 0xff;
8356
8357 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
8358
8359 mutex_lock(&priv->mutex);
8360 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
8361 (priv->phymode != phymode)) {
8362 const struct iwl_channel_info *ch_info;
8363
8364 ch_info = iwl_get_channel_info(priv, phymode, channel);
8365 if (!ch_info) {
8366 IWL_WARNING("Requested invalid phymode/channel "
8367 "combination: %d %d\n", phymode, channel);
8368 mutex_unlock(&priv->mutex);
8369 return -EINVAL;
8370 }
8371
8372 /* Cancel any currently running scans... */
8373 if (iwl_scan_cancel_timeout(priv, 100))
8374 IWL_WARNING("Could not cancel scan.\n");
8375 else {
8376 IWL_DEBUG_INFO("Committing phymode and "
8377 "rxon.channel = %d %d\n",
8378 phymode, channel);
8379
8380 iwl_set_rxon_channel(priv, phymode, channel);
8381 iwl_set_flags_for_phymode(priv, phymode);
8382
8383 iwl_set_rate(priv);
8384 iwl_commit_rxon(priv);
8385 }
8386 }
8387 mutex_unlock(&priv->mutex);
8388
8389 return count;
8390}
8391
8392static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
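
/*
 * Illustrative only: the "tune" attribute packs phymode and channel into a
 * single 16-bit value, exactly as show_tune() and store_tune() above do
 * (phymode in the high byte, channel number in the low byte).  The helper
 * names are hypothetical.
 */
static inline u16 iwl_pack_tune(u8 phymode, u8 channel)
{
	return (phymode << 8) | channel;
}

static inline void iwl_unpack_tune(u16 tune, u8 *phymode, u8 *channel)
{
	*phymode = (tune >> 8) & 0xff;
	*channel = tune & 0xff;
}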
8393
8394#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
8395
8396static ssize_t show_measurement(struct device *d,
8397 struct device_attribute *attr, char *buf)
8398{
8399 struct iwl_priv *priv = dev_get_drvdata(d);
8400 struct iwl_spectrum_notification measure_report;
8401 u32 size = sizeof(measure_report), len = 0, ofs = 0;
 8402	u8 *data = (u8 *)&measure_report;
8403 unsigned long flags;
8404
8405 spin_lock_irqsave(&priv->lock, flags);
8406 if (!(priv->measurement_status & MEASUREMENT_READY)) {
8407 spin_unlock_irqrestore(&priv->lock, flags);
8408 return 0;
8409 }
8410 memcpy(&measure_report, &priv->measure_report, size);
8411 priv->measurement_status = 0;
8412 spin_unlock_irqrestore(&priv->lock, flags);
8413
8414 while (size && (PAGE_SIZE - len)) {
8415 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8416 PAGE_SIZE - len, 1);
8417 len = strlen(buf);
8418 if (PAGE_SIZE - len)
8419 buf[len++] = '\n';
8420
8421 ofs += 16;
8422 size -= min(size, 16U);
8423 }
8424
8425 return len;
8426}
8427
8428static ssize_t store_measurement(struct device *d,
8429 struct device_attribute *attr,
8430 const char *buf, size_t count)
8431{
8432 struct iwl_priv *priv = dev_get_drvdata(d);
8433 struct ieee80211_measurement_params params = {
8434 .channel = le16_to_cpu(priv->active_rxon.channel),
8435 .start_time = cpu_to_le64(priv->last_tsf),
8436 .duration = cpu_to_le16(1),
8437 };
8438 u8 type = IWL_MEASURE_BASIC;
 8439	char buffer[32];
8440 u8 channel;
8441
8442 if (count) {
8443 char *p = buffer;
 8444		strlcpy(buffer, buf, min(sizeof(buffer), count + 1));
8445 channel = simple_strtoul(p, NULL, 0);
8446 if (channel)
8447 params.channel = channel;
8448
8449 p = buffer;
8450 while (*p && *p != ' ')
8451 p++;
8452 if (*p)
8453 type = simple_strtoul(p + 1, NULL, 0);
8454 }
8455
8456 IWL_DEBUG_INFO("Invoking measurement of type %d on "
8457 "channel %d (for '%s')\n", type, params.channel, buf);
8458 iwl_get_measurement(priv, &params, type);
8459
8460 return count;
8461}
8462
8463static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
8464 show_measurement, store_measurement);
8465#endif /* CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT */
8466
8467static ssize_t store_retry_rate(struct device *d,
8468 struct device_attribute *attr,
8469 const char *buf, size_t count)
8470{
8471 struct iwl_priv *priv = dev_get_drvdata(d);
8472
8473 priv->retry_rate = simple_strtoul(buf, NULL, 0);
8474 if (priv->retry_rate <= 0)
8475 priv->retry_rate = 1;
8476
8477 return count;
8478}
8479
8480static ssize_t show_retry_rate(struct device *d,
8481 struct device_attribute *attr, char *buf)
8482{
8483 struct iwl_priv *priv = dev_get_drvdata(d);
8484 return sprintf(buf, "%d", priv->retry_rate);
8485}
8486
8487static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
8488 store_retry_rate);
8489
8490static ssize_t store_power_level(struct device *d,
8491 struct device_attribute *attr,
8492 const char *buf, size_t count)
8493{
8494 struct iwl_priv *priv = dev_get_drvdata(d);
8495 int rc;
8496 int mode;
8497
8498 mode = simple_strtoul(buf, NULL, 0);
8499 mutex_lock(&priv->mutex);
8500
8501 if (!iwl_is_ready(priv)) {
8502 rc = -EAGAIN;
8503 goto out;
8504 }
8505
8506 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
8507 mode = IWL_POWER_AC;
8508 else
8509 mode |= IWL_POWER_ENABLED;
8510
8511 if (mode != priv->power_mode) {
8512 rc = iwl_send_power_mode(priv, IWL_POWER_LEVEL(mode));
8513 if (rc) {
8514 IWL_DEBUG_MAC80211("failed setting power mode.\n");
8515 goto out;
8516 }
8517 priv->power_mode = mode;
8518 }
8519
8520 rc = count;
8521
8522 out:
8523 mutex_unlock(&priv->mutex);
8524 return rc;
8525}
8526
8527#define MAX_WX_STRING 80
8528
 8529/* Values are in microseconds */
8530static const s32 timeout_duration[] = {
8531 350000,
8532 250000,
8533 75000,
8534 37000,
8535 25000,
8536};
8537static const s32 period_duration[] = {
8538 400000,
8539 700000,
8540 1000000,
8541 1000000,
8542 1000000
8543};
8544
8545static ssize_t show_power_level(struct device *d,
8546 struct device_attribute *attr, char *buf)
8547{
8548 struct iwl_priv *priv = dev_get_drvdata(d);
8549 int level = IWL_POWER_LEVEL(priv->power_mode);
8550 char *p = buf;
8551
8552 p += sprintf(p, "%d ", level);
8553 switch (level) {
8554 case IWL_POWER_MODE_CAM:
8555 case IWL_POWER_AC:
8556 p += sprintf(p, "(AC)");
8557 break;
8558 case IWL_POWER_BATTERY:
8559 p += sprintf(p, "(BATTERY)");
8560 break;
8561 default:
8562 p += sprintf(p,
8563 "(Timeout %dms, Period %dms)",
8564 timeout_duration[level - 1] / 1000,
8565 period_duration[level - 1] / 1000);
8566 }
8567
8568 if (!(priv->power_mode & IWL_POWER_ENABLED))
8569 p += sprintf(p, " OFF\n");
8570 else
8571 p += sprintf(p, " \n");
8572
8573 return (p - buf + 1);
8574
8575}
8576
8577static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8578 store_power_level);
8579
8580static ssize_t show_channels(struct device *d,
8581 struct device_attribute *attr, char *buf)
8582{
8583 struct iwl_priv *priv = dev_get_drvdata(d);
8584 int len = 0, i;
8585 struct ieee80211_channel *channels = NULL;
8586 const struct ieee80211_hw_mode *hw_mode = NULL;
8587 int count = 0;
8588
8589 if (!iwl_is_ready(priv))
8590 return -EAGAIN;
8591
8592 hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211G);
8593 if (!hw_mode)
8594 hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211B);
8595 if (hw_mode) {
8596 channels = hw_mode->channels;
8597 count = hw_mode->num_channels;
8598 }
8599
8600 len +=
8601 sprintf(&buf[len],
8602 "Displaying %d channels in 2.4GHz band "
8603 "(802.11bg):\n", count);
8604
 8605	for (i = 0; i < count; i++)
 8606		len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
 8607			       channels[i].chan,
 8608			       channels[i].power_level,
 8609			       (channels[i].flag &
 8610				IEEE80211_CHAN_W_RADAR_DETECT) ?
 8611			       " (IEEE 802.11h required)" : "",
 8612			       (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) ||
 8613				(channels[i].flag &
 8614				 IEEE80211_CHAN_W_RADAR_DETECT)) ?
 8615			       "" : ", IBSS",
 8616			       (channels[i].flag &
 8617				IEEE80211_CHAN_W_ACTIVE_SCAN) ?
 8618			       "active/passive" :
 8619			       "passive only");
8620
8621 hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211A);
8622 if (hw_mode) {
8623 channels = hw_mode->channels;
8624 count = hw_mode->num_channels;
8625 } else {
8626 channels = NULL;
8627 count = 0;
8628 }
8629
8630 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8631 "(802.11a):\n", count);
8632
 8633	for (i = 0; i < count; i++)
 8634		len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
 8635			       channels[i].chan,
 8636			       channels[i].power_level,
 8637			       (channels[i].flag &
 8638				IEEE80211_CHAN_W_RADAR_DETECT) ?
 8639			       " (IEEE 802.11h required)" : "",
 8640			       (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) ||
 8641				(channels[i].flag &
 8642				 IEEE80211_CHAN_W_RADAR_DETECT)) ?
 8643			       "" : ", IBSS",
 8644			       (channels[i].flag &
 8645				IEEE80211_CHAN_W_ACTIVE_SCAN) ?
 8646			       "active/passive" :
 8647			       "passive only");
8648
8649 return len;
8650}
8651
8652static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8653
8654static ssize_t show_statistics(struct device *d,
8655 struct device_attribute *attr, char *buf)
8656{
8657 struct iwl_priv *priv = dev_get_drvdata(d);
8658 u32 size = sizeof(struct iwl_notif_statistics);
8659 u32 len = 0, ofs = 0;
 8660	u8 *data = (u8 *)&priv->statistics;
8661 int rc = 0;
8662
8663 if (!iwl_is_alive(priv))
8664 return -EAGAIN;
8665
8666 mutex_lock(&priv->mutex);
8667 rc = iwl_send_statistics_request(priv);
8668 mutex_unlock(&priv->mutex);
8669
8670 if (rc) {
8671 len = sprintf(buf,
8672 "Error sending statistics request: 0x%08X\n", rc);
8673 return len;
8674 }
8675
8676 while (size && (PAGE_SIZE - len)) {
8677 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8678 PAGE_SIZE - len, 1);
8679 len = strlen(buf);
8680 if (PAGE_SIZE - len)
8681 buf[len++] = '\n';
8682
8683 ofs += 16;
8684 size -= min(size, 16U);
8685 }
8686
8687 return len;
8688}
8689
8690static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
8691
8692static ssize_t show_antenna(struct device *d,
8693 struct device_attribute *attr, char *buf)
8694{
8695 struct iwl_priv *priv = dev_get_drvdata(d);
8696
8697 if (!iwl_is_alive(priv))
8698 return -EAGAIN;
8699
8700 return sprintf(buf, "%d\n", priv->antenna);
8701}
8702
8703static ssize_t store_antenna(struct device *d,
8704 struct device_attribute *attr,
8705 const char *buf, size_t count)
8706{
8707 int ant;
8708 struct iwl_priv *priv = dev_get_drvdata(d);
8709
8710 if (count == 0)
8711 return 0;
8712
8713 if (sscanf(buf, "%1i", &ant) != 1) {
8714 IWL_DEBUG_INFO("not in hex or decimal form.\n");
8715 return count;
8716 }
8717
8718 if ((ant >= 0) && (ant <= 2)) {
8719 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
8720 priv->antenna = (enum iwl_antenna)ant;
8721 } else
8722 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
8723
8724
8725 return count;
8726}
8727
8728static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
8729
8730static ssize_t show_status(struct device *d,
8731 struct device_attribute *attr, char *buf)
8732{
8733 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8734 if (!iwl_is_alive(priv))
8735 return -EAGAIN;
8736 return sprintf(buf, "0x%08x\n", (int)priv->status);
8737}
8738
8739static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
8740
8741static ssize_t dump_error_log(struct device *d,
8742 struct device_attribute *attr,
8743 const char *buf, size_t count)
8744{
8745 char *p = (char *)buf;
8746
8747 if (p[0] == '1')
8748 iwl_dump_nic_error_log((struct iwl_priv *)d->driver_data);
8749
8750 return strnlen(buf, count);
8751}
8752
8753static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
8754
8755static ssize_t dump_event_log(struct device *d,
8756 struct device_attribute *attr,
8757 const char *buf, size_t count)
8758{
8759 char *p = (char *)buf;
8760
8761 if (p[0] == '1')
8762 iwl_dump_nic_event_log((struct iwl_priv *)d->driver_data);
8763
8764 return strnlen(buf, count);
8765}
8766
8767static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
8768
8769/*****************************************************************************
8770 *
8771 * driver setup and teardown
8772 *
8773 *****************************************************************************/
8774
8775static void iwl_setup_deferred_work(struct iwl_priv *priv)
8776{
8777 priv->workqueue = create_workqueue(DRV_NAME);
8778
8779 init_waitqueue_head(&priv->wait_command_queue);
8780
8781 INIT_WORK(&priv->up, iwl_bg_up);
8782 INIT_WORK(&priv->restart, iwl_bg_restart);
8783 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
8784 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
8785 INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
8786 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
8787 INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
8788 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
8789 INIT_DELAYED_WORK(&priv->post_associate, iwl_bg_post_associate);
8790 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
8791 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
8792 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
8793
8794 iwl_hw_setup_deferred_work(priv);
8795
8796 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
8797 iwl_irq_tasklet, (unsigned long)priv);
8798}
8799
8800static void iwl_cancel_deferred_work(struct iwl_priv *priv)
8801{
8802 iwl_hw_cancel_deferred_work(priv);
8803
8804 cancel_delayed_work(&priv->scan_check);
8805 cancel_delayed_work(&priv->alive_start);
8806 cancel_delayed_work(&priv->post_associate);
8807 cancel_work_sync(&priv->beacon_update);
8808}
8809
8810static struct attribute *iwl_sysfs_entries[] = {
8811 &dev_attr_antenna.attr,
8812 &dev_attr_channels.attr,
8813 &dev_attr_dump_errors.attr,
8814 &dev_attr_dump_events.attr,
8815 &dev_attr_flags.attr,
8816 &dev_attr_filter_flags.attr,
8817#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
8818 &dev_attr_measurement.attr,
8819#endif
8820 &dev_attr_power_level.attr,
8821 &dev_attr_retry_rate.attr,
8822 &dev_attr_rf_kill.attr,
8823 &dev_attr_rs_window.attr,
8824 &dev_attr_statistics.attr,
8825 &dev_attr_status.attr,
8826 &dev_attr_temperature.attr,
8827 &dev_attr_tune.attr,
8828 &dev_attr_tx_power.attr,
8829
8830 NULL
8831};
8832
8833static struct attribute_group iwl_attribute_group = {
8834 .name = NULL, /* put in device directory */
8835 .attrs = iwl_sysfs_entries,
8836};
8837
8838static struct ieee80211_ops iwl_hw_ops = {
8839 .tx = iwl_mac_tx,
8840 .open = iwl_mac_open,
8841 .stop = iwl_mac_stop,
8842 .add_interface = iwl_mac_add_interface,
8843 .remove_interface = iwl_mac_remove_interface,
8844 .config = iwl_mac_config,
8845 .config_interface = iwl_mac_config_interface,
8846 .set_key = iwl_mac_set_key,
8847 .get_stats = iwl_mac_get_stats,
8848 .get_tx_stats = iwl_mac_get_tx_stats,
8849 .conf_tx = iwl_mac_conf_tx,
8850 .get_tsf = iwl_mac_get_tsf,
8851 .reset_tsf = iwl_mac_reset_tsf,
8852 .beacon_update = iwl_mac_beacon_update,
8853#ifdef CONFIG_IWLWIFI_HT
8854 .conf_ht = iwl_mac_conf_ht,
8855 .get_ht_capab = iwl_mac_get_ht_capab,
8856#ifdef CONFIG_IWLWIFI_HT_AGG
8857 .ht_tx_agg_start = iwl_mac_ht_tx_agg_start,
8858 .ht_tx_agg_stop = iwl_mac_ht_tx_agg_stop,
8859 .ht_rx_agg_start = iwl_mac_ht_rx_agg_start,
8860 .ht_rx_agg_stop = iwl_mac_ht_rx_agg_stop,
8861#endif /* CONFIG_IWLWIFI_HT_AGG */
8862#endif /* CONFIG_IWLWIFI_HT */
8863 .hw_scan = iwl_mac_hw_scan
8864};
8865
8866static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8867{
8868 int err = 0;
8869 struct iwl_priv *priv;
8870 struct ieee80211_hw *hw;
8871 int i;
8872
8873 if (iwl_param_disable_hw_scan) {
8874 IWL_DEBUG_INFO("Disabling hw_scan\n");
8875 iwl_hw_ops.hw_scan = NULL;
8876 }
8877
8878 if ((iwl_param_queues_num > IWL_MAX_NUM_QUEUES) ||
8879 (iwl_param_queues_num < IWL_MIN_NUM_QUEUES)) {
8880 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
8881 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
8882 err = -EINVAL;
8883 goto out;
8884 }
8885
8886 /* mac80211 allocates memory for this device instance, including
8887 * space for this driver's private structure */
8888 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_hw_ops);
8889 if (hw == NULL) {
8890 IWL_ERROR("Can not allocate network device\n");
8891 err = -ENOMEM;
8892 goto out;
8893 }
8894 SET_IEEE80211_DEV(hw, &pdev->dev);
8895
8896 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
8897 priv = hw->priv;
8898 priv->hw = hw;
8899
8900 priv->pci_dev = pdev;
8901 priv->antenna = (enum iwl_antenna)iwl_param_antenna;
8902#ifdef CONFIG_IWLWIFI_DEBUG
8903 iwl_debug_level = iwl_param_debug;
8904 atomic_set(&priv->restrict_refcnt, 0);
8905#endif
8906 priv->retry_rate = 1;
8907
8908 priv->ibss_beacon = NULL;
8909
8910 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
8911 * the range of signal quality values that we'll provide.
8912 * Negative values for level/noise indicate that we'll provide dBm.
8913 * For WE, at least, non-0 values here *enable* display of values
8914 * in app (iwconfig). */
8915 hw->max_rssi = -20; /* signal level, negative indicates dBm */
8916 hw->max_noise = -20; /* noise level, negative indicates dBm */
8917 hw->max_signal = 100; /* link quality indication (%) */
8918
8919 /* Tell mac80211 our Tx characteristics */
8920 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
8921
8922 hw->queues = 4;
8923#ifdef CONFIG_IWLWIFI_HT
8924#ifdef CONFIG_IWLWIFI_HT_AGG
8925 hw->queues = 16;
8926#endif /* CONFIG_IWLWIFI_HT_AGG */
8927#endif /* CONFIG_IWLWIFI_HT */
8928
8929 spin_lock_init(&priv->lock);
8930 spin_lock_init(&priv->power_data.lock);
8931 spin_lock_init(&priv->sta_lock);
8932 spin_lock_init(&priv->hcmd_lock);
8933 spin_lock_init(&priv->lq_mngr.lock);
8934
8935 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
8936 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
8937
8938 INIT_LIST_HEAD(&priv->free_frames);
8939
8940 mutex_init(&priv->mutex);
8941 if (pci_enable_device(pdev)) {
8942 err = -ENODEV;
8943 goto out_ieee80211_free_hw;
8944 }
8945
8946 pci_set_master(pdev);
8947
8948 iwl_clear_stations_table(priv);
8949
8950 priv->data_retry_limit = -1;
8951 priv->ieee_channels = NULL;
8952 priv->ieee_rates = NULL;
8953 priv->phymode = -1;
8954
8955 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
8956 if (!err)
8957 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
8958 if (err) {
8959 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
8960 goto out_pci_disable_device;
8961 }
8962
8963 pci_set_drvdata(pdev, priv);
8964 err = pci_request_regions(pdev, DRV_NAME);
8965 if (err)
8966 goto out_pci_disable_device;
8967 /* We disable the RETRY_TIMEOUT register (0x41) to keep
8968 * PCI Tx retries from interfering with C3 CPU state */
8969 pci_write_config_byte(pdev, 0x41, 0x00);
8970 priv->hw_base = pci_iomap(pdev, 0, 0);
8971 if (!priv->hw_base) {
8972 err = -ENODEV;
8973 goto out_pci_release_regions;
8974 }
8975
8976 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
8977 (unsigned long long) pci_resource_len(pdev, 0));
8978 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
8979
8980 /* Initialize module parameter values here */
8981
8982 if (iwl_param_disable) {
8983 set_bit(STATUS_RF_KILL_SW, &priv->status);
8984 IWL_DEBUG_INFO("Radio disabled.\n");
8985 }
8986
8987 priv->iw_mode = IEEE80211_IF_TYPE_STA;
8988
8989 priv->ps_mode = 0;
8990 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
8991 priv->is_ht_enabled = 1;
8992 priv->channel_width = IWL_CHANNEL_WIDTH_40MHZ;
8993 priv->valid_antenna = 0x7; /* assume all 3 connected */
8994 priv->ps_mode = IWL_MIMO_PS_NONE;
8995 priv->cck_power_index_compensation = iwl_read32(
8996 priv, CSR_HW_REV_WA_REG);
8997
8998 iwl4965_set_rxon_chain(priv);
8999
9000 printk(KERN_INFO DRV_NAME
9001 ": Detected Intel Wireless WiFi Link 4965AGN\n");
9002
9003 /* Device-specific setup */
9004 if (iwl_hw_set_hw_setting(priv)) {
9005 IWL_ERROR("failed to set hw settings\n");
9007 goto out_iounmap;
9008 }
9009
9010#ifdef CONFIG_IWLWIFI_QOS
9011 if (iwl_param_qos_enable)
9012 priv->qos_data.qos_enable = 1;
9013
9014 iwl_reset_qos(priv);
9015
9016 priv->qos_data.qos_active = 0;
9017 priv->qos_data.qos_cap.val = 0;
9018#endif /* CONFIG_IWLWIFI_QOS */
9019
9020 iwl_set_rxon_channel(priv, MODE_IEEE80211G, 6);
9021 iwl_setup_deferred_work(priv);
9022 iwl_setup_rx_handlers(priv);
9023
9024 priv->rates_mask = IWL_RATES_MASK;
9025 /* If power management is turned on, default to AC mode */
9026 priv->power_mode = IWL_POWER_AC;
9027 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
9028
9029 pci_enable_msi(pdev);
9030
9031 err = request_irq(pdev->irq, iwl_isr, IRQF_SHARED, DRV_NAME, priv);
9032 if (err) {
9033 IWL_ERROR("Error allocating IRQ %d\n", pdev->irq);
9034 goto out_disable_msi;
9035 }
9036
9037 mutex_lock(&priv->mutex);
9038
9039 err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group);
9040 if (err) {
9041 IWL_ERROR("failed to create sysfs device attributes\n");
9042 mutex_unlock(&priv->mutex);
9043 goto out_release_irq;
9044 }
9045
9046 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
9047 * ucode filename and max sizes are card-specific. */
9048 err = iwl_read_ucode(priv);
9049 if (err) {
9050 IWL_ERROR("Could not read microcode: %d\n", err);
9051 mutex_unlock(&priv->mutex);
9052 goto out_pci_alloc;
9053 }
9054
9055 mutex_unlock(&priv->mutex);
9056
 9057	IWL_DEBUG_INFO("Queueing UP work.\n");
9058
9059 queue_work(priv->workqueue, &priv->up);
9060
9061 return 0;
9062
9063 out_pci_alloc:
9064 iwl_dealloc_ucode_pci(priv);
9065
9066 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
9067
9068 out_release_irq:
9069 free_irq(pdev->irq, priv);
9070
9071 out_disable_msi:
9072 pci_disable_msi(pdev);
9073 destroy_workqueue(priv->workqueue);
9074 priv->workqueue = NULL;
9075 iwl_unset_hw_setting(priv);
9076
9077 out_iounmap:
9078 pci_iounmap(pdev, priv->hw_base);
9079 out_pci_release_regions:
9080 pci_release_regions(pdev);
9081 out_pci_disable_device:
9082 pci_disable_device(pdev);
9083 pci_set_drvdata(pdev, NULL);
9084 out_ieee80211_free_hw:
9085 ieee80211_free_hw(priv->hw);
9086 out:
9087 return err;
9088}
9089
9090static void iwl_pci_remove(struct pci_dev *pdev)
9091{
9092 struct iwl_priv *priv = pci_get_drvdata(pdev);
9093 struct list_head *p, *q;
9094 int i;
9095
9096 if (!priv)
9097 return;
9098
9099 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
9100
9101 mutex_lock(&priv->mutex);
9102 set_bit(STATUS_EXIT_PENDING, &priv->status);
9103 __iwl_down(priv);
9104 mutex_unlock(&priv->mutex);
9105
9106 /* Free MAC hash list for ADHOC */
9107 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
9108 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
9109 list_del(p);
9110 kfree(list_entry(p, struct iwl_ibss_seq, list));
9111 }
9112 }
9113
9114 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
9115
9116 iwl_dealloc_ucode_pci(priv);
9117
9118 if (priv->rxq.bd)
9119 iwl_rx_queue_free(priv, &priv->rxq);
9120 iwl_hw_txq_ctx_free(priv);
9121
9122 iwl_unset_hw_setting(priv);
9123 iwl_clear_stations_table(priv);
9124
9125 if (priv->mac80211_registered) {
9126 ieee80211_unregister_hw(priv->hw);
9127 iwl_rate_control_unregister(priv->hw);
9128 }
9129
9130 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
9131 * priv->workqueue... so we can't take down the workqueue
9132 * until now... */
9133 destroy_workqueue(priv->workqueue);
9134 priv->workqueue = NULL;
9135
9136 free_irq(pdev->irq, priv);
9137 pci_disable_msi(pdev);
9138 pci_iounmap(pdev, priv->hw_base);
9139 pci_release_regions(pdev);
9140 pci_disable_device(pdev);
9141 pci_set_drvdata(pdev, NULL);
9142
9143 kfree(priv->channel_info);
9144
9145 kfree(priv->ieee_channels);
9146 kfree(priv->ieee_rates);
9147
9148 if (priv->ibss_beacon)
9149 dev_kfree_skb(priv->ibss_beacon);
9150
9151 ieee80211_free_hw(priv->hw);
9152}
9153
9154#ifdef CONFIG_PM
9155
9156static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
9157{
9158 struct iwl_priv *priv = pci_get_drvdata(pdev);
9159
9160 mutex_lock(&priv->mutex);
9161
9162 set_bit(STATUS_IN_SUSPEND, &priv->status);
9163
9164 /* Take down the device; powers it off, etc. */
9165 __iwl_down(priv);
9166
9167 if (priv->mac80211_registered)
9168 ieee80211_stop_queues(priv->hw);
9169
9170 pci_save_state(pdev);
9171 pci_disable_device(pdev);
9172 pci_set_power_state(pdev, PCI_D3hot);
9173
9174 mutex_unlock(&priv->mutex);
9175
9176 return 0;
9177}
9178
9179static void iwl_resume(struct iwl_priv *priv)
9180{
9181 unsigned long flags;
9182
 9183	/* The following is a temporary workaround: suspend/resume does
 9184	 * not fully reinitialize the NIC correctly.  Without all of the
 9185	 * following, resume will not attempt to take down the NIC (it
 9186	 * shouldn't really need to) and will just try to bring the NIC
 9187	 * back up.  However, that fails during the ucode verification
 9188	 * process.  This then causes iwl_down to be called *after*
 9189	 * iwl_hw_nic_init() has succeeded -- which then lets the next
 9190	 * init sequence succeed.  So, we've replicated all of that NIC
 9191	 * init code here... */
9192
9193 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
9194
9195 iwl_hw_nic_init(priv);
9196
9197 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9198 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
9199 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
9200 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
9201 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9202 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9203
9204 /* tell the device to stop sending interrupts */
9205 iwl_disable_interrupts(priv);
9206
9207 spin_lock_irqsave(&priv->lock, flags);
9208 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
9209
9210 if (!iwl_grab_restricted_access(priv)) {
9211 iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG,
9212 APMG_CLK_VAL_DMA_CLK_RQT);
9213 iwl_release_restricted_access(priv);
9214 }
9215 spin_unlock_irqrestore(&priv->lock, flags);
9216
9217 udelay(5);
9218
9219 iwl_hw_nic_reset(priv);
9220
9221 /* Bring the device back up */
9222 clear_bit(STATUS_IN_SUSPEND, &priv->status);
9223 queue_work(priv->workqueue, &priv->up);
9224}
9225
9226static int iwl_pci_resume(struct pci_dev *pdev)
9227{
9228 struct iwl_priv *priv = pci_get_drvdata(pdev);
9229 int err;
9230
 9231	printk(KERN_INFO DRV_NAME ": Coming out of suspend...\n");
9232
9233 mutex_lock(&priv->mutex);
9234
9235 pci_set_power_state(pdev, PCI_D0);
9236 err = pci_enable_device(pdev);
9237 pci_restore_state(pdev);
9238
9239 /*
9240 * Suspend/Resume resets the PCI configuration space, so we have to
9241 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
9242 * from interfering with C3 CPU state. pci_restore_state won't help
9243 * here since it only restores the first 64 bytes pci config header.
9244 */
9245 pci_write_config_byte(pdev, 0x41, 0x00);
9246
9247 iwl_resume(priv);
9248 mutex_unlock(&priv->mutex);
9249
9250 return 0;
9251}
9252
9253#endif /* CONFIG_PM */
9254
9255/*****************************************************************************
9256 *
9257 * driver and module entry point
9258 *
9259 *****************************************************************************/
9260
9261static struct pci_driver iwl_driver = {
9262 .name = DRV_NAME,
9263 .id_table = iwl_hw_card_ids,
9264 .probe = iwl_pci_probe,
9265 .remove = __devexit_p(iwl_pci_remove),
9266#ifdef CONFIG_PM
9267 .suspend = iwl_pci_suspend,
9268 .resume = iwl_pci_resume,
9269#endif
9270};
9271
9272static int __init iwl_init(void)
9273{
9274
9275 int ret;
9276 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
9277 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
9278 ret = pci_register_driver(&iwl_driver);
9279 if (ret) {
9280 IWL_ERROR("Unable to initialize PCI module\n");
9281 return ret;
9282 }
9283#ifdef CONFIG_IWLWIFI_DEBUG
9284 ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level);
9285 if (ret) {
9286 IWL_ERROR("Unable to create driver sysfs file\n");
9287 pci_unregister_driver(&iwl_driver);
9288 return ret;
9289 }
9290#endif
9291
9292 return ret;
9293}
9294
9295static void __exit iwl_exit(void)
9296{
9297#ifdef CONFIG_IWLWIFI_DEBUG
9298 driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level);
9299#endif
9300 pci_unregister_driver(&iwl_driver);
9301}
9302
9303module_param_named(antenna, iwl_param_antenna, int, 0444);
9304MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
9305module_param_named(disable, iwl_param_disable, int, 0444);
9306MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
9307module_param_named(hwcrypto, iwl_param_hwcrypto, int, 0444);
9308MODULE_PARM_DESC(hwcrypto,
 9309		 "use hardware crypto engine (default 0 [software])");
9310module_param_named(debug, iwl_param_debug, int, 0444);
9311MODULE_PARM_DESC(debug, "debug output mask");
9312module_param_named(disable_hw_scan, iwl_param_disable_hw_scan, int, 0444);
9313MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
9314
9315module_param_named(queues_num, iwl_param_queues_num, int, 0444);
9316MODULE_PARM_DESC(queues_num, "number of hw queues.");
9317
9318/* QoS */
9319module_param_named(qos_enable, iwl_param_qos_enable, int, 0444);
9320MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
9321
9322module_exit(iwl_exit);
9323module_init(iwl_init);
diff --git a/drivers/net/wireless/iwlwifi/iwlwifi.h b/drivers/net/wireless/iwlwifi/iwlwifi.h
new file mode 100644
index 000000000000..00c79e200c68
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwlwifi.h
@@ -0,0 +1,713 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwlwifi_h__
31#define __iwlwifi_h__
32
33#include <linux/pci.h> /* for struct pci_device_id */
34#include <linux/kernel.h>
35#include <net/ieee80211_radiotap.h>
36
37struct iwl_priv;
38
39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern struct pci_device_id iwl_hw_card_ids[];
41
42#if IWL == 3945
43
44#define DRV_NAME "iwl3945"
45#include "iwl-hw.h"
46#include "iwl-3945-hw.h"
47
48#elif IWL == 4965
49
50#define DRV_NAME "iwl4965"
51#include "iwl-hw.h"
52#include "iwl-4965-hw.h"
53
54#endif
55
56#include "iwl-prph.h"
57
58/*
59 * Driver implementation data structures, constants, inline
60 * functions
61 *
 62 * NOTE:  DO NOT PUT HARDWARE/UCODE SPECIFIC DECLARATIONS HERE
63 *
 64 * Hardware-specific declarations go into iwl-*hw.h
65 *
66 */
67
68#include "iwl-debug.h"
69
70/* Default noise level to report when noise measurement is not available.
71 * This may be because we're:
72 * 1) Not associated (4965, no beacon statistics being sent to driver)
73 * 2) Scanning (noise measurement does not apply to associated channel)
74 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
75 * Use default noise value of -127 ... this is below the range of measurable
76 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
77 * Also, -127 works better than 0 when averaging frames with/without
78 * noise info (e.g. averaging might be done in app); measured dBm values are
79 * always negative ... using a negative value as the default keeps all
80 * averages within an s8's (used in some apps) range of negative values. */
81#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
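
/*
 * Worked example of the reasoning above: averaging a measured noise value
 * with a "not available" placeholder of 0 would pull the result toward 0
 * (e.g. (-90 + 0) / 2 = -45 dBm, which looks implausibly quiet), while the
 * -127 placeholder keeps the average negative and clearly pessimistic
 * (e.g. (-90 + -127) / 2 = -108 dBm with integer division), and still fits
 * in an s8.
 */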
82
83/* Module parameters accessible from iwl-*.c */
84extern int iwl_param_disable_hw_scan;
85extern int iwl_param_debug;
86extern int iwl_param_mode;
87extern int iwl_param_disable;
88extern int iwl_param_antenna;
89extern int iwl_param_hwcrypto;
90extern int iwl_param_qos_enable;
91extern int iwl_param_queues_num;
92
93enum iwl_antenna {
94 IWL_ANTENNA_DIVERSITY,
95 IWL_ANTENNA_MAIN,
96 IWL_ANTENNA_AUX
97};
98
99/*
100 * RTS threshold here is total size [2347] minus 4 FCS bytes
101 * Per spec:
102 * a value of 0 means RTS on all data/management packets
103 * a value > max MSDU size means no RTS
104 * else RTS for data/management frames where MPDU is larger
105 * than RTS value.
106 */
107#define DEFAULT_RTS_THRESHOLD 2347U
108#define MIN_RTS_THRESHOLD 0U
109#define MAX_RTS_THRESHOLD 2347U
110#define MAX_MSDU_SIZE 2304U
111#define MAX_MPDU_SIZE 2346U
112#define DEFAULT_BEACON_INTERVAL 100U
113#define DEFAULT_SHORT_RETRY_LIMIT 7U
114#define DEFAULT_LONG_RETRY_LIMIT 4U
115
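A minimal sketch of the RTS rule described in the comment above (example_use_rts() is hypothetical, not a driver function); it only relies on the defines just introduced:

static inline int example_use_rts(u32 rts_threshold, u32 mpdu_len)
{
	if (rts_threshold == 0)
		return 1;			/* RTS on all data/mgmt frames */
	if (rts_threshold > MAX_MSDU_SIZE)
		return 0;			/* RTS disabled */
	return mpdu_len > rts_threshold;	/* RTS only for larger MPDUs */
}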
116struct iwl_rx_mem_buffer {
117 dma_addr_t dma_addr;
118 struct sk_buff *skb;
119 struct list_head list;
120};
121
122struct iwl_rt_rx_hdr {
123 struct ieee80211_radiotap_header rt_hdr;
124 __le64 rt_tsf; /* TSF */
125 u8 rt_flags; /* radiotap packet flags */
126 u8 rt_rate; /* rate in 500kb/s */
127 __le16 rt_channelMHz; /* channel in MHz */
128 __le16 rt_chbitmask; /* channel bitfield */
129 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
130 s8 rt_dbmnoise;
131 u8 rt_antenna; /* antenna number */
132 u8 payload[0]; /* payload... */
133} __attribute__ ((packed));
134
135struct iwl_rt_tx_hdr {
136 struct ieee80211_radiotap_header rt_hdr;
137 u8 rt_rate; /* rate in 500kb/s */
138 __le16 rt_channel; /* channel in MHz */
139 __le16 rt_chbitmask; /* channel bitfield */
140 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
141 u8 rt_antenna; /* antenna number */
142 u8 payload[0]; /* payload... */
143} __attribute__ ((packed));
144
145/*
146 * Generic queue structure
147 *
148 * Contains common data for Rx and Tx queues
149 */
150struct iwl_queue {
151 int n_bd; /* number of BDs in this queue */
152 int first_empty; /* first empty entry (index), host write ptr */
153 int last_used; /* last used entry (index), host read ptr */
154 dma_addr_t dma_addr; /* physical addr for BD's */
155 int n_window; /* safe queue window */
156 u32 id;
157 int low_mark; /* low watermark, resume queue if free
158 * space more than this */
159 int high_mark; /* high watermark, stop queue if free
160 * space less than this */
161} __attribute__ ((packed));
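For illustration (an assumed helper, not necessarily how the driver computes it), the free space in this circular buffer can be derived from the two indices and then compared against low_mark/high_mark to decide when to stop or resume the queue:

static inline int example_queue_space(const struct iwl_queue *q)
{
	int used = q->first_empty - q->last_used;

	if (used < 0)
		used += q->n_bd;	/* write index has wrapped */
	return q->n_bd - used - 1;	/* keep one slot to tell empty from full */
}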
162
163#define MAX_NUM_OF_TBS (20)
164
165struct iwl_tx_info {
166 struct ieee80211_tx_status status;
167 struct sk_buff *skb[MAX_NUM_OF_TBS];
168};
169
170/**
171 * struct iwl_tx_queue - Tx Queue for DMA
172 * @need_update: need to update read/write index
173 * @sched_retry: queue is HT AGG enabled
174 *
175 * Queue consists of circular buffer of BD's and required locking structures.
176 */
177struct iwl_tx_queue {
178 struct iwl_queue q;
179 struct iwl_tfd_frame *bd;
180 struct iwl_cmd *cmd;
181 dma_addr_t dma_addr_cmd;
182 struct iwl_tx_info *txb;
183 int need_update;
184 int sched_retry;
185 int active;
186};
187
188#include "iwl-channel.h"
189
190#if IWL == 3945
191#include "iwl-3945-rs.h"
192#else
193#include "iwl-4965-rs.h"
194#endif
195
196#define IWL_TX_FIFO_AC0 0
197#define IWL_TX_FIFO_AC1 1
198#define IWL_TX_FIFO_AC2 2
199#define IWL_TX_FIFO_AC3 3
200#define IWL_TX_FIFO_HCCA_1 5
201#define IWL_TX_FIFO_HCCA_2 6
202#define IWL_TX_FIFO_NONE 7
203
204/* Minimum number of queues. MAX_NUM is defined in hw specific files */
205#define IWL_MIN_NUM_QUEUES 4
206
207/* Power management (not Tx power) structures */
208
209struct iwl_power_vec_entry {
210 struct iwl_powertable_cmd cmd;
211 u8 no_dtim;
212};
213#define IWL_POWER_RANGE_0 (0)
214#define IWL_POWER_RANGE_1 (1)
215
216#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */
217#define IWL_POWER_INDEX_3 0x03
218#define IWL_POWER_INDEX_5 0x05
219#define IWL_POWER_AC 0x06
220#define IWL_POWER_BATTERY 0x07
221#define IWL_POWER_LIMIT 0x07
222#define IWL_POWER_MASK 0x0F
223#define IWL_POWER_ENABLED 0x10
224#define IWL_POWER_LEVEL(x) ((x) & IWL_POWER_MASK)
225
226struct iwl_power_mgr {
227 spinlock_t lock;
228 struct iwl_power_vec_entry pwr_range_0[IWL_POWER_AC];
229 struct iwl_power_vec_entry pwr_range_1[IWL_POWER_AC];
230 u8 active_index;
231 u32 dtim_val;
232};
233
234#define IEEE80211_DATA_LEN 2304
235#define IEEE80211_4ADDR_LEN 30
236#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
237#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
238
239struct iwl_frame {
240 union {
241 struct ieee80211_hdr frame;
242 struct iwl_tx_beacon_cmd beacon;
243 u8 raw[IEEE80211_FRAME_LEN];
244 u8 cmd[360];
245 } u;
246 struct list_head list;
247};
248
249#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf)
250#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8)
251#define SEQ_TO_INDEX(x) (x & 0xff)
252#define INDEX_TO_SEQ(x) (x & 0xff)
253#define SEQ_HUGE_FRAME (0x4000)
254#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000)
255#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
256#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
257#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
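Usage sketch for the macros above (the helper names are hypothetical): a command's sequence field carries the Tx queue id in its upper byte (masked by QUEUE_TO_SEQ) and the ring index in its lower byte.

static inline u16 example_build_sequence(int txq_id, int index)
{
	return QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(index);
}

static inline void example_parse_sequence(u16 sequence, int *txq_id, int *index)
{
	*txq_id = SEQ_TO_QUEUE(sequence);
	*index = SEQ_TO_INDEX(sequence);
}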
258
259enum {
260 /* CMD_SIZE_NORMAL = 0, */
261 CMD_SIZE_HUGE = (1 << 0),
262 /* CMD_SYNC = 0, */
263 CMD_ASYNC = (1 << 1),
264 /* CMD_NO_SKB = 0, */
265 CMD_WANT_SKB = (1 << 2),
266};
267
268struct iwl_cmd;
269struct iwl_priv;
270
271struct iwl_cmd_meta {
272 struct iwl_cmd_meta *source;
273 union {
274 struct sk_buff *skb;
275 int (*callback)(struct iwl_priv *priv,
276 struct iwl_cmd *cmd, struct sk_buff *skb);
277 } __attribute__ ((packed)) u;
278
279 /* The CMD_SIZE_HUGE flag bit indicates that the command
280 * structure is stored at the end of the shared queue memory. */
281 u32 flags;
282
283} __attribute__ ((packed));
284
285struct iwl_cmd {
286 struct iwl_cmd_meta meta;
287 struct iwl_cmd_header hdr;
288 union {
289 struct iwl_addsta_cmd addsta;
290 struct iwl_led_cmd led;
291 u32 flags;
292 u8 val8;
293 u16 val16;
294 u32 val32;
295 struct iwl_bt_cmd bt;
296 struct iwl_rxon_time_cmd rxon_time;
297 struct iwl_powertable_cmd powertable;
298 struct iwl_qosparam_cmd qosparam;
299 struct iwl_tx_cmd tx;
300 struct iwl_tx_beacon_cmd tx_beacon;
301 struct iwl_rxon_assoc_cmd rxon_assoc;
302 u8 *indirect;
303 u8 payload[360];
304 } __attribute__ ((packed)) cmd;
305} __attribute__ ((packed));
306
307struct iwl_host_cmd {
308 u8 id;
309 u16 len;
310 struct iwl_cmd_meta meta;
311 const void *data;
312};
313
314#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_cmd) - \
315 sizeof(struct iwl_cmd_meta))
316
317/*
318 * RX related structures and functions
319 */
320#define RX_FREE_BUFFERS 64
321#define RX_LOW_WATERMARK 8
322
323#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
324#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
325#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
326
327/**
328 * struct iwl_rx_queue - Rx queue
329 * @processed: Internal index to last handled Rx packet
330 * @read: Shared index to newest available Rx buffer
331 * @write: Shared index to oldest written Rx packet
332 * @free_count: Number of pre-allocated buffers in rx_free
333 * @rx_free: list of free SKBs for use
334 * @rx_used: List of Rx buffers with no SKB
335 * @need_update: flag to indicate we need to update read/write index
336 *
337 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
338 */
339struct iwl_rx_queue {
340 __le32 *bd;
341 dma_addr_t dma_addr;
342 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
343 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
344 u32 processed;
345 u32 read;
346 u32 write;
347 u32 free_count;
348 struct list_head rx_free;
349 struct list_head rx_used;
350 int need_update;
351 spinlock_t lock;
352};
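A sketch of the rx_used/rx_free hand-off noted above (assumed flow; example_recycle_rxb() is not part of the driver): once a buffer taken from rx_used has an skb attached, it is placed back on rx_free and counted so the ring can be restocked.

static inline void example_recycle_rxb(struct iwl_rx_queue *rxq,
				       struct iwl_rx_mem_buffer *rxb)
{
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	list_add_tail(&rxb->list, &rxq->rx_free);
	rxq->free_count++;
	spin_unlock_irqrestore(&rxq->lock, flags);
}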
353
354#define IWL_SUPPORTED_RATES_IE_LEN 8
355
356#define SCAN_INTERVAL 100
357
358#define MAX_A_CHANNELS 252
359#define MIN_A_CHANNELS 7
360
361#define MAX_B_CHANNELS 14
362#define MIN_B_CHANNELS 1
363
364#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
365#define STATUS_INT_ENABLED 1
366#define STATUS_RF_KILL_HW 2
367#define STATUS_RF_KILL_SW 3
368#define STATUS_INIT 4
369#define STATUS_ALIVE 5
370#define STATUS_READY 6
371#define STATUS_TEMPERATURE 7
372#define STATUS_GEO_CONFIGURED 8
373#define STATUS_EXIT_PENDING 9
374#define STATUS_IN_SUSPEND 10
375#define STATUS_STATISTICS 11
376#define STATUS_SCANNING 12
377#define STATUS_SCAN_ABORTING 13
378#define STATUS_SCAN_HW 14
379#define STATUS_POWER_PMI 15
380#define STATUS_FW_ERROR 16
381
382#define MAX_TID_COUNT 9
383
384#define IWL_INVALID_RATE 0xFF
385#define IWL_INVALID_VALUE -1
386
387#if IWL == 4965
388#ifdef CONFIG_IWLWIFI_HT
389#ifdef CONFIG_IWLWIFI_HT_AGG
390struct iwl_ht_agg {
391 u16 txq_id;
392 u16 frame_count;
393 u16 wait_for_ba;
394 u16 start_idx;
395 u32 bitmap0;
396 u32 bitmap1;
397 u32 rate_n_flags;
398};
399#endif /* CONFIG_IWLWIFI_HT_AGG */
400#endif /* CONFIG_IWLWIFI_HT */
401#endif
402
403struct iwl_tid_data {
404 u16 seq_number;
405#if IWL == 4965
406#ifdef CONFIG_IWLWIFI_HT
407#ifdef CONFIG_IWLWIFI_HT_AGG
408 struct iwl_ht_agg agg;
409#endif /* CONFIG_IWLWIFI_HT_AGG */
410#endif /* CONFIG_IWLWIFI_HT */
411#endif
412};
413
414struct iwl_hw_key {
415 ieee80211_key_alg alg;
416 int keylen;
417 u8 key[32];
418};
419
420union iwl_ht_rate_supp {
421 u16 rates;
422 struct {
423 u8 siso_rate;
424 u8 mimo_rate;
425 };
426};
427
428#ifdef CONFIG_IWLWIFI_HT
429#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3)
430#define HT_IE_MAX_AMSDU_SIZE_4K (0)
431#define CFG_HT_MPDU_DENSITY_2USEC (0x5)
432#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC
433
434struct sta_ht_info {
435 u8 is_ht;
436 u16 rx_mimo_ps_mode;
437 u16 tx_mimo_ps_mode;
438 u16 control_channel;
439 u8 max_amsdu_size;
440 u8 ampdu_factor;
441 u8 mpdu_density;
442 u8 operating_mode;
443 u8 supported_chan_width;
444 u8 extension_chan_offset;
445 u8 is_green_field;
446 u8 sgf;
447 u8 supp_rates[16];
448 u8 tx_chan_width;
449 u8 chan_width_cap;
450};
451#endif /*CONFIG_IWLWIFI_HT */
452
453#ifdef CONFIG_IWLWIFI_QOS
454
455union iwl_qos_capabity {
456 struct {
457 u8 edca_count:4; /* bit 0-3 */
458 u8 q_ack:1; /* bit 4 */
459 u8 queue_request:1; /* bit 5 */
460 u8 txop_request:1; /* bit 6 */
461 u8 reserved:1; /* bit 7 */
462 } q_AP;
463 struct {
464 u8 acvo_APSD:1; /* bit 0 */
465 u8 acvi_APSD:1; /* bit 1 */
466 u8 ac_bk_APSD:1; /* bit 2 */
467 u8 ac_be_APSD:1; /* bit 3 */
468 u8 q_ack:1; /* bit 4 */
469 u8 max_len:2; /* bit 5-6 */
470 u8 more_data_ack:1; /* bit 7 */
471 } q_STA;
472 u8 val;
473};
474
475/* QoS structures */
476struct iwl_qos_info {
477 int qos_enable;
478 int qos_active;
479 union iwl_qos_capabity qos_cap;
480 struct iwl_qosparam_cmd def_qos_parm;
481};
482#endif /*CONFIG_IWLWIFI_QOS */
483
484#define STA_PS_STATUS_WAKE 0
485#define STA_PS_STATUS_SLEEP 1
486
487struct iwl_station_entry {
488 struct iwl_addsta_cmd sta;
489 struct iwl_tid_data tid[MAX_TID_COUNT];
490#if IWL == 3945
491 union {
492 struct {
493 u8 rate;
494 u8 flags;
495 } s;
496 u16 rate_n_flags;
497 } current_rate;
498#endif
499 u8 used;
500 u8 ps_status;
501 struct iwl_hw_key keyinfo;
502};
503
504/* one for each uCode image (inst/data, boot/init/runtime) */
505struct fw_image_desc {
506 void *v_addr; /* access by driver */
507 dma_addr_t p_addr; /* access by card's busmaster DMA */
508 u32 len; /* bytes */
509};
510
511/* uCode file layout */
512struct iwl_ucode {
513 __le32 ver; /* major/minor/subminor */
514 __le32 inst_size; /* bytes of runtime instructions */
515 __le32 data_size; /* bytes of runtime data */
516 __le32 init_size; /* bytes of initialization instructions */
517 __le32 init_data_size; /* bytes of initialization data */
518 __le32 boot_size; /* bytes of bootstrap instructions */
519 u8 data[0]; /* data in same order as "size" elements */
520};
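As a worked example of this layout (a sanity check a loader might perform; not claimed to be the driver's actual validation), the firmware file size must equal the header plus the five sections, in the same order as the size fields:

static inline int example_ucode_size_ok(const struct iwl_ucode *ucode,
					size_t file_size)
{
	size_t expected = sizeof(*ucode) +
			  le32_to_cpu(ucode->inst_size) +
			  le32_to_cpu(ucode->data_size) +
			  le32_to_cpu(ucode->init_size) +
			  le32_to_cpu(ucode->init_data_size) +
			  le32_to_cpu(ucode->boot_size);

	return file_size == expected;
}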
521
522#define IWL_IBSS_MAC_HASH_SIZE 32
523
524struct iwl_ibss_seq {
525 u8 mac[ETH_ALEN];
526 u16 seq_num;
527 u16 frag_num;
528 unsigned long packet_time;
529 struct list_head list;
530};
531
532struct iwl_driver_hw_info {
533 u16 max_txq_num;
534 u16 ac_queue_count;
535 u32 rx_buffer_size;
536 u16 tx_cmd_len;
537 u16 max_rxq_size;
538 u16 max_rxq_log;
539 u32 cck_flag;
540 u8 max_stations;
541 u8 bcast_sta_id;
542 void *shared_virt;
543 dma_addr_t shared_phys;
544};
545
546
547#define STA_FLG_RTS_MIMO_PROT_MSK __constant_cpu_to_le32(1 << 17)
548#define STA_FLG_AGG_MPDU_8US_MSK __constant_cpu_to_le32(1 << 18)
549#define STA_FLG_MAX_AGG_SIZE_POS (19)
550#define STA_FLG_MAX_AGG_SIZE_MSK __constant_cpu_to_le32(3 << 19)
551#define STA_FLG_FAT_EN_MSK __constant_cpu_to_le32(1 << 21)
552#define STA_FLG_MIMO_DIS_MSK __constant_cpu_to_le32(1 << 22)
553#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
554#define STA_FLG_AGG_MPDU_DENSITY_MSK __constant_cpu_to_le32(7 << 23)
555#define HT_SHORT_GI_20MHZ_ONLY (1 << 0)
556#define HT_SHORT_GI_40MHZ_ONLY (1 << 1)
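Illustration only (example_set_max_agg_size() is hypothetical and the 2-bit aggregation-size code is an assumed input): writing a multi-bit field into the station flags word through its _POS/_MSK pair.

static inline __le32 example_set_max_agg_size(__le32 flags, u32 size_code)
{
	flags &= ~STA_FLG_MAX_AGG_SIZE_MSK;
	flags |= cpu_to_le32(size_code << STA_FLG_MAX_AGG_SIZE_POS) &
		 STA_FLG_MAX_AGG_SIZE_MSK;
	return flags;
}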
557
558
559#include "iwl-priv.h"
560
561/* Requires full declaration of iwl_priv before including */
562#include "iwl-io.h"
563
564#define IWL_RX_HDR(x) ((struct iwl_rx_frame_hdr *)(\
565 x->u.rx_frame.stats.payload + \
566 x->u.rx_frame.stats.phy_count))
567#define IWL_RX_END(x) ((struct iwl_rx_frame_end *)(\
568 IWL_RX_HDR(x)->payload + \
569 le16_to_cpu(IWL_RX_HDR(x)->len)))
570#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
571#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
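Usage sketch for the accessors above (example_dump_rx() is hypothetical; it relies only on the fields the macros themselves dereference):

static inline void example_dump_rx(struct iwl_rx_packet *pkt)
{
	struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);

	printk(KERN_DEBUG "rx frame: %u bytes of payload at %p\n",
	       (unsigned int)len, (void *)IWL_RX_DATA(pkt));
}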
572
573
574/******************************************************************************
575 *
576 * Functions implemented in iwl-base.c which are forward declared here
577 * for use by iwl-*.c
578 *
579 *****************************************************************************/
580struct iwl_addsta_cmd;
581extern int iwl_send_add_station(struct iwl_priv *priv,
582 struct iwl_addsta_cmd *sta, u8 flags);
583extern const char *iwl_get_tx_fail_reason(u32 status);
584extern u8 iwl_add_station(struct iwl_priv *priv, const u8 *bssid,
585 int is_ap, u8 flags);
586extern int iwl_is_network_packet(struct iwl_priv *priv,
587 struct ieee80211_hdr *header);
588extern int iwl_power_init_handle(struct iwl_priv *priv);
589extern int iwl_eeprom_init(struct iwl_priv *priv);
590#ifdef CONFIG_IWLWIFI_DEBUG
591extern void iwl_report_frame(struct iwl_priv *priv,
592 struct iwl_rx_packet *pkt,
593 struct ieee80211_hdr *header, int group100);
594#else
595static inline void iwl_report_frame(struct iwl_priv *priv,
596 struct iwl_rx_packet *pkt,
597 struct ieee80211_hdr *header,
598 int group100) {}
599#endif
600extern int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv,
601 struct iwl_tx_queue *txq);
602extern void iwl_handle_data_packet_monitor(struct iwl_priv *priv,
603 struct iwl_rx_mem_buffer *rxb,
604 void *data, short len,
605 struct ieee80211_rx_status *stats,
606 u16 phy_flags);
607extern int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr
608 *header);
609extern void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
610extern int iwl_rx_queue_alloc(struct iwl_priv *priv);
611extern void iwl_rx_queue_reset(struct iwl_priv *priv,
612 struct iwl_rx_queue *rxq);
613extern int iwl_calc_db_from_ratio(int sig_ratio);
614extern int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm);
615extern int iwl_tx_queue_init(struct iwl_priv *priv,
616 struct iwl_tx_queue *txq, int count, u32 id);
617extern int iwl_rx_queue_restock(struct iwl_priv *priv);
618extern void iwl_rx_replenish(void *data);
619extern void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq);
620extern int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len,
621 const void *data);
622extern int __must_check iwl_send_cmd_async(struct iwl_priv *priv,
623 struct iwl_host_cmd *cmd);
624extern int __must_check iwl_send_cmd_sync(struct iwl_priv *priv,
625 struct iwl_host_cmd *cmd);
626extern int __must_check iwl_send_cmd(struct iwl_priv *priv,
627 struct iwl_host_cmd *cmd);
628extern unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
629 struct ieee80211_hdr *hdr,
630 const u8 *dest, int left);
631extern int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
632 struct iwl_rx_queue *q);
633extern int iwl_send_statistics_request(struct iwl_priv *priv);
634extern void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
635 u32 decrypt_res,
636 struct ieee80211_rx_status *stats);
637extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr);
638
639extern const u8 BROADCAST_ADDR[ETH_ALEN];
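A hedged example of tying struct iwl_host_cmd to the command senders declared above (example_send_blob() is a placeholder, not driver code); iwl_send_cmd_pdu appears to wrap this same pattern for the synchronous case:

static inline int example_send_blob(struct iwl_priv *priv, u8 id,
				    const void *buf, u16 len)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = buf,
		/* meta.flags left at 0 == synchronous; set CMD_ASYNC and a
		 * meta.u.callback to complete out of line instead */
	};

	return iwl_send_cmd_sync(priv, &cmd);
}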
640
641/*
642 * TODO: currently used by iwl-3945-rs; restructure so that it does not
643 * need to call this directly.
644*/
645extern u8 iwl_sync_station(struct iwl_priv *priv, int sta_id,
646 u16 tx_rate, u8 flags);
647
648static inline int iwl_is_associated(struct iwl_priv *priv)
649{
650 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
651}
652
653/******************************************************************************
654 *
655 * Functions implemented in iwl-[34]*.c which are forward declared here
656 * for use by iwl-base.c
657 *
658 * NOTE: The implementations of these functions are hardware specific,
659 * which is why they are in the hardware specific files (vs. iwl-base.c)
660 *
661 * Naming convention --
662 * iwl_ <-- It's part of iwlwifi (should be changed to iwl_)
663 * iwl_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
664 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
665 * iwl_bg_ <-- Called from work queue context
666 * iwl_mac_ <-- mac80211 callback
667 *
668 ****************************************************************************/
669extern void iwl_hw_rx_handler_setup(struct iwl_priv *priv);
670extern void iwl_hw_setup_deferred_work(struct iwl_priv *priv);
671extern void iwl_hw_cancel_deferred_work(struct iwl_priv *priv);
672extern int iwl_hw_rxq_stop(struct iwl_priv *priv);
673extern int iwl_hw_set_hw_setting(struct iwl_priv *priv);
674extern int iwl_hw_nic_init(struct iwl_priv *priv);
675extern void iwl_hw_card_show_info(struct iwl_priv *priv);
676extern int iwl_hw_nic_stop_master(struct iwl_priv *priv);
677extern void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
678extern void iwl_hw_txq_ctx_stop(struct iwl_priv *priv);
679extern int iwl_hw_nic_reset(struct iwl_priv *priv);
680extern int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
681 dma_addr_t addr, u16 len);
682extern int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
683extern int iwl_hw_get_temperature(struct iwl_priv *priv);
684extern int iwl_hw_tx_queue_init(struct iwl_priv *priv,
685 struct iwl_tx_queue *txq);
686extern unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
687 struct iwl_frame *frame, u8 rate);
688extern int iwl_hw_get_rx_read(struct iwl_priv *priv);
689extern void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv,
690 struct iwl_cmd *cmd,
691 struct ieee80211_tx_control *ctrl,
692 struct ieee80211_hdr *hdr,
693 int sta_id, int tx_id);
694extern int iwl_hw_reg_send_txpower(struct iwl_priv *priv);
695extern int iwl_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
696extern void iwl_hw_rx_statistics(struct iwl_priv *priv,
697 struct iwl_rx_mem_buffer *rxb);
698extern void iwl_disable_events(struct iwl_priv *priv);
699extern int iwl4965_get_temperature(const struct iwl_priv *priv);
700
701/**
702 * iwl_hw_find_station - Find station id for a given BSSID
703 * @bssid: MAC address of station ID to find
704 *
705 * NOTE: This should not be hardware specific but the code has
706 * not yet been merged into a single common layer for managing the
707 * station tables.
708 */
709extern u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
710
711extern int iwl_hw_channel_switch(struct iwl_priv *priv, u16 channel);
712extern int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
713#endif