Diffstat (limited to 'drivers/net/wireless/iwlwifi')
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig  32
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile  11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-hw.h  13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-led.c  146
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-led.h  2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c  17
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c  131
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.h  51
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-hw.h  634
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.c  1233
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.h  105
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c  4004
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000-hw.h  134
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c  1580
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-calib.c  802
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-calib.h  84
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-commands.h)  462
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c  1259
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h  219
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h  38
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h  31
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c  103
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965.h)  567
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c  171
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h  206
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h  391
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c  15
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-helpers.h  92
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.c  209
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.h  5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.c  423
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.h  76
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h  333
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rfkill.c  106
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rfkill.h  6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c  1321
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c  931
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c  712
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.h  28
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c  1519
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c  535
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl4965-base.c  5004
42 files changed, 13356 insertions, 10385 deletions
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 62fb89d82318..82b66a3d3a5d 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -8,7 +8,6 @@ config IWLCORE
8 select MAC80211_LEDS if IWLWIFI_LEDS 8 select MAC80211_LEDS if IWLWIFI_LEDS
9 select LEDS_CLASS if IWLWIFI_LEDS 9 select LEDS_CLASS if IWLWIFI_LEDS
10 select RFKILL if IWLWIFI_RFKILL 10 select RFKILL if IWLWIFI_RFKILL
11 select RFKILL_INPUT if IWLWIFI_RFKILL
12 11
13config IWLWIFI_LEDS 12config IWLWIFI_LEDS
14 bool 13 bool
@@ -45,14 +44,6 @@ config IWL4965
45 say M here and read <file:Documentation/kbuild/modules.txt>. The 44 say M here and read <file:Documentation/kbuild/modules.txt>. The
46 module will be called iwl4965.ko. 45 module will be called iwl4965.ko.
47 46
48config IWL4965_HT
49 bool "Enable 802.11n HT features in iwl4965 driver"
50 depends on EXPERIMENTAL
51 depends on IWL4965
52 ---help---
53 This option enables IEEE 802.11n High Throughput features
54 for the iwl4965 driver.
55
56config IWL4965_LEDS 47config IWL4965_LEDS
57 bool "Enable LEDS features in iwl4965 driver" 48 bool "Enable LEDS features in iwl4965 driver"
58 depends on IWL4965 49 depends on IWL4965
@@ -67,13 +58,6 @@ config IWL4965_SPECTRUM_MEASUREMENT
67 ---help--- 58 ---help---
68 This option will enable spectrum measurement for the iwl4965 driver. 59 This option will enable spectrum measurement for the iwl4965 driver.
69 60
70config IWL4965_SENSITIVITY
71 bool "Enable Sensitivity Calibration in iwl4965 driver"
72 depends on IWL4965
73 ---help---
74 This option will enable sensitivity calibration for the iwl4965
75 driver.
76
77config IWLWIFI_DEBUG 61config IWLWIFI_DEBUG
78 bool "Enable full debugging output in iwl4965 driver" 62 bool "Enable full debugging output in iwl4965 driver"
79 depends on IWL4965 63 depends on IWL4965
@@ -85,13 +69,13 @@ config IWLWIFI_DEBUG
85 control which debug output is sent to the kernel log by setting the 69 control which debug output is sent to the kernel log by setting the
86 value in 70 value in
87 71
88 /sys/bus/pci/drivers/${DRIVER}/debug_level 72 /sys/class/net/wlan0/device/debug_level
89 73
90 This entry will only exist if this option is enabled. 74 This entry will only exist if this option is enabled.
91 75
92 To set a value, simply echo an 8-byte hex value to the same file: 76 To set a value, simply echo an 8-byte hex value to the same file:
93 77
94 % echo 0x43fff > /sys/bus/pci/drivers/${DRIVER}/debug_level 78 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
95 79
96 You can find the list of debug mask values in: 80 You can find the list of debug mask values in:
97 drivers/net/wireless/iwlwifi/iwl-4965-debug.h 81 drivers/net/wireless/iwlwifi/iwl-4965-debug.h
@@ -100,6 +84,13 @@ config IWLWIFI_DEBUG
100 as the debug information can assist others in helping you resolve 84 as the debug information can assist others in helping you resolve
101 any problems you may encounter. 85 any problems you may encounter.
102 86
87config IWL5000
88 bool "Intel Wireless WiFi 5000AGN"
89 depends on IWL4965
90 ---help---
91 This option enables support for Intel Wireless WiFi Link 5000AGN Family
92 Dependency on 4965 is temporary
93
103config IWLWIFI_DEBUGFS 94config IWLWIFI_DEBUGFS
104 bool "Iwlwifi debugfs support" 95 bool "Iwlwifi debugfs support"
105 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS 96 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
@@ -113,6 +104,7 @@ config IWL3945
113 select IWLWIFI 104 select IWLWIFI
114 select MAC80211_LEDS if IWL3945_LEDS 105 select MAC80211_LEDS if IWL3945_LEDS
115 select LEDS_CLASS if IWL3945_LEDS 106 select LEDS_CLASS if IWL3945_LEDS
107 select RFKILL if IWL3945_RFKILL
116 ---help--- 108 ---help---
117 Select to build the driver supporting the: 109 Select to build the driver supporting the:
118 110
@@ -135,6 +127,10 @@ config IWL3945
135 say M here and read <file:Documentation/kbuild/modules.txt>. The 127 say M here and read <file:Documentation/kbuild/modules.txt>. The
136 module will be called iwl3945.ko. 128 module will be called iwl3945.ko.
137 129
130config IWL3945_RFKILL
131 bool "Enable RF kill support in iwl3945 drivers"
132 depends on IWL3945
133
138config IWL3945_SPECTRUM_MEASUREMENT 134config IWL3945_SPECTRUM_MEASUREMENT
139 bool "Enable Spectrum Measurement in iwl3945 drivers" 135 bool "Enable Spectrum Measurement in iwl3945 drivers"
140 depends on IWL3945 136 depends on IWL3945
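
The IWLWIFI_DEBUG help text earlier in this Kconfig diff describes debug_level as a bit mask: each bit enables one category of messages, and the hex value echoed through sysfs is simply the OR of the wanted bits. A minimal userspace sketch of that gating (the DL_* flag values and the DBG macro below are made up for illustration; the real masks live in the driver's debug header):

#include <stdio.h>
#include <stdint.h>

#define DL_INFO 0x00000001u	/* hypothetical category bit */
#define DL_RX   0x00000100u	/* hypothetical category bit */
#define DL_TX   0x00000200u	/* hypothetical category bit */

static uint32_t debug_level = 0x43fff;	/* value as echoed into debug_level */

#define DBG(mask, fmt, ...) \
	do { if (debug_level & (mask)) printf(fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
	DBG(DL_RX, "rx logging enabled (mask 0x%x)\n", (unsigned)debug_level);
	DBG(DL_TX, "tx logging enabled\n");
	return 0;
}
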
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index ec6187b75c3b..1f52b92f08b5 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,5 +1,7 @@
1obj-$(CONFIG_IWLCORE) += iwlcore.o 1obj-$(CONFIG_IWLCORE) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o 2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
4iwlcore-objs += iwl-scan.o
3iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
4iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o 6iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
5iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o 7iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
@@ -9,5 +11,10 @@ iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o
9iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o 11iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o
10 12
11obj-$(CONFIG_IWL4965) += iwl4965.o 13obj-$(CONFIG_IWL4965) += iwl4965.o
12iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o iwl-sta.o 14iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o
15
16ifeq ($(CONFIG_IWL5000),y)
17 iwl4965-objs += iwl-5000.o
18endif
19
13 20
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index ad612a8719f4..644bd9e08052 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -126,7 +126,7 @@ enum {
126 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ 126 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
127 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ 127 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
128 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ 128 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
129 EEPROM_CHANNEL_NARROW = (1 << 6), /* 10 MHz channel (not used) */ 129 /* Bit 6 Reserved (was Narrow Channel) */
130 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ 130 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
131}; 131};
132 132
@@ -289,17 +289,6 @@ struct iwl3945_eeprom {
289#define PCI_REG_WUM8 0x0E8 289#define PCI_REG_WUM8 0x0E8
290#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 290#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
291 291
292/* SCD (3945 Tx Frame Scheduler) */
293#define SCD_BASE (CSR_BASE + 0x2E00)
294
295#define SCD_MODE_REG (SCD_BASE + 0x000)
296#define SCD_ARASTAT_REG (SCD_BASE + 0x004)
297#define SCD_TXFACT_REG (SCD_BASE + 0x010)
298#define SCD_TXF4MF_REG (SCD_BASE + 0x014)
299#define SCD_TXF5MF_REG (SCD_BASE + 0x020)
300#define SCD_SBYP_MODE_1_REG (SCD_BASE + 0x02C)
301#define SCD_SBYP_MODE_2_REG (SCD_BASE + 0x030)
302
303/*=== FH (data Flow Handler) ===*/ 292/*=== FH (data Flow Handler) ===*/
304#define FH_BASE (0x800) 293#define FH_BASE (0x800)
305 294
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index 8b1528e52d43..6be1fe13fa57 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -42,14 +42,11 @@
42#include "iwl-3945.h" 42#include "iwl-3945.h"
43#include "iwl-helpers.h" 43#include "iwl-helpers.h"
44 44
45#define IWL_1MB_RATE (128 * 1024)
46#define IWL_LED_THRESHOLD (16)
47#define IWL_MAX_BLINK_TBL (10)
48 45
49static const struct { 46static const struct {
50 u16 brightness; 47 u16 brightness;
51 u8 on_time; 48 u8 on_time;
52 u8 of_time; 49 u8 off_time;
53} blink_tbl[] = 50} blink_tbl[] =
54{ 51{
55 {300, 25, 25}, 52 {300, 25, 25},
@@ -61,9 +58,16 @@ static const struct {
61 {15, 95, 95 }, 58 {15, 95, 95 },
62 {10, 110, 110}, 59 {10, 110, 110},
63 {5, 130, 130}, 60 {5, 130, 130},
64 {0, 167, 167} 61 {0, 167, 167},
62 /*SOLID_ON*/
63 {-1, IWL_LED_SOLID, 0}
65}; 64};
66 65
66#define IWL_1MB_RATE (128 * 1024)
67#define IWL_LED_THRESHOLD (16)
68#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /*Exclude Solid on*/
69#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
70
67static int iwl3945_led_cmd_callback(struct iwl3945_priv *priv, 71static int iwl3945_led_cmd_callback(struct iwl3945_priv *priv,
68 struct iwl3945_cmd *cmd, 72 struct iwl3945_cmd *cmd,
69 struct sk_buff *skb) 73 struct sk_buff *skb)
@@ -71,6 +75,10 @@ static int iwl3945_led_cmd_callback(struct iwl3945_priv *priv,
71 return 1; 75 return 1;
72} 76}
73 77
78static inline int iwl3945_brightness_to_idx(enum led_brightness brightness)
79{
80 return fls(0x000000FF & (u32)brightness);
81}
74 82
75/* Send led command */ 83/* Send led command */
76static int iwl_send_led_cmd(struct iwl3945_priv *priv, 84static int iwl_send_led_cmd(struct iwl3945_priv *priv,
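
The iwl3945_brightness_to_idx() helper added in the hunk above maps an LED brightness (0-255) onto a blink_tbl[] index by taking the position of the highest set bit; the kernel's fls() returns 1-based bit positions and 0 for an input of 0. A small standalone sketch (fls_portable() is a stand-in, not the kernel routine) shows the resulting mapping:

#include <stdio.h>

/* Stand-in for the kernel's fls(): highest set bit position, 0 for 0. */
static int fls_portable(unsigned int x)
{
	int pos = 0;

	while (x) {
		pos++;
		x >>= 1;
	}
	return pos;
}

int main(void)
{
	unsigned int brightness;

	/* LED_OFF (0) maps to index 0, LED_FULL (255) maps to index 8 */
	for (brightness = 0; brightness <= 255; brightness += 51)
		printf("brightness %3u -> blink idx %d\n",
		       brightness, fls_portable(brightness & 0xFF));
	return 0;
}
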
@@ -81,49 +89,45 @@ static int iwl_send_led_cmd(struct iwl3945_priv *priv,
81 .len = sizeof(struct iwl3945_led_cmd), 89 .len = sizeof(struct iwl3945_led_cmd),
82 .data = led_cmd, 90 .data = led_cmd,
83 .meta.flags = CMD_ASYNC, 91 .meta.flags = CMD_ASYNC,
84 .meta.u.callback = iwl3945_led_cmd_callback 92 .meta.u.callback = iwl3945_led_cmd_callback,
85 }; 93 };
86 94
87 return iwl3945_send_cmd(priv, &cmd); 95 return iwl3945_send_cmd(priv, &cmd);
88} 96}
89 97
90 98
99
91/* Set led on command */ 100/* Set led on command */
92static int iwl3945_led_on(struct iwl3945_priv *priv, int led_id) 101static int iwl3945_led_pattern(struct iwl3945_priv *priv, int led_id,
102 unsigned int idx)
93{ 103{
94 struct iwl3945_led_cmd led_cmd = { 104 struct iwl3945_led_cmd led_cmd = {
95 .id = led_id, 105 .id = led_id,
96 .on = IWL_LED_SOLID,
97 .off = 0,
98 .interval = IWL_DEF_LED_INTRVL 106 .interval = IWL_DEF_LED_INTRVL
99 }; 107 };
108
109 BUG_ON(idx > IWL_MAX_BLINK_TBL);
110
111 led_cmd.on = blink_tbl[idx].on_time;
112 led_cmd.off = blink_tbl[idx].off_time;
113
100 return iwl_send_led_cmd(priv, &led_cmd); 114 return iwl_send_led_cmd(priv, &led_cmd);
101} 115}
102 116
117
118#if 1
103/* Set led on command */ 119/* Set led on command */
104static int iwl3945_led_pattern(struct iwl3945_priv *priv, int led_id, 120static int iwl3945_led_on(struct iwl3945_priv *priv, int led_id)
105 enum led_brightness brightness)
106{ 121{
107 struct iwl3945_led_cmd led_cmd = { 122 struct iwl3945_led_cmd led_cmd = {
108 .id = led_id, 123 .id = led_id,
109 .on = brightness, 124 .on = IWL_LED_SOLID,
110 .off = brightness, 125 .off = 0,
111 .interval = IWL_DEF_LED_INTRVL 126 .interval = IWL_DEF_LED_INTRVL
112 }; 127 };
113 if (brightness == LED_FULL) {
114 led_cmd.on = IWL_LED_SOLID;
115 led_cmd.off = 0;
116 }
117 return iwl_send_led_cmd(priv, &led_cmd); 128 return iwl_send_led_cmd(priv, &led_cmd);
118} 129}
119 130
120/* Set led register off */
121static int iwl3945_led_on_reg(struct iwl3945_priv *priv, int led_id)
122{
123 IWL_DEBUG_LED("led on %d\n", led_id);
124 return iwl3945_led_on(priv, led_id);
125}
126
127/* Set led off command */ 131/* Set led off command */
128static int iwl3945_led_off(struct iwl3945_priv *priv, int led_id) 132static int iwl3945_led_off(struct iwl3945_priv *priv, int led_id)
129{ 133{
@@ -136,27 +140,7 @@ static int iwl3945_led_off(struct iwl3945_priv *priv, int led_id)
136 IWL_DEBUG_LED("led off %d\n", led_id); 140 IWL_DEBUG_LED("led off %d\n", led_id);
137 return iwl_send_led_cmd(priv, &led_cmd); 141 return iwl_send_led_cmd(priv, &led_cmd);
138} 142}
139 143#endif
140/* Set led register off */
141static int iwl3945_led_off_reg(struct iwl3945_priv *priv, int led_id)
142{
143 iwl3945_led_off(priv, led_id);
144 return 0;
145}
146
147/* Set led blink command */
148static int iwl3945_led_not_solid(struct iwl3945_priv *priv, int led_id,
149 u8 brightness)
150{
151 struct iwl3945_led_cmd led_cmd = {
152 .id = led_id,
153 .on = brightness,
154 .off = brightness,
155 .interval = IWL_DEF_LED_INTRVL
156 };
157
158 return iwl_send_led_cmd(priv, &led_cmd);
159}
160 144
161 145
162/* 146/*
@@ -206,8 +190,10 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
206 led->led_off(priv, IWL_LED_LINK); 190 led->led_off(priv, IWL_LED_LINK);
207 break; 191 break;
208 default: 192 default:
209 if (led->led_pattern) 193 if (led->led_pattern) {
210 led->led_pattern(priv, IWL_LED_LINK, brightness); 194 int idx = iwl3945_brightness_to_idx(brightness);
195 led->led_pattern(priv, IWL_LED_LINK, idx);
196 }
211 break; 197 break;
212 } 198 }
213} 199}
@@ -252,24 +238,20 @@ static int iwl3945_led_register_led(struct iwl3945_priv *priv,
252static inline u8 get_blink_rate(struct iwl3945_priv *priv) 238static inline u8 get_blink_rate(struct iwl3945_priv *priv)
253{ 239{
254 int index; 240 int index;
255 u8 blink_rate; 241 u64 current_tpt = priv->rxtxpackets;
256 242 s64 tpt = current_tpt - priv->led_tpt;
257 if (priv->rxtxpackets < IWL_LED_THRESHOLD)
258 index = 10;
259 else {
260 for (index = 0; index < IWL_MAX_BLINK_TBL; index++) {
261 if (priv->rxtxpackets > (blink_tbl[index].brightness *
262 IWL_1MB_RATE))
263 break;
264 }
265 }
266 /* if 0 frame is transfered */
267 if ((index == IWL_MAX_BLINK_TBL) || !priv->allow_blinking)
268 blink_rate = IWL_LED_SOLID;
269 else
270 blink_rate = blink_tbl[index].on_time;
271 243
272 return blink_rate; 244 if (tpt < 0)
245 tpt = -tpt;
246 priv->led_tpt = current_tpt;
247
248 if (!priv->allow_blinking)
249 index = IWL_MAX_BLINK_TBL;
250 else
251 for (index = 0; index < IWL_MAX_BLINK_TBL; index++)
252 if (tpt > (blink_tbl[index].brightness * IWL_1MB_RATE))
253 break;
254 return index;
273} 255}
274 256
275static inline int is_rf_kill(struct iwl3945_priv *priv) 257static inline int is_rf_kill(struct iwl3945_priv *priv)
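
The reworked get_blink_rate() above no longer returns an on-time in milliseconds; it returns a blink_tbl[] index chosen from the number of bytes moved since the last update, falling through to the solid-on entry when there is no traffic or blinking is disabled. A rough standalone sketch of that selection (only the first and last thresholds come from the table in this patch; the middle values are illustrative):

#include <stdio.h>
#include <stdint.h>

#define IWL_1MB_RATE (128 * 1024)

/* Threshold column of blink_tbl[]; middle entries are illustrative. */
static const int threshold_tbl[] = { 300, 200, 100, 70, 50, 20, 15, 10, 5, 0 };
#define MAX_BLINK_TBL ((int)(sizeof(threshold_tbl) / sizeof(threshold_tbl[0])))

static int pick_blink_idx(int64_t tpt)
{
	int i;

	if (tpt < 0)		/* only the magnitude of the delta matters */
		tpt = -tpt;
	for (i = 0; i < MAX_BLINK_TBL; i++)
		if (tpt > (int64_t)threshold_tbl[i] * IWL_1MB_RATE)
			return i;	/* lower index = faster blink for more traffic */
	return MAX_BLINK_TBL;		/* no traffic: solid on */
}

int main(void)
{
	printf("idle   -> idx %d\n", pick_blink_idx(0));
	printf("10 MB  -> idx %d\n", pick_blink_idx(10LL * 1024 * 1024));
	printf("50 MB  -> idx %d\n", pick_blink_idx(50LL * 1024 * 1024));
	return 0;
}
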
@@ -285,7 +267,7 @@ static inline int is_rf_kill(struct iwl3945_priv *priv)
285 */ 267 */
286void iwl3945_led_background(struct iwl3945_priv *priv) 268void iwl3945_led_background(struct iwl3945_priv *priv)
287{ 269{
288 u8 blink_rate; 270 u8 blink_idx;
289 271
290 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 272 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
291 priv->last_blink_time = 0; 273 priv->last_blink_time = 0;
@@ -298,9 +280,10 @@ void iwl3945_led_background(struct iwl3945_priv *priv)
298 280
299 if (!priv->allow_blinking) { 281 if (!priv->allow_blinking) {
300 priv->last_blink_time = 0; 282 priv->last_blink_time = 0;
301 if (priv->last_blink_rate != IWL_LED_SOLID) { 283 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
302 priv->last_blink_rate = IWL_LED_SOLID; 284 priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
303 iwl3945_led_on(priv, IWL_LED_LINK); 285 iwl3945_led_pattern(priv, IWL_LED_LINK,
286 IWL_SOLID_BLINK_IDX);
304 } 287 }
305 return; 288 return;
306 } 289 }
@@ -309,21 +292,14 @@ void iwl3945_led_background(struct iwl3945_priv *priv)
309 msecs_to_jiffies(1000))) 292 msecs_to_jiffies(1000)))
310 return; 293 return;
311 294
312 blink_rate = get_blink_rate(priv); 295 blink_idx = get_blink_rate(priv);
313 296
314 /* call only if blink rate change */ 297 /* call only if blink rate change */
315 if (blink_rate != priv->last_blink_rate) { 298 if (blink_idx != priv->last_blink_rate)
316 if (blink_rate != IWL_LED_SOLID) { 299 iwl3945_led_pattern(priv, IWL_LED_LINK, blink_idx);
317 priv->last_blink_time = jiffies +
318 msecs_to_jiffies(1000);
319 iwl3945_led_not_solid(priv, IWL_LED_LINK, blink_rate);
320 } else {
321 priv->last_blink_time = 0;
322 iwl3945_led_on(priv, IWL_LED_LINK);
323 }
324 }
325 300
326 priv->last_blink_rate = blink_rate; 301 priv->last_blink_time = jiffies;
302 priv->last_blink_rate = blink_idx;
327 priv->rxtxpackets = 0; 303 priv->rxtxpackets = 0;
328} 304}
329 305
@@ -337,6 +313,7 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
337 313
338 priv->last_blink_rate = 0; 314 priv->last_blink_rate = 0;
339 priv->rxtxpackets = 0; 315 priv->rxtxpackets = 0;
316 priv->led_tpt = 0;
340 priv->last_blink_time = 0; 317 priv->last_blink_time = 0;
341 priv->allow_blinking = 0; 318 priv->allow_blinking = 0;
342 319
@@ -344,8 +321,8 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
344 snprintf(name, sizeof(name), "iwl-%s:radio", 321 snprintf(name, sizeof(name), "iwl-%s:radio",
345 wiphy_name(priv->hw->wiphy)); 322 wiphy_name(priv->hw->wiphy));
346 323
347 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on_reg; 324 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on;
348 priv->led[IWL_LED_TRG_RADIO].led_off = iwl3945_led_off_reg; 325 priv->led[IWL_LED_TRG_RADIO].led_off = iwl3945_led_off;
349 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL; 326 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
350 327
351 ret = iwl3945_led_register_led(priv, 328 ret = iwl3945_led_register_led(priv,
@@ -364,8 +341,8 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
364 IWL_LED_TRG_ASSOC, 0, 341 IWL_LED_TRG_ASSOC, 0,
365 name, trigger); 342 name, trigger);
366 /* for assoc always turn led on */ 343 /* for assoc always turn led on */
367 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on_reg; 344 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on;
368 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on_reg; 345 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on;
369 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL; 346 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
370 347
371 if (ret) 348 if (ret)
@@ -391,6 +368,7 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
391 trigger = ieee80211_get_tx_led_name(priv->hw); 368 trigger = ieee80211_get_tx_led_name(priv->hw);
392 snprintf(name, sizeof(name), "iwl-%s:TX", 369 snprintf(name, sizeof(name), "iwl-%s:TX",
393 wiphy_name(priv->hw->wiphy)); 370 wiphy_name(priv->hw->wiphy));
371
394 ret = iwl3945_led_register_led(priv, 372 ret = iwl3945_led_register_led(priv,
395 &priv->led[IWL_LED_TRG_TX], 373 &priv->led[IWL_LED_TRG_TX],
396 IWL_LED_TRG_TX, 0, 374 IWL_LED_TRG_TX, 0,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index b1d2f6b8b259..47b7e0bac802 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -54,7 +54,7 @@ struct iwl3945_led {
54 int (*led_on) (struct iwl3945_priv *priv, int led_id); 54 int (*led_on) (struct iwl3945_priv *priv, int led_id);
55 int (*led_off) (struct iwl3945_priv *priv, int led_id); 55 int (*led_off) (struct iwl3945_priv *priv, int led_id);
56 int (*led_pattern) (struct iwl3945_priv *priv, int led_id, 56 int (*led_pattern) (struct iwl3945_priv *priv, int led_id,
57 enum led_brightness brightness); 57 unsigned int idx);
58 58
59 enum led_type type; 59 enum led_type type;
60 unsigned int registered; 60 unsigned int registered;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 85c22641542d..10c64bdb314c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -29,7 +29,6 @@
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/wireless.h> 30#include <linux/wireless.h>
31#include <net/mac80211.h> 31#include <net/mac80211.h>
32#include <net/ieee80211.h>
33 32
34#include <linux/netdevice.h> 33#include <linux/netdevice.h>
35#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
@@ -446,8 +445,7 @@ static int rs_adjust_next_rate(struct iwl3945_priv *priv, int rate)
446 */ 445 */
447static void rs_tx_status(void *priv_rate, 446static void rs_tx_status(void *priv_rate,
448 struct net_device *dev, 447 struct net_device *dev,
449 struct sk_buff *skb, 448 struct sk_buff *skb)
450 struct ieee80211_tx_status *tx_resp)
451{ 449{
452 u8 retries, current_count; 450 u8 retries, current_count;
453 int scale_rate_index, first_index, last_index; 451 int scale_rate_index, first_index, last_index;
@@ -458,14 +456,15 @@ static void rs_tx_status(void *priv_rate,
458 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 456 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
459 struct iwl3945_rs_sta *rs_sta; 457 struct iwl3945_rs_sta *rs_sta;
460 struct ieee80211_supported_band *sband; 458 struct ieee80211_supported_band *sband;
459 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
461 460
462 IWL_DEBUG_RATE("enter\n"); 461 IWL_DEBUG_RATE("enter\n");
463 462
464 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 463 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
465 464
466 465
467 retries = tx_resp->retry_count; 466 retries = info->status.retry_count;
468 first_index = tx_resp->control.tx_rate->hw_value; 467 first_index = sband->bitrates[info->tx_rate_idx].hw_value;
469 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) { 468 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) {
470 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index); 469 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index);
471 return; 470 return;
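
The hunk above tracks a mac80211 API change: the TX status no longer arrives as a separate ieee80211_tx_status argument but is carried in the skb's control buffer and read back through IEEE80211_SKB_CB(skb). A generic userspace sketch of that "status lives inside the buffer" pattern (the fake_* types below are simplified stand-ins, not the mac80211 structures):

#include <stdio.h>
#include <string.h>

struct fake_skb {
	unsigned char cb[48];	/* per-packet scratch area, like skb->cb */
	/* payload would follow */
};

struct fake_tx_info {
	unsigned int flags;
	int tx_rate_idx;
	unsigned char retry_count;
};

#define FAKE_SKB_CB(skb) ((struct fake_tx_info *)(skb)->cb)

int main(void)
{
	struct fake_skb skb;

	memset(&skb, 0, sizeof(skb));
	FAKE_SKB_CB(&skb)->retry_count = 2;	/* filled in on TX completion */
	printf("retries seen by rate scaling: %d\n",
	       FAKE_SKB_CB(&skb)->retry_count);
	return 0;
}
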
@@ -526,11 +525,11 @@ static void rs_tx_status(void *priv_rate,
526 /* Update the last index window with success/failure based on ACK */ 525 /* Update the last index window with success/failure based on ACK */
527 IWL_DEBUG_RATE("Update rate %d with %s.\n", 526 IWL_DEBUG_RATE("Update rate %d with %s.\n",
528 last_index, 527 last_index,
529 (tx_resp->flags & IEEE80211_TX_STATUS_ACK) ? 528 (info->flags & IEEE80211_TX_STAT_ACK) ?
530 "success" : "failure"); 529 "success" : "failure");
531 iwl3945_collect_tx_data(rs_sta, 530 iwl3945_collect_tx_data(rs_sta,
532 &rs_sta->win[last_index], 531 &rs_sta->win[last_index],
533 tx_resp->flags & IEEE80211_TX_STATUS_ACK, 1); 532 info->flags & IEEE80211_TX_STAT_ACK, 1);
534 533
535 /* We updated the rate scale window -- if its been more than 534 /* We updated the rate scale window -- if its been more than
536 * flush_time since the last run, schedule the flush 535 * flush_time since the last run, schedule the flush
@@ -670,7 +669,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
670 is_multicast_ether_addr(hdr->addr1) || 669 is_multicast_ether_addr(hdr->addr1) ||
671 !sta || !sta->rate_ctrl_priv) { 670 !sta || !sta->rate_ctrl_priv) {
672 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 671 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
673 sel->rate = rate_lowest(local, sband, sta); 672 sel->rate_idx = rate_lowest_index(local, sband, sta);
674 rcu_read_unlock(); 673 rcu_read_unlock();
675 return; 674 return;
676 } 675 }
@@ -814,7 +813,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
814 813
815 IWL_DEBUG_RATE("leave: %d\n", index); 814 IWL_DEBUG_RATE("leave: %d\n", index);
816 815
817 sel->rate = &sband->bitrates[sta->txrate_idx]; 816 sel->rate_idx = sta->txrate_idx;
818} 817}
819 818
820static struct rate_control_ops rs_ops = { 819static struct rate_control_ops rs_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 55ac850744b3..c2a76785b665 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -283,8 +283,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl3945_priv *priv,
283 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 283 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
284 284
285 tx_info = &txq->txb[txq->q.read_ptr]; 285 tx_info = &txq->txb[txq->q.read_ptr];
286 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0], 286 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
287 &tx_info->status);
288 tx_info->skb[0] = NULL; 287 tx_info->skb[0] = NULL;
289 iwl3945_hw_txq_free_tfd(priv, txq); 288 iwl3945_hw_txq_free_tfd(priv, txq);
290 } 289 }
@@ -306,7 +305,7 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
306 int txq_id = SEQ_TO_QUEUE(sequence); 305 int txq_id = SEQ_TO_QUEUE(sequence);
307 int index = SEQ_TO_INDEX(sequence); 306 int index = SEQ_TO_INDEX(sequence);
308 struct iwl3945_tx_queue *txq = &priv->txq[txq_id]; 307 struct iwl3945_tx_queue *txq = &priv->txq[txq_id];
309 struct ieee80211_tx_status *tx_status; 308 struct ieee80211_tx_info *info;
310 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 309 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
311 u32 status = le32_to_cpu(tx_resp->status); 310 u32 status = le32_to_cpu(tx_resp->status);
312 int rate_idx; 311 int rate_idx;
@@ -319,19 +318,22 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
319 return; 318 return;
320 } 319 }
321 320
322 tx_status = &(txq->txb[txq->q.read_ptr].status); 321 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
322 memset(&info->status, 0, sizeof(info->status));
323 323
324 tx_status->retry_count = tx_resp->failure_frame; 324 info->status.retry_count = tx_resp->failure_frame;
325 /* tx_status->rts_retry_count = tx_resp->failure_rts; */ 325 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
326 tx_status->flags = ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ? 326 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
327 IEEE80211_TX_STATUS_ACK : 0; 327 IEEE80211_TX_STAT_ACK : 0;
328 328
329 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", 329 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
330 txq_id, iwl3945_get_tx_fail_reason(status), status, 330 txq_id, iwl3945_get_tx_fail_reason(status), status,
331 tx_resp->rate, tx_resp->failure_frame); 331 tx_resp->rate, tx_resp->failure_frame);
332 332
333 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate); 333 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
334 tx_status->control.tx_rate = &priv->ieee_rates[rate_idx]; 334 if (info->band == IEEE80211_BAND_5GHZ)
335 rate_idx -= IWL_FIRST_OFDM_RATE;
336 info->tx_rate_idx = rate_idx;
335 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); 337 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
336 iwl3945_tx_queue_reclaim(priv, txq_id, index); 338 iwl3945_tx_queue_reclaim(priv, txq_id, index);
337 339
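
The rate-index fixup in the hunk above reflects that the driver keeps one rate table (the CCK rates followed by the OFDM rates) while mac80211's 5 GHz band lists only the OFDM rates, so a TX rate reported on 5 GHz must be shifted down by the CCK count before it indexes the band's bitrate array. A small sketch of that adjustment (the offset of 4 and the band enum are illustrative stand-ins for IWL_FIRST_OFDM_RATE and the mac80211 band values):

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

#define FIRST_OFDM_RATE 4	/* assumed: the CCK rates occupy indices 0..3 */

static int driver_to_band_rate_idx(int driver_idx, enum band band)
{
	if (band == BAND_5GHZ)		/* 5 GHz band has no CCK entries */
		driver_idx -= FIRST_OFDM_RATE;
	return driver_idx;
}

int main(void)
{
	/* driver index 4 would be the first OFDM rate (6 Mbps) */
	printf("2.4 GHz band index: %d\n", driver_to_band_rate_idx(4, BAND_2GHZ));
	printf("5 GHz band index:   %d\n", driver_to_band_rate_idx(4, BAND_5GHZ));
	return 0;
}
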
@@ -386,7 +388,7 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
386 u32 print_dump = 0; /* set to 1 to dump all frames' contents */ 388 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
387 u32 hundred = 0; 389 u32 hundred = 0;
388 u32 dataframe = 0; 390 u32 dataframe = 0;
389 u16 fc; 391 __le16 fc;
390 u16 seq_ctl; 392 u16 seq_ctl;
391 u16 channel; 393 u16 channel;
392 u16 phy_flags; 394 u16 phy_flags;
@@ -405,7 +407,7 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
405 u8 *data = IWL_RX_DATA(pkt); 407 u8 *data = IWL_RX_DATA(pkt);
406 408
407 /* MAC header */ 409 /* MAC header */
408 fc = le16_to_cpu(header->frame_control); 410 fc = header->frame_control;
409 seq_ctl = le16_to_cpu(header->seq_ctrl); 411 seq_ctl = le16_to_cpu(header->seq_ctrl);
410 412
411 /* metadata */ 413 /* metadata */
@@ -429,8 +431,8 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
429 431
430 /* if data frame is to us and all is good, 432 /* if data frame is to us and all is good,
431 * (optionally) print summary for only 1 out of every 100 */ 433 * (optionally) print summary for only 1 out of every 100 */
432 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) == 434 if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
433 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { 435 cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
434 dataframe = 1; 436 dataframe = 1;
435 if (!group100) 437 if (!group100)
436 print_summary = 1; /* print each frame */ 438 print_summary = 1; /* print each frame */
@@ -453,13 +455,13 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
453 455
454 if (hundred) 456 if (hundred)
455 title = "100Frames"; 457 title = "100Frames";
456 else if (fc & IEEE80211_FCTL_RETRY) 458 else if (ieee80211_has_retry(fc))
457 title = "Retry"; 459 title = "Retry";
458 else if (ieee80211_is_assoc_response(fc)) 460 else if (ieee80211_is_assoc_resp(fc))
459 title = "AscRsp"; 461 title = "AscRsp";
460 else if (ieee80211_is_reassoc_response(fc)) 462 else if (ieee80211_is_reassoc_resp(fc))
461 title = "RasRsp"; 463 title = "RasRsp";
462 else if (ieee80211_is_probe_response(fc)) { 464 else if (ieee80211_is_probe_resp(fc)) {
463 title = "PrbRsp"; 465 title = "PrbRsp";
464 print_dump = 1; /* dump frame contents */ 466 print_dump = 1; /* dump frame contents */
465 } else if (ieee80211_is_beacon(fc)) { 467 } else if (ieee80211_is_beacon(fc)) {
@@ -488,14 +490,14 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
488 if (dataframe) 490 if (dataframe)
489 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " 491 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
490 "len=%u, rssi=%d, chnl=%d, rate=%d, \n", 492 "len=%u, rssi=%d, chnl=%d, rate=%d, \n",
491 title, fc, header->addr1[5], 493 title, le16_to_cpu(fc), header->addr1[5],
492 length, rssi, channel, rate); 494 length, rssi, channel, rate);
493 else { 495 else {
494 /* src/dst addresses assume managed mode */ 496 /* src/dst addresses assume managed mode */
495 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " 497 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
496 "src=0x%02x, rssi=%u, tim=%lu usec, " 498 "src=0x%02x, rssi=%u, tim=%lu usec, "
497 "phy=0x%02x, chnl=%d\n", 499 "phy=0x%02x, chnl=%d\n",
498 title, fc, header->addr1[5], 500 title, le16_to_cpu(fc), header->addr1[5],
499 header->addr3[5], rssi, 501 header->addr3[5], rssi,
500 tsf_low - priv->scan_start_tsf, 502 tsf_low - priv->scan_start_tsf,
501 phy_flags, channel); 503 phy_flags, channel);
@@ -512,6 +514,23 @@ static inline void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
512} 514}
513#endif 515#endif
514 516
517/* This is necessary only for a number of statistics, see the caller. */
518static int iwl3945_is_network_packet(struct iwl3945_priv *priv,
519 struct ieee80211_hdr *header)
520{
521 /* Filter incoming packets to determine if they are targeted toward
522 * this network, discarding packets coming from ourselves */
523 switch (priv->iw_mode) {
524 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
525 /* packets to our IBSS update information */
526 return !compare_ether_addr(header->addr3, priv->bssid);
527 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
528 /* packets to our IBSS update information */
529 return !compare_ether_addr(header->addr2, priv->bssid);
530 default:
531 return 1;
532 }
533}
515 534
516static void iwl3945_add_radiotap(struct iwl3945_priv *priv, 535static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
517 struct sk_buff *skb, 536 struct sk_buff *skb,
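
In the iwl3945_is_network_packet() helper added above, compare_ether_addr() returns zero when the two MAC addresses match, hence the leading '!' to turn a match into a true result; which header address holds the BSSID depends on the interface mode (addr3 for IBSS, addr2 for a station receiving from its AP). A userspace approximation of the address check (compare_ether_addr_like() is a stand-in, not the kernel helper):

#include <stdio.h>
#include <string.h>

/* Stand-in: zero when the two 6-byte MAC addresses are identical. */
static int compare_ether_addr_like(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, 6) != 0;
}

int main(void)
{
	unsigned char bssid[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char addr3[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };	/* BSSID field in an IBSS frame */

	printf("frame belongs to our network: %d\n",
	       !compare_ether_addr_like(addr3, bssid));
	return 0;
}
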
@@ -520,7 +539,7 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
520{ 539{
521 /* First cache any information we need before we overwrite 540 /* First cache any information we need before we overwrite
522 * the information provided in the skb from the hardware */ 541 * the information provided in the skb from the hardware */
523 s8 signal = stats->ssi; 542 s8 signal = stats->signal;
524 s8 noise = 0; 543 s8 noise = 0;
525 int rate = stats->rate_idx; 544 int rate = stats->rate_idx;
526 u64 tsf = stats->mactime; 545 u64 tsf = stats->mactime;
@@ -606,12 +625,12 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
606 stats->flag |= RX_FLAG_RADIOTAP; 625 stats->flag |= RX_FLAG_RADIOTAP;
607} 626}
608 627
609static void iwl3945_handle_data_packet(struct iwl3945_priv *priv, int is_data, 628static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
610 struct iwl3945_rx_mem_buffer *rxb, 629 struct iwl3945_rx_mem_buffer *rxb,
611 struct ieee80211_rx_status *stats) 630 struct ieee80211_rx_status *stats)
612{ 631{
613 struct ieee80211_hdr *hdr;
614 struct iwl3945_rx_packet *pkt = (struct iwl3945_rx_packet *)rxb->skb->data; 632 struct iwl3945_rx_packet *pkt = (struct iwl3945_rx_packet *)rxb->skb->data;
633 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
615 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 634 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
616 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 635 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
617 short len = le16_to_cpu(rx_hdr->len); 636 short len = le16_to_cpu(rx_hdr->len);
@@ -633,8 +652,6 @@ static void iwl3945_handle_data_packet(struct iwl3945_priv *priv, int is_data,
633 /* Set the size of the skb to the size of the frame */ 652 /* Set the size of the skb to the size of the frame */
634 skb_put(rxb->skb, le16_to_cpu(rx_hdr->len)); 653 skb_put(rxb->skb, le16_to_cpu(rx_hdr->len));
635 654
636 hdr = (void *)rxb->skb->data;
637
638 if (iwl3945_param_hwcrypto) 655 if (iwl3945_param_hwcrypto)
639 iwl3945_set_decrypted_flag(priv, rxb->skb, 656 iwl3945_set_decrypted_flag(priv, rxb->skb,
640 le32_to_cpu(rx_end->status), stats); 657 le32_to_cpu(rx_end->status), stats);
@@ -643,7 +660,7 @@ static void iwl3945_handle_data_packet(struct iwl3945_priv *priv, int is_data,
643 iwl3945_add_radiotap(priv, rxb->skb, rx_hdr, stats); 660 iwl3945_add_radiotap(priv, rxb->skb, rx_hdr, stats);
644 661
645#ifdef CONFIG_IWL3945_LEDS 662#ifdef CONFIG_IWL3945_LEDS
646 if (is_data) 663 if (ieee80211_is_data(hdr->frame_control))
647 priv->rxtxpackets += len; 664 priv->rxtxpackets += len;
648#endif 665#endif
649 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); 666 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
@@ -692,12 +709,12 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
692 } 709 }
693 710
694 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 711 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
695 iwl3945_handle_data_packet(priv, 1, rxb, &rx_status); 712 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
696 return; 713 return;
697 } 714 }
698 715
699 /* Convert 3945's rssi indicator to dBm */ 716 /* Convert 3945's rssi indicator to dBm */
700 rx_status.ssi = rx_stats->rssi - IWL_RSSI_OFFSET; 717 rx_status.signal = rx_stats->rssi - IWL_RSSI_OFFSET;
701 718
702 /* Set default noise value to -127 */ 719 /* Set default noise value to -127 */
703 if (priv->last_rx_noise == 0) 720 if (priv->last_rx_noise == 0)
@@ -716,21 +733,21 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
716 * Calculate rx_status.signal (quality indicator in %) based on SNR. */ 733 * Calculate rx_status.signal (quality indicator in %) based on SNR. */
717 if (rx_stats_noise_diff) { 734 if (rx_stats_noise_diff) {
718 snr = rx_stats_sig_avg / rx_stats_noise_diff; 735 snr = rx_stats_sig_avg / rx_stats_noise_diff;
719 rx_status.noise = rx_status.ssi - 736 rx_status.noise = rx_status.signal -
720 iwl3945_calc_db_from_ratio(snr); 737 iwl3945_calc_db_from_ratio(snr);
721 rx_status.signal = iwl3945_calc_sig_qual(rx_status.ssi, 738 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
722 rx_status.noise); 739 rx_status.noise);
723 740
724 /* If noise info not available, calculate signal quality indicator (%) 741 /* If noise info not available, calculate signal quality indicator (%)
725 * using just the dBm signal level. */ 742 * using just the dBm signal level. */
726 } else { 743 } else {
727 rx_status.noise = priv->last_rx_noise; 744 rx_status.noise = priv->last_rx_noise;
728 rx_status.signal = iwl3945_calc_sig_qual(rx_status.ssi, 0); 745 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
729 } 746 }
730 747
731 748
732 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", 749 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
733 rx_status.ssi, rx_status.noise, rx_status.signal, 750 rx_status.signal, rx_status.noise, rx_status.qual,
734 rx_stats_sig_avg, rx_stats_noise_diff); 751 rx_stats_sig_avg, rx_stats_noise_diff);
735 752
736 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 753 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@@ -740,8 +757,8 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
740 IWL_DEBUG_STATS_LIMIT("[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n", 757 IWL_DEBUG_STATS_LIMIT("[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
741 network_packet ? '*' : ' ', 758 network_packet ? '*' : ' ',
742 le16_to_cpu(rx_hdr->channel), 759 le16_to_cpu(rx_hdr->channel),
743 rx_status.ssi, rx_status.ssi, 760 rx_status.signal, rx_status.signal,
744 rx_status.ssi, rx_status.rate_idx); 761 rx_status.noise, rx_status.rate_idx);
745 762
746#ifdef CONFIG_IWL3945_DEBUG 763#ifdef CONFIG_IWL3945_DEBUG
747 if (iwl3945_debug_level & (IWL_DL_RX)) 764 if (iwl3945_debug_level & (IWL_DL_RX))
@@ -752,7 +769,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
752 if (network_packet) { 769 if (network_packet) {
753 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); 770 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp);
754 priv->last_tsf = le64_to_cpu(rx_end->timestamp); 771 priv->last_tsf = le64_to_cpu(rx_end->timestamp);
755 priv->last_rx_rssi = rx_status.ssi; 772 priv->last_rx_rssi = rx_status.signal;
756 priv->last_rx_noise = rx_status.noise; 773 priv->last_rx_noise = rx_status.noise;
757 } 774 }
758 775
@@ -840,27 +857,12 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
840 } 857 }
841 } 858 }
842 859
843 iwl3945_handle_data_packet(priv, 0, rxb, &rx_status); 860 case IEEE80211_FTYPE_DATA:
844 break; 861 /* fall through */
845 862 default:
846 case IEEE80211_FTYPE_CTL: 863 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
847 break;
848
849 case IEEE80211_FTYPE_DATA: {
850 DECLARE_MAC_BUF(mac1);
851 DECLARE_MAC_BUF(mac2);
852 DECLARE_MAC_BUF(mac3);
853
854 if (unlikely(iwl3945_is_duplicate_packet(priv, header)))
855 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
856 print_mac(mac1, header->addr1),
857 print_mac(mac2, header->addr2),
858 print_mac(mac3, header->addr3));
859 else
860 iwl3945_handle_data_packet(priv, 1, rxb, &rx_status);
861 break; 864 break;
862 } 865 }
863 }
864} 866}
865 867
866int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *ptr, 868int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *ptr,
@@ -962,23 +964,24 @@ u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *addr)
962*/ 964*/
963void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv, 965void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
964 struct iwl3945_cmd *cmd, 966 struct iwl3945_cmd *cmd,
965 struct ieee80211_tx_control *ctrl, 967 struct ieee80211_tx_info *info,
966 struct ieee80211_hdr *hdr, int sta_id, int tx_id) 968 struct ieee80211_hdr *hdr, int sta_id, int tx_id)
967{ 969{
968 unsigned long flags; 970 unsigned long flags;
969 u16 rate_index = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1); 971 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
972 u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT - 1);
970 u16 rate_mask; 973 u16 rate_mask;
971 int rate; 974 int rate;
972 u8 rts_retry_limit; 975 u8 rts_retry_limit;
973 u8 data_retry_limit; 976 u8 data_retry_limit;
974 __le32 tx_flags; 977 __le32 tx_flags;
975 u16 fc = le16_to_cpu(hdr->frame_control); 978 __le16 fc = hdr->frame_control;
976 979
977 rate = iwl3945_rates[rate_index].plcp; 980 rate = iwl3945_rates[rate_index].plcp;
978 tx_flags = cmd->cmd.tx.tx_flags; 981 tx_flags = cmd->cmd.tx.tx_flags;
979 982
980 /* We need to figure out how to get the sta->supp_rates while 983 /* We need to figure out how to get the sta->supp_rates while
981 * in this running context; perhaps encoding into ctrl->tx_rate? */ 984 * in this running context */
982 rate_mask = IWL_RATES_MASK; 985 rate_mask = IWL_RATES_MASK;
983 986
984 spin_lock_irqsave(&priv->sta_lock, flags); 987 spin_lock_irqsave(&priv->sta_lock, flags);
@@ -997,7 +1000,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
997 else 1000 else
998 rts_retry_limit = 7; 1001 rts_retry_limit = 7;
999 1002
1000 if (ieee80211_is_probe_response(fc)) { 1003 if (ieee80211_is_probe_resp(fc)) {
1001 data_retry_limit = 3; 1004 data_retry_limit = 3;
1002 if (data_retry_limit < rts_retry_limit) 1005 if (data_retry_limit < rts_retry_limit)
1003 rts_retry_limit = data_retry_limit; 1006 rts_retry_limit = data_retry_limit;
@@ -1007,12 +1010,12 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
1007 if (priv->data_retry_limit != -1) 1010 if (priv->data_retry_limit != -1)
1008 data_retry_limit = priv->data_retry_limit; 1011 data_retry_limit = priv->data_retry_limit;
1009 1012
1010 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { 1013 if (ieee80211_is_mgmt(fc)) {
1011 switch (fc & IEEE80211_FCTL_STYPE) { 1014 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1012 case IEEE80211_STYPE_AUTH: 1015 case cpu_to_le16(IEEE80211_STYPE_AUTH):
1013 case IEEE80211_STYPE_DEAUTH: 1016 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
1014 case IEEE80211_STYPE_ASSOC_REQ: 1017 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
1015 case IEEE80211_STYPE_REASSOC_REQ: 1018 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
1016 if (tx_flags & TX_CMD_FLG_RTS_MSK) { 1019 if (tx_flags & TX_CMD_FLG_RTS_MSK) {
1017 tx_flags &= ~TX_CMD_FLG_RTS_MSK; 1020 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
1018 tx_flags |= TX_CMD_FLG_CTS_MSK; 1021 tx_flags |= TX_CMD_FLG_CTS_MSK;
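
The switch rewritten in the hunk above keeps frame_control in its on-wire little-endian form and byte-swaps the constants instead; cpu_to_le16() on a constant folds at compile time, so no per-frame conversion is needed and the case labels remain constant expressions. A userspace sketch of the same idea (cpu_to_le16_sketch() is a stand-in for the kernel macro; the FCTL/STYPE values follow the 802.11 definitions):

#include <stdio.h>
#include <stdint.h>

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define cpu_to_le16_sketch(x) \
	((uint16_t)((((x) & 0xffu) << 8) | (((x) >> 8) & 0xffu)))
#else
#define cpu_to_le16_sketch(x) ((uint16_t)(x))
#endif

#define FCTL_STYPE	0x00f0
#define STYPE_AUTH	0x00b0

int main(void)
{
	/* frame_control exactly as it sits in the received frame (little endian) */
	uint16_t fc_le = cpu_to_le16_sketch(STYPE_AUTH);

	if ((fc_le & cpu_to_le16_sketch(FCTL_STYPE)) ==
	    cpu_to_le16_sketch(STYPE_AUTH))
		printf("authentication frame\n");
	return 0;
}
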
@@ -1233,7 +1236,7 @@ int iwl3945_hw_nic_init(struct iwl3945_priv *priv)
1233 iwl3945_power_init_handle(priv); 1236 iwl3945_power_init_handle(priv);
1234 1237
1235 spin_lock_irqsave(&priv->lock, flags); 1238 spin_lock_irqsave(&priv->lock, flags);
1236 iwl3945_set_bit(priv, CSR_ANA_PLL_CFG, (1 << 24)); 1239 iwl3945_set_bit(priv, CSR_ANA_PLL_CFG, CSR39_ANA_PLL_CFG_VAL);
1237 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS, 1240 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1238 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 1241 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1239 1242
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index c7695a215a39..fa81ba1af3d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -36,6 +36,10 @@
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h> 37#include <net/ieee80211_radiotap.h>
38 38
39/*used for rfkill*/
40#include <linux/rfkill.h>
41#include <linux/input.h>
42
39/* Hardware specific file defines the PCI IDs table for that hardware module */ 43/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern struct pci_device_id iwl3945_hw_card_ids[]; 44extern struct pci_device_id iwl3945_hw_card_ids[];
41 45
@@ -124,7 +128,6 @@ int iwl3945_x2_queue_used(const struct iwl3945_queue *q, int i);
124 128
125/* One for each TFD */ 129/* One for each TFD */
126struct iwl3945_tx_info { 130struct iwl3945_tx_info {
127 struct ieee80211_tx_status status;
128 struct sk_buff *skb[MAX_NUM_OF_TBS]; 131 struct sk_buff *skb[MAX_NUM_OF_TBS];
129}; 132};
130 133
@@ -507,8 +510,6 @@ struct iwl3945_ucode {
507 u8 data[0]; /* data in same order as "size" elements */ 510 u8 data[0]; /* data in same order as "size" elements */
508}; 511};
509 512
510#define IWL_IBSS_MAC_HASH_SIZE 32
511
512struct iwl3945_ibss_seq { 513struct iwl3945_ibss_seq {
513 u8 mac[ETH_ALEN]; 514 u8 mac[ETH_ALEN];
514 u16 seq_num; 515 u16 seq_num;
@@ -566,17 +567,8 @@ extern int iwl3945_send_add_station(struct iwl3945_priv *priv,
566 struct iwl3945_addsta_cmd *sta, u8 flags); 567 struct iwl3945_addsta_cmd *sta, u8 flags);
567extern u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *bssid, 568extern u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *bssid,
568 int is_ap, u8 flags); 569 int is_ap, u8 flags);
569extern int iwl3945_is_network_packet(struct iwl3945_priv *priv,
570 struct ieee80211_hdr *header);
571extern int iwl3945_power_init_handle(struct iwl3945_priv *priv); 570extern int iwl3945_power_init_handle(struct iwl3945_priv *priv);
572extern int iwl3945_eeprom_init(struct iwl3945_priv *priv); 571extern int iwl3945_eeprom_init(struct iwl3945_priv *priv);
573extern void iwl3945_handle_data_packet_monitor(struct iwl3945_priv *priv,
574 struct iwl3945_rx_mem_buffer *rxb,
575 void *data, short len,
576 struct ieee80211_rx_status *stats,
577 u16 phy_flags);
578extern int iwl3945_is_duplicate_packet(struct iwl3945_priv *priv,
579 struct ieee80211_hdr *header);
580extern int iwl3945_rx_queue_alloc(struct iwl3945_priv *priv); 572extern int iwl3945_rx_queue_alloc(struct iwl3945_priv *priv);
581extern void iwl3945_rx_queue_reset(struct iwl3945_priv *priv, 573extern void iwl3945_rx_queue_reset(struct iwl3945_priv *priv,
582 struct iwl3945_rx_queue *rxq); 574 struct iwl3945_rx_queue *rxq);
@@ -645,7 +637,7 @@ extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
645extern int iwl3945_hw_get_rx_read(struct iwl3945_priv *priv); 637extern int iwl3945_hw_get_rx_read(struct iwl3945_priv *priv);
646extern void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv, 638extern void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
647 struct iwl3945_cmd *cmd, 639 struct iwl3945_cmd *cmd,
648 struct ieee80211_tx_control *ctrl, 640 struct ieee80211_tx_info *info,
649 struct ieee80211_hdr *hdr, 641 struct ieee80211_hdr *hdr,
650 int sta_id, int tx_id); 642 int sta_id, int tx_id);
651extern int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv); 643extern int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv);
@@ -687,6 +679,18 @@ enum {
687 679
688#endif 680#endif
689 681
682#ifdef CONFIG_IWL3945_RFKILL
683struct iwl3945_priv;
684
685void iwl3945_rfkill_set_hw_state(struct iwl3945_priv *priv);
686void iwl3945_rfkill_unregister(struct iwl3945_priv *priv);
687int iwl3945_rfkill_init(struct iwl3945_priv *priv);
688#else
689static inline void iwl3945_rfkill_set_hw_state(struct iwl3945_priv *priv) {}
690static inline void iwl3945_rfkill_unregister(struct iwl3945_priv *priv) {}
691static inline int iwl3945_rfkill_init(struct iwl3945_priv *priv) { return 0; }
692#endif
693
690#define IWL_MAX_NUM_QUEUES IWL39_MAX_NUM_QUEUES 694#define IWL_MAX_NUM_QUEUES IWL39_MAX_NUM_QUEUES
691 695
692struct iwl3945_priv { 696struct iwl3945_priv {
@@ -780,12 +784,17 @@ struct iwl3945_priv {
780 struct iwl3945_init_alive_resp card_alive_init; 784 struct iwl3945_init_alive_resp card_alive_init;
781 struct iwl3945_alive_resp card_alive; 785 struct iwl3945_alive_resp card_alive;
782 786
787#ifdef CONFIG_IWL3945_RFKILL
788 struct rfkill *rfkill;
789#endif
790
783#ifdef CONFIG_IWL3945_LEDS 791#ifdef CONFIG_IWL3945_LEDS
784 struct iwl3945_led led[IWL_LED_TRG_MAX]; 792 struct iwl3945_led led[IWL_LED_TRG_MAX];
785 unsigned long last_blink_time; 793 unsigned long last_blink_time;
786 u8 last_blink_rate; 794 u8 last_blink_rate;
787 u8 allow_blinking; 795 u8 allow_blinking;
788 unsigned int rxtxpackets; 796 unsigned int rxtxpackets;
797 u64 led_tpt;
789#endif 798#endif
790 799
791 800
@@ -836,20 +845,10 @@ struct iwl3945_priv {
836 845
837 u8 mac80211_registered; 846 u8 mac80211_registered;
838 847
839 u32 notif_missed_beacons;
840
841 /* Rx'd packet timing information */ 848 /* Rx'd packet timing information */
842 u32 last_beacon_time; 849 u32 last_beacon_time;
843 u64 last_tsf; 850 u64 last_tsf;
844 851
845 /* Duplicate packet detection */
846 u16 last_seq_num;
847 u16 last_frag_num;
848 unsigned long last_packet_time;
849
850 /* Hash table for finding stations in IBSS network */
851 struct list_head ibss_mac_hash[IWL_IBSS_MAC_HASH_SIZE];
852
853 /* eeprom */ 852 /* eeprom */
854 struct iwl3945_eeprom eeprom; 853 struct iwl3945_eeprom eeprom;
855 854
@@ -886,6 +885,7 @@ struct iwl3945_priv {
886 struct work_struct report_work; 885 struct work_struct report_work;
887 struct work_struct request_scan; 886 struct work_struct request_scan;
888 struct work_struct beacon_update; 887 struct work_struct beacon_update;
888 struct work_struct set_monitor;
889 889
890 struct tasklet_struct irq_tasklet; 890 struct tasklet_struct irq_tasklet;
891 891
@@ -924,11 +924,6 @@ static inline int is_channel_valid(const struct iwl3945_channel_info *ch_info)
924 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; 924 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
925} 925}
926 926
927static inline int is_channel_narrow(const struct iwl3945_channel_info *ch_info)
928{
929 return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0;
930}
931
932static inline int is_channel_radar(const struct iwl3945_channel_info *ch_info) 927static inline int is_channel_radar(const struct iwl3945_channel_info *ch_info)
933{ 928{
934 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; 929 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 1a66b508a8ea..fce950f4163c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -62,13 +62,18 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions. 64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
65 * Use iwl-4965-commands.h for uCode API definitions. 65 * Use iwl-commands.h for uCode API definitions.
66 * Use iwl-4965.h for driver implementation definitions. 66 * Use iwl-dev.h for driver implementation definitions.
67 */ 67 */
68 68
69#ifndef __iwl_4965_hw_h__ 69#ifndef __iwl_4965_hw_h__
70#define __iwl_4965_hw_h__ 70#define __iwl_4965_hw_h__
71 71
72#include "iwl-fh.h"
73
74/* EERPROM */
75#define IWL4965_EEPROM_IMG_SIZE 1024
76
72/* 77/*
73 * uCode queue management definitions ... 78 * uCode queue management definitions ...
74 * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4. 79 * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4.
@@ -77,7 +82,7 @@
77 */ 82 */
78#define IWL_CMD_QUEUE_NUM 4 83#define IWL_CMD_QUEUE_NUM 4
79#define IWL_CMD_FIFO_NUM 4 84#define IWL_CMD_FIFO_NUM 4
80#define IWL_BACK_QUEUE_FIRST_ID 7 85#define IWL49_FIRST_AMPDU_QUEUE 7
81 86
82/* Tx rates */ 87/* Tx rates */
83#define IWL_CCK_RATES 4 88#define IWL_CCK_RATES 4
@@ -93,11 +98,16 @@
93#define IWL_RSSI_OFFSET 44 98#define IWL_RSSI_OFFSET 44
94 99
95 100
96#include "iwl-4965-commands.h" 101#include "iwl-commands.h"
97 102
98#define PCI_LINK_CTRL 0x0F0 103/* PCI registers */
104#define PCI_LINK_CTRL 0x0F0 /* 1 byte */
99#define PCI_POWER_SOURCE 0x0C8 105#define PCI_POWER_SOURCE 0x0C8
100#define PCI_REG_WUM8 0x0E8 106#define PCI_REG_WUM8 0x0E8
107
108/* PCI register values */
109#define PCI_LINK_VAL_L0S_EN 0x01
110#define PCI_LINK_VAL_L1_EN 0x02
101#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 111#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
102 112
103#define TFD_QUEUE_SIZE_MAX (256) 113#define TFD_QUEUE_SIZE_MAX (256)
@@ -131,10 +141,8 @@
131#define RTC_DATA_LOWER_BOUND (0x800000) 141#define RTC_DATA_LOWER_BOUND (0x800000)
132#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000) 142#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
133 143
134#define IWL49_RTC_INST_SIZE \ 144#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
135 (IWL49_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) 145#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
136#define IWL49_RTC_DATA_SIZE \
137 (IWL49_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
138 146
139#define IWL_MAX_INST_SIZE IWL49_RTC_INST_SIZE 147#define IWL_MAX_INST_SIZE IWL49_RTC_INST_SIZE
140#define IWL_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE 148#define IWL_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
@@ -785,585 +793,6 @@ enum {
785 793
786/********************* END TXPOWER *****************************************/ 794/********************* END TXPOWER *****************************************/
787 795
788/****************************/
789/* Flow Handler Definitions */
790/****************************/
791
792/**
793 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
794 * Addresses are offsets from device's PCI hardware base address.
795 */
796#define FH_MEM_LOWER_BOUND (0x1000)
797#define FH_MEM_UPPER_BOUND (0x1EF0)
798
799/**
800 * Keep-Warm (KW) buffer base address.
801 *
802 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
803 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
804 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
805 * from going into a power-savings mode that would cause higher DRAM latency,
806 * and possible data over/under-runs, before all Tx/Rx is complete.
807 *
808 * Driver loads IWL_FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
809 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
810 * automatically invokes keep-warm accesses when normal accesses might not
811 * be sufficient to maintain fast DRAM response.
812 *
813 * Bit fields:
814 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
815 */
816#define IWL_FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
817
818
819/**
820 * TFD Circular Buffers Base (CBBC) addresses
821 *
822 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
823 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
824 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
825 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
826 * aligned (address bits 0-7 must be 0).
827 *
828 * Bit fields in each pointer register:
829 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
830 */
831#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
832#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
833
834/* Find TFD CB base pointer for given queue (range 0-15). */
835#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
836
837
838/**
839 * Rx SRAM Control and Status Registers (RSCSR)
840 *
841 * These registers provide handshake between driver and 4965 for the Rx queue
842 * (this queue handles *all* command responses, notifications, Rx data, etc.
843 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
844 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
845 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
846 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
847 * mapping between RBDs and RBs.
848 *
849 * Driver must allocate host DRAM memory for the following, and set the
850 * physical address of each into 4965 registers:
851 *
852 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
853 * entries (although any power of 2, up to 4096, is selectable by driver).
854 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
855 * (typically 4K, although 8K or 16K are also selectable by driver).
856 * Driver sets up RB size and number of RBDs in the CB via Rx config
857 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
858 *
859 * Bit fields within one RBD:
860 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
861 *
862 * Driver sets physical address [35:8] of base of RBD circular buffer
863 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
864 *
865 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
866 * (RBs) have been filled, via a "write pointer", actually the index of
867 * the RB's corresponding RBD within the circular buffer. Driver sets
868 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
869 *
870 * Bit fields in lower dword of Rx status buffer (upper dword not used
871 * by driver; see struct iwl4965_shared, val0):
872 * 31-12: Not used by driver
873 * 11- 0: Index of last filled Rx buffer descriptor
874 * (4965 writes, driver reads this value)
875 *
876 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
877 * enter pointers to these RBs into contiguous RBD circular buffer entries,
878 * and update the 4965's "write" index register, FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
879 *
880 * This "write" index corresponds to the *next* RBD that the driver will make
881 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
882 * the circular buffer. This value should initially be 0 (before preparing any
883 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
884 * wrap back to 0 at the end of the circular buffer (but don't wrap before
885 * "read" index has advanced past 1! See below).
886 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
887 *
888 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
889 * buffer), it updates the Rx status buffer in host DRAM (item 2 described above),
890 * to tell the driver the index of the latest filled RBD. The driver must
891 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
892 *
893 * The driver must also internally keep track of a third index, which is the
894 * next RBD to process. When receiving an Rx interrupt, driver should process
895 * all filled but unprocessed RBs up to, but not including, the RB
896 * corresponding to the "read" index. For example, if "read" index becomes "1",
897 * driver may process the RB pointed to by RBD 0. Depending on volume of
898 * traffic, there may be many RBs to process.
899 *
900 * If read index == write index, 4965 thinks there is no room to put new data.
901 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
902 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
903 * and "read" indexes; that is, make sure that there are no more than 254
904 * buffers waiting to be filled.
905 */
906#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
907#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
908#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
909
910/**
911 * Physical base address of 8-byte Rx Status buffer.
912 * Bit fields:
913 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
914 */
915#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
916
917/**
918 * Physical base address of Rx Buffer Descriptor Circular Buffer.
919 * Bit fields:
920 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
921 */
922#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
923
924/**
925 * Rx write pointer (index, really!).
926 * Bit fields:
927 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
928 * NOTE: For 256-entry circular buffer, use only bits [7:0].
929 */
930#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
931#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
932
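A sketch of the restock step described above (not part of this patch): once the base registers are programmed at init, the driver only has to publish its write index, rounded down to a multiple of 8 as the note above requires. The helper name iwl_write_direct32() is an assumption.

/* Sketch: publish the Rx write index after queueing fresh RBs.  The device
 * expects the index in multiples of 8, so round down before writing, and
 * keep at least a 2-RBD gap from the "read" index (max 254 RBs queued). */
static void sketch_rx_publish_write_index(struct iwl_priv *priv, u32 write_index)
{
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
                           write_index & ~0x7);
}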
933
934/**
935 * Rx Config/Status Registers (RCSR)
936 * Rx Config Reg for channel 0 (only channel used)
937 *
938 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
939 * normal operation (see bit fields).
940 *
941 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
942 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
943 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
944 *
945 * Bit fields:
946 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
947 * '10' operate normally
948 * 29-24: reserved
949 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
950 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
951 * 19-18: reserved
952 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
953 * '10' 12K, '11' 16K.
954 * 15-14: reserved
955 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
956 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
957 * typical value 0x10 (about 1/2 msec)
958 * 3- 0: reserved
959 */
960#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
961#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
962#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
963
964#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
965
966#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MASK (0x00000FF0) /* bit 4-11 */
967#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MASK (0x00001000) /* bit 12 */
968#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MASK (0x00008000) /* bit 15 */
969#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MASK (0x00030000) /* bits 16-17 */
970#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MASK (0x00F00000) /* bits 20-23 */
971#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MASK (0xC0000000) /* bits 30-31 */
972
973#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20)
974#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT (4)
975#define RX_RB_TIMEOUT (0x10)
976
977#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
978#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
979#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
980
981#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
982#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
983#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
984#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
985
986#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
987#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
988
989
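Putting the bit fields above together, a sketch (not from this patch) of a "normal" channel-0 configuration value -- DMA enabled, 4 KB buffers, 256 RBDs, interrupt to host, 0x10 timeout -- which would then be written to FH_MEM_RCSR_CHNL0_CONFIG_REG:

/* Sketch: typical Rx config value built purely from the definitions above. */
static u32 sketch_rx_config_normal(void)
{
        return FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
               FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
               FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
               (8 << FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT) |  /* 2^8 = 256 RBDs */
               (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT);
}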
990/**
991 * Rx Shared Status Registers (RSSR)
992 *
993 * After stopping Rx DMA channel (writing 0 to FH_MEM_RCSR_CHNL0_CONFIG_REG),
994 * driver must poll FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
995 *
996 * Bit fields:
997 * 24: 1 = Channel 0 is idle
998 *
999 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV contain
1000 * default values that should not be altered by the driver.
1001 */
1002#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
1003#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
1004
1005#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
1006#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
1007#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV (FH_MEM_RSSR_LOWER_BOUND + 0x008)
1008
1009#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
1010
1011
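A sketch of the stop-and-wait sequence just described (not part of this patch), assuming iwl_write_direct32() and a poll helper along the lines of iwl_poll_direct_bit(); the helper names and timeout units are assumptions:

/* Sketch: turn off Rx DMA, then wait for channel 0 to report idle. */
static int sketch_rx_dma_stop(struct iwl_priv *priv)
{
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}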
1012/**
1013 * Transmit DMA Channel Control/Status Registers (TCSR)
1014 *
1015 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
1016 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
1017 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
1018 *
1019 * To use a Tx DMA channel, driver must initialize its
1020 * IWL_FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
1021 *
1022 * IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1023 * IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
1024 *
1025 * All other bits should be 0.
1026 *
1027 * Bit fields:
1028 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1029 * '10' operate normally
1030 * 29- 4: Reserved, set to "0"
1031 * 3: Enable internal DMA requests (1, normal operation), disable (0)
1032 * 2- 0: Reserved, set to "0"
1033 */
1034#define IWL_FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
1035#define IWL_FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
1036
1037/* Find Control/Status reg for given Tx DMA/FIFO channel */
1038#define IWL_FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
1039 (IWL_FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
1040
1041#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
1042#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
1043
1044#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
1045#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
1046#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
1047
1048/**
1049 * Tx Shared Status Registers (TSSR)
1050 *
1051 * After stopping Tx DMA channel (writing 0 to
1052 * IWL_FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
1053 * IWL_FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
1054 * (channel's buffers empty | no pending requests).
1055 *
1056 * Bit fields:
1057 * 31-24: 1 = Channel buffers empty (channel 7:0)
1058 * 23-16: 1 = No pending requests (channel 7:0)
1059 */
1060#define IWL_FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
1061#define IWL_FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
1062
1063#define IWL_FH_TSSR_TX_STATUS_REG (IWL_FH_TSSR_LOWER_BOUND + 0x010)
1064
1065#define IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) \
1066 ((1 << (_chnl)) << 24)
1067#define IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) \
1068 ((1 << (_chnl)) << 16)
1069
1070#define IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \
1071 (IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \
1072 IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl))
1073
1074
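And the matching teardown, sketched under the same assumptions (the iwl_write_direct32()/iwl_poll_direct_bit() names are not part of this patch):

/* Sketch: pause Tx channel chnl, then wait until its buffers are empty and
 * it has no pending requests. */
static int sketch_tx_chnl_stop(struct iwl_priv *priv, int chnl)
{
        iwl_write_direct32(priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
        return iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
                                   IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
                                   1000);
}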
1075/********************* START TX SCHEDULER *************************************/
1076
1077/**
1078 * 4965 Tx Scheduler
1079 *
1080 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
1081 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
1082 * host DRAM. It steers each frame's Tx command (which contains the frame
1083 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
1084 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
1085 * but one DMA channel may take input from several queues.
1086 *
1087 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows:
1088 *
1089 * 0 -- EDCA BK (background) frames, lowest priority
1090 * 1 -- EDCA BE (best effort) frames, normal priority
1091 * 2 -- EDCA VI (video) frames, higher priority
1092 * 3 -- EDCA VO (voice) and management frames, highest priority
1093 * 4 -- Commands (e.g. RXON, etc.)
1094 * 5 -- HCCA short frames
1095 * 6 -- HCCA long frames
1096 * 7 -- not used by driver (device-internal only)
1097 *
1098 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
1099 * In addition, driver can map queues 7-15 to Tx DMA/FIFO channels 0-3 to
1100 * support 11n aggregation via EDCA DMA channels.
1101 *
1102 * The driver sets up each queue to work in one of two modes:
1103 *
1104 * 1) Scheduler-Ack, in which the scheduler automatically supports a
1105 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
1106 * contains TFDs for a unique combination of Recipient Address (RA)
1107 * and Traffic Identifier (TID), that is, traffic of a given
1108 * Quality-Of-Service (QOS) priority, destined for a single station.
1109 *
1110 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
1111 * each frame within the BA window, including whether it's been transmitted,
1112 * and whether it's been acknowledged by the receiving station. The device
1113 * automatically processes block-acks received from the receiving STA,
1114 * and reschedules un-acked frames to be retransmitted (successful
1115 * Tx completion may end up being out-of-order).
1116 *
1117 * The driver must maintain the queue's Byte Count table in host DRAM
1118 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
1119 * This mode does not support fragmentation.
1120 *
1121 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
1122 * The device may automatically retry Tx, but will retry only one frame
1123 * at a time, until receiving ACK from receiving station, or reaching
1124 * retry limit and giving up.
1125 *
1126 * The command queue (#4) must use this mode!
1127 * This mode does not require use of the Byte Count table in host DRAM.
1128 *
1129 * Driver controls scheduler operation via 3 means:
1130 * 1) Scheduler registers
1131 * 2) Shared scheduler database in internal 4965 SRAM
1132 * 3) Shared data in host DRAM
1133 *
1134 * Initialization:
1135 *
1136 * When loading, driver should allocate memory for:
1137 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
1138 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
1139 * (1024 bytes for each queue).
1140 *
1141 * After receiving "Alive" response from uCode, driver must initialize
1142 * the scheduler (especially for queue #4, the command queue, otherwise
1143 * the driver can't issue commands!):
1144 */
1145
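A compressed sketch of that post-"Alive" bring-up (not part of this patch; error handling omitted). It assumes the driver's PRPH/SRAM accessors -- iwl_read_prph(), iwl_write_prph(), iwl_write_targ_mem() -- and uses the SCD_* registers and SRAM offsets defined below:

/* Sketch: after "Alive", locate the scheduler SRAM area, clear the shared
 * structures, point the scheduler at the byte-count tables (1024-byte
 * aligned, 16 KB total), and enable all 8 Tx DMA/FIFO channels. */
static void sketch_scd_bringup(struct iwl_priv *priv, dma_addr_t bc_tbls_dma)
{
        u32 a, scd_base = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);

        /* clear context data, Tx status bitmap and translate table areas */
        for (a = scd_base + SCD_CONTEXT_DATA_OFFSET;
             a < scd_base + SCD_TRANSLATE_TBL_OFFSET + 32; a += 4)
                iwl_write_targ_mem(priv, a, 0);

        iwl_write_prph(priv, SCD_DRAM_BASE_ADDR, (u32)(bc_tbls_dma >> 10));
        iwl_write_prph(priv, SCD_TXFACT, SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
}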
1146/**
1147 * Max Tx window size is the max number of contiguous TFDs that the scheduler
1148 * can keep track of at one time when creating block-ack chains of frames.
1149 * Note that "64" matches the number of ack bits in a block-ack packet.
1150 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
1151 * SCD_CONTEXT_QUEUE_OFFSET(x) values.
1152 */
1153#define SCD_WIN_SIZE 64
1154#define SCD_FRAME_LIMIT 64
1155
1156/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
1157#define SCD_START_OFFSET 0xa02c00
1158
1159/*
1160 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
1161 * Value is valid only after "Alive" response from uCode.
1162 */
1163#define SCD_SRAM_BASE_ADDR (SCD_START_OFFSET + 0x0)
1164
1165/*
1166 * Driver may need to update queue-empty bits after changing queue's
1167 * write and read pointers (indexes) during (re-)initialization (i.e. when
1168 * scheduler is not tracking what's happening).
1169 * Bit fields:
1170 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
1171 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
1172 * NOTE: This register is not used by Linux driver.
1173 */
1174#define SCD_EMPTY_BITS (SCD_START_OFFSET + 0x4)
1175
1176/*
1177 * Physical base address of array of byte count (BC) circular buffers (CBs).
1178 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
1179 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
1180 * Others are spaced by 1024 bytes.
1181 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
1182 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
1183 * Bit fields:
1184 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
1185 */
1186#define SCD_DRAM_BASE_ADDR (SCD_START_OFFSET + 0x10)
1187
1188/*
1189 * Enables any/all Tx DMA/FIFO channels.
1190 * Scheduler generates requests for only the active channels.
1191 * Set this to 0xff to enable all 8 channels (normal usage).
1192 * Bit fields:
1193 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
1194 */
1195#define SCD_TXFACT (SCD_START_OFFSET + 0x1c)
1196
1197/* Mask to enable contiguous Tx DMA/FIFO channels between "lo" and "hi". */
1198#define SCD_TXFACT_REG_TXFIFO_MASK(lo, hi) \
1199 ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
1200
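For reference, how the mask macro above expands (a worked example, not from the patch):

/* SCD_TXFACT_REG_TXFIFO_MASK(0, 6) = (1 << 6) | ((1 << 6) - (1 << 0))
 *                                  = 0x40 | 0x3f = 0x7f    (channels 0-6)
 * SCD_TXFACT_REG_TXFIFO_MASK(0, 7) = 0x80 | 0x7f = 0xff    (all 8 channels) */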
1201/*
1202 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
1203 * Initialized and updated by driver as new TFDs are added to queue.
1204 * NOTE: If using Block Ack, index must correspond to frame's
1205 * Start Sequence Number; index = (SSN & 0xff)
1206 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
1207 */
1208#define SCD_QUEUE_WRPTR(x) (SCD_START_OFFSET + 0x24 + (x) * 4)
1209
1210/*
1211 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
1212 * For FIFO mode, index indicates next frame to transmit.
1213 * For Scheduler-ACK mode, index indicates first frame in Tx window.
1214 * Initialized by driver, updated by scheduler.
1215 */
1216#define SCD_QUEUE_RDPTR(x) (SCD_START_OFFSET + 0x64 + (x) * 4)
1217
1218/*
1219 * Select which queues work in chain mode (1) vs. not (0).
1220 * Use chain mode to build chains of aggregated frames.
1221 * Bit fields:
1222 * 31-16: Reserved
1223 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
1224 * NOTE: If driver sets up a queue for chain mode, it should also be set up
1225 * in Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x).
1226 */
1227#define SCD_QUEUECHAIN_SEL (SCD_START_OFFSET + 0xd0)
1228
1229/*
1230 * Select which queues interrupt driver when scheduler increments
1231 * a queue's read pointer (index).
1232 * Bit fields:
1233 * 31-16: Reserved
1234 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
1235 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
1236 * from Rx queue to read Tx command responses and update Tx queues.
1237 */
1238#define SCD_INTERRUPT_MASK (SCD_START_OFFSET + 0xe4)
1239
1240/*
1241 * Queue search status registers. One for each queue.
1242 * Sets up queue mode and assigns queue to Tx DMA channel.
1243 * Bit fields:
1244 * 19-10: Write mask/enable bits for bits 0-9
1245 * 9: Driver should init to "0"
1246 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
1247 * Driver should init to "1" for aggregation mode, or "0" otherwise.
1248 * 7-6: Driver should init to "0"
1249 * 5: Window Size Left; indicates whether scheduler can request
1250 * another TFD, based on window size, etc. Driver should init
1251 * this bit to "1" for aggregation mode, or "0" for non-agg.
1252 * 4-1: Tx FIFO to use (range 0-7).
1253 * 0: Queue is active (1), not active (0).
1254 * Other bits should be written as "0"
1255 *
1256 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
1257 * via SCD_QUEUECHAIN_SEL.
1258 */
1259#define SCD_QUEUE_STATUS_BITS(x) (SCD_START_OFFSET + 0x104 + (x) * 4)
1260
1261/* Bit field positions */
1262#define SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
1263#define SCD_QUEUE_STTS_REG_POS_TXF (1)
1264#define SCD_QUEUE_STTS_REG_POS_WSL (5)
1265#define SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
1266
1267/* Write masks */
1268#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
1269#define SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
1270
1271/**
1272 * 4965 internal SRAM structures for scheduler, shared with driver ...
1273 *
1274 * Driver should clear and initialize the following areas after receiving
1275 * "Alive" response from 4965 uCode, i.e. after initial
1276 * uCode load, or after a uCode load done for error recovery:
1277 *
1278 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
1279 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
1280 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
1281 *
1282 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
1283 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
1284 * All OFFSET values must be added to this base address.
1285 */
1286
1287/*
1288 * Queue context. One 8-byte entry for each of 16 queues.
1289 *
1290 * Driver should clear this entire area (size 0x80) to 0 after receiving
1291 * "Alive" notification from uCode. Additionally, driver should init
1292 * each queue's entry as follows:
1293 *
1294 * LS Dword bit fields:
1295 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
1296 *
1297 * MS Dword bit fields:
1298 * 16-22: Frame limit. Driver should init to 10 (0xa).
1299 *
1300 * Driver should init all other bits to 0.
1301 *
1302 * Init must be done after driver receives "Alive" response from 4965 uCode,
1303 * and when setting up queue for aggregation.
1304 */
1305#define SCD_CONTEXT_DATA_OFFSET 0x380
1306#define SCD_CONTEXT_QUEUE_OFFSET(x) (SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
1307
1308#define SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
1309#define SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
1310#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
1311#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
1312
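A sketch of the per-queue context initialization this describes (not part of this patch), assuming an SRAM write helper along the lines of iwl_write_targ_mem() and a scd_base previously read from SCD_SRAM_BASE_ADDR. Note the comment above mentions a frame limit of 0xa while SCD_FRAME_LIMIT is 64; the sketch simply uses the named constants:

/* Sketch: set the Scheduler-ACK window size (low dword) and frame limit
 * (high dword) in queue txq_id's 8-byte context entry. */
static void sketch_scd_queue_ctx_init(struct iwl_priv *priv, u32 scd_base,
                                      int txq_id)
{
        u32 off = scd_base + SCD_CONTEXT_QUEUE_OFFSET(txq_id);

        iwl_write_targ_mem(priv, off,
                           (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
                           SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
        iwl_write_targ_mem(priv, off + sizeof(u32),
                           (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                           SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}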
1313/*
1314 * Tx Status Bitmap
1315 *
1316 * Driver should clear this entire area (size 0x100) to 0 after receiving
1317 * "Alive" notification from uCode. Area is used only by device itself;
1318 * no other support (besides clearing) is required from driver.
1319 */
1320#define SCD_TX_STTS_BITMAP_OFFSET 0x400
1321
1322/*
1323 * RAxTID to queue translation mapping.
1324 *
1325 * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
1326 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
1327 * one QOS priority level destined for one station (for this wireless link,
1328 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
1329 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
1330 * mode, the device ignores the mapping value.
1331 *
1332 * Bit fields, for each 16-bit map:
1333 * 15-9: Reserved, set to 0
1334 * 8-4: Index into device's station table for recipient station
1335 * 3-0: Traffic ID (tid), range 0-15
1336 *
1337 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
1338 * "Alive" notification from uCode. To update a 16-bit map value, driver
1339 * must read a dword-aligned value from device SRAM, replace the 16-bit map
1340 * value of interest, and write the dword value back into device SRAM.
1341 */
1342#define SCD_TRANSLATE_TBL_OFFSET 0x500
1343
1344/* Find translation table dword to read/write for given queue */
1345#define SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
1346 ((SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
1347
1348#define SCD_TXFIFO_POS_TID (0)
1349#define SCD_TXFIFO_POS_RA (4)
1350#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
1351
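A sketch of the dword-wide read-modify-write described above (not part of this patch), assuming SRAM accessors along the lines of iwl_read_targ_mem()/iwl_write_targ_mem():

/* Sketch: map queue txq_id to a station/TID pair.  Odd queues occupy the
 * upper halfword of their shared dword, even queues the lower halfword. */
static void sketch_scd_set_ra_tid_map(struct iwl_priv *priv, u32 scd_base,
                                      int txq_id, u8 sta_id, u8 tid)
{
        u32 addr = scd_base + SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
        u16 map = ((sta_id << SCD_TXFIFO_POS_RA) | (tid << SCD_TXFIFO_POS_TID)) &
                  SCD_QUEUE_RA_TID_MAP_RATID_MSK;
        u32 val = iwl_read_targ_mem(priv, addr);

        if (txq_id & 0x1)
                val = (val & 0x0000ffff) | ((u32)map << 16);
        else
                val = (val & 0xffff0000) | map;

        iwl_write_targ_mem(priv, addr, val);
}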
1352/*********************** END TX SCHEDULER *************************************/
1353
1354static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
1355{
1356 return le32_to_cpu(rate_n_flags) & 0xFF;
1357}
1358static inline u16 iwl4965_hw_get_rate_n_flags(__le32 rate_n_flags)
1359{
1360 return le32_to_cpu(rate_n_flags) & 0xFFFF;
1361}
1362static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1363{
1364 return cpu_to_le32(flags|(u16)rate);
1365}
1366
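A brief usage note for the accessors above (values illustrative; RATE_MCS_ANT_A_MSK is assumed from the driver's rate-flag definitions):

/*   __le32 rnf = iwl4965_hw_set_rate_n_flags(plcp, RATE_MCS_ANT_A_MSK);
 *   iwl4965_hw_get_rate(rnf)          - returns plcp (low 8 bits)
 *   iwl4965_hw_get_rate_n_flags(rnf)  - returns the low 16 bits (rate + flags)
 * Note the flags argument is a u16, so only the low 16 flag bits can be set
 * through this helper. */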
1367 796
1368/** 797/**
1369 * Tx/Rx Queues 798 * Tx/Rx Queues
@@ -1385,14 +814,15 @@ static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1385 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array 814 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
1386 * in DRAM containing 256 Transmit Frame Descriptors (TFDs). 815 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
1387 */ 816 */
1388#define IWL4965_MAX_WIN_SIZE 64 817#define IWL49_MAX_WIN_SIZE 64
1389#define IWL4965_QUEUE_SIZE 256 818#define IWL49_QUEUE_SIZE 256
1390#define IWL4965_NUM_FIFOS 7 819#define IWL49_NUM_FIFOS 7
1391#define IWL4965_MAX_NUM_QUEUES 16 820#define IWL49_CMD_FIFO_NUM 4
1392 821#define IWL49_NUM_QUEUES 16
822#define IWL49_NUM_AMPDU_QUEUES 8
1393 823
1394/** 824/**
1395 * struct iwl4965_tfd_frame_data 825 * struct iwl_tfd_frame_data
1396 * 826 *
1397 * Describes up to 2 buffers containing (contiguous) portions of a Tx frame. 827 * Describes up to 2 buffers containing (contiguous) portions of a Tx frame.
1398 * Each buffer must be on dword boundary. 828 * Each buffer must be on dword boundary.
@@ -1411,7 +841,7 @@ static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1411 * 31-20: Tx buffer 2 length (bytes) 841 * 31-20: Tx buffer 2 length (bytes)
1412 * 19- 0: Tx buffer 2 address bits [35:16] 842 * 19- 0: Tx buffer 2 address bits [35:16]
1413 */ 843 */
1414struct iwl4965_tfd_frame_data { 844struct iwl_tfd_frame_data {
1415 __le32 tb1_addr; 845 __le32 tb1_addr;
1416 846
1417 __le32 val1; 847 __le32 val1;
@@ -1441,7 +871,7 @@ struct iwl4965_tfd_frame_data {
1441 871
1442 872
1443/** 873/**
1444 * struct iwl4965_tfd_frame 874 * struct iwl_tfd_frame
1445 * 875 *
1446 * Transmit Frame Descriptor (TFD) 876 * Transmit Frame Descriptor (TFD)
1447 * 877 *
@@ -1468,7 +898,7 @@ struct iwl4965_tfd_frame_data {
1468 * 898 *
1469 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. 899 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
1470 */ 900 */
1471struct iwl4965_tfd_frame { 901struct iwl_tfd_frame {
1472 __le32 val0; 902 __le32 val0;
1473 /* __le32 rsvd1:24; */ 903 /* __le32 rsvd1:24; */
1474 /* __le32 num_tbs:5; */ 904 /* __le32 num_tbs:5; */
@@ -1477,7 +907,7 @@ struct iwl4965_tfd_frame {
1477#define IWL_num_tbs_SYM val0 907#define IWL_num_tbs_SYM val0
1478 /* __le32 rsvd2:1; */ 908 /* __le32 rsvd2:1; */
1479 /* __le32 padding:2; */ 909 /* __le32 padding:2; */
1480 struct iwl4965_tfd_frame_data pa[10]; 910 struct iwl_tfd_frame_data pa[10];
1481 __le32 reserved; 911 __le32 reserved;
1482} __attribute__ ((packed)); 912} __attribute__ ((packed));
1483 913
@@ -1520,10 +950,10 @@ struct iwl4965_queue_byte_cnt_entry {
1520 * 4965 assumes tables are separated by 1024 bytes. 950 * 4965 assumes tables are separated by 1024 bytes.
1521 */ 951 */
1522struct iwl4965_sched_queue_byte_cnt_tbl { 952struct iwl4965_sched_queue_byte_cnt_tbl {
1523 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL4965_QUEUE_SIZE + 953 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL49_QUEUE_SIZE +
1524 IWL4965_MAX_WIN_SIZE]; 954 IWL49_MAX_WIN_SIZE];
1525 u8 dont_care[1024 - 955 u8 dont_care[1024 -
1526 (IWL4965_QUEUE_SIZE + IWL4965_MAX_WIN_SIZE) * 956 (IWL49_QUEUE_SIZE + IWL49_MAX_WIN_SIZE) *
1527 sizeof(__le16)]; 957 sizeof(__le16)];
1528} __attribute__ ((packed)); 958} __attribute__ ((packed));
1529 959
@@ -1553,7 +983,7 @@ struct iwl4965_sched_queue_byte_cnt_tbl {
1553 */ 983 */
1554struct iwl4965_shared { 984struct iwl4965_shared {
1555 struct iwl4965_sched_queue_byte_cnt_tbl 985 struct iwl4965_sched_queue_byte_cnt_tbl
1556 queues_byte_cnt_tbls[IWL4965_MAX_NUM_QUEUES]; 986 queues_byte_cnt_tbls[IWL49_NUM_QUEUES];
1557 __le32 rb_closed; 987 __le32 rb_closed;
1558 988
1559 /* __le32 rb_closed_stts_rb_num:12; */ 989 /* __le32 rb_closed_stts_rb_num:12; */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index 3a7f0cb710ec..3ccb84aa5dbc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -28,7 +28,6 @@
28#include <linux/skbuff.h> 28#include <linux/skbuff.h>
29#include <linux/wireless.h> 29#include <linux/wireless.h>
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include <net/ieee80211.h>
32 31
33#include <linux/netdevice.h> 32#include <linux/netdevice.h>
34#include <linux/etherdevice.h> 33#include <linux/etherdevice.h>
@@ -38,13 +37,14 @@
38 37
39#include "../net/mac80211/rate.h" 38#include "../net/mac80211/rate.h"
40 39
41#include "iwl-4965.h" 40#include "iwl-dev.h"
41#include "iwl-sta.h"
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-helpers.h" 43#include "iwl-helpers.h"
44 44
45#define RS_NAME "iwl-4965-rs" 45#define RS_NAME "iwl-4965-rs"
46 46
47#define NUM_TRY_BEFORE_ANTENNA_TOGGLE 1 47#define NUM_TRY_BEFORE_ANT_TOGGLE 1
48#define IWL_NUMBER_TRY 1 48#define IWL_NUMBER_TRY 1
49#define IWL_HT_NUMBER_TRY 3 49#define IWL_HT_NUMBER_TRY 3
50 50
@@ -65,9 +65,16 @@ static u8 rs_ht_to_legacy[] = {
65 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX 65 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
66}; 66};
67 67
68struct iwl4965_rate { 68static const u8 ant_toggle_lookup[] = {
69 u32 rate_n_flags; 69 /*ANT_NONE -> */ ANT_NONE,
70} __attribute__ ((packed)); 70 /*ANT_A -> */ ANT_B,
71 /*ANT_B -> */ ANT_C,
72 /*ANT_AB -> */ ANT_BC,
73 /*ANT_C -> */ ANT_A,
74 /*ANT_AC -> */ ANT_AB,
75 /*ANT_BC -> */ ANT_AC,
76 /*ANT_ABC -> */ ANT_ABC,
77};
71 78
72/** 79/**
73 * struct iwl4965_rate_scale_data -- tx success history for one rate 80 * struct iwl4965_rate_scale_data -- tx success history for one rate
@@ -88,19 +95,17 @@ struct iwl4965_rate_scale_data {
88 * one for "active", and one for "search". 95 * one for "active", and one for "search".
89 */ 96 */
90struct iwl4965_scale_tbl_info { 97struct iwl4965_scale_tbl_info {
91 enum iwl4965_table_type lq_type; 98 enum iwl_table_type lq_type;
92 enum iwl4965_antenna_type antenna_type; 99 u8 ant_type;
93 u8 is_SGI; /* 1 = short guard interval */ 100 u8 is_SGI; /* 1 = short guard interval */
94 u8 is_fat; /* 1 = 40 MHz channel width */ 101 u8 is_fat; /* 1 = 40 MHz channel width */
95 u8 is_dup; /* 1 = duplicated data streams */ 102 u8 is_dup; /* 1 = duplicated data streams */
96 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ 103 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
97 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 104 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
98 struct iwl4965_rate current_rate; /* rate_n_flags, uCode API format */ 105 u32 current_rate; /* rate_n_flags, uCode API format */
99 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 106 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
100}; 107};
101 108
102#ifdef CONFIG_IWL4965_HT
103
104struct iwl4965_traffic_load { 109struct iwl4965_traffic_load {
105 unsigned long time_stamp; /* age of the oldest statistics */ 110 unsigned long time_stamp; /* age of the oldest statistics */
106 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time 111 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
@@ -112,8 +117,6 @@ struct iwl4965_traffic_load {
112 u8 head; /* start of the circular buffer */ 117 u8 head; /* start of the circular buffer */
113}; 118};
114 119
115#endif /* CONFIG_IWL4965_HT */
116
117/** 120/**
118 * struct iwl4965_lq_sta -- driver's rate scaling private structure 121 * struct iwl4965_lq_sta -- driver's rate scaling private structure
119 * 122 *
@@ -136,8 +139,6 @@ struct iwl4965_lq_sta {
136 u32 flush_timer; /* time staying in mode before new search */ 139 u32 flush_timer; /* time staying in mode before new search */
137 140
138 u8 action_counter; /* # mode-switch actions tried */ 141 u8 action_counter; /* # mode-switch actions tried */
139 u8 antenna;
140 u8 valid_antenna;
141 u8 is_green; 142 u8 is_green;
142 u8 is_dup; 143 u8 is_dup;
143 enum ieee80211_band band; 144 enum ieee80211_band band;
@@ -145,24 +146,21 @@ struct iwl4965_lq_sta {
145 146
146 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 147 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
147 u32 supp_rates; 148 u32 supp_rates;
148 u16 active_rate; 149 u16 active_legacy_rate;
149 u16 active_siso_rate; 150 u16 active_siso_rate;
150 u16 active_mimo_rate; 151 u16 active_mimo2_rate;
152 u16 active_mimo3_rate;
151 u16 active_rate_basic; 153 u16 active_rate_basic;
152 154
153 struct iwl_link_quality_cmd lq; 155 struct iwl_link_quality_cmd lq;
154 struct iwl4965_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ 156 struct iwl4965_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
155#ifdef CONFIG_IWL4965_HT
156 struct iwl4965_traffic_load load[TID_MAX_LOAD_COUNT]; 157 struct iwl4965_traffic_load load[TID_MAX_LOAD_COUNT];
157 u8 tx_agg_tid_en; 158 u8 tx_agg_tid_en;
158#endif
159#ifdef CONFIG_MAC80211_DEBUGFS 159#ifdef CONFIG_MAC80211_DEBUGFS
160 struct dentry *rs_sta_dbgfs_scale_table_file; 160 struct dentry *rs_sta_dbgfs_scale_table_file;
161 struct dentry *rs_sta_dbgfs_stats_table_file; 161 struct dentry *rs_sta_dbgfs_stats_table_file;
162#ifdef CONFIG_IWL4965_HT
163 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; 162 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
164#endif 163 u32 dbg_fixed_rate;
165 struct iwl4965_rate dbg_fixed;
166#endif 164#endif
167 struct iwl_priv *drv; 165 struct iwl_priv *drv;
168}; 166};
@@ -171,17 +169,17 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
171 struct net_device *dev, 169 struct net_device *dev,
172 struct ieee80211_hdr *hdr, 170 struct ieee80211_hdr *hdr,
173 struct sta_info *sta); 171 struct sta_info *sta);
174static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta, 172static void rs_fill_link_cmd(const struct iwl_priv *priv,
175 struct iwl4965_rate *tx_mcs, 173 struct iwl4965_lq_sta *lq_sta,
176 struct iwl_link_quality_cmd *tbl); 174 u32 rate_n_flags);
177 175
178 176
179#ifdef CONFIG_MAC80211_DEBUGFS 177#ifdef CONFIG_MAC80211_DEBUGFS
180static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 178static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
181 struct iwl4965_rate *mcs, int index); 179 u32 *rate_n_flags, int index);
182#else 180#else
183static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 181static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
184 struct iwl4965_rate *mcs, int index) 182 u32 *rate_n_flags, int index)
185{} 183{}
186#endif 184#endif
187 185
@@ -190,6 +188,7 @@ static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
190 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits 188 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
191 * "G" is the only table that supports CCK (the first 4 rates). 189 * "G" is the only table that supports CCK (the first 4 rates).
192 */ 190 */
191/*FIXME:RS: need to separate tables for MIMO2/MIMO3*/
193static s32 expected_tpt_A[IWL_RATE_COUNT] = { 192static s32 expected_tpt_A[IWL_RATE_COUNT] = {
194 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186 193 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186
195}; 194};
@@ -230,7 +229,7 @@ static s32 expected_tpt_mimo40MHzSGI[IWL_RATE_COUNT] = {
230 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293 229 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293
231}; 230};
232 231
233static inline u8 iwl4965_rate_get_rate(u32 rate_n_flags) 232static inline u8 rs_extract_rate(u32 rate_n_flags)
234{ 233{
235 return (u8)(rate_n_flags & 0xFF); 234 return (u8)(rate_n_flags & 0xFF);
236} 235}
@@ -245,7 +244,11 @@ static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window)
245 window->stamp = 0; 244 window->stamp = 0;
246} 245}
247 246
248#ifdef CONFIG_IWL4965_HT 247static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
248{
249 return ((ant_type & valid_antenna) == ant_type);
250}
251
249/* 252/*
250 * removes the old data from the statistics. All data that is older than 253 * removes the old data from the statistics. All data that is older than
251 * TID_MAX_TIME_DIFF, will be deleted. 254 * TID_MAX_TIME_DIFF, will be deleted.
@@ -271,15 +274,21 @@ static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time)
271 * increment traffic load value for tid and also remove 274 * increment traffic load value for tid and also remove
272 * any old values if past a certain time period 275 * any old values if past a certain time period
273 */ 276 */
274static void rs_tl_add_packet(struct iwl4965_lq_sta *lq_data, u8 tid) 277static u8 rs_tl_add_packet(struct iwl4965_lq_sta *lq_data,
278 struct ieee80211_hdr *hdr)
275{ 279{
276 u32 curr_time = jiffies_to_msecs(jiffies); 280 u32 curr_time = jiffies_to_msecs(jiffies);
277 u32 time_diff; 281 u32 time_diff;
278 s32 index; 282 s32 index;
279 struct iwl4965_traffic_load *tl = NULL; 283 struct iwl4965_traffic_load *tl = NULL;
284 __le16 fc = hdr->frame_control;
285 u8 tid;
280 286
281 if (tid >= TID_MAX_LOAD_COUNT) 287 if (ieee80211_is_data_qos(fc)) {
282 return; 288 u8 *qc = ieee80211_get_qos_ctl(hdr);
289 tid = qc[0] & 0xf;
290 } else
291 return MAX_TID_COUNT;
283 292
284 tl = &lq_data->load[tid]; 293 tl = &lq_data->load[tid];
285 294
@@ -292,7 +301,7 @@ static void rs_tl_add_packet(struct iwl4965_lq_sta *lq_data, u8 tid)
292 tl->queue_count = 1; 301 tl->queue_count = 1;
293 tl->head = 0; 302 tl->head = 0;
294 tl->packet_count[0] = 1; 303 tl->packet_count[0] = 1;
295 return; 304 return MAX_TID_COUNT;
296 } 305 }
297 306
298 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); 307 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
@@ -309,6 +318,8 @@ static void rs_tl_add_packet(struct iwl4965_lq_sta *lq_data, u8 tid)
309 318
310 if ((index + 1) > tl->queue_count) 319 if ((index + 1) > tl->queue_count)
311 tl->queue_count = index + 1; 320 tl->queue_count = index + 1;
321
322 return tid;
312} 323}
313 324
314/* 325/*
@@ -349,9 +360,9 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
349 unsigned long state; 360 unsigned long state;
350 DECLARE_MAC_BUF(mac); 361 DECLARE_MAC_BUF(mac);
351 362
352 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 363 spin_lock_bh(&sta->lock);
353 state = sta->ampdu_mlme.tid_state_tx[tid]; 364 state = sta->ampdu_mlme.tid_state_tx[tid];
354 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 365 spin_unlock_bh(&sta->lock);
355 366
356 if (state == HT_AGG_STATE_IDLE && 367 if (state == HT_AGG_STATE_IDLE &&
357 rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 368 rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
@@ -372,7 +383,12 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
372 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 383 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
373} 384}
374 385
375#endif /* CONFIG_IWLWIFI_HT */ 386static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
387{
388 return (!!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
389 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
390 !!(rate_n_flags & RATE_MCS_ANT_C_MSK));
391}
376 392
377/** 393/**
378 * rs_collect_tx_data - Update the success/failure sliding window 394 * rs_collect_tx_data - Update the success/failure sliding window
@@ -386,8 +402,7 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
386 int successes) 402 int successes)
387{ 403{
388 struct iwl4965_rate_scale_data *window = NULL; 404 struct iwl4965_rate_scale_data *window = NULL;
389 u64 mask; 405 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
390 u8 win_size = IWL_RATE_MAX_WINDOW;
391 s32 fail_count; 406 s32 fail_count;
392 407
393 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 408 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
@@ -405,14 +420,14 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
405 * we keep these bitmaps!). 420 * we keep these bitmaps!).
406 */ 421 */
407 while (retries > 0) { 422 while (retries > 0) {
408 if (window->counter >= win_size) { 423 if (window->counter >= IWL_RATE_MAX_WINDOW) {
409 window->counter = win_size - 1; 424
410 mask = 1; 425 /* remove earliest */
411 mask = (mask << (win_size - 1)); 426 window->counter = IWL_RATE_MAX_WINDOW - 1;
427
412 if (window->data & mask) { 428 if (window->data & mask) {
413 window->data &= ~mask; 429 window->data &= ~mask;
414 window->success_counter = 430 window->success_counter--;
415 window->success_counter - 1;
416 } 431 }
417 } 432 }
418 433
@@ -422,10 +437,9 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
422 /* Shift bitmap by one frame (throw away oldest history), 437 /* Shift bitmap by one frame (throw away oldest history),
423 * OR in "1", and increment "success" if this 438 * OR in "1", and increment "success" if this
424 * frame was successful. */ 439 * frame was successful. */
425 mask = window->data; 440 window->data <<= 1;
426 window->data = (mask << 1);
427 if (successes > 0) { 441 if (successes > 0) {
428 window->success_counter = window->success_counter + 1; 442 window->success_counter++;
429 window->data |= 0x1; 443 window->data |= 0x1;
430 successes--; 444 successes--;
431 } 445 }
@@ -458,168 +472,162 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
458/* 472/*
459 * Fill uCode API rate_n_flags field, based on "search" or "active" table. 473 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
460 */ 474 */
461static void rs_mcs_from_tbl(struct iwl4965_rate *mcs_rate, 475/* FIXME:RS:remove this function and put the flags statically in the table */
462 struct iwl4965_scale_tbl_info *tbl, 476static u32 rate_n_flags_from_tbl(struct iwl4965_scale_tbl_info *tbl,
463 int index, u8 use_green) 477 int index, u8 use_green)
464{ 478{
479 u32 rate_n_flags = 0;
480
465 if (is_legacy(tbl->lq_type)) { 481 if (is_legacy(tbl->lq_type)) {
466 mcs_rate->rate_n_flags = iwl4965_rates[index].plcp; 482 rate_n_flags = iwl_rates[index].plcp;
467 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) 483 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
468 mcs_rate->rate_n_flags |= RATE_MCS_CCK_MSK; 484 rate_n_flags |= RATE_MCS_CCK_MSK;
469 485
470 } else if (is_siso(tbl->lq_type)) { 486 } else if (is_Ht(tbl->lq_type)) {
471 if (index > IWL_LAST_OFDM_RATE) 487 if (index > IWL_LAST_OFDM_RATE) {
488 IWL_ERROR("invalid HT rate index %d\n", index);
472 index = IWL_LAST_OFDM_RATE; 489 index = IWL_LAST_OFDM_RATE;
473 mcs_rate->rate_n_flags = iwl4965_rates[index].plcp_siso | 490 }
474 RATE_MCS_HT_MSK; 491 rate_n_flags = RATE_MCS_HT_MSK;
475 } else {
476 if (index > IWL_LAST_OFDM_RATE)
477 index = IWL_LAST_OFDM_RATE;
478 mcs_rate->rate_n_flags = iwl4965_rates[index].plcp_mimo |
479 RATE_MCS_HT_MSK;
480 }
481
482 switch (tbl->antenna_type) {
483 case ANT_BOTH:
484 mcs_rate->rate_n_flags |= RATE_MCS_ANT_AB_MSK;
485 break;
486 case ANT_MAIN:
487 mcs_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK;
488 break;
489 case ANT_AUX:
490 mcs_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK;
491 break;
492 case ANT_NONE:
493 break;
494 }
495
496 if (is_legacy(tbl->lq_type))
497 return;
498 492
499 if (tbl->is_fat) { 493 if (is_siso(tbl->lq_type))
500 if (tbl->is_dup) 494 rate_n_flags |= iwl_rates[index].plcp_siso;
501 mcs_rate->rate_n_flags |= RATE_MCS_DUP_MSK; 495 else if (is_mimo2(tbl->lq_type))
496 rate_n_flags |= iwl_rates[index].plcp_mimo2;
502 else 497 else
503 mcs_rate->rate_n_flags |= RATE_MCS_FAT_MSK; 498 rate_n_flags |= iwl_rates[index].plcp_mimo3;
499 } else {
500 IWL_ERROR("Invalid tbl->lq_type %d\n", tbl->lq_type);
504 } 501 }
505 if (tbl->is_SGI)
506 mcs_rate->rate_n_flags |= RATE_MCS_SGI_MSK;
507 502
508 if (use_green) { 503 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
509 mcs_rate->rate_n_flags |= RATE_MCS_GF_MSK; 504 RATE_MCS_ANT_ABC_MSK);
510 if (is_siso(tbl->lq_type)) 505
511 mcs_rate->rate_n_flags &= ~RATE_MCS_SGI_MSK; 506 if (is_Ht(tbl->lq_type)) {
507 if (tbl->is_fat) {
508 if (tbl->is_dup)
509 rate_n_flags |= RATE_MCS_DUP_MSK;
510 else
511 rate_n_flags |= RATE_MCS_FAT_MSK;
512 }
513 if (tbl->is_SGI)
514 rate_n_flags |= RATE_MCS_SGI_MSK;
515
516 if (use_green) {
517 rate_n_flags |= RATE_MCS_GF_MSK;
518 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
519 rate_n_flags &= ~RATE_MCS_SGI_MSK;
520 IWL_ERROR("GF was set with SGI:SISO\n");
521 }
522 }
512 } 523 }
524 return rate_n_flags;
513} 525}
514 526
515/* 527/*
516 * Interpret uCode API's rate_n_flags format, 528 * Interpret uCode API's rate_n_flags format,
517 * fill "search" or "active" tx mode table. 529 * fill "search" or "active" tx mode table.
518 */ 530 */
519static int rs_get_tbl_info_from_mcs(const struct iwl4965_rate *mcs_rate, 531static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
520 enum ieee80211_band band, 532 enum ieee80211_band band,
521 struct iwl4965_scale_tbl_info *tbl, 533 struct iwl4965_scale_tbl_info *tbl,
522 int *rate_idx) 534 int *rate_idx)
523{ 535{
524 int index; 536 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
525 u32 ant_msk; 537 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
538 u8 mcs;
526 539
527 index = iwl4965_hwrate_to_plcp_idx(mcs_rate->rate_n_flags); 540 *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
528 541
529 if (index == IWL_RATE_INVALID) { 542 if (*rate_idx == IWL_RATE_INVALID) {
530 *rate_idx = -1; 543 *rate_idx = -1;
531 return -EINVAL; 544 return -EINVAL;
532 } 545 }
533 tbl->is_SGI = 0; /* default legacy setup */ 546 tbl->is_SGI = 0; /* default legacy setup */
534 tbl->is_fat = 0; 547 tbl->is_fat = 0;
535 tbl->is_dup = 0; 548 tbl->is_dup = 0;
536 tbl->antenna_type = ANT_BOTH; /* default MIMO setup */ 549 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
550 tbl->lq_type = LQ_NONE;
537 551
538 /* legacy rate format */ 552 /* legacy rate format */
539 if (!(mcs_rate->rate_n_flags & RATE_MCS_HT_MSK)) { 553 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
540 ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK); 554 if (num_of_ant == 1) {
541
542 if (ant_msk == RATE_MCS_ANT_AB_MSK)
543 tbl->lq_type = LQ_NONE;
544 else {
545
546 if (band == IEEE80211_BAND_5GHZ) 555 if (band == IEEE80211_BAND_5GHZ)
547 tbl->lq_type = LQ_A; 556 tbl->lq_type = LQ_A;
548 else 557 else
549 tbl->lq_type = LQ_G; 558 tbl->lq_type = LQ_G;
550
551 if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK)
552 tbl->antenna_type = ANT_MAIN;
553 else
554 tbl->antenna_type = ANT_AUX;
555 }
556 *rate_idx = index;
557
558 /* HT rate format, SISO (might be 20 MHz legacy or 40 MHz fat width) */
559 } else if (iwl4965_rate_get_rate(mcs_rate->rate_n_flags)
560 <= IWL_RATE_SISO_60M_PLCP) {
561 tbl->lq_type = LQ_SISO;
562
563 ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK);
564 if (ant_msk == RATE_MCS_ANT_AB_MSK)
565 tbl->lq_type = LQ_NONE;
566 else {
567 if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK)
568 tbl->antenna_type = ANT_MAIN;
569 else
570 tbl->antenna_type = ANT_AUX;
571 } 559 }
572 if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK) 560 /* HT rate format */
573 tbl->is_SGI = 1;
574
575 if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) ||
576 (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK))
577 tbl->is_fat = 1;
578
579 if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)
580 tbl->is_dup = 1;
581
582 *rate_idx = index;
583
584 /* HT rate format, MIMO (might be 20 MHz legacy or 40 MHz fat width) */
585 } else { 561 } else {
586 tbl->lq_type = LQ_MIMO; 562 if (rate_n_flags & RATE_MCS_SGI_MSK)
587 if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK)
588 tbl->is_SGI = 1; 563 tbl->is_SGI = 1;
589 564
590 if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) || 565 if ((rate_n_flags & RATE_MCS_FAT_MSK) ||
591 (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)) 566 (rate_n_flags & RATE_MCS_DUP_MSK))
592 tbl->is_fat = 1; 567 tbl->is_fat = 1;
593 568
594 if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK) 569 if (rate_n_flags & RATE_MCS_DUP_MSK)
595 tbl->is_dup = 1; 570 tbl->is_dup = 1;
596 *rate_idx = index; 571
572 mcs = rs_extract_rate(rate_n_flags);
573
574 /* SISO */
575 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
576 if (num_of_ant == 1)
577 tbl->lq_type = LQ_SISO; /*else NONE*/
578 /* MIMO2 */
579 } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
580 if (num_of_ant == 2)
581 tbl->lq_type = LQ_MIMO2;
582 /* MIMO3 */
583 } else {
584 if (num_of_ant == 3)
585 tbl->lq_type = LQ_MIMO3;
586 }
597 } 587 }
598 return 0; 588 return 0;
599} 589}
600 590
601static inline void rs_toggle_antenna(struct iwl4965_rate *new_rate, 591/* switch to another antenna/antennas and return 1 */
602 struct iwl4965_scale_tbl_info *tbl) 592/* if no other valid antenna found, return 0 */
593static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
594 struct iwl4965_scale_tbl_info *tbl)
603{ 595{
604 if (tbl->antenna_type == ANT_AUX) { 596 u8 new_ant_type;
605 tbl->antenna_type = ANT_MAIN; 597
606 new_rate->rate_n_flags &= ~RATE_MCS_ANT_B_MSK; 598 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
607 new_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK; 599 return 0;
608 } else { 600
609 tbl->antenna_type = ANT_AUX; 601 if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
610 new_rate->rate_n_flags &= ~RATE_MCS_ANT_A_MSK; 602 return 0;
611 new_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK; 603
612 } 604 new_ant_type = ant_toggle_lookup[tbl->ant_type];
605
606 while ((new_ant_type != tbl->ant_type) &&
607 !rs_is_valid_ant(valid_ant, new_ant_type))
608 new_ant_type = ant_toggle_lookup[new_ant_type];
609
610 if (new_ant_type == tbl->ant_type)
611 return 0;
612
613 tbl->ant_type = new_ant_type;
614 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
615 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
616 return 1;
613} 617}
614 618
615static inline u8 rs_use_green(struct iwl_priv *priv, 619/* FIXME:RS: in 4965 we don't use greenfield at all */
616 struct ieee80211_conf *conf) 620/* FIXME:RS: don't use greenfield for now in TX */
621#if 0
622static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
617{ 623{
618#ifdef CONFIG_IWL4965_HT
619 return ((conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) && 624 return ((conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
620 priv->current_ht_config.is_green_field && 625 priv->current_ht_config.is_green_field &&
621 !priv->current_ht_config.non_GF_STA_present); 626 !priv->current_ht_config.non_GF_STA_present);
622#endif /* CONFIG_IWL4965_HT */ 627}
628#endif
629static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
630{
623 return 0; 631 return 0;
624} 632}
625 633
@@ -630,27 +638,28 @@ static inline u8 rs_use_green(struct iwl_priv *priv,
630 * basic available rates. 638 * basic available rates.
631 * 639 *
632 */ 640 */
633static void rs_get_supported_rates(struct iwl4965_lq_sta *lq_sta, 641static u16 rs_get_supported_rates(struct iwl4965_lq_sta *lq_sta,
634 struct ieee80211_hdr *hdr, 642 struct ieee80211_hdr *hdr,
635 enum iwl4965_table_type rate_type, 643 enum iwl_table_type rate_type)
636 u16 *data_rate)
637{ 644{
638 if (is_legacy(rate_type)) 645 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
639 *data_rate = lq_sta->active_rate; 646 lq_sta->active_rate_basic)
640 else { 647 return lq_sta->active_rate_basic;
648
649 if (is_legacy(rate_type)) {
650 return lq_sta->active_legacy_rate;
651 } else {
641 if (is_siso(rate_type)) 652 if (is_siso(rate_type))
642 *data_rate = lq_sta->active_siso_rate; 653 return lq_sta->active_siso_rate;
654 else if (is_mimo2(rate_type))
655 return lq_sta->active_mimo2_rate;
643 else 656 else
644 *data_rate = lq_sta->active_mimo_rate; 657 return lq_sta->active_mimo3_rate;
645 }
646
647 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
648 lq_sta->active_rate_basic) {
649 *data_rate = lq_sta->active_rate_basic;
650 } 658 }
651} 659}
652 660
653static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type) 661static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
662 int rate_type)
654{ 663{
655 u8 high = IWL_RATE_INVALID; 664 u8 high = IWL_RATE_INVALID;
656 u8 low = IWL_RATE_INVALID; 665 u8 low = IWL_RATE_INVALID;
@@ -684,7 +693,7 @@ static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
684 693
685 low = index; 694 low = index;
686 while (low != IWL_RATE_INVALID) { 695 while (low != IWL_RATE_INVALID) {
687 low = iwl4965_rates[low].prev_rs; 696 low = iwl_rates[low].prev_rs;
688 if (low == IWL_RATE_INVALID) 697 if (low == IWL_RATE_INVALID)
689 break; 698 break;
690 if (rate_mask & (1 << low)) 699 if (rate_mask & (1 << low))
@@ -694,7 +703,7 @@ static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
694 703
695 high = index; 704 high = index;
696 while (high != IWL_RATE_INVALID) { 705 while (high != IWL_RATE_INVALID) {
697 high = iwl4965_rates[high].next_rs; 706 high = iwl_rates[high].next_rs;
698 if (high == IWL_RATE_INVALID) 707 if (high == IWL_RATE_INVALID)
699 break; 708 break;
700 if (rate_mask & (1 << high)) 709 if (rate_mask & (1 << high))
@@ -705,9 +714,9 @@ static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
705 return (high << 8) | low; 714 return (high << 8) | low;
706} 715}
707 716
708static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta, 717static u32 rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
709 struct iwl4965_scale_tbl_info *tbl, u8 scale_index, 718 struct iwl4965_scale_tbl_info *tbl, u8 scale_index,
710 u8 ht_possible, struct iwl4965_rate *mcs_rate) 719 u8 ht_possible)
711{ 720{
712 s32 low; 721 s32 low;
713 u16 rate_mask; 722 u16 rate_mask;
@@ -726,15 +735,14 @@ static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
726 else 735 else
727 tbl->lq_type = LQ_G; 736 tbl->lq_type = LQ_G;
728 737
729 if ((tbl->antenna_type == ANT_BOTH) || 738 if (num_of_ant(tbl->ant_type) > 1)
730 (tbl->antenna_type == ANT_NONE)) 739 tbl->ant_type = ANT_A;/*FIXME:RS*/
731 tbl->antenna_type = ANT_MAIN;
732 740
733 tbl->is_fat = 0; 741 tbl->is_fat = 0;
734 tbl->is_SGI = 0; 742 tbl->is_SGI = 0;
735 } 743 }
736 744
737 rs_get_supported_rates(lq_sta, NULL, tbl->lq_type, &rate_mask); 745 rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
738 746
739 /* Mask with station rate restriction */ 747 /* Mask with station rate restriction */
740 if (is_legacy(tbl->lq_type)) { 748 if (is_legacy(tbl->lq_type)) {
@@ -748,25 +756,26 @@ static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
748 756
749 /* If we switched from HT to legacy, check current rate */ 757 /* If we switched from HT to legacy, check current rate */
750 if (switch_to_legacy && (rate_mask & (1 << scale_index))) { 758 if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
751 rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); 759 low = scale_index;
752 return; 760 goto out;
753 } 761 }
754 762
755 high_low = rs_get_adjacent_rate(scale_index, rate_mask, tbl->lq_type); 763 high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
764 tbl->lq_type);
756 low = high_low & 0xff; 765 low = high_low & 0xff;
757 766
758 if (low != IWL_RATE_INVALID) 767 if (low == IWL_RATE_INVALID)
759 rs_mcs_from_tbl(mcs_rate, tbl, low, is_green); 768 low = scale_index;
760 else 769
761 rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); 770out:
771 return rate_n_flags_from_tbl(tbl, low, is_green);
762} 772}
763 773
764/* 774/*
765 * mac80211 sends us Tx status 775 * mac80211 sends us Tx status
766 */ 776 */
767static void rs_tx_status(void *priv_rate, struct net_device *dev, 777static void rs_tx_status(void *priv_rate, struct net_device *dev,
768 struct sk_buff *skb, 778 struct sk_buff *skb)
769 struct ieee80211_tx_status *tx_resp)
770{ 779{
771 int status; 780 int status;
772 u8 retries; 781 u8 retries;
@@ -778,13 +787,14 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
778 struct iwl_priv *priv = (struct iwl_priv *)priv_rate; 787 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
779 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 788 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
780 struct ieee80211_hw *hw = local_to_hw(local); 789 struct ieee80211_hw *hw = local_to_hw(local);
790 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
781 struct iwl4965_rate_scale_data *window = NULL; 791 struct iwl4965_rate_scale_data *window = NULL;
782 struct iwl4965_rate_scale_data *search_win = NULL; 792 struct iwl4965_rate_scale_data *search_win = NULL;
783 struct iwl4965_rate tx_mcs; 793 u32 tx_rate;
784 struct iwl4965_scale_tbl_info tbl_type; 794 struct iwl4965_scale_tbl_info tbl_type;
785 struct iwl4965_scale_tbl_info *curr_tbl, *search_tbl; 795 struct iwl4965_scale_tbl_info *curr_tbl, *search_tbl;
786 u8 active_index = 0; 796 u8 active_index = 0;
787 u16 fc = le16_to_cpu(hdr->frame_control); 797 __le16 fc = hdr->frame_control;
788 s32 tpt = 0; 798 s32 tpt = 0;
789 799
790 IWL_DEBUG_RATE_LIMIT("get frame ack response, update rate scale window\n"); 800 IWL_DEBUG_RATE_LIMIT("get frame ack response, update rate scale window\n");
@@ -793,11 +803,11 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
793 return; 803 return;
794 804
795 /* This packet was aggregated but doesn't carry rate scale info */ 805 /* This packet was aggregated but doesn't carry rate scale info */
796 if ((tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) && 806 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
797 !(tx_resp->flags & IEEE80211_TX_STATUS_AMPDU)) 807 !(info->flags & IEEE80211_TX_STAT_AMPDU))
798 return; 808 return;
799 809
800 retries = tx_resp->retry_count; 810 retries = info->status.retry_count;
801 811
802 if (retries > 15) 812 if (retries > 15)
803 retries = 15; 813 retries = 15;
@@ -812,9 +822,6 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
812 822
813 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 823 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
814 824
815 if (!priv->lq_mngr.lq_ready)
816 goto out;
817
818 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 825 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
819 !lq_sta->ibss_sta_added) 826 !lq_sta->ibss_sta_added)
820 goto out; 827 goto out;
@@ -822,15 +829,6 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
822 table = &lq_sta->lq; 829 table = &lq_sta->lq;
823 active_index = lq_sta->active_tbl; 830 active_index = lq_sta->active_tbl;
824 831
825 /* Get mac80211 antenna info */
826 lq_sta->antenna =
827 (lq_sta->valid_antenna & local->hw.conf.antenna_sel_tx);
828 if (!lq_sta->antenna)
829 lq_sta->antenna = lq_sta->valid_antenna;
830
831 /* Ignore mac80211 antenna info for now */
832 lq_sta->antenna = lq_sta->valid_antenna;
833
834 curr_tbl = &(lq_sta->lq_info[active_index]); 832 curr_tbl = &(lq_sta->lq_info[active_index]);
835 search_tbl = &(lq_sta->lq_info[(1 - active_index)]); 833 search_tbl = &(lq_sta->lq_info[(1 - active_index)]);
836 window = (struct iwl4965_rate_scale_data *) 834 window = (struct iwl4965_rate_scale_data *)
@@ -846,28 +844,26 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
846 * to check "search" mode, or a prior "search" mode after we've moved 844 * to check "search" mode, or a prior "search" mode after we've moved
847 * to a new "search" mode (which might become the new "active" mode). 845 * to a new "search" mode (which might become the new "active" mode).
848 */ 846 */
849 tx_mcs.rate_n_flags = le32_to_cpu(table->rs_table[0].rate_n_flags); 847 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
850 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band, &tbl_type, &rs_index); 848 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
851 if (priv->band == IEEE80211_BAND_5GHZ) 849 if (priv->band == IEEE80211_BAND_5GHZ)
852 rs_index -= IWL_FIRST_OFDM_RATE; 850 rs_index -= IWL_FIRST_OFDM_RATE;
853 851
854 if ((tx_resp->control.tx_rate == NULL) || 852 if ((info->tx_rate_idx < 0) ||
855 (tbl_type.is_SGI ^ 853 (tbl_type.is_SGI ^
856 !!(tx_resp->control.flags & IEEE80211_TXCTL_SHORT_GI)) || 854 !!(info->flags & IEEE80211_TX_CTL_SHORT_GI)) ||
857 (tbl_type.is_fat ^ 855 (tbl_type.is_fat ^
858 !!(tx_resp->control.flags & IEEE80211_TXCTL_40_MHZ_WIDTH)) || 856 !!(info->flags & IEEE80211_TX_CTL_40_MHZ_WIDTH)) ||
859 (tbl_type.is_dup ^ 857 (tbl_type.is_dup ^
860 !!(tx_resp->control.flags & IEEE80211_TXCTL_DUP_DATA)) || 858 !!(info->flags & IEEE80211_TX_CTL_DUP_DATA)) ||
861 (tbl_type.antenna_type ^ 859 (tbl_type.ant_type ^ info->antenna_sel_tx) ||
862 tx_resp->control.antenna_sel_tx) || 860 (!!(tx_rate & RATE_MCS_HT_MSK) ^
863 (!!(tx_mcs.rate_n_flags & RATE_MCS_HT_MSK) ^ 861 !!(info->flags & IEEE80211_TX_CTL_OFDM_HT)) ||
864 !!(tx_resp->control.flags & IEEE80211_TXCTL_OFDM_HT)) || 862 (!!(tx_rate & RATE_MCS_GF_MSK) ^
865 (!!(tx_mcs.rate_n_flags & RATE_MCS_GF_MSK) ^ 863 !!(info->flags & IEEE80211_TX_CTL_GREEN_FIELD)) ||
866 !!(tx_resp->control.flags & IEEE80211_TXCTL_GREEN_FIELD)) ||
867 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate != 864 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate !=
868 tx_resp->control.tx_rate->bitrate)) { 865 hw->wiphy->bands[info->band]->bitrates[info->tx_rate_idx].bitrate)) {
869 IWL_DEBUG_RATE("initial rate does not match 0x%x\n", 866 IWL_DEBUG_RATE("initial rate does not match 0x%x\n", tx_rate);
870 tx_mcs.rate_n_flags);
871 goto out; 867 goto out;
872 } 868 }
873 869
@@ -875,15 +871,14 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
875 while (retries) { 871 while (retries) {
876 /* Look up the rate and other info used for each tx attempt. 872 /* Look up the rate and other info used for each tx attempt.
877 * Each tx attempt steps one entry deeper in the rate table. */ 873 * Each tx attempt steps one entry deeper in the rate table. */
878 tx_mcs.rate_n_flags = 874 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags);
879 le32_to_cpu(table->rs_table[index].rate_n_flags); 875 rs_get_tbl_info_from_mcs(tx_rate, priv->band,
880 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band,
881 &tbl_type, &rs_index); 876 &tbl_type, &rs_index);
882 877
883 /* If type matches "search" table, 878 /* If type matches "search" table,
884 * add failure to "search" history */ 879 * add failure to "search" history */
885 if ((tbl_type.lq_type == search_tbl->lq_type) && 880 if ((tbl_type.lq_type == search_tbl->lq_type) &&
886 (tbl_type.antenna_type == search_tbl->antenna_type) && 881 (tbl_type.ant_type == search_tbl->ant_type) &&
887 (tbl_type.is_SGI == search_tbl->is_SGI)) { 882 (tbl_type.is_SGI == search_tbl->is_SGI)) {
888 if (search_tbl->expected_tpt) 883 if (search_tbl->expected_tpt)
889 tpt = search_tbl->expected_tpt[rs_index]; 884 tpt = search_tbl->expected_tpt[rs_index];
@@ -894,7 +889,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
894 /* Else if type matches "current/active" table, 889 /* Else if type matches "current/active" table,
895 * add failure to "current/active" history */ 890 * add failure to "current/active" history */
896 } else if ((tbl_type.lq_type == curr_tbl->lq_type) && 891 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
897 (tbl_type.antenna_type == curr_tbl->antenna_type) && 892 (tbl_type.ant_type == curr_tbl->ant_type) &&
898 (tbl_type.is_SGI == curr_tbl->is_SGI)) { 893 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
899 if (curr_tbl->expected_tpt) 894 if (curr_tbl->expected_tpt)
900 tpt = curr_tbl->expected_tpt[rs_index]; 895 tpt = curr_tbl->expected_tpt[rs_index];
@@ -917,44 +912,41 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
917 * if Tx was successful first try, use original rate, 912 * if Tx was successful first try, use original rate,
918 * else look up the rate that was, finally, successful. 913 * else look up the rate that was, finally, successful.
919 */ 914 */
920 tx_mcs.rate_n_flags = le32_to_cpu(table->rs_table[index].rate_n_flags); 915 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags);
921 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band, &tbl_type, &rs_index); 916 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
922 917
923 /* Update frame history window with "success" if Tx got ACKed ... */ 918 /* Update frame history window with "success" if Tx got ACKed ... */
924 if (tx_resp->flags & IEEE80211_TX_STATUS_ACK) 919 status = !!(info->flags & IEEE80211_TX_STAT_ACK);
925 status = 1;
926 else
927 status = 0;
928 920
929 /* If type matches "search" table, 921 /* If type matches "search" table,
930 * add final tx status to "search" history */ 922 * add final tx status to "search" history */
931 if ((tbl_type.lq_type == search_tbl->lq_type) && 923 if ((tbl_type.lq_type == search_tbl->lq_type) &&
932 (tbl_type.antenna_type == search_tbl->antenna_type) && 924 (tbl_type.ant_type == search_tbl->ant_type) &&
933 (tbl_type.is_SGI == search_tbl->is_SGI)) { 925 (tbl_type.is_SGI == search_tbl->is_SGI)) {
934 if (search_tbl->expected_tpt) 926 if (search_tbl->expected_tpt)
935 tpt = search_tbl->expected_tpt[rs_index]; 927 tpt = search_tbl->expected_tpt[rs_index];
936 else 928 else
937 tpt = 0; 929 tpt = 0;
938 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) 930 if (info->flags & IEEE80211_TX_CTL_AMPDU)
939 rs_collect_tx_data(search_win, rs_index, tpt, 931 rs_collect_tx_data(search_win, rs_index, tpt,
940 tx_resp->ampdu_ack_len, 932 info->status.ampdu_ack_len,
941 tx_resp->ampdu_ack_map); 933 info->status.ampdu_ack_map);
942 else 934 else
943 rs_collect_tx_data(search_win, rs_index, tpt, 935 rs_collect_tx_data(search_win, rs_index, tpt,
944 1, status); 936 1, status);
945 /* Else if type matches "current/active" table, 937 /* Else if type matches "current/active" table,
946 * add final tx status to "current/active" history */ 938 * add final tx status to "current/active" history */
947 } else if ((tbl_type.lq_type == curr_tbl->lq_type) && 939 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
948 (tbl_type.antenna_type == curr_tbl->antenna_type) && 940 (tbl_type.ant_type == curr_tbl->ant_type) &&
949 (tbl_type.is_SGI == curr_tbl->is_SGI)) { 941 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
950 if (curr_tbl->expected_tpt) 942 if (curr_tbl->expected_tpt)
951 tpt = curr_tbl->expected_tpt[rs_index]; 943 tpt = curr_tbl->expected_tpt[rs_index];
952 else 944 else
953 tpt = 0; 945 tpt = 0;
954 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) 946 if (info->flags & IEEE80211_TX_CTL_AMPDU)
955 rs_collect_tx_data(window, rs_index, tpt, 947 rs_collect_tx_data(window, rs_index, tpt,
956 tx_resp->ampdu_ack_len, 948 info->status.ampdu_ack_len,
957 tx_resp->ampdu_ack_map); 949 info->status.ampdu_ack_map);
958 else 950 else
959 rs_collect_tx_data(window, rs_index, tpt, 951 rs_collect_tx_data(window, rs_index, tpt,
960 1, status); 952 1, status);
@@ -963,10 +955,10 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
963 /* If not searching for new mode, increment success/failed counter 955 /* If not searching for new mode, increment success/failed counter
964 * ... these help determine when to start searching again */ 956 * ... these help determine when to start searching again */
965 if (lq_sta->stay_in_tbl) { 957 if (lq_sta->stay_in_tbl) {
966 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) { 958 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
967 lq_sta->total_success += tx_resp->ampdu_ack_map; 959 lq_sta->total_success += info->status.ampdu_ack_map;
968 lq_sta->total_failed += 960 lq_sta->total_failed +=
969 (tx_resp->ampdu_ack_len - tx_resp->ampdu_ack_map); 961 (info->status.ampdu_ack_len - info->status.ampdu_ack_map);
970 } else { 962 } else {
971 if (status) 963 if (status)
972 lq_sta->total_success++; 964 lq_sta->total_success++;
@@ -982,30 +974,6 @@ out:
982 return; 974 return;
983} 975}
984 976
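
For context on the rs_collect_tx_data() calls in rs_tx_status() above: each rate index keeps a small history window of recent attempts, and the success ratio derived from it drives the later scaling decisions. The sketch below models that bookkeeping only in outline; the window size and the fixed-point scaling are assumptions, and the real driver also ages old frames out of the window with a bitmap, which is omitted here. The (ratio * expected_tpt + 64) / 128 rounding mirrors the expression visible later in rs_rate_scale_perform().

#include <stdint.h>

#define RS_WINDOW_SIZE 62   /* illustrative; the driver keeps a similar fixed-size window */

struct rate_window {
	int counter;            /* frames accounted in the window */
	int success_counter;    /* frames ACKed */
	int success_ratio;      /* percentage scaled by 128, or -1 if empty */
	int average_tpt;        /* success_ratio folded with expected throughput */
};

/* Record one tx attempt outcome and refresh the derived statistics.
 * expected_tpt is the theoretical throughput of this rate/mode. */
static void collect_tx_data(struct rate_window *win, int expected_tpt, int acked)
{
	if (win->counter < RS_WINDOW_SIZE)
		win->counter++;
	if (acked)
		win->success_counter++;

	if (win->counter > 0) {
		win->success_ratio = 128 * 100 * win->success_counter / win->counter;
		/* same rounding as the driver: (ratio * tpt + 64) / 128 */
		win->average_tpt = (win->success_ratio * expected_tpt + 64) / 128;
	} else {
		win->success_ratio = -1;
		win->average_tpt = -1;
	}
}
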
985static u8 rs_is_ant_connected(u8 valid_antenna,
986 enum iwl4965_antenna_type antenna_type)
987{
988 if (antenna_type == ANT_AUX)
989 return ((valid_antenna & 0x2) ? 1:0);
990 else if (antenna_type == ANT_MAIN)
991 return ((valid_antenna & 0x1) ? 1:0);
992 else if (antenna_type == ANT_BOTH)
993 return ((valid_antenna & 0x3) == 0x3);
994
995 return 1;
996}
997
998static u8 rs_is_other_ant_connected(u8 valid_antenna,
999 enum iwl4965_antenna_type antenna_type)
1000{
1001 if (antenna_type == ANT_AUX)
1002 return rs_is_ant_connected(valid_antenna, ANT_MAIN);
1003 else
1004 return rs_is_ant_connected(valid_antenna, ANT_AUX);
1005
1006 return 0;
1007}
1008
1009/* 977/*
1010 * Begin a period of staying with a selected modulation mode. 978 * Begin a period of staying with a selected modulation mode.
1011 * Set "stay_in_tbl" flag to prevent any mode switches. 979 * Set "stay_in_tbl" flag to prevent any mode switches.
@@ -1014,10 +982,10 @@ static u8 rs_is_other_ant_connected(u8 valid_antenna,
1014 * These control how long we stay using same modulation mode before 982 * These control how long we stay using same modulation mode before
1015 * searching for a new mode. 983 * searching for a new mode.
1016 */ 984 */
1017static void rs_set_stay_in_table(u8 is_legacy, 985static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1018 struct iwl4965_lq_sta *lq_sta) 986 struct iwl4965_lq_sta *lq_sta)
1019{ 987{
1020 IWL_DEBUG_HT("we are staying in the same table\n"); 988 IWL_DEBUG_RATE("we are staying in the same table\n");
1021 lq_sta->stay_in_tbl = 1; /* only place this gets set */ 989 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1022 if (is_legacy) { 990 if (is_legacy) {
1023 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; 991 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
@@ -1036,7 +1004,7 @@ static void rs_set_stay_in_table(u8 is_legacy,
1036/* 1004/*
1037 * Find correct throughput table for given mode of modulation 1005 * Find correct throughput table for given mode of modulation
1038 */ 1006 */
1039static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta, 1007static void rs_set_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1040 struct iwl4965_scale_tbl_info *tbl) 1008 struct iwl4965_scale_tbl_info *tbl)
1041{ 1009{
1042 if (is_legacy(tbl->lq_type)) { 1010 if (is_legacy(tbl->lq_type)) {
@@ -1055,7 +1023,7 @@ static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1055 else 1023 else
1056 tbl->expected_tpt = expected_tpt_siso20MHz; 1024 tbl->expected_tpt = expected_tpt_siso20MHz;
1057 1025
1058 } else if (is_mimo(tbl->lq_type)) { 1026 } else if (is_mimo(tbl->lq_type)) { /* FIXME:need to separate mimo2/3 */
1059 if (tbl->is_fat && !lq_sta->is_dup) 1027 if (tbl->is_fat && !lq_sta->is_dup)
1060 if (tbl->is_SGI) 1028 if (tbl->is_SGI)
1061 tbl->expected_tpt = expected_tpt_mimo40MHzSGI; 1029 tbl->expected_tpt = expected_tpt_mimo40MHzSGI;
@@ -1069,7 +1037,6 @@ static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1069 tbl->expected_tpt = expected_tpt_G; 1037 tbl->expected_tpt = expected_tpt_G;
1070} 1038}
1071 1039
1072#ifdef CONFIG_IWL4965_HT
1073/* 1040/*
1074 * Find starting rate for new "search" high-throughput mode of modulation. 1041 * Find starting rate for new "search" high-throughput mode of modulation.
1075 * Goal is to find lowest expected rate (under perfect conditions) that is 1042 * Goal is to find lowest expected rate (under perfect conditions) that is
@@ -1085,7 +1052,7 @@ static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1085static s32 rs_get_best_rate(struct iwl_priv *priv, 1052static s32 rs_get_best_rate(struct iwl_priv *priv,
1086 struct iwl4965_lq_sta *lq_sta, 1053 struct iwl4965_lq_sta *lq_sta,
1087 struct iwl4965_scale_tbl_info *tbl, /* "search" */ 1054 struct iwl4965_scale_tbl_info *tbl, /* "search" */
1088 u16 rate_mask, s8 index, s8 rate) 1055 u16 rate_mask, s8 index)
1089{ 1056{
1090 /* "active" values */ 1057 /* "active" values */
1091 struct iwl4965_scale_tbl_info *active_tbl = 1058 struct iwl4965_scale_tbl_info *active_tbl =
@@ -1098,11 +1065,13 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1098 1065
1099 s32 new_rate, high, low, start_hi; 1066 s32 new_rate, high, low, start_hi;
1100 u16 high_low; 1067 u16 high_low;
1068 s8 rate = index;
1101 1069
1102 new_rate = high = low = start_hi = IWL_RATE_INVALID; 1070 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1103 1071
1104 for (; ;) { 1072 for (; ;) {
1105 high_low = rs_get_adjacent_rate(rate, rate_mask, tbl->lq_type); 1073 high_low = rs_get_adjacent_rate(priv, rate, rate_mask,
1074 tbl->lq_type);
1106 1075
1107 low = high_low & 0xff; 1076 low = high_low & 0xff;
1108 high = (high_low >> 8) & 0xff; 1077 high = (high_low >> 8) & 0xff;
@@ -1169,23 +1138,16 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1169 1138
1170 return new_rate; 1139 return new_rate;
1171} 1140}
1172#endif /* CONFIG_IWL4965_HT */
1173
1174static inline u8 rs_is_both_ant_supp(u8 valid_antenna)
1175{
1176 return (rs_is_ant_connected(valid_antenna, ANT_BOTH));
1177}
1178 1141
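
The goal stated for rs_get_best_rate() above, finding the lowest expected rate that still beats the measured throughput of the current mode, can be illustrated with a much simpler linear scan. This is only a model of the idea; the real function walks up and down through rs_get_adjacent_rate() and handles corner cases not shown here.

#include <stdint.h>

#define RATE_INVALID (-1)   /* stand-in for IWL_RATE_INVALID */

/* Return the lowest allowed rate index whose theoretical throughput
 * still exceeds what the active mode is actually delivering. */
static int best_start_rate(const int *expected_tpt, uint16_t rate_mask,
			   int rate_count, int measured_tpt)
{
	int i, best = RATE_INVALID;

	for (i = rate_count - 1; i >= 0; i--) {
		if (!(rate_mask & (1 << i)))
			continue;                 /* rate not allowed for this station */
		if (expected_tpt[i] <= measured_tpt)
			break;                    /* anything lower can't beat current tpt */
		best = i;                         /* lowest rate so far that still wins */
	}
	return best;
}
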
1179/* 1142/*
1180 * Set up search table for MIMO 1143 * Set up search table for MIMO
1181 */ 1144 */
1182static int rs_switch_to_mimo(struct iwl_priv *priv, 1145static int rs_switch_to_mimo2(struct iwl_priv *priv,
1183 struct iwl4965_lq_sta *lq_sta, 1146 struct iwl4965_lq_sta *lq_sta,
1184 struct ieee80211_conf *conf, 1147 struct ieee80211_conf *conf,
1185 struct sta_info *sta, 1148 struct sta_info *sta,
1186 struct iwl4965_scale_tbl_info *tbl, int index) 1149 struct iwl4965_scale_tbl_info *tbl, int index)
1187{ 1150{
1188#ifdef CONFIG_IWL4965_HT
1189 u16 rate_mask; 1151 u16 rate_mask;
1190 s32 rate; 1152 s32 rate;
1191 s8 is_green = lq_sta->is_green; 1153 s8 is_green = lq_sta->is_green;
@@ -1194,26 +1156,27 @@ static int rs_switch_to_mimo(struct iwl_priv *priv,
1194 !sta->ht_info.ht_supported) 1156 !sta->ht_info.ht_supported)
1195 return -1; 1157 return -1;
1196 1158
1197 IWL_DEBUG_HT("LQ: try to switch to MIMO\n");
1198 tbl->lq_type = LQ_MIMO;
1199 rs_get_supported_rates(lq_sta, NULL, tbl->lq_type,
1200 &rate_mask);
1201
1202 if (priv->current_ht_config.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC) 1159 if (priv->current_ht_config.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC)
1203 return -1; 1160 return -1;
1204 1161
1205 /* Need both Tx chains/antennas to support MIMO */ 1162 /* Need both Tx chains/antennas to support MIMO */
1206 if (!rs_is_both_ant_supp(lq_sta->antenna)) 1163 if (priv->hw_params.tx_chains_num < 2)
1207 return -1; 1164 return -1;
1208 1165
1166 IWL_DEBUG_RATE("LQ: try to switch to MIMO2\n");
1167
1168 tbl->lq_type = LQ_MIMO2;
1209 tbl->is_dup = lq_sta->is_dup; 1169 tbl->is_dup = lq_sta->is_dup;
1210 tbl->action = 0; 1170 tbl->action = 0;
1171 rate_mask = lq_sta->active_mimo2_rate;
1172
1211 if (priv->current_ht_config.supported_chan_width 1173 if (priv->current_ht_config.supported_chan_width
1212 == IWL_CHANNEL_WIDTH_40MHZ) 1174 == IWL_CHANNEL_WIDTH_40MHZ)
1213 tbl->is_fat = 1; 1175 tbl->is_fat = 1;
1214 else 1176 else
1215 tbl->is_fat = 0; 1177 tbl->is_fat = 0;
1216 1178
1179 /* FIXME: - don't toggle SGI here
1217 if (tbl->is_fat) { 1180 if (tbl->is_fat) {
1218 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY) 1181 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY)
1219 tbl->is_SGI = 1; 1182 tbl->is_SGI = 1;
@@ -1223,22 +1186,24 @@ static int rs_switch_to_mimo(struct iwl_priv *priv,
1223 tbl->is_SGI = 1; 1186 tbl->is_SGI = 1;
1224 else 1187 else
1225 tbl->is_SGI = 0; 1188 tbl->is_SGI = 0;
1189 */
1190
1191 rs_set_expected_tpt_table(lq_sta, tbl);
1226 1192
1227 rs_get_expected_tpt_table(lq_sta, tbl); 1193 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1228 1194
1229 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index, index); 1195 IWL_DEBUG_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1230 1196
1231 IWL_DEBUG_HT("LQ: MIMO best rate %d mask %X\n", rate, rate_mask); 1197 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1232 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) 1198 IWL_DEBUG_RATE("Can't switch with index %d rate mask %x\n",
1199 rate, rate_mask);
1233 return -1; 1200 return -1;
1234 rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); 1201 }
1202 tbl->current_rate = rate_n_flags_from_tbl(tbl, rate, is_green);
1235 1203
1236 IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", 1204 IWL_DEBUG_RATE("LQ: Switch to new mcs %X index is green %X\n",
1237 tbl->current_rate.rate_n_flags, is_green); 1205 tbl->current_rate, is_green);
1238 return 0; 1206 return 0;
1239#else
1240 return -1;
1241#endif /*CONFIG_IWL4965_HT */
1242} 1207}
1243 1208
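
rs_switch_to_mimo2() above now gates on priv->hw_params.tx_chains_num and, elsewhere in the patch, on rs_is_valid_ant(). The underlying idea is a plain bitmask check: valid_tx_ant has one bit per connected chain, and an antenna selection is usable only if all of its bits are present. A minimal sketch, assuming the A/B/C bit assignment implied by the ANT_A/ANT_B/ANT_AB names in the patch:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative antenna bitmask convention; exact values are an assumption. */
#define ANT_A  0x1
#define ANT_B  0x2
#define ANT_C  0x4
#define ANT_AB (ANT_A | ANT_B)

/* An antenna selection is usable only if every chain it names is
 * present in the hardware's valid_tx_ant mask. */
static bool is_valid_ant(uint8_t valid_tx_ant, uint8_t ant_type)
{
	return (ant_type & valid_tx_ant) == ant_type;
}

/* Count connected chains; MIMO2 needs at least two of them. */
static int tx_chains_num(uint8_t valid_tx_ant)
{
	int n = 0;

	while (valid_tx_ant) {
		n += valid_tx_ant & 1;
		valid_tx_ant >>= 1;
	}
	return n;
}
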
1244/* 1209/*
@@ -1250,21 +1215,20 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1250 struct sta_info *sta, 1215 struct sta_info *sta,
1251 struct iwl4965_scale_tbl_info *tbl, int index) 1216 struct iwl4965_scale_tbl_info *tbl, int index)
1252{ 1217{
1253#ifdef CONFIG_IWL4965_HT
1254 u16 rate_mask; 1218 u16 rate_mask;
1255 u8 is_green = lq_sta->is_green; 1219 u8 is_green = lq_sta->is_green;
1256 s32 rate; 1220 s32 rate;
1257 1221
1258 IWL_DEBUG_HT("LQ: try to switch to SISO\n");
1259 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) || 1222 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) ||
1260 !sta->ht_info.ht_supported) 1223 !sta->ht_info.ht_supported)
1261 return -1; 1224 return -1;
1262 1225
1226 IWL_DEBUG_RATE("LQ: try to switch to SISO\n");
1227
1263 tbl->is_dup = lq_sta->is_dup; 1228 tbl->is_dup = lq_sta->is_dup;
1264 tbl->lq_type = LQ_SISO; 1229 tbl->lq_type = LQ_SISO;
1265 tbl->action = 0; 1230 tbl->action = 0;
1266 rs_get_supported_rates(lq_sta, NULL, tbl->lq_type, 1231 rate_mask = lq_sta->active_siso_rate;
1267 &rate_mask);
1268 1232
1269 if (priv->current_ht_config.supported_chan_width 1233 if (priv->current_ht_config.supported_chan_width
1270 == IWL_CHANNEL_WIDTH_40MHZ) 1234 == IWL_CHANNEL_WIDTH_40MHZ)
@@ -1272,6 +1236,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1272 else 1236 else
1273 tbl->is_fat = 0; 1237 tbl->is_fat = 0;
1274 1238
1239 /* FIXME: - don't toggle SGI here
1275 if (tbl->is_fat) { 1240 if (tbl->is_fat) {
1276 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY) 1241 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY)
1277 tbl->is_SGI = 1; 1242 tbl->is_SGI = 1;
@@ -1281,27 +1246,24 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1281 tbl->is_SGI = 1; 1246 tbl->is_SGI = 1;
1282 else 1247 else
1283 tbl->is_SGI = 0; 1248 tbl->is_SGI = 0;
1249 */
1284 1250
1285 if (is_green) 1251 if (is_green)
1286 tbl->is_SGI = 0; 1252 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1287 1253
1288 rs_get_expected_tpt_table(lq_sta, tbl); 1254 rs_set_expected_tpt_table(lq_sta, tbl);
1289 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index, index); 1255 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1290 1256
1291 IWL_DEBUG_HT("LQ: get best rate %d mask %X\n", rate, rate_mask); 1257 IWL_DEBUG_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
1292 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { 1258 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1293 IWL_DEBUG_HT("can not switch with index %d rate mask %x\n", 1259 IWL_DEBUG_RATE("can not switch with index %d rate mask %x\n",
1294 rate, rate_mask); 1260 rate, rate_mask);
1295 return -1; 1261 return -1;
1296 } 1262 }
1297 rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); 1263 tbl->current_rate = rate_n_flags_from_tbl(tbl, rate, is_green);
1298 IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", 1264 IWL_DEBUG_RATE("LQ: Switch to new mcs %X index is green %X\n",
1299 tbl->current_rate.rate_n_flags, is_green); 1265 tbl->current_rate, is_green);
1300 return 0; 1266 return 0;
1301#else
1302 return -1;
1303
1304#endif /*CONFIG_IWL4965_HT */
1305} 1267}
1306 1268
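
Both switch helpers above now store a bare u32 in tbl->current_rate instead of the old struct iwl4965_rate, converting between that word and a rate-scale index via rate_n_flags_from_tbl() and iwl_hwrate_to_plcp_idx(). Below is a toy encoder/decoder pair showing the shape of that round trip; the flag bit positions are invented for the sketch and do not match the driver's RATE_MCS_* masks.

#include <stdint.h>

/* Toy layout: low byte carries the PLCP/MCS value, upper bits carry
 * modulation flags.  Bit positions are placeholders for this sketch. */
#define FLAG_HT   (1u << 8)
#define FLAG_SGI  (1u << 9)
#define FLAG_FAT  (1u << 10)
#define FLAG_GF   (1u << 11)

struct tbl_info {
	int is_ht, is_sgi, is_fat;
};

/* Build a rate word from the search/active table state plus a rate index. */
static uint32_t rate_word_from_tbl(const struct tbl_info *tbl,
				   uint8_t plcp, int use_green)
{
	uint32_t r = plcp;

	if (tbl->is_ht)
		r |= FLAG_HT;
	if (tbl->is_sgi)
		r |= FLAG_SGI;
	if (tbl->is_fat)
		r |= FLAG_FAT;
	if (use_green)
		r |= FLAG_GF;
	return r;
}

/* Inverse direction used after a mode switch: recover the rate-scale
 * index from the rate word (here simply the low byte). */
static int rate_word_to_idx(uint32_t rate_n_flags)
{
	return rate_n_flags & 0xff;
}
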
1307/* 1269/*
@@ -1313,7 +1275,6 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1313 struct sta_info *sta, 1275 struct sta_info *sta,
1314 int index) 1276 int index)
1315{ 1277{
1316 int ret = 0;
1317 struct iwl4965_scale_tbl_info *tbl = 1278 struct iwl4965_scale_tbl_info *tbl =
1318 &(lq_sta->lq_info[lq_sta->active_tbl]); 1279 &(lq_sta->lq_info[lq_sta->active_tbl]);
1319 struct iwl4965_scale_tbl_info *search_tbl = 1280 struct iwl4965_scale_tbl_info *search_tbl =
@@ -1322,41 +1283,35 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1322 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1283 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
1323 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT)); 1284 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1324 u8 start_action = tbl->action; 1285 u8 start_action = tbl->action;
1286 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1287 int ret = 0;
1325 1288
1326 for (; ;) { 1289 for (; ;) {
1327 switch (tbl->action) { 1290 switch (tbl->action) {
1328 case IWL_LEGACY_SWITCH_ANTENNA: 1291 case IWL_LEGACY_SWITCH_ANTENNA:
1329 IWL_DEBUG_HT("LQ Legacy switch Antenna\n"); 1292 IWL_DEBUG_RATE("LQ: Legacy toggle Antenna\n");
1330 1293
1331 search_tbl->lq_type = LQ_NONE;
1332 lq_sta->action_counter++; 1294 lq_sta->action_counter++;
1333 1295
1334 /* Don't change antenna if success has been great */ 1296 /* Don't change antenna if success has been great */
1335 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1297 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1336 break; 1298 break;
1337 1299
1338 /* Don't change antenna if other one is not connected */
1339 if (!rs_is_other_ant_connected(lq_sta->antenna,
1340 tbl->antenna_type))
1341 break;
1342
1343 /* Set up search table to try other antenna */ 1300 /* Set up search table to try other antenna */
1344 memcpy(search_tbl, tbl, sz); 1301 memcpy(search_tbl, tbl, sz);
1345 1302
1346 rs_toggle_antenna(&(search_tbl->current_rate), 1303 if (rs_toggle_antenna(valid_tx_ant,
1347 search_tbl); 1304 &search_tbl->current_rate, search_tbl)) {
1348 rs_get_expected_tpt_table(lq_sta, search_tbl); 1305 lq_sta->search_better_tbl = 1;
1349 lq_sta->search_better_tbl = 1; 1306 goto out;
1350 goto out; 1307 }
1351 1308 break;
1352 case IWL_LEGACY_SWITCH_SISO: 1309 case IWL_LEGACY_SWITCH_SISO:
1353 IWL_DEBUG_HT("LQ: Legacy switch to SISO\n"); 1310 IWL_DEBUG_RATE("LQ: Legacy switch to SISO\n");
1354 1311
1355 /* Set up search table to try SISO */ 1312 /* Set up search table to try SISO */
1356 memcpy(search_tbl, tbl, sz); 1313 memcpy(search_tbl, tbl, sz);
1357 search_tbl->lq_type = LQ_SISO;
1358 search_tbl->is_SGI = 0; 1314 search_tbl->is_SGI = 0;
1359 search_tbl->is_fat = 0;
1360 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1315 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1361 search_tbl, index); 1316 search_tbl, index);
1362 if (!ret) { 1317 if (!ret) {
@@ -1366,16 +1321,15 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1366 } 1321 }
1367 1322
1368 break; 1323 break;
1369 case IWL_LEGACY_SWITCH_MIMO: 1324 case IWL_LEGACY_SWITCH_MIMO2:
1370 IWL_DEBUG_HT("LQ: Legacy switch MIMO\n"); 1325 IWL_DEBUG_RATE("LQ: Legacy switch to MIMO2\n");
1371 1326
1372 /* Set up search table to try MIMO */ 1327 /* Set up search table to try MIMO */
1373 memcpy(search_tbl, tbl, sz); 1328 memcpy(search_tbl, tbl, sz);
1374 search_tbl->lq_type = LQ_MIMO;
1375 search_tbl->is_SGI = 0; 1329 search_tbl->is_SGI = 0;
1376 search_tbl->is_fat = 0; 1330 search_tbl->ant_type = ANT_AB;/*FIXME:RS*/
1377 search_tbl->antenna_type = ANT_BOTH; 1331 /*FIXME:RS:need to check ant validity*/
1378 ret = rs_switch_to_mimo(priv, lq_sta, conf, sta, 1332 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1379 search_tbl, index); 1333 search_tbl, index);
1380 if (!ret) { 1334 if (!ret) {
1381 lq_sta->search_better_tbl = 1; 1335 lq_sta->search_better_tbl = 1;
@@ -1385,7 +1339,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1385 break; 1339 break;
1386 } 1340 }
1387 tbl->action++; 1341 tbl->action++;
1388 if (tbl->action > IWL_LEGACY_SWITCH_MIMO) 1342 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
1389 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1343 tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
1390 1344
1391 if (tbl->action == start_action) 1345 if (tbl->action == start_action)
@@ -1396,7 +1350,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1396 1350
1397 out: 1351 out:
1398 tbl->action++; 1352 tbl->action++;
1399 if (tbl->action > IWL_LEGACY_SWITCH_MIMO) 1353 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
1400 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1354 tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
1401 return 0; 1355 return 0;
1402 1356
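
The IWL_LEGACY_SWITCH_ANTENNA case above keys its success on the new rs_toggle_antenna() return value. Conceptually the helper flips a single-stream rate from one chain to the other, but only when the other chain is listed in valid_tx_ant. A sketch of that behaviour, with placeholder antenna flag masks rather than the driver's RATE_MCS_ANT_*_MSK values:

#include <stdint.h>
#include <stdbool.h>

/* Placeholder bit positions inside the rate word for this sketch only. */
#define MCS_ANT_A_MSK 0x4000
#define MCS_ANT_B_MSK 0x8000
#define MCS_ANT_MSK   (MCS_ANT_A_MSK | MCS_ANT_B_MSK)

/* valid_ant uses bit 0x1 for chain A and 0x2 for chain B (an assumption).
 * Returns true if the antenna was actually toggled, mirroring how the
 * result gates lq_sta->search_better_tbl in the code above. */
static bool toggle_antenna(uint8_t valid_ant, uint32_t *rate_n_flags)
{
	uint32_t new_ant;

	if ((*rate_n_flags & MCS_ANT_MSK) == MCS_ANT_A_MSK)
		new_ant = MCS_ANT_B_MSK;
	else
		new_ant = MCS_ANT_A_MSK;

	/* Refuse the toggle if the other chain is not connected. */
	if ((new_ant == MCS_ANT_A_MSK && !(valid_ant & 0x1)) ||
	    (new_ant == MCS_ANT_B_MSK && !(valid_ant & 0x2)))
		return false;

	*rate_n_flags = (*rate_n_flags & ~MCS_ANT_MSK) | new_ant;
	return true;
}
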
@@ -1411,7 +1365,6 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1411 struct sta_info *sta, 1365 struct sta_info *sta,
1412 int index) 1366 int index)
1413{ 1367{
1414 int ret;
1415 u8 is_green = lq_sta->is_green; 1368 u8 is_green = lq_sta->is_green;
1416 struct iwl4965_scale_tbl_info *tbl = 1369 struct iwl4965_scale_tbl_info *tbl =
1417 &(lq_sta->lq_info[lq_sta->active_tbl]); 1370 &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1421,35 +1374,30 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1421 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1374 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
1422 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT)); 1375 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1423 u8 start_action = tbl->action; 1376 u8 start_action = tbl->action;
1377 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1378 int ret;
1424 1379
1425 for (;;) { 1380 for (;;) {
1426 lq_sta->action_counter++; 1381 lq_sta->action_counter++;
1427 switch (tbl->action) { 1382 switch (tbl->action) {
1428 case IWL_SISO_SWITCH_ANTENNA: 1383 case IWL_SISO_SWITCH_ANTENNA:
1429 IWL_DEBUG_HT("LQ: SISO SWITCH ANTENNA SISO\n"); 1384 IWL_DEBUG_RATE("LQ: SISO toggle Antenna\n");
1430 search_tbl->lq_type = LQ_NONE;
1431 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1385 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1432 break; 1386 break;
1433 if (!rs_is_other_ant_connected(lq_sta->antenna,
1434 tbl->antenna_type))
1435 break;
1436 1387
1437 memcpy(search_tbl, tbl, sz); 1388 memcpy(search_tbl, tbl, sz);
1438 search_tbl->action = IWL_SISO_SWITCH_MIMO; 1389 if (rs_toggle_antenna(valid_tx_ant,
1439 rs_toggle_antenna(&(search_tbl->current_rate), 1390 &search_tbl->current_rate, search_tbl)) {
1440 search_tbl); 1391 lq_sta->search_better_tbl = 1;
1441 lq_sta->search_better_tbl = 1; 1392 goto out;
1442 1393 }
1443 goto out; 1394 break;
1444 1395 case IWL_SISO_SWITCH_MIMO2:
1445 case IWL_SISO_SWITCH_MIMO: 1396 IWL_DEBUG_RATE("LQ: SISO switch to MIMO2\n");
1446 IWL_DEBUG_HT("LQ: SISO SWITCH TO MIMO FROM SISO\n");
1447 memcpy(search_tbl, tbl, sz); 1397 memcpy(search_tbl, tbl, sz);
1448 search_tbl->lq_type = LQ_MIMO;
1449 search_tbl->is_SGI = 0; 1398 search_tbl->is_SGI = 0;
1450 search_tbl->is_fat = 0; 1399 search_tbl->ant_type = ANT_AB; /*FIXME:RS*/
1451 search_tbl->antenna_type = ANT_BOTH; 1400 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1452 ret = rs_switch_to_mimo(priv, lq_sta, conf, sta,
1453 search_tbl, index); 1401 search_tbl, index);
1454 if (!ret) { 1402 if (!ret) {
1455 lq_sta->search_better_tbl = 1; 1403 lq_sta->search_better_tbl = 1;
@@ -1457,29 +1405,34 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1457 } 1405 }
1458 break; 1406 break;
1459 case IWL_SISO_SWITCH_GI: 1407 case IWL_SISO_SWITCH_GI:
1460 IWL_DEBUG_HT("LQ: SISO SWITCH TO GI\n"); 1408 if (!tbl->is_fat &&
1409 !(priv->current_ht_config.sgf &
1410 HT_SHORT_GI_20MHZ))
1411 break;
1412 if (tbl->is_fat &&
1413 !(priv->current_ht_config.sgf &
1414 HT_SHORT_GI_40MHZ))
1415 break;
1416
1417 IWL_DEBUG_RATE("LQ: SISO toggle SGI/NGI\n");
1461 1418
1462 memcpy(search_tbl, tbl, sz); 1419 memcpy(search_tbl, tbl, sz);
1463 search_tbl->action = 0; 1420 if (is_green) {
1464 if (search_tbl->is_SGI) 1421 if (!tbl->is_SGI)
1465 search_tbl->is_SGI = 0; 1422 break;
1466 else if (!is_green) 1423 else
1467 search_tbl->is_SGI = 1; 1424 IWL_ERROR("SGI was set in GF+SISO\n");
1468 else 1425 }
1469 break; 1426 search_tbl->is_SGI = !tbl->is_SGI;
1470 lq_sta->search_better_tbl = 1; 1427 rs_set_expected_tpt_table(lq_sta, search_tbl);
1471 if ((tbl->lq_type == LQ_SISO) && 1428 if (tbl->is_SGI) {
1472 (tbl->is_SGI)) {
1473 s32 tpt = lq_sta->last_tpt / 100; 1429 s32 tpt = lq_sta->last_tpt / 100;
1474 if (((!tbl->is_fat) && 1430 if (tpt >= search_tbl->expected_tpt[index])
1475 (tpt >= expected_tpt_siso20MHz[index])) || 1431 break;
1476 ((tbl->is_fat) &&
1477 (tpt >= expected_tpt_siso40MHz[index])))
1478 lq_sta->search_better_tbl = 0;
1479 } 1432 }
1480 rs_get_expected_tpt_table(lq_sta, search_tbl); 1433 search_tbl->current_rate = rate_n_flags_from_tbl(
1481 rs_mcs_from_tbl(&search_tbl->current_rate, 1434 search_tbl, index, is_green);
1482 search_tbl, index, is_green); 1435 lq_sta->search_better_tbl = 1;
1483 goto out; 1436 goto out;
1484 } 1437 }
1485 tbl->action++; 1438 tbl->action++;
@@ -1507,7 +1460,6 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1507 struct sta_info *sta, 1460 struct sta_info *sta,
1508 int index) 1461 int index)
1509{ 1462{
1510 int ret;
1511 s8 is_green = lq_sta->is_green; 1463 s8 is_green = lq_sta->is_green;
1512 struct iwl4965_scale_tbl_info *tbl = 1464 struct iwl4965_scale_tbl_info *tbl =
1513 &(lq_sta->lq_info[lq_sta->active_tbl]); 1465 &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1516,24 +1468,24 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1516 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1468 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
1517 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT)); 1469 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1518 u8 start_action = tbl->action; 1470 u8 start_action = tbl->action;
1471 /*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/
1472 int ret;
1519 1473
1520 for (;;) { 1474 for (;;) {
1521 lq_sta->action_counter++; 1475 lq_sta->action_counter++;
1522 switch (tbl->action) { 1476 switch (tbl->action) {
1523 case IWL_MIMO_SWITCH_ANTENNA_A: 1477 case IWL_MIMO_SWITCH_ANTENNA_A:
1524 case IWL_MIMO_SWITCH_ANTENNA_B: 1478 case IWL_MIMO_SWITCH_ANTENNA_B:
1525 IWL_DEBUG_HT("LQ: MIMO SWITCH TO SISO\n"); 1479 IWL_DEBUG_RATE("LQ: MIMO2 switch to SISO\n");
1526
1527 1480
1528 /* Set up new search table for SISO */ 1481 /* Set up new search table for SISO */
1529 memcpy(search_tbl, tbl, sz); 1482 memcpy(search_tbl, tbl, sz);
1530 search_tbl->lq_type = LQ_SISO; 1483
1531 search_tbl->is_SGI = 0; 1484 /*FIXME:RS:need to check ant validity + C*/
1532 search_tbl->is_fat = 0;
1533 if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A) 1485 if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A)
1534 search_tbl->antenna_type = ANT_MAIN; 1486 search_tbl->ant_type = ANT_A;
1535 else 1487 else
1536 search_tbl->antenna_type = ANT_AUX; 1488 search_tbl->ant_type = ANT_B;
1537 1489
1538 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1490 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1539 search_tbl, index); 1491 search_tbl, index);
@@ -1544,37 +1496,35 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1544 break; 1496 break;
1545 1497
1546 case IWL_MIMO_SWITCH_GI: 1498 case IWL_MIMO_SWITCH_GI:
1547 IWL_DEBUG_HT("LQ: MIMO SWITCH TO GI\n"); 1499 if (!tbl->is_fat &&
1500 !(priv->current_ht_config.sgf &
1501 HT_SHORT_GI_20MHZ))
1502 break;
1503 if (tbl->is_fat &&
1504 !(priv->current_ht_config.sgf &
1505 HT_SHORT_GI_40MHZ))
1506 break;
1507
1508 IWL_DEBUG_RATE("LQ: MIMO toggle SGI/NGI\n");
1548 1509
1549 /* Set up new search table for MIMO */ 1510 /* Set up new search table for MIMO */
1550 memcpy(search_tbl, tbl, sz); 1511 memcpy(search_tbl, tbl, sz);
1551 search_tbl->lq_type = LQ_MIMO; 1512 search_tbl->is_SGI = !tbl->is_SGI;
1552 search_tbl->antenna_type = ANT_BOTH; 1513 rs_set_expected_tpt_table(lq_sta, search_tbl);
1553 search_tbl->action = 0;
1554 if (search_tbl->is_SGI)
1555 search_tbl->is_SGI = 0;
1556 else
1557 search_tbl->is_SGI = 1;
1558 lq_sta->search_better_tbl = 1;
1559
1560 /* 1514 /*
1561 * If active table already uses the fastest possible 1515 * If active table already uses the fastest possible
1562 * modulation (dual stream with short guard interval), 1516 * modulation (dual stream with short guard interval),
1563 * and it's working well, there's no need to look 1517 * and it's working well, there's no need to look
1564 * for a better type of modulation! 1518 * for a better type of modulation!
1565 */ 1519 */
1566 if ((tbl->lq_type == LQ_MIMO) && 1520 if (tbl->is_SGI) {
1567 (tbl->is_SGI)) {
1568 s32 tpt = lq_sta->last_tpt / 100; 1521 s32 tpt = lq_sta->last_tpt / 100;
1569 if (((!tbl->is_fat) && 1522 if (tpt >= search_tbl->expected_tpt[index])
1570 (tpt >= expected_tpt_mimo20MHz[index])) || 1523 break;
1571 ((tbl->is_fat) &&
1572 (tpt >= expected_tpt_mimo40MHz[index])))
1573 lq_sta->search_better_tbl = 0;
1574 } 1524 }
1575 rs_get_expected_tpt_table(lq_sta, search_tbl); 1525 search_tbl->current_rate = rate_n_flags_from_tbl(
1576 rs_mcs_from_tbl(&search_tbl->current_rate, 1526 search_tbl, index, is_green);
1577 search_tbl, index, is_green); 1527 lq_sta->search_better_tbl = 1;
1578 goto out; 1528 goto out;
1579 1529
1580 } 1530 }
@@ -1608,7 +1558,9 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1608 int i; 1558 int i;
1609 int active_tbl; 1559 int active_tbl;
1610 int flush_interval_passed = 0; 1560 int flush_interval_passed = 0;
1561 struct iwl_priv *priv;
1611 1562
1563 priv = lq_sta->drv;
1612 active_tbl = lq_sta->active_tbl; 1564 active_tbl = lq_sta->active_tbl;
1613 1565
1614 tbl = &(lq_sta->lq_info[active_tbl]); 1566 tbl = &(lq_sta->lq_info[active_tbl]);
@@ -1623,9 +1575,6 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1623 (unsigned long)(lq_sta->flush_timer + 1575 (unsigned long)(lq_sta->flush_timer +
1624 IWL_RATE_SCALE_FLUSH_INTVL)); 1576 IWL_RATE_SCALE_FLUSH_INTVL));
1625 1577
1626 /* For now, disable the elapsed time criterion */
1627 flush_interval_passed = 0;
1628
1629 /* 1578 /*
1630 * Check if we should allow search for new modulation mode. 1579 * Check if we should allow search for new modulation mode.
1631 * If many frames have failed or succeeded, or we've used 1580 * If many frames have failed or succeeded, or we've used
@@ -1638,7 +1587,7 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1638 (lq_sta->total_success > lq_sta->max_success_limit) || 1587 (lq_sta->total_success > lq_sta->max_success_limit) ||
1639 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) 1588 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1640 && (flush_interval_passed))) { 1589 && (flush_interval_passed))) {
1641 IWL_DEBUG_HT("LQ: stay is expired %d %d %d\n:", 1590 IWL_DEBUG_RATE("LQ: stay is expired %d %d %d\n:",
1642 lq_sta->total_failed, 1591 lq_sta->total_failed,
1643 lq_sta->total_success, 1592 lq_sta->total_success,
1644 flush_interval_passed); 1593 flush_interval_passed);
@@ -1661,7 +1610,7 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1661 lq_sta->table_count_limit) { 1610 lq_sta->table_count_limit) {
1662 lq_sta->table_count = 0; 1611 lq_sta->table_count = 0;
1663 1612
1664 IWL_DEBUG_HT("LQ: stay in table clear win\n"); 1613 IWL_DEBUG_RATE("LQ: stay in table clear win\n");
1665 for (i = 0; i < IWL_RATE_COUNT; i++) 1614 for (i = 0; i < IWL_RATE_COUNT; i++)
1666 rs_rate_scale_clear_window( 1615 rs_rate_scale_clear_window(
1667 &(tbl->win[i])); 1616 &(tbl->win[i]));
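
rs_set_stay_in_table() and rs_stay_in_table() above implement a simple hysteresis: after a mode switch the algorithm locks into the chosen table until enough frames have failed or succeeded, or the flush interval expires, and only then re-opens the search. A condensed model of that bookkeeping, with the limit fields standing in for the IWL_*_FAILURE_LIMIT and IWL_*_SUCCESS_LIMIT constants:

#include <stdbool.h>

struct stay_state {
	bool stay_in_tbl;               /* set when a mode has just been chosen */
	int  total_failed,  max_failure_limit;
	int  total_success, max_success_limit;
	bool flush_interval_passed;     /* elapsed-time criterion, re-enabled by the patch */
};

/* Returns true when the "stay" period has expired and a new search
 * for a better modulation mode may begin. */
static bool should_search_again(struct stay_state *s)
{
	if (!s->stay_in_tbl)
		return false;

	if (s->total_failed  > s->max_failure_limit ||
	    s->total_success > s->max_success_limit ||
	    s->flush_interval_passed) {
		/* stay period expired: reset counters and allow a new search */
		s->total_failed  = 0;
		s->total_success = 0;
		s->stay_in_tbl   = false;
		return true;
	}
	return false;
}
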
@@ -1699,24 +1648,23 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1699 int high_tpt = IWL_INVALID_VALUE; 1648 int high_tpt = IWL_INVALID_VALUE;
1700 u32 fail_count; 1649 u32 fail_count;
1701 s8 scale_action = 0; 1650 s8 scale_action = 0;
1702 u16 fc, rate_mask; 1651 __le16 fc;
1652 u16 rate_mask;
1703 u8 update_lq = 0; 1653 u8 update_lq = 0;
1704 struct iwl4965_lq_sta *lq_sta; 1654 struct iwl4965_lq_sta *lq_sta;
1705 struct iwl4965_scale_tbl_info *tbl, *tbl1; 1655 struct iwl4965_scale_tbl_info *tbl, *tbl1;
1706 u16 rate_scale_index_msk = 0; 1656 u16 rate_scale_index_msk = 0;
1707 struct iwl4965_rate mcs_rate; 1657 u32 rate;
1708 u8 is_green = 0; 1658 u8 is_green = 0;
1709 u8 active_tbl = 0; 1659 u8 active_tbl = 0;
1710 u8 done_search = 0; 1660 u8 done_search = 0;
1711 u16 high_low; 1661 u16 high_low;
1712#ifdef CONFIG_IWL4965_HT 1662 s32 sr;
1713 u8 tid = MAX_TID_COUNT; 1663 u8 tid = MAX_TID_COUNT;
1714 __le16 *qc;
1715#endif
1716 1664
1717 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n"); 1665 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n");
1718 1666
1719 fc = le16_to_cpu(hdr->frame_control); 1667 fc = hdr->frame_control;
1720 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) { 1668 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) {
1721 /* Send management frames and broadcast/multicast data using 1669 /* Send management frames and broadcast/multicast data using
1722 * lowest rate. */ 1670 * lowest rate. */
@@ -1727,19 +1675,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1727 if (!sta || !sta->rate_ctrl_priv) 1675 if (!sta || !sta->rate_ctrl_priv)
1728 return; 1676 return;
1729 1677
1730 if (!priv->lq_mngr.lq_ready) {
1731 IWL_DEBUG_RATE("still rate scaling not ready\n");
1732 return;
1733 }
1734 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 1678 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
1735 1679
1736#ifdef CONFIG_IWL4965_HT 1680 tid = rs_tl_add_packet(lq_sta, hdr);
1737 qc = ieee80211_get_qos_ctrl(hdr); 1681
1738 if (qc) {
1739 tid = (u8)(le16_to_cpu(*qc) & 0xf);
1740 rs_tl_add_packet(lq_sta, tid);
1741 }
1742#endif
1743 /* 1682 /*
1744 * Select rate-scale / modulation-mode table to work with in 1683 * Select rate-scale / modulation-mode table to work with in
1745 * the rest of this function: "search" if searching for better 1684 * the rest of this function: "search" if searching for better
@@ -1760,8 +1699,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1760 tbl->lq_type); 1699 tbl->lq_type);
1761 1700
1762 /* rates available for this association, and for modulation mode */ 1701 /* rates available for this association, and for modulation mode */
1763 rs_get_supported_rates(lq_sta, hdr, tbl->lq_type, 1702 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1764 &rate_mask);
1765 1703
1766 IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask); 1704 IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask);
1767 1705
@@ -1781,27 +1719,16 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1781 if (!rate_scale_index_msk) 1719 if (!rate_scale_index_msk)
1782 rate_scale_index_msk = rate_mask; 1720 rate_scale_index_msk = rate_mask;
1783 1721
1784 /* If current rate is no longer supported on current association, 1722 if (!((1 << index) & rate_scale_index_msk)) {
1785 * or user changed preferences for rates, find a new supported rate. */ 1723 IWL_ERROR("Current Rate is not valid\n");
1786 if (index < 0 || !((1 << index) & rate_scale_index_msk)) { 1724 return;
1787 index = IWL_INVALID_VALUE;
1788 update_lq = 1;
1789
1790 /* get the highest available rate */
1791 for (i = 0; i <= IWL_RATE_COUNT; i++) {
1792 if ((1 << i) & rate_scale_index_msk)
1793 index = i;
1794 }
1795
1796 if (index == IWL_INVALID_VALUE) {
1797 IWL_WARNING("Can not find a suitable rate\n");
1798 return;
1799 }
1800 } 1725 }
1801 1726
1802 /* Get expected throughput table and history window for current rate */ 1727 /* Get expected throughput table and history window for current rate */
1803 if (!tbl->expected_tpt) 1728 if (!tbl->expected_tpt) {
1804 rs_get_expected_tpt_table(lq_sta, tbl); 1729 IWL_ERROR("tbl->expected_tpt is NULL\n");
1730 return;
1731 }
1805 1732
1806 window = &(tbl->win[index]); 1733 window = &(tbl->win[index]);
1807 1734
@@ -1813,10 +1740,9 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1813 * in current association (use new rate found above). 1740 * in current association (use new rate found above).
1814 */ 1741 */
1815 fail_count = window->counter - window->success_counter; 1742 fail_count = window->counter - window->success_counter;
1816 if (((fail_count < IWL_RATE_MIN_FAILURE_TH) && 1743 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1817 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) 1744 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1818 || (tbl->expected_tpt == NULL)) { 1745 IWL_DEBUG_RATE("LQ: still below TH. succ=%d total=%d "
1819 IWL_DEBUG_RATE("LQ: still below TH succ %d total %d "
1820 "for index %d\n", 1746 "for index %d\n",
1821 window->success_counter, window->counter, index); 1747 window->success_counter, window->counter, index);
1822 1748
@@ -1827,44 +1753,51 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1827 * or search for a new one? */ 1753 * or search for a new one? */
1828 rs_stay_in_table(lq_sta); 1754 rs_stay_in_table(lq_sta);
1829 1755
1830 /* Set up new rate table in uCode, if needed */
1831 if (update_lq) {
1832 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green);
1833 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq);
1834 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
1835 }
1836 goto out; 1756 goto out;
1837 1757
1838 /* Else we have enough samples; calculate estimate of 1758 /* Else we have enough samples; calculate estimate of
1839 * actual average throughput */ 1759 * actual average throughput */
1840 } else 1760 } else {
1841 window->average_tpt = ((window->success_ratio * 1761 /*FIXME:RS remove this else if we don't get this error*/
1762 if (window->average_tpt != ((window->success_ratio *
1763 tbl->expected_tpt[index] + 64) / 128)) {
1764 IWL_ERROR("expected_tpt should have been calculated"
1765 " by now\n");
1766 window->average_tpt = ((window->success_ratio *
1842 tbl->expected_tpt[index] + 64) / 128); 1767 tbl->expected_tpt[index] + 64) / 128);
1768 }
1769 }
1843 1770
1844 /* If we are searching for better modulation mode, check success. */ 1771 /* If we are searching for better modulation mode, check success. */
1845 if (lq_sta->search_better_tbl) { 1772 if (lq_sta->search_better_tbl) {
1846 int success_limit = IWL_RATE_SCALE_SWITCH;
1847 1773
1848 /* If good success, continue using the "search" mode; 1774 /* If good success, continue using the "search" mode;
1849 * no need to send new link quality command, since we're 1775 * no need to send new link quality command, since we're
1850 * continuing to use the setup that we've been trying. */ 1776 * continuing to use the setup that we've been trying. */
1851 if ((window->success_ratio > success_limit) || 1777 if (window->average_tpt > lq_sta->last_tpt) {
1852 (window->average_tpt > lq_sta->last_tpt)) { 1778
1853 if (!is_legacy(tbl->lq_type)) { 1779 IWL_DEBUG_RATE("LQ: SWITCHING TO CURRENT TABLE "
1854 IWL_DEBUG_HT("LQ: we are switching to HT" 1780 "suc=%d cur-tpt=%d old-tpt=%d\n",
1855 " rate suc %d current tpt %d" 1781 window->success_ratio,
1856 " old tpt %d\n", 1782 window->average_tpt,
1857 window->success_ratio, 1783 lq_sta->last_tpt);
1858 window->average_tpt, 1784
1859 lq_sta->last_tpt); 1785 if (!is_legacy(tbl->lq_type))
1860 lq_sta->enable_counter = 1; 1786 lq_sta->enable_counter = 1;
1861 } 1787
1862 /* Swap tables; "search" becomes "active" */ 1788 /* Swap tables; "search" becomes "active" */
1863 lq_sta->active_tbl = active_tbl; 1789 lq_sta->active_tbl = active_tbl;
1864 current_tpt = window->average_tpt; 1790 current_tpt = window->average_tpt;
1865 1791
1866 /* Else poor success; go back to mode in "active" table */ 1792 /* Else poor success; go back to mode in "active" table */
1867 } else { 1793 } else {
1794
1795 IWL_DEBUG_RATE("LQ: GOING BACK TO THE OLD TABLE "
1796 "suc=%d cur-tpt=%d old-tpt=%d\n",
1797 window->success_ratio,
1798 window->average_tpt,
1799 lq_sta->last_tpt);
1800
1868 /* Nullify "search" table */ 1801 /* Nullify "search" table */
1869 tbl->lq_type = LQ_NONE; 1802 tbl->lq_type = LQ_NONE;
1870 1803
@@ -1873,13 +1806,11 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1873 tbl = &(lq_sta->lq_info[active_tbl]); 1806 tbl = &(lq_sta->lq_info[active_tbl]);
1874 1807
1875 /* Revert to "active" rate and throughput info */ 1808 /* Revert to "active" rate and throughput info */
1876 index = iwl4965_hwrate_to_plcp_idx( 1809 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
1877 tbl->current_rate.rate_n_flags);
1878 current_tpt = lq_sta->last_tpt; 1810 current_tpt = lq_sta->last_tpt;
1879 1811
1880 /* Need to set up a new rate table in uCode */ 1812 /* Need to set up a new rate table in uCode */
1881 update_lq = 1; 1813 update_lq = 1;
1882 IWL_DEBUG_HT("XXY GO BACK TO OLD TABLE\n");
1883 } 1814 }
1884 1815
1885 /* Either way, we've made a decision; modulation mode 1816 /* Either way, we've made a decision; modulation mode
@@ -1891,11 +1822,13 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1891 1822
1892 /* (Else) not in search of better modulation mode, try for better 1823 /* (Else) not in search of better modulation mode, try for better
1893 * starting rate, while staying in this mode. */ 1824 * starting rate, while staying in this mode. */
1894 high_low = rs_get_adjacent_rate(index, rate_scale_index_msk, 1825 high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk,
1895 tbl->lq_type); 1826 tbl->lq_type);
1896 low = high_low & 0xff; 1827 low = high_low & 0xff;
1897 high = (high_low >> 8) & 0xff; 1828 high = (high_low >> 8) & 0xff;
1898 1829
1830 sr = window->success_ratio;
1831
1899 /* Collect measured throughputs for current and adjacent rates */ 1832 /* Collect measured throughputs for current and adjacent rates */
1900 current_tpt = window->average_tpt; 1833 current_tpt = window->average_tpt;
1901 if (low != IWL_RATE_INVALID) 1834 if (low != IWL_RATE_INVALID)
@@ -1903,19 +1836,22 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1903 if (high != IWL_RATE_INVALID) 1836 if (high != IWL_RATE_INVALID)
1904 high_tpt = tbl->win[high].average_tpt; 1837 high_tpt = tbl->win[high].average_tpt;
1905 1838
1906 /* Assume rate increase */ 1839 scale_action = 0;
1907 scale_action = 1;
1908 1840
1909 /* Too many failures, decrease rate */ 1841 /* Too many failures, decrease rate */
1910 if ((window->success_ratio <= IWL_RATE_DECREASE_TH) || 1842 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1911 (current_tpt == 0)) {
1912 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); 1843 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n");
1913 scale_action = -1; 1844 scale_action = -1;
1914 1845
1915 /* No throughput measured yet for adjacent rates; try increase. */ 1846 /* No throughput measured yet for adjacent rates; try increase. */
1916 } else if ((low_tpt == IWL_INVALID_VALUE) && 1847 } else if ((low_tpt == IWL_INVALID_VALUE) &&
1917 (high_tpt == IWL_INVALID_VALUE)) 1848 (high_tpt == IWL_INVALID_VALUE)) {
1918 scale_action = 1; 1849
1850 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
1851 scale_action = 1;
1852 else if (low != IWL_RATE_INVALID)
1853 scale_action = -1;
1854 }
1919 1855
1920 /* Both adjacent throughputs are measured, but neither one has better 1856 /* Both adjacent throughputs are measured, but neither one has better
1921 * throughput; we're using the best rate, don't change it! */ 1857 * throughput; we're using the best rate, don't change it! */
@@ -1931,9 +1867,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1931 /* Higher adjacent rate's throughput is measured */ 1867 /* Higher adjacent rate's throughput is measured */
1932 if (high_tpt != IWL_INVALID_VALUE) { 1868 if (high_tpt != IWL_INVALID_VALUE) {
1933 /* Higher rate has better throughput */ 1869 /* Higher rate has better throughput */
1934 if (high_tpt > current_tpt) 1870 if (high_tpt > current_tpt &&
1871 sr >= IWL_RATE_INCREASE_TH) {
1935 scale_action = 1; 1872 scale_action = 1;
1936 else { 1873 } else {
1937 IWL_DEBUG_RATE 1874 IWL_DEBUG_RATE
1938 ("decrease rate because of high tpt\n"); 1875 ("decrease rate because of high tpt\n");
1939 scale_action = -1; 1876 scale_action = -1;
@@ -1946,23 +1883,17 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1946 IWL_DEBUG_RATE 1883 IWL_DEBUG_RATE
1947 ("decrease rate because of low tpt\n"); 1884 ("decrease rate because of low tpt\n");
1948 scale_action = -1; 1885 scale_action = -1;
1949 } else 1886 } else if (sr >= IWL_RATE_INCREASE_TH) {
1950 scale_action = 1; 1887 scale_action = 1;
1888 }
1951 } 1889 }
1952 } 1890 }
1953 1891
1954 /* Sanity check; asked for decrease, but success rate or throughput 1892 /* Sanity check; asked for decrease, but success rate or throughput
1955 * has been good at old rate. Don't change it. */ 1893 * has been good at old rate. Don't change it. */
1956 if (scale_action == -1) { 1894 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
1957 if ((low != IWL_RATE_INVALID) && 1895 ((sr > IWL_RATE_HIGH_TH) ||
1958 ((window->success_ratio > IWL_RATE_HIGH_TH) ||
1959 (current_tpt > (100 * tbl->expected_tpt[low])))) 1896 (current_tpt > (100 * tbl->expected_tpt[low]))))
1960 scale_action = 0;
1961
1962 /* Sanity check; asked for increase, but success rate has not been great
1963 * even at old rate, higher rate will be worse. Don't change it. */
1964 } else if ((scale_action == 1) &&
1965 (window->success_ratio < IWL_RATE_INCREASE_TH))
1966 scale_action = 0; 1897 scale_action = 0;
1967 1898
1968 switch (scale_action) { 1899 switch (scale_action) {
@@ -1987,15 +1918,15 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1987 break; 1918 break;
1988 } 1919 }
1989 1920
1990 IWL_DEBUG_HT("choose rate scale index %d action %d low %d " 1921 IWL_DEBUG_RATE("choose rate scale index %d action %d low %d "
1991 "high %d type %d\n", 1922 "high %d type %d\n",
1992 index, scale_action, low, high, tbl->lq_type); 1923 index, scale_action, low, high, tbl->lq_type);
1993 1924
1994 lq_update: 1925lq_update:
1995 /* Replace uCode's rate table for the destination station. */ 1926 /* Replace uCode's rate table for the destination station. */
1996 if (update_lq) { 1927 if (update_lq) {
1997 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green); 1928 rate = rate_n_flags_from_tbl(tbl, index, is_green);
1998 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq); 1929 rs_fill_link_cmd(priv, lq_sta, rate);
1999 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 1930 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2000 } 1931 }
2001 1932
@@ -2029,13 +1960,11 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2029 rs_rate_scale_clear_window(&(tbl->win[i])); 1960 rs_rate_scale_clear_window(&(tbl->win[i]));
2030 1961
2031 /* Use new "search" start rate */ 1962 /* Use new "search" start rate */
2032 index = iwl4965_hwrate_to_plcp_idx( 1963 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2033 tbl->current_rate.rate_n_flags);
2034 1964
2035 IWL_DEBUG_HT("Switch current mcs: %X index: %d\n", 1965 IWL_DEBUG_RATE("Switch current mcs: %X index: %d\n",
2036 tbl->current_rate.rate_n_flags, index); 1966 tbl->current_rate, index);
2037 rs_fill_link_cmd(lq_sta, &tbl->current_rate, 1967 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2038 &lq_sta->lq);
2039 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 1968 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2040 } 1969 }
2041 1970
@@ -2046,13 +1975,11 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2046 * before next round of mode comparisons. */ 1975 * before next round of mode comparisons. */
2047 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); 1976 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2048 if (is_legacy(tbl1->lq_type) && 1977 if (is_legacy(tbl1->lq_type) &&
2049#ifdef CONFIG_IWL4965_HT
2050 (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) && 1978 (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) &&
2051#endif
2052 (lq_sta->action_counter >= 1)) { 1979 (lq_sta->action_counter >= 1)) {
2053 lq_sta->action_counter = 0; 1980 lq_sta->action_counter = 0;
2054 IWL_DEBUG_HT("LQ: STAY in legacy table\n"); 1981 IWL_DEBUG_RATE("LQ: STAY in legacy table\n");
2055 rs_set_stay_in_table(1, lq_sta); 1982 rs_set_stay_in_table(priv, 1, lq_sta);
2056 } 1983 }
2057 1984
2058 /* If we're in an HT mode, and all 3 mode switch actions 1985 /* If we're in an HT mode, and all 3 mode switch actions
@@ -2060,16 +1987,14 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2060 * mode for a while before next round of mode comparisons. */ 1987 * mode for a while before next round of mode comparisons. */
2061 if (lq_sta->enable_counter && 1988 if (lq_sta->enable_counter &&
2062 (lq_sta->action_counter >= IWL_ACTION_LIMIT)) { 1989 (lq_sta->action_counter >= IWL_ACTION_LIMIT)) {
2063#ifdef CONFIG_IWL4965_HT
2064 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && 1990 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2065 (lq_sta->tx_agg_tid_en & (1 << tid)) && 1991 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2066 (tid != MAX_TID_COUNT)) { 1992 (tid != MAX_TID_COUNT)) {
2067 IWL_DEBUG_HT("try to aggregate tid %d\n", tid); 1993 IWL_DEBUG_RATE("try to aggregate tid %d\n", tid);
2068 rs_tl_turn_on_agg(priv, tid, lq_sta, sta); 1994 rs_tl_turn_on_agg(priv, tid, lq_sta, sta);
2069 } 1995 }
2070#endif /*CONFIG_IWL4965_HT */
2071 lq_sta->action_counter = 0; 1996 lq_sta->action_counter = 0;
2072 rs_set_stay_in_table(0, lq_sta); 1997 rs_set_stay_in_table(priv, 0, lq_sta);
2073 } 1998 }
2074 1999
2075 /* 2000 /*
@@ -2085,7 +2010,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2085 } 2010 }
2086 2011
2087out: 2012out:
2088 rs_mcs_from_tbl(&tbl->current_rate, tbl, index, is_green); 2013 tbl->current_rate = rate_n_flags_from_tbl(tbl, index, is_green);
2089 i = index; 2014 i = index;
2090 sta->last_txrate_idx = i; 2015 sta->last_txrate_idx = i;
2091 2016
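
Summarising the scale_action selection earlier in rs_rate_scale_perform(): the patch makes rate increases conditional on the success ratio reaching IWL_RATE_INCREASE_TH, keeps the decrease path tied to IWL_RATE_DECREASE_TH, and keeps the sanity check against IWL_RATE_HIGH_TH. The condensed decision function below restates that logic in one place; the numeric thresholds are stand-ins for the driver's constants, and the "neither neighbour is better" branch is taken from the unchanged context around the hunk.

#define TPT_INVALID   (-1)    /* stands in for IWL_INVALID_VALUE    */
#define RATE_INVALID  (-1)    /* stands in for IWL_RATE_INVALID     */
#define DECREASE_TH   1920    /* stands in for IWL_RATE_DECREASE_TH */
#define INCREASE_TH   8960    /* stands in for IWL_RATE_INCREASE_TH */
#define HIGH_TH      10880    /* stands in for IWL_RATE_HIGH_TH     */

/* Returns -1 to step the rate down, 0 to stay, +1 to step up.
 * sr is the success ratio of the current window, *_tpt are measured
 * average throughputs, low/high are the adjacent rate indices. */
static int decide_scale_action(int sr, int current_tpt,
			       int low, int low_tpt,
			       int high, int high_tpt,
			       int expected_tpt_low)
{
	int action = 0;

	if (sr <= DECREASE_TH || current_tpt == 0) {
		action = -1;                        /* too many failures */
	} else if (low_tpt == TPT_INVALID && high_tpt == TPT_INVALID) {
		if (high != RATE_INVALID && sr >= INCREASE_TH)
			action = 1;                 /* nothing measured yet: probe upward */
		else if (low != RATE_INVALID)
			action = -1;
	} else if (low_tpt != TPT_INVALID && high_tpt != TPT_INVALID &&
		   low_tpt < current_tpt && high_tpt < current_tpt) {
		action = 0;                         /* neither neighbour beats us: stay */
	} else if (high_tpt != TPT_INVALID) {
		action = (high_tpt > current_tpt && sr >= INCREASE_TH) ? 1 : -1;
	} else if (low_tpt != TPT_INVALID) {
		if (low_tpt > current_tpt)
			action = -1;                /* a lower rate is doing better */
		else if (sr >= INCREASE_TH)
			action = 1;
	}

	/* Sanity check: don't step down while the old rate still performs. */
	if (action == -1 && low != RATE_INVALID &&
	    (sr > HIGH_TH || current_tpt > 100 * expected_tpt_low))
		action = 0;

	return action;
}
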
@@ -2105,13 +2030,14 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2105 struct ieee80211_conf *conf, 2030 struct ieee80211_conf *conf,
2106 struct sta_info *sta) 2031 struct sta_info *sta)
2107{ 2032{
2108 int i;
2109 struct iwl4965_lq_sta *lq_sta; 2033 struct iwl4965_lq_sta *lq_sta;
2110 struct iwl4965_scale_tbl_info *tbl; 2034 struct iwl4965_scale_tbl_info *tbl;
2111 u8 active_tbl = 0;
2112 int rate_idx; 2035 int rate_idx;
2036 int i;
2037 u32 rate;
2113 u8 use_green = rs_use_green(priv, conf); 2038 u8 use_green = rs_use_green(priv, conf);
2114 struct iwl4965_rate mcs_rate; 2039 u8 active_tbl = 0;
2040 u8 valid_tx_ant;
2115 2041
2116 if (!sta || !sta->rate_ctrl_priv) 2042 if (!sta || !sta->rate_ctrl_priv)
2117 goto out; 2043 goto out;
@@ -2123,6 +2049,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2123 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) 2049 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS))
2124 goto out; 2050 goto out;
2125 2051
2052 valid_tx_ant = priv->hw_params.valid_tx_ant;
2053
2126 if (!lq_sta->search_better_tbl) 2054 if (!lq_sta->search_better_tbl)
2127 active_tbl = lq_sta->active_tbl; 2055 active_tbl = lq_sta->active_tbl;
2128 else 2056 else
@@ -2133,22 +2061,23 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2133 if ((i < 0) || (i >= IWL_RATE_COUNT)) 2061 if ((i < 0) || (i >= IWL_RATE_COUNT))
2134 i = 0; 2062 i = 0;
2135 2063
2136 mcs_rate.rate_n_flags = iwl4965_rates[i].plcp ; 2064 /* FIXME:RS: This is also wrong in 4965 */
2137 mcs_rate.rate_n_flags |= RATE_MCS_ANT_B_MSK; 2065 rate = iwl_rates[i].plcp;
2138 mcs_rate.rate_n_flags &= ~RATE_MCS_ANT_A_MSK; 2066 rate |= RATE_MCS_ANT_B_MSK;
2067 rate &= ~RATE_MCS_ANT_A_MSK;
2139 2068
2140 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) 2069 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2141 mcs_rate.rate_n_flags |= RATE_MCS_CCK_MSK; 2070 rate |= RATE_MCS_CCK_MSK;
2142 2071
2143 tbl->antenna_type = ANT_AUX; 2072 tbl->ant_type = ANT_B;
2144 rs_get_tbl_info_from_mcs(&mcs_rate, priv->band, tbl, &rate_idx); 2073 rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2145 if (!rs_is_ant_connected(priv->valid_antenna, tbl->antenna_type)) 2074 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2146 rs_toggle_antenna(&mcs_rate, tbl); 2075 rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2147 2076
2148 rs_mcs_from_tbl(&mcs_rate, tbl, rate_idx, use_green); 2077 rate = rate_n_flags_from_tbl(tbl, rate_idx, use_green);
2149 tbl->current_rate.rate_n_flags = mcs_rate.rate_n_flags; 2078 tbl->current_rate = rate;
2150 rs_get_expected_tpt_table(lq_sta, tbl); 2079 rs_set_expected_tpt_table(lq_sta, tbl);
2151 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq); 2080 rs_fill_link_cmd(NULL, lq_sta, rate);
2152 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2081 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2153 out: 2082 out:
2154 return; 2083 return;
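In the rewritten rs_initialize_lq() the starting entry is now a plain u32 rate_n_flags word rather than a struct iwl4965_rate: the legacy PLCP value is OR'd with the antenna-B mask, the CCK flag is added for CCK indexes, and the antenna is toggled afterwards if B is not in hw_params.valid_tx_ant. A stand-alone sketch of that composition; the mask bit positions below are placeholders, the real definitions live in the driver headers:

/* Illustrative sketch of composing the initial rate_n_flags; mask values are placeholders. */
#include <stdint.h>

#define RATE_MCS_ANT_A_MSK	(1u << 14)	/* placeholder bit positions */
#define RATE_MCS_ANT_B_MSK	(1u << 15)
#define RATE_MCS_CCK_MSK	(1u << 9)

static uint32_t initial_rate_n_flags(uint8_t plcp, int is_cck_index)
{
	uint32_t rate = plcp;

	rate |= RATE_MCS_ANT_B_MSK;	/* start on antenna B ... */
	rate &= ~RATE_MCS_ANT_A_MSK;	/* ... only (the FIXME notes this is still wrong) */
	if (is_cck_index)
		rate |= RATE_MCS_CCK_MSK;
	return rate;
}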
@@ -2165,7 +2094,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2165 struct ieee80211_conf *conf = &local->hw.conf; 2094 struct ieee80211_conf *conf = &local->hw.conf;
2166 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2095 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2167 struct sta_info *sta; 2096 struct sta_info *sta;
2168 u16 fc; 2097 __le16 fc;
2169 struct iwl_priv *priv = (struct iwl_priv *)priv_rate; 2098 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
2170 struct iwl4965_lq_sta *lq_sta; 2099 struct iwl4965_lq_sta *lq_sta;
2171 2100
@@ -2177,10 +2106,10 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2177 2106
2178 /* Send management frames and broadcast/multicast data using lowest 2107 /* Send management frames and broadcast/multicast data using lowest
2179 * rate. */ 2108 * rate. */
2180 fc = le16_to_cpu(hdr->frame_control); 2109 fc = hdr->frame_control;
2181 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) || 2110 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) ||
2182 !sta || !sta->rate_ctrl_priv) { 2111 !sta || !sta->rate_ctrl_priv) {
2183 sel->rate = rate_lowest(local, sband, sta); 2112 sel->rate_idx = rate_lowest_index(local, sband, sta);
2184 goto out; 2113 goto out;
2185 } 2114 }
2186 2115
@@ -2189,13 +2118,13 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2189 2118
2190 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2119 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2191 !lq_sta->ibss_sta_added) { 2120 !lq_sta->ibss_sta_added) {
2192 u8 sta_id = iwl4965_hw_find_station(priv, hdr->addr1); 2121 u8 sta_id = iwl_find_station(priv, hdr->addr1);
2193 DECLARE_MAC_BUF(mac); 2122 DECLARE_MAC_BUF(mac);
2194 2123
2195 if (sta_id == IWL_INVALID_STATION) { 2124 if (sta_id == IWL_INVALID_STATION) {
2196 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2125 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2197 print_mac(mac, hdr->addr1)); 2126 print_mac(mac, hdr->addr1));
2198 sta_id = iwl4965_add_station_flags(priv, hdr->addr1, 2127 sta_id = iwl_add_station_flags(priv, hdr->addr1,
2199 0, CMD_ASYNC, NULL); 2128 0, CMD_ASYNC, NULL);
2200 } 2129 }
2201 if ((sta_id != IWL_INVALID_STATION)) { 2130 if ((sta_id != IWL_INVALID_STATION)) {
@@ -2204,26 +2133,27 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2204 lq_sta->ibss_sta_added = 1; 2133 lq_sta->ibss_sta_added = 1;
2205 rs_initialize_lq(priv, conf, sta); 2134 rs_initialize_lq(priv, conf, sta);
2206 } 2135 }
2207 if (!lq_sta->ibss_sta_added)
2208 goto done;
2209 } 2136 }
2210 2137
2211done:
2212 if ((i < 0) || (i > IWL_RATE_COUNT)) { 2138 if ((i < 0) || (i > IWL_RATE_COUNT)) {
2213 sel->rate = rate_lowest(local, sband, sta); 2139 sel->rate_idx = rate_lowest_index(local, sband, sta);
2214 goto out; 2140 goto out;
2215 } 2141 }
2216 2142
2217 sel->rate = &priv->ieee_rates[i]; 2143 if (sband->band == IEEE80211_BAND_5GHZ)
2144 i -= IWL_FIRST_OFDM_RATE;
2145 sel->rate_idx = i;
2218out: 2146out:
2219 rcu_read_unlock(); 2147 rcu_read_unlock();
2220} 2148}
2221 2149
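rs_get_rate() now hands mac80211 a rate index (sel->rate_idx) instead of a rate pointer, and that index must be relative to the current band's bitrate table. The driver's internal table starts with the four CCK rates, which the 5 GHz band table does not carry, hence the IWL_FIRST_OFDM_RATE subtraction above. A small sketch of that mapping (the helper name and enum are illustrative):

/* Sketch: map a driver rate index to a per-band mac80211 bitrate index. */
#define IWL_FIRST_OFDM_RATE	4	/* CCK 1/2/5.5/11 occupy indexes 0..3 */

enum band { BAND_2GHZ, BAND_5GHZ };

static int iwl_to_mac80211_rate_idx(int iwl_idx, enum band band)
{
	if (band == BAND_5GHZ)
		return iwl_idx - IWL_FIRST_OFDM_RATE;	/* 5 GHz table has no CCK entries */
	return iwl_idx;
}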
2222static void *rs_alloc_sta(void *priv, gfp_t gfp) 2150static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
2223{ 2151{
2224 struct iwl4965_lq_sta *lq_sta; 2152 struct iwl4965_lq_sta *lq_sta;
2153 struct iwl_priv *priv;
2225 int i, j; 2154 int i, j;
2226 2155
2156 priv = (struct iwl_priv *)priv_rate;
2227 IWL_DEBUG_RATE("create station rate scale window\n"); 2157 IWL_DEBUG_RATE("create station rate scale window\n");
2228 2158
2229 lq_sta = kzalloc(sizeof(struct iwl4965_lq_sta), gfp); 2159 lq_sta = kzalloc(sizeof(struct iwl4965_lq_sta), gfp);
@@ -2259,7 +2189,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2259 for (i = 0; i < IWL_RATE_COUNT; i++) 2189 for (i = 0; i < IWL_RATE_COUNT; i++)
2260 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i])); 2190 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i]));
2261 2191
2262 IWL_DEBUG_RATE("rate scale global init\n"); 2192 IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n");
2263 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2193 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2264 * the lowest or the highest rate.. Could consider using RSSI from 2194 * the lowest or the highest rate.. Could consider using RSSI from
2265 * previous packets? Need to have IEEE 802.1X auth succeed immediately 2195 * previous packets? Need to have IEEE 802.1X auth succeed immediately
@@ -2267,17 +2197,17 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2267 2197
2268 lq_sta->ibss_sta_added = 0; 2198 lq_sta->ibss_sta_added = 0;
2269 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2199 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2270 u8 sta_id = iwl4965_hw_find_station(priv, sta->addr); 2200 u8 sta_id = iwl_find_station(priv, sta->addr);
2271 DECLARE_MAC_BUF(mac); 2201 DECLARE_MAC_BUF(mac);
2272 2202
2273 /* for IBSS the calls are from a tasklet */ 2203 /* for IBSS the calls are from a tasklet */
2274 IWL_DEBUG_HT("LQ: ADD station %s\n", 2204 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2275 print_mac(mac, sta->addr)); 2205 print_mac(mac, sta->addr));
2276 2206
2277 if (sta_id == IWL_INVALID_STATION) { 2207 if (sta_id == IWL_INVALID_STATION) {
2278 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2208 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2279 print_mac(mac, sta->addr)); 2209 print_mac(mac, sta->addr));
2280 sta_id = iwl4965_add_station_flags(priv, sta->addr, 2210 sta_id = iwl_add_station_flags(priv, sta->addr,
2281 0, CMD_ASYNC, NULL); 2211 0, CMD_ASYNC, NULL);
2282 } 2212 }
2283 if ((sta_id != IWL_INVALID_STATION)) { 2213 if ((sta_id != IWL_INVALID_STATION)) {
@@ -2300,92 +2230,95 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2300 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2230 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2301 2231
2302 lq_sta->is_dup = 0; 2232 lq_sta->is_dup = 0;
2303 lq_sta->valid_antenna = priv->valid_antenna;
2304 lq_sta->antenna = priv->antenna;
2305 lq_sta->is_green = rs_use_green(priv, conf); 2233 lq_sta->is_green = rs_use_green(priv, conf);
2306 lq_sta->active_rate = priv->active_rate; 2234 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2307 lq_sta->active_rate &= ~(0x1000);
2308 lq_sta->active_rate_basic = priv->active_rate_basic; 2235 lq_sta->active_rate_basic = priv->active_rate_basic;
2309 lq_sta->band = priv->band; 2236 lq_sta->band = priv->band;
2310#ifdef CONFIG_IWL4965_HT
2311 /* 2237 /*
2312 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2238 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2313 * supp_rates[] does not; shift to convert format, force 9 MBits off. 2239 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2314 */ 2240 */
2315 lq_sta->active_siso_rate = (priv->current_ht_config.supp_mcs_set[0] << 1); 2241 lq_sta->active_siso_rate = conf->ht_conf.supp_mcs_set[0] << 1;
2316 lq_sta->active_siso_rate |= 2242 lq_sta->active_siso_rate |= conf->ht_conf.supp_mcs_set[0] & 0x1;
2317 (priv->current_ht_config.supp_mcs_set[0] & 0x1);
2318 lq_sta->active_siso_rate &= ~((u16)0x2); 2243 lq_sta->active_siso_rate &= ~((u16)0x2);
2319 lq_sta->active_siso_rate = 2244 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2320 lq_sta->active_siso_rate << IWL_FIRST_OFDM_RATE;
2321 2245
2322 /* Same here */ 2246 /* Same here */
2323 lq_sta->active_mimo_rate = (priv->current_ht_config.supp_mcs_set[1] << 1); 2247 lq_sta->active_mimo2_rate = conf->ht_conf.supp_mcs_set[1] << 1;
2324 lq_sta->active_mimo_rate |= 2248 lq_sta->active_mimo2_rate |= conf->ht_conf.supp_mcs_set[1] & 0x1;
2325 (priv->current_ht_config.supp_mcs_set[1] & 0x1); 2249 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2326 lq_sta->active_mimo_rate &= ~((u16)0x2); 2250 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2327 lq_sta->active_mimo_rate = 2251
2328 lq_sta->active_mimo_rate << IWL_FIRST_OFDM_RATE; 2252 lq_sta->active_mimo3_rate = conf->ht_conf.supp_mcs_set[2] << 1;
2329 IWL_DEBUG_HT("SISO RATE 0x%X MIMO RATE 0x%X\n", 2253 lq_sta->active_mimo3_rate |= conf->ht_conf.supp_mcs_set[2] & 0x1;
2254 lq_sta->active_mimo3_rate &= ~((u16)0x2);
2255 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
2256
2257 IWL_DEBUG_RATE("SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
2330 lq_sta->active_siso_rate, 2258 lq_sta->active_siso_rate,
2331 lq_sta->active_mimo_rate); 2259 lq_sta->active_mimo2_rate,
2260 lq_sta->active_mimo3_rate);
2261
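The conversion above turns mac80211's supp_mcs_set bitmap (bit 0 = MCS 0 = 6 Mbps, bit 1 = 12 Mbps, ...) into the driver's rate bitmap, which keeps a slot for 9 Mbps and four leading CCK slots: shift left by one to open the 9 Mbps hole, re-insert 6 Mbps at bit 0, force the 9 Mbps bit off, then shift past the CCK positions. The same transform as a stand-alone sketch:

/* Sketch of the supp_mcs_set[] -> active_*_rate conversion shown above. */
#include <stdint.h>

#define IWL_FIRST_OFDM_RATE	4	/* four CCK slots precede the OFDM/HT rates */

static uint16_t mcs_to_rate_mask(uint8_t supp_mcs)
{
	uint16_t mask;

	mask = (uint16_t)(supp_mcs << 1);	/* open a hole for the 9 Mbps slot */
	mask |= supp_mcs & 0x1;			/* put 6 Mbps (MCS 0) back at position 0 */
	mask &= ~(uint16_t)0x2;			/* 9 Mbps has no HT equivalent: force it off */
	return mask << IWL_FIRST_OFDM_RATE;	/* skip the CCK positions */
}

With all eight MCS bits set this yields 0x1FD0, the same "6 - 60 MBits, no 9, no CCK" mask the debugfs override installs further down in this file.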
2262 /* These values will be overridden later */
2263 lq_sta->lq.general_params.single_stream_ant_msk = ANT_A;
2264 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2265
2332 /* as default allow aggregation for all tids */ 2266 /* as default allow aggregation for all tids */
2333 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; 2267 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2334#endif /*CONFIG_IWL4965_HT*/
2335#ifdef CONFIG_MAC80211_DEBUGFS
2336 lq_sta->drv = priv; 2268 lq_sta->drv = priv;
2337#endif
2338
2339 if (priv->assoc_station_added)
2340 priv->lq_mngr.lq_ready = 1;
2341 2269
2342 rs_initialize_lq(priv, conf, sta); 2270 rs_initialize_lq(priv, conf, sta);
2343} 2271}
2344 2272
2345static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta, 2273static void rs_fill_link_cmd(const struct iwl_priv *priv,
2346 struct iwl4965_rate *tx_mcs, 2274 struct iwl4965_lq_sta *lq_sta,
2347 struct iwl_link_quality_cmd *lq_cmd) 2275 u32 new_rate)
2348{ 2276{
2277 struct iwl4965_scale_tbl_info tbl_type;
2349 int index = 0; 2278 int index = 0;
2350 int rate_idx; 2279 int rate_idx;
2351 int repeat_rate = 0; 2280 int repeat_rate = 0;
2352 u8 ant_toggle_count = 0; 2281 u8 ant_toggle_cnt = 0;
2353 u8 use_ht_possible = 1; 2282 u8 use_ht_possible = 1;
2354 struct iwl4965_rate new_rate; 2283 u8 valid_tx_ant = 0;
2355 struct iwl4965_scale_tbl_info tbl_type = { 0 }; 2284 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
2356 2285
2357 /* Override starting rate (index 0) if needed for debug purposes */ 2286 /* Override starting rate (index 0) if needed for debug purposes */
2358 rs_dbgfs_set_mcs(lq_sta, tx_mcs, index); 2287 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2359 2288
2360 /* Interpret rate_n_flags */ 2289 /* Interpret new_rate (rate_n_flags) */
2361 rs_get_tbl_info_from_mcs(tx_mcs, lq_sta->band, 2290 memset(&tbl_type, 0, sizeof(tbl_type));
2291 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2362 &tbl_type, &rate_idx); 2292 &tbl_type, &rate_idx);
2363 2293
2364 /* How many times should we repeat the initial rate? */ 2294 /* How many times should we repeat the initial rate? */
2365 if (is_legacy(tbl_type.lq_type)) { 2295 if (is_legacy(tbl_type.lq_type)) {
2366 ant_toggle_count = 1; 2296 ant_toggle_cnt = 1;
2367 repeat_rate = IWL_NUMBER_TRY; 2297 repeat_rate = IWL_NUMBER_TRY;
2368 } else 2298 } else {
2369 repeat_rate = IWL_HT_NUMBER_TRY; 2299 repeat_rate = IWL_HT_NUMBER_TRY;
2300 }
2370 2301
2371 lq_cmd->general_params.mimo_delimiter = 2302 lq_cmd->general_params.mimo_delimiter =
2372 is_mimo(tbl_type.lq_type) ? 1 : 0; 2303 is_mimo(tbl_type.lq_type) ? 1 : 0;
2373 2304
2374 /* Fill 1st table entry (index 0) */ 2305 /* Fill 1st table entry (index 0) */
2375 lq_cmd->rs_table[index].rate_n_flags = 2306 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2376 cpu_to_le32(tx_mcs->rate_n_flags);
2377 new_rate.rate_n_flags = tx_mcs->rate_n_flags;
2378 2307
2379 if (is_mimo(tbl_type.lq_type) || (tbl_type.antenna_type == ANT_MAIN)) 2308 if (num_of_ant(tbl_type.ant_type) == 1) {
2380 lq_cmd->general_params.single_stream_ant_msk 2309 lq_cmd->general_params.single_stream_ant_msk =
2381 = LINK_QUAL_ANT_A_MSK; 2310 tbl_type.ant_type;
2382 else 2311 } else if (num_of_ant(tbl_type.ant_type) == 2) {
2383 lq_cmd->general_params.single_stream_ant_msk 2312 lq_cmd->general_params.dual_stream_ant_msk =
2384 = LINK_QUAL_ANT_B_MSK; 2313 tbl_type.ant_type;
2314 } /* otherwise we don't modify the existing value */
2385 2315
2386 index++; 2316 index++;
2387 repeat_rate--; 2317 repeat_rate--;
2388 2318
2319 if (priv)
2320 valid_tx_ant = priv->hw_params.valid_tx_ant;
2321
2389 /* Fill rest of rate table */ 2322 /* Fill rest of rate table */
2390 while (index < LINK_QUAL_MAX_RETRY_NUM) { 2323 while (index < LINK_QUAL_MAX_RETRY_NUM) {
2391 /* Repeat initial/next rate. 2324 /* Repeat initial/next rate.
@@ -2393,26 +2326,25 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2393 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */ 2326 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
2394 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { 2327 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
2395 if (is_legacy(tbl_type.lq_type)) { 2328 if (is_legacy(tbl_type.lq_type)) {
2396 if (ant_toggle_count < 2329 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2397 NUM_TRY_BEFORE_ANTENNA_TOGGLE) 2330 ant_toggle_cnt++;
2398 ant_toggle_count++; 2331 else if (priv &&
2399 else { 2332 rs_toggle_antenna(valid_tx_ant,
2400 rs_toggle_antenna(&new_rate, &tbl_type); 2333 &new_rate, &tbl_type))
2401 ant_toggle_count = 1; 2334 ant_toggle_cnt = 1;
2402 } 2335}
2403 }
2404 2336
2405 /* Override next rate if needed for debug purposes */ 2337 /* Override next rate if needed for debug purposes */
2406 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2338 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2407 2339
2408 /* Fill next table entry */ 2340 /* Fill next table entry */
2409 lq_cmd->rs_table[index].rate_n_flags = 2341 lq_cmd->rs_table[index].rate_n_flags =
2410 cpu_to_le32(new_rate.rate_n_flags); 2342 cpu_to_le32(new_rate);
2411 repeat_rate--; 2343 repeat_rate--;
2412 index++; 2344 index++;
2413 } 2345 }
2414 2346
2415 rs_get_tbl_info_from_mcs(&new_rate, lq_sta->band, &tbl_type, 2347 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
2416 &rate_idx); 2348 &rate_idx);
2417 2349
2418 /* Indicate to uCode which entries might be MIMO. 2350 /* Indicate to uCode which entries might be MIMO.
@@ -2422,20 +2354,22 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2422 lq_cmd->general_params.mimo_delimiter = index; 2354 lq_cmd->general_params.mimo_delimiter = index;
2423 2355
2424 /* Get next rate */ 2356 /* Get next rate */
2425 rs_get_lower_rate(lq_sta, &tbl_type, rate_idx, 2357 new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
2426 use_ht_possible, &new_rate); 2358 use_ht_possible);
2427 2359
2428 /* How many times should we repeat the next rate? */ 2360 /* How many times should we repeat the next rate? */
2429 if (is_legacy(tbl_type.lq_type)) { 2361 if (is_legacy(tbl_type.lq_type)) {
2430 if (ant_toggle_count < NUM_TRY_BEFORE_ANTENNA_TOGGLE) 2362 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2431 ant_toggle_count++; 2363 ant_toggle_cnt++;
2432 else { 2364 else if (priv &&
2433 rs_toggle_antenna(&new_rate, &tbl_type); 2365 rs_toggle_antenna(valid_tx_ant,
2434 ant_toggle_count = 1; 2366 &new_rate, &tbl_type))
2435 } 2367 ant_toggle_cnt = 1;
2368
2436 repeat_rate = IWL_NUMBER_TRY; 2369 repeat_rate = IWL_NUMBER_TRY;
2437 } else 2370 } else {
2438 repeat_rate = IWL_HT_NUMBER_TRY; 2371 repeat_rate = IWL_HT_NUMBER_TRY;
2372 }
2439 2373
2440 /* Don't allow HT rates after next pass. 2374 /* Don't allow HT rates after next pass.
2441 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */ 2375 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
@@ -2445,14 +2379,13 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2445 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2379 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2446 2380
2447 /* Fill next table entry */ 2381 /* Fill next table entry */
2448 lq_cmd->rs_table[index].rate_n_flags = 2382 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2449 cpu_to_le32(new_rate.rate_n_flags);
2450 2383
2451 index++; 2384 index++;
2452 repeat_rate--; 2385 repeat_rate--;
2453 } 2386 }
2454 2387
2455 lq_cmd->general_params.dual_stream_ant_msk = 3; 2388 lq_cmd->agg_params.agg_frame_cnt_limit = 64;
2456 lq_cmd->agg_params.agg_dis_start_th = 3; 2389 lq_cmd->agg_params.agg_dis_start_th = 3;
2457 lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000); 2390 lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000);
2458} 2391}
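rs_fill_link_cmd() builds the LINK_QUALITY command's retry ladder: the current rate is repeated IWL_NUMBER_TRY times for legacy (or IWL_HT_NUMBER_TRY for HT, per the comment in the hunk), the antenna is toggled only after enough legacy tries and only if another chain is valid, and rs_get_lower_rate() then supplies the next entry until all retry slots are filled. A compressed sketch of that fill pattern, with the antenna toggle and rate lowering reduced to stubs and a 16-entry table assumed:

/* Compressed sketch of the retry-ladder fill; next_lower_rate()/toggle_ant() are stubs. */
#include <stdbool.h>
#include <stdint.h>

#define LINK_QUAL_MAX_RETRY_NUM	16	/* assumed table size */
#define IWL_NUMBER_TRY		1	/* per-rate repeats for legacy (per the diff comment) */
#define IWL_HT_NUMBER_TRY	3	/* per-rate repeats for HT */

static uint32_t next_lower_rate(uint32_t r) { return r; }		/* stub */
static void toggle_ant(uint32_t *r) { (void)r; }			/* stub */

static void fill_retry_table(uint32_t rs_table[LINK_QUAL_MAX_RETRY_NUM],
			     uint32_t rate, bool legacy)
{
	int index = 0;

	while (index < LINK_QUAL_MAX_RETRY_NUM) {
		int repeat = legacy ? IWL_NUMBER_TRY : IWL_HT_NUMBER_TRY;

		while (repeat-- > 0 && index < LINK_QUAL_MAX_RETRY_NUM)
			rs_table[index++] = rate;	/* repeat the current rate */

		if (legacy)
			toggle_ant(&rate);		/* alternate antennas on legacy rates */
		rate = next_lower_rate(rate);		/* then step the ladder down */
	}
}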
@@ -2473,15 +2406,17 @@ static void rs_clear(void *priv_rate)
2473 2406
2474 IWL_DEBUG_RATE("enter\n"); 2407 IWL_DEBUG_RATE("enter\n");
2475 2408
2476 priv->lq_mngr.lq_ready = 0; 2409 /* TODO - add rate scale state reset */
2477 2410
2478 IWL_DEBUG_RATE("leave\n"); 2411 IWL_DEBUG_RATE("leave\n");
2479} 2412}
2480 2413
2481static void rs_free_sta(void *priv, void *priv_sta) 2414static void rs_free_sta(void *priv_rate, void *priv_sta)
2482{ 2415{
2483 struct iwl4965_lq_sta *lq_sta = priv_sta; 2416 struct iwl4965_lq_sta *lq_sta = priv_sta;
2417 struct iwl_priv *priv;
2484 2418
2419 priv = (struct iwl_priv *)priv_rate;
2485 IWL_DEBUG_RATE("enter\n"); 2420 IWL_DEBUG_RATE("enter\n");
2486 kfree(lq_sta); 2421 kfree(lq_sta);
2487 IWL_DEBUG_RATE("leave\n"); 2422 IWL_DEBUG_RATE("leave\n");
@@ -2495,54 +2430,56 @@ static int open_file_generic(struct inode *inode, struct file *file)
2495 return 0; 2430 return 0;
2496} 2431}
2497static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 2432static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
2498 struct iwl4965_rate *mcs, int index) 2433 u32 *rate_n_flags, int index)
2499{ 2434{
2500 u32 base_rate; 2435 struct iwl_priv *priv;
2501 2436
2502 if (lq_sta->band == IEEE80211_BAND_5GHZ) 2437 priv = lq_sta->drv;
2503 base_rate = 0x800D; 2438 if (lq_sta->dbg_fixed_rate) {
2504 else 2439 if (index < 12) {
2505 base_rate = 0x820A; 2440 *rate_n_flags = lq_sta->dbg_fixed_rate;
2506 2441 } else {
2507 if (lq_sta->dbg_fixed.rate_n_flags) { 2442 if (lq_sta->band == IEEE80211_BAND_5GHZ)
2508 if (index < 12) 2443 *rate_n_flags = 0x800D;
2509 mcs->rate_n_flags = lq_sta->dbg_fixed.rate_n_flags; 2444 else
2510 else 2445 *rate_n_flags = 0x820A;
2511 mcs->rate_n_flags = base_rate; 2446 }
2512 IWL_DEBUG_RATE("Fixed rate ON\n"); 2447 IWL_DEBUG_RATE("Fixed rate ON\n");
2513 return; 2448 } else {
2449 IWL_DEBUG_RATE("Fixed rate OFF\n");
2514 } 2450 }
2515
2516 IWL_DEBUG_RATE("Fixed rate OFF\n");
2517} 2451}
2518 2452
2519static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, 2453static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2520 const char __user *user_buf, size_t count, loff_t *ppos) 2454 const char __user *user_buf, size_t count, loff_t *ppos)
2521{ 2455{
2522 struct iwl4965_lq_sta *lq_sta = file->private_data; 2456 struct iwl4965_lq_sta *lq_sta = file->private_data;
2457 struct iwl_priv *priv;
2523 char buf[64]; 2458 char buf[64];
2524 int buf_size; 2459 int buf_size;
2525 u32 parsed_rate; 2460 u32 parsed_rate;
2526 2461
2462 priv = lq_sta->drv;
2527 memset(buf, 0, sizeof(buf)); 2463 memset(buf, 0, sizeof(buf));
2528 buf_size = min(count, sizeof(buf) - 1); 2464 buf_size = min(count, sizeof(buf) - 1);
2529 if (copy_from_user(buf, user_buf, buf_size)) 2465 if (copy_from_user(buf, user_buf, buf_size))
2530 return -EFAULT; 2466 return -EFAULT;
2531 2467
2532 if (sscanf(buf, "%x", &parsed_rate) == 1) 2468 if (sscanf(buf, "%x", &parsed_rate) == 1)
2533 lq_sta->dbg_fixed.rate_n_flags = parsed_rate; 2469 lq_sta->dbg_fixed_rate = parsed_rate;
2534 else 2470 else
2535 lq_sta->dbg_fixed.rate_n_flags = 0; 2471 lq_sta->dbg_fixed_rate = 0;
2536 2472
2537 lq_sta->active_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ 2473 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2538 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 2474 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2539 lq_sta->active_mimo_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 2475 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2476 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2540 2477
2541 IWL_DEBUG_RATE("sta_id %d rate 0x%X\n", 2478 IWL_DEBUG_RATE("sta_id %d rate 0x%X\n",
2542 lq_sta->lq.sta_id, lq_sta->dbg_fixed.rate_n_flags); 2479 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2543 2480
2544 if (lq_sta->dbg_fixed.rate_n_flags) { 2481 if (lq_sta->dbg_fixed_rate) {
2545 rs_fill_link_cmd(lq_sta, &lq_sta->dbg_fixed, &lq_sta->lq); 2482 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2546 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC); 2483 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC);
2547 } 2484 }
2548 2485
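The debugfs write handler above parses a hex rate_n_flags with sscanf(), stores it in dbg_fixed_rate (zero turns the override off), widens the active rate masks so the forced rate is never filtered out, and immediately rebuilds and sends the LQ command. A stripped-down sketch of that parse-and-apply flow; apply_fixed_rate() is a stand-in for the rs_fill_link_cmd()/iwl_send_lq_cmd() pair:

/* Sketch of the debugfs "fixed rate" write path; apply_fixed_rate() is a stand-in. */
#include <stdint.h>
#include <stdio.h>

static void apply_fixed_rate(uint32_t rate_n_flags) { (void)rate_n_flags; }

static void handle_scale_table_write(const char *buf)
{
	unsigned int parsed_rate;
	uint32_t dbg_fixed_rate = 0;

	if (sscanf(buf, "%x", &parsed_rate) == 1)
		dbg_fixed_rate = parsed_rate;	/* non-zero: force this rate_n_flags */

	/* (the real handler also re-opens all active rate masks here) */
	if (dbg_fixed_rate)
		apply_fixed_rate(dbg_fixed_rate);	/* rebuild + send the LQ command */
}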
@@ -2561,9 +2498,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2561 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); 2498 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2562 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", 2499 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
2563 lq_sta->total_failed, lq_sta->total_success, 2500 lq_sta->total_failed, lq_sta->total_success,
2564 lq_sta->active_rate); 2501 lq_sta->active_legacy_rate);
2565 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 2502 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2566 lq_sta->dbg_fixed.rate_n_flags); 2503 lq_sta->dbg_fixed_rate);
2567 desc += sprintf(buff+desc, "general:" 2504 desc += sprintf(buff+desc, "general:"
2568 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", 2505 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
2569 lq_sta->lq.general_params.flags, 2506 lq_sta->lq.general_params.flags,
@@ -2613,7 +2550,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
2613 lq_sta->lq_info[i].is_SGI, 2550 lq_sta->lq_info[i].is_SGI,
2614 lq_sta->lq_info[i].is_fat, 2551 lq_sta->lq_info[i].is_fat,
2615 lq_sta->lq_info[i].is_dup, 2552 lq_sta->lq_info[i].is_dup,
2616 lq_sta->lq_info[i].current_rate.rate_n_flags); 2553 lq_sta->lq_info[i].current_rate);
2617 for (j = 0; j < IWL_RATE_COUNT; j++) { 2554 for (j = 0; j < IWL_RATE_COUNT; j++) {
2618 desc += sprintf(buff+desc, 2555 desc += sprintf(buff+desc,
2619 "counter=%d success=%d %%=%d\n", 2556 "counter=%d success=%d %%=%d\n",
@@ -2640,11 +2577,9 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
2640 lq_sta->rs_sta_dbgfs_stats_table_file = 2577 lq_sta->rs_sta_dbgfs_stats_table_file =
2641 debugfs_create_file("rate_stats_table", 0600, dir, 2578 debugfs_create_file("rate_stats_table", 0600, dir,
2642 lq_sta, &rs_sta_dbgfs_stats_table_ops); 2579 lq_sta, &rs_sta_dbgfs_stats_table_ops);
2643#ifdef CONFIG_IWL4965_HT
2644 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = 2580 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
2645 debugfs_create_u8("tx_agg_tid_enable", 0600, dir, 2581 debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
2646 &lq_sta->tx_agg_tid_en); 2582 &lq_sta->tx_agg_tid_en);
2647#endif
2648 2583
2649} 2584}
2650 2585
@@ -2653,9 +2588,7 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
2653 struct iwl4965_lq_sta *lq_sta = priv_sta; 2588 struct iwl4965_lq_sta *lq_sta = priv_sta;
2654 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); 2589 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2655 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); 2590 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2656#ifdef CONFIG_IWL4965_HT
2657 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); 2591 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
2658#endif
2659} 2592}
2660#endif 2593#endif
2661 2594
@@ -2703,7 +2636,7 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2703 lq_sta = (void *)sta->rate_ctrl_priv; 2636 lq_sta = (void *)sta->rate_ctrl_priv;
2704 2637
2705 lq_type = lq_sta->lq_info[lq_sta->active_tbl].lq_type; 2638 lq_type = lq_sta->lq_info[lq_sta->active_tbl].lq_type;
2706 antenna = lq_sta->lq_info[lq_sta->active_tbl].antenna_type; 2639 antenna = lq_sta->lq_info[lq_sta->active_tbl].ant_type;
2707 2640
2708 if (is_legacy(lq_type)) 2641 if (is_legacy(lq_type))
2709 i = IWL_RATE_54M_INDEX; 2642 i = IWL_RATE_54M_INDEX;
@@ -2715,7 +2648,7 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2715 int active = lq_sta->active_tbl; 2648 int active = lq_sta->active_tbl;
2716 2649
2717 cnt += 2650 cnt +=
2718 sprintf(&buf[cnt], " %2dMbs: ", iwl4965_rates[i].ieee / 2); 2651 sprintf(&buf[cnt], " %2dMbs: ", iwl_rates[i].ieee / 2);
2719 2652
2720 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1)); 2653 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
2721 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1) 2654 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
@@ -2726,7 +2659,7 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2726 samples += lq_sta->lq_info[active].win[i].counter; 2659 samples += lq_sta->lq_info[active].win[i].counter;
2727 good += lq_sta->lq_info[active].win[i].success_counter; 2660 good += lq_sta->lq_info[active].win[i].success_counter;
2728 success += lq_sta->lq_info[active].win[i].success_counter * 2661 success += lq_sta->lq_info[active].win[i].success_counter *
2729 iwl4965_rates[i].ieee; 2662 iwl_rates[i].ieee;
2730 2663
2731 if (lq_sta->lq_info[active].win[i].stamp) { 2664 if (lq_sta->lq_info[active].win[i].stamp) {
2732 int delta = 2665 int delta =
@@ -2746,10 +2679,11 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2746 i = j; 2679 i = j;
2747 } 2680 }
2748 2681
2749 /* Display the average rate of all samples taken. 2682 /*
2750 * 2683 * Display the average rate of all samples taken.
2751 * NOTE: We multiply # of samples by 2 since the IEEE measurement 2684 * NOTE: We multiply # of samples by 2 since the IEEE measurement
2752 * added from iwl4965_rates is actually 2X the rate */ 2685 * added from iwl_rates is actually 2X the rate.
2686 */
2753 if (samples) 2687 if (samples)
2754 cnt += sprintf(&buf[cnt], 2688 cnt += sprintf(&buf[cnt],
2755 "\nAverage rate is %3d.%02dMbs over last %4dms\n" 2689 "\nAverage rate is %3d.%02dMbs over last %4dms\n"
@@ -2767,13 +2701,6 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2767 return cnt; 2701 return cnt;
2768} 2702}
2769 2703
2770void iwl4965_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
2771{
2772 struct iwl_priv *priv = hw->priv;
2773
2774 priv->lq_mngr.lq_ready = 1;
2775}
2776
2777int iwl4965_rate_control_register(void) 2704int iwl4965_rate_control_register(void)
2778{ 2705{
2779 return ieee80211_rate_control_register(&rs_ops); 2706 return ieee80211_rate_control_register(&rs_ops);
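The comment in iwl4965_fill_rs_info() points out that iwl_rates[].ieee holds rates in IEEE 500 kbps units, i.e. twice the Mbps value (54 Mbps is stored as 108), so the success sum is divided by 2 * samples to report Mbps. A tiny worked example of that arithmetic:

/* Sketch: averaging with .ieee values in 500 kbps units (2x the Mbps rate). */
#include <stdio.h>

int main(void)
{
	int samples = 10;
	int success_ieee_sum = 8 * 108 + 2 * 96;	/* 8 frames at 54 Mbps, 2 at 48 Mbps */
	int avg_x100 = success_ieee_sum * 100 / (2 * samples);

	printf("average %d.%02d Mbps\n", avg_x100 / 100, avg_x100 % 100);	/* 52.80 Mbps */
	return 0;
}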
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
index 866e378aa385..9b9972885aa5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
@@ -27,12 +27,13 @@
27#ifndef __iwl_4965_rs_h__ 27#ifndef __iwl_4965_rs_h__
28#define __iwl_4965_rs_h__ 28#define __iwl_4965_rs_h__
29 29
30#include "iwl-4965.h" 30#include "iwl-dev.h"
31 31
32struct iwl4965_rate_info { 32struct iwl_rate_info {
33 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ 33 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
34 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ 34 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
35 u8 plcp_mimo; /* uCode API: IWL_RATE_MIMO_6M_PLCP, etc. */ 35 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
36 u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
36 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ 37 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
37 u8 prev_ieee; /* previous rate in IEEE speeds */ 38 u8 prev_ieee; /* previous rate in IEEE speeds */
38 u8 next_ieee; /* next rate in IEEE speeds */ 39 u8 next_ieee; /* next rate in IEEE speeds */
@@ -44,7 +45,7 @@ struct iwl4965_rate_info {
44 45
45/* 46/*
46 * These serve as indexes into 47 * These serve as indexes into
47 * struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT]; 48 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
48 */ 49 */
49enum { 50enum {
50 IWL_RATE_1M_INDEX = 0, 51 IWL_RATE_1M_INDEX = 0,
@@ -60,9 +61,9 @@ enum {
60 IWL_RATE_48M_INDEX, 61 IWL_RATE_48M_INDEX,
61 IWL_RATE_54M_INDEX, 62 IWL_RATE_54M_INDEX,
62 IWL_RATE_60M_INDEX, 63 IWL_RATE_60M_INDEX,
63 IWL_RATE_COUNT, 64 IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/
64 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, 65 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
65 IWL_RATE_INVALID = IWL_RATE_INVM_INDEX 66 IWL_RATE_INVALID = IWL_RATE_COUNT,
66}; 67};
67 68
68enum { 69enum {
@@ -97,11 +98,13 @@ enum {
97 IWL_RATE_36M_PLCP = 11, 98 IWL_RATE_36M_PLCP = 11,
98 IWL_RATE_48M_PLCP = 1, 99 IWL_RATE_48M_PLCP = 1,
99 IWL_RATE_54M_PLCP = 3, 100 IWL_RATE_54M_PLCP = 3,
100 IWL_RATE_60M_PLCP = 3, 101 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
101 IWL_RATE_1M_PLCP = 10, 102 IWL_RATE_1M_PLCP = 10,
102 IWL_RATE_2M_PLCP = 20, 103 IWL_RATE_2M_PLCP = 20,
103 IWL_RATE_5M_PLCP = 55, 104 IWL_RATE_5M_PLCP = 55,
104 IWL_RATE_11M_PLCP = 110, 105 IWL_RATE_11M_PLCP = 110,
106 /*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */
107 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
105}; 108};
106 109
107/* 4965 uCode API values for OFDM high-throughput (HT) bit rates */ 110/* 4965 uCode API values for OFDM high-throughput (HT) bit rates */
@@ -114,16 +117,25 @@ enum {
114 IWL_RATE_SISO_48M_PLCP = 5, 117 IWL_RATE_SISO_48M_PLCP = 5,
115 IWL_RATE_SISO_54M_PLCP = 6, 118 IWL_RATE_SISO_54M_PLCP = 6,
116 IWL_RATE_SISO_60M_PLCP = 7, 119 IWL_RATE_SISO_60M_PLCP = 7,
117 IWL_RATE_MIMO_6M_PLCP = 0x8, 120 IWL_RATE_MIMO2_6M_PLCP = 0x8,
118 IWL_RATE_MIMO_12M_PLCP = 0x9, 121 IWL_RATE_MIMO2_12M_PLCP = 0x9,
119 IWL_RATE_MIMO_18M_PLCP = 0xa, 122 IWL_RATE_MIMO2_18M_PLCP = 0xa,
120 IWL_RATE_MIMO_24M_PLCP = 0xb, 123 IWL_RATE_MIMO2_24M_PLCP = 0xb,
121 IWL_RATE_MIMO_36M_PLCP = 0xc, 124 IWL_RATE_MIMO2_36M_PLCP = 0xc,
122 IWL_RATE_MIMO_48M_PLCP = 0xd, 125 IWL_RATE_MIMO2_48M_PLCP = 0xd,
123 IWL_RATE_MIMO_54M_PLCP = 0xe, 126 IWL_RATE_MIMO2_54M_PLCP = 0xe,
124 IWL_RATE_MIMO_60M_PLCP = 0xf, 127 IWL_RATE_MIMO2_60M_PLCP = 0xf,
128 IWL_RATE_MIMO3_6M_PLCP = 0x10,
129 IWL_RATE_MIMO3_12M_PLCP = 0x11,
130 IWL_RATE_MIMO3_18M_PLCP = 0x12,
131 IWL_RATE_MIMO3_24M_PLCP = 0x13,
132 IWL_RATE_MIMO3_36M_PLCP = 0x14,
133 IWL_RATE_MIMO3_48M_PLCP = 0x15,
134 IWL_RATE_MIMO3_54M_PLCP = 0x16,
135 IWL_RATE_MIMO3_60M_PLCP = 0x17,
125 IWL_RATE_SISO_INVM_PLCP, 136 IWL_RATE_SISO_INVM_PLCP,
126 IWL_RATE_MIMO_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, 137 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
138 IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
127}; 139};
128 140
129/* MAC header values for bit rates */ 141/* MAC header values for bit rates */
@@ -196,11 +208,11 @@ enum {
196/* possible actions when in legacy mode */ 208/* possible actions when in legacy mode */
197#define IWL_LEGACY_SWITCH_ANTENNA 0 209#define IWL_LEGACY_SWITCH_ANTENNA 0
198#define IWL_LEGACY_SWITCH_SISO 1 210#define IWL_LEGACY_SWITCH_SISO 1
199#define IWL_LEGACY_SWITCH_MIMO 2 211#define IWL_LEGACY_SWITCH_MIMO2 2
200 212
201/* possible actions when in siso mode */ 213/* possible actions when in siso mode */
202#define IWL_SISO_SWITCH_ANTENNA 0 214#define IWL_SISO_SWITCH_ANTENNA 0
203#define IWL_SISO_SWITCH_MIMO 1 215#define IWL_SISO_SWITCH_MIMO2 1
204#define IWL_SISO_SWITCH_GI 2 216#define IWL_SISO_SWITCH_GI 2
205 217
206/* possible actions when in mimo mode */ 218/* possible actions when in mimo mode */
@@ -208,6 +220,10 @@ enum {
208#define IWL_MIMO_SWITCH_ANTENNA_B 1 220#define IWL_MIMO_SWITCH_ANTENNA_B 1
209#define IWL_MIMO_SWITCH_GI 2 221#define IWL_MIMO_SWITCH_GI 2
210 222
223/*FIXME:RS:separate MIMO2/3 transitions*/
224
225/*FIXME:RS:add possible actions for MIMO3*/
226
211#define IWL_ACTION_LIMIT 3 /* # possible actions */ 227#define IWL_ACTION_LIMIT 3 /* # possible actions */
212 228
213#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ 229#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
@@ -224,43 +240,52 @@ enum {
224#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) 240#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
225#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) 241#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
226 242
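TIME_WRAP_AROUND(x, y) returns the distance from x to y on a timestamp that may have wrapped: a plain difference when y is already past x, otherwise the span through the wrap point. A quick self-contained check of that behaviour on a 32-bit counter:

/* Sketch: TIME_WRAP_AROUND() behaviour on a wrapped 32-bit counter. */
#include <assert.h>
#include <stdint.h>

#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0 - (x)) + (y))

int main(void)
{
	uint32_t before_wrap = 0xFFFFFFF0u;
	uint32_t after_wrap  = 0x00000010u;

	/* 0x10 ticks to reach the wrap point plus 0x10 after it = 0x20. */
	assert(TIME_WRAP_AROUND(before_wrap, after_wrap) == 0x20u);
	return 0;
}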
227extern const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT]; 243extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
228 244
229enum iwl4965_table_type { 245enum iwl_table_type {
230 LQ_NONE, 246 LQ_NONE,
231 LQ_G, /* legacy types */ 247 LQ_G, /* legacy types */
232 LQ_A, 248 LQ_A,
233 LQ_SISO, /* high-throughput types */ 249 LQ_SISO, /* high-throughput types */
234 LQ_MIMO, 250 LQ_MIMO2,
251 LQ_MIMO3,
235 LQ_MAX, 252 LQ_MAX,
236}; 253};
237 254
238#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) 255#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
239#define is_siso(tbl) (((tbl) == LQ_SISO)) 256#define is_siso(tbl) ((tbl) == LQ_SISO)
240#define is_mimo(tbl) (((tbl) == LQ_MIMO)) 257#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
258#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
259#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
241#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) 260#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
242#define is_a_band(tbl) (((tbl) == LQ_A)) 261#define is_a_band(tbl) ((tbl) == LQ_A)
243#define is_g_and(tbl) (((tbl) == LQ_G)) 262#define is_g_and(tbl) ((tbl) == LQ_G)
244 263
245/* 4965 has 2 antennas/chains for Tx (but 3 for Rx) */ 264#define ANT_NONE 0x0
246enum iwl4965_antenna_type { 265#define ANT_A BIT(0)
247 ANT_NONE, 266#define ANT_B BIT(1)
248 ANT_MAIN, 267#define ANT_AB (ANT_A | ANT_B)
249 ANT_AUX, 268#define ANT_C BIT(2)
250 ANT_BOTH, 269#define ANT_AC (ANT_A | ANT_C)
251}; 270#define ANT_BC (ANT_B | ANT_C)
271#define ANT_ABC (ANT_AB | ANT_C)
272
273static inline u8 num_of_ant(u8 mask)
274{
275 return !!((mask) & ANT_A) +
276 !!((mask) & ANT_B) +
277 !!((mask) & ANT_C);
278}
252 279
253static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index) 280static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index)
254{ 281{
255 u8 rate = iwl4965_rates[rate_index].prev_ieee; 282 u8 rate = iwl_rates[rate_index].prev_ieee;
256 283
257 if (rate == IWL_RATE_INVALID) 284 if (rate == IWL_RATE_INVALID)
258 rate = rate_index; 285 rate = rate_index;
259 return rate; 286 return rate;
260} 287}
261 288
262extern int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags);
263
264/** 289/**
265 * iwl4965_fill_rs_info - Fill an output text buffer with the rate representation 290 * iwl4965_fill_rs_info - Fill an output text buffer with the rate representation
266 * 291 *
@@ -271,14 +296,6 @@ extern int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags);
271extern int iwl4965_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id); 296extern int iwl4965_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
272 297
273/** 298/**
274 * iwl4965_rate_scale_init - Initialize the rate scale table based on assoc info
275 *
276 * The specific throughput table used is based on the type of network
277 * the associated with, including A, B, G, and G w/ TGG protection
278 */
279extern void iwl4965_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
280
281/**
282 * iwl4965_rate_control_register - Register the rate control algorithm callbacks 299 * iwl4965_rate_control_register - Register the rate control algorithm callbacks
283 * 300 *
284 * Since the rate control algorithm is hardware specific, there is no need 301 * Since the rate control algorithm is hardware specific, there is no need
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index de330ae0ca95..9afecb813716 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -39,81 +39,33 @@
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40 40
41#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
42#include "iwl-4965.h" 42#include "iwl-dev.h"
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-calib.h"
47#include "iwl-sta.h"
48
49static int iwl4965_send_tx_power(struct iwl_priv *priv);
50static int iwl4965_hw_get_temperature(const struct iwl_priv *priv);
51
52/* Change firmware file name, using "-" and incrementing number,
53 * *only* when uCode interface or architecture changes so that it
54 * is not compatible with earlier drivers.
55 * This number will also appear in << 8 position of 1st dword of uCode file */
56#define IWL4965_UCODE_API "-2"
57
46 58
47/* module parameters */ 59/* module parameters */
48static struct iwl_mod_params iwl4965_mod_params = { 60static struct iwl_mod_params iwl4965_mod_params = {
49 .num_of_queues = IWL4965_MAX_NUM_QUEUES, 61 .num_of_queues = IWL49_NUM_QUEUES,
62 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
50 .enable_qos = 1, 63 .enable_qos = 1,
51 .amsdu_size_8K = 1, 64 .amsdu_size_8K = 1,
65 .restart_fw = 1,
52 /* the rest are 0 by default */ 66 /* the rest are 0 by default */
53}; 67};
54 68
55static void iwl4965_hw_card_show_info(struct iwl_priv *priv);
56
57#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
58 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
59 IWL_RATE_SISO_##s##M_PLCP, \
60 IWL_RATE_MIMO_##s##M_PLCP, \
61 IWL_RATE_##r##M_IEEE, \
62 IWL_RATE_##ip##M_INDEX, \
63 IWL_RATE_##in##M_INDEX, \
64 IWL_RATE_##rp##M_INDEX, \
65 IWL_RATE_##rn##M_INDEX, \
66 IWL_RATE_##pp##M_INDEX, \
67 IWL_RATE_##np##M_INDEX }
68
69/*
70 * Parameter order:
71 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
72 *
73 * If there isn't a valid next or previous rate then INV is used which
74 * maps to IWL_RATE_INVALID
75 *
76 */
77const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
78 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
79 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
80 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
81 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
82 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
83 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
84 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
85 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
86 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
87 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
88 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
89 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
90 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
91};
92
93#ifdef CONFIG_IWL4965_HT
94
95static const u16 default_tid_to_tx_fifo[] = {
96 IWL_TX_FIFO_AC1,
97 IWL_TX_FIFO_AC0,
98 IWL_TX_FIFO_AC0,
99 IWL_TX_FIFO_AC1,
100 IWL_TX_FIFO_AC2,
101 IWL_TX_FIFO_AC2,
102 IWL_TX_FIFO_AC3,
103 IWL_TX_FIFO_AC3,
104 IWL_TX_FIFO_NONE,
105 IWL_TX_FIFO_NONE,
106 IWL_TX_FIFO_NONE,
107 IWL_TX_FIFO_NONE,
108 IWL_TX_FIFO_NONE,
109 IWL_TX_FIFO_NONE,
110 IWL_TX_FIFO_NONE,
111 IWL_TX_FIFO_NONE,
112 IWL_TX_FIFO_AC3
113};
114
115#endif /*CONFIG_IWL4965_HT */
116
117/* check contents of special bootstrap uCode SRAM */ 69/* check contents of special bootstrap uCode SRAM */
118static int iwl4965_verify_bsm(struct iwl_priv *priv) 70static int iwl4965_verify_bsm(struct iwl_priv *priv)
119{ 71{
@@ -192,15 +144,18 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
192 144
193 IWL_DEBUG_INFO("Begin load bsm\n"); 145 IWL_DEBUG_INFO("Begin load bsm\n");
194 146
147 priv->ucode_type = UCODE_RT;
148
195 /* make sure bootstrap program is no larger than BSM's SRAM size */ 149 /* make sure bootstrap program is no larger than BSM's SRAM size */
196 if (len > IWL_MAX_BSM_SIZE) 150 if (len > IWL_MAX_BSM_SIZE)
197 return -EINVAL; 151 return -EINVAL;
198 152
199 /* Tell bootstrap uCode where to find the "Initialize" uCode 153 /* Tell bootstrap uCode where to find the "Initialize" uCode
200 * in host DRAM ... host DRAM physical address bits 35:4 for 4965. 154 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
201 * NOTE: iwl4965_initialize_alive_start() will replace these values, 155 * NOTE: iwl_init_alive_start() will replace these values,
202 * after the "initialize" uCode has run, to point to 156 * after the "initialize" uCode has run, to point to
203 * runtime/protocol instructions and backup data cache. */ 157 * runtime/protocol instructions and backup data cache.
158 */
204 pinst = priv->ucode_init.p_addr >> 4; 159 pinst = priv->ucode_init.p_addr >> 4;
205 pdata = priv->ucode_init_data.p_addr >> 4; 160 pdata = priv->ucode_init_data.p_addr >> 4;
206 inst_len = priv->ucode_init.len; 161 inst_len = priv->ucode_init.len;
@@ -259,271 +214,134 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
259 return 0; 214 return 0;
260} 215}
261 216
262static int iwl4965_init_drv(struct iwl_priv *priv) 217/**
218 * iwl4965_set_ucode_ptrs - Set uCode address location
219 *
220 * Tell initialization uCode where to find runtime uCode.
221 *
222 * BSM registers initially contain pointers to initialization uCode.
223 * We need to replace them to load runtime uCode inst and data,
224 * and to save runtime data when powering down.
225 */
226static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
263{ 227{
264 int ret; 228 dma_addr_t pinst;
265 int i; 229 dma_addr_t pdata;
266 230 unsigned long flags;
267 priv->antenna = (enum iwl4965_antenna)priv->cfg->mod_params->antenna; 231 int ret = 0;
268 priv->retry_rate = 1;
269 priv->ibss_beacon = NULL;
270
271 spin_lock_init(&priv->lock);
272 spin_lock_init(&priv->power_data.lock);
273 spin_lock_init(&priv->sta_lock);
274 spin_lock_init(&priv->hcmd_lock);
275 spin_lock_init(&priv->lq_mngr.lock);
276
277 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
278 sizeof(struct iwl4965_shared),
279 &priv->shared_phys);
280
281 if (!priv->shared_virt) {
282 ret = -ENOMEM;
283 goto err;
284 }
285
286 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
287
288
289 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
290 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
291
292 INIT_LIST_HEAD(&priv->free_frames);
293
294 mutex_init(&priv->mutex);
295
296 /* Clear the driver's (not device's) station table */
297 iwlcore_clear_stations_table(priv);
298
299 priv->data_retry_limit = -1;
300 priv->ieee_channels = NULL;
301 priv->ieee_rates = NULL;
302 priv->band = IEEE80211_BAND_2GHZ;
303
304 priv->iw_mode = IEEE80211_IF_TYPE_STA;
305
306 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
307 priv->valid_antenna = 0x7; /* assume all 3 connected */
308 priv->ps_mode = IWL_MIMO_PS_NONE;
309
310 /* Choose which receivers/antennas to use */
311 iwl4965_set_rxon_chain(priv);
312
313 iwlcore_reset_qos(priv);
314
315 priv->qos_data.qos_active = 0;
316 priv->qos_data.qos_cap.val = 0;
317
318 iwlcore_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
319 232
320 priv->rates_mask = IWL_RATES_MASK; 233 /* bits 35:4 for 4965 */
321 /* If power management is turned on, default to AC mode */ 234 pinst = priv->ucode_code.p_addr >> 4;
322 priv->power_mode = IWL_POWER_AC; 235 pdata = priv->ucode_data_backup.p_addr >> 4;
323 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
324 236
325 ret = iwl_init_channel_map(priv); 237 spin_lock_irqsave(&priv->lock, flags);
238 ret = iwl_grab_nic_access(priv);
326 if (ret) { 239 if (ret) {
327 IWL_ERROR("initializing regulatory failed: %d\n", ret); 240 spin_unlock_irqrestore(&priv->lock, flags);
328 goto err; 241 return ret;
329 } 242 }
330 243
331 ret = iwl4965_init_geos(priv); 244 /* Tell bootstrap uCode where to find image to load */
332 if (ret) { 245 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
333 IWL_ERROR("initializing geos failed: %d\n", ret); 246 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
334 goto err_free_channel_map; 247 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
335 } 248 priv->ucode_data.len);
336 249
337 ret = ieee80211_register_hw(priv->hw); 250 /* Inst bytecount must be last to set up, bit 31 signals uCode
338 if (ret) { 251 * that all new ptr/size info is in place */
339 IWL_ERROR("Failed to register network device (error %d)\n", 252 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
340 ret); 253 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
341 goto err_free_geos; 254 iwl_release_nic_access(priv);
342 }
343 255
344 priv->hw->conf.beacon_int = 100; 256 spin_unlock_irqrestore(&priv->lock, flags);
345 priv->mac80211_registered = 1;
346 257
347 return 0; 258 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
348 259
349err_free_geos:
350 iwl4965_free_geos(priv);
351err_free_channel_map:
352 iwl_free_channel_map(priv);
353err:
354 return ret; 260 return ret;
355} 261}
356 262
357static int is_fat_channel(__le32 rxon_flags) 263/**
358{ 264 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
359 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) || 265 *
360 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK); 266 * Called after REPLY_ALIVE notification received from "initialize" uCode.
361} 267 *
362 268 * The 4965 "initialize" ALIVE reply contains calibration data for:
363static u8 is_single_stream(struct iwl_priv *priv) 269 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
364{ 270 * (3945 does not contain this data).
365#ifdef CONFIG_IWL4965_HT 271 *
366 if (!priv->current_ht_config.is_ht || 272 * Tell "initialize" uCode to go ahead and load the runtime uCode.
367 (priv->current_ht_config.supp_mcs_set[1] == 0) || 273*/
368 (priv->ps_mode == IWL_MIMO_PS_STATIC)) 274static void iwl4965_init_alive_start(struct iwl_priv *priv)
369 return 1; 275{
370#else 276 /* Check alive response for "valid" sign from uCode */
371 return 1; 277 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
372#endif /*CONFIG_IWL4965_HT */ 278 /* We had an error bringing up the hardware, so take it
373 return 0; 279 * all the way back down so we can try again */
374} 280 IWL_DEBUG_INFO("Initialize Alive failed.\n");
375 281 goto restart;
376int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags) 282 }
377{ 283
378 int idx = 0; 284 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
379 285 * This is a paranoid check, because we would not have gotten the
380 /* 4965 HT rate format */ 286 * "initialize" alive if code weren't properly loaded. */
381 if (rate_n_flags & RATE_MCS_HT_MSK) { 287 if (iwl_verify_ucode(priv)) {
382 idx = (rate_n_flags & 0xff); 288 /* Runtime instruction load was bad;
383 289 * take it all the way back down so we can try again */
384 if (idx >= IWL_RATE_MIMO_6M_PLCP) 290 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
385 idx = idx - IWL_RATE_MIMO_6M_PLCP; 291 goto restart;
386 292 }
387 idx += IWL_FIRST_OFDM_RATE; 293
388 /* skip 9M not supported in ht*/ 294 /* Calculate temperature */
389 if (idx >= IWL_RATE_9M_INDEX) 295 priv->temperature = iwl4965_hw_get_temperature(priv);
390 idx += 1; 296
391 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) 297 /* Send pointers to protocol/runtime uCode image ... init code will
392 return idx; 298 * load and launch runtime uCode, which will send us another "Alive"
393 299 * notification. */
394 /* 4965 legacy rate format, search for match in table */ 300 IWL_DEBUG_INFO("Initialization Alive received.\n");
395 } else { 301 if (iwl4965_set_ucode_ptrs(priv)) {
396 for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++) 302 /* Runtime instruction load won't happen;
397 if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF)) 303 * take it all the way back down so we can try again */
398 return idx; 304 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
305 goto restart;
399 } 306 }
307 return;
400 308
401 return -1; 309restart:
310 queue_work(priv->workqueue, &priv->restart);
402} 311}
403 312
404/** 313static int is_fat_channel(__le32 rxon_flags)
405 * translate ucode response to mac80211 tx status control values
406 */
407void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
408 struct ieee80211_tx_control *control)
409{ 314{
410 int rate_index; 315 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
411 316 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
412 control->antenna_sel_tx =
413 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
414 if (rate_n_flags & RATE_MCS_HT_MSK)
415 control->flags |= IEEE80211_TXCTL_OFDM_HT;
416 if (rate_n_flags & RATE_MCS_GF_MSK)
417 control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
418 if (rate_n_flags & RATE_MCS_FAT_MSK)
419 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
420 if (rate_n_flags & RATE_MCS_DUP_MSK)
421 control->flags |= IEEE80211_TXCTL_DUP_DATA;
422 if (rate_n_flags & RATE_MCS_SGI_MSK)
423 control->flags |= IEEE80211_TXCTL_SHORT_GI;
424 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
425 * IEEE80211_BAND_2GHZ band as it contains all the rates */
426 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
427 if (rate_index == -1)
428 control->tx_rate = NULL;
429 else
430 control->tx_rate =
431 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
432} 317}
433 318
434/* 319/*
435 * Determine how many receiver/antenna chains to use. 320 * EEPROM handlers
436 * More provides better reception via diversity. Fewer saves power.
437 * MIMO (dual stream) requires at least 2, but works better with 3.
438 * This does not determine *which* chains to use, just how many.
439 */ 321 */
440static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv,
441 u8 *idle_state, u8 *rx_state)
442{
443 u8 is_single = is_single_stream(priv);
444 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
445
446 /* # of Rx chains to use when expecting MIMO. */
447 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
448 *rx_state = 2;
449 else
450 *rx_state = 3;
451
452 /* # Rx chains when idling and maybe trying to save power */
453 switch (priv->ps_mode) {
454 case IWL_MIMO_PS_STATIC:
455 case IWL_MIMO_PS_DYNAMIC:
456 *idle_state = (is_cam) ? 2 : 1;
457 break;
458 case IWL_MIMO_PS_NONE:
459 *idle_state = (is_cam) ? *rx_state : 1;
460 break;
461 default:
462 *idle_state = 1;
463 break;
464 }
465 322
466 return 0; 323static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
467}
468
469int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
470{ 324{
471 int rc; 325 u16 eeprom_ver;
472 unsigned long flags; 326 u16 calib_ver;
473 327
474 spin_lock_irqsave(&priv->lock, flags); 328 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
475 rc = iwl_grab_nic_access(priv);
476 if (rc) {
477 spin_unlock_irqrestore(&priv->lock, flags);
478 return rc;
479 }
480 329
481 /* stop Rx DMA */ 330 calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
482 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
483 rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
484 (1 << 24), 1000);
485 if (rc < 0)
486 IWL_ERROR("Can't stop Rx DMA.\n");
487 331
488 iwl_release_nic_access(priv); 332 if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
489 spin_unlock_irqrestore(&priv->lock, flags); 333 calib_ver < EEPROM_4965_TX_POWER_VERSION)
334 goto err;
490 335
491 return 0; 336 return 0;
492} 337err:
493 338 IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
494u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *addr) 339 eeprom_ver, EEPROM_4965_EEPROM_VERSION,
495{ 340 calib_ver, EEPROM_4965_TX_POWER_VERSION);
496 int i; 341 return -EINVAL;
497 int start = 0;
498 int ret = IWL_INVALID_STATION;
499 unsigned long flags;
500 DECLARE_MAC_BUF(mac);
501
502 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
503 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
504 start = IWL_STA_ID;
505
506 if (is_broadcast_ether_addr(addr))
507 return priv->hw_params.bcast_sta_id;
508
509 spin_lock_irqsave(&priv->sta_lock, flags);
510 for (i = start; i < priv->hw_params.max_stations; i++)
511 if ((priv->stations[i].used) &&
512 (!compare_ether_addr
513 (priv->stations[i].sta.sta.addr, addr))) {
514 ret = i;
515 goto out;
516 }
517
518 IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
519 print_mac(mac, addr), priv->num_stations);
520 342
521 out:
522 spin_unlock_irqrestore(&priv->sta_lock, flags);
523 return ret;
524} 343}
525 344int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
526static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
527{ 345{
528 int ret; 346 int ret;
529 unsigned long flags; 347 unsigned long flags;
@@ -535,340 +353,130 @@ static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
535 return ret; 353 return ret;
536 } 354 }
537 355
538 if (!pwr_max) { 356 if (src == IWL_PWR_SRC_VAUX) {
539 u32 val; 357 u32 val;
540
541 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE, 358 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
542 &val); 359 &val);
543 360
544 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) 361 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
545 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 362 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
546 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 363 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
547 ~APMG_PS_CTRL_MSK_PWR_SRC); 364 ~APMG_PS_CTRL_MSK_PWR_SRC);
548 } else 365 }
366 } else {
549 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 367 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
550 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 368 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
551 ~APMG_PS_CTRL_MSK_PWR_SRC); 369 ~APMG_PS_CTRL_MSK_PWR_SRC);
552
553 iwl_release_nic_access(priv);
554 spin_unlock_irqrestore(&priv->lock, flags);
555
556 return ret;
557}
558
559static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
560{
561 int ret;
562 unsigned long flags;
563 unsigned int rb_size;
564
565 spin_lock_irqsave(&priv->lock, flags);
566 ret = iwl_grab_nic_access(priv);
567 if (ret) {
568 spin_unlock_irqrestore(&priv->lock, flags);
569 return ret;
570 } 370 }
571 371
572 if (priv->cfg->mod_params->amsdu_size_8K)
573 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
574 else
575 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
576
577 /* Stop Rx DMA */
578 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
579
580 /* Reset driver's Rx queue write index */
581 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
582
583 /* Tell device where to find RBD circular buffer in DRAM */
584 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
585 rxq->dma_addr >> 8);
586
587 /* Tell device where in DRAM to update its Rx status */
588 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
589 (priv->shared_phys +
590 offsetof(struct iwl4965_shared, rb_closed)) >> 4);
591
592 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
593 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
594 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
595 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
596 rb_size |
597 /* 0x10 << 4 | */
598 (RX_QUEUE_SIZE_LOG <<
599 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
600
601 /*
602 * iwl_write32(priv,CSR_INT_COAL_REG,0);
603 */
604
605 iwl_release_nic_access(priv);
606 spin_unlock_irqrestore(&priv->lock, flags);
607
608 return 0;
609}
610
611/* Tell 4965 where to find the "keep warm" buffer */
612static int iwl4965_kw_init(struct iwl_priv *priv)
613{
614 unsigned long flags;
615 int rc;
616
617 spin_lock_irqsave(&priv->lock, flags);
618 rc = iwl_grab_nic_access(priv);
619 if (rc)
620 goto out;
621
622 iwl_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
623 priv->kw.dma_addr >> 4);
624 iwl_release_nic_access(priv); 372 iwl_release_nic_access(priv);
625out:
626 spin_unlock_irqrestore(&priv->lock, flags); 373 spin_unlock_irqrestore(&priv->lock, flags);
627 return rc;
628}
629
630static int iwl4965_kw_alloc(struct iwl_priv *priv)
631{
632 struct pci_dev *dev = priv->pci_dev;
633 struct iwl4965_kw *kw = &priv->kw;
634
635 kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
636 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
637 if (!kw->v_addr)
638 return -ENOMEM;
639 374
640 return 0; 375 return ret;
641} 376}
642 377
643/** 378/*
 644 * iwl4965_kw_free - Free the "keep warm" buffer 379 * Activate/deactivate Tx DMA/FIFO channels according to the tx fifos mask
 380 * must be called under priv->lock and with mac access held
645 */ 381 */
646static void iwl4965_kw_free(struct iwl_priv *priv) 382static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
647{ 383{
648 struct pci_dev *dev = priv->pci_dev; 384 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
649 struct iwl4965_kw *kw = &priv->kw;
650
651 if (kw->v_addr) {
652 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
653 memset(kw, 0, sizeof(*kw));
654 }
655} 385}
656 386
657/** 387static int iwl4965_apm_init(struct iwl_priv *priv)
658 * iwl4965_txq_ctx_reset - Reset TX queue context
 659 * Destroys all DMA structures and initialises them again
660 *
661 * @param priv
662 * @return error code
663 */
664static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
665{ 388{
666 int rc = 0; 389 int ret = 0;
667 int txq_id, slots_num;
668 unsigned long flags;
669
670 iwl4965_kw_free(priv);
671
672 /* Free all tx/cmd queues and keep-warm buffer */
673 iwl4965_hw_txq_ctx_free(priv);
674
675 /* Alloc keep-warm buffer */
676 rc = iwl4965_kw_alloc(priv);
677 if (rc) {
678 IWL_ERROR("Keep Warm allocation failed");
679 goto error_kw;
680 }
681
682 spin_lock_irqsave(&priv->lock, flags);
683
684 rc = iwl_grab_nic_access(priv);
685 if (unlikely(rc)) {
686 IWL_ERROR("TX reset failed");
687 spin_unlock_irqrestore(&priv->lock, flags);
688 goto error_reset;
689 }
690
691 /* Turn off all Tx DMA channels */
692 iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
693 iwl_release_nic_access(priv);
694 spin_unlock_irqrestore(&priv->lock, flags);
695
696 /* Tell 4965 where to find the keep-warm buffer */
697 rc = iwl4965_kw_init(priv);
698 if (rc) {
699 IWL_ERROR("kw_init failed\n");
700 goto error_reset;
701 }
702
703 /* Alloc and init all (default 16) Tx queues,
704 * including the command queue (#4) */
705 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
706 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
707 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
708 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
709 txq_id);
710 if (rc) {
711 IWL_ERROR("Tx %d queue init failed\n", txq_id);
712 goto error;
713 }
714 }
715
716 return rc;
717
718 error:
719 iwl4965_hw_txq_ctx_free(priv);
720 error_reset:
721 iwl4965_kw_free(priv);
722 error_kw:
723 return rc;
724}
725
726int iwl4965_hw_nic_init(struct iwl_priv *priv)
727{
728 int rc;
729 unsigned long flags;
730 struct iwl4965_rx_queue *rxq = &priv->rxq;
731 u8 rev_id;
732 u32 val;
733 u8 val_link;
734
735 iwl4965_power_init_handle(priv);
736 390
737 /* nic_init */ 391 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
738 spin_lock_irqsave(&priv->lock, flags); 392 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
739 393
 394 /* disable L0s without affecting L1; don't wait for ICH L0s bug W/A */
740 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 395 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
741 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 396 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
742 397
398 /* set "initialization complete" bit to move adapter
399 * D0U* --> D0A* state */
743 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 400 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
744 rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
745 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
746 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
747 if (rc < 0) {
748 spin_unlock_irqrestore(&priv->lock, flags);
749 IWL_DEBUG_INFO("Failed to init the card\n");
750 return rc;
751 }
752 401
753 rc = iwl_grab_nic_access(priv); 402 /* wait for clock stabilization */
754 if (rc) { 403 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
755 spin_unlock_irqrestore(&priv->lock, flags); 404 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
756 return rc; 405 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
406 if (ret < 0) {
407 IWL_DEBUG_INFO("Failed to init the card\n");
408 goto out;
757 } 409 }
758 410
759 iwl_read_prph(priv, APMG_CLK_CTRL_REG); 411 ret = iwl_grab_nic_access(priv);
412 if (ret)
413 goto out;
760 414
761 iwl_write_prph(priv, APMG_CLK_CTRL_REG, 415 /* enable DMA */
762 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); 416 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
763 iwl_read_prph(priv, APMG_CLK_CTRL_REG); 417 APMG_CLK_VAL_BSM_CLK_RQT);
764 418
765 udelay(20); 419 udelay(20);
766 420
421 /* disable L1-Active */
767 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 422 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
768 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 423 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
769 424
770 iwl_release_nic_access(priv); 425 iwl_release_nic_access(priv);
771 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32); 426out:
772 spin_unlock_irqrestore(&priv->lock, flags); 427 return ret;
428}
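The clock-stabilization wait above is the usual poll-with-timeout idiom: read CSR_GP_CNTRL repeatedly until the MAC clock-ready flag appears or 25000 usec elapse. The standalone sketch below is not driver code; the read_reg() helper, the bit value and the one-poll-per-microsecond granularity are invented for the example.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_csr_gp_cntrl;      /* stand-in for CSR_GP_CNTRL */

    /* hypothetical register read: pretend HW raises the ready bit eventually */
    static uint32_t read_reg(void)
    {
            static int calls;

            if (++calls > 40)
                    fake_csr_gp_cntrl |= 0x00000001;
            return fake_csr_gp_cntrl;
    }

    /* Return the number of polls spent waiting, or -1 on timeout. */
    static int poll_bit(uint32_t bits, uint32_t mask, int timeout_usec)
    {
            int t;

            for (t = 0; t < timeout_usec; t++)
                    if ((read_reg() & mask) == (bits & mask))
                            return t;
            return -1;
    }

    int main(void)
    {
            /* mirrors the 25000-usec clock-ready wait in iwl4965_apm_init() */
            int ret = poll_bit(0x00000001, 0x00000001, 25000);

            printf("clock ready after %d polls\n", ret);
            return ret < 0;
    }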
773 429
774 /* Determine HW type */
775 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
776 if (rc)
777 return rc;
778 430
779 IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id); 431static void iwl4965_nic_config(struct iwl_priv *priv)
432{
433 unsigned long flags;
434 u32 val;
435 u16 radio_cfg;
436 u8 val_link;
780 437
781 iwl4965_nic_set_pwr_src(priv, 1);
782 spin_lock_irqsave(&priv->lock, flags); 438 spin_lock_irqsave(&priv->lock, flags);
783 439
784 if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) { 440 if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
785 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val); 441 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
786 /* Enable No Snoop field */ 442 /* Enable No Snoop field */
787 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8, 443 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
788 val & ~(1 << 11)); 444 val & ~(1 << 11));
789 } 445 }
790 446
791 spin_unlock_irqrestore(&priv->lock, flags);
792
793 if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
794 IWL_ERROR("Older EEPROM detected! Aborting.\n");
795 return -EINVAL;
796 }
797
798 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); 447 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
799 448
800 /* disable L1 entry -- workaround for pre-B1 */ 449 /* L1 is enabled by BIOS */
801 pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02); 450 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
 451 /* L0S disabled, L1A enabled */
452 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
453 else
454 /* L0S enabled L1A disabled */
455 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
802 456
803 spin_lock_irqsave(&priv->lock, flags); 457 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
804 458
805 /* set CSR_HW_CONFIG_REG for uCode use */ 459 /* write radio config values to register */
460 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
461 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
462 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
463 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
464 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
806 465
466 /* set CSR_HW_CONFIG_REG for uCode use */
807 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 467 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
808 CSR49_HW_IF_CONFIG_REG_BIT_4965_R | 468 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
809 CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI | 469 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
810 CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);
811 470
812 rc = iwl_grab_nic_access(priv); 471 priv->calib_info = (struct iwl_eeprom_calib_info *)
813 if (rc < 0) { 472 iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
814 spin_unlock_irqrestore(&priv->lock, flags);
815 IWL_DEBUG_INFO("Failed to init the card\n");
816 return rc;
817 }
818 473
819 iwl_read_prph(priv, APMG_PS_CTRL_REG);
820 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
821 udelay(5);
822 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
823
824 iwl_release_nic_access(priv);
825 spin_unlock_irqrestore(&priv->lock, flags); 474 spin_unlock_irqrestore(&priv->lock, flags);
826
827 iwl4965_hw_card_show_info(priv);
828
829 /* end nic_init */
830
831 /* Allocate the RX queue, or reset if it is already allocated */
832 if (!rxq->bd) {
833 rc = iwl4965_rx_queue_alloc(priv);
834 if (rc) {
835 IWL_ERROR("Unable to initialize Rx queue\n");
836 return -ENOMEM;
837 }
838 } else
839 iwl4965_rx_queue_reset(priv, rxq);
840
841 iwl4965_rx_replenish(priv);
842
843 iwl4965_rx_init(priv, rxq);
844
845 spin_lock_irqsave(&priv->lock, flags);
846
847 rxq->need_update = 1;
848 iwl4965_rx_queue_update_write_ptr(priv, rxq);
849
850 spin_unlock_irqrestore(&priv->lock, flags);
851
852 /* Allocate and init all Tx and Command queues */
853 rc = iwl4965_txq_ctx_reset(priv);
854 if (rc)
855 return rc;
856
857 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
858 IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");
859
860 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
861 IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
862
863 set_bit(STATUS_INIT, &priv->status);
864
865 return 0;
866} 475}
867 476
868int iwl4965_hw_nic_stop_master(struct iwl_priv *priv) 477static int iwl4965_apm_stop_master(struct iwl_priv *priv)
869{ 478{
870 int rc = 0; 479 int ret = 0;
871 u32 reg_val;
872 unsigned long flags; 480 unsigned long flags;
873 481
874 spin_lock_irqsave(&priv->lock, flags); 482 spin_lock_irqsave(&priv->lock, flags);
@@ -876,64 +484,24 @@ int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
876 /* set stop master bit */ 484 /* set stop master bit */
877 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 485 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
878 486
879 reg_val = iwl_read32(priv, CSR_GP_CNTRL); 487 ret = iwl_poll_bit(priv, CSR_RESET,
880
881 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
882 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
883 IWL_DEBUG_INFO("Card in power save, master is already "
884 "stopped\n");
885 else {
886 rc = iwl_poll_bit(priv, CSR_RESET,
887 CSR_RESET_REG_FLAG_MASTER_DISABLED, 488 CSR_RESET_REG_FLAG_MASTER_DISABLED,
888 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 489 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
889 if (rc < 0) { 490 if (ret < 0)
890 spin_unlock_irqrestore(&priv->lock, flags); 491 goto out;
891 return rc;
892 }
893 }
894 492
493out:
895 spin_unlock_irqrestore(&priv->lock, flags); 494 spin_unlock_irqrestore(&priv->lock, flags);
896 IWL_DEBUG_INFO("stop master\n"); 495 IWL_DEBUG_INFO("stop master\n");
897 496
898 return rc; 497 return ret;
899}
900
901/**
902 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
903 */
904void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
905{
906
907 int txq_id;
908 unsigned long flags;
909
910 /* Stop each Tx DMA channel, and wait for it to be idle */
911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
912 spin_lock_irqsave(&priv->lock, flags);
913 if (iwl_grab_nic_access(priv)) {
914 spin_unlock_irqrestore(&priv->lock, flags);
915 continue;
916 }
917
918 iwl_write_direct32(priv,
919 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
920 iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
921 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
922 (txq_id), 200);
923 iwl_release_nic_access(priv);
924 spin_unlock_irqrestore(&priv->lock, flags);
925 }
926
927 /* Deallocate memory for all Tx queues */
928 iwl4965_hw_txq_ctx_free(priv);
929} 498}
930 499
931int iwl4965_hw_nic_reset(struct iwl_priv *priv) 500static void iwl4965_apm_stop(struct iwl_priv *priv)
932{ 501{
933 int rc = 0;
934 unsigned long flags; 502 unsigned long flags;
935 503
936 iwl4965_hw_nic_stop_master(priv); 504 iwl4965_apm_stop_master(priv);
937 505
938 spin_lock_irqsave(&priv->lock, flags); 506 spin_lock_irqsave(&priv->lock, flags);
939 507
@@ -942,508 +510,66 @@ int iwl4965_hw_nic_reset(struct iwl_priv *priv)
942 udelay(10); 510 udelay(10);
943 511
944 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 512 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
945 rc = iwl_poll_bit(priv, CSR_RESET,
946 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
947 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
948
949 udelay(10);
950
951 rc = iwl_grab_nic_access(priv);
952 if (!rc) {
953 iwl_write_prph(priv, APMG_CLK_EN_REG,
954 APMG_CLK_VAL_DMA_CLK_RQT |
955 APMG_CLK_VAL_BSM_CLK_RQT);
956
957 udelay(10);
958
959 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
960 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
961
962 iwl_release_nic_access(priv);
963 }
964
965 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
966 wake_up_interruptible(&priv->wait_command_queue);
967
968 spin_unlock_irqrestore(&priv->lock, flags); 513 spin_unlock_irqrestore(&priv->lock, flags);
969
970 return rc;
971
972} 514}
973 515
974#define REG_RECALIB_PERIOD (60) 516static int iwl4965_apm_reset(struct iwl_priv *priv)
975
976/**
977 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
978 *
979 * This callback is provided in order to send a statistics request.
980 *
981 * This timer function is continually reset to execute within
982 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
983 * was received. We need to ensure we receive the statistics in order
984 * to update the temperature used for calibrating the TXPOWER.
985 */
986static void iwl4965_bg_statistics_periodic(unsigned long data)
987{ 517{
988 struct iwl_priv *priv = (struct iwl_priv *)data;
989
990 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
991 return;
992
993 iwl_send_statistics_request(priv, CMD_ASYNC);
994}
995
996#define CT_LIMIT_CONST 259
997#define TM_CT_KILL_THRESHOLD 110
998
999void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1000{
1001 struct iwl4965_ct_kill_config cmd;
1002 u32 R1, R2, R3;
1003 u32 temp_th;
1004 u32 crit_temperature;
1005 unsigned long flags;
1006 int ret = 0; 518 int ret = 0;
519 unsigned long flags;
1007 520
1008 spin_lock_irqsave(&priv->lock, flags); 521 iwl4965_apm_stop_master(priv);
1009 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1010 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1011 spin_unlock_irqrestore(&priv->lock, flags);
1012
1013 if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
1014 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1015 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1016 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1017 } else {
1018 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1019 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1020 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1021 }
1022
1023 temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
1024
1025 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
1026 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
1027 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1028 sizeof(cmd), &cmd);
1029 if (ret)
1030 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
1031 else
1032 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
1033}
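For reference, the critical-temperature formula above, crit = temp_th * (R3 - R1) / CT_LIMIT_CONST + R2, can be worked through with sample numbers. The sketch below is not driver code: the R1/R2/R3 calibration values are invented, and CELSIUS_TO_KELVIN() is assumed to be a plain +273 offset.

    #include <stdio.h>
    #include <stdint.h>

    #define CT_LIMIT_CONST          259
    #define TM_CT_KILL_THRESHOLD    110
    #define CELSIUS_TO_KELVIN(c)    ((c) + 273)     /* assumption */

    int main(void)
    {
            /* hypothetical per-NIC thermal calibration values (normally taken
             * from the "alive" notification fields therm_r1/r2/r3) */
            int32_t R1 = 100, R2 = 200, R3 = 1000;
            uint32_t temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
            uint32_t crit = ((temp_th * (R3 - R1)) / CT_LIMIT_CONST) + R2;

            /* 383 * 900 / 259 + 200 = 1330 + 200 = 1530 (device units) */
            printf("critical_temperature_R = %u\n", crit);
            return 0;
    }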
1034
1035#ifdef CONFIG_IWL4965_SENSITIVITY
1036
1037/* "false alarms" are signals that our DSP tries to lock onto,
1038 * but then determines that they are either noise, or transmissions
1039 * from a distant wireless network (also "noise", really) that get
1040 * "stepped on" by stronger transmissions within our own network.
1041 * This algorithm attempts to set a sensitivity level that is high
1042 * enough to receive all of our own network traffic, but not so
1043 * high that our DSP gets too busy trying to lock onto non-network
1044 * activity/noise. */
1045static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
1046 u32 norm_fa,
1047 u32 rx_enable_time,
1048 struct statistics_general_data *rx_info)
1049{
1050 u32 max_nrg_cck = 0;
1051 int i = 0;
1052 u8 max_silence_rssi = 0;
1053 u32 silence_ref = 0;
1054 u8 silence_rssi_a = 0;
1055 u8 silence_rssi_b = 0;
1056 u8 silence_rssi_c = 0;
1057 u32 val;
1058
1059 /* "false_alarms" values below are cross-multiplications to assess the
1060 * numbers of false alarms within the measured period of actual Rx
1061 * (Rx is off when we're txing), vs the min/max expected false alarms
1062 * (some should be expected if rx is sensitive enough) in a
1063 * hypothetical listening period of 200 time units (TU), 204.8 msec:
1064 *
1065 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
1066 *
1067 * */
1068 u32 false_alarms = norm_fa * 200 * 1024;
1069 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
1070 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
1071 struct iwl4965_sensitivity_data *data = NULL;
1072
1073 data = &(priv->sensitivity_data);
1074
1075 data->nrg_auto_corr_silence_diff = 0;
1076
1077 /* Find max silence rssi among all 3 receivers.
1078 * This is background noise, which may include transmissions from other
1079 * networks, measured during silence before our network's beacon */
1080 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
1081 ALL_BAND_FILTER) >> 8);
1082 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
1083 ALL_BAND_FILTER) >> 8);
1084 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
1085 ALL_BAND_FILTER) >> 8);
1086
1087 val = max(silence_rssi_b, silence_rssi_c);
1088 max_silence_rssi = max(silence_rssi_a, (u8) val);
1089
1090 /* Store silence rssi in 20-beacon history table */
1091 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
1092 data->nrg_silence_idx++;
1093 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
1094 data->nrg_silence_idx = 0;
1095
1096 /* Find max silence rssi across 20 beacon history */
1097 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
1098 val = data->nrg_silence_rssi[i];
1099 silence_ref = max(silence_ref, val);
1100 }
1101 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
1102 silence_rssi_a, silence_rssi_b, silence_rssi_c,
1103 silence_ref);
1104
1105 /* Find max rx energy (min value!) among all 3 receivers,
1106 * measured during beacon frame.
1107 * Save it in 10-beacon history table. */
1108 i = data->nrg_energy_idx;
1109 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
1110 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
1111
1112 data->nrg_energy_idx++;
1113 if (data->nrg_energy_idx >= 10)
1114 data->nrg_energy_idx = 0;
1115
1116 /* Find min rx energy (max value) across 10 beacon history.
1117 * This is the minimum signal level that we want to receive well.
1118 * Add backoff (margin so we don't miss slightly lower energy frames).
1119 * This establishes an upper bound (min value) for energy threshold. */
1120 max_nrg_cck = data->nrg_value[0];
1121 for (i = 1; i < 10; i++)
1122 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
1123 max_nrg_cck += 6;
1124
1125 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
1126 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
1127 rx_info->beacon_energy_c, max_nrg_cck - 6);
1128
1129 /* Count number of consecutive beacons with fewer-than-desired
1130 * false alarms. */
1131 if (false_alarms < min_false_alarms)
1132 data->num_in_cck_no_fa++;
1133 else
1134 data->num_in_cck_no_fa = 0;
1135 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
1136 data->num_in_cck_no_fa);
1137
1138 /* If we got too many false alarms this time, reduce sensitivity */
1139 if (false_alarms > max_false_alarms) {
1140 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
1141 false_alarms, max_false_alarms);
1142 IWL_DEBUG_CALIB("... reducing sensitivity\n");
1143 data->nrg_curr_state = IWL_FA_TOO_MANY;
1144
1145 if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
1146 /* Store for "fewer than desired" on later beacon */
1147 data->nrg_silence_ref = silence_ref;
1148
1149 /* increase energy threshold (reduce nrg value)
1150 * to decrease sensitivity */
1151 if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
1152 data->nrg_th_cck = data->nrg_th_cck
1153 - NRG_STEP_CCK;
1154 }
1155
1156 /* increase auto_corr values to decrease sensitivity */
1157 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
1158 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
1159 else {
1160 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
1161 data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
1162 }
1163 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
1164 data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);
1165
1166 /* Else if we got fewer than desired, increase sensitivity */
1167 } else if (false_alarms < min_false_alarms) {
1168 data->nrg_curr_state = IWL_FA_TOO_FEW;
1169
1170 /* Compare silence level with silence level for most recent
1171 * healthy number or too many false alarms */
1172 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
1173 (s32)silence_ref;
1174
1175 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
1176 false_alarms, min_false_alarms,
1177 data->nrg_auto_corr_silence_diff);
1178
1179 /* Increase value to increase sensitivity, but only if:
1180 * 1a) previous beacon did *not* have *too many* false alarms
1181 * 1b) AND there's a significant difference in Rx levels
1182 * from a previous beacon with too many, or healthy # FAs
1183 * OR 2) We've seen a lot of beacons (100) with too few
1184 * false alarms */
1185 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
1186 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
1187 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
1188
1189 IWL_DEBUG_CALIB("... increasing sensitivity\n");
1190 /* Increase nrg value to increase sensitivity */
1191 val = data->nrg_th_cck + NRG_STEP_CCK;
1192 data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);
1193
1194 /* Decrease auto_corr values to increase sensitivity */
1195 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
1196 data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);
1197
1198 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
1199 data->auto_corr_cck_mrc =
1200 max((u32)AUTO_CORR_MIN_CCK_MRC, val);
1201
1202 } else
1203 IWL_DEBUG_CALIB("... but not changing sensitivity\n");
1204
1205 /* Else we got a healthy number of false alarms, keep status quo */
1206 } else {
1207 IWL_DEBUG_CALIB(" FA in safe zone\n");
1208 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
1209
1210 /* Store for use in "fewer than desired" with later beacon */
1211 data->nrg_silence_ref = silence_ref;
1212
1213 /* If previous beacon had too many false alarms,
1214 * give it some extra margin by reducing sensitivity again
1215 * (but don't go below measured energy of desired Rx) */
1216 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
1217 IWL_DEBUG_CALIB("... increasing margin\n");
1218 data->nrg_th_cck -= NRG_MARGIN;
1219 }
1220 }
1221
1222 /* Make sure the energy threshold does not go above the measured
1223 * energy of the desired Rx signals (reduced by backoff margin),
1224 * or else we might start missing Rx frames.
1225 * Lower value is higher energy, so we use max()!
1226 */
1227 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
1228 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
1229
1230 data->nrg_prev_state = data->nrg_curr_state;
1231
1232 return 0;
1233}
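The comparison in iwl4965_sens_energy_cck() above avoids division by cross-multiplying: norm_fa per rx_enable_time is weighed against MAX_FA_CCK per 200 TU (200 * 1024 usec). A standalone worked example follows; it is not driver code, and the MIN_FA_CCK/MAX_FA_CCK thresholds and sample counters are invented for illustration.

    #include <stdio.h>
    #include <stdint.h>

    #define MIN_FA_CCK      5       /* hypothetical: min FAs per 200 TU */
    #define MAX_FA_CCK      50      /* hypothetical: max FAs per 200 TU */

    int main(void)
    {
            uint32_t norm_fa = 30;                  /* false alarms seen */
            uint32_t rx_enable_time = 100000;       /* usec of actual Rx */

            uint32_t false_alarms = norm_fa * 200 * 1024;           /* scaled */
            uint32_t max_false_alarms = MAX_FA_CCK * rx_enable_time;
            uint32_t min_false_alarms = MIN_FA_CCK * rx_enable_time;

            /* 30 * 204800 = 6144000; max = 50 * 100000 = 5000000, so this
             * sample exceeds the ceiling and sensitivity would be reduced */
            if (false_alarms > max_false_alarms)
                    printf("too many false alarms -> reduce sensitivity\n");
            else if (false_alarms < min_false_alarms)
                    printf("too few false alarms -> increase sensitivity\n");
            else
                    printf("false alarm rate in the safe zone\n");
            return 0;
    }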
1234
1235
1236static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
1237 u32 norm_fa,
1238 u32 rx_enable_time)
1239{
1240 u32 val;
1241 u32 false_alarms = norm_fa * 200 * 1024;
1242 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
1243 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
1244 struct iwl4965_sensitivity_data *data = NULL;
1245
1246 data = &(priv->sensitivity_data);
1247
1248 /* If we got too many false alarms this time, reduce sensitivity */
1249 if (false_alarms > max_false_alarms) {
1250
1251 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
1252 false_alarms, max_false_alarms);
1253
1254 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
1255 data->auto_corr_ofdm =
1256 min((u32)AUTO_CORR_MAX_OFDM, val);
1257
1258 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
1259 data->auto_corr_ofdm_mrc =
1260 min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
1261
1262 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
1263 data->auto_corr_ofdm_x1 =
1264 min((u32)AUTO_CORR_MAX_OFDM_X1, val);
1265
1266 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
1267 data->auto_corr_ofdm_mrc_x1 =
1268 min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
1269 }
1270
1271 /* Else if we got fewer than desired, increase sensitivity */
1272 else if (false_alarms < min_false_alarms) {
1273
1274 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
1275 false_alarms, min_false_alarms);
1276
1277 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
1278 data->auto_corr_ofdm =
1279 max((u32)AUTO_CORR_MIN_OFDM, val);
1280
1281 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
1282 data->auto_corr_ofdm_mrc =
1283 max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
1284
1285 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
1286 data->auto_corr_ofdm_x1 =
1287 max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1288
1289 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1290 data->auto_corr_ofdm_mrc_x1 =
1291 max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1292 }
1293 522
1294 else 523 spin_lock_irqsave(&priv->lock, flags);
1295 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1296 min_false_alarms, false_alarms, max_false_alarms);
1297 524
1298 return 0; 525 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1299}
1300 526
1301static int iwl4965_sensitivity_callback(struct iwl_priv *priv, 527 udelay(10);
1302 struct iwl_cmd *cmd, struct sk_buff *skb)
1303{
1304 /* We didn't cache the SKB; let the caller free it */
1305 return 1;
1306}
1307 528
1308/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 529 /* FIXME: put here L1A -L0S w/a */
1309static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags)
1310{
1311 struct iwl4965_sensitivity_cmd cmd ;
1312 struct iwl4965_sensitivity_data *data = NULL;
1313 struct iwl_host_cmd cmd_out = {
1314 .id = SENSITIVITY_CMD,
1315 .len = sizeof(struct iwl4965_sensitivity_cmd),
1316 .meta.flags = flags,
1317 .data = &cmd,
1318 };
1319 int ret;
1320 530
1321 data = &(priv->sensitivity_data); 531 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1322
1323 memset(&cmd, 0, sizeof(cmd));
1324
1325 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1326 cpu_to_le16((u16)data->auto_corr_ofdm);
1327 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1328 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1329 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1330 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1331 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1332 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1333
1334 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1335 cpu_to_le16((u16)data->auto_corr_cck);
1336 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1337 cpu_to_le16((u16)data->auto_corr_cck_mrc);
1338
1339 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1340 cpu_to_le16((u16)data->nrg_th_cck);
1341 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1342 cpu_to_le16((u16)data->nrg_th_ofdm);
1343
1344 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1345 __constant_cpu_to_le16(190);
1346 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1347 __constant_cpu_to_le16(390);
1348 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1349 __constant_cpu_to_le16(62);
1350
1351 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1352 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1353 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1354 data->nrg_th_ofdm);
1355
1356 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1357 data->auto_corr_cck, data->auto_corr_cck_mrc,
1358 data->nrg_th_cck);
1359
1360 /* Update uCode's "work" table, and copy it to DSP */
1361 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1362
1363 if (flags & CMD_ASYNC)
1364 cmd_out.meta.u.callback = iwl4965_sensitivity_callback;
1365
1366 /* Don't send command to uCode if nothing has changed */
1367 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1368 sizeof(u16)*HD_TABLE_SIZE)) {
1369 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1370 return 0;
1371 }
1372 532
1373 /* Copy table for comparison next time */ 533 ret = iwl_poll_bit(priv, CSR_RESET,
1374 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 534 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1375 sizeof(u16)*HD_TABLE_SIZE); 535 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
1376 536
1377 ret = iwl_send_cmd(priv, &cmd_out);
1378 if (ret) 537 if (ret)
1379 IWL_ERROR("SENSITIVITY_CMD failed\n"); 538 goto out;
1380
1381 return ret;
1382}
1383
1384void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force)
1385{
1386 struct iwl4965_sensitivity_data *data = NULL;
1387 int i;
1388 int ret = 0;
1389
1390 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1391
1392 if (force)
1393 memset(&(priv->sensitivity_tbl[0]), 0,
1394 sizeof(u16)*HD_TABLE_SIZE);
1395
1396 /* Clear driver's sensitivity algo data */
1397 data = &(priv->sensitivity_data);
1398 memset(data, 0, sizeof(struct iwl4965_sensitivity_data));
1399 539
1400 data->num_in_cck_no_fa = 0; 540 udelay(10);
1401 data->nrg_curr_state = IWL_FA_TOO_MANY;
1402 data->nrg_prev_state = IWL_FA_TOO_MANY;
1403 data->nrg_silence_ref = 0;
1404 data->nrg_silence_idx = 0;
1405 data->nrg_energy_idx = 0;
1406 541
1407 for (i = 0; i < 10; i++) 542 ret = iwl_grab_nic_access(priv);
1408 data->nrg_value[i] = 0; 543 if (ret)
544 goto out;
545 /* Enable DMA and BSM Clock */
546 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
547 APMG_CLK_VAL_BSM_CLK_RQT);
1409 548
1410 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) 549 udelay(10);
1411 data->nrg_silence_rssi[i] = 0;
1412 550
1413 data->auto_corr_ofdm = 90; 551 /* disable L1A */
1414 data->auto_corr_ofdm_mrc = 170; 552 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1415 data->auto_corr_ofdm_x1 = 105; 553 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1416 data->auto_corr_ofdm_mrc_x1 = 220;
1417 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1418 data->auto_corr_cck_mrc = 200;
1419 data->nrg_th_cck = 100;
1420 data->nrg_th_ofdm = 100;
1421 554
1422 data->last_bad_plcp_cnt_ofdm = 0; 555 iwl_release_nic_access(priv);
1423 data->last_fa_cnt_ofdm = 0;
1424 data->last_bad_plcp_cnt_cck = 0;
1425 data->last_fa_cnt_cck = 0;
1426 556
1427 /* Clear prior Sensitivity command data to force send to uCode */ 557 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1428 if (force) 558 wake_up_interruptible(&priv->wait_command_queue);
1429 memset(&(priv->sensitivity_tbl[0]), 0,
1430 sizeof(u16)*HD_TABLE_SIZE);
1431 559
1432 ret |= iwl4965_sensitivity_write(priv, flags); 560out:
1433 IWL_DEBUG_CALIB("<<return 0x%X\n", ret); 561 spin_unlock_irqrestore(&priv->lock, flags);
1434 562
1435 return; 563 return ret;
1436} 564}
1437 565
1438
1439/* Reset differential Rx gains in NIC to prepare for chain noise calibration. 566/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1440 * Called after every association, but this runs only once! 567 * Called after every association, but this runs only once!
1441 * ... once chain noise is calibrated the first time, it's good forever. */ 568 * ... once chain noise is calibrated the first time, it's good forever. */
1442void iwl4965_chain_noise_reset(struct iwl_priv *priv) 569static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1443{ 570{
1444 struct iwl4965_chain_noise_data *data = NULL; 571 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
1445 572
1446 data = &(priv->chain_noise_data);
1447 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { 573 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
1448 struct iwl4965_calibration_cmd cmd; 574 struct iwl4965_calibration_cmd cmd;
1449 575
@@ -1452,388 +578,89 @@ void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1452 cmd.diff_gain_a = 0; 578 cmd.diff_gain_a = 0;
1453 cmd.diff_gain_b = 0; 579 cmd.diff_gain_b = 0;
1454 cmd.diff_gain_c = 0; 580 cmd.diff_gain_c = 0;
1455 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, 581 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1456 sizeof(cmd), &cmd, NULL); 582 sizeof(cmd), &cmd))
1457 msleep(4); 583 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
1458 data->state = IWL_CHAIN_NOISE_ACCUMULATE; 584 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1459 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); 585 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1460 } 586 }
1461 return;
1462} 587}
1463 588
1464/* 589static void iwl4965_gain_computation(struct iwl_priv *priv,
1465 * Accumulate 20 beacons of signal and noise statistics for each of 590 u32 *average_noise,
1466 * 3 receivers/antennas/rx-chains, then figure out: 591 u16 min_average_noise_antenna_i,
1467 * 1) Which antennas are connected. 592 u32 min_average_noise)
1468 * 2) Differential rx gain settings to balance the 3 receivers.
1469 */
1470static void iwl4965_noise_calibration(struct iwl_priv *priv,
1471 struct iwl4965_notif_statistics *stat_resp)
1472{ 593{
1473 struct iwl4965_chain_noise_data *data = NULL; 594 int i, ret;
1474 int ret = 0; 595 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1475
1476 u32 chain_noise_a;
1477 u32 chain_noise_b;
1478 u32 chain_noise_c;
1479 u32 chain_sig_a;
1480 u32 chain_sig_b;
1481 u32 chain_sig_c;
1482 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1483 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1484 u32 max_average_sig;
1485 u16 max_average_sig_antenna_i;
1486 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1487 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1488 u16 i = 0;
1489 u16 chan_num = INITIALIZATION_VALUE;
1490 u32 band = INITIALIZATION_VALUE;
1491 u32 active_chains = 0;
1492 unsigned long flags;
1493 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1494
1495 data = &(priv->chain_noise_data);
1496
1497 /* Accumulate just the first 20 beacons after the first association,
1498 * then we're done forever. */
1499 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1500 if (data->state == IWL_CHAIN_NOISE_ALIVE)
1501 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1502 return;
1503 }
1504
1505 spin_lock_irqsave(&priv->lock, flags);
1506 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1507 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1508 spin_unlock_irqrestore(&priv->lock, flags);
1509 return;
1510 }
1511
1512 band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1513 chan_num = le16_to_cpu(priv->staging_rxon.channel);
1514
1515 /* Make sure we accumulate data for just the associated channel
1516 * (even if scanning). */
1517 if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1518 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1519 (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1520 IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1521 chan_num, band);
1522 spin_unlock_irqrestore(&priv->lock, flags);
1523 return;
1524 }
1525
1526 /* Accumulate beacon statistics values across 20 beacons */
1527 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1528 IN_BAND_FILTER;
1529 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1530 IN_BAND_FILTER;
1531 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1532 IN_BAND_FILTER;
1533
1534 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1535 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1536 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1537
1538 spin_unlock_irqrestore(&priv->lock, flags);
1539
1540 data->beacon_count++;
1541
1542 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1543 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1544 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1545
1546 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1547 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1548 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1549
1550 IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1551 data->beacon_count);
1552 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1553 chain_sig_a, chain_sig_b, chain_sig_c);
1554 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1555 chain_noise_a, chain_noise_b, chain_noise_c);
1556
1557 /* If this is the 20th beacon, determine:
1558 * 1) Disconnected antennas (using signal strengths)
1559 * 2) Differential gain (using silence noise) to balance receivers */
1560 if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1561
1562 /* Analyze signal for disconnected antenna */
1563 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1564 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1565 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1566
1567 if (average_sig[0] >= average_sig[1]) {
1568 max_average_sig = average_sig[0];
1569 max_average_sig_antenna_i = 0;
1570 active_chains = (1 << max_average_sig_antenna_i);
1571 } else {
1572 max_average_sig = average_sig[1];
1573 max_average_sig_antenna_i = 1;
1574 active_chains = (1 << max_average_sig_antenna_i);
1575 }
1576
1577 if (average_sig[2] >= max_average_sig) {
1578 max_average_sig = average_sig[2];
1579 max_average_sig_antenna_i = 2;
1580 active_chains = (1 << max_average_sig_antenna_i);
1581 }
1582
1583 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1584 average_sig[0], average_sig[1], average_sig[2]);
1585 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1586 max_average_sig, max_average_sig_antenna_i);
1587
1588 /* Compare signal strengths for all 3 receivers. */
1589 for (i = 0; i < NUM_RX_CHAINS; i++) {
1590 if (i != max_average_sig_antenna_i) {
1591 s32 rssi_delta = (max_average_sig -
1592 average_sig[i]);
1593
1594 /* If signal is very weak, compared with
1595 * strongest, mark it as disconnected. */
1596 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1597 data->disconn_array[i] = 1;
1598 else
1599 active_chains |= (1 << i);
1600 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1601 "disconn_array[i] = %d\n",
1602 i, rssi_delta, data->disconn_array[i]);
1603 }
1604 }
1605
1606 /*If both chains A & B are disconnected -
1607 * connect B and leave A as is */
1608 if (data->disconn_array[CHAIN_A] &&
1609 data->disconn_array[CHAIN_B]) {
1610 data->disconn_array[CHAIN_B] = 0;
1611 active_chains |= (1 << CHAIN_B);
1612 IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1613 "W/A - declare B as connected\n");
1614 }
1615 596
1616 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n", 597 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1617 active_chains);
1618 598
1619 /* Save for use within RXON, TX, SCAN commands, etc. */ 599 for (i = 0; i < NUM_RX_CHAINS; i++) {
1620 priv->valid_antenna = active_chains; 600 s32 delta_g = 0;
1621 601
1622 /* Analyze noise for rx balance */ 602 if (!(data->disconn_array[i]) &&
1623 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS); 603 (data->delta_gain_code[i] ==
1624 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1625 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1626
1627 for (i = 0; i < NUM_RX_CHAINS; i++) {
1628 if (!(data->disconn_array[i]) &&
1629 (average_noise[i] <= min_average_noise)) {
1630 /* This means that chain i is active and has
1631 * lower noise values so far: */
1632 min_average_noise = average_noise[i];
1633 min_average_noise_antenna_i = i;
1634 }
1635 }
1636
1637 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1638
1639 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
1640 average_noise[0], average_noise[1],
1641 average_noise[2]);
1642
1643 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
1644 min_average_noise, min_average_noise_antenna_i);
1645
1646 for (i = 0; i < NUM_RX_CHAINS; i++) {
1647 s32 delta_g = 0;
1648
1649 if (!(data->disconn_array[i]) &&
1650 (data->delta_gain_code[i] ==
1651 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { 604 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1652 delta_g = average_noise[i] - min_average_noise; 605 delta_g = average_noise[i] - min_average_noise;
1653 data->delta_gain_code[i] = (u8)((delta_g * 606 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
1654 10) / 15); 607 data->delta_gain_code[i] =
1655 if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE < 608 min(data->delta_gain_code[i],
1656 data->delta_gain_code[i]) 609 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
1657 data->delta_gain_code[i] = 610
1658 CHAIN_NOISE_MAX_DELTA_GAIN_CODE; 611 data->delta_gain_code[i] =
1659 612 (data->delta_gain_code[i] | (1 << 2));
1660 data->delta_gain_code[i] = 613 } else {
1661 (data->delta_gain_code[i] | (1 << 2)); 614 data->delta_gain_code[i] = 0;
1662 } else
1663 data->delta_gain_code[i] = 0;
1664 }
1665 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1666 data->delta_gain_code[0],
1667 data->delta_gain_code[1],
1668 data->delta_gain_code[2]);
1669
1670 /* Differential gain gets sent to uCode only once */
1671 if (!data->radio_write) {
1672 struct iwl4965_calibration_cmd cmd;
1673 data->radio_write = 1;
1674
1675 memset(&cmd, 0, sizeof(cmd));
1676 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1677 cmd.diff_gain_a = data->delta_gain_code[0];
1678 cmd.diff_gain_b = data->delta_gain_code[1];
1679 cmd.diff_gain_c = data->delta_gain_code[2];
1680 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1681 sizeof(cmd), &cmd);
1682 if (ret)
1683 IWL_DEBUG_CALIB("fail sending cmd "
1684 "REPLY_PHY_CALIBRATION_CMD \n");
1685
1686 /* TODO we might want recalculate
1687 * rx_chain in rxon cmd */
1688
1689 /* Mark so we run this algo only once! */
1690 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1691 } 615 }
1692 data->chain_noise_a = 0;
1693 data->chain_noise_b = 0;
1694 data->chain_noise_c = 0;
1695 data->chain_signal_a = 0;
1696 data->chain_signal_b = 0;
1697 data->chain_signal_c = 0;
1698 data->beacon_count = 0;
1699 }
1700 return;
1701}
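At the 20th beacon the removed iwl4965_noise_calibration() above averages the accumulated per-chain signal, keeps the strongest chain, and declares any chain whose average falls more than MAXIMUM_ALLOWED_PATHLOSS below it disconnected. A standalone sketch of that decision, with invented signal sums and an invented threshold value, not driver code:

    #include <stdio.h>
    #include <stdint.h>

    #define CAL_NUM_OF_BEACONS              20
    #define NUM_RX_CHAINS                   3
    #define MAXIMUM_ALLOWED_PATHLOSS        15      /* hypothetical threshold */

    int main(void)
    {
            /* pretend sums of beacon_rssi_a/b/c over 20 beacons */
            uint32_t chain_signal[NUM_RX_CHAINS] = { 4200, 4000, 1000 };
            uint32_t average_sig[NUM_RX_CHAINS], max_sig = 0, active_chains;
            int i, max_i = 0;

            for (i = 0; i < NUM_RX_CHAINS; i++) {
                    average_sig[i] = chain_signal[i] / CAL_NUM_OF_BEACONS;
                    if (average_sig[i] >= max_sig) {
                            max_sig = average_sig[i];
                            max_i = i;
                    }
            }
            active_chains = 1u << max_i;

            /* chain C averages 50 vs 210 for chain A: 160 > 15, so it is
             * declared disconnected; chain B (200) stays connected */
            for (i = 0; i < NUM_RX_CHAINS; i++) {
                    if (i == max_i)
                            continue;
                    if (max_sig - average_sig[i] > MAXIMUM_ALLOWED_PATHLOSS)
                            printf("chain %d disconnected\n", i);
                    else
                            active_chains |= 1u << i;
            }
            printf("active_chains = 0x%x\n", active_chains);
            return 0;
    }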
1702
1703static void iwl4965_sensitivity_calibration(struct iwl_priv *priv,
1704 struct iwl4965_notif_statistics *resp)
1705{
1706 u32 rx_enable_time;
1707 u32 fa_cck;
1708 u32 fa_ofdm;
1709 u32 bad_plcp_cck;
1710 u32 bad_plcp_ofdm;
1711 u32 norm_fa_ofdm;
1712 u32 norm_fa_cck;
1713 struct iwl4965_sensitivity_data *data = NULL;
1714 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
1715 struct statistics_rx *statistics = &(resp->rx);
1716 unsigned long flags;
1717 struct statistics_general_data statis;
1718 int ret;
1719
1720 data = &(priv->sensitivity_data);
1721
1722 if (!iwl_is_associated(priv)) {
1723 IWL_DEBUG_CALIB("<< - not associated\n");
1724 return;
1725 } 616 }
617 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
618 data->delta_gain_code[0],
619 data->delta_gain_code[1],
620 data->delta_gain_code[2]);
1726 621
1727 spin_lock_irqsave(&priv->lock, flags); 622 /* Differential gain gets sent to uCode only once */
1728 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 623 if (!data->radio_write) {
1729 IWL_DEBUG_CALIB("<< invalid data.\n"); 624 struct iwl4965_calibration_cmd cmd;
1730 spin_unlock_irqrestore(&priv->lock, flags); 625 data->radio_write = 1;
1731 return;
1732 }
1733
1734 /* Extract Statistics: */
1735 rx_enable_time = le32_to_cpu(rx_info->channel_load);
1736 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
1737 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
1738 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
1739 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
1740
1741 statis.beacon_silence_rssi_a =
1742 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
1743 statis.beacon_silence_rssi_b =
1744 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
1745 statis.beacon_silence_rssi_c =
1746 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
1747 statis.beacon_energy_a =
1748 le32_to_cpu(statistics->general.beacon_energy_a);
1749 statis.beacon_energy_b =
1750 le32_to_cpu(statistics->general.beacon_energy_b);
1751 statis.beacon_energy_c =
1752 le32_to_cpu(statistics->general.beacon_energy_c);
1753
1754 spin_unlock_irqrestore(&priv->lock, flags);
1755
1756 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
1757
1758 if (!rx_enable_time) {
1759 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
1760 return;
1761 }
1762
1763 /* These statistics increase monotonically, and do not reset
1764 * at each beacon. Calculate difference from last value, or just
1765 * use the new statistics value if it has reset or wrapped around. */
1766 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
1767 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
1768 else {
1769 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
1770 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
1771 }
1772 626
1773 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) 627 memset(&cmd, 0, sizeof(cmd));
1774 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; 628 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1775 else { 629 cmd.diff_gain_a = data->delta_gain_code[0];
1776 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; 630 cmd.diff_gain_b = data->delta_gain_code[1];
1777 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; 631 cmd.diff_gain_c = data->delta_gain_code[2];
1778 } 632 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
633 sizeof(cmd), &cmd);
634 if (ret)
635 IWL_DEBUG_CALIB("fail sending cmd "
636 "REPLY_PHY_CALIBRATION_CMD \n");
1779 637
1780 if (data->last_fa_cnt_ofdm > fa_ofdm) 638 /* TODO we might want recalculate
1781 data->last_fa_cnt_ofdm = fa_ofdm; 639 * rx_chain in rxon cmd */
1782 else {
1783 fa_ofdm -= data->last_fa_cnt_ofdm;
1784 data->last_fa_cnt_ofdm += fa_ofdm;
1785 }
1786 640
1787 if (data->last_fa_cnt_cck > fa_cck) 641 /* Mark so we run this algo only once! */
1788 data->last_fa_cnt_cck = fa_cck; 642 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1789 else {
1790 fa_cck -= data->last_fa_cnt_cck;
1791 data->last_fa_cnt_cck += fa_cck;
1792 } 643 }
1793 644 data->chain_noise_a = 0;
1794 /* Total aborted signal locks */ 645 data->chain_noise_b = 0;
1795 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; 646 data->chain_noise_c = 0;
1796 norm_fa_cck = fa_cck + bad_plcp_cck; 647 data->chain_signal_a = 0;
1797 648 data->chain_signal_b = 0;
1798 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, 649 data->chain_signal_c = 0;
1799 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); 650 data->beacon_count = 0;
1800
1801 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1802 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1803 ret = iwl4965_sensitivity_write(priv, CMD_ASYNC);
1804
1805 return;
1806} 651}
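The replacement iwl4965_gain_computation() above derives each chain's delta gain code as (delta_g * 10) / 15 relative to the quietest chain, clamps it, and sets bit 2. Worked through with sample numbers below; this is not driver code, and the noise figures and the CHAIN_NOISE_MAX_DELTA_GAIN_CODE clamp value are invented for the example.

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_RX_CHAINS                    3
    #define CHAIN_NOISE_MAX_DELTA_GAIN_CODE  3      /* hypothetical clamp */

    int main(void)
    {
            /* per-chain noise averaged over the calibration window */
            uint32_t average_noise[NUM_RX_CHAINS] = { 67, 64, 90 };
            uint32_t min_noise = 64;                /* quietest chain: index 1 */
            uint8_t delta_gain_code[NUM_RX_CHAINS] = { 0 };
            int i;

            for (i = 0; i < NUM_RX_CHAINS; i++) {
                    uint32_t delta_g;

                    if (i == 1)                     /* reference chain keeps 0 */
                            continue;
                    delta_g = average_noise[i] - min_noise;
                    delta_gain_code[i] = (uint8_t)((delta_g * 10) / 15);
                    if (delta_gain_code[i] > CHAIN_NOISE_MAX_DELTA_GAIN_CODE)
                            delta_gain_code[i] = CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
                    delta_gain_code[i] |= 1 << 2;   /* bit 2 set on adjusted
                                                     * chains, as in the code
                                                     * above */
            }

            /* chain 0: (3*10)/15 = 2 -> 2|4 = 6
             * chain 2: (26*10)/15 = 17 -> clamped to 3 -> 3|4 = 7 */
            printf("codes: a=%u b=%u c=%u\n",
                   delta_gain_code[0], delta_gain_code[1], delta_gain_code[2]);
            return 0;
    }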
1807 652
1808static void iwl4965_bg_sensitivity_work(struct work_struct *work) 653static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
654 __le32 *tx_flags)
1809{ 655{
1810 struct iwl_priv *priv = container_of(work, struct iwl_priv, 656 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
1811 sensitivity_work); 657 *tx_flags |= TX_CMD_FLG_RTS_MSK;
1812 658 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
1813 mutex_lock(&priv->mutex); 659 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
1814 660 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
1815 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 661 *tx_flags |= TX_CMD_FLG_CTS_MSK;
1816 test_bit(STATUS_SCANNING, &priv->status)) {
1817 mutex_unlock(&priv->mutex);
1818 return;
1819 }
1820
1821 if (priv->start_calib) {
1822 iwl4965_noise_calibration(priv, &priv->statistics);
1823
1824 if (priv->sensitivity_data.state ==
1825 IWL_SENS_CALIB_NEED_REINIT) {
1826 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1827 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1828 } else
1829 iwl4965_sensitivity_calibration(priv,
1830 &priv->statistics);
1831 } 662 }
1832
1833 mutex_unlock(&priv->mutex);
1834 return;
1835} 663}
1836#endif /*CONFIG_IWL4965_SENSITIVITY*/
1837 664
1838static void iwl4965_bg_txpower_work(struct work_struct *work) 665static void iwl4965_bg_txpower_work(struct work_struct *work)
1839{ 666{
@@ -1853,7 +680,7 @@ static void iwl4965_bg_txpower_work(struct work_struct *work)
1853 /* Regardless of whether we are associated, we must reconfigure the 680 /* Regardless of whether we are associated, we must reconfigure the
1854 * TX power since frames can be sent on non-radar channels while 681 * TX power since frames can be sent on non-radar channels while
1855 * not associated */ 682 * not associated */
1856 iwl4965_hw_reg_send_txpower(priv); 683 iwl4965_send_tx_power(priv);
1857 684
1858 /* Update last_temperature to keep is_calib_needed from running 685 /* Update last_temperature to keep is_calib_needed from running
1859 * when it isn't needed... */ 686 * when it isn't needed... */
@@ -1880,7 +707,7 @@ static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
1880 * NOTE: Acquire priv->lock before calling this function ! 707 * NOTE: Acquire priv->lock before calling this function !
1881 */ 708 */
1882static void iwl4965_tx_queue_set_status(struct iwl_priv *priv, 709static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1883 struct iwl4965_tx_queue *txq, 710 struct iwl_tx_queue *txq,
1884 int tx_fifo_id, int scd_retry) 711 int tx_fifo_id, int scd_retry)
1885{ 712{
1886 int txq_id = txq->q.id; 713 int txq_id = txq->q.id;
@@ -1890,11 +717,11 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1890 717
1891 /* Set up and activate */ 718 /* Set up and activate */
1892 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), 719 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
1893 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 720 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1894 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | 721 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
1895 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) | 722 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
1896 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) | 723 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1897 SCD_QUEUE_STTS_REG_MSK); 724 IWL49_SCD_QUEUE_STTS_REG_MSK);
1898 725
1899 txq->sched_retry = scd_retry; 726 txq->sched_retry = scd_retry;
1900 727
@@ -1908,22 +735,12 @@ static const u16 default_queue_to_tx_fifo[] = {
1908 IWL_TX_FIFO_AC2, 735 IWL_TX_FIFO_AC2,
1909 IWL_TX_FIFO_AC1, 736 IWL_TX_FIFO_AC1,
1910 IWL_TX_FIFO_AC0, 737 IWL_TX_FIFO_AC0,
1911 IWL_CMD_FIFO_NUM, 738 IWL49_CMD_FIFO_NUM,
1912 IWL_TX_FIFO_HCCA_1, 739 IWL_TX_FIFO_HCCA_1,
1913 IWL_TX_FIFO_HCCA_2 740 IWL_TX_FIFO_HCCA_2
1914}; 741};
1915 742
1916static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 743static int iwl4965_alive_notify(struct iwl_priv *priv)
1917{
1918 set_bit(txq_id, &priv->txq_ctx_active_msk);
1919}
1920
1921static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1922{
1923 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1924}
1925
1926int iwl4965_alive_notify(struct iwl_priv *priv)
1927{ 744{
1928 u32 a; 745 u32 a;
1929 int i = 0; 746 int i = 0;
@@ -1932,15 +749,6 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1932 749
1933 spin_lock_irqsave(&priv->lock, flags); 750 spin_lock_irqsave(&priv->lock, flags);
1934 751
1935#ifdef CONFIG_IWL4965_SENSITIVITY
1936 memset(&(priv->sensitivity_data), 0,
1937 sizeof(struct iwl4965_sensitivity_data));
1938 memset(&(priv->chain_noise_data), 0,
1939 sizeof(struct iwl4965_chain_noise_data));
1940 for (i = 0; i < NUM_RX_CHAINS; i++)
1941 priv->chain_noise_data.delta_gain_code[i] =
1942 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1943#endif /* CONFIG_IWL4965_SENSITIVITY*/
1944 ret = iwl_grab_nic_access(priv); 752 ret = iwl_grab_nic_access(priv);
1945 if (ret) { 753 if (ret) {
1946 spin_unlock_irqrestore(&priv->lock, flags); 754 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1949,10 +757,10 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1949 757
1950 /* Clear 4965's internal Tx Scheduler data base */ 758 /* Clear 4965's internal Tx Scheduler data base */
1951 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR); 759 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
1952 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET; 760 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
1953 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4) 761 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1954 iwl_write_targ_mem(priv, a, 0); 762 iwl_write_targ_mem(priv, a, 0);
1955 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4) 763 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
1956 iwl_write_targ_mem(priv, a, 0); 764 iwl_write_targ_mem(priv, a, 0);
1957 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4) 765 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
1958 iwl_write_targ_mem(priv, a, 0); 766 iwl_write_targ_mem(priv, a, 0);
@@ -1974,160 +782,109 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1974 782
1975 /* Max Tx Window size for Scheduler-ACK mode */ 783 /* Max Tx Window size for Scheduler-ACK mode */
1976 iwl_write_targ_mem(priv, priv->scd_base_addr + 784 iwl_write_targ_mem(priv, priv->scd_base_addr +
1977 SCD_CONTEXT_QUEUE_OFFSET(i), 785 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
1978 (SCD_WIN_SIZE << 786 (SCD_WIN_SIZE <<
1979 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 787 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1980 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 788 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1981 789
1982 /* Frame limit */ 790 /* Frame limit */
1983 iwl_write_targ_mem(priv, priv->scd_base_addr + 791 iwl_write_targ_mem(priv, priv->scd_base_addr +
1984 SCD_CONTEXT_QUEUE_OFFSET(i) + 792 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
1985 sizeof(u32), 793 sizeof(u32),
1986 (SCD_FRAME_LIMIT << 794 (SCD_FRAME_LIMIT <<
1987 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 795 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1988 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 796 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1989 797
1990 } 798 }
1991 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, 799 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
1992 (1 << priv->hw_params.max_txq_num) - 1); 800 (1 << priv->hw_params.max_txq_num) - 1);
1993 801
1994 /* Activate all Tx DMA/FIFO channels */ 802 /* Activate all Tx DMA/FIFO channels */
1995 iwl_write_prph(priv, IWL49_SCD_TXFACT, 803 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
1996 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1997 804
1998 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 805 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1999 806
2000 /* Map each Tx/cmd queue to its corresponding fifo */ 807 /* Map each Tx/cmd queue to its corresponding fifo */
2001 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 808 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
2002 int ac = default_queue_to_tx_fifo[i]; 809 int ac = default_queue_to_tx_fifo[i];
2003 iwl4965_txq_ctx_activate(priv, i); 810 iwl_txq_ctx_activate(priv, i);
2004 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 811 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
2005 } 812 }
2006 813
2007 iwl_release_nic_access(priv); 814 iwl_release_nic_access(priv);
2008 spin_unlock_irqrestore(&priv->lock, flags); 815 spin_unlock_irqrestore(&priv->lock, flags);
2009 816
2010 /* Ask for statistics now, the uCode will send statistics notification
2011 * periodically after association */
2012 iwl_send_statistics_request(priv, CMD_ASYNC);
2013 return ret; 817 return ret;
2014} 818}
2015 819
820static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
821 .min_nrg_cck = 97,
822 .max_nrg_cck = 0,
823
824 .auto_corr_min_ofdm = 85,
825 .auto_corr_min_ofdm_mrc = 170,
826 .auto_corr_min_ofdm_x1 = 105,
827 .auto_corr_min_ofdm_mrc_x1 = 220,
828
829 .auto_corr_max_ofdm = 120,
830 .auto_corr_max_ofdm_mrc = 210,
831 .auto_corr_max_ofdm_x1 = 140,
832 .auto_corr_max_ofdm_mrc_x1 = 270,
833
834 .auto_corr_min_cck = 125,
835 .auto_corr_max_cck = 200,
836 .auto_corr_min_cck_mrc = 200,
837 .auto_corr_max_cck_mrc = 400,
838
839 .nrg_th_cck = 100,
840 .nrg_th_ofdm = 100,
841};
842
2016/** 843/**
2017 * iwl4965_hw_set_hw_params 844 * iwl4965_hw_set_hw_params
2018 * 845 *
2019 * Called when initializing driver 846 * Called when initializing driver
2020 */ 847 */
2021int iwl4965_hw_set_hw_params(struct iwl_priv *priv) 848static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
2022{ 849{
2023 850
2024 if ((priv->cfg->mod_params->num_of_queues > IWL4965_MAX_NUM_QUEUES) || 851 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
2025 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 852 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
2026 IWL_ERROR("invalid queues_num, should be between %d and %d\n", 853 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
2027 IWL_MIN_NUM_QUEUES, IWL4965_MAX_NUM_QUEUES); 854 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
2028 return -EINVAL; 855 return -EINVAL;
2029 } 856 }
2030 857
2031 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 858 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
2032 priv->hw_params.tx_cmd_len = sizeof(struct iwl4965_tx_cmd); 859 priv->hw_params.first_ampdu_q = IWL49_FIRST_AMPDU_QUEUE;
2033 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2034 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2035 if (priv->cfg->mod_params->amsdu_size_8K)
2036 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
2037 else
2038 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
2039 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
2040 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 860 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
2041 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 861 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
862 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
863 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
864 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
865 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);
2042 866
2043 priv->hw_params.tx_chains_num = 2; 867 priv->hw_params.tx_chains_num = 2;
2044 priv->hw_params.rx_chains_num = 2; 868 priv->hw_params.rx_chains_num = 2;
2045 priv->hw_params.valid_tx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); 869 priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
2046 priv->hw_params.valid_rx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); 870 priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
2047 871 priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
2048 return 0;
2049}
2050
2051/**
2052 * iwl4965_hw_txq_ctx_free - Free TXQ Context
2053 *
2054 * Destroy all TX DMA queues and structures
2055 */
2056void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
2057{
2058 int txq_id;
2059
2060 /* Tx queues */
2061 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
2062 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
2063
2064 /* Keep-warm buffer */
2065 iwl4965_kw_free(priv);
2066}
2067
2068/**
2069 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
2070 *
2071 * Does NOT advance any TFD circular buffer read/write indexes
2072 * Does NOT free the TFD itself (which is within circular buffer)
2073 */
2074int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
2075{
2076 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
2077 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
2078 struct pci_dev *dev = priv->pci_dev;
2079 int i;
2080 int counter = 0;
2081 int index, is_odd;
2082 872
2083 /* Host command buffers stay mapped in memory, nothing to clean */ 873 priv->hw_params.sens = &iwl4965_sensitivity;
2084 if (txq->q.id == IWL_CMD_QUEUE_NUM)
2085 return 0;
2086
2087 /* Sanity check on number of chunks */
2088 counter = IWL_GET_BITS(*bd, num_tbs);
2089 if (counter > MAX_NUM_OF_TBS) {
2090 IWL_ERROR("Too many chunks: %i\n", counter);
2091 /* @todo issue fatal error, it is quite serious situation */
2092 return 0;
2093 }
2094 874
2095 /* Unmap chunks, if any.
2096 * TFD info for odd chunks is different format than for even chunks. */
2097 for (i = 0; i < counter; i++) {
2098 index = i / 2;
2099 is_odd = i & 0x1;
2100
2101 if (is_odd)
2102 pci_unmap_single(
2103 dev,
2104 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
2105 (IWL_GET_BITS(bd->pa[index],
2106 tb2_addr_hi20) << 16),
2107 IWL_GET_BITS(bd->pa[index], tb2_len),
2108 PCI_DMA_TODEVICE);
2109
2110 else if (i > 0)
2111 pci_unmap_single(dev,
2112 le32_to_cpu(bd->pa[index].tb1_addr),
2113 IWL_GET_BITS(bd->pa[index], tb1_len),
2114 PCI_DMA_TODEVICE);
2115
2116 /* Free SKB, if any, for this chunk */
2117 if (txq->txb[txq->q.read_ptr].skb[i]) {
2118 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
2119
2120 dev_kfree_skb(skb);
2121 txq->txb[txq->q.read_ptr].skb[i] = NULL;
2122 }
2123 }
2124 return 0; 875 return 0;
2125} 876}
2126 877
2127int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) 878/* set card power command */
879static int iwl4965_set_power(struct iwl_priv *priv,
880 void *cmd)
2128{ 881{
2129 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n"); 882 int ret = 0;
2130 return -EINVAL; 883
884 ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
885 sizeof(struct iwl4965_powertable_cmd),
886 cmd, NULL);
887 return ret;
2131} 888}
2132 889
2133static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) 890static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
@@ -2179,20 +936,6 @@ static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
2179 return comp; 936 return comp;
2180} 937}
2181 938
2182static const struct iwl_channel_info *
2183iwl4965_get_channel_txpower_info(struct iwl_priv *priv,
2184 enum ieee80211_band band, u16 channel)
2185{
2186 const struct iwl_channel_info *ch_info;
2187
2188 ch_info = iwl_get_channel_info(priv, band, channel);
2189
2190 if (!is_channel_valid(ch_info))
2191 return NULL;
2192
2193 return ch_info;
2194}
2195
2196static s32 iwl4965_get_tx_atten_grp(u16 channel) 939static s32 iwl4965_get_tx_atten_grp(u16 channel)
2197{ 940{
2198 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && 941 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
@@ -2224,11 +967,11 @@ static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
2224 s32 b = -1; 967 s32 b = -1;
2225 968
2226 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { 969 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
2227 if (priv->eeprom.calib_info.band_info[b].ch_from == 0) 970 if (priv->calib_info->band_info[b].ch_from == 0)
2228 continue; 971 continue;
2229 972
2230 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from) 973 if ((channel >= priv->calib_info->band_info[b].ch_from)
2231 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to)) 974 && (channel <= priv->calib_info->band_info[b].ch_to))
2232 break; 975 break;
2233 } 976 }
2234 977
@@ -2256,14 +999,14 @@ static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
2256 * in channel number. 999 * in channel number.
2257 */ 1000 */
2258static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, 1001static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2259 struct iwl4965_eeprom_calib_ch_info *chan_info) 1002 struct iwl_eeprom_calib_ch_info *chan_info)
2260{ 1003{
2261 s32 s = -1; 1004 s32 s = -1;
2262 u32 c; 1005 u32 c;
2263 u32 m; 1006 u32 m;
2264 const struct iwl4965_eeprom_calib_measure *m1; 1007 const struct iwl_eeprom_calib_measure *m1;
2265 const struct iwl4965_eeprom_calib_measure *m2; 1008 const struct iwl_eeprom_calib_measure *m2;
2266 struct iwl4965_eeprom_calib_measure *omeas; 1009 struct iwl_eeprom_calib_measure *omeas;
2267 u32 ch_i1; 1010 u32 ch_i1;
2268 u32 ch_i2; 1011 u32 ch_i2;
2269 1012
@@ -2273,8 +1016,8 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2273 return -1; 1016 return -1;
2274 } 1017 }
2275 1018
2276 ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num; 1019 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
2277 ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num; 1020 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
2278 chan_info->ch_num = (u8) channel; 1021 chan_info->ch_num = (u8) channel;
2279 1022
2280 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", 1023 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
@@ -2282,9 +1025,9 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2282 1025
2283 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { 1026 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
2284 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { 1027 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
2285 m1 = &(priv->eeprom.calib_info.band_info[s].ch1. 1028 m1 = &(priv->calib_info->band_info[s].ch1.
2286 measurements[c][m]); 1029 measurements[c][m]);
2287 m2 = &(priv->eeprom.calib_info.band_info[s].ch2. 1030 m2 = &(priv->calib_info->band_info[s].ch2.
2288 measurements[c][m]); 1031 measurements[c][m]);
2289 omeas = &(chan_info->measurements[c][m]); 1032 omeas = &(chan_info->measurements[c][m]);
2290 1033
@@ -2603,8 +1346,8 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2603 int i; 1346 int i;
2604 int c; 1347 int c;
2605 const struct iwl_channel_info *ch_info = NULL; 1348 const struct iwl_channel_info *ch_info = NULL;
2606 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info; 1349 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
2607 const struct iwl4965_eeprom_calib_measure *measurement; 1350 const struct iwl_eeprom_calib_measure *measurement;
2608 s16 voltage; 1351 s16 voltage;
2609 s32 init_voltage; 1352 s32 init_voltage;
2610 s32 voltage_compensation; 1353 s32 voltage_compensation;
@@ -2616,30 +1359,17 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2616 s32 factory_actual_pwr[2]; 1359 s32 factory_actual_pwr[2];
2617 s32 power_index; 1360 s32 power_index;
2618 1361
2619 /* Sanity check requested level (dBm) */
2620 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
2621 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
2622 priv->user_txpower_limit);
2623 return -EINVAL;
2624 }
2625 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
2626 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
2627 priv->user_txpower_limit);
2628 return -EINVAL;
2629 }
2630
2631 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units 1362 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
2632 * are used for indexing into txpower table) */ 1363 * are used for indexing into txpower table) */
2633 user_target_power = 2 * priv->user_txpower_limit; 1364 user_target_power = 2 * priv->tx_power_user_lmt;
2634 1365
2635 /* Get current (RXON) channel, band, width */ 1366 /* Get current (RXON) channel, band, width */
2636 ch_info =
2637 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
2638
2639 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band, 1367 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2640 is_fat); 1368 is_fat);
2641 1369
2642 if (!ch_info) 1370 ch_info = iwl_get_channel_info(priv, priv->band, channel);
1371
1372 if (!is_channel_valid(ch_info))
2643 return -EINVAL; 1373 return -EINVAL;
2644 1374
2645 /* get txatten group, used to select 1) thermal txpower adjustment 1375 /* get txatten group, used to select 1) thermal txpower adjustment
@@ -2661,9 +1391,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2661 /* hardware txpower limits ... 1391 /* hardware txpower limits ...
2662 * saturation (clipping distortion) txpowers are in half-dBm */ 1392 * saturation (clipping distortion) txpowers are in half-dBm */
2663 if (band) 1393 if (band)
2664 saturation_power = priv->eeprom.calib_info.saturation_power24; 1394 saturation_power = priv->calib_info->saturation_power24;
2665 else 1395 else
2666 saturation_power = priv->eeprom.calib_info.saturation_power52; 1396 saturation_power = priv->calib_info->saturation_power52;
2667 1397
2668 if (saturation_power < IWL_TX_POWER_SATURATION_MIN || 1398 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2669 saturation_power > IWL_TX_POWER_SATURATION_MAX) { 1399 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
@@ -2693,7 +1423,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2693 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); 1423 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2694 1424
2695 /* calculate tx gain adjustment based on power supply voltage */ 1425 /* calculate tx gain adjustment based on power supply voltage */
2696 voltage = priv->eeprom.calib_info.voltage; 1426 voltage = priv->calib_info->voltage;
2697 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); 1427 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2698 voltage_compensation = 1428 voltage_compensation =
2699 iwl4965_get_voltage_compensation(voltage, init_voltage); 1429 iwl4965_get_voltage_compensation(voltage, init_voltage);
@@ -2840,12 +1570,12 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2840} 1570}
2841 1571
2842/** 1572/**
2843 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit 1573 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
2844 * 1574 *
2845 * Uses the active RXON for channel, band, and characteristics (fat, high) 1575 * Uses the active RXON for channel, band, and characteristics (fat, high)
2846 * The power limit is taken from priv->user_txpower_limit. 1576 * The power limit is taken from priv->tx_power_user_lmt.
2847 */ 1577 */
2848int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv) 1578static int iwl4965_send_tx_power(struct iwl_priv *priv)
2849{ 1579{
2850 struct iwl4965_txpowertable_cmd cmd = { 0 }; 1580 struct iwl4965_txpowertable_cmd cmd = { 0 };
2851 int ret; 1581 int ret;
@@ -2888,8 +1618,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
2888{ 1618{
2889 int ret = 0; 1619 int ret = 0;
2890 struct iwl4965_rxon_assoc_cmd rxon_assoc; 1620 struct iwl4965_rxon_assoc_cmd rxon_assoc;
2891 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon; 1621 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
2892 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon; 1622 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
2893 1623
2894 if ((rxon1->flags == rxon2->flags) && 1624 if ((rxon1->flags == rxon2->flags) &&
2895 (rxon1->filter_flags == rxon2->filter_flags) && 1625 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -2965,89 +1695,14 @@ int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2965 return rc; 1695 return rc;
2966} 1696}
2967 1697
2968#define RTS_HCCA_RETRY_LIMIT 3 1698static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
2969#define RTS_DFAULT_RETRY_LIMIT 60
2970
2971void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
2972 struct iwl_cmd *cmd,
2973 struct ieee80211_tx_control *ctrl,
2974 struct ieee80211_hdr *hdr, int sta_id,
2975 int is_hcca)
2976{
2977 struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
2978 u8 rts_retry_limit = 0;
2979 u8 data_retry_limit = 0;
2980 u16 fc = le16_to_cpu(hdr->frame_control);
2981 u8 rate_plcp;
2982 u16 rate_flags = 0;
2983 int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
2984
2985 rate_plcp = iwl4965_rates[rate_idx].plcp;
2986
2987 rts_retry_limit = (is_hcca) ?
2988 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2989
2990 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2991 rate_flags |= RATE_MCS_CCK_MSK;
2992
2993
2994 if (ieee80211_is_probe_response(fc)) {
2995 data_retry_limit = 3;
2996 if (data_retry_limit < rts_retry_limit)
2997 rts_retry_limit = data_retry_limit;
2998 } else
2999 data_retry_limit = IWL_DEFAULT_TX_RETRY;
3000
3001 if (priv->data_retry_limit != -1)
3002 data_retry_limit = priv->data_retry_limit;
3003
3004
3005 if (ieee80211_is_data(fc)) {
3006 tx->initial_rate_index = 0;
3007 tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
3008 } else {
3009 switch (fc & IEEE80211_FCTL_STYPE) {
3010 case IEEE80211_STYPE_AUTH:
3011 case IEEE80211_STYPE_DEAUTH:
3012 case IEEE80211_STYPE_ASSOC_REQ:
3013 case IEEE80211_STYPE_REASSOC_REQ:
3014 if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
3015 tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
3016 tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
3017 }
3018 break;
3019 default:
3020 break;
3021 }
3022
3023 /* Alternate between antenna A and B for successive frames */
3024 if (priv->use_ant_b_for_management_frame) {
3025 priv->use_ant_b_for_management_frame = 0;
3026 rate_flags |= RATE_MCS_ANT_B_MSK;
3027 } else {
3028 priv->use_ant_b_for_management_frame = 1;
3029 rate_flags |= RATE_MCS_ANT_A_MSK;
3030 }
3031 }
3032
3033 tx->rts_retry_limit = rts_retry_limit;
3034 tx->data_retry_limit = data_retry_limit;
3035 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
3036}
3037
3038int iwl4965_hw_get_rx_read(struct iwl_priv *priv)
3039{ 1699{
3040 struct iwl4965_shared *s = priv->shared_virt; 1700 struct iwl4965_shared *s = priv->shared_virt;
3041 return le32_to_cpu(s->rb_closed) & 0xFFF; 1701 return le32_to_cpu(s->rb_closed) & 0xFFF;
3042} 1702}
3043 1703
3044int iwl4965_hw_get_temperature(struct iwl_priv *priv)
3045{
3046 return priv->temperature;
3047}
3048
3049unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 1704unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3050 struct iwl4965_frame *frame, u8 rate) 1705 struct iwl_frame *frame, u8 rate)
3051{ 1706{
3052 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd; 1707 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
3053 unsigned int frame_size; 1708 unsigned int frame_size;
@@ -3060,7 +1715,7 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3060 1715
3061 frame_size = iwl4965_fill_beacon_frame(priv, 1716 frame_size = iwl4965_fill_beacon_frame(priv,
3062 tx_beacon_cmd->frame, 1717 tx_beacon_cmd->frame,
3063 iwl4965_broadcast_addr, 1718 iwl_bcast_addr,
3064 sizeof(frame->u) - sizeof(*tx_beacon_cmd)); 1719 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
3065 1720
3066 BUG_ON(frame_size > MAX_MPDU_SIZE); 1721 BUG_ON(frame_size > MAX_MPDU_SIZE);
@@ -3068,105 +1723,45 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3068 1723
3069 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP)) 1724 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
3070 tx_beacon_cmd->tx.rate_n_flags = 1725 tx_beacon_cmd->tx.rate_n_flags =
3071 iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK); 1726 iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
3072 else 1727 else
3073 tx_beacon_cmd->tx.rate_n_flags = 1728 tx_beacon_cmd->tx.rate_n_flags =
3074 iwl4965_hw_set_rate_n_flags(rate, 0); 1729 iwl_hw_set_rate_n_flags(rate, 0);
3075 1730
3076 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | 1731 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
3077 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK); 1732 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
3078 return (sizeof(*tx_beacon_cmd) + frame_size); 1733 return (sizeof(*tx_beacon_cmd) + frame_size);
3079} 1734}
3080 1735
3081/* 1736static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
3082 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
3083 * given Tx queue, and enable the DMA channel used for that queue.
3084 *
3085 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
3086 * channels supported in hardware.
3087 */
3088int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
3089{
3090 int rc;
3091 unsigned long flags;
3092 int txq_id = txq->q.id;
3093
3094 spin_lock_irqsave(&priv->lock, flags);
3095 rc = iwl_grab_nic_access(priv);
3096 if (rc) {
3097 spin_unlock_irqrestore(&priv->lock, flags);
3098 return rc;
3099 }
3100
3101 /* Circular buffer (TFD queue in DRAM) physical base address */
3102 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
3103 txq->q.dma_addr >> 8);
3104
3105 /* Enable DMA channel, using same id as for TFD queue */
3106 iwl_write_direct32(
3107 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
3108 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3109 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
3110 iwl_release_nic_access(priv);
3111 spin_unlock_irqrestore(&priv->lock, flags);
3112
3113 return 0;
3114}
3115
3116int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
3117 dma_addr_t addr, u16 len)
3118{ 1737{
3119 int index, is_odd; 1738 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
3120 struct iwl4965_tfd_frame *tfd = ptr; 1739 sizeof(struct iwl4965_shared),
3121 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); 1740 &priv->shared_phys);
3122 1741 if (!priv->shared_virt)
3123 /* Each TFD can point to a maximum 20 Tx buffers */ 1742 return -ENOMEM;
3124 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
3125 IWL_ERROR("Error can not send more than %d chunks\n",
3126 MAX_NUM_OF_TBS);
3127 return -EINVAL;
3128 }
3129
3130 index = num_tbs / 2;
3131 is_odd = num_tbs & 0x1;
3132 1743
3133 if (!is_odd) { 1744 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
3134 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
3135 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
3136 iwl_get_dma_hi_address(addr));
3137 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
3138 } else {
3139 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
3140 (u32) (addr & 0xffff));
3141 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
3142 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
3143 }
3144 1745
3145 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1); 1746 priv->rb_closed_offset = offsetof(struct iwl4965_shared, rb_closed);
3146 1747
3147 return 0; 1748 return 0;
3148} 1749}
3149 1750
3150static void iwl4965_hw_card_show_info(struct iwl_priv *priv) 1751static void iwl4965_free_shared_mem(struct iwl_priv *priv)
3151{ 1752{
3152 u16 hw_version = priv->eeprom.board_revision_4965; 1753 if (priv->shared_virt)
3153 1754 pci_free_consistent(priv->pci_dev,
3154 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n", 1755 sizeof(struct iwl4965_shared),
3155 ((hw_version >> 8) & 0x0F), 1756 priv->shared_virt,
3156 ((hw_version >> 8) >> 4), (hw_version & 0x00FF)); 1757 priv->shared_phys);
3157
3158 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
3159 priv->eeprom.board_pba_number_4965);
3160} 1758}
3161 1759
3162#define IWL_TX_CRC_SIZE 4
3163#define IWL_TX_DELIMITER_SIZE 4
3164
3165/** 1760/**
3166 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 1761 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
3167 */ 1762 */
3168static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv, 1763static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
3169 struct iwl4965_tx_queue *txq, 1764 struct iwl_tx_queue *txq,
3170 u16 byte_cnt) 1765 u16 byte_cnt)
3171{ 1766{
3172 int len; 1767 int len;
@@ -3180,50 +1775,13 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
3180 tfd_offset[txq->q.write_ptr], byte_cnt, len); 1775 tfd_offset[txq->q.write_ptr], byte_cnt, len);
3181 1776
3182 /* If within first 64 entries, duplicate at end */ 1777 /* If within first 64 entries, duplicate at end */
3183 if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE) 1778 if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
3184 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 1779 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
3185 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr], 1780 tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
3186 byte_cnt, len); 1781 byte_cnt, len);
3187} 1782}
3188 1783
3189/** 1784/**
3190 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
3191 *
3192 * Selects how many and which Rx receivers/antennas/chains to use.
3193 * This should not be used for scan command ... it puts data in wrong place.
3194 */
3195void iwl4965_set_rxon_chain(struct iwl_priv *priv)
3196{
3197 u8 is_single = is_single_stream(priv);
3198 u8 idle_state, rx_state;
3199
3200 priv->staging_rxon.rx_chain = 0;
3201 rx_state = idle_state = 3;
3202
3203 /* Tell uCode which antennas are actually connected.
3204 * Before first association, we assume all antennas are connected.
3205 * Just after first association, iwl4965_noise_calibration()
3206 * checks which antennas actually *are* connected. */
3207 priv->staging_rxon.rx_chain |=
3208 cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
3209
3210 /* How many receivers should we use? */
3211 iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
3212 priv->staging_rxon.rx_chain |=
3213 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
3214 priv->staging_rxon.rx_chain |=
3215 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
3216
3217 if (!is_single && (rx_state >= 2) &&
3218 !test_bit(STATUS_POWER_PMI, &priv->status))
3219 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
3220 else
3221 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
3222
3223 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
3224}
3225
3226/**
3227 * sign_extend - Sign extend a value using specified bit as sign-bit 1785 * sign_extend - Sign extend a value using specified bit as sign-bit
3228 * 1786 *
3229 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1 1787 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
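As a reference for the sign_extend() helper documented just above (its body is elided from the hunk below), the usual shift-based formulation reproduces the documented example, sign_extend(9, 3) == -7, since bit 3 of 1001b is set. This is an illustrative sketch using the kernel's <linux/types.h> fixed-width types; the name is hypothetical and it is not claimed to be the exact body in iwl-4965.c.

	static s32 sign_extend_sketch(u32 oper, int sign_bit)
	{
		u8 shift = 31 - sign_bit;	/* move the chosen sign bit up to bit 31 */

		/* the arithmetic right shift then copies it back down */
		return (s32)(oper << shift) >> shift;
	}

	/* sign_extend_sketch(9, 3) == -7; the driver uses the real helper below
	 * to widen the 23-bit temperature field from the statistics report. */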
@@ -3240,12 +1798,12 @@ static s32 sign_extend(u32 oper, int index)
3240} 1798}
3241 1799
3242/** 1800/**
3243 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin) 1801 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
3244 * @statistics: Provides the temperature reading from the uCode 1802 * @statistics: Provides the temperature reading from the uCode
3245 * 1803 *
3246 * A return of <0 indicates bogus data in the statistics 1804 * A return of <0 indicates bogus data in the statistics
3247 */ 1805 */
3248int iwl4965_get_temperature(const struct iwl_priv *priv) 1806static int iwl4965_hw_get_temperature(const struct iwl_priv *priv)
3249{ 1807{
3250 s32 temperature; 1808 s32 temperature;
3251 s32 vt; 1809 s32 vt;
@@ -3280,8 +1838,7 @@ int iwl4965_get_temperature(const struct iwl_priv *priv)
3280 vt = sign_extend( 1838 vt = sign_extend(
3281 le32_to_cpu(priv->statistics.general.temperature), 23); 1839 le32_to_cpu(priv->statistics.general.temperature), 23);
3282 1840
3283 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", 1841 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
3284 R1, R2, R3, vt);
3285 1842
3286 if (R3 == R1) { 1843 if (R3 == R1) {
3287 IWL_ERROR("Calibration conflict R1 == R3\n"); 1844 IWL_ERROR("Calibration conflict R1 == R3\n");
@@ -3292,11 +1849,10 @@ int iwl4965_get_temperature(const struct iwl_priv *priv)
3292 * Add offset to center the adjustment around 0 degrees Centigrade. */ 1849 * Add offset to center the adjustment around 0 degrees Centigrade. */
3293 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); 1850 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
3294 temperature /= (R3 - R1); 1851 temperature /= (R3 - R1);
3295 temperature = (temperature * 97) / 100 + 1852 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
3296 TEMPERATURE_CALIB_KELVIN_OFFSET;
3297 1853
3298 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature, 1854 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n",
3299 KELVIN_TO_CELSIUS(temperature)); 1855 temperature, KELVIN_TO_CELSIUS(temperature));
3300 1856
3301 return temperature; 1857 return temperature;
3302} 1858}
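Written out, the conversion in the hunk above is T(Kelvin) = (97/100) * TEMPERATURE_CALIB_A_VAL * (vt - R2) / (R3 - R1) + TEMPERATURE_CALIB_KELVIN_OFFSET, with R1-R3 taken from the uCode "alive" calibration data and vt from the sign-extended statistics temperature. Below is a free-standing sketch of just the arithmetic; the calibration constants are passed in as parameters because their values are defined in headers outside this hunk, and the function name and signature are illustrative only.

	static int calib_temp_to_kelvin(int vt, int r1, int r2, int r3,
					int calib_a, int kelvin_offset)
	{
		int t;

		if (r3 == r1)	/* the driver reports this as a calibration conflict */
			return -1;

		t = calib_a * (vt - r2);
		t /= (r3 - r1);

		/* scale by 97/100 and recenter around 0 degrees C, as above */
		return (t * 97) / 100 + kelvin_offset;
	}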
@@ -3343,89 +1899,11 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
3343 return 1; 1899 return 1;
3344} 1900}
3345 1901
3346/* Calculate noise level, based on measurements during network silence just 1902static void iwl4965_temperature_calib(struct iwl_priv *priv)
3347 * before arriving beacon. This measurement can be done only if we know
3348 * exactly when to expect beacons, therefore only when we're associated. */
3349static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
3350{
3351 struct statistics_rx_non_phy *rx_info
3352 = &(priv->statistics.rx.general);
3353 int num_active_rx = 0;
3354 int total_silence = 0;
3355 int bcn_silence_a =
3356 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
3357 int bcn_silence_b =
3358 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
3359 int bcn_silence_c =
3360 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
3361
3362 if (bcn_silence_a) {
3363 total_silence += bcn_silence_a;
3364 num_active_rx++;
3365 }
3366 if (bcn_silence_b) {
3367 total_silence += bcn_silence_b;
3368 num_active_rx++;
3369 }
3370 if (bcn_silence_c) {
3371 total_silence += bcn_silence_c;
3372 num_active_rx++;
3373 }
3374
3375 /* Average among active antennas */
3376 if (num_active_rx)
3377 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
3378 else
3379 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3380
3381 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
3382 bcn_silence_a, bcn_silence_b, bcn_silence_c,
3383 priv->last_rx_noise);
3384}
3385
3386void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
3387{ 1903{
3388 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3389 int change;
3390 s32 temp; 1904 s32 temp;
3391 1905
3392 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n", 1906 temp = iwl4965_hw_get_temperature(priv);
3393 (int)sizeof(priv->statistics), pkt->len);
3394
3395 change = ((priv->statistics.general.temperature !=
3396 pkt->u.stats.general.temperature) ||
3397 ((priv->statistics.flag &
3398 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
3399 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
3400
3401 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
3402
3403 set_bit(STATUS_STATISTICS, &priv->status);
3404
3405 /* Reschedule the statistics timer to occur in
3406 * REG_RECALIB_PERIOD seconds to ensure we get a
3407 * thermal update even if the uCode doesn't give
3408 * us one */
3409 mod_timer(&priv->statistics_periodic, jiffies +
3410 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
3411
3412 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3413 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
3414 iwl4965_rx_calc_noise(priv);
3415#ifdef CONFIG_IWL4965_SENSITIVITY
3416 queue_work(priv->workqueue, &priv->sensitivity_work);
3417#endif
3418 }
3419
3420 iwl_leds_background(priv);
3421
3422 /* If the hardware hasn't reported a change in
3423 * temperature then don't bother computing a
3424 * calibrated temperature value */
3425 if (!change)
3426 return;
3427
3428 temp = iwl4965_get_temperature(priv);
3429 if (temp < 0) 1907 if (temp < 0)
3430 return; 1908 return;
3431 1909
@@ -3444,810 +1922,12 @@ void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffe
3444 priv->temperature = temp; 1922 priv->temperature = temp;
3445 set_bit(STATUS_TEMPERATURE, &priv->status); 1923 set_bit(STATUS_TEMPERATURE, &priv->status);
3446 1924
3447 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && 1925 if (!priv->disable_tx_power_cal &&
3448 iwl4965_is_temp_calib_needed(priv)) 1926 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1927 iwl4965_is_temp_calib_needed(priv))
3449 queue_work(priv->workqueue, &priv->txpower_work); 1928 queue_work(priv->workqueue, &priv->txpower_work);
3450} 1929}
3451 1930
3452static void iwl4965_add_radiotap(struct iwl_priv *priv,
3453 struct sk_buff *skb,
3454 struct iwl4965_rx_phy_res *rx_start,
3455 struct ieee80211_rx_status *stats,
3456 u32 ampdu_status)
3457{
3458 s8 signal = stats->ssi;
3459 s8 noise = 0;
3460 int rate = stats->rate_idx;
3461 u64 tsf = stats->mactime;
3462 __le16 antenna;
3463 __le16 phy_flags_hw = rx_start->phy_flags;
3464 struct iwl4965_rt_rx_hdr {
3465 struct ieee80211_radiotap_header rt_hdr;
3466 __le64 rt_tsf; /* TSF */
3467 u8 rt_flags; /* radiotap packet flags */
3468 u8 rt_rate; /* rate in 500kb/s */
3469 __le16 rt_channelMHz; /* channel in MHz */
3470 __le16 rt_chbitmask; /* channel bitfield */
3471 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
3472 s8 rt_dbmnoise;
3473 u8 rt_antenna; /* antenna number */
3474 } __attribute__ ((packed)) *iwl4965_rt;
3475
3476 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
3477 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
3478 if (net_ratelimit())
3479 printk(KERN_ERR "not enough headroom [%d] for "
3480 "radiotap head [%zd]\n",
3481 skb_headroom(skb), sizeof(*iwl4965_rt));
3482 return;
3483 }
3484
3485 /* put radiotap header in front of 802.11 header and data */
3486 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
3487
3488 /* initialise radiotap header */
3489 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3490 iwl4965_rt->rt_hdr.it_pad = 0;
3491
3492 /* total header + data */
3493 put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
3494 &iwl4965_rt->rt_hdr.it_len);
3495
3496 /* Indicate all the fields we add to the radiotap header */
3497 put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3498 (1 << IEEE80211_RADIOTAP_FLAGS) |
3499 (1 << IEEE80211_RADIOTAP_RATE) |
3500 (1 << IEEE80211_RADIOTAP_CHANNEL) |
3501 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3502 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3503 (1 << IEEE80211_RADIOTAP_ANTENNA)),
3504 &iwl4965_rt->rt_hdr.it_present);
3505
3506 /* Zero the flags, we'll add to them as we go */
3507 iwl4965_rt->rt_flags = 0;
3508
3509 put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);
3510
3511 iwl4965_rt->rt_dbmsignal = signal;
3512 iwl4965_rt->rt_dbmnoise = noise;
3513
3514 /* Convert the channel frequency and set the flags */
3515 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
3516 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
3517 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3518 IEEE80211_CHAN_5GHZ),
3519 &iwl4965_rt->rt_chbitmask);
3520 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
3521 put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
3522 IEEE80211_CHAN_2GHZ),
3523 &iwl4965_rt->rt_chbitmask);
3524 else /* 802.11g */
3525 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3526 IEEE80211_CHAN_2GHZ),
3527 &iwl4965_rt->rt_chbitmask);
3528
3529 if (rate == -1)
3530 iwl4965_rt->rt_rate = 0;
3531 else {
3532 if (stats->band == IEEE80211_BAND_5GHZ)
3533 rate += IWL_FIRST_OFDM_RATE;
3534
3535 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
3536 }
3537
3538 /*
3539 * "antenna number"
3540 *
3541 * It seems that the antenna field in the phy flags value
3542 * is actually a bitfield. This is undefined by radiotap,
3543 * it wants an actual antenna number but I always get "7"
3544 * for most legacy frames I receive indicating that the
3545 * same frame was received on all three RX chains.
3546 *
3547 * I think this field should be removed in favour of a
3548 * new 802.11n radiotap field "RX chains" that is defined
3549 * as a bitmask.
3550 */
3551 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
3552 iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
3553
3554 /* set the preamble flag if appropriate */
3555 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
3556 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3557
3558 stats->flag |= RX_FLAG_RADIOTAP;
3559}
3560
3561static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
3562{
3563 /* 0 - mgmt, 1 - cnt, 2 - data */
3564 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
3565 priv->rx_stats[idx].cnt++;
3566 priv->rx_stats[idx].bytes += len;
3567}
3568
3569static u32 iwl4965_translate_rx_status(u32 decrypt_in)
3570{
3571 u32 decrypt_out = 0;
3572
3573 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
3574 RX_RES_STATUS_STATION_FOUND)
3575 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
3576 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
3577
3578 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
3579
3580 /* packet was not encrypted */
3581 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3582 RX_RES_STATUS_SEC_TYPE_NONE)
3583 return decrypt_out;
3584
3585 /* packet was encrypted with unknown alg */
3586 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3587 RX_RES_STATUS_SEC_TYPE_ERR)
3588 return decrypt_out;
3589
3590 /* decryption was not done in HW */
3591 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
3592 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
3593 return decrypt_out;
3594
3595 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
3596
3597 case RX_RES_STATUS_SEC_TYPE_CCMP:
3598 /* alg is CCM: check MIC only */
3599 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
3600 /* Bad MIC */
3601 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3602 else
3603 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3604
3605 break;
3606
3607 case RX_RES_STATUS_SEC_TYPE_TKIP:
3608 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
3609 /* Bad TTAK */
3610 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
3611 break;
3612 }
3613 /* fall through if TTAK OK */
3614 default:
3615 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
3616 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3617 else
3618 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3619 break;
3620 };
3621
3622 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
3623 decrypt_in, decrypt_out);
3624
3625 return decrypt_out;
3626}
3627
3628static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3629 int include_phy,
3630 struct iwl4965_rx_mem_buffer *rxb,
3631 struct ieee80211_rx_status *stats)
3632{
3633 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
3634 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3635 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3636 struct ieee80211_hdr *hdr;
3637 u16 len;
3638 __le32 *rx_end;
3639 unsigned int skblen;
3640 u32 ampdu_status;
3641 u32 ampdu_status_legacy;
3642
3643 if (!include_phy && priv->last_phy_res[0])
3644 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3645
3646 if (!rx_start) {
3647 IWL_ERROR("MPDU frame without a PHY data\n");
3648 return;
3649 }
3650 if (include_phy) {
3651 hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
3652 rx_start->cfg_phy_cnt);
3653
3654 len = le16_to_cpu(rx_start->byte_count);
3655
3656 rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
3657 sizeof(struct iwl4965_rx_phy_res) +
3658 rx_start->cfg_phy_cnt + len);
3659
3660 } else {
3661 struct iwl4965_rx_mpdu_res_start *amsdu =
3662 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3663
3664 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
3665 sizeof(struct iwl4965_rx_mpdu_res_start));
3666 len = le16_to_cpu(amsdu->byte_count);
3667 rx_start->byte_count = amsdu->byte_count;
3668 rx_end = (__le32 *) (((u8 *) hdr) + len);
3669 }
3670 if (len > priv->hw_params.max_pkt_size || len < 16) {
3671 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
3672 return;
3673 }
3674
3675 ampdu_status = le32_to_cpu(*rx_end);
3676 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
3677
3678 if (!include_phy) {
3679 /* New status scheme, need to translate */
3680 ampdu_status_legacy = ampdu_status;
3681 ampdu_status = iwl4965_translate_rx_status(ampdu_status);
3682 }
3683
3684 /* start from MAC */
3685 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
3686 skb_put(rxb->skb, len); /* end where data ends */
3687
3688 /* We only process data packets if the interface is open */
3689 if (unlikely(!priv->is_open)) {
3690 IWL_DEBUG_DROP_LIMIT
3691 ("Dropping packet while interface is not open.\n");
3692 return;
3693 }
3694
3695 stats->flag = 0;
3696 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3697
3698 if (!priv->cfg->mod_params->sw_crypto)
3699 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);
3700
3701 if (priv->add_radiotap)
3702 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
3703
3704 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
3705 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3706 priv->alloc_rxb_skb--;
3707 rxb->skb = NULL;
3708}
3709
3710/* Calc max signal level (dBm) among 3 possible receivers */
3711static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3712{
3713 /* data from PHY/DSP regarding signal strength, etc.,
3714 * contents are always there, not configurable by host. */
3715 struct iwl4965_rx_non_cfg_phy *ncphy =
3716 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
3717 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
3718 >> IWL_AGC_DB_POS;
3719
3720 u32 valid_antennae =
3721 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
3722 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
3723 u8 max_rssi = 0;
3724 u32 i;
3725
3726 /* Find max rssi among 3 possible receivers.
3727 * These values are measured by the digital signal processor (DSP).
3728 * They should stay fairly constant even as the signal strength varies,
3729 * if the radio's automatic gain control (AGC) is working right.
3730 * AGC value (see below) will provide the "interesting" info. */
3731 for (i = 0; i < 3; i++)
3732 if (valid_antennae & (1 << i))
3733 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
3734
3735 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
3736 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
3737 max_rssi, agc);
3738
3739 /* dBm = max_rssi dB - agc dB - constant.
3740 * Higher AGC (higher radio gain) means lower signal. */
3741 return (max_rssi - agc - IWL_RSSI_OFFSET);
3742}
3743
3744#ifdef CONFIG_IWL4965_HT
3745
3746void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
3747 struct ieee80211_ht_info *ht_info,
3748 enum ieee80211_band band)
3749{
3750 ht_info->cap = 0;
3751 memset(ht_info->supp_mcs_set, 0, 16);
3752
3753 ht_info->ht_supported = 1;
3754
3755 if (band == IEEE80211_BAND_5GHZ) {
3756 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3757 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3758 ht_info->supp_mcs_set[4] = 0x01;
3759 }
3760 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
3761 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3762 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3763 (IWL_MIMO_PS_NONE << 2));
3764
3765 if (priv->cfg->mod_params->amsdu_size_8K)
3766 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
3767
3768 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3769 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3770
3771 ht_info->supp_mcs_set[0] = 0xFF;
3772 ht_info->supp_mcs_set[1] = 0xFF;
3773}
3774#endif /* CONFIG_IWL4965_HT */
3775
3776static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
3777{
3778 unsigned long flags;
3779
3780 spin_lock_irqsave(&priv->sta_lock, flags);
3781 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
3782 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3783 priv->stations[sta_id].sta.sta.modify_mask = 0;
3784 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3785 spin_unlock_irqrestore(&priv->sta_lock, flags);
3786
3787 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3788}
3789
3790static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
3791{
3792 /* FIXME: need locking over ps_status ??? */
3793 u8 sta_id = iwl4965_hw_find_station(priv, addr);
3794
3795 if (sta_id != IWL_INVALID_STATION) {
3796 u8 sta_awake = priv->stations[sta_id].
3797 ps_status == STA_PS_STATUS_WAKE;
3798
3799 if (sta_awake && ps_bit)
3800 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
3801 else if (!sta_awake && !ps_bit) {
3802 iwl4965_sta_modify_ps_wake(priv, sta_id);
3803 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
3804 }
3805 }
3806}
3807#ifdef CONFIG_IWLWIFI_DEBUG
3808
3809/**
3810 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
3811 *
3812 * You may hack this function to show different aspects of received frames,
3813 * including selective frame dumps.
3814 * group100 parameter selects whether to show 1 out of 100 good frames.
3815 *
3816 * TODO: This was originally written for 3945, need to audit for
3817 * proper operation with 4965.
3818 */
3819static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3820 struct iwl4965_rx_packet *pkt,
3821 struct ieee80211_hdr *header, int group100)
3822{
3823 u32 to_us;
3824 u32 print_summary = 0;
3825 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
3826 u32 hundred = 0;
3827 u32 dataframe = 0;
3828 u16 fc;
3829 u16 seq_ctl;
3830 u16 channel;
3831 u16 phy_flags;
3832 int rate_sym;
3833 u16 length;
3834 u16 status;
3835 u16 bcn_tmr;
3836 u32 tsf_low;
3837 u64 tsf;
3838 u8 rssi;
3839 u8 agc;
3840 u16 sig_avg;
3841 u16 noise_diff;
3842 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
3843 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
3844 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
3845 u8 *data = IWL_RX_DATA(pkt);
3846
3847 if (likely(!(iwl_debug_level & IWL_DL_RX)))
3848 return;
3849
3850 /* MAC header */
3851 fc = le16_to_cpu(header->frame_control);
3852 seq_ctl = le16_to_cpu(header->seq_ctrl);
3853
3854 /* metadata */
3855 channel = le16_to_cpu(rx_hdr->channel);
3856 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
3857 rate_sym = rx_hdr->rate;
3858 length = le16_to_cpu(rx_hdr->len);
3859
3860 /* end-of-frame status and timestamp */
3861 status = le32_to_cpu(rx_end->status);
3862 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
3863 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
3864 tsf = le64_to_cpu(rx_end->timestamp);
3865
3866 /* signal statistics */
3867 rssi = rx_stats->rssi;
3868 agc = rx_stats->agc;
3869 sig_avg = le16_to_cpu(rx_stats->sig_avg);
3870 noise_diff = le16_to_cpu(rx_stats->noise_diff);
3871
3872 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
3873
3874 /* if data frame is to us and all is good,
3875 * (optionally) print summary for only 1 out of every 100 */
3876 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
3877 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
3878 dataframe = 1;
3879 if (!group100)
3880 print_summary = 1; /* print each frame */
3881 else if (priv->framecnt_to_us < 100) {
3882 priv->framecnt_to_us++;
3883 print_summary = 0;
3884 } else {
3885 priv->framecnt_to_us = 0;
3886 print_summary = 1;
3887 hundred = 1;
3888 }
3889 } else {
3890 /* print summary for all other frames */
3891 print_summary = 1;
3892 }
3893
3894 if (print_summary) {
3895 char *title;
3896 int rate_idx;
3897 u32 bitrate;
3898
3899 if (hundred)
3900 title = "100Frames";
3901 else if (fc & IEEE80211_FCTL_RETRY)
3902 title = "Retry";
3903 else if (ieee80211_is_assoc_response(fc))
3904 title = "AscRsp";
3905 else if (ieee80211_is_reassoc_response(fc))
3906 title = "RasRsp";
3907 else if (ieee80211_is_probe_response(fc)) {
3908 title = "PrbRsp";
3909 print_dump = 1; /* dump frame contents */
3910 } else if (ieee80211_is_beacon(fc)) {
3911 title = "Beacon";
3912 print_dump = 1; /* dump frame contents */
3913 } else if (ieee80211_is_atim(fc))
3914 title = "ATIM";
3915 else if (ieee80211_is_auth(fc))
3916 title = "Auth";
3917 else if (ieee80211_is_deauth(fc))
3918 title = "DeAuth";
3919 else if (ieee80211_is_disassoc(fc))
3920 title = "DisAssoc";
3921 else
3922 title = "Frame";
3923
3924 rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
3925 if (unlikely(rate_idx == -1))
3926 bitrate = 0;
3927 else
3928 bitrate = iwl4965_rates[rate_idx].ieee / 2;
3929
3930 /* print frame summary.
3931 * MAC addresses show just the last byte (for brevity),
3932 * but you can hack it to show more, if you'd like to. */
3933 if (dataframe)
3934 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
3935 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
3936 title, fc, header->addr1[5],
3937 length, rssi, channel, bitrate);
3938 else {
3939 /* src/dst addresses assume managed mode */
3940 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
3941 "src=0x%02x, rssi=%u, tim=%lu usec, "
3942 "phy=0x%02x, chnl=%d\n",
3943 title, fc, header->addr1[5],
3944 header->addr3[5], rssi,
3945 tsf_low - priv->scan_start_tsf,
3946 phy_flags, channel);
3947 }
3948 }
3949 if (print_dump)
3950 iwl_print_hex_dump(IWL_DL_RX, data, length);
3951}
3952#else
3953static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3954 struct iwl4965_rx_packet *pkt,
3955 struct ieee80211_hdr *header,
3956 int group100)
3957{
3958}
3959#endif
3960
3961
3962
3963/* Called for REPLY_RX (legacy ABG frames), or
3964 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
3965static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
3966 struct iwl4965_rx_mem_buffer *rxb)
3967{
3968 struct ieee80211_hdr *header;
3969 struct ieee80211_rx_status rx_status;
3970 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3971 /* Use phy data (Rx signal strength, etc.) contained within
3972 * this rx packet for legacy frames,
3973 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
3974 int include_phy = (pkt->hdr.cmd == REPLY_RX);
3975 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3976 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3977 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3978 __le32 *rx_end;
3979 unsigned int len = 0;
3980 u16 fc;
3981 u8 network_packet;
3982
3983 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
3984 rx_status.freq =
3985 ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel));
3986 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3987 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
3988 rx_status.rate_idx =
3989 iwl4965_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags));
3990 if (rx_status.band == IEEE80211_BAND_5GHZ)
3991 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3992
3993 rx_status.antenna = 0;
3994 rx_status.flag = 0;
3995
3996 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
3997 		IWL_DEBUG_DROP("dsp size out of range [0,20]: %d\n",
3998 rx_start->cfg_phy_cnt);
3999 return;
4000 }
4001
4002 if (!include_phy) {
4003 if (priv->last_phy_res[0])
4004 rx_start = (struct iwl4965_rx_phy_res *)
4005 &priv->last_phy_res[1];
4006 else
4007 rx_start = NULL;
4008 }
4009
4010 if (!rx_start) {
4011 IWL_ERROR("MPDU frame without a PHY data\n");
4012 return;
4013 }
4014
4015 if (include_phy) {
4016 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
4017 + rx_start->cfg_phy_cnt);
4018
4019 len = le16_to_cpu(rx_start->byte_count);
4020 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
4021 sizeof(struct iwl4965_rx_phy_res) + len);
4022 } else {
4023 struct iwl4965_rx_mpdu_res_start *amsdu =
4024 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
4025
4026 header = (void *)(pkt->u.raw +
4027 sizeof(struct iwl4965_rx_mpdu_res_start));
4028 len = le16_to_cpu(amsdu->byte_count);
4029 rx_end = (__le32 *) (pkt->u.raw +
4030 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
4031 }
4032
4033 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
4034 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
4035 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
4036 le32_to_cpu(*rx_end));
4037 return;
4038 }
4039
4040 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
4041
4042 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
4043 rx_status.ssi = iwl4965_calc_rssi(rx_start);
4044
4045 /* Meaningful noise values are available only from beacon statistics,
4046 * which are gathered only when associated, and indicate noise
4047 * only for the associated network channel ...
4048 * Ignore these noise values while scanning (other channels) */
4049 if (iwl_is_associated(priv) &&
4050 !test_bit(STATUS_SCANNING, &priv->status)) {
4051 rx_status.noise = priv->last_rx_noise;
4052 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
4053 rx_status.noise);
4054 } else {
4055 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4056 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
4057 }
4058
4059 /* Reset beacon noise level if not associated. */
4060 if (!iwl_is_associated(priv))
4061 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4062
4063 /* Set "1" to report good data frames in groups of 100 */
4064 	/* FIXME: need to optimize the call */
4065 iwl4965_dbg_report_frame(priv, pkt, header, 1);
4066
4067 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
4068 rx_status.ssi, rx_status.noise, rx_status.signal,
4069 (unsigned long long)rx_status.mactime);
4070
4071 network_packet = iwl4965_is_network_packet(priv, header);
4072 if (network_packet) {
4073 priv->last_rx_rssi = rx_status.ssi;
4074 priv->last_beacon_time = priv->ucode_beacon_time;
4075 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
4076 }
4077
4078 fc = le16_to_cpu(header->frame_control);
4079 switch (fc & IEEE80211_FCTL_FTYPE) {
4080 case IEEE80211_FTYPE_MGMT:
4081 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
4082 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
4083 header->addr2);
4084 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
4085 break;
4086
4087 case IEEE80211_FTYPE_CTL:
4088#ifdef CONFIG_IWL4965_HT
4089 switch (fc & IEEE80211_FCTL_STYPE) {
4090 case IEEE80211_STYPE_BACK_REQ:
4091 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
4092 iwl4965_handle_data_packet(priv, 0, include_phy,
4093 rxb, &rx_status);
4094 break;
4095 default:
4096 break;
4097 }
4098#endif
4099 break;
4100
4101 case IEEE80211_FTYPE_DATA: {
4102 DECLARE_MAC_BUF(mac1);
4103 DECLARE_MAC_BUF(mac2);
4104 DECLARE_MAC_BUF(mac3);
4105
4106 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
4107 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
4108 header->addr2);
4109
4110 if (unlikely(!network_packet))
4111 IWL_DEBUG_DROP("Dropping (non network): "
4112 "%s, %s, %s\n",
4113 print_mac(mac1, header->addr1),
4114 print_mac(mac2, header->addr2),
4115 print_mac(mac3, header->addr3));
4116 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
4117 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
4118 print_mac(mac1, header->addr1),
4119 print_mac(mac2, header->addr2),
4120 print_mac(mac3, header->addr3));
4121 else
4122 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
4123 &rx_status);
4124 break;
4125 }
4126 default:
4127 break;
4128
4129 }
4130}
4131
4132/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4133 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
4134static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
4135 struct iwl4965_rx_mem_buffer *rxb)
4136{
4137 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4138 priv->last_phy_res[0] = 1;
4139 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4140 sizeof(struct iwl4965_rx_phy_res));
4141}
4142static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
4143 struct iwl4965_rx_mem_buffer *rxb)
4144
4145{
4146#ifdef CONFIG_IWL4965_SENSITIVITY
4147 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4148 struct iwl4965_missed_beacon_notif *missed_beacon;
4149
4150 missed_beacon = &pkt->u.missed_beacon;
4151 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
4152 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
4153 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
4154 le32_to_cpu(missed_beacon->total_missed_becons),
4155 le32_to_cpu(missed_beacon->num_recvd_beacons),
4156 le32_to_cpu(missed_beacon->num_expected_beacons));
4157 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
4158 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
4159 queue_work(priv->workqueue, &priv->sensitivity_work);
4160 }
4161#endif /*CONFIG_IWL4965_SENSITIVITY*/
4162}
4163#ifdef CONFIG_IWL4965_HT
4164
4165/**
4166 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4167 */
4168static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
4169 int sta_id, int tid)
4170{
4171 unsigned long flags;
4172
4173 /* Remove "disable" flag, to enable Tx for this TID */
4174 spin_lock_irqsave(&priv->sta_lock, flags);
4175 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4176 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4177 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4178 spin_unlock_irqrestore(&priv->sta_lock, flags);
4179
4180 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4181}
4182
4183/**
4184 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4185 *
4186 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4187 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4188 */
4189static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4190 struct iwl4965_ht_agg *agg,
4191 struct iwl4965_compressed_ba_resp*
4192 ba_resp)
4193
4194{
4195 int i, sh, ack;
4196 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
4197 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4198 u64 bitmap;
4199 int successes = 0;
4200 struct ieee80211_tx_status *tx_status;
4201
4202 if (unlikely(!agg->wait_for_ba)) {
4203 IWL_ERROR("Received BA when not expected\n");
4204 return -EINVAL;
4205 }
4206
4207 /* Mark that the expected block-ack response arrived */
4208 agg->wait_for_ba = 0;
4209 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
4210
4211 /* Calculate shift to align block-ack bits with our Tx window bits */
4212 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
4213 if (sh < 0) /* tbw something is wrong with indices */
4214 sh += 0x100;
4215
4216 /* don't use 64-bit values for now */
4217 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
4218
4219 if (agg->frame_count > (64 - sh)) {
4220 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
4221 return -1;
4222 }
4223
4224 /* check for success or failure according to the
4225 * transmitted bitmap and block-ack bitmap */
4226 bitmap &= agg->bitmap;
4227
4228 /* For each frame attempted in aggregation,
4229 * update driver's record of tx frame's status. */
4230 for (i = 0; i < agg->frame_count ; i++) {
4231 ack = bitmap & (1 << i);
4232 successes += !!ack;
4233 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
4234 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
4235 agg->start_idx + i);
4236 }
4237
4238 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
4239 tx_status->flags = IEEE80211_TX_STATUS_ACK;
4240 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
4241 tx_status->ampdu_ack_map = successes;
4242 tx_status->ampdu_ack_len = agg->frame_count;
4243 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
4244 &tx_status->control);
4245
4246 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
4247
4248 return 0;
4249}
4250
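The helper above walks the device's compressed block-ack bitmap after shifting it so that bit 0 lines up with the driver's Tx window start, then counts successes against the frames actually sent. A stand-alone sketch of that alignment and count, using made-up window and bitmap values; seq_to_index below is a simplified stand-in for the driver's SEQ_TO_INDEX macro:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the driver's SEQ_TO_INDEX(): the sequence
 * number occupies the upper 12 bits of the sequence-control field. */
static int seq_to_index(uint16_t seq_ctl)
{
	return (seq_ctl >> 4) & 0xff;
}

int main(void)
{
	int start_idx = 10;          /* first TFD index of the agg window */
	int frame_count = 5;         /* frames sent in this aggregation */
	uint16_t seq_ctl = 10 << 4;  /* BA starting sequence control */
	uint64_t ba_bitmap = 0x1b;   /* device ACK bitmap, bit 0 = first frame */
	uint64_t sent_bitmap = 0x1f; /* frames the driver actually transmitted */
	int sh, i, successes = 0;

	/* Shift so bit 0 of the BA bitmap lines up with start_idx */
	sh = start_idx - seq_to_index(seq_ctl);
	if (sh < 0)
		sh += 0x100;
	ba_bitmap >>= sh;

	/* Only frames that were both sent and ACKed count as successes */
	ba_bitmap &= sent_bitmap;
	for (i = 0; i < frame_count; i++)
		successes += !!(ba_bitmap & (1ULL << i));

	printf("ACKed %d of %d frames\n", successes, frame_count);
	return 0;
}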
4251/** 1931/**
4252 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration 1932 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4253 */ 1933 */
@@ -4258,22 +1938,24 @@ static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
4258 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 1938 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4259 iwl_write_prph(priv, 1939 iwl_write_prph(priv,
4260 IWL49_SCD_QUEUE_STATUS_BITS(txq_id), 1940 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
4261 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| 1941 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4262 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 1942 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4263} 1943}
4264 1944
4265/** 1945/**
4266 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID 1946 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
4267 * priv->lock must be held by the caller 1947 * priv->lock must be held by the caller
4268 */ 1948 */
4269static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id, 1949static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
4270 u16 ssn_idx, u8 tx_fifo) 1950 u16 ssn_idx, u8 tx_fifo)
4271{ 1951{
4272 int ret = 0; 1952 int ret = 0;
4273 1953
4274 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) { 1954 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
4275 IWL_WARNING("queue number too small: %d, must be > %d\n", 1955 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
4276 txq_id, IWL_BACK_QUEUE_FIRST_ID); 1956 IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
1957 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1958 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
4277 return -EINVAL; 1959 return -EINVAL;
4278 } 1960 }
4279 1961
@@ -4291,7 +1973,7 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4291 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); 1973 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4292 1974
4293 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); 1975 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4294 iwl4965_txq_ctx_deactivate(priv, txq_id); 1976 iwl_txq_ctx_deactivate(priv, txq_id);
4295 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); 1977 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4296 1978
4297 iwl_release_nic_access(priv); 1979 iwl_release_nic_access(priv);
@@ -4299,121 +1981,6 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4299 return 0; 1981 return 0;
4300} 1982}
4301 1983
4302int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
4303 u8 tid, int txq_id)
4304{
4305 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4306 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4307 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4308
4309 switch (priv->stations[sta_id].tid[tid].agg.state) {
4310 case IWL_EMPTYING_HW_QUEUE_DELBA:
4311 /* We are reclaiming the last packet of the */
4312 /* aggregated HW queue */
4313 if (txq_id == tid_data->agg.txq_id &&
4314 q->read_ptr == q->write_ptr) {
4315 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4316 int tx_fifo = default_tid_to_tx_fifo[tid];
4317 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4318 iwl4965_tx_queue_agg_disable(priv, txq_id,
4319 ssn, tx_fifo);
4320 tid_data->agg.state = IWL_AGG_OFF;
4321 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4322 }
4323 break;
4324 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4325 /* We are reclaiming the last packet of the queue */
4326 if (tid_data->tfds_in_queue == 0) {
4327 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4328 tid_data->agg.state = IWL_AGG_ON;
4329 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4330 }
4331 break;
4332 }
4333 return 0;
4334}
4335
4336/**
4337 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
4338 * @index -- current index
4339 * @n_bd -- total number of entries in queue (s/b power of 2)
4340 */
4341static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4342{
4343 return (index == 0) ? n_bd - 1 : index - 1;
4344}
4345
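iwl4965_queue_dec_wrap above steps a circular-buffer index backwards, wrapping 0 back to n_bd - 1. A tiny stand-alone illustration of the wrap behaviour; n_bd = 256 is an arbitrary power-of-two depth:

#include <stdio.h>

/* Decrement a circular-buffer index, wrapping 0 back to n_bd - 1 */
static int queue_dec_wrap(int index, int n_bd)
{
	return (index == 0) ? n_bd - 1 : index - 1;
}

int main(void)
{
	int n_bd = 256;	/* queue depth, a power of 2 as the driver expects */

	printf("5 -> %d\n", queue_dec_wrap(5, n_bd));	/* 4 */
	printf("0 -> %d\n", queue_dec_wrap(0, n_bd));	/* 255 */
	return 0;
}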
4346/**
4347 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
4348 *
4349 * Handles block-acknowledge notification from device, which reports success
4350 * of frames sent via aggregation.
4351 */
4352static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4353 struct iwl4965_rx_mem_buffer *rxb)
4354{
4355 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4356 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
4357 int index;
4358 struct iwl4965_tx_queue *txq = NULL;
4359 struct iwl4965_ht_agg *agg;
4360 DECLARE_MAC_BUF(mac);
4361
4362 /* "flow" corresponds to Tx queue */
4363 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4364
4365 /* "ssn" is start of block-ack Tx window, corresponds to index
4366 * (in Tx queue's circular buffer) of first TFD/frame in window */
4367 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4368
4369 if (scd_flow >= priv->hw_params.max_txq_num) {
4370 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
4371 return;
4372 }
4373
4374 txq = &priv->txq[scd_flow];
4375 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4376
4377 /* Find index just before block-ack window */
4378 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4379
4380 /* TODO: Need to get this copy more safely - now good for debug */
4381
4382 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
4383 "sta_id = %d\n",
4384 agg->wait_for_ba,
4385 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
4386 ba_resp->sta_id);
4387 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
4388 "%d, scd_ssn = %d\n",
4389 ba_resp->tid,
4390 ba_resp->seq_ctl,
4391 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
4392 ba_resp->scd_flow,
4393 ba_resp->scd_ssn);
4394 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
4395 agg->start_idx,
4396 (unsigned long long)agg->bitmap);
4397
4398 /* Update driver's record of ACK vs. not for each frame in window */
4399 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
4400
4401 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4402 * block-ack window (we assume that they've been successfully
4403 * transmitted ... if not, it's too late anyway). */
4404 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4405 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
4406 priv->stations[ba_resp->sta_id].
4407 tid[ba_resp->tid].tfds_in_queue -= freed;
4408 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
4409 priv->mac80211_registered &&
4410 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4411 ieee80211_wake_queue(priv->hw, scd_flow);
4412 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
4413 ba_resp->tid, scd_flow);
4414 }
4415}
4416
4417/** 1984/**
4418 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue 1985 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
4419 */ 1986 */
@@ -4424,10 +1991,10 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4424 u32 tbl_dw; 1991 u32 tbl_dw;
4425 u16 scd_q2ratid; 1992 u16 scd_q2ratid;
4426 1993
4427 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1994 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4428 1995
4429 tbl_dw_addr = priv->scd_base_addr + 1996 tbl_dw_addr = priv->scd_base_addr +
4430 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); 1997 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4431 1998
4432 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); 1999 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
4433 2000
@@ -4445,31 +2012,34 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4445/** 2012/**
4446 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue 2013 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4447 * 2014 *
4448 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID, 2015 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
4449 * i.e. it must be one of the higher queues used for aggregation 2016 * i.e. it must be one of the higher queues used for aggregation
4450 */ 2017 */
4451static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id, 2018static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
4452 int tx_fifo, int sta_id, int tid, 2019 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
4453 u16 ssn_idx)
4454{ 2020{
4455 unsigned long flags; 2021 unsigned long flags;
4456 int rc; 2022 int ret;
4457 u16 ra_tid; 2023 u16 ra_tid;
4458 2024
4459 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) 2025 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
4460 IWL_WARNING("queue number too small: %d, must be > %d\n", 2026 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
4461 txq_id, IWL_BACK_QUEUE_FIRST_ID); 2027 IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
2028 txq_id, IWL49_FIRST_AMPDU_QUEUE,
2029 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
2030 return -EINVAL;
2031 }
4462 2032
4463 ra_tid = BUILD_RAxTID(sta_id, tid); 2033 ra_tid = BUILD_RAxTID(sta_id, tid);
4464 2034
4465 /* Modify device's station table to Tx this TID */ 2035 /* Modify device's station table to Tx this TID */
4466 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid); 2036 iwl_sta_modify_enable_tid_tx(priv, sta_id, tid);
4467 2037
4468 spin_lock_irqsave(&priv->lock, flags); 2038 spin_lock_irqsave(&priv->lock, flags);
4469 rc = iwl_grab_nic_access(priv); 2039 ret = iwl_grab_nic_access(priv);
4470 if (rc) { 2040 if (ret) {
4471 spin_unlock_irqrestore(&priv->lock, flags); 2041 spin_unlock_irqrestore(&priv->lock, flags);
4472 return rc; 2042 return ret;
4473 } 2043 }
4474 2044
4475 /* Stop this Tx queue before configuring it */ 2045 /* Stop this Tx queue before configuring it */
@@ -4489,14 +2059,14 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4489 2059
4490 /* Set up Tx window size and frame limit for this queue */ 2060 /* Set up Tx window size and frame limit for this queue */
4491 iwl_write_targ_mem(priv, 2061 iwl_write_targ_mem(priv,
4492 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 2062 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4493 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 2063 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4494 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 2064 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4495 2065
4496 iwl_write_targ_mem(priv, priv->scd_base_addr + 2066 iwl_write_targ_mem(priv, priv->scd_base_addr +
4497 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 2067 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4498 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) 2068 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4499 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 2069 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4500 2070
4501 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); 2071 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4502 2072
@@ -4509,444 +2079,313 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4509 return 0; 2079 return 0;
4510} 2080}
4511 2081
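iwl4965_txq_agg_enable above programs the scheduler's per-queue context by shifting a value to a field's *_POS and masking it with the matching *_MSK. A small stand-alone sketch of that pack-into-field idiom; the DEMO_* field layout is invented for illustration and is not the real IWL49_SCD_QUEUE_CTX_REG1_* definition:

#include <stdio.h>
#include <stdint.h>

/* Invented field layout: low 7 bits of the context word hold the
 * aggregation window size.  The real IWL49_SCD_QUEUE_CTX_REG1_*
 * definitions live in iwl-4965-hw.h; these values are for show only. */
#define DEMO_WIN_SIZE_POS	0
#define DEMO_WIN_SIZE_MSK	0x0000007F

/* Clear a register field, then place a new value into it */
static uint32_t pack_field(uint32_t reg, uint32_t val, int pos, uint32_t msk)
{
	reg &= ~msk;
	reg |= (val << pos) & msk;
	return reg;
}

int main(void)
{
	uint32_t ctx = 0;

	ctx = pack_field(ctx, 64, DEMO_WIN_SIZE_POS, DEMO_WIN_SIZE_MSK);
	printf("context word: 0x%08x\n", (unsigned)ctx);
	return 0;
}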
4512#endif /* CONFIG_IWL4965_HT */ 2082int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4513 2083 enum ieee80211_ampdu_mlme_action action,
4514/** 2084 const u8 *addr, u16 tid, u16 *ssn)
4515 * iwl4965_add_station - Initialize a station's hardware rate table
4516 *
4517 * The uCode's station table contains a table of fallback rates
4518 * for automatic fallback during transmission.
4519 *
4520 * NOTE: This sets up a default set of values. These will be replaced later
4521 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4522 * rc80211_simple.
4523 *
4524 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4525 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4526 * which requires station table entry to exist).
4527 */
4528void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
4529{
4530 int i, r;
4531 struct iwl_link_quality_cmd link_cmd = {
4532 .reserved1 = 0,
4533 };
4534 u16 rate_flags;
4535
4536 /* Set up the rate scaling to start at selected rate, fall back
4537 * all the way down to 1M in IEEE order, and then spin on 1M */
4538 if (is_ap)
4539 r = IWL_RATE_54M_INDEX;
4540 else if (priv->band == IEEE80211_BAND_5GHZ)
4541 r = IWL_RATE_6M_INDEX;
4542 else
4543 r = IWL_RATE_1M_INDEX;
4544
4545 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4546 rate_flags = 0;
4547 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4548 rate_flags |= RATE_MCS_CCK_MSK;
4549
4550 /* Use Tx antenna B only */
4551 rate_flags |= RATE_MCS_ANT_B_MSK;
4552 rate_flags &= ~RATE_MCS_ANT_A_MSK;
4553
4554 link_cmd.rs_table[i].rate_n_flags =
4555 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4556 r = iwl4965_get_prev_ieee_rate(r);
4557 }
4558
4559 link_cmd.general_params.single_stream_ant_msk = 2;
4560 link_cmd.general_params.dual_stream_ant_msk = 3;
4561 link_cmd.agg_params.agg_dis_start_th = 3;
4562 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4563
4564 /* Update the rate scaling for control frame Tx to AP */
4565 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
4566
4567 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
4568 sizeof(link_cmd), &link_cmd, NULL);
4569}
4570
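The rate-scaling setup above fills LINK_QUAL_MAX_RETRY_NUM fallback entries, stepping down one rate per retry and spinning on 1M once it reaches the bottom. A minimal sketch of that fill pattern; the rate ladder, start index and retry count below are invented for the example:

#include <stdio.h>

#define DEMO_RETRY_NUM 16	/* stands in for LINK_QUAL_MAX_RETRY_NUM */

int main(void)
{
	/* Invented rate ladder, lowest to highest; index 0 is 1M */
	static const char * const rates[] = {
		"1M", "2M", "5.5M", "11M", "6M", "9M",
		"12M", "18M", "24M", "36M", "48M", "54M"
	};
	int r = 11;	/* start at the highest rate, as for an AP entry */
	int i;

	for (i = 0; i < DEMO_RETRY_NUM; i++) {
		printf("retry %2d -> %s\n", i, rates[r]);
		if (r > 0)	/* fall back one step, then spin on 1M */
			r--;
	}
	return 0;
}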
4571#ifdef CONFIG_IWL4965_HT
4572
4573static u8 iwl4965_is_channel_extension(struct iwl_priv *priv,
4574 enum ieee80211_band band,
4575 u16 channel, u8 extension_chan_offset)
4576{
4577 const struct iwl_channel_info *ch_info;
4578
4579 ch_info = iwl_get_channel_info(priv, band, channel);
4580 if (!is_channel_valid(ch_info))
4581 return 0;
4582
4583 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
4584 return 0;
4585
4586 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4587 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4588 return 1;
4589
4590 return 0;
4591}
4592
4593static u8 iwl4965_is_fat_tx_allowed(struct iwl_priv *priv,
4594 struct ieee80211_ht_info *sta_ht_inf)
4595{
4596 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
4597
4598 if ((!iwl_ht_conf->is_ht) ||
4599 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
4600 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
4601 return 0;
4602
4603 if (sta_ht_inf) {
4604 if ((!sta_ht_inf->ht_supported) ||
4605 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
4606 return 0;
4607 }
4608
4609 return (iwl4965_is_channel_extension(priv, priv->band,
4610 iwl_ht_conf->control_channel,
4611 iwl_ht_conf->extension_chan_offset));
4612}
4613
4614void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
4615{ 2085{
4616 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon; 2086 struct iwl_priv *priv = hw->priv;
4617 u32 val; 2087 DECLARE_MAC_BUF(mac);
4618 2088
4619 if (!ht_info->is_ht) 2089 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
4620 return; 2090 print_mac(mac, addr), tid);
4621 2091
4622 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */ 2092 if (!(priv->cfg->sku & IWL_SKU_N))
4623 if (iwl4965_is_fat_tx_allowed(priv, NULL)) 2093 return -EACCES;
4624 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4625 else
4626 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4627 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4628
4629 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4630 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
4631 le16_to_cpu(rxon->channel),
4632 ht_info->control_channel);
4633 rxon->channel = cpu_to_le16(ht_info->control_channel);
4634 return;
4635 }
4636 2094
4637 /* Note: control channel is opposite of extension channel */ 2095 switch (action) {
4638 switch (ht_info->extension_chan_offset) { 2096 case IEEE80211_AMPDU_RX_START:
4639 case IWL_EXT_CHANNEL_OFFSET_ABOVE: 2097 IWL_DEBUG_HT("start Rx\n");
4640 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 2098 return iwl_rx_agg_start(priv, addr, tid, *ssn);
4641 break; 2099 case IEEE80211_AMPDU_RX_STOP:
4642 case IWL_EXT_CHANNEL_OFFSET_BELOW: 2100 IWL_DEBUG_HT("stop Rx\n");
4643 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 2101 return iwl_rx_agg_stop(priv, addr, tid);
4644 break; 2102 case IEEE80211_AMPDU_TX_START:
4645 case IWL_EXT_CHANNEL_OFFSET_NONE: 2103 IWL_DEBUG_HT("start Tx\n");
2104 return iwl_tx_agg_start(priv, addr, tid, ssn);
2105 case IEEE80211_AMPDU_TX_STOP:
2106 IWL_DEBUG_HT("stop Tx\n");
2107 return iwl_tx_agg_stop(priv, addr, tid);
4646 default: 2108 default:
4647 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; 2109 IWL_DEBUG_HT("unknown\n");
2110 return -EINVAL;
4648 break; 2111 break;
4649 } 2112 }
4650 2113 return 0;
4651 val = ht_info->ht_protection;
4652
4653 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4654
4655 iwl4965_set_rxon_chain(priv);
4656
4657 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4658 "rxon flags 0x%X operation mode :0x%X "
4659 "extension channel offset 0x%x "
4660 "control chan %d\n",
4661 ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1],
4662 le32_to_cpu(rxon->flags), ht_info->ht_protection,
4663 ht_info->extension_chan_offset,
4664 ht_info->control_channel);
4665 return;
4666} 2114}
4667 2115
4668void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index, 2116static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
4669 struct ieee80211_ht_info *sta_ht_inf)
4670{ 2117{
4671 __le32 sta_flags; 2118 switch (cmd_id) {
4672 u8 mimo_ps_mode; 2119 case REPLY_RXON:
4673 2120 return (u16) sizeof(struct iwl4965_rxon_cmd);
4674 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
4675 goto done;
4676
4677 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
4678
4679 sta_flags = priv->stations[index].sta.station_flags;
4680
4681 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
4682
4683 switch (mimo_ps_mode) {
4684 case WLAN_HT_CAP_MIMO_PS_STATIC:
4685 sta_flags |= STA_FLG_MIMO_DIS_MSK;
4686 break;
4687 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
4688 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
4689 break;
4690 case WLAN_HT_CAP_MIMO_PS_DISABLED:
4691 break;
4692 default: 2121 default:
4693 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode); 2122 return len;
4694 break;
4695 } 2123 }
4696
4697 sta_flags |= cpu_to_le32(
4698 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
4699
4700 sta_flags |= cpu_to_le32(
4701 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
4702
4703 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
4704 sta_flags |= STA_FLG_FAT_EN_MSK;
4705 else
4706 sta_flags &= ~STA_FLG_FAT_EN_MSK;
4707
4708 priv->stations[index].sta.station_flags = sta_flags;
4709 done:
4710 return;
4711} 2124}
4712 2125
4713static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv, 2126static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
4714 int sta_id, int tid, u16 ssn)
4715{ 2127{
4716 unsigned long flags; 2128 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
4717 2129 addsta->mode = cmd->mode;
4718 spin_lock_irqsave(&priv->sta_lock, flags); 2130 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
4719 priv->stations[sta_id].sta.station_flags_msk = 0; 2131 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
4720 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; 2132 addsta->station_flags = cmd->station_flags;
4721 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; 2133 addsta->station_flags_msk = cmd->station_flags_msk;
4722 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); 2134 addsta->tid_disable_tx = cmd->tid_disable_tx;
4723 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 2135 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
4724 spin_unlock_irqrestore(&priv->sta_lock, flags); 2136 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2137 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2138 addsta->reserved1 = __constant_cpu_to_le16(0);
2139 addsta->reserved2 = __constant_cpu_to_le32(0);
4725 2140
4726 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 2141 return (u16)sizeof(struct iwl4965_addsta_cmd);
4727} 2142}
4728 2143
4729static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv, 2144static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
4730 int sta_id, int tid)
4731{ 2145{
4732 unsigned long flags; 2146 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
4733
4734 spin_lock_irqsave(&priv->sta_lock, flags);
4735 priv->stations[sta_id].sta.station_flags_msk = 0;
4736 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
4737 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
4738 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4739 spin_unlock_irqrestore(&priv->sta_lock, flags);
4740
4741 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4742} 2147}
4743 2148
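iwl4965_get_scd_ssn above reads the 32-bit word that follows the per-frame status entries and masks it down to the scheduler SSN. A stand-alone sketch with a simplified response layout; the struct, field names and 12-bit mask below are illustrative stand-ins, not the uCode API:

#include <stdio.h>
#include <stdint.h>

#define DEMO_MAX_SN 0x0fff	/* 12-bit sequence number mask (illustrative) */

/* Simplified response layout: frame_count status words, then one word
 * whose low bits carry the scheduler's new SSN. */
struct demo_tx_resp {
	uint8_t frame_count;
	uint32_t status[8];
};

static uint32_t demo_get_scd_ssn(const struct demo_tx_resp *r)
{
	return r->status[r->frame_count] & DEMO_MAX_SN;
}

int main(void)
{
	struct demo_tx_resp r = { .frame_count = 3 };

	r.status[3] = 0x0abc0123;	/* upper bits carry other status info */
	printf("scd_ssn = 0x%03x\n", (unsigned)demo_get_scd_ssn(&r));	/* 0x123 */
	return 0;
}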
4744/* 2149/**
4745 * Find first available (lowest unused) Tx Queue, mark it "active". 2150 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
4746 * Called only when finding queue for aggregation.
4747 * Should never return anything < 7, because they should already
4748 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4749 */ 2151 */
4750static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv) 2152static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
4751{ 2153 struct iwl_ht_agg *agg,
4752 int txq_id; 2154 struct iwl4965_tx_resp *tx_resp,
4753 2155 int txq_id, u16 start_idx)
4754 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
4755 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4756 return txq_id;
4757 return -1;
4758}
4759
4760static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
4761 u16 tid, u16 *start_seq_num)
4762{ 2156{
4763 struct iwl_priv *priv = hw->priv; 2157 u16 status;
4764 int sta_id; 2158 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
4765 int tx_fifo; 2159 struct ieee80211_tx_info *info = NULL;
4766 int txq_id; 2160 struct ieee80211_hdr *hdr = NULL;
4767 int ssn = -1; 2161 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
4768 int ret = 0; 2162 int i, sh, idx;
4769 unsigned long flags; 2163 u16 seq;
4770 struct iwl4965_tid_data *tid_data; 2164 if (agg->wait_for_ba)
4771 DECLARE_MAC_BUF(mac); 2165 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
4772 2166
4773 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 2167 agg->frame_count = tx_resp->frame_count;
4774 tx_fifo = default_tid_to_tx_fifo[tid]; 2168 agg->start_idx = start_idx;
4775 else 2169 agg->rate_n_flags = rate_n_flags;
4776 return -EINVAL; 2170 agg->bitmap = 0;
4777 2171
4778 IWL_WARNING("%s on da = %s tid = %d\n", 2172 /* # frames attempted by Tx command */
4779 __func__, print_mac(mac, da), tid); 2173 if (agg->frame_count == 1) {
4780 2174 /* Only one frame was attempted; no block-ack will arrive */
4781 sta_id = iwl4965_hw_find_station(priv, da); 2175 status = le16_to_cpu(frame_status[0].status);
4782 if (sta_id == IWL_INVALID_STATION) 2176 idx = start_idx;
4783 return -ENXIO; 2177
4784 2178 /* FIXME: code repetition */
4785 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 2179 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
4786 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n"); 2180 agg->frame_count, agg->start_idx, idx);
4787 return -ENXIO; 2181
4788 } 2182 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
4789 2183 info->status.retry_count = tx_resp->failure_frame;
4790 txq_id = iwl4965_txq_ctx_activate_free(priv); 2184 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
4791 if (txq_id == -1) 2185 info->flags |= iwl_is_tx_success(status)?
4792 return -ENXIO; 2186 IEEE80211_TX_STAT_ACK : 0;
2187 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
2188 /* FIXME: code repetition end */
2189
2190 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
2191 status & 0xff, tx_resp->failure_frame);
2192 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
2193
2194 agg->wait_for_ba = 0;
2195 } else {
2196 /* Two or more frames were attempted; expect block-ack */
2197 u64 bitmap = 0;
2198 int start = agg->start_idx;
2199
2200 /* Construct bit-map of pending frames within Tx window */
2201 for (i = 0; i < agg->frame_count; i++) {
2202 u16 sc;
2203 status = le16_to_cpu(frame_status[i].status);
2204 seq = le16_to_cpu(frame_status[i].sequence);
2205 idx = SEQ_TO_INDEX(seq);
2206 txq_id = SEQ_TO_QUEUE(seq);
2207
2208 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
2209 AGG_TX_STATE_ABORT_MSK))
2210 continue;
2211
2212 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
2213 agg->frame_count, txq_id, idx);
2214
2215 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
2216
2217 sc = le16_to_cpu(hdr->seq_ctrl);
2218 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
2219 IWL_ERROR("BUG_ON idx doesn't match seq control"
2220 " idx=%d, seq_idx=%d, seq=%d\n",
2221 idx, SEQ_TO_SN(sc),
2222 hdr->seq_ctrl);
2223 return -1;
2224 }
4793 2225
4794 spin_lock_irqsave(&priv->sta_lock, flags); 2226 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
4795 tid_data = &priv->stations[sta_id].tid[tid]; 2227 i, idx, SEQ_TO_SN(sc));
4796 ssn = SEQ_TO_SN(tid_data->seq_number); 2228
4797 tid_data->agg.txq_id = txq_id; 2229 sh = idx - start;
4798 spin_unlock_irqrestore(&priv->sta_lock, flags); 2230 if (sh > 64) {
2231 sh = (start - idx) + 0xff;
2232 bitmap = bitmap << sh;
2233 sh = 0;
2234 start = idx;
2235 } else if (sh < -64)
2236 sh = 0xff - (start - idx);
2237 else if (sh < 0) {
2238 sh = start - idx;
2239 start = idx;
2240 bitmap = bitmap << sh;
2241 sh = 0;
2242 }
2243 bitmap |= (1 << sh);
2244 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
2245 start, (u32)(bitmap & 0xFFFFFFFF));
2246 }
4799 2247
4800 *start_seq_num = ssn; 2248 agg->bitmap = bitmap;
4801 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo, 2249 agg->start_idx = start;
4802 sta_id, tid, ssn); 2250 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
4803 if (ret) 2251 agg->frame_count, agg->start_idx,
4804 return ret; 2252 (unsigned long long)agg->bitmap);
4805 2253
4806 ret = 0; 2254 if (bitmap)
4807 if (tid_data->tfds_in_queue == 0) { 2255 agg->wait_for_ba = 1;
4808 printk(KERN_ERR "HW queue is empty\n");
4809 tid_data->agg.state = IWL_AGG_ON;
4810 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4811 } else {
4812 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4813 tid_data->tfds_in_queue);
4814 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4815 } 2256 }
4816 return ret; 2257 return 0;
4817} 2258}
4818 2259
4819static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da, 2260/**
4820 u16 tid) 2261 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
4821{ 2262 */
4822 2263static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
4823 struct iwl_priv *priv = hw->priv; 2264 struct iwl_rx_mem_buffer *rxb)
4824 int tx_fifo_id, txq_id, sta_id, ssn = -1; 2265{
4825 struct iwl4965_tid_data *tid_data; 2266 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
4826 int ret, write_ptr, read_ptr; 2267 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4827 unsigned long flags; 2268 int txq_id = SEQ_TO_QUEUE(sequence);
4828 DECLARE_MAC_BUF(mac); 2269 int index = SEQ_TO_INDEX(sequence);
2270 struct iwl_tx_queue *txq = &priv->txq[txq_id];
2271 struct ieee80211_tx_info *info;
2272 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
2273 u32 status = le32_to_cpu(tx_resp->u.status);
2274 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
2275 __le16 fc;
2276 struct ieee80211_hdr *hdr;
2277 u8 *qc = NULL;
4829 2278
4830 if (!da) { 2279 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
4831 IWL_ERROR("da = NULL\n"); 2280 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
4832 return -EINVAL; 2281 "is out of range [0-%d] %d %d\n", txq_id,
2282 index, txq->q.n_bd, txq->q.write_ptr,
2283 txq->q.read_ptr);
2284 return;
4833 } 2285 }
4834 2286
4835 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 2287 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
4836 tx_fifo_id = default_tid_to_tx_fifo[tid]; 2288 memset(&info->status, 0, sizeof(info->status));
4837 else
4838 return -EINVAL;
4839
4840 sta_id = iwl4965_hw_find_station(priv, da);
4841
4842 if (sta_id == IWL_INVALID_STATION)
4843 return -ENXIO;
4844
4845 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4846 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4847 2289
4848 tid_data = &priv->stations[sta_id].tid[tid]; 2290 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
4849 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 2291 fc = hdr->frame_control;
4850 txq_id = tid_data->agg.txq_id; 2292 if (ieee80211_is_data_qos(fc)) {
4851 write_ptr = priv->txq[txq_id].q.write_ptr; 2293 qc = ieee80211_get_qos_ctl(hdr);
4852 read_ptr = priv->txq[txq_id].q.read_ptr; 2294 tid = qc[0] & 0xf;
2295 }
4853 2296
4854 /* The queue is not empty */ 2297 sta_id = iwl_get_ra_sta_id(priv, hdr);
4855 if (write_ptr != read_ptr) { 2298 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
4856 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n"); 2299 IWL_ERROR("Station not known\n");
4857 priv->stations[sta_id].tid[tid].agg.state = 2300 return;
4858 IWL_EMPTYING_HW_QUEUE_DELBA;
4859 return 0;
4860 } 2301 }
4861 2302
4862 IWL_DEBUG_HT("HW queue empty\n"); 2303 if (txq->sched_retry) {
4863 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; 2304 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2305 struct iwl_ht_agg *agg = NULL;
4864 2306
4865 spin_lock_irqsave(&priv->lock, flags); 2307 if (!qc)
4866 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id); 2308 return;
4867 spin_unlock_irqrestore(&priv->lock, flags);
4868 2309
4869 if (ret) 2310 agg = &priv->stations[sta_id].tid[tid].agg;
4870 return ret;
4871 2311
4872 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid); 2312 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
4873 2313
4874 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n", 2314 /* check if BAR is needed */
4875 print_mac(mac, da), tid); 2315 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
2316 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
4876 2317
4877 return 0; 2318 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
4878} 2319 int freed, ampdu_q;
2320 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2321 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
2322 "%d index %d\n", scd_ssn , index);
2323 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2324 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
4879 2325
4880int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 2326 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
4881 enum ieee80211_ampdu_mlme_action action, 2327 txq_id >= 0 && priv->mac80211_registered &&
4882 const u8 *addr, u16 tid, u16 *ssn) 2328 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) {
4883{ 2329 /* calculate mac80211 ampdu sw queue to wake */
4884 struct iwl_priv *priv = hw->priv; 2330 ampdu_q = txq_id - IWL49_FIRST_AMPDU_QUEUE +
4885 int sta_id; 2331 priv->hw->queues;
4886 DECLARE_MAC_BUF(mac); 2332 if (agg->state == IWL_AGG_OFF)
4887 2333 ieee80211_wake_queue(priv->hw, txq_id);
4888 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ", 2334 else
4889 print_mac(mac, addr), tid); 2335 ieee80211_wake_queue(priv->hw, ampdu_q);
4890 sta_id = iwl4965_hw_find_station(priv, addr); 2336 }
4891 switch (action) { 2337 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
4892 case IEEE80211_AMPDU_RX_START: 2338 }
4893 IWL_DEBUG_HT("start Rx\n"); 2339 } else {
4894 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn); 2340 info->status.retry_count = tx_resp->failure_frame;
4895 break; 2341 info->flags |=
4896 case IEEE80211_AMPDU_RX_STOP: 2342 iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0;
4897 IWL_DEBUG_HT("stop Rx\n"); 2343 iwl_hwrate_to_tx_control(priv,
4898 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid); 2344 le32_to_cpu(tx_resp->rate_n_flags),
4899 break; 2345 info);
4900 case IEEE80211_AMPDU_TX_START: 2346
4901 IWL_DEBUG_HT("start Tx\n"); 2347 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags "
4902 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn); 2348 "0x%x retries %d\n", txq_id,
4903 case IEEE80211_AMPDU_TX_STOP: 2349 iwl_get_tx_fail_reason(status),
4904 IWL_DEBUG_HT("stop Tx\n"); 2350 status, le32_to_cpu(tx_resp->rate_n_flags),
4905 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid); 2351 tx_resp->failure_frame);
4906 default: 2352
4907 IWL_DEBUG_HT("unknown\n"); 2353 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
4908 return -EINVAL; 2354
4909 break; 2355 if (index != -1) {
2356 int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2357 if (tid != MAX_TID_COUNT)
2358 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2359 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
2360 (txq_id >= 0) && priv->mac80211_registered)
2361 ieee80211_wake_queue(priv->hw, txq_id);
2362 if (tid != MAX_TID_COUNT)
2363 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2364 }
4910 } 2365 }
4911 return 0; 2366
2367 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
2368 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
4912} 2369}
4913 2370
4914#endif /* CONFIG_IWL4965_HT */
4915 2371
4916/* Set up 4965-specific Rx frame reply handlers */ 2372/* Set up 4965-specific Rx frame reply handlers */
4917void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv) 2373static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
4918{ 2374{
4919 /* Legacy Rx frames */ 2375 /* Legacy Rx frames */
4920 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx; 2376 priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx;
4921 2377 /* Tx response */
4922 /* High-throughput (HT) Rx frames */ 2378 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
4923 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4924 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
4925
4926 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4927 iwl4965_rx_missed_beacon_notif;
4928
4929#ifdef CONFIG_IWL4965_HT
4930 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
4931#endif /* CONFIG_IWL4965_HT */
4932} 2379}
4933 2380
4934void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv) 2381static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
4935{ 2382{
4936 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); 2383 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4937#ifdef CONFIG_IWL4965_SENSITIVITY
4938 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4939#endif
4940 init_timer(&priv->statistics_periodic);
4941 priv->statistics_periodic.data = (unsigned long)priv;
4942 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
4943} 2384}
4944 2385
4945void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv) 2386static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
4946{ 2387{
4947 del_timer_sync(&priv->statistics_periodic); 2388 cancel_work_sync(&priv->txpower_work);
4948
4949 cancel_delayed_work(&priv->init_alive_start);
4950} 2389}
4951 2390
4952 2391
@@ -4955,23 +2394,56 @@ static struct iwl_hcmd_ops iwl4965_hcmd = {
4955}; 2394};
4956 2395
4957static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 2396static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
4958 .enqueue_hcmd = iwl4965_enqueue_hcmd, 2397 .get_hcmd_size = iwl4965_get_hcmd_size,
2398 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2399 .chain_noise_reset = iwl4965_chain_noise_reset,
2400 .gain_computation = iwl4965_gain_computation,
2401 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
4959}; 2402};
4960 2403
4961static struct iwl_lib_ops iwl4965_lib = { 2404static struct iwl_lib_ops iwl4965_lib = {
4962 .init_drv = iwl4965_init_drv,
4963 .set_hw_params = iwl4965_hw_set_hw_params, 2405 .set_hw_params = iwl4965_hw_set_hw_params,
2406 .alloc_shared_mem = iwl4965_alloc_shared_mem,
2407 .free_shared_mem = iwl4965_free_shared_mem,
2408 .shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
4964 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, 2409 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
4965 .hw_nic_init = iwl4965_hw_nic_init, 2410 .txq_set_sched = iwl4965_txq_set_sched,
2411 .txq_agg_enable = iwl4965_txq_agg_enable,
2412 .txq_agg_disable = iwl4965_txq_agg_disable,
2413 .rx_handler_setup = iwl4965_rx_handler_setup,
2414 .setup_deferred_work = iwl4965_setup_deferred_work,
2415 .cancel_deferred_work = iwl4965_cancel_deferred_work,
4966 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, 2416 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
4967 .alive_notify = iwl4965_alive_notify, 2417 .alive_notify = iwl4965_alive_notify,
2418 .init_alive_start = iwl4965_init_alive_start,
4968 .load_ucode = iwl4965_load_bsm, 2419 .load_ucode = iwl4965_load_bsm,
2420 .apm_ops = {
2421 .init = iwl4965_apm_init,
2422 .reset = iwl4965_apm_reset,
2423 .stop = iwl4965_apm_stop,
2424 .config = iwl4965_nic_config,
2425 .set_pwr_src = iwl4965_set_pwr_src,
2426 },
4969 .eeprom_ops = { 2427 .eeprom_ops = {
2428 .regulatory_bands = {
2429 EEPROM_REGULATORY_BAND_1_CHANNELS,
2430 EEPROM_REGULATORY_BAND_2_CHANNELS,
2431 EEPROM_REGULATORY_BAND_3_CHANNELS,
2432 EEPROM_REGULATORY_BAND_4_CHANNELS,
2433 EEPROM_REGULATORY_BAND_5_CHANNELS,
2434 EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
2435 EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
2436 },
4970 .verify_signature = iwlcore_eeprom_verify_signature, 2437 .verify_signature = iwlcore_eeprom_verify_signature,
4971 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 2438 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
4972 .release_semaphore = iwlcore_eeprom_release_semaphore, 2439 .release_semaphore = iwlcore_eeprom_release_semaphore,
2440 .check_version = iwl4965_eeprom_check_version,
2441 .query_addr = iwlcore_eeprom_query_addr,
4973 }, 2442 },
4974 .radio_kill_sw = iwl4965_radio_kill_sw, 2443 .set_power = iwl4965_set_power,
2444 .send_tx_power = iwl4965_send_tx_power,
2445 .update_chain_flags = iwl4965_update_chain_flags,
2446 .temperature = iwl4965_temperature_calib,
4975}; 2447};
4976 2448
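The iwl4965_lib and iwl4965_hcmd_utils tables above are how device-specific code is plugged into the shared iwlcore layer: the core only ever calls through the function pointers. A minimal sketch of that ops-table pattern with invented names:

#include <stdio.h>

/* Invented, heavily simplified version of the driver's ops indirection */
struct demo_lib_ops {
	int (*set_hw_params)(void *priv);
	void (*rx_handler_setup)(void *priv);
};

static int demo_4965_set_hw_params(void *priv)
{
	(void)priv;
	printf("4965: set hw params\n");
	return 0;
}

static void demo_4965_rx_handler_setup(void *priv)
{
	(void)priv;
	printf("4965: install rx handlers\n");
}

static const struct demo_lib_ops demo_4965_lib = {
	.set_hw_params = demo_4965_set_hw_params,
	.rx_handler_setup = demo_4965_rx_handler_setup,
};

/* Core code stays device-agnostic: it only sees the ops table */
static void demo_core_probe(const struct demo_lib_ops *lib, void *priv)
{
	lib->set_hw_params(priv);
	lib->rx_handler_setup(priv);
}

int main(void)
{
	demo_core_probe(&demo_4965_lib, NULL);
	return 0;
}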
4977static struct iwl_ops iwl4965_ops = { 2449static struct iwl_ops iwl4965_ops = {
@@ -4984,10 +2456,14 @@ struct iwl_cfg iwl4965_agn_cfg = {
4984 .name = "4965AGN", 2456 .name = "4965AGN",
4985 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode", 2457 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
4986 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 2458 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2459 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
4987 .ops = &iwl4965_ops, 2460 .ops = &iwl4965_ops,
4988 .mod_params = &iwl4965_mod_params, 2461 .mod_params = &iwl4965_mod_params,
4989}; 2462};
4990 2463
2464/* Module firmware */
2465MODULE_FIRMWARE("iwlwifi-4965" IWL4965_UCODE_API ".ucode");
2466
4991module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444); 2467module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
4992MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 2468MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4993module_param_named(disable, iwl4965_mod_params.disable, int, 0444); 2469module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
@@ -5002,10 +2478,14 @@ MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
5002 2478
5003module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444); 2479module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
5004MODULE_PARM_DESC(queues_num, "number of hw queues."); 2480MODULE_PARM_DESC(queues_num, "number of hw queues.");
5005
5006/* QoS */ 2481/* QoS */
5007module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444); 2482module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
5008MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); 2483MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
2484/* 11n */
2485module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444);
2486MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
5009module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444); 2487module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
5010MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 2488MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
5011 2489
2490module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444);
2491MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
new file mode 100644
index 000000000000..17d4f31c5934
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -0,0 +1,134 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-5000-hw.h) only for hardware-related definitions.
65 * Use iwl-5000-commands.h for uCode API definitions.
66 */
67
68#ifndef __iwl_5000_hw_h__
69#define __iwl_5000_hw_h__
70
71#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
72#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
73#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
74#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
75
 76/* EEPROM */
77#define IWL_5000_EEPROM_IMG_SIZE 2048
78
79
80#define IWL50_MAX_WIN_SIZE 64
81#define IWL50_QUEUE_SIZE 256
82#define IWL50_CMD_FIFO_NUM 7
83#define IWL50_NUM_QUEUES 20
84#define IWL50_NUM_AMPDU_QUEUES 10
85#define IWL50_FIRST_AMPDU_QUEUE 10
86
87#define IWL_sta_id_POS 12
88#define IWL_sta_id_LEN 4
89#define IWL_sta_id_SYM val
90
91/* Fixed (non-configurable) rx data from phy */
92
93/* Base physical address of iwl5000_shared is provided to SCD_DRAM_BASE_ADDR
94 * and &iwl5000_shared.val0 is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG */
95struct iwl5000_sched_queue_byte_cnt_tbl {
96 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL50_QUEUE_SIZE +
97 IWL50_MAX_WIN_SIZE];
98} __attribute__ ((packed));
99
100struct iwl5000_shared {
101 struct iwl5000_sched_queue_byte_cnt_tbl
102 queues_byte_cnt_tbls[IWL50_NUM_QUEUES];
103 __le32 rb_closed;
104
105 /* __le32 rb_closed_stts_rb_num:12; */
106#define IWL_rb_closed_stts_rb_num_POS 0
107#define IWL_rb_closed_stts_rb_num_LEN 12
108#define IWL_rb_closed_stts_rb_num_SYM rb_closed
109 /* __le32 rsrv1:4; */
110 /* __le32 rb_closed_stts_rx_frame_num:12; */
111#define IWL_rb_closed_stts_rx_frame_num_POS 16
112#define IWL_rb_closed_stts_rx_frame_num_LEN 12
113#define IWL_rb_closed_stts_rx_frame_num_SYM rb_closed
114 /* __le32 rsrv2:4; */
115
116 __le32 frm_finished;
117 /* __le32 frame_finished_stts_rb_num:12; */
118#define IWL_frame_finished_stts_rb_num_POS 0
119#define IWL_frame_finished_stts_rb_num_LEN 12
120#define IWL_frame_finished_stts_rb_num_SYM frm_finished
121 /* __le32 rsrv3:4; */
122 /* __le32 frame_finished_stts_rx_frame_num:12; */
123#define IWL_frame_finished_stts_rx_frame_num_POS 16
124#define IWL_frame_finished_stts_rx_frame_num_LEN 12
125#define IWL_frame_finished_stts_rx_frame_num_SYM frm_finished
126 /* __le32 rsrv4:4; */
127
128 __le32 padding1; /* so that allocation will be aligned to 16B */
129 __le32 padding2;
130} __attribute__ ((packed));
131
132
133#endif /* __iwl_5000_hw_h__ */
134
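iwl5000_shared above describes packed status words through *_POS/*_LEN macro pairs rather than C bit-fields. A small sketch of how such a pair would be used to pull a field out of a host-order word; the DEMO_* names and the extraction macro are mine, not driver macros:

#include <stdio.h>
#include <stdint.h>

/* Field description in the style of the *_POS/*_LEN pairs above:
 * the closed-RB number sits in bits 0-11 of the rb_closed word. */
#define DEMO_RB_CLOSED_RB_NUM_POS	0
#define DEMO_RB_CLOSED_RB_NUM_LEN	12

/* Extract LEN bits starting at POS from a host-order 32-bit word */
#define DEMO_GET_BITS(val, pos, len) \
	(((val) >> (pos)) & ((1u << (len)) - 1))

int main(void)
{
	uint32_t rb_closed = 0x00f30abc;	/* example status word */

	printf("closed rb num = 0x%x\n",
	       (unsigned)DEMO_GET_BITS(rb_closed, DEMO_RB_CLOSED_RB_NUM_POS,
				       DEMO_RB_CLOSED_RB_NUM_LEN));	/* 0xabc */
	return 0;
}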
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
new file mode 100644
index 000000000000..878d6193b232
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -0,0 +1,1580 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 *
24 *****************************************************************************/
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/version.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-sta.h"
45#include "iwl-helpers.h"
46#include "iwl-5000-hw.h"
47
48#define IWL5000_UCODE_API "-1"
49
50static const u16 iwl5000_default_queue_to_tx_fifo[] = {
51 IWL_TX_FIFO_AC3,
52 IWL_TX_FIFO_AC2,
53 IWL_TX_FIFO_AC1,
54 IWL_TX_FIFO_AC0,
55 IWL50_CMD_FIFO_NUM,
56 IWL_TX_FIFO_HCCA_1,
57 IWL_TX_FIFO_HCCA_2
58};
59
60/* FIXME: same implementation as 4965 */
61static int iwl5000_apm_stop_master(struct iwl_priv *priv)
62{
63 int ret = 0;
64 unsigned long flags;
65
66 spin_lock_irqsave(&priv->lock, flags);
67
68 /* set stop master bit */
69 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
70
71 ret = iwl_poll_bit(priv, CSR_RESET,
72 CSR_RESET_REG_FLAG_MASTER_DISABLED,
73 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
74 if (ret < 0)
75 goto out;
76
77out:
78 spin_unlock_irqrestore(&priv->lock, flags);
79 IWL_DEBUG_INFO("stop master\n");
80
81 return ret;
82}
83
84
85static int iwl5000_apm_init(struct iwl_priv *priv)
86{
87 int ret = 0;
88
89 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
90 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
91
 92 /* disable L0s without affecting L1 (don't wait for ICH L0s bug W/A) */
93 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
94 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
95
96 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
97
98 /* set "initialization complete" bit to move adapter
99 * D0U* --> D0A* state */
100 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
101
102 /* wait for clock stabilization */
103 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
104 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
105 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
106 if (ret < 0) {
107 IWL_DEBUG_INFO("Failed to init the card\n");
108 return ret;
109 }
110
111 ret = iwl_grab_nic_access(priv);
112 if (ret)
113 return ret;
114
115 /* enable DMA */
116 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
117
118 udelay(20);
119
120 /* disable L1-Active */
121 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
122 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
123
124 iwl_release_nic_access(priv);
125
126 return ret;
127}
128
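iwl5000_apm_init above waits for hardware bits (clock ready, master disabled) via iwl_poll_bit(), a bounded poll on a CSR. A stand-alone sketch of that poll-with-timeout idiom against a simulated register; real driver code reads a memory-mapped CSR and delays between polls:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_csr;	/* stands in for a memory-mapped register */

static uint32_t demo_read_reg(void)
{
	static int reads;

	/* Simulate the hardware raising the bit after a few reads */
	if (++reads == 3)
		fake_csr |= 0x1;
	return fake_csr;
}

/* Poll until (reg & bits) == mask or the poll budget runs out.
 * Returns the number of polls used, or -1 on timeout. */
static int demo_poll_bit(uint32_t bits, uint32_t mask, int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		if ((demo_read_reg() & bits) == mask)
			return i;
		/* a real driver would udelay()/usleep() between polls */
	}
	return -1;
}

int main(void)
{
	int ret = demo_poll_bit(0x1, 0x1, 10);

	if (ret < 0)
		printf("timed out waiting for clock-ready\n");
	else
		printf("clock ready after %d polls\n", ret);
	return 0;
}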
129/* FIXME: this is identical to 4965 */
130static void iwl5000_apm_stop(struct iwl_priv *priv)
131{
132 unsigned long flags;
133
134 iwl5000_apm_stop_master(priv);
135
136 spin_lock_irqsave(&priv->lock, flags);
137
138 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
139
140 udelay(10);
141
142 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
143
144 spin_unlock_irqrestore(&priv->lock, flags);
145}
146
147
148static int iwl5000_apm_reset(struct iwl_priv *priv)
149{
150 int ret = 0;
151 unsigned long flags;
152
153 iwl5000_apm_stop_master(priv);
154
155 spin_lock_irqsave(&priv->lock, flags);
156
157 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
158
159 udelay(10);
160
161
162 /* FIXME: put here L1A -L0S w/a */
163
164 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
165
166 /* set "initialization complete" bit to move adapter
167 * D0U* --> D0A* state */
168 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
169
170 /* wait for clock stabilization */
171 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
172 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
173 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
174 if (ret < 0) {
175 IWL_DEBUG_INFO("Failed to init the card\n");
176 goto out;
177 }
178
179 ret = iwl_grab_nic_access(priv);
180 if (ret)
181 goto out;
182
183 /* enable DMA */
184 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
185
186 udelay(20);
187
188 /* disable L1-Active */
189 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
190 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
191
192 iwl_release_nic_access(priv);
193
194out:
195 spin_unlock_irqrestore(&priv->lock, flags);
196
197 return ret;
198}
199
200
201static void iwl5000_nic_config(struct iwl_priv *priv)
202{
203 unsigned long flags;
204 u16 radio_cfg;
205 u8 val_link;
206
207 spin_lock_irqsave(&priv->lock, flags);
208
209 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
210
211 /* L1 is enabled by BIOS */
212 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
213 /* L0S disabled, L1A enabled */
214 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
215 else
216 /* L0S enabled L1A disabled */
217 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
218
219 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
220
221 /* write radio config values to register */
222 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_5000_RF_CFG_TYPE_MAX)
223 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
224 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
225 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
226 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
227
228 /* set CSR_HW_CONFIG_REG for uCode use */
229 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
230 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
231 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
232
233 spin_unlock_irqrestore(&priv->lock, flags);
234}
235
236
237
238/*
239 * EEPROM
240 */
241static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
242{
243 u16 offset = 0;
244
245 if ((address & INDIRECT_ADDRESS) == 0)
246 return address;
247
248 switch (address & INDIRECT_TYPE_MSK) {
249 case INDIRECT_HOST:
250 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
251 break;
252 case INDIRECT_GENERAL:
253 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
254 break;
255 case INDIRECT_REGULATORY:
256 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
257 break;
258 case INDIRECT_CALIBRATION:
259 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
260 break;
261 case INDIRECT_PROCESS_ADJST:
262 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
263 break;
264 case INDIRECT_OTHERS:
265 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
266 break;
267 default:
268 IWL_ERROR("illegal indirect type: 0x%X\n",
269 address & INDIRECT_TYPE_MSK);
270 break;
271 }
272
273 /* translate the offset from words to bytes */
274 return (address & ADDRESS_MSK) + (offset << 1);
275}
276
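eeprom_indirect_address above resolves an indirect EEPROM address by adding a link offset, converted from 16-bit words to bytes, to the masked address. A stand-alone sketch of that arithmetic with invented flag and mask values; the real INDIRECT_*/ADDRESS_MSK constants live in the 5000 EEPROM header:

#include <stdio.h>
#include <stdint.h>

/* Invented encoding: bit 31 flags an indirect address and the low
 * 16 bits carry the offset inside the linked EEPROM section. */
#define DEMO_INDIRECT_FLAG	0x80000000u
#define DEMO_ADDRESS_MSK	0x0000ffffu

static uint32_t demo_resolve(uint32_t address, uint16_t link_word_offset)
{
	if (!(address & DEMO_INDIRECT_FLAG))
		return address;	/* already a direct byte address */

	/* the link word counts 16-bit words; convert to bytes, then add */
	return (address & DEMO_ADDRESS_MSK) + ((uint32_t)link_word_offset << 1);
}

int main(void)
{
	printf("direct   -> 0x%x\n", (unsigned)demo_resolve(0x120, 0x40));
	printf("indirect -> 0x%x\n",
	       (unsigned)demo_resolve(DEMO_INDIRECT_FLAG | 0x20, 0x40));
	return 0;
}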
277static int iwl5000_eeprom_check_version(struct iwl_priv *priv)
278{
279 u16 eeprom_ver;
280 struct iwl_eeprom_calib_hdr {
281 u8 version;
282 u8 pa_type;
283 u16 voltage;
284 } *hdr;
285
286 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
287
288 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
289 EEPROM_5000_CALIB_ALL);
290
291 if (eeprom_ver < EEPROM_5000_EEPROM_VERSION ||
292 hdr->version < EEPROM_5000_TX_POWER_VERSION)
293 goto err;
294
295 return 0;
296err:
297 IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
298 eeprom_ver, EEPROM_5000_EEPROM_VERSION,
299 hdr->version, EEPROM_5000_TX_POWER_VERSION);
300 return -EINVAL;
301
302}
303
304static void iwl5000_gain_computation(struct iwl_priv *priv,
305 u32 average_noise[NUM_RX_CHAINS],
306 u16 min_average_noise_antenna_i,
307 u32 min_average_noise)
308{
309 int i;
310 s32 delta_g;
311 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
312
313 /* Find Gain Code for the antennas B and C */
314 for (i = 1; i < NUM_RX_CHAINS; i++) {
315 if ((data->disconn_array[i])) {
316 data->delta_gain_code[i] = 0;
317 continue;
318 }
319 delta_g = (1000 * ((s32)average_noise[0] -
320 (s32)average_noise[i])) / 1500;
321 /* bound gain by 2 bits value max, 3rd bit is sign */
322 data->delta_gain_code[i] =
323 min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
324
325 if (delta_g < 0)
326 /* set negative sign */
327 data->delta_gain_code[i] |= (1 << 2);
328 }
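	/* Illustrative example (assuming CHAIN_NOISE_MAX_DELTA_GAIN_CODE
	 * is 3): with average_noise[0] = 100 and average_noise[i] = 94,
	 * delta_g = 1000 * (100 - 94) / 1500 = 4, which is clipped to 3;
	 * a chain noisier than chain A gives delta_g < 0, so bit 2 is set
	 * as the sign bit. */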
329
330 IWL_DEBUG_CALIB("Delta gains: ANT_B = %d ANT_C = %d\n",
331 data->delta_gain_code[1], data->delta_gain_code[2]);
332
333 if (!data->radio_write) {
334 struct iwl5000_calibration_chain_noise_gain_cmd cmd;
335 memset(&cmd, 0, sizeof(cmd));
336
337 cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
338 cmd.delta_gain_1 = data->delta_gain_code[1];
339 cmd.delta_gain_2 = data->delta_gain_code[2];
340 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
341 sizeof(cmd), &cmd, NULL);
342
343 data->radio_write = 1;
344 data->state = IWL_CHAIN_NOISE_CALIBRATED;
345 }
346
347 data->chain_noise_a = 0;
348 data->chain_noise_b = 0;
349 data->chain_noise_c = 0;
350 data->chain_signal_a = 0;
351 data->chain_signal_b = 0;
352 data->chain_signal_c = 0;
353 data->beacon_count = 0;
354}
355
356static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
357{
358 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
359
360 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
361 struct iwl5000_calibration_chain_noise_reset_cmd cmd;
362
363 memset(&cmd, 0, sizeof(cmd));
364 cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
365 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
366 sizeof(cmd), &cmd))
367 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
368 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
369 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
370 }
371}
372
373static void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
374 __le32 *tx_flags)
375{
376 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) ||
377 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
378 *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
379 else
380 *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
381}
382
383static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
384 .min_nrg_cck = 95,
385 .max_nrg_cck = 0,
386 .auto_corr_min_ofdm = 90,
387 .auto_corr_min_ofdm_mrc = 170,
388 .auto_corr_min_ofdm_x1 = 120,
389 .auto_corr_min_ofdm_mrc_x1 = 240,
390
391 .auto_corr_max_ofdm = 120,
392 .auto_corr_max_ofdm_mrc = 210,
393 .auto_corr_max_ofdm_x1 = 155,
394 .auto_corr_max_ofdm_mrc_x1 = 290,
395
396 .auto_corr_min_cck = 125,
397 .auto_corr_max_cck = 200,
398 .auto_corr_min_cck_mrc = 170,
399 .auto_corr_max_cck_mrc = 400,
400 .nrg_th_cck = 95,
401 .nrg_th_ofdm = 95,
402};
403
404static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
405 size_t offset)
406{
407 u32 address = eeprom_indirect_address(priv, offset);
408 BUG_ON(address >= priv->cfg->eeprom_size);
409 return &priv->eeprom[address];
410}
411
412/*
413 * Calibration
414 */
415static int iwl5000_send_Xtal_calib(struct iwl_priv *priv)
416{
417 u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
418
419 struct iwl5000_calibration cal_cmd = {
420 .op_code = IWL5000_PHY_CALIBRATE_CRYSTAL_FRQ_CMD,
421 .data = {
422 (u8)xtal_calib[0],
423 (u8)xtal_calib[1],
424 }
425 };
426
427 return iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
428 sizeof(cal_cmd), &cal_cmd);
429}
430
431static int iwl5000_send_calib_results(struct iwl_priv *priv)
432{
433 int ret = 0;
434
435 struct iwl_host_cmd hcmd = {
436 .id = REPLY_PHY_CALIBRATION_CMD,
437 .meta.flags = CMD_SIZE_HUGE,
438 };
439
440 if (priv->calib_results.lo_res) {
441 hcmd.len = priv->calib_results.lo_res_len;
442 hcmd.data = priv->calib_results.lo_res;
443 ret = iwl_send_cmd_sync(priv, &hcmd);
444
445 if (ret)
446 goto err;
447 }
448
449 if (priv->calib_results.tx_iq_res) {
450 hcmd.len = priv->calib_results.tx_iq_res_len;
451 hcmd.data = priv->calib_results.tx_iq_res;
452 ret = iwl_send_cmd_sync(priv, &hcmd);
453
454 if (ret)
455 goto err;
456 }
457
458 if (priv->calib_results.tx_iq_perd_res) {
459 hcmd.len = priv->calib_results.tx_iq_perd_res_len;
460 hcmd.data = priv->calib_results.tx_iq_perd_res;
461 ret = iwl_send_cmd_sync(priv, &hcmd);
462
463 if (ret)
464 goto err;
465 }
466
467 return 0;
468err:
469 IWL_ERROR("Error %d\n", ret);
470 return ret;
471}
472
473static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
474{
475 struct iwl5000_calib_cfg_cmd calib_cfg_cmd;
476 struct iwl_host_cmd cmd = {
477 .id = CALIBRATION_CFG_CMD,
478 .len = sizeof(struct iwl5000_calib_cfg_cmd),
479 .data = &calib_cfg_cmd,
480 };
481
482 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
483 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
484 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
485 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
486 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
487
488 return iwl_send_cmd(priv, &cmd);
489}
490
491static void iwl5000_rx_calib_result(struct iwl_priv *priv,
492 struct iwl_rx_mem_buffer *rxb)
493{
494 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
495 struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw;
496 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
497
498 iwl_free_calib_results(priv);
499
500	/* subtract the size of the length field itself */
501 len -= 4;
502
503 switch (hdr->op_code) {
504 case IWL5000_PHY_CALIBRATE_LO_CMD:
505 priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC);
506 priv->calib_results.lo_res_len = len;
507 memcpy(priv->calib_results.lo_res, pkt->u.raw, len);
508 break;
509 case IWL5000_PHY_CALIBRATE_TX_IQ_CMD:
510 priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC);
511 priv->calib_results.tx_iq_res_len = len;
512 memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len);
513 break;
514 case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD:
515 priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC);
516 priv->calib_results.tx_iq_perd_res_len = len;
517 memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len);
518 break;
519 default:
520 IWL_ERROR("Unknown calibration notification %d\n",
521 hdr->op_code);
522 return;
523 }
524}
525
526static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
527 struct iwl_rx_mem_buffer *rxb)
528{
529 IWL_DEBUG_INFO("Init. calibration is completed, restarting fw.\n");
530 queue_work(priv->workqueue, &priv->restart);
531}
532
533/*
534 * ucode
535 */
536static int iwl5000_load_section(struct iwl_priv *priv,
537 struct fw_desc *image,
538 u32 dst_addr)
539{
540 int ret = 0;
541 unsigned long flags;
542
543 dma_addr_t phy_addr = image->p_addr;
544 u32 byte_cnt = image->len;
545
546 spin_lock_irqsave(&priv->lock, flags);
547 ret = iwl_grab_nic_access(priv);
548 if (ret) {
549 spin_unlock_irqrestore(&priv->lock, flags);
550 return ret;
551 }
552
553 iwl_write_direct32(priv,
554 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
555 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
556
557 iwl_write_direct32(priv,
558 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
559
560 iwl_write_direct32(priv,
561 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
562 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
563
564	/* FIXME: write the MSB of the phy_addr in CTRL1
565 * iwl_write_direct32(priv,
566 IWL_FH_TFDIB_CTRL1_REG(IWL_FH_SRVC_CHNL),
567 ((phy_addr & MSB_MSK)
568 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_count);
569 */
570 iwl_write_direct32(priv,
571 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), byte_cnt);
572 iwl_write_direct32(priv,
573 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
574 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
575 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
576 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
577
578 iwl_write_direct32(priv,
579 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
580 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
581 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
582 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
583
584 iwl_release_nic_access(priv);
585 spin_unlock_irqrestore(&priv->lock, flags);
586 return 0;
587}
588
589static int iwl5000_load_given_ucode(struct iwl_priv *priv,
590 struct fw_desc *inst_image,
591 struct fw_desc *data_image)
592{
593 int ret = 0;
594
595 ret = iwl5000_load_section(
596 priv, inst_image, RTC_INST_LOWER_BOUND);
597 if (ret)
598 return ret;
599
600 IWL_DEBUG_INFO("INST uCode section being loaded...\n");
601 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
602 priv->ucode_write_complete, 5 * HZ);
603 if (ret == -ERESTARTSYS) {
604 IWL_ERROR("Could not load the INST uCode section due "
605 "to interrupt\n");
606 return ret;
607 }
608 if (!ret) {
609 IWL_ERROR("Could not load the INST uCode section\n");
610 return -ETIMEDOUT;
611 }
612
613 priv->ucode_write_complete = 0;
614
615 ret = iwl5000_load_section(
616 priv, data_image, RTC_DATA_LOWER_BOUND);
617 if (ret)
618 return ret;
619
620 IWL_DEBUG_INFO("DATA uCode section being loaded...\n");
621
622 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
623 priv->ucode_write_complete, 5 * HZ);
624 if (ret == -ERESTARTSYS) {
625		IWL_ERROR("Could not load the DATA uCode section due "
626 "to interrupt\n");
627 return ret;
628 } else if (!ret) {
629 IWL_ERROR("Could not load the DATA uCode section\n");
630 return -ETIMEDOUT;
631 } else
632 ret = 0;
633
634 priv->ucode_write_complete = 0;
635
636 return ret;
637}
638
639static int iwl5000_load_ucode(struct iwl_priv *priv)
640{
641 int ret = 0;
642
643 /* check whether init ucode should be loaded, or rather runtime ucode */
644 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
645 IWL_DEBUG_INFO("Init ucode found. Loading init ucode...\n");
646 ret = iwl5000_load_given_ucode(priv,
647 &priv->ucode_init, &priv->ucode_init_data);
648 if (!ret) {
649 IWL_DEBUG_INFO("Init ucode load complete.\n");
650 priv->ucode_type = UCODE_INIT;
651 }
652 } else {
653 IWL_DEBUG_INFO("Init ucode not found, or already loaded. "
654 "Loading runtime ucode...\n");
655 ret = iwl5000_load_given_ucode(priv,
656 &priv->ucode_code, &priv->ucode_data);
657 if (!ret) {
658 IWL_DEBUG_INFO("Runtime ucode load complete.\n");
659 priv->ucode_type = UCODE_RT;
660 }
661 }
662
663 return ret;
664}
665
666static void iwl5000_init_alive_start(struct iwl_priv *priv)
667{
668 int ret = 0;
669
670 /* Check alive response for "valid" sign from uCode */
671 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
672 /* We had an error bringing up the hardware, so take it
673 * all the way back down so we can try again */
674 IWL_DEBUG_INFO("Initialize Alive failed.\n");
675 goto restart;
676 }
677
678 /* initialize uCode was loaded... verify inst image.
679 * This is a paranoid check, because we would not have gotten the
680 * "initialize" alive if code weren't properly loaded. */
681 if (iwl_verify_ucode(priv)) {
682		/* "initialize" instruction load was bad;
683 * take it all the way back down so we can try again */
684 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
685 goto restart;
686 }
687
688 iwl_clear_stations_table(priv);
689 ret = priv->cfg->ops->lib->alive_notify(priv);
690 if (ret) {
691 IWL_WARNING("Could not complete ALIVE transition: %d\n", ret);
692 goto restart;
693 }
694
695 iwl5000_send_calib_cfg(priv);
696 return;
697
698restart:
699 /* real restart (first load init_ucode) */
700 queue_work(priv->workqueue, &priv->restart);
701}
702
703static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
704 int txq_id, u32 index)
705{
706 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
707 (index & 0xff) | (txq_id << 8));
708 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
709}
710
711static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
712 struct iwl_tx_queue *txq,
713 int tx_fifo_id, int scd_retry)
714{
715 int txq_id = txq->q.id;
716	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
717
718 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
719 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
720 (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
721 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
722 IWL50_SCD_QUEUE_STTS_REG_MSK);
723
724 txq->sched_retry = scd_retry;
725
726 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
727 active ? "Activate" : "Deactivate",
728 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
729}
730
731static int iwl5000_send_wimax_coex(struct iwl_priv *priv)
732{
733 struct iwl_wimax_coex_cmd coex_cmd;
734
735 memset(&coex_cmd, 0, sizeof(coex_cmd));
736
737 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
738 sizeof(coex_cmd), &coex_cmd);
739}
740
741static int iwl5000_alive_notify(struct iwl_priv *priv)
742{
743 u32 a;
744 int i = 0;
745 unsigned long flags;
746 int ret;
747
748 spin_lock_irqsave(&priv->lock, flags);
749
750 ret = iwl_grab_nic_access(priv);
751 if (ret) {
752 spin_unlock_irqrestore(&priv->lock, flags);
753 return ret;
754 }
755
756 priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
757 a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
758 for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
759 a += 4)
760 iwl_write_targ_mem(priv, a, 0);
761 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
762 a += 4)
763 iwl_write_targ_mem(priv, a, 0);
764	for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET + sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
765 iwl_write_targ_mem(priv, a, 0);
766
767 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
768 (priv->shared_phys +
769 offsetof(struct iwl5000_shared, queues_byte_cnt_tbls)) >> 10);
770 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
771 IWL50_SCD_QUEUECHAIN_SEL_ALL(
772 priv->hw_params.max_txq_num));
773 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
774
775 /* initiate the queues */
776 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
777 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
778 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
779 iwl_write_targ_mem(priv, priv->scd_base_addr +
780 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
781 iwl_write_targ_mem(priv, priv->scd_base_addr +
782 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
783 sizeof(u32),
784 ((SCD_WIN_SIZE <<
785 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
786 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
787 ((SCD_FRAME_LIMIT <<
788 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
789 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
790 }
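	/* Illustrative note: the second context word of each queue packs
	 * the scheduler window size and frame limit per the _POS/_MSK
	 * macros above; assuming SCD_WIN_SIZE and SCD_FRAME_LIMIT are
	 * both 64 and the frame limit occupies the upper 16 bits, the
	 * value written would be 0x00400040. */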
791
792 iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
793 IWL_MASK(0, priv->hw_params.max_txq_num));
794
795 /* Activate all Tx DMA/FIFO channels */
796 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
797
798 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
799 /* map qos queues to fifos one-to-one */
800 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
801 int ac = iwl5000_default_queue_to_tx_fifo[i];
802 iwl_txq_ctx_activate(priv, i);
803 iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
804 }
805 /* TODO - need to initialize those FIFOs inside the loop above,
806 * not only mark them as active */
807 iwl_txq_ctx_activate(priv, 4);
808 iwl_txq_ctx_activate(priv, 7);
809 iwl_txq_ctx_activate(priv, 8);
810 iwl_txq_ctx_activate(priv, 9);
811
812 iwl_release_nic_access(priv);
813 spin_unlock_irqrestore(&priv->lock, flags);
814
815
816 iwl5000_send_wimax_coex(priv);
817
818 iwl5000_send_Xtal_calib(priv);
819
820 if (priv->ucode_type == UCODE_RT)
821 iwl5000_send_calib_results(priv);
822
823 return 0;
824}
825
826static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
827{
828 if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) ||
829 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
830 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
831 IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
832 return -EINVAL;
833 }
834
835 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
836 priv->hw_params.first_ampdu_q = IWL50_FIRST_AMPDU_QUEUE;
837 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
838 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
839 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
840 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
841 priv->hw_params.max_bsm_size = 0;
842 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_2GHZ) |
843 BIT(IEEE80211_BAND_5GHZ);
844 priv->hw_params.sens = &iwl5000_sensitivity;
845
846 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
847 case CSR_HW_REV_TYPE_5100:
848 case CSR_HW_REV_TYPE_5150:
849 priv->hw_params.tx_chains_num = 1;
850 priv->hw_params.rx_chains_num = 2;
851 /* FIXME: move to ANT_A, ANT_B, ANT_C enum */
852 priv->hw_params.valid_tx_ant = ANT_A;
853 priv->hw_params.valid_rx_ant = ANT_AB;
854 break;
855 case CSR_HW_REV_TYPE_5300:
856 case CSR_HW_REV_TYPE_5350:
857 priv->hw_params.tx_chains_num = 3;
858 priv->hw_params.rx_chains_num = 3;
859 priv->hw_params.valid_tx_ant = ANT_ABC;
860 priv->hw_params.valid_rx_ant = ANT_ABC;
861 break;
862 }
863
864 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
865 case CSR_HW_REV_TYPE_5100:
866 case CSR_HW_REV_TYPE_5300:
867		/* 5X00 wants the threshold in Celsius */
868 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
869 break;
870 case CSR_HW_REV_TYPE_5150:
871 case CSR_HW_REV_TYPE_5350:
872		/* 5X50 wants the threshold in Kelvin */
873 priv->hw_params.ct_kill_threshold =
874 CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
875 break;
876 }
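	/* Illustrative example (assuming CT_KILL_THRESHOLD is 110 and
	 * CELSIUS_TO_KELVIN(x) adds 273): a 5150/5350 part ends up with a
	 * threshold of 383, while a 5100/5300 part keeps 110. */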
877
878 return 0;
879}
880
881static int iwl5000_alloc_shared_mem(struct iwl_priv *priv)
882{
883 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
884 sizeof(struct iwl5000_shared),
885 &priv->shared_phys);
886 if (!priv->shared_virt)
887 return -ENOMEM;
888
889 memset(priv->shared_virt, 0, sizeof(struct iwl5000_shared));
890
891 priv->rb_closed_offset = offsetof(struct iwl5000_shared, rb_closed);
892
893 return 0;
894}
895
896static void iwl5000_free_shared_mem(struct iwl_priv *priv)
897{
898 if (priv->shared_virt)
899 pci_free_consistent(priv->pci_dev,
900 sizeof(struct iwl5000_shared),
901 priv->shared_virt,
902 priv->shared_phys);
903}
904
905static int iwl5000_shared_mem_rx_idx(struct iwl_priv *priv)
906{
907 struct iwl5000_shared *s = priv->shared_virt;
908 return le32_to_cpu(s->rb_closed) & 0xFFF;
909}
910
911/**
912 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
913 */
914static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
915 struct iwl_tx_queue *txq,
916 u16 byte_cnt)
917{
918 struct iwl5000_shared *shared_data = priv->shared_virt;
919 int txq_id = txq->q.id;
920 u8 sec_ctl = 0;
921 u8 sta = 0;
922 int len;
923
924 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
925
926 if (txq_id != IWL_CMD_QUEUE_NUM) {
927 sta = txq->cmd[txq->q.write_ptr].cmd.tx.sta_id;
928 sec_ctl = txq->cmd[txq->q.write_ptr].cmd.tx.sec_ctl;
929
930 switch (sec_ctl & TX_CMD_SEC_MSK) {
931 case TX_CMD_SEC_CCM:
932 len += CCMP_MIC_LEN;
933 break;
934 case TX_CMD_SEC_TKIP:
935 len += TKIP_ICV_LEN;
936 break;
937 case TX_CMD_SEC_WEP:
938 len += WEP_IV_LEN + WEP_ICV_LEN;
939 break;
940 }
941 }
942
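	/* Illustrative note: each byte-count table entry is a 16-bit word;
	 * judging from the invalidate path below (1 | (sta << 12)), the
	 * length sits in the low 12 bits and the station id in the top 4.
	 * The first IWL50_MAX_WIN_SIZE entries are also mirrored past
	 * IWL50_QUEUE_SIZE so a scheduler window that wraps around the
	 * queue can still be read contiguously. */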
943 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
944 tfd_offset[txq->q.write_ptr], byte_cnt, len);
945
946 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
947 tfd_offset[txq->q.write_ptr], sta_id, sta);
948
949 if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
950 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
951 tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
952 byte_cnt, len);
953 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
954 tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
955 sta_id, sta);
956 }
957}
958
959static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
960 struct iwl_tx_queue *txq)
961{
962 int txq_id = txq->q.id;
963 struct iwl5000_shared *shared_data = priv->shared_virt;
964 u8 sta = 0;
965
966 if (txq_id != IWL_CMD_QUEUE_NUM)
967 sta = txq->cmd[txq->q.read_ptr].cmd.tx.sta_id;
968
969 shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
970 val = cpu_to_le16(1 | (sta << 12));
971
972 if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
973 shared_data->queues_byte_cnt_tbls[txq_id].
974 tfd_offset[IWL50_QUEUE_SIZE + txq->q.read_ptr].
975 val = cpu_to_le16(1 | (sta << 12));
976 }
977}
978
979static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
980 u16 txq_id)
981{
982 u32 tbl_dw_addr;
983 u32 tbl_dw;
984 u16 scd_q2ratid;
985
986 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
987
988 tbl_dw_addr = priv->scd_base_addr +
989 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
990
991 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
992
993 if (txq_id & 0x1)
994 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
995 else
996 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
997
998 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
999
1000 return 0;
1001}
1002static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
1003{
1004 /* Simply stop the queue, but don't change any configuration;
1005 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
1006 iwl_write_prph(priv,
1007 IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
1008 (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1009 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1010}
1011
1012static int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1013 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
1014{
1015 unsigned long flags;
1016 int ret;
1017 u16 ra_tid;
1018
1019 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
1020 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
1021 IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
1022 txq_id, IWL50_FIRST_AMPDU_QUEUE,
1023 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
1024 return -EINVAL;
1025 }
1026
1027 ra_tid = BUILD_RAxTID(sta_id, tid);
1028
1029 /* Modify device's station table to Tx this TID */
1030 iwl_sta_modify_enable_tid_tx(priv, sta_id, tid);
1031
1032 spin_lock_irqsave(&priv->lock, flags);
1033 ret = iwl_grab_nic_access(priv);
1034 if (ret) {
1035 spin_unlock_irqrestore(&priv->lock, flags);
1036 return ret;
1037 }
1038
1039 /* Stop this Tx queue before configuring it */
1040 iwl5000_tx_queue_stop_scheduler(priv, txq_id);
1041
1042 /* Map receiver-address / traffic-ID to this queue */
1043 iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
1044
1045 /* Set this queue as a chain-building queue */
1046 iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));
1047
1048 /* enable aggregations for the queue */
1049 iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));
1050
1051 /* Place first TFD at index corresponding to start sequence number.
1052 * Assumes that ssn_idx is valid (!= 0xFFF) */
1053 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1054 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
1055 iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
1056
1057 /* Set up Tx window size and frame limit for this queue */
1058 iwl_write_targ_mem(priv, priv->scd_base_addr +
1059 IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
1060 sizeof(u32),
1061 ((SCD_WIN_SIZE <<
1062 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1063 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1064 ((SCD_FRAME_LIMIT <<
1065 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1066 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1067
1068 iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
1069
1070 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
1071 iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
1072
1073 iwl_release_nic_access(priv);
1074 spin_unlock_irqrestore(&priv->lock, flags);
1075
1076 return 0;
1077}
1078
1079static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1080 u16 ssn_idx, u8 tx_fifo)
1081{
1082 int ret;
1083
1084 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
1085 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
1086 IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
1087 txq_id, IWL50_FIRST_AMPDU_QUEUE,
1088 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
1089 return -EINVAL;
1090 }
1091
1092 ret = iwl_grab_nic_access(priv);
1093 if (ret)
1094 return ret;
1095
1096 iwl5000_tx_queue_stop_scheduler(priv, txq_id);
1097
1098 iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));
1099
1100 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1101 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
1102	/* assumes that ssn_idx is valid (!= 0xFFF) */
1103 iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
1104
1105 iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
1106 iwl_txq_ctx_deactivate(priv, txq_id);
1107 iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
1108
1109 iwl_release_nic_access(priv);
1110
1111 return 0;
1112}
1113
1114static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
1115{
1116 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
1117 memcpy(data, cmd, size);
1118 return size;
1119}
1120
1121
1122/*
1123 * Activate/deactivate Tx DMA/FIFO channels according to the tx fifos mask;
1124 * must be called under priv->lock and mac access
1125 */
1126static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
1127{
1128 iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
1129}
1130
1131
1132static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
1133{
1134	return le32_to_cpup((__le32 *)&tx_resp->status +
1135 tx_resp->frame_count) & MAX_SN;
1136}
1137
1138static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1139 struct iwl_ht_agg *agg,
1140 struct iwl5000_tx_resp *tx_resp,
1141 int txq_id, u16 start_idx)
1142{
1143 u16 status;
1144 struct agg_tx_status *frame_status = &tx_resp->status;
1145 struct ieee80211_tx_info *info = NULL;
1146 struct ieee80211_hdr *hdr = NULL;
1147 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1148 int i, sh, idx;
1149 u16 seq;
1150
1151 if (agg->wait_for_ba)
1152 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
1153
1154 agg->frame_count = tx_resp->frame_count;
1155 agg->start_idx = start_idx;
1156 agg->rate_n_flags = rate_n_flags;
1157 agg->bitmap = 0;
1158
1159 /* # frames attempted by Tx command */
1160 if (agg->frame_count == 1) {
1161 /* Only one frame was attempted; no block-ack will arrive */
1162 status = le16_to_cpu(frame_status[0].status);
1163 idx = start_idx;
1164
1165 /* FIXME: code repetition */
1166 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
1167 agg->frame_count, agg->start_idx, idx);
1168
1169 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
1170 info->status.retry_count = tx_resp->failure_frame;
1171 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1172		info->flags |= iwl_is_tx_success(status) ?
1173 IEEE80211_TX_STAT_ACK : 0;
1174 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
1175
1176 /* FIXME: code repetition end */
1177
1178		IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure: %d\n",
1179 status & 0xff, tx_resp->failure_frame);
1180 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
1181
1182 agg->wait_for_ba = 0;
1183 } else {
1184 /* Two or more frames were attempted; expect block-ack */
1185 u64 bitmap = 0;
1186 int start = agg->start_idx;
1187
1188 /* Construct bit-map of pending frames within Tx window */
1189 for (i = 0; i < agg->frame_count; i++) {
1190 u16 sc;
1191 status = le16_to_cpu(frame_status[i].status);
1192 seq = le16_to_cpu(frame_status[i].sequence);
1193 idx = SEQ_TO_INDEX(seq);
1194 txq_id = SEQ_TO_QUEUE(seq);
1195
1196 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1197 AGG_TX_STATE_ABORT_MSK))
1198 continue;
1199
1200 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
1201 agg->frame_count, txq_id, idx);
1202
1203 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
1204
1205 sc = le16_to_cpu(hdr->seq_ctrl);
1206 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1207 IWL_ERROR("BUG_ON idx doesn't match seq control"
1208 " idx=%d, seq_idx=%d, seq=%d\n",
1209 idx, SEQ_TO_SN(sc),
1210 hdr->seq_ctrl);
1211 return -1;
1212 }
1213
1214 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
1215 i, idx, SEQ_TO_SN(sc));
1216
1217 sh = idx - start;
1218 if (sh > 64) {
1219 sh = (start - idx) + 0xff;
1220 bitmap = bitmap << sh;
1221 sh = 0;
1222 start = idx;
1223 } else if (sh < -64)
1224 sh = 0xff - (start - idx);
1225 else if (sh < 0) {
1226 sh = start - idx;
1227 start = idx;
1228 bitmap = bitmap << sh;
1229 sh = 0;
1230 }
1231 bitmap |= (1 << sh);
1232 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
1233 start, (u32)(bitmap & 0xFFFFFFFF));
1234 }
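		/* Illustrative example: with start = 10 and frames reported
		 * at indices 10, 11 and 13, sh becomes 0, 1 and 3 in turn,
		 * giving bitmap = 0b1011 relative to start_idx 10. */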
1235
1236 agg->bitmap = bitmap;
1237 agg->start_idx = start;
1238 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
1239 agg->frame_count, agg->start_idx,
1240 (unsigned long long)agg->bitmap);
1241
1242 if (bitmap)
1243 agg->wait_for_ba = 1;
1244 }
1245 return 0;
1246}
1247
1248static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1249 struct iwl_rx_mem_buffer *rxb)
1250{
1251 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1252 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1253 int txq_id = SEQ_TO_QUEUE(sequence);
1254 int index = SEQ_TO_INDEX(sequence);
1255 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1256 struct ieee80211_tx_info *info;
1257 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1258 u32 status = le16_to_cpu(tx_resp->status.status);
1259 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
1260 struct ieee80211_hdr *hdr;
1261 u8 *qc = NULL;
1262
1263 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
1264 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
1265 "is out of range [0-%d] %d %d\n", txq_id,
1266 index, txq->q.n_bd, txq->q.write_ptr,
1267 txq->q.read_ptr);
1268 return;
1269 }
1270
1271 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
1272 memset(&info->status, 0, sizeof(info->status));
1273
1274 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
1275 if (ieee80211_is_data_qos(hdr->frame_control)) {
1276 qc = ieee80211_get_qos_ctl(hdr);
1277 tid = qc[0] & 0xf;
1278 }
1279
1280 sta_id = iwl_get_ra_sta_id(priv, hdr);
1281 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
1282 IWL_ERROR("Station not known\n");
1283 return;
1284 }
1285
1286 if (txq->sched_retry) {
1287 const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
1288 struct iwl_ht_agg *agg = NULL;
1289
1290 if (!qc)
1291 return;
1292
1293 agg = &priv->stations[sta_id].tid[tid].agg;
1294
1295 iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
1296
1297 /* check if BAR is needed */
1298 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
1299 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1300
1301 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1302 int freed, ampdu_q;
1303 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
1304 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
1305				"%d index %d\n", scd_ssn, index);
1306 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1307 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1308
1309 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
1310 txq_id >= 0 && priv->mac80211_registered &&
1311 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) {
1312 /* calculate mac80211 ampdu sw queue to wake */
1313 ampdu_q = txq_id - IWL50_FIRST_AMPDU_QUEUE +
1314 priv->hw->queues;
1315 if (agg->state == IWL_AGG_OFF)
1316 ieee80211_wake_queue(priv->hw, txq_id);
1317 else
1318 ieee80211_wake_queue(priv->hw, ampdu_q);
1319 }
1320 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1321 }
1322 } else {
1323 info->status.retry_count = tx_resp->failure_frame;
1324 info->flags =
1325 iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0;
1326 iwl_hwrate_to_tx_control(priv,
1327 le32_to_cpu(tx_resp->rate_n_flags),
1328 info);
1329
1330 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags "
1331 "0x%x retries %d\n", txq_id,
1332 iwl_get_tx_fail_reason(status),
1333 status, le32_to_cpu(tx_resp->rate_n_flags),
1334 tx_resp->failure_frame);
1335
1336 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
1337 if (index != -1) {
1338 int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1339 if (tid != MAX_TID_COUNT)
1340 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1341 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
1342 (txq_id >= 0) && priv->mac80211_registered)
1343 ieee80211_wake_queue(priv->hw, txq_id);
1344 if (tid != MAX_TID_COUNT)
1345 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1346 }
1347 }
1348
1349 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
1350 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
1351}
1352
1353/* Currently 5000 is the superset of everything */
1354static u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
1355{
1356 return len;
1357}
1358
1359static void iwl5000_setup_deferred_work(struct iwl_priv *priv)
1360{
1361 /* in 5000 the tx power calibration is done in uCode */
1362 priv->disable_tx_power_cal = 1;
1363}
1364
1365static void iwl5000_rx_handler_setup(struct iwl_priv *priv)
1366{
1367 /* init calibration handlers */
1368 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
1369 iwl5000_rx_calib_result;
1370 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
1371 iwl5000_rx_calib_complete;
1372 priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx;
1373}
1374
1375
1376static int iwl5000_hw_valid_rtc_data_addr(u32 addr)
1377{
1378 return (addr >= RTC_DATA_LOWER_BOUND) &&
1379 (addr < IWL50_RTC_DATA_UPPER_BOUND);
1380}
1381
1382static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
1383{
1384 int ret = 0;
1385 struct iwl5000_rxon_assoc_cmd rxon_assoc;
1386 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1387 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1388
1389 if ((rxon1->flags == rxon2->flags) &&
1390 (rxon1->filter_flags == rxon2->filter_flags) &&
1391 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1392 (rxon1->ofdm_ht_single_stream_basic_rates ==
1393 rxon2->ofdm_ht_single_stream_basic_rates) &&
1394 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1395 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1396 (rxon1->ofdm_ht_triple_stream_basic_rates ==
1397 rxon2->ofdm_ht_triple_stream_basic_rates) &&
1398 (rxon1->acquisition_data == rxon2->acquisition_data) &&
1399 (rxon1->rx_chain == rxon2->rx_chain) &&
1400 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1401 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1402 return 0;
1403 }
1404
1405 rxon_assoc.flags = priv->staging_rxon.flags;
1406 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1407 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1408 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1409 rxon_assoc.reserved1 = 0;
1410 rxon_assoc.reserved2 = 0;
1411 rxon_assoc.reserved3 = 0;
1412 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1413 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1414 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1415 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1416 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1417 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
1418 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
1419 rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
1420
1421 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1422 sizeof(rxon_assoc), &rxon_assoc, NULL);
1423 if (ret)
1424 return ret;
1425
1426 return ret;
1427}
1428static int iwl5000_send_tx_power(struct iwl_priv *priv)
1429{
1430 struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
1431
1432	/* the command takes half-dBm units, so multiply by 2 */
1433 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
1434 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
1435 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
1436 return iwl_send_cmd_pdu_async(priv, REPLY_TX_POWER_DBM_CMD,
1437 sizeof(tx_power_cmd), &tx_power_cmd,
1438 NULL);
1439}
1440
1441static void iwl5000_temperature(struct iwl_priv *priv)
1442{
1443 /* store temperature from statistics (in Celsius) */
1444 priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
1445}
1446
1447static struct iwl_hcmd_ops iwl5000_hcmd = {
1448 .rxon_assoc = iwl5000_send_rxon_assoc,
1449};
1450
1451static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
1452 .get_hcmd_size = iwl5000_get_hcmd_size,
1453 .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
1454 .gain_computation = iwl5000_gain_computation,
1455 .chain_noise_reset = iwl5000_chain_noise_reset,
1456 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
1457};
1458
1459static struct iwl_lib_ops iwl5000_lib = {
1460 .set_hw_params = iwl5000_hw_set_hw_params,
1461 .alloc_shared_mem = iwl5000_alloc_shared_mem,
1462 .free_shared_mem = iwl5000_free_shared_mem,
1463 .shared_mem_rx_idx = iwl5000_shared_mem_rx_idx,
1464 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
1465 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
1466 .txq_set_sched = iwl5000_txq_set_sched,
1467 .txq_agg_enable = iwl5000_txq_agg_enable,
1468 .txq_agg_disable = iwl5000_txq_agg_disable,
1469 .rx_handler_setup = iwl5000_rx_handler_setup,
1470 .setup_deferred_work = iwl5000_setup_deferred_work,
1471 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1472 .load_ucode = iwl5000_load_ucode,
1473 .init_alive_start = iwl5000_init_alive_start,
1474 .alive_notify = iwl5000_alive_notify,
1475 .send_tx_power = iwl5000_send_tx_power,
1476 .temperature = iwl5000_temperature,
1477 .apm_ops = {
1478 .init = iwl5000_apm_init,
1479 .reset = iwl5000_apm_reset,
1480 .stop = iwl5000_apm_stop,
1481 .config = iwl5000_nic_config,
1482 .set_pwr_src = iwl4965_set_pwr_src,
1483 },
1484 .eeprom_ops = {
1485 .regulatory_bands = {
1486 EEPROM_5000_REG_BAND_1_CHANNELS,
1487 EEPROM_5000_REG_BAND_2_CHANNELS,
1488 EEPROM_5000_REG_BAND_3_CHANNELS,
1489 EEPROM_5000_REG_BAND_4_CHANNELS,
1490 EEPROM_5000_REG_BAND_5_CHANNELS,
1491 EEPROM_5000_REG_BAND_24_FAT_CHANNELS,
1492 EEPROM_5000_REG_BAND_52_FAT_CHANNELS
1493 },
1494 .verify_signature = iwlcore_eeprom_verify_signature,
1495 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
1496 .release_semaphore = iwlcore_eeprom_release_semaphore,
1497 .check_version = iwl5000_eeprom_check_version,
1498 .query_addr = iwl5000_eeprom_query_addr,
1499 },
1500};
1501
1502static struct iwl_ops iwl5000_ops = {
1503 .lib = &iwl5000_lib,
1504 .hcmd = &iwl5000_hcmd,
1505 .utils = &iwl5000_hcmd_utils,
1506};
1507
1508static struct iwl_mod_params iwl50_mod_params = {
1509 .num_of_queues = IWL50_NUM_QUEUES,
1510 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1511 .enable_qos = 1,
1512 .amsdu_size_8K = 1,
1513 .restart_fw = 1,
1514 /* the rest are 0 by default */
1515};
1516
1517
1518struct iwl_cfg iwl5300_agn_cfg = {
1519 .name = "5300AGN",
1520 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
1521 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1522 .ops = &iwl5000_ops,
1523 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1524 .mod_params = &iwl50_mod_params,
1525};
1526
1527struct iwl_cfg iwl5100_bg_cfg = {
1528 .name = "5100BG",
1529 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
1530 .sku = IWL_SKU_G,
1531 .ops = &iwl5000_ops,
1532 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1533 .mod_params = &iwl50_mod_params,
1534};
1535
1536struct iwl_cfg iwl5100_abg_cfg = {
1537 .name = "5100ABG",
1538 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
1539 .sku = IWL_SKU_A|IWL_SKU_G,
1540 .ops = &iwl5000_ops,
1541 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1542 .mod_params = &iwl50_mod_params,
1543};
1544
1545struct iwl_cfg iwl5100_agn_cfg = {
1546 .name = "5100AGN",
1547 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
1548 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1549 .ops = &iwl5000_ops,
1550 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1551 .mod_params = &iwl50_mod_params,
1552};
1553
1554struct iwl_cfg iwl5350_agn_cfg = {
1555 .name = "5350AGN",
1556 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
1557 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1558 .ops = &iwl5000_ops,
1559 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1560 .mod_params = &iwl50_mod_params,
1561};
1562
1563module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
1564MODULE_PARM_DESC(disable50,
1565 "manually disable the 50XX radio (default 0 [radio on])");
1566module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444);
1567MODULE_PARM_DESC(swcrypto50,
1568		"use software crypto engine (default 0 [hardware])");
1569module_param_named(debug50, iwl50_mod_params.debug, int, 0444);
1570MODULE_PARM_DESC(debug50, "50XX debug output mask");
1571module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444);
1572MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
1573module_param_named(qos_enable50, iwl50_mod_params.enable_qos, int, 0444);
1574MODULE_PARM_DESC(qos_enable50, "enable all 50XX QoS functionality");
1575module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, 0444);
1576MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
1577module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444);
1578MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
1579module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, 0444);
1580MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
new file mode 100644
index 000000000000..ef49440bd7f6
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -0,0 +1,802 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <net/mac80211.h>
64
65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-calib.h"
68
69/* "false alarms" are signals that our DSP tries to lock onto,
70 * but then determines that they are either noise, or transmissions
71 * from a distant wireless network (also "noise", really) that get
72 * "stepped on" by stronger transmissions within our own network.
73 * This algorithm attempts to set a sensitivity level that is high
74 * enough to receive all of our own network traffic, but not so
75 * high that our DSP gets too busy trying to lock onto non-network
76 * activity/noise. */
77static int iwl_sens_energy_cck(struct iwl_priv *priv,
78 u32 norm_fa,
79 u32 rx_enable_time,
80 struct statistics_general_data *rx_info)
81{
82 u32 max_nrg_cck = 0;
83 int i = 0;
84 u8 max_silence_rssi = 0;
85 u32 silence_ref = 0;
86 u8 silence_rssi_a = 0;
87 u8 silence_rssi_b = 0;
88 u8 silence_rssi_c = 0;
89 u32 val;
90
91 /* "false_alarms" values below are cross-multiplications to assess the
92 * numbers of false alarms within the measured period of actual Rx
93 * (Rx is off when we're txing), vs the min/max expected false alarms
94 * (some should be expected if rx is sensitive enough) in a
95 * hypothetical listening period of 200 time units (TU), 204.8 msec:
96 *
97 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
98 *
99 * */
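	/* Illustrative example (assuming MAX_FA_CCK is 50): with
	 * norm_fa = 10 and rx_enable_time = 100000 usec, false_alarms =
	 * 10 * 200 * 1024 = 2048000, well below max_false_alarms =
	 * 50 * 100000 = 5000000, so the false-alarm rate is under the
	 * "too many" bound. */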
100 u32 false_alarms = norm_fa * 200 * 1024;
101 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
102 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
103 struct iwl_sensitivity_data *data = NULL;
104 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
105
106 data = &(priv->sensitivity_data);
107
108 data->nrg_auto_corr_silence_diff = 0;
109
110 /* Find max silence rssi among all 3 receivers.
111 * This is background noise, which may include transmissions from other
112 * networks, measured during silence before our network's beacon */
113 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
114 ALL_BAND_FILTER) >> 8);
115 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
116 ALL_BAND_FILTER) >> 8);
117 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
118 ALL_BAND_FILTER) >> 8);
119
120 val = max(silence_rssi_b, silence_rssi_c);
121 max_silence_rssi = max(silence_rssi_a, (u8) val);
122
123 /* Store silence rssi in 20-beacon history table */
124 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
125 data->nrg_silence_idx++;
126 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
127 data->nrg_silence_idx = 0;
128
129 /* Find max silence rssi across 20 beacon history */
130 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
131 val = data->nrg_silence_rssi[i];
132 silence_ref = max(silence_ref, val);
133 }
134 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
135 silence_rssi_a, silence_rssi_b, silence_rssi_c,
136 silence_ref);
137
138 /* Find max rx energy (min value!) among all 3 receivers,
139 * measured during beacon frame.
140 * Save it in 10-beacon history table. */
141 i = data->nrg_energy_idx;
142 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
143 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
144
145 data->nrg_energy_idx++;
146 if (data->nrg_energy_idx >= 10)
147 data->nrg_energy_idx = 0;
148
149 /* Find min rx energy (max value) across 10 beacon history.
150 * This is the minimum signal level that we want to receive well.
151 * Add backoff (margin so we don't miss slightly lower energy frames).
152 * This establishes an upper bound (min value) for energy threshold. */
153 max_nrg_cck = data->nrg_value[0];
154 for (i = 1; i < 10; i++)
155 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
156 max_nrg_cck += 6;
157
158 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
159 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
160 rx_info->beacon_energy_c, max_nrg_cck - 6);
161
162 /* Count number of consecutive beacons with fewer-than-desired
163 * false alarms. */
164 if (false_alarms < min_false_alarms)
165 data->num_in_cck_no_fa++;
166 else
167 data->num_in_cck_no_fa = 0;
168 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
169 data->num_in_cck_no_fa);
170
171 /* If we got too many false alarms this time, reduce sensitivity */
172 if ((false_alarms > max_false_alarms) &&
173 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
174 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
175 false_alarms, max_false_alarms);
176 IWL_DEBUG_CALIB("... reducing sensitivity\n");
177 data->nrg_curr_state = IWL_FA_TOO_MANY;
178 /* Store for "fewer than desired" on later beacon */
179 data->nrg_silence_ref = silence_ref;
180
181 /* increase energy threshold (reduce nrg value)
182 * to decrease sensitivity */
183 if (data->nrg_th_cck >
184 (ranges->max_nrg_cck + NRG_STEP_CCK))
185 data->nrg_th_cck = data->nrg_th_cck
186 - NRG_STEP_CCK;
187 else
188 data->nrg_th_cck = ranges->max_nrg_cck;
189 /* Else if we got fewer than desired, increase sensitivity */
190 } else if (false_alarms < min_false_alarms) {
191 data->nrg_curr_state = IWL_FA_TOO_FEW;
192
193 /* Compare silence level with silence level for most recent
194 * healthy number or too many false alarms */
195 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
196 (s32)silence_ref;
197
198 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
199 false_alarms, min_false_alarms,
200 data->nrg_auto_corr_silence_diff);
201
202 /* Increase value to increase sensitivity, but only if:
203 * 1a) previous beacon did *not* have *too many* false alarms
204 * 1b) AND there's a significant difference in Rx levels
205 * from a previous beacon with too many, or healthy # FAs
206 * OR 2) We've seen a lot of beacons (100) with too few
207 * false alarms */
208 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
209 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
210 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
211
212 IWL_DEBUG_CALIB("... increasing sensitivity\n");
213 /* Increase nrg value to increase sensitivity */
214 val = data->nrg_th_cck + NRG_STEP_CCK;
215 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
216 } else {
217 IWL_DEBUG_CALIB("... but not changing sensitivity\n");
218 }
219
220 /* Else we got a healthy number of false alarms, keep status quo */
221 } else {
222 IWL_DEBUG_CALIB(" FA in safe zone\n");
223 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
224
225 /* Store for use in "fewer than desired" with later beacon */
226 data->nrg_silence_ref = silence_ref;
227
228 /* If previous beacon had too many false alarms,
229 * give it some extra margin by reducing sensitivity again
230 * (but don't go below measured energy of desired Rx) */
231 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
232 IWL_DEBUG_CALIB("... increasing margin\n");
233 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
234 data->nrg_th_cck -= NRG_MARGIN;
235 else
236 data->nrg_th_cck = max_nrg_cck;
237 }
238 }
239
240 /* Make sure the energy threshold does not go above the measured
241 * energy of the desired Rx signals (reduced by backoff margin),
242 * or else we might start missing Rx frames.
243 * Lower value is higher energy, so we use max()!
244 */
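	/* Illustrative example: if the 10-beacon bound max_nrg_cck
	 * (already reduced by the backoff margin) works out to 97 while
	 * nrg_th_cck has drifted down to 90, the max() below clamps it
	 * back to 97 (numerically higher, i.e. a lower energy threshold)
	 * so desired frames are not missed. */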
245 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
246 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
247
248 data->nrg_prev_state = data->nrg_curr_state;
249
250 /* Auto-correlation CCK algorithm */
251 if (false_alarms > min_false_alarms) {
252
253 /* increase auto_corr values to decrease sensitivity
254 * so the DSP won't be disturbed by the noise
255 */
256 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
257 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
258 else {
259 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
260 data->auto_corr_cck =
261 min((u32)ranges->auto_corr_max_cck, val);
262 }
263 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
264 data->auto_corr_cck_mrc =
265 min((u32)ranges->auto_corr_max_cck_mrc, val);
266 } else if ((false_alarms < min_false_alarms) &&
267 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
268 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
269
270 /* Decrease auto_corr values to increase sensitivity */
271 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
272 data->auto_corr_cck =
273 max((u32)ranges->auto_corr_min_cck, val);
274 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
275 data->auto_corr_cck_mrc =
276 max((u32)ranges->auto_corr_min_cck_mrc, val);
277 }
278
279 return 0;
280}
281
282
283static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
284 u32 norm_fa,
285 u32 rx_enable_time)
286{
287 u32 val;
288 u32 false_alarms = norm_fa * 200 * 1024;
289 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
290 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
291 struct iwl_sensitivity_data *data = NULL;
292 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
293
294 data = &(priv->sensitivity_data);
295
296 /* If we got too many false alarms this time, reduce sensitivity */
297 if (false_alarms > max_false_alarms) {
298
299		IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
300 false_alarms, max_false_alarms);
301
302 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
303 data->auto_corr_ofdm =
304 min((u32)ranges->auto_corr_max_ofdm, val);
305
306 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
307 data->auto_corr_ofdm_mrc =
308 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
309
310 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
311 data->auto_corr_ofdm_x1 =
312 min((u32)ranges->auto_corr_max_ofdm_x1, val);
313
314 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
315 data->auto_corr_ofdm_mrc_x1 =
316 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
317 }
318
319 /* Else if we got fewer than desired, increase sensitivity */
320 else if (false_alarms < min_false_alarms) {
321
322 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
323 false_alarms, min_false_alarms);
324
325 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
326 data->auto_corr_ofdm =
327 max((u32)ranges->auto_corr_min_ofdm, val);
328
329 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
330 data->auto_corr_ofdm_mrc =
331 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
332
333 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
334 data->auto_corr_ofdm_x1 =
335 max((u32)ranges->auto_corr_min_ofdm_x1, val);
336
337 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
338 data->auto_corr_ofdm_mrc_x1 =
339 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
340 } else {
341 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
342 min_false_alarms, false_alarms, max_false_alarms);
343 }
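	/* Illustrative note: with ranges like the iwl5000_sensitivity
	 * table earlier in this patch (auto_corr_min_ofdm = 90,
	 * auto_corr_max_ofdm = 120) and assuming AUTO_CORR_STEP_OFDM is 1,
	 * a noisy period nudges auto_corr_ofdm up by one per statistics
	 * notification until it saturates at 120, and a quiet period
	 * walks it back down toward 90. */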
344 return 0;
345}
346
347/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
348static int iwl_sensitivity_write(struct iwl_priv *priv)
349{
350 int ret = 0;
351	struct iwl_sensitivity_cmd cmd;
352 struct iwl_sensitivity_data *data = NULL;
353 struct iwl_host_cmd cmd_out = {
354 .id = SENSITIVITY_CMD,
355 .len = sizeof(struct iwl_sensitivity_cmd),
356 .meta.flags = CMD_ASYNC,
357 .data = &cmd,
358 };
359
360 data = &(priv->sensitivity_data);
361
362 memset(&cmd, 0, sizeof(cmd));
363
364 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
365 cpu_to_le16((u16)data->auto_corr_ofdm);
366 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
367 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
368 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
369 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
370 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
371 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
372
373 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
374 cpu_to_le16((u16)data->auto_corr_cck);
375 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
376 cpu_to_le16((u16)data->auto_corr_cck_mrc);
377
378 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
379 cpu_to_le16((u16)data->nrg_th_cck);
380 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
381 cpu_to_le16((u16)data->nrg_th_ofdm);
382
383 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
384 __constant_cpu_to_le16(190);
385 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
386 __constant_cpu_to_le16(390);
387 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
388 __constant_cpu_to_le16(62);
389
390 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
391 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
392 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
393 data->nrg_th_ofdm);
394
395 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
396 data->auto_corr_cck, data->auto_corr_cck_mrc,
397 data->nrg_th_cck);
398
399 /* Update uCode's "work" table, and copy it to DSP */
400 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
401
402 /* Don't send command to uCode if nothing has changed */
403 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
404 sizeof(u16)*HD_TABLE_SIZE)) {
405 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
406 return 0;
407 }
408
409 /* Copy table for comparison next time */
410 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
411 sizeof(u16)*HD_TABLE_SIZE);
412
413 ret = iwl_send_cmd(priv, &cmd_out);
414 if (ret)
415 IWL_ERROR("SENSITIVITY_CMD failed\n");
416
417 return ret;
418}
419
420void iwl_init_sensitivity(struct iwl_priv *priv)
421{
422 int ret = 0;
423 int i;
424 struct iwl_sensitivity_data *data = NULL;
425 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
426
427 if (priv->disable_sens_cal)
428 return;
429
430 IWL_DEBUG_CALIB("Start iwl_init_sensitivity\n");
431
432 /* Clear driver's sensitivity algo data */
433 data = &(priv->sensitivity_data);
434
435 if (ranges == NULL)
436 return;
437
438 memset(data, 0, sizeof(struct iwl_sensitivity_data));
439
440 data->num_in_cck_no_fa = 0;
441 data->nrg_curr_state = IWL_FA_TOO_MANY;
442 data->nrg_prev_state = IWL_FA_TOO_MANY;
443 data->nrg_silence_ref = 0;
444 data->nrg_silence_idx = 0;
445 data->nrg_energy_idx = 0;
446
447 for (i = 0; i < 10; i++)
448 data->nrg_value[i] = 0;
449
450 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
451 data->nrg_silence_rssi[i] = 0;
452
453 data->auto_corr_ofdm = 90;
454 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
455 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
456 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
457 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
458 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
459 data->nrg_th_cck = ranges->nrg_th_cck;
460 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
461
462 data->last_bad_plcp_cnt_ofdm = 0;
463 data->last_fa_cnt_ofdm = 0;
464 data->last_bad_plcp_cnt_cck = 0;
465 data->last_fa_cnt_cck = 0;
466
467 ret |= iwl_sensitivity_write(priv);
468 IWL_DEBUG_CALIB("<<return 0x%X\n", ret);
469}
470EXPORT_SYMBOL(iwl_init_sensitivity);
471
472void iwl_sensitivity_calibration(struct iwl_priv *priv,
473 struct iwl_notif_statistics *resp)
474{
475 u32 rx_enable_time;
476 u32 fa_cck;
477 u32 fa_ofdm;
478 u32 bad_plcp_cck;
479 u32 bad_plcp_ofdm;
480 u32 norm_fa_ofdm;
481 u32 norm_fa_cck;
482 struct iwl_sensitivity_data *data = NULL;
483 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
484 struct statistics_rx *statistics = &(resp->rx);
485 unsigned long flags;
486 struct statistics_general_data statis;
487
488 if (priv->disable_sens_cal)
489 return;
490
491 data = &(priv->sensitivity_data);
492
493 if (!iwl_is_associated(priv)) {
494 IWL_DEBUG_CALIB("<< - not associated\n");
495 return;
496 }
497
498 spin_lock_irqsave(&priv->lock, flags);
499 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
500 IWL_DEBUG_CALIB("<< invalid data.\n");
501 spin_unlock_irqrestore(&priv->lock, flags);
502 return;
503 }
504
505 /* Extract Statistics: */
506 rx_enable_time = le32_to_cpu(rx_info->channel_load);
507 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
508 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
509 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
510 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
511
512 statis.beacon_silence_rssi_a =
513 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
514 statis.beacon_silence_rssi_b =
515 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
516 statis.beacon_silence_rssi_c =
517 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
518 statis.beacon_energy_a =
519 le32_to_cpu(statistics->general.beacon_energy_a);
520 statis.beacon_energy_b =
521 le32_to_cpu(statistics->general.beacon_energy_b);
522 statis.beacon_energy_c =
523 le32_to_cpu(statistics->general.beacon_energy_c);
524
525 spin_unlock_irqrestore(&priv->lock, flags);
526
527 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
528
529 if (!rx_enable_time) {
530 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
531 return;
532 }
533
534 /* These statistics increase monotonically, and do not reset
535 * at each beacon. Calculate difference from last value, or just
536 * use the new statistics value if it has reset or wrapped around. */
537 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
538 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
539 else {
540 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
541 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
542 }
543
544 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
545 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
546 else {
547 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
548 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
549 }
550
551 if (data->last_fa_cnt_ofdm > fa_ofdm)
552 data->last_fa_cnt_ofdm = fa_ofdm;
553 else {
554 fa_ofdm -= data->last_fa_cnt_ofdm;
555 data->last_fa_cnt_ofdm += fa_ofdm;
556 }
557
558 if (data->last_fa_cnt_cck > fa_cck)
559 data->last_fa_cnt_cck = fa_cck;
560 else {
561 fa_cck -= data->last_fa_cnt_cck;
562 data->last_fa_cnt_cck += fa_cck;
563 }
564
565 /* Total aborted signal locks */
566 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
567 norm_fa_cck = fa_cck + bad_plcp_cck;
568
569 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
570 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
571
572 iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
573 iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
574 iwl_sensitivity_write(priv);
575
576 return;
577}
578EXPORT_SYMBOL(iwl_sensitivity_calibration);
579
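[Editorial note, not part of the patch] The four if/else blocks in iwl_sensitivity_calibration() above all apply the same delta-or-reset rule to uCode counters that only ever increase until the firmware restarts. A minimal sketch of that pattern, assuming the kernel's u32 type and a hypothetical helper name:

/* Hypothetical helper illustrating the delta-or-reset pattern used in
 * iwl_sensitivity_calibration() above: returns the increment since the
 * previous sample, or the raw value if the counter reset or wrapped. */
static u32 iwl_stat_delta(u32 *last, u32 cur)
{
	u32 delta;

	if (*last > cur) {	/* uCode counter was reset */
		*last = cur;
		return cur;
	}

	delta = cur - *last;
	*last = cur;		/* same effect as *last += delta */
	return delta;
}

With such a helper, the CCK false-alarm block above would reduce to fa_cck = iwl_stat_delta(&data->last_fa_cnt_cck, fa_cck); and likewise for the other three counters.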
580/*
581 * Accumulate 20 beacons of signal and noise statistics for each of
582 * 3 receivers/antennas/rx-chains, then figure out:
583 * 1) Which antennas are connected.
584 * 2) Differential rx gain settings to balance the 3 receivers.
585 */
586void iwl_chain_noise_calibration(struct iwl_priv *priv,
587 struct iwl_notif_statistics *stat_resp)
588{
589 struct iwl_chain_noise_data *data = NULL;
590
591 u32 chain_noise_a;
592 u32 chain_noise_b;
593 u32 chain_noise_c;
594 u32 chain_sig_a;
595 u32 chain_sig_b;
596 u32 chain_sig_c;
597 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
598 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
599 u32 max_average_sig;
600 u16 max_average_sig_antenna_i;
601 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
602 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
603 u16 i = 0;
604 u16 rxon_chnum = INITIALIZATION_VALUE;
605 u16 stat_chnum = INITIALIZATION_VALUE;
606 u8 rxon_band24;
607 u8 stat_band24;
608 u32 active_chains = 0;
609 u8 num_tx_chains;
610 unsigned long flags;
611 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
612
613 if (priv->disable_chain_noise_cal)
614 return;
615
616 data = &(priv->chain_noise_data);
617
618 /* Accumulate just the first 20 beacons after the first association,
619 * then we're done forever. */
620 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
621 if (data->state == IWL_CHAIN_NOISE_ALIVE)
622 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
623 return;
624 }
625
626 spin_lock_irqsave(&priv->lock, flags);
627 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
628 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
629 spin_unlock_irqrestore(&priv->lock, flags);
630 return;
631 }
632
633 rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK);
634 rxon_chnum = le16_to_cpu(priv->staging_rxon.channel);
635 stat_band24 = !!(stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK);
636 stat_chnum = le32_to_cpu(stat_resp->flag) >> 16;
637
638 /* Make sure we accumulate data for just the associated channel
639 * (even if scanning). */
640 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
641 IWL_DEBUG_CALIB("Stats not from chan=%d, band24=%d\n",
642 rxon_chnum, rxon_band24);
643 spin_unlock_irqrestore(&priv->lock, flags);
644 return;
645 }
646
647 /* Accumulate beacon statistics values across 20 beacons */
648 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
649 IN_BAND_FILTER;
650 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
651 IN_BAND_FILTER;
652 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
653 IN_BAND_FILTER;
654
655 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
656 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
657 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
658
659 spin_unlock_irqrestore(&priv->lock, flags);
660
661 data->beacon_count++;
662
663 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
664 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
665 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
666
667 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
668 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
669 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
670
671 IWL_DEBUG_CALIB("chan=%d, band24=%d, beacon=%d\n",
672 rxon_chnum, rxon_band24, data->beacon_count);
673 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
674 chain_sig_a, chain_sig_b, chain_sig_c);
675 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
676 chain_noise_a, chain_noise_b, chain_noise_c);
677
678 /* If this is the 20th beacon, determine:
679 * 1) Disconnected antennas (using signal strengths)
680 * 2) Differential gain (using silence noise) to balance receivers */
681 if (data->beacon_count != CAL_NUM_OF_BEACONS)
682 return;
683
684 /* Analyze signal for disconnected antenna */
685 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
686 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
687 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
688
689 if (average_sig[0] >= average_sig[1]) {
690 max_average_sig = average_sig[0];
691 max_average_sig_antenna_i = 0;
692 active_chains = (1 << max_average_sig_antenna_i);
693 } else {
694 max_average_sig = average_sig[1];
695 max_average_sig_antenna_i = 1;
696 active_chains = (1 << max_average_sig_antenna_i);
697 }
698
699 if (average_sig[2] >= max_average_sig) {
700 max_average_sig = average_sig[2];
701 max_average_sig_antenna_i = 2;
702 active_chains = (1 << max_average_sig_antenna_i);
703 }
704
705 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
706 average_sig[0], average_sig[1], average_sig[2]);
707 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
708 max_average_sig, max_average_sig_antenna_i);
709
710 /* Compare signal strengths for all 3 receivers. */
711 for (i = 0; i < NUM_RX_CHAINS; i++) {
712 if (i != max_average_sig_antenna_i) {
713 s32 rssi_delta = (max_average_sig - average_sig[i]);
714
715 /* If signal is very weak, compared with
716 * strongest, mark it as disconnected. */
717 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
718 data->disconn_array[i] = 1;
719 else
720 active_chains |= (1 << i);
721 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
722 "disconn_array[i] = %d\n",
723 i, rssi_delta, data->disconn_array[i]);
724 }
725 }
726
727 num_tx_chains = 0;
728 for (i = 0; i < NUM_RX_CHAINS; i++) {
729 /* loops on all the bits of
730		 * priv->hw_params.valid_tx_ant */
731 u8 ant_msk = (1 << i);
732 if (!(priv->hw_params.valid_tx_ant & ant_msk))
733 continue;
734
735 num_tx_chains++;
736 if (data->disconn_array[i] == 0)
737 /* there is a Tx antenna connected */
738 break;
739 if (num_tx_chains == priv->hw_params.tx_chains_num &&
740 data->disconn_array[i]) {
741 /* This is the last TX antenna and is also
742			 * disconnected; connect it anyway */
743 data->disconn_array[i] = 0;
744 active_chains |= ant_msk;
745 IWL_DEBUG_CALIB("All Tx chains are disconnected W/A - "
746 "declare %d as connected\n", i);
747 break;
748 }
749 }
750
751 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
752 active_chains);
753
754 /* Save for use within RXON, TX, SCAN commands, etc. */
755 /*priv->valid_antenna = active_chains;*/
756 /*FIXME: should be reflected in RX chains in RXON */
757
758 /* Analyze noise for rx balance */
759 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
760 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
761 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
762
763 for (i = 0; i < NUM_RX_CHAINS; i++) {
764 if (!(data->disconn_array[i]) &&
765 (average_noise[i] <= min_average_noise)) {
766 /* This means that chain i is active and has
767 * lower noise values so far: */
768 min_average_noise = average_noise[i];
769 min_average_noise_antenna_i = i;
770 }
771 }
772
773 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
774 average_noise[0], average_noise[1],
775 average_noise[2]);
776
777 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
778 min_average_noise, min_average_noise_antenna_i);
779
780 priv->cfg->ops->utils->gain_computation(priv, average_noise,
781 min_average_noise_antenna_i, min_average_noise);
782}
783EXPORT_SYMBOL(iwl_chain_noise_calibration);
784
785
786void iwl_reset_run_time_calib(struct iwl_priv *priv)
787{
788 int i;
789 memset(&(priv->sensitivity_data), 0,
790 sizeof(struct iwl_sensitivity_data));
791 memset(&(priv->chain_noise_data), 0,
792 sizeof(struct iwl_chain_noise_data));
793 for (i = 0; i < NUM_RX_CHAINS; i++)
794 priv->chain_noise_data.delta_gain_code[i] =
795 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
796
797 /* Ask for statistics now, the uCode will send notification
798 * periodically after association */
799 iwl_send_statistics_request(priv, CMD_ASYNC);
800}
801EXPORT_SYMBOL(iwl_reset_run_time_calib);
802
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
new file mode 100644
index 000000000000..94c8e316382a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -0,0 +1,84 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_calib_h__
63#define __iwl_calib_h__
64
65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-commands.h"
68
69void iwl_chain_noise_calibration(struct iwl_priv *priv,
70 struct iwl_notif_statistics *stat_resp);
71void iwl_sensitivity_calibration(struct iwl_priv *priv,
72 struct iwl_notif_statistics *resp);
73
74void iwl_init_sensitivity(struct iwl_priv *priv);
75void iwl_reset_run_time_calib(struct iwl_priv *priv);
76static inline void iwl_chain_noise_reset(struct iwl_priv *priv)
77{
78
79 if (!priv->disable_chain_noise_cal &&
80 priv->cfg->ops->utils->chain_noise_reset)
81 priv->cfg->ops->utils->chain_noise_reset(priv);
82}
83
84#endif /* __iwl_calib_h__ */
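[Editorial note, not part of the patch] For orientation, a hypothetical caller sketch: the two run-time calibrations declared above are intended to be fed from the driver's statistics-notification path, roughly as follows (the handler name is assumed; struct iwl_rx_packet and struct iwl_notif_statistics are defined in iwl-commands.h below).

/* Hypothetical caller: run both calibrations on each statistics
 * notification. Both helpers return early when the corresponding
 * calibration is disabled or its preconditions are not met. */
static void example_rx_statistics(struct iwl_priv *priv,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_notif_statistics *stats = &pkt->u.stats;

	iwl_sensitivity_calibration(priv, stats);
	iwl_chain_noise_calibration(priv, stats);
}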
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 3bcd107e2d71..e9bb1de0ce3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -61,9 +61,9 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-4965-commands.h) only for uCode API definitions. 64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-4965-hw.h for hardware-related definitions. 65 * Please use iwl-4965-hw.h for hardware-related definitions.
66 * Please use iwl-4965.h for driver implementation definitions. 66 * Please use iwl-dev.h for driver implementation definitions.
67 */ 67 */
68 68
69#ifndef __iwl4965_commands_h__ 69#ifndef __iwl4965_commands_h__
@@ -93,6 +93,11 @@ enum {
93 REPLY_LEDS_CMD = 0x48, 93 REPLY_LEDS_CMD = 0x48,
94 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ 94 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
95 95
96 /* WiMAX coexistence */
97 COEX_PRIORITY_TABLE_CMD = 0x5a, /*5000 only */
98 COEX_MEDIUM_NOTIFICATION = 0x5b,
99 COEX_EVENT_CMD = 0x5c,
100
96 /* 802.11h related */ 101 /* 802.11h related */
97 RADAR_NOTIFICATION = 0x70, /* not used */ 102 RADAR_NOTIFICATION = 0x70, /* not used */
98 REPLY_QUIET_CMD = 0x71, /* not used */ 103 REPLY_QUIET_CMD = 0x71, /* not used */
@@ -121,6 +126,7 @@ enum {
121 /* Miscellaneous commands */ 126 /* Miscellaneous commands */
122 QUIET_NOTIFICATION = 0x96, /* not used */ 127 QUIET_NOTIFICATION = 0x96, /* not used */
123 REPLY_TX_PWR_TABLE_CMD = 0x97, 128 REPLY_TX_PWR_TABLE_CMD = 0x97,
129 REPLY_TX_POWER_DBM_CMD = 0x98,
124 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ 130 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
125 131
126 /* Bluetooth device coexistance config command */ 132 /* Bluetooth device coexistance config command */
@@ -269,21 +275,13 @@ struct iwl_cmd_header {
269 * 10 B active, A inactive 275 * 10 B active, A inactive
270 * 11 Both active 276 * 11 Both active
271 */ 277 */
272#define RATE_MCS_ANT_POS 14 278#define RATE_MCS_ANT_POS 14
273#define RATE_MCS_ANT_A_MSK 0x04000 279#define RATE_MCS_ANT_A_MSK 0x04000
274#define RATE_MCS_ANT_B_MSK 0x08000 280#define RATE_MCS_ANT_B_MSK 0x08000
275#define RATE_MCS_ANT_AB_MSK 0x0C000 281#define RATE_MCS_ANT_C_MSK 0x10000
282#define RATE_MCS_ANT_ABC_MSK 0x1C000
276 283
277 284#define RATE_MCS_ANT_INIT_IND 1
278/**
279 * struct iwl4965_tx_power - txpower format used in REPLY_SCAN_CMD
280 *
281 * Scan uses only one transmitter, so only one analog/dsp gain pair is needed.
282 */
283struct iwl4965_tx_power {
284 u8 tx_gain; /* gain for analog radio */
285 u8 dsp_atten; /* gain for DSP */
286} __attribute__ ((packed));
287 285
288#define POWER_TABLE_NUM_ENTRIES 33 286#define POWER_TABLE_NUM_ENTRIES 33
289#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 287#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
@@ -333,6 +331,19 @@ struct iwl4965_tx_power_db {
333 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; 331 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
334} __attribute__ ((packed)); 332} __attribute__ ((packed));
335 333
334/**
 335 * Command REPLY_TX_POWER_DBM_CMD = 0x98
336 * struct iwl5000_tx_power_dbm_cmd
337 */
338#define IWL50_TX_POWER_AUTO 0x7f
339#define IWL50_TX_POWER_NO_CLOSED (0x1 << 6)
340
341struct iwl5000_tx_power_dbm_cmd {
342 s8 global_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
343 u8 flags;
344 s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
345 u8 reserved;
346} __attribute__ ((packed));
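[Editorial note, not part of the patch] A hedged illustration of how a 5000-series driver might populate this command; the 15 dBm limit is an assumed example value, and the half-dBm encoding follows the field comments above.

/* Hypothetical fill of REPLY_TX_POWER_DBM_CMD: cap TX power at 15 dBm.
 * Fields are in half-dBm units, so 15 dBm is encoded as 30. */
struct iwl5000_tx_power_dbm_cmd tx_power_cmd = {
	.global_lmt   = 2 * 15,			/* 30 half-dBm == 15 dBm */
	.flags        = IWL50_TX_POWER_NO_CLOSED,
	.srv_chan_lmt = IWL50_TX_POWER_AUTO,	/* let uCode decide */
};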
336 347
337/****************************************************************************** 348/******************************************************************************
338 * (0a) 349 * (0a)
@@ -367,7 +378,7 @@ struct iwl4965_tx_power_db {
367 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, 378 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
368 * for each of 5 frequency ranges. 379 * for each of 5 frequency ranges.
369 */ 380 */
370struct iwl4965_init_alive_resp { 381struct iwl_init_alive_resp {
371 u8 ucode_minor; 382 u8 ucode_minor;
372 u8 ucode_major; 383 u8 ucode_major;
373 __le16 reserved1; 384 __le16 reserved1;
@@ -443,7 +454,7 @@ struct iwl4965_init_alive_resp {
443 * The Linux driver can print both logs to the system log when a uCode error 454 * The Linux driver can print both logs to the system log when a uCode error
444 * occurs. 455 * occurs.
445 */ 456 */
446struct iwl4965_alive_resp { 457struct iwl_alive_resp {
447 u8 ucode_minor; 458 u8 ucode_minor;
448 u8 ucode_major; 459 u8 ucode_major;
449 __le16 reserved1; 460 __le16 reserved1;
@@ -467,7 +478,7 @@ union tsf {
467/* 478/*
468 * REPLY_ERROR = 0x2 (response only, not a command) 479 * REPLY_ERROR = 0x2 (response only, not a command)
469 */ 480 */
470struct iwl4965_error_resp { 481struct iwl_error_resp {
471 __le32 error_type; 482 __le32 error_type;
472 u8 cmd_id; 483 u8 cmd_id;
473 u8 reserved1; 484 u8 reserved1;
@@ -545,6 +556,8 @@ enum {
545#define RXON_FLG_CHANNEL_MODE_MSK __constant_cpu_to_le32(0x3 << 25) 556#define RXON_FLG_CHANNEL_MODE_MSK __constant_cpu_to_le32(0x3 << 25)
546#define RXON_FLG_CHANNEL_MODE_PURE_40_MSK __constant_cpu_to_le32(0x1 << 25) 557#define RXON_FLG_CHANNEL_MODE_PURE_40_MSK __constant_cpu_to_le32(0x1 << 25)
547#define RXON_FLG_CHANNEL_MODE_MIXED_MSK __constant_cpu_to_le32(0x2 << 25) 558#define RXON_FLG_CHANNEL_MODE_MIXED_MSK __constant_cpu_to_le32(0x2 << 25)
559/* CTS to self (if spec allows) flag */
560#define RXON_FLG_SELF_CTS_EN __constant_cpu_to_le32(0x1<<30)
548 561
549/* rx_config filter flags */ 562/* rx_config filter flags */
550/* accept all data frames */ 563/* accept all data frames */
@@ -599,6 +612,46 @@ struct iwl4965_rxon_cmd {
599 u8 ofdm_ht_dual_stream_basic_rates; 612 u8 ofdm_ht_dual_stream_basic_rates;
600} __attribute__ ((packed)); 613} __attribute__ ((packed));
601 614
 615/* 5000 HW just extends this command */
616struct iwl_rxon_cmd {
617 u8 node_addr[6];
618 __le16 reserved1;
619 u8 bssid_addr[6];
620 __le16 reserved2;
621 u8 wlap_bssid_addr[6];
622 __le16 reserved3;
623 u8 dev_type;
624 u8 air_propagation;
625 __le16 rx_chain;
626 u8 ofdm_basic_rates;
627 u8 cck_basic_rates;
628 __le16 assoc_id;
629 __le32 flags;
630 __le32 filter_flags;
631 __le16 channel;
632 u8 ofdm_ht_single_stream_basic_rates;
633 u8 ofdm_ht_dual_stream_basic_rates;
634 u8 ofdm_ht_triple_stream_basic_rates;
635 u8 reserved5;
636 __le16 acquisition_data;
637 __le16 reserved6;
638} __attribute__ ((packed));
639
640struct iwl5000_rxon_assoc_cmd {
641 __le32 flags;
642 __le32 filter_flags;
643 u8 ofdm_basic_rates;
644 u8 cck_basic_rates;
645 __le16 reserved1;
646 u8 ofdm_ht_single_stream_basic_rates;
647 u8 ofdm_ht_dual_stream_basic_rates;
648 u8 ofdm_ht_triple_stream_basic_rates;
649 u8 reserved2;
650 __le16 rx_chain_select_flags;
651 __le16 acquisition_data;
652 __le32 reserved3;
653} __attribute__ ((packed));
654
602/* 655/*
603 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) 656 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
604 */ 657 */
@@ -613,6 +666,9 @@ struct iwl4965_rxon_assoc_cmd {
613 __le16 reserved; 666 __le16 reserved;
614} __attribute__ ((packed)); 667} __attribute__ ((packed));
615 668
669
670
671
616/* 672/*
617 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 673 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
618 */ 674 */
@@ -669,7 +725,7 @@ struct iwl4965_csa_notification {
669 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW 725 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
670 * value, to cap the CW value. 726 * value, to cap the CW value.
671 */ 727 */
672struct iwl4965_ac_qos { 728struct iwl_ac_qos {
673 __le16 cw_min; 729 __le16 cw_min;
674 __le16 cw_max; 730 __le16 cw_max;
675 u8 aifsn; 731 u8 aifsn;
@@ -691,9 +747,9 @@ struct iwl4965_ac_qos {
691 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs 747 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
692 * 0: Background, 1: Best Effort, 2: Video, 3: Voice. 748 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
693 */ 749 */
694struct iwl4965_qosparam_cmd { 750struct iwl_qosparam_cmd {
695 __le32 qos_flags; 751 __le32 qos_flags;
696 struct iwl4965_ac_qos ac[AC_NUM]; 752 struct iwl_ac_qos ac[AC_NUM];
697} __attribute__ ((packed)); 753} __attribute__ ((packed));
698 754
699/****************************************************************************** 755/******************************************************************************
@@ -711,6 +767,8 @@ struct iwl4965_qosparam_cmd {
711#define IWL_STA_ID 2 767#define IWL_STA_ID 2
712#define IWL4965_BROADCAST_ID 31 768#define IWL4965_BROADCAST_ID 31
713#define IWL4965_STATION_COUNT 32 769#define IWL4965_STATION_COUNT 32
770#define IWL5000_BROADCAST_ID 15
771#define IWL5000_STATION_COUNT 16
714 772
715#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 773#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
716#define IWL_INVALID_STATION 255 774#define IWL_INVALID_STATION 255
@@ -766,6 +824,20 @@ struct iwl4965_keyinfo {
766 u8 key[16]; /* 16-byte unicast decryption key */ 824 u8 key[16]; /* 16-byte unicast decryption key */
767} __attribute__ ((packed)); 825} __attribute__ ((packed));
768 826
827/* 5000 */
828struct iwl_keyinfo {
829 __le16 key_flags;
830 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
831 u8 reserved1;
832 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
833 u8 key_offset;
834 u8 reserved2;
835 u8 key[16]; /* 16-byte unicast decryption key */
836 __le64 tx_secur_seq_cnt;
837 __le64 hw_tkip_mic_rx_key;
838 __le64 hw_tkip_mic_tx_key;
839} __attribute__ ((packed));
840
769/** 841/**
770 * struct sta_id_modify 842 * struct sta_id_modify
771 * @addr[ETH_ALEN]: station's MAC address 843 * @addr[ETH_ALEN]: station's MAC address
@@ -841,6 +913,38 @@ struct iwl4965_addsta_cmd {
841 __le32 reserved2; 913 __le32 reserved2;
842} __attribute__ ((packed)); 914} __attribute__ ((packed));
843 915
916/* 5000 */
917struct iwl_addsta_cmd {
918 u8 mode; /* 1: modify existing, 0: add new station */
919 u8 reserved[3];
920 struct sta_id_modify sta;
921 struct iwl_keyinfo key;
922 __le32 station_flags; /* STA_FLG_* */
923 __le32 station_flags_msk; /* STA_FLG_* */
924
925 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
926 * corresponding to bit (e.g. bit 5 controls TID 5).
927 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
928 __le16 tid_disable_tx;
929
930 __le16 reserved1;
931
932 /* TID for which to add block-ack support.
933 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
934 u8 add_immediate_ba_tid;
935
936 /* TID for which to remove block-ack support.
937 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
938 u8 remove_immediate_ba_tid;
939
940 /* Starting Sequence Number for added block-ack support.
941 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
942 __le16 add_immediate_ba_ssn;
943
944 __le32 reserved2;
945} __attribute__ ((packed));
946
947
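[Editorial note, not part of the patch] A one-line worked example of the tid_disable_tx bit field described above; sta_cmd is an assumed struct iwl_addsta_cmd instance, and the STA_MODIFY_TID_DISABLE_TX modify bit mentioned in the comment must also be set (omitted here).

/* Hypothetical: stop transmission for TID 5 on this station.
 * Bit n of tid_disable_tx controls TID n. */
sta_cmd.tid_disable_tx = cpu_to_le16(1 << 5);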
844#define ADD_STA_SUCCESS_MSK 0x1 948#define ADD_STA_SUCCESS_MSK 0x1
845#define ADD_STA_NO_ROOM_IN_TABLE 0x2 949#define ADD_STA_NO_ROOM_IN_TABLE 0x2
846#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 950#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
@@ -848,10 +952,28 @@ struct iwl4965_addsta_cmd {
848/* 952/*
849 * REPLY_ADD_STA = 0x18 (response) 953 * REPLY_ADD_STA = 0x18 (response)
850 */ 954 */
851struct iwl4965_add_sta_resp { 955struct iwl_add_sta_resp {
852 u8 status; /* ADD_STA_* */ 956 u8 status; /* ADD_STA_* */
853} __attribute__ ((packed)); 957} __attribute__ ((packed));
854 958
959#define REM_STA_SUCCESS_MSK 0x1
960/*
961 * REPLY_REM_STA = 0x19 (response)
962 */
963struct iwl_rem_sta_resp {
964 u8 status;
965} __attribute__ ((packed));
966
967/*
968 * REPLY_REM_STA = 0x19 (command)
969 */
970struct iwl_rem_sta_cmd {
971 u8 num_sta; /* number of removed stations */
972 u8 reserved[3];
973 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
974 u8 reserved2[2];
975} __attribute__ ((packed));
976
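[Editorial note, not part of the patch] A hedged sketch of filling REPLY_REM_STA to remove a single station; the helper name and sta_addr argument are assumptions, while iwl_send_cmd_pdu() is the driver's existing synchronous host-command helper.

/* Hypothetical helper: remove one station by MAC address. */
static int example_remove_station(struct iwl_priv *priv, const u8 *sta_addr)
{
	struct iwl_rem_sta_cmd rm_sta_cmd;

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;			/* one station in this command */
	memcpy(rm_sta_cmd.addr, sta_addr, ETH_ALEN);

	return iwl_send_cmd_pdu(priv, REPLY_REM_STA,
				sizeof(rm_sta_cmd), &rm_sta_cmd);
}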
855/* 977/*
856 * REPLY_WEP_KEY = 0x20 978 * REPLY_WEP_KEY = 0x20
857 */ 979 */
@@ -875,6 +997,7 @@ struct iwl_wep_cmd {
875#define WEP_KEY_WEP_TYPE 1 997#define WEP_KEY_WEP_TYPE 1
876#define WEP_KEYS_MAX 4 998#define WEP_KEYS_MAX 4
877#define WEP_INVALID_OFFSET 0xff 999#define WEP_INVALID_OFFSET 0xff
1000#define WEP_KEY_LEN_64 5
878#define WEP_KEY_LEN_128 13 1001#define WEP_KEY_LEN_128 13
879 1002
880/****************************************************************************** 1003/******************************************************************************
@@ -1018,6 +1141,11 @@ struct iwl4965_rx_mpdu_res_start {
1018 1141
1019/* REPLY_TX Tx flags field */ 1142/* REPLY_TX Tx flags field */
1020 1143
 1144/* 1: Use RTS/CTS protocol or CTS-to-self if the spec allows it
 1145 * before this frame. If CTS-to-self is required, check
1146 * RXON_FLG_SELF_CTS_EN status. */
1147#define TX_CMD_FLG_RTS_CTS_MSK __constant_cpu_to_le32(1 << 0)
1148
1021/* 1: Use Request-To-Send protocol before this frame. 1149/* 1: Use Request-To-Send protocol before this frame.
1022 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */ 1150 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */
1023#define TX_CMD_FLG_RTS_MSK __constant_cpu_to_le32(1 << 1) 1151#define TX_CMD_FLG_RTS_MSK __constant_cpu_to_le32(1 << 1)
@@ -1100,6 +1228,14 @@ struct iwl4965_rx_mpdu_res_start {
1100#define TX_CMD_SEC_KEY128 0x08 1228#define TX_CMD_SEC_KEY128 0x08
1101 1229
1102/* 1230/*
1231 * security overhead sizes
1232 */
1233#define WEP_IV_LEN 4
1234#define WEP_ICV_LEN 4
1235#define CCMP_MIC_LEN 8
1236#define TKIP_ICV_LEN 4
1237
1238/*
1103 * 4965 uCode updates these Tx attempt count values in host DRAM. 1239 * 4965 uCode updates these Tx attempt count values in host DRAM.
1104 * Used for managing Tx retries when expecting block-acks. 1240 * Used for managing Tx retries when expecting block-acks.
1105 * Driver should set these fields to 0. 1241 * Driver should set these fields to 0.
@@ -1113,7 +1249,7 @@ struct iwl4965_dram_scratch {
1113/* 1249/*
1114 * REPLY_TX = 0x1c (command) 1250 * REPLY_TX = 0x1c (command)
1115 */ 1251 */
1116struct iwl4965_tx_cmd { 1252struct iwl_tx_cmd {
1117 /* 1253 /*
1118 * MPDU byte count: 1254 * MPDU byte count:
1119 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, 1255 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1259,6 +1395,15 @@ enum {
1259 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1395 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1260}; 1396};
1261 1397
1398static inline int iwl_is_tx_success(u32 status)
1399{
1400 status &= TX_STATUS_MSK;
1401 return (status == TX_STATUS_SUCCESS)
1402 || (status == TX_STATUS_DIRECT_DONE);
1403}
1404
1405
1406
1262/* ******************************* 1407/* *******************************
1263 * TX aggregation status 1408 * TX aggregation status
1264 ******************************* */ 1409 ******************************* */
@@ -1313,6 +1458,11 @@ enum {
1313 * within the sending station (this 4965), rather than whether it was 1458 * within the sending station (this 4965), rather than whether it was
1314 * received successfully by the destination station. 1459 * received successfully by the destination station.
1315 */ 1460 */
1461struct agg_tx_status {
1462 __le16 status;
1463 __le16 sequence;
1464} __attribute__ ((packed));
1465
1316struct iwl4965_tx_resp { 1466struct iwl4965_tx_resp {
1317 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1467 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1318 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ 1468 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
@@ -1344,34 +1494,56 @@ struct iwl4965_tx_resp {
1344 * table entry used for all frames in the new agg. 1494 * table entry used for all frames in the new agg.
1345 * 31-16: Sequence # for this frame's Tx cmd (not SSN!) 1495 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1346 */ 1496 */
1347 __le32 status; /* TX status (for aggregation status of 1st frame) */ 1497 union {
1498 __le32 status;
1499 struct agg_tx_status agg_status[0]; /* for each agg frame */
1500 } u;
1348} __attribute__ ((packed)); 1501} __attribute__ ((packed));
1349 1502
1350struct agg_tx_status { 1503struct iwl5000_tx_resp {
1351 __le16 status; 1504 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1352 __le16 sequence; 1505 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1353} __attribute__ ((packed)); 1506 u8 failure_rts; /* # failures due to unsuccessful RTS */
1507 u8 failure_frame; /* # failures due to no ACK (unused for agg) */
1354 1508
1355struct iwl4965_tx_resp_agg { 1509 /* For non-agg: Rate at which frame was successful.
1356 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1510 * For agg: Rate at which all frames were transmitted. */
1357 u8 reserved1; 1511 __le32 rate_n_flags; /* RATE_MCS_* */
1358 u8 failure_rts; 1512
1359 u8 failure_frame; 1513 /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
1360 __le32 rate_n_flags; 1514 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1361 __le16 wireless_media_time; 1515 __le16 wireless_media_time; /* uSecs */
1362 __le16 reserved3; 1516
1363 __le32 pa_power1; 1517 __le16 reserved;
1518 __le32 pa_power1; /* RF power amplifier measurement (not used) */
1364 __le32 pa_power2; 1519 __le32 pa_power2;
1365 struct agg_tx_status status; /* TX status (for aggregation status */
1366 /* of 1st frame) */
1367} __attribute__ ((packed));
1368 1520
1521 __le32 tfd_info;
1522 __le16 seq_ctl;
1523 __le16 byte_cnt;
1524 __le32 tlc_info;
1525 /*
1526 * For non-agg: frame status TX_STATUS_*
1527 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
1528 * fields follow this one, up to frame_count.
1529 * Bit fields:
1530 * 11- 0: AGG_TX_STATE_* status code
1531 * 15-12: Retry count for 1st frame in aggregation (retries
1532 * occur if tx failed for this frame when it was a
1533 * member of a previous aggregation block). If rate
1534 * scaling is used, retry count indicates the rate
1535 * table entry used for all frames in the new agg.
1536 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1537 */
1538 struct agg_tx_status status; /* TX status (in aggregation -
1539 * status of 1st frame) */
1540} __attribute__ ((packed));
1369/* 1541/*
1370 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) 1542 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1371 * 1543 *
1372 * Reports Block-Acknowledge from recipient station 1544 * Reports Block-Acknowledge from recipient station
1373 */ 1545 */
1374struct iwl4965_compressed_ba_resp { 1546struct iwl_compressed_ba_resp {
1375 __le32 sta_addr_lo32; 1547 __le32 sta_addr_lo32;
1376 __le16 sta_addr_hi16; 1548 __le16 sta_addr_hi16;
1377 __le16 reserved; 1549 __le16 reserved;
@@ -1853,6 +2025,7 @@ struct iwl4965_spectrum_notification {
1853#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1 << 0) 2025#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1 << 0)
1854#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1 << 2) 2026#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1 << 2)
1855#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3) 2027#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3)
2028#define IWL_POWER_FAST_PD __constant_cpu_to_le16(1 << 4)
1856 2029
1857struct iwl4965_powertable_cmd { 2030struct iwl4965_powertable_cmd {
1858 __le16 flags; 2031 __le16 flags;
@@ -1914,7 +2087,7 @@ struct iwl4965_card_state_notif {
1914#define RF_CARD_DISABLED 0x04 2087#define RF_CARD_DISABLED 0x04
1915#define RXON_CARD_DISABLED 0x10 2088#define RXON_CARD_DISABLED 0x10
1916 2089
1917struct iwl4965_ct_kill_config { 2090struct iwl_ct_kill_config {
1918 __le32 reserved; 2091 __le32 reserved;
1919 __le32 critical_temperature_M; 2092 __le32 critical_temperature_M;
1920 __le32 critical_temperature_R; 2093 __le32 critical_temperature_R;
@@ -1926,8 +2099,11 @@ struct iwl4965_ct_kill_config {
1926 * 2099 *
1927 *****************************************************************************/ 2100 *****************************************************************************/
1928 2101
2102#define SCAN_CHANNEL_TYPE_PASSIVE __constant_cpu_to_le32(0)
2103#define SCAN_CHANNEL_TYPE_ACTIVE __constant_cpu_to_le32(1)
2104
1929/** 2105/**
1930 * struct iwl4965_scan_channel - entry in REPLY_SCAN_CMD channel table 2106 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
1931 * 2107 *
1932 * One for each channel in the scan list. 2108 * One for each channel in the scan list.
1933 * Each channel can independently select: 2109 * Each channel can independently select:
@@ -1937,7 +2113,7 @@ struct iwl4965_ct_kill_config {
1937 * quiet_plcp_th, good_CRC_th) 2113 * quiet_plcp_th, good_CRC_th)
1938 * 2114 *
1939 * To avoid uCode errors, make sure the following are true (see comments 2115 * To avoid uCode errors, make sure the following are true (see comments
1940 * under struct iwl4965_scan_cmd about max_out_time and quiet_time): 2116 * under struct iwl_scan_cmd about max_out_time and quiet_time):
1941 * 1) If using passive_dwell (i.e. passive_dwell != 0): 2117 * 1) If using passive_dwell (i.e. passive_dwell != 0):
1942 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) 2118 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
1943 * 2) quiet_time <= active_dwell 2119 * 2) quiet_time <= active_dwell
@@ -1945,37 +2121,38 @@ struct iwl4965_ct_kill_config {
1945 * passive_dwell < max_out_time 2121 * passive_dwell < max_out_time
1946 * active_dwell < max_out_time 2122 * active_dwell < max_out_time
1947 */ 2123 */
1948struct iwl4965_scan_channel { 2124struct iwl_scan_channel {
1949 /* 2125 /*
1950 * type is defined as: 2126 * type is defined as:
1951 * 0:0 1 = active, 0 = passive 2127 * 0:0 1 = active, 0 = passive
1952 * 1:4 SSID direct bit map; if a bit is set, then corresponding 2128 * 1:20 SSID direct bit map; if a bit is set, then corresponding
1953 * SSID IE is transmitted in probe request. 2129 * SSID IE is transmitted in probe request.
1954 * 5:7 reserved 2130 * 21:31 reserved
1955 */ 2131 */
1956 u8 type; 2132 __le32 type;
1957 u8 channel; /* band is selected by iwl4965_scan_cmd "flags" field */ 2133 __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
1958 struct iwl4965_tx_power tpc; 2134 u8 tx_gain; /* gain for analog radio */
2135 u8 dsp_atten; /* gain for DSP */
1959 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2136 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
1960 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ 2137 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
1961} __attribute__ ((packed)); 2138} __attribute__ ((packed));
1962 2139
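[Editorial note, not part of the patch] To make the widened type field concrete, a sketch of programming one active scan channel that direct-probes the first SSID entry; the channel number and dwell times are illustrative values taken from the typical ranges noted in the struct comments.

/* Hypothetical scan-channel setup: active scan on channel 6,
 * direct-probing the first SSID entry (bit 1 of the type bitmap). */
struct iwl_scan_channel scan_ch;

memset(&scan_ch, 0, sizeof(scan_ch));
scan_ch.type = SCAN_CHANNEL_TYPE_ACTIVE | cpu_to_le32(1 << 1);
scan_ch.channel = cpu_to_le16(6);
scan_ch.active_dwell = cpu_to_le16(30);		/* TU, typ 5-50 */
scan_ch.passive_dwell = cpu_to_le16(100);	/* TU, typ 20-500 */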
1963/** 2140/**
1964 * struct iwl4965_ssid_ie - directed scan network information element 2141 * struct iwl_ssid_ie - directed scan network information element
1965 * 2142 *
1966 * Up to 4 of these may appear in REPLY_SCAN_CMD, selected by "type" field 2143 * Up to 4 of these may appear in REPLY_SCAN_CMD, selected by "type" field
1967 * in struct iwl4965_scan_channel; each channel may select different ssids from 2144 * in struct iwl4965_scan_channel; each channel may select different ssids from
1968 * among the 4 entries. SSID IEs get transmitted in reverse order of entry. 2145 * among the 4 entries. SSID IEs get transmitted in reverse order of entry.
1969 */ 2146 */
1970struct iwl4965_ssid_ie { 2147struct iwl_ssid_ie {
1971 u8 id; 2148 u8 id;
1972 u8 len; 2149 u8 len;
1973 u8 ssid[32]; 2150 u8 ssid[32];
1974} __attribute__ ((packed)); 2151} __attribute__ ((packed));
1975 2152
1976#define PROBE_OPTION_MAX 0x4 2153#define PROBE_OPTION_MAX 0x14
1977#define TX_CMD_LIFE_TIME_INFINITE __constant_cpu_to_le32(0xFFFFFFFF) 2154#define TX_CMD_LIFE_TIME_INFINITE __constant_cpu_to_le32(0xFFFFFFFF)
1978#define IWL_GOOD_CRC_TH __constant_cpu_to_le16(1) 2155#define IWL_GOOD_CRC_TH __constant_cpu_to_le16(1)
1979#define IWL_MAX_SCAN_SIZE 1024 2156#define IWL_MAX_SCAN_SIZE 1024
1980 2157
1981/* 2158/*
@@ -2028,9 +2205,9 @@ struct iwl4965_ssid_ie {
2028 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. 2205 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2029 * 2206 *
2030 * To avoid uCode errors, see timing restrictions described under 2207 * To avoid uCode errors, see timing restrictions described under
2031 * struct iwl4965_scan_channel. 2208 * struct iwl_scan_channel.
2032 */ 2209 */
2033struct iwl4965_scan_cmd { 2210struct iwl_scan_cmd {
2034 __le16 len; 2211 __le16 len;
2035 u8 reserved0; 2212 u8 reserved0;
2036 u8 channel_count; /* # channels in channel list */ 2213 u8 channel_count; /* # channels in channel list */
@@ -2051,10 +2228,10 @@ struct iwl4965_scan_cmd {
2051 2228
2052 /* For active scans (set to all-0s for passive scans). 2229 /* For active scans (set to all-0s for passive scans).
2053 * Does not include payload. Must specify Tx rate; no rate scaling. */ 2230 * Does not include payload. Must specify Tx rate; no rate scaling. */
2054 struct iwl4965_tx_cmd tx_cmd; 2231 struct iwl_tx_cmd tx_cmd;
2055 2232
2056 /* For directed active scans (set to all-0s otherwise) */ 2233 /* For directed active scans (set to all-0s otherwise) */
2057 struct iwl4965_ssid_ie direct_scan[PROBE_OPTION_MAX]; 2234 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
2058 2235
2059 /* 2236 /*
2060 * Probe request frame, followed by channel list. 2237 * Probe request frame, followed by channel list.
@@ -2082,14 +2259,14 @@ struct iwl4965_scan_cmd {
2082/* 2259/*
2083 * REPLY_SCAN_CMD = 0x80 (response) 2260 * REPLY_SCAN_CMD = 0x80 (response)
2084 */ 2261 */
2085struct iwl4965_scanreq_notification { 2262struct iwl_scanreq_notification {
2086 __le32 status; /* 1: okay, 2: cannot fulfill request */ 2263 __le32 status; /* 1: okay, 2: cannot fulfill request */
2087} __attribute__ ((packed)); 2264} __attribute__ ((packed));
2088 2265
2089/* 2266/*
2090 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) 2267 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
2091 */ 2268 */
2092struct iwl4965_scanstart_notification { 2269struct iwl_scanstart_notification {
2093 __le32 tsf_low; 2270 __le32 tsf_low;
2094 __le32 tsf_high; 2271 __le32 tsf_high;
2095 __le32 beacon_timer; 2272 __le32 beacon_timer;
@@ -2106,7 +2283,7 @@ struct iwl4965_scanstart_notification {
2106/* 2283/*
2107 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) 2284 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
2108 */ 2285 */
2109struct iwl4965_scanresults_notification { 2286struct iwl_scanresults_notification {
2110 u8 channel; 2287 u8 channel;
2111 u8 band; 2288 u8 band;
2112 u8 reserved[2]; 2289 u8 reserved[2];
@@ -2118,7 +2295,7 @@ struct iwl4965_scanresults_notification {
2118/* 2295/*
2119 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) 2296 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
2120 */ 2297 */
2121struct iwl4965_scancomplete_notification { 2298struct iwl_scancomplete_notification {
2122 u8 scanned_channels; 2299 u8 scanned_channels;
2123 u8 status; 2300 u8 status;
2124 u8 reserved; 2301 u8 reserved;
@@ -2148,7 +2325,7 @@ struct iwl4965_beacon_notif {
2148 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2325 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2149 */ 2326 */
2150struct iwl4965_tx_beacon_cmd { 2327struct iwl4965_tx_beacon_cmd {
2151 struct iwl4965_tx_cmd tx; 2328 struct iwl_tx_cmd tx;
2152 __le16 tim_idx; 2329 __le16 tim_idx;
2153 u8 tim_size; 2330 u8 tim_size;
2154 u8 reserved1; 2331 u8 reserved1;
@@ -2339,7 +2516,7 @@ struct statistics_general {
2339 */ 2516 */
2340#define IWL_STATS_CONF_CLEAR_STATS __constant_cpu_to_le32(0x1) /* see above */ 2517#define IWL_STATS_CONF_CLEAR_STATS __constant_cpu_to_le32(0x1) /* see above */
2341#define IWL_STATS_CONF_DISABLE_NOTIF __constant_cpu_to_le32(0x2)/* see above */ 2518#define IWL_STATS_CONF_DISABLE_NOTIF __constant_cpu_to_le32(0x2)/* see above */
2342struct iwl4965_statistics_cmd { 2519struct iwl_statistics_cmd {
2343 __le32 configuration_flags; /* IWL_STATS_CONF_* */ 2520 __le32 configuration_flags; /* IWL_STATS_CONF_* */
2344} __attribute__ ((packed)); 2521} __attribute__ ((packed));
2345 2522
@@ -2360,7 +2537,7 @@ struct iwl4965_statistics_cmd {
2360 */ 2537 */
2361#define STATISTICS_REPLY_FLG_BAND_24G_MSK __constant_cpu_to_le32(0x2) 2538#define STATISTICS_REPLY_FLG_BAND_24G_MSK __constant_cpu_to_le32(0x2)
2362#define STATISTICS_REPLY_FLG_FAT_MODE_MSK __constant_cpu_to_le32(0x8) 2539#define STATISTICS_REPLY_FLG_FAT_MODE_MSK __constant_cpu_to_le32(0x8)
2363struct iwl4965_notif_statistics { 2540struct iwl_notif_statistics {
2364 __le32 flag; 2541 __le32 flag;
2365 struct statistics_rx rx; 2542 struct statistics_rx rx;
2366 struct statistics_tx tx; 2543 struct statistics_tx tx;
@@ -2559,7 +2736,7 @@ struct iwl4965_missed_beacon_notif {
2559 */ 2736 */
2560 2737
2561/* 2738/*
2562 * Table entries in SENSITIVITY_CMD (struct iwl4965_sensitivity_cmd) 2739 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
2563 */ 2740 */
2564#define HD_TABLE_SIZE (11) /* number of entries */ 2741#define HD_TABLE_SIZE (11) /* number of entries */
2565#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ 2742#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
@@ -2574,18 +2751,18 @@ struct iwl4965_missed_beacon_notif {
2574#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) 2751#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
2575#define HD_OFDM_ENERGY_TH_IN_INDEX (10) 2752#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
2576 2753
2577/* Control field in struct iwl4965_sensitivity_cmd */ 2754/* Control field in struct iwl_sensitivity_cmd */
2578#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0) 2755#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0)
2579#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1) 2756#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1)
2580 2757
2581/** 2758/**
2582 * struct iwl4965_sensitivity_cmd 2759 * struct iwl_sensitivity_cmd
2583 * @control: (1) updates working table, (0) updates default table 2760 * @control: (1) updates working table, (0) updates default table
2584 * @table: energy threshold values, use HD_* as index into table 2761 * @table: energy threshold values, use HD_* as index into table
2585 * 2762 *
2586 * Always use "1" in "control" to update uCode's working table and DSP. 2763 * Always use "1" in "control" to update uCode's working table and DSP.
2587 */ 2764 */
2588struct iwl4965_sensitivity_cmd { 2765struct iwl_sensitivity_cmd {
2589 __le16 control; /* always use "1" */ 2766 __le16 control; /* always use "1" */
2590 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ 2767 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
2591} __attribute__ ((packed)); 2768} __attribute__ ((packed));
@@ -2659,6 +2836,86 @@ struct iwl4965_calibration_cmd {
2659 u8 reserved1; 2836 u8 reserved1;
2660} __attribute__ ((packed)); 2837} __attribute__ ((packed));
2661 2838
2839/* Phy calibration command for 5000 series */
2840
2841enum {
2842 IWL5000_PHY_CALIBRATE_DC_CMD = 8,
2843 IWL5000_PHY_CALIBRATE_LO_CMD = 9,
2844 IWL5000_PHY_CALIBRATE_RX_BB_CMD = 10,
2845 IWL5000_PHY_CALIBRATE_TX_IQ_CMD = 11,
2846 IWL5000_PHY_CALIBRATE_RX_IQ_CMD = 12,
2847 IWL5000_PHY_CALIBRATION_NOISE_CMD = 13,
2848 IWL5000_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
2849 IWL5000_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
2850 IWL5000_PHY_CALIBRATE_BASE_BAND_CMD = 16,
2851 IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
2852 IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD = 18,
2853 IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD = 19,
2854};
2855
2856enum {
2857 CALIBRATION_CFG_CMD = 0x65,
2858 CALIBRATION_RES_NOTIFICATION = 0x66,
2859 CALIBRATION_COMPLETE_NOTIFICATION = 0x67
2860};
2861
2862struct iwl_cal_crystal_freq_cmd {
2863 u8 cap_pin1;
2864 u8 cap_pin2;
2865} __attribute__ ((packed));
2866
2867struct iwl5000_calibration {
2868 u8 op_code;
2869 u8 first_group;
2870 u8 num_groups;
2871 u8 all_data_valid;
2872 struct iwl_cal_crystal_freq_cmd data;
2873} __attribute__ ((packed));
2874
2875#define IWL_CALIB_INIT_CFG_ALL __constant_cpu_to_le32(0xffffffff)
2876
2877struct iwl_calib_cfg_elmnt_s {
2878 __le32 is_enable;
2879 __le32 start;
2880 __le32 send_res;
2881 __le32 apply_res;
2882 __le32 reserved;
2883} __attribute__ ((packed));
2884
2885struct iwl_calib_cfg_status_s {
2886 struct iwl_calib_cfg_elmnt_s once;
2887 struct iwl_calib_cfg_elmnt_s perd;
2888 __le32 flags;
2889} __attribute__ ((packed));
2890
2891struct iwl5000_calib_cfg_cmd {
2892 struct iwl_calib_cfg_status_s ucd_calib_cfg;
2893 struct iwl_calib_cfg_status_s drv_calib_cfg;
2894 __le32 reserved1;
2895} __attribute__ ((packed));
2896
2897struct iwl5000_calib_hdr {
2898 u8 op_code;
2899 u8 first_group;
2900 u8 groups_num;
2901 u8 data_valid;
2902} __attribute__ ((packed));
2903
2904struct iwl5000_calibration_chain_noise_reset_cmd {
2905 u8 op_code; /* IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
2906 u8 flags; /* not used */
2907 __le16 reserved;
2908} __attribute__ ((packed));
2909
2910struct iwl5000_calibration_chain_noise_gain_cmd {
2911 u8 op_code; /* IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */
2912 u8 flags; /* not used */
2913 __le16 reserved;
2914 u8 delta_gain_1;
2915 u8 delta_gain_2;
2916 __le16 reserved1;
2917} __attribute__ ((packed));
2918
2662/****************************************************************************** 2919/******************************************************************************
2663 * (12) 2920 * (12)
2664 * Miscellaneous Commands: 2921 * Miscellaneous Commands:
@@ -2672,7 +2929,7 @@ struct iwl4965_calibration_cmd {
2672 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), 2929 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
2673 * this command turns it on or off, or sets up a periodic blinking cycle. 2930 * this command turns it on or off, or sets up a periodic blinking cycle.
2674 */ 2931 */
2675struct iwl4965_led_cmd { 2932struct iwl_led_cmd {
2676 __le32 interval; /* "interval" in uSec */ 2933 __le32 interval; /* "interval" in uSec */
2677 u8 id; /* 1: Activity, 2: Link, 3: Tech */ 2934 u8 id; /* 1: Activity, 2: Link, 3: Tech */
2678 u8 off; /* # intervals off while blinking; 2935 u8 off; /* # intervals off while blinking;
@@ -2682,30 +2939,81 @@ struct iwl4965_led_cmd {
2682 u8 reserved; 2939 u8 reserved;
2683} __attribute__ ((packed)); 2940} __attribute__ ((packed));
2684 2941
2942/*
2943 * Coexistence WIFI/WIMAX Command
2944 * COEX_PRIORITY_TABLE_CMD = 0x5a
2945 *
2946 */
2947enum {
2948 COEX_UNASSOC_IDLE = 0,
2949 COEX_UNASSOC_MANUAL_SCAN = 1,
2950 COEX_UNASSOC_AUTO_SCAN = 2,
2951 COEX_CALIBRATION = 3,
2952 COEX_PERIODIC_CALIBRATION = 4,
2953 COEX_CONNECTION_ESTAB = 5,
2954 COEX_ASSOCIATED_IDLE = 6,
2955 COEX_ASSOC_MANUAL_SCAN = 7,
2956 COEX_ASSOC_AUTO_SCAN = 8,
2957 COEX_ASSOC_ACTIVE_LEVEL = 9,
2958 COEX_RF_ON = 10,
2959 COEX_RF_OFF = 11,
2960 COEX_STAND_ALONE_DEBUG = 12,
2961 COEX_IPAN_ASSOC_LEVEL = 13,
2962 COEX_RSRVD1 = 14,
2963 COEX_RSRVD2 = 15,
2964 COEX_NUM_OF_EVENTS = 16
2965};
2966
2967struct iwl_wimax_coex_event_entry {
2968 u8 request_prio;
2969 u8 win_medium_prio;
2970 u8 reserved;
2971 u8 flags;
2972} __attribute__ ((packed));
2973
2974/* COEX flag masks */
2975
 2976/* Station table is valid */
2977#define COEX_FLAGS_STA_TABLE_VALID_MSK (0x1)
2978/* UnMask wakeup src at unassociated sleep */
2979#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK (0x4)
2980/* UnMask wakeup src at associated sleep */
2981#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK (0x8)
2982/* Enable CoEx feature. */
2983#define COEX_FLAGS_COEX_ENABLE_MSK (0x80)
2984
2985struct iwl_wimax_coex_cmd {
2986 u8 flags;
2987 u8 reserved[3];
2988 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
2989} __attribute__ ((packed));
2990
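[Editorial note, not part of the patch] A minimal, hypothetical use of the coexistence command: sending an all-zero table with COEX_FLAGS_COEX_ENABLE_MSK clear leaves the WiFi/WiMAX feature disabled. The helper name is assumed; iwl_send_cmd_pdu() is the existing host-command helper.

/* Hypothetical: keep WiFi/WiMAX coexistence disabled (all-zero table,
 * COEX_FLAGS_COEX_ENABLE_MSK not set). */
static int example_send_wimax_coex(struct iwl_priv *priv)
{
	struct iwl_wimax_coex_cmd coex_cmd;

	memset(&coex_cmd, 0, sizeof(coex_cmd));
	return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
				sizeof(coex_cmd), &coex_cmd);
}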
2685/****************************************************************************** 2991/******************************************************************************
2686 * (13) 2992 * (13)
2687 * Union of all expected notifications/responses: 2993 * Union of all expected notifications/responses:
2688 * 2994 *
2689 *****************************************************************************/ 2995 *****************************************************************************/
2690 2996
2691struct iwl4965_rx_packet { 2997struct iwl_rx_packet {
2692 __le32 len; 2998 __le32 len;
2693 struct iwl_cmd_header hdr; 2999 struct iwl_cmd_header hdr;
2694 union { 3000 union {
2695 struct iwl4965_alive_resp alive_frame; 3001 struct iwl_alive_resp alive_frame;
2696 struct iwl4965_rx_frame rx_frame; 3002 struct iwl4965_rx_frame rx_frame;
2697 struct iwl4965_tx_resp tx_resp; 3003 struct iwl4965_tx_resp tx_resp;
2698 struct iwl4965_spectrum_notification spectrum_notif; 3004 struct iwl4965_spectrum_notification spectrum_notif;
2699 struct iwl4965_csa_notification csa_notif; 3005 struct iwl4965_csa_notification csa_notif;
2700 struct iwl4965_error_resp err_resp; 3006 struct iwl_error_resp err_resp;
2701 struct iwl4965_card_state_notif card_state_notif; 3007 struct iwl4965_card_state_notif card_state_notif;
2702 struct iwl4965_beacon_notif beacon_status; 3008 struct iwl4965_beacon_notif beacon_status;
2703 struct iwl4965_add_sta_resp add_sta; 3009 struct iwl_add_sta_resp add_sta;
3010 struct iwl_rem_sta_resp rem_sta;
2704 struct iwl4965_sleep_notification sleep_notif; 3011 struct iwl4965_sleep_notification sleep_notif;
2705 struct iwl4965_spectrum_resp spectrum; 3012 struct iwl4965_spectrum_resp spectrum;
2706 struct iwl4965_notif_statistics stats; 3013 struct iwl_notif_statistics stats;
2707 struct iwl4965_compressed_ba_resp compressed_ba; 3014 struct iwl_compressed_ba_resp compressed_ba;
2708 struct iwl4965_missed_beacon_notif missed_beacon; 3015 struct iwl4965_missed_beacon_notif missed_beacon;
3016 struct iwl5000_calibration calib;
2709 __le32 status; 3017 __le32 status;
2710 u8 raw[0]; 3018 u8 raw[0];
2711 } u; 3019 } u;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 2dfd982d7d1f..a44188bf4459 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -34,9 +34,11 @@
34struct iwl_priv; /* FIXME: remove */ 34struct iwl_priv; /* FIXME: remove */
35#include "iwl-debug.h" 35#include "iwl-debug.h"
36#include "iwl-eeprom.h" 36#include "iwl-eeprom.h"
37#include "iwl-4965.h" /* FIXME: remove */ 37#include "iwl-dev.h" /* FIXME: remove */
38#include "iwl-core.h" 38#include "iwl-core.h"
39#include "iwl-io.h"
39#include "iwl-rfkill.h" 40#include "iwl-rfkill.h"
41#include "iwl-power.h"
40 42
41 43
42MODULE_DESCRIPTION("iwl core"); 44MODULE_DESCRIPTION("iwl core");
@@ -44,10 +46,106 @@ MODULE_VERSION(IWLWIFI_VERSION);
44MODULE_AUTHOR(DRV_COPYRIGHT); 46MODULE_AUTHOR(DRV_COPYRIGHT);
45MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
46 48
47#ifdef CONFIG_IWLWIFI_DEBUG 49#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
48u32 iwl_debug_level; 50 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
49EXPORT_SYMBOL(iwl_debug_level); 51 IWL_RATE_SISO_##s##M_PLCP, \
50#endif 52 IWL_RATE_MIMO2_##s##M_PLCP,\
53 IWL_RATE_MIMO3_##s##M_PLCP,\
54 IWL_RATE_##r##M_IEEE, \
55 IWL_RATE_##ip##M_INDEX, \
56 IWL_RATE_##in##M_INDEX, \
57 IWL_RATE_##rp##M_INDEX, \
58 IWL_RATE_##rn##M_INDEX, \
59 IWL_RATE_##pp##M_INDEX, \
60 IWL_RATE_##np##M_INDEX }
61
62/*
63 * Parameter order:
64 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
65 *
66 * If there isn't a valid next or previous rate then INV is used which
67 * maps to IWL_RATE_INVALID
68 *
69 */
70const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
71 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
72 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
73 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
74 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
75 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
76 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
77 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
78 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
79 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
80 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
81 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
82 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
83 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
84 /* FIXME:RS: ^^ should be INV (legacy) */
85};
86EXPORT_SYMBOL(iwl_rates);
87
88/**
89 * translate ucode response to mac80211 tx status control values
90 */
91void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
92 struct ieee80211_tx_info *control)
93{
94 int rate_index;
95
96 control->antenna_sel_tx =
97 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
98 if (rate_n_flags & RATE_MCS_HT_MSK)
99 control->flags |= IEEE80211_TX_CTL_OFDM_HT;
100 if (rate_n_flags & RATE_MCS_GF_MSK)
101 control->flags |= IEEE80211_TX_CTL_GREEN_FIELD;
102 if (rate_n_flags & RATE_MCS_FAT_MSK)
103 control->flags |= IEEE80211_TX_CTL_40_MHZ_WIDTH;
104 if (rate_n_flags & RATE_MCS_DUP_MSK)
105 control->flags |= IEEE80211_TX_CTL_DUP_DATA;
106 if (rate_n_flags & RATE_MCS_SGI_MSK)
107 control->flags |= IEEE80211_TX_CTL_SHORT_GI;
108 rate_index = iwl_hwrate_to_plcp_idx(rate_n_flags);
109 if (control->band == IEEE80211_BAND_5GHZ)
110 rate_index -= IWL_FIRST_OFDM_RATE;
111 control->tx_rate_idx = rate_index;
112}
113EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
114
115int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
116{
117 int idx = 0;
118
119 /* HT rate format */
120 if (rate_n_flags & RATE_MCS_HT_MSK) {
121 idx = (rate_n_flags & 0xff);
122
123 if (idx >= IWL_RATE_MIMO2_6M_PLCP)
124 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
125
126 idx += IWL_FIRST_OFDM_RATE;
 127 /* skip 9M, not supported in HT */
128 if (idx >= IWL_RATE_9M_INDEX)
129 idx += 1;
130 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
131 return idx;
132
133 /* legacy rate format, search for match in table */
134 } else {
135 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
136 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
137 return idx;
138 }
139
140 return -1;
141}
142EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
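A minimal decoding sketch tying the two helpers above together (illustration only; the function name and debug print are not part of this patch, and iwl_rates[].ieee holds the rate in 500 kbps units, as used by iwlcore_init_hw_rates later in this file):

static void iwl_example_show_rate(u32 rate_n_flags)
{
	int idx = iwl_hwrate_to_plcp_idx(rate_n_flags);

	if (idx < 0) {
		printk(KERN_DEBUG "unknown PLCP 0x%02x\n", rate_n_flags & 0xFF);
		return;
	}
	/* iwl_rates[].ieee is the rate in 500 kbps units */
	printk(KERN_DEBUG "%s rate %d.%d Mbps (table index %d)\n",
	       (rate_n_flags & RATE_MCS_HT_MSK) ? "HT" : "legacy",
	       iwl_rates[idx].ieee / 2, (iwl_rates[idx].ieee & 1) * 5, idx);
}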
143
144
145
146const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
147EXPORT_SYMBOL(iwl_bcast_addr);
148
51 149
52/* This function both allocates and initializes hw and priv. */ 150/* This function both allocates and initializes hw and priv. */
53struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 151struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
@@ -72,25 +170,132 @@ out:
72} 170}
73EXPORT_SYMBOL(iwl_alloc_all); 171EXPORT_SYMBOL(iwl_alloc_all);
74 172
173void iwl_hw_detect(struct iwl_priv *priv)
174{
175 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
176 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
177 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
178}
179EXPORT_SYMBOL(iwl_hw_detect);
180
181/* Tell nic where to find the "keep warm" buffer */
182int iwl_kw_init(struct iwl_priv *priv)
183{
184 unsigned long flags;
185 int ret;
186
187 spin_lock_irqsave(&priv->lock, flags);
188 ret = iwl_grab_nic_access(priv);
189 if (ret)
190 goto out;
191
192 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
193 priv->kw.dma_addr >> 4);
194 iwl_release_nic_access(priv);
195out:
196 spin_unlock_irqrestore(&priv->lock, flags);
197 return ret;
198}
199
200int iwl_kw_alloc(struct iwl_priv *priv)
201{
202 struct pci_dev *dev = priv->pci_dev;
203 struct iwl_kw *kw = &priv->kw;
204
205 kw->size = IWL_KW_SIZE;
206 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
207 if (!kw->v_addr)
208 return -ENOMEM;
209
210 return 0;
211}
212
213/**
214 * iwl_kw_free - Free the "keep warm" buffer
215 */
216void iwl_kw_free(struct iwl_priv *priv)
217{
218 struct pci_dev *dev = priv->pci_dev;
219 struct iwl_kw *kw = &priv->kw;
220
221 if (kw->v_addr) {
222 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
223 memset(kw, 0, sizeof(*kw));
224 }
225}
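A sketch of the intended calling order for the three "keep warm" helpers above (the wrapper name is illustrative, not part of the patch):

static int iwl_example_kw_setup(struct iwl_priv *priv)
{
	int ret;

	/* allocate the DMA-coherent "keep warm" buffer first ... */
	ret = iwl_kw_alloc(priv);
	if (ret)
		return ret;

	/* ... then point the NIC at it; undo the allocation on failure */
	ret = iwl_kw_init(priv);
	if (ret)
		iwl_kw_free(priv);
	return ret;
}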
226
227int iwl_hw_nic_init(struct iwl_priv *priv)
228{
229 unsigned long flags;
230 struct iwl_rx_queue *rxq = &priv->rxq;
231 int ret;
232
233 /* nic_init */
234 spin_lock_irqsave(&priv->lock, flags);
235 priv->cfg->ops->lib->apm_ops.init(priv);
236 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
237 spin_unlock_irqrestore(&priv->lock, flags);
238
239 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
240
241 priv->cfg->ops->lib->apm_ops.config(priv);
242
243 /* Allocate the RX queue, or reset if it is already allocated */
244 if (!rxq->bd) {
245 ret = iwl_rx_queue_alloc(priv);
246 if (ret) {
247 IWL_ERROR("Unable to initialize Rx queue\n");
248 return -ENOMEM;
249 }
250 } else
251 iwl_rx_queue_reset(priv, rxq);
252
253 iwl_rx_replenish(priv);
254
255 iwl_rx_init(priv, rxq);
256
257 spin_lock_irqsave(&priv->lock, flags);
258
259 rxq->need_update = 1;
260 iwl_rx_queue_update_write_ptr(priv, rxq);
261
262 spin_unlock_irqrestore(&priv->lock, flags);
263
264 /* Allocate and init all Tx and Command queues */
265 ret = iwl_txq_ctx_reset(priv);
266 if (ret)
267 return ret;
268
269 set_bit(STATUS_INIT, &priv->status);
270
271 return 0;
272}
273EXPORT_SYMBOL(iwl_hw_nic_init);
274
75/** 275/**
76 * iwlcore_clear_stations_table - Clear the driver's station table 276 * iwl_clear_stations_table - Clear the driver's station table
77 * 277 *
78 * NOTE: This does not clear or otherwise alter the device's station table. 278 * NOTE: This does not clear or otherwise alter the device's station table.
79 */ 279 */
80void iwlcore_clear_stations_table(struct iwl_priv *priv) 280void iwl_clear_stations_table(struct iwl_priv *priv)
81{ 281{
82 unsigned long flags; 282 unsigned long flags;
83 283
84 spin_lock_irqsave(&priv->sta_lock, flags); 284 spin_lock_irqsave(&priv->sta_lock, flags);
85 285
286 if (iwl_is_alive(priv) &&
287 !test_bit(STATUS_EXIT_PENDING, &priv->status) &&
288 iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL))
289 IWL_ERROR("Couldn't clear the station table\n");
290
86 priv->num_stations = 0; 291 priv->num_stations = 0;
87 memset(priv->stations, 0, sizeof(priv->stations)); 292 memset(priv->stations, 0, sizeof(priv->stations));
88 293
89 spin_unlock_irqrestore(&priv->sta_lock, flags); 294 spin_unlock_irqrestore(&priv->sta_lock, flags);
90} 295}
91EXPORT_SYMBOL(iwlcore_clear_stations_table); 296EXPORT_SYMBOL(iwl_clear_stations_table);
92 297
93void iwlcore_reset_qos(struct iwl_priv *priv) 298void iwl_reset_qos(struct iwl_priv *priv)
94{ 299{
95 u16 cw_min = 15; 300 u16 cw_min = 15;
96 u16 cw_max = 1023; 301 u16 cw_max = 1023;
@@ -176,7 +381,397 @@ void iwlcore_reset_qos(struct iwl_priv *priv)
176 381
177 spin_unlock_irqrestore(&priv->lock, flags); 382 spin_unlock_irqrestore(&priv->lock, flags);
178} 383}
179EXPORT_SYMBOL(iwlcore_reset_qos); 384EXPORT_SYMBOL(iwl_reset_qos);
385
386#define MAX_BIT_RATE_40_MHZ 0x96; /* 150 Mbps */
387#define MAX_BIT_RATE_20_MHZ 0x48; /* 72 Mbps */
388static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
389 struct ieee80211_ht_info *ht_info,
390 enum ieee80211_band band)
391{
392 u16 max_bit_rate = 0;
393 u8 rx_chains_num = priv->hw_params.rx_chains_num;
394 u8 tx_chains_num = priv->hw_params.tx_chains_num;
395
396 ht_info->cap = 0;
397 memset(ht_info->supp_mcs_set, 0, 16);
398
399 ht_info->ht_supported = 1;
400
401 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
402 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
403 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
404 (IWL_MIMO_PS_NONE << 2));
405
406 max_bit_rate = MAX_BIT_RATE_20_MHZ;
407 if (priv->hw_params.fat_channel & BIT(band)) {
408 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
409 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
410 ht_info->supp_mcs_set[4] = 0x01;
411 max_bit_rate = MAX_BIT_RATE_40_MHZ;
412 }
413
414 if (priv->cfg->mod_params->amsdu_size_8K)
415 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
416
417 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
418 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
419
420 ht_info->supp_mcs_set[0] = 0xFF;
421 if (rx_chains_num >= 2)
422 ht_info->supp_mcs_set[1] = 0xFF;
423 if (rx_chains_num >= 3)
424 ht_info->supp_mcs_set[2] = 0xFF;
425
426 /* Highest supported Rx data rate */
427 max_bit_rate *= rx_chains_num;
428 ht_info->supp_mcs_set[10] = (u8)(max_bit_rate & 0x00FF);
429 ht_info->supp_mcs_set[11] = (u8)((max_bit_rate & 0xFF00) >> 8);
430
431 /* Tx MCS capabilities */
432 ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED;
433 if (tx_chains_num != rx_chains_num) {
434 ht_info->supp_mcs_set[12] |= IEEE80211_HT_CAP_MCS_TX_RX_DIFF;
435 ht_info->supp_mcs_set[12] |= ((tx_chains_num - 1) << 2);
436 }
437}
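Worked example for the capability bytes set above: a device with two receive chains on a fat (40 MHz capable) band advertises a highest supported Rx rate of 2 x 150 Mbps = 300 Mbps = 0x012C, so supp_mcs_set[10] ends up as 0x2C and supp_mcs_set[11] as 0x01.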
438
439static void iwlcore_init_hw_rates(struct iwl_priv *priv,
440 struct ieee80211_rate *rates)
441{
442 int i;
443
444 for (i = 0; i < IWL_RATE_COUNT; i++) {
445 rates[i].bitrate = iwl_rates[i].ieee * 5;
446 rates[i].hw_value = i; /* Rate scaling will work on indexes */
447 rates[i].hw_value_short = i;
448 rates[i].flags = 0;
449 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
450 /*
451 * If CCK != 1M then set short preamble rate flag.
452 */
453 rates[i].flags |=
454 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
455 0 : IEEE80211_RATE_SHORT_PREAMBLE;
456 }
457 }
458}
459
460/**
461 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
462 */
463static int iwlcore_init_geos(struct iwl_priv *priv)
464{
465 struct iwl_channel_info *ch;
466 struct ieee80211_supported_band *sband;
467 struct ieee80211_channel *channels;
468 struct ieee80211_channel *geo_ch;
469 struct ieee80211_rate *rates;
470 int i = 0;
471
472 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
473 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
474 IWL_DEBUG_INFO("Geography modes already initialized.\n");
475 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
476 return 0;
477 }
478
479 channels = kzalloc(sizeof(struct ieee80211_channel) *
480 priv->channel_count, GFP_KERNEL);
481 if (!channels)
482 return -ENOMEM;
483
484 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
485 GFP_KERNEL);
486 if (!rates) {
487 kfree(channels);
488 return -ENOMEM;
489 }
490
491 /* 5.2GHz channels start after the 2.4GHz channels */
492 sband = &priv->bands[IEEE80211_BAND_5GHZ];
493 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
494 /* just OFDM */
495 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
496 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
497
498 if (priv->cfg->sku & IWL_SKU_N)
499 iwlcore_init_ht_hw_capab(priv, &sband->ht_info,
500 IEEE80211_BAND_5GHZ);
501
502 sband = &priv->bands[IEEE80211_BAND_2GHZ];
503 sband->channels = channels;
504 /* OFDM & CCK */
505 sband->bitrates = rates;
506 sband->n_bitrates = IWL_RATE_COUNT;
507
508 if (priv->cfg->sku & IWL_SKU_N)
509 iwlcore_init_ht_hw_capab(priv, &sband->ht_info,
510 IEEE80211_BAND_2GHZ);
511
512 priv->ieee_channels = channels;
513 priv->ieee_rates = rates;
514
515 iwlcore_init_hw_rates(priv, rates);
516
517 for (i = 0; i < priv->channel_count; i++) {
518 ch = &priv->channel_info[i];
519
520 /* FIXME: might be removed if scan is OK */
521 if (!is_channel_valid(ch))
522 continue;
523
524 if (is_channel_a_band(ch))
525 sband = &priv->bands[IEEE80211_BAND_5GHZ];
526 else
527 sband = &priv->bands[IEEE80211_BAND_2GHZ];
528
529 geo_ch = &sband->channels[sband->n_channels++];
530
531 geo_ch->center_freq =
532 ieee80211_channel_to_frequency(ch->channel);
533 geo_ch->max_power = ch->max_power_avg;
534 geo_ch->max_antenna_gain = 0xff;
535 geo_ch->hw_value = ch->channel;
536
537 if (is_channel_valid(ch)) {
538 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
539 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
540
541 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
542 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
543
544 if (ch->flags & EEPROM_CHANNEL_RADAR)
545 geo_ch->flags |= IEEE80211_CHAN_RADAR;
546
547 geo_ch->flags |= ch->fat_extension_channel;
548
549 if (ch->max_power_avg > priv->tx_power_channel_lmt)
550 priv->tx_power_channel_lmt = ch->max_power_avg;
551 } else {
552 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
553 }
554
555 /* Save flags for reg domain usage */
556 geo_ch->orig_flags = geo_ch->flags;
557
558 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
559 ch->channel, geo_ch->center_freq,
560 is_channel_a_band(ch) ? "5.2" : "2.4",
561 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
562 "restricted" : "valid",
563 geo_ch->flags);
564 }
565
566 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
567 priv->cfg->sku & IWL_SKU_A) {
568 printk(KERN_INFO DRV_NAME
569 ": Incorrectly detected BG card as ABG. Please send "
570 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
571 priv->pci_dev->device, priv->pci_dev->subsystem_device);
572 priv->cfg->sku &= ~IWL_SKU_A;
573 }
574
575 printk(KERN_INFO DRV_NAME
576 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
577 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
578 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
579
580
581 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
582
583 return 0;
584}
585
586/*
587 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
588 */
589static void iwlcore_free_geos(struct iwl_priv *priv)
590{
591 kfree(priv->ieee_channels);
592 kfree(priv->ieee_rates);
593 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
594}
595
596static u8 is_single_rx_stream(struct iwl_priv *priv)
597{
598 return !priv->current_ht_config.is_ht ||
599 ((priv->current_ht_config.supp_mcs_set[1] == 0) &&
600 (priv->current_ht_config.supp_mcs_set[2] == 0)) ||
601 priv->ps_mode == IWL_MIMO_PS_STATIC;
602}
603
604static u8 iwl_is_channel_extension(struct iwl_priv *priv,
605 enum ieee80211_band band,
606 u16 channel, u8 extension_chan_offset)
607{
608 const struct iwl_channel_info *ch_info;
609
610 ch_info = iwl_get_channel_info(priv, band, channel);
611 if (!is_channel_valid(ch_info))
612 return 0;
613
614 if (extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE)
615 return !(ch_info->fat_extension_channel &
616 IEEE80211_CHAN_NO_FAT_ABOVE);
617 else if (extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW)
618 return !(ch_info->fat_extension_channel &
619 IEEE80211_CHAN_NO_FAT_BELOW);
620
621 return 0;
622}
623
624u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
625 struct ieee80211_ht_info *sta_ht_inf)
626{
627 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
628
629 if ((!iwl_ht_conf->is_ht) ||
630 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
631 (iwl_ht_conf->extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE))
632 return 0;
633
634 if (sta_ht_inf) {
635 if ((!sta_ht_inf->ht_supported) ||
636 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
637 return 0;
638 }
639
640 return iwl_is_channel_extension(priv, priv->band,
641 iwl_ht_conf->control_channel,
642 iwl_ht_conf->extension_chan_offset);
643}
644EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
645
646void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
647{
648 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
649 u32 val;
650
651 if (!ht_info->is_ht)
652 return;
653
654 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
655 if (iwl_is_fat_tx_allowed(priv, NULL))
656 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
657 else
658 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
659 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
660
661 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
662 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
663 le16_to_cpu(rxon->channel),
664 ht_info->control_channel);
665 return;
666 }
667
668 /* Note: control channel is opposite of extension channel */
669 switch (ht_info->extension_chan_offset) {
670 case IEEE80211_HT_IE_CHA_SEC_ABOVE:
671 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
672 break;
673 case IEEE80211_HT_IE_CHA_SEC_BELOW:
674 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
675 break;
676 case IEEE80211_HT_IE_CHA_SEC_NONE:
677 default:
678 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
679 break;
680 }
681
682 val = ht_info->ht_protection;
683
684 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
685
686 iwl_set_rxon_chain(priv);
687
688 IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X "
689 "rxon flags 0x%X operation mode :0x%X "
690 "extension channel offset 0x%x "
691 "control chan %d\n",
692 ht_info->supp_mcs_set[0],
693 ht_info->supp_mcs_set[1],
694 ht_info->supp_mcs_set[2],
695 le32_to_cpu(rxon->flags), ht_info->ht_protection,
696 ht_info->extension_chan_offset,
697 ht_info->control_channel);
698 return;
699}
700EXPORT_SYMBOL(iwl_set_rxon_ht);
701
702/*
703 * Determine how many receiver/antenna chains to use.
704 * More provides better reception via diversity. Fewer saves power.
705 * MIMO (dual stream) requires at least 2, but works better with 3.
706 * This does not determine *which* chains to use, just how many.
707 */
708static int iwlcore_get_rx_chain_counter(struct iwl_priv *priv,
709 u8 *idle_state, u8 *rx_state)
710{
711 u8 is_single = is_single_rx_stream(priv);
712 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
713
714 /* # of Rx chains to use when expecting MIMO. */
715 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
716 *rx_state = 2;
717 else
718 *rx_state = 3;
719
720 /* # Rx chains when idling and maybe trying to save power */
721 switch (priv->ps_mode) {
722 case IWL_MIMO_PS_STATIC:
723 case IWL_MIMO_PS_DYNAMIC:
724 *idle_state = (is_cam) ? 2 : 1;
725 break;
726 case IWL_MIMO_PS_NONE:
727 *idle_state = (is_cam) ? *rx_state : 1;
728 break;
729 default:
730 *idle_state = 1;
731 break;
732 }
733
734 return 0;
735}
736
737/**
738 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
739 *
740 * Selects how many and which Rx receivers/antennas/chains to use.
 741 * This should not be used for the scan command ... it puts data in the wrong place.

742 */
743void iwl_set_rxon_chain(struct iwl_priv *priv)
744{
745 u8 is_single = is_single_rx_stream(priv);
746 u8 idle_state, rx_state;
747
748 priv->staging_rxon.rx_chain = 0;
749 rx_state = idle_state = 3;
750
751 /* Tell uCode which antennas are actually connected.
752 * Before first association, we assume all antennas are connected.
753 * Just after first association, iwl_chain_noise_calibration()
754 * checks which antennas actually *are* connected. */
755 priv->staging_rxon.rx_chain |=
756 cpu_to_le16(priv->hw_params.valid_rx_ant <<
757 RXON_RX_CHAIN_VALID_POS);
758
759 /* How many receivers should we use? */
760 iwlcore_get_rx_chain_counter(priv, &idle_state, &rx_state);
761 priv->staging_rxon.rx_chain |=
762 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
763 priv->staging_rxon.rx_chain |=
764 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
765
766 if (!is_single && (rx_state >= 2) &&
767 !test_bit(STATUS_POWER_PMI, &priv->status))
768 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
769 else
770 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
771
772 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
773}
774EXPORT_SYMBOL(iwl_set_rxon_chain);
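As an illustration of the resulting field: with all antennas marked valid, three receivers active and two kept on while idle, the staging rx_chain carries the valid antenna mask at RXON_RX_CHAIN_VALID_POS, 3 at RXON_RX_CHAIN_MIMO_CNT_POS, 2 at RXON_RX_CHAIN_CNT_POS, and RXON_RX_CHAIN_MIMO_FORCE_MSK set, provided the device is not reporting power save (STATUS_POWER_PMI clear).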
180 775
181/** 776/**
182 * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON 777 * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON
@@ -188,7 +783,7 @@ EXPORT_SYMBOL(iwlcore_reset_qos);
188 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 783 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
189 * in the staging RXON flag structure based on the phymode 784 * in the staging RXON flag structure based on the phymode
190 */ 785 */
191int iwlcore_set_rxon_channel(struct iwl_priv *priv, 786int iwl_set_rxon_channel(struct iwl_priv *priv,
192 enum ieee80211_band band, 787 enum ieee80211_band band,
193 u16 channel) 788 u16 channel)
194{ 789{
@@ -214,68 +809,185 @@ int iwlcore_set_rxon_channel(struct iwl_priv *priv,
214 809
215 return 0; 810 return 0;
216} 811}
217EXPORT_SYMBOL(iwlcore_set_rxon_channel); 812EXPORT_SYMBOL(iwl_set_rxon_channel);
218 813
219static void iwlcore_init_hw(struct iwl_priv *priv) 814int iwl_setup_mac(struct iwl_priv *priv)
220{ 815{
816 int ret;
221 struct ieee80211_hw *hw = priv->hw; 817 struct ieee80211_hw *hw = priv->hw;
222 hw->rate_control_algorithm = "iwl-4965-rs"; 818 hw->rate_control_algorithm = "iwl-4965-rs";
223 819
224 /* Tell mac80211 and its clients (e.g. Wireless Extensions) 820 /* Tell mac80211 our characteristics */
225 * the range of signal quality values that we'll provide. 821 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
226 * Negative values for level/noise indicate that we'll provide dBm. 822 IEEE80211_HW_SIGNAL_DBM |
227 * For WE, at least, non-0 values here *enable* display of values 823 IEEE80211_HW_NOISE_DBM;
228 * in app (iwconfig). */
229 hw->max_rssi = -20; /* signal level, negative indicates dBm */
230 hw->max_noise = -20; /* noise level, negative indicates dBm */
231 hw->max_signal = 100; /* link quality indication (%) */
232
233 /* Tell mac80211 our Tx characteristics */
234 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
235
236 /* Default value; 4 EDCA QOS priorities */ 824 /* Default value; 4 EDCA QOS priorities */
237 hw->queues = 4; 825 hw->queues = 4;
238#ifdef CONFIG_IWL4965_HT 826 /* queues to support 11n aggregation */
239 /* Enhanced value; more queues, to support 11n aggregation */ 827 if (priv->cfg->sku & IWL_SKU_N)
240 hw->queues = 16; 828 hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues;
241#endif /* CONFIG_IWL4965_HT */ 829
830 hw->conf.beacon_int = 100;
831
832 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
833 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
834 &priv->bands[IEEE80211_BAND_2GHZ];
835 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
836 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
837 &priv->bands[IEEE80211_BAND_5GHZ];
838
839 ret = ieee80211_register_hw(priv->hw);
840 if (ret) {
841 IWL_ERROR("Failed to register hw (error %d)\n", ret);
842 return ret;
843 }
844 priv->mac80211_registered = 1;
845
846 return 0;
242} 847}
848EXPORT_SYMBOL(iwl_setup_mac);
243 849
244int iwl_setup(struct iwl_priv *priv) 850int iwl_set_hw_params(struct iwl_priv *priv)
245{ 851{
246 int ret = 0; 852 priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
247 iwlcore_init_hw(priv); 853 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
248 ret = priv->cfg->ops->lib->init_drv(priv); 854 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
249 return ret; 855 if (priv->cfg->mod_params->amsdu_size_8K)
856 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
857 else
858 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
859 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
860
861 if (priv->cfg->mod_params->disable_11n)
862 priv->cfg->sku &= ~IWL_SKU_N;
863
864 /* Device-specific setup */
865 return priv->cfg->ops->lib->set_hw_params(priv);
250} 866}
251EXPORT_SYMBOL(iwl_setup); 867EXPORT_SYMBOL(iwl_set_hw_params);
252 868
253/* Low level driver call this function to update iwlcore with 869int iwl_init_drv(struct iwl_priv *priv)
254 * driver status.
255 */
256int iwlcore_low_level_notify(struct iwl_priv *priv,
257 enum iwlcore_card_notify notify)
258{ 870{
259 int ret; 871 int ret;
260 switch (notify) { 872
261 case IWLCORE_INIT_EVT: 873 priv->retry_rate = 1;
262 ret = iwl_rfkill_init(priv); 874 priv->ibss_beacon = NULL;
263 if (ret) 875
264 IWL_ERROR("Unable to initialize RFKILL system. " 876 spin_lock_init(&priv->lock);
265 "Ignoring error: %d\n", ret); 877 spin_lock_init(&priv->power_data.lock);
266 break; 878 spin_lock_init(&priv->sta_lock);
267 case IWLCORE_START_EVT: 879 spin_lock_init(&priv->hcmd_lock);
268 break; 880 spin_lock_init(&priv->lq_mngr.lock);
269 case IWLCORE_STOP_EVT: 881
270 break; 882 INIT_LIST_HEAD(&priv->free_frames);
271 case IWLCORE_REMOVE_EVT: 883
272 iwl_rfkill_unregister(priv); 884 mutex_init(&priv->mutex);
273 break; 885
886 /* Clear the driver's (not device's) station table */
887 iwl_clear_stations_table(priv);
888
889 priv->data_retry_limit = -1;
890 priv->ieee_channels = NULL;
891 priv->ieee_rates = NULL;
892 priv->band = IEEE80211_BAND_2GHZ;
893
894 priv->iw_mode = IEEE80211_IF_TYPE_STA;
895
896 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
897 priv->ps_mode = IWL_MIMO_PS_NONE;
898
899 /* Choose which receivers/antennas to use */
900 iwl_set_rxon_chain(priv);
901 iwl_init_scan_params(priv);
902
903 if (priv->cfg->mod_params->enable_qos)
904 priv->qos_data.qos_enable = 1;
905
906 iwl_reset_qos(priv);
907
908 priv->qos_data.qos_active = 0;
909 priv->qos_data.qos_cap.val = 0;
910
911 iwl_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
912
913 priv->rates_mask = IWL_RATES_MASK;
914 /* If power management is turned on, default to AC mode */
915 priv->power_mode = IWL_POWER_AC;
916 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MAX;
917
918 ret = iwl_init_channel_map(priv);
919 if (ret) {
920 IWL_ERROR("initializing regulatory failed: %d\n", ret);
921 goto err;
922 }
923
924 ret = iwlcore_init_geos(priv);
925 if (ret) {
926 IWL_ERROR("initializing geos failed: %d\n", ret);
927 goto err_free_channel_map;
274 } 928 }
275 929
276 return 0; 930 return 0;
931
932err_free_channel_map:
933 iwl_free_channel_map(priv);
934err:
935 return ret;
936}
937EXPORT_SYMBOL(iwl_init_drv);
938
939void iwl_free_calib_results(struct iwl_priv *priv)
940{
941 kfree(priv->calib_results.lo_res);
942 priv->calib_results.lo_res = NULL;
943 priv->calib_results.lo_res_len = 0;
944
945 kfree(priv->calib_results.tx_iq_res);
946 priv->calib_results.tx_iq_res = NULL;
947 priv->calib_results.tx_iq_res_len = 0;
948
949 kfree(priv->calib_results.tx_iq_perd_res);
950 priv->calib_results.tx_iq_perd_res = NULL;
951 priv->calib_results.tx_iq_perd_res_len = 0;
952}
953EXPORT_SYMBOL(iwl_free_calib_results);
954
955int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
956{
957 int ret = 0;
958 if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
959 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
960 priv->tx_power_user_lmt);
961 return -EINVAL;
962 }
963
964 if (tx_power > IWL_TX_POWER_TARGET_POWER_MAX) {
965 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
966 priv->tx_power_user_lmt);
967 return -EINVAL;
968 }
969
970 if (priv->tx_power_user_lmt != tx_power)
971 force = true;
972
973 priv->tx_power_user_lmt = tx_power;
974
975 if (force && priv->cfg->ops->lib->send_tx_power)
976 ret = priv->cfg->ops->lib->send_tx_power(priv);
977
978 return ret;
979}
980EXPORT_SYMBOL(iwl_set_tx_power);
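A usage sketch for the new export (the wrapper function and the 14 dBm value are illustrative only):

static int iwl_example_set_txpower(struct iwl_priv *priv)
{
	/* request 14 dBm; the command is pushed to the device only when
	 * the user limit actually changes (or force is set) and the
	 * device provides a send_tx_power op */
	return iwl_set_tx_power(priv, 14, false);
}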
981
982
983void iwl_uninit_drv(struct iwl_priv *priv)
984{
985 iwl_free_calib_results(priv);
986 iwlcore_free_geos(priv);
987 iwl_free_channel_map(priv);
988 kfree(priv->scan);
277} 989}
278EXPORT_SYMBOL(iwlcore_low_level_notify); 990EXPORT_SYMBOL(iwl_uninit_drv);
279 991
280int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags) 992int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
281{ 993{
@@ -290,3 +1002,440 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
290} 1002}
291EXPORT_SYMBOL(iwl_send_statistics_request); 1003EXPORT_SYMBOL(iwl_send_statistics_request);
292 1004
1005/**
1006 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
1007 * using sample data 100 bytes apart. If these sample points are good,
1008 * it's a pretty good bet that everything between them is good, too.
1009 */
1010static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1011{
1012 u32 val;
1013 int ret = 0;
1014 u32 errcnt = 0;
1015 u32 i;
1016
1017 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
1018
1019 ret = iwl_grab_nic_access(priv);
1020 if (ret)
1021 return ret;
1022
1023 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1024 /* read data comes through single port, auto-incr addr */
1025 /* NOTE: Use the debugless read so we don't flood kernel log
1026 * if IWL_DL_IO is set */
1027 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1028 i + RTC_INST_LOWER_BOUND);
1029 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1030 if (val != le32_to_cpu(*image)) {
1031 ret = -EIO;
1032 errcnt++;
1033 if (errcnt >= 3)
1034 break;
1035 }
1036 }
1037
1038 iwl_release_nic_access(priv);
1039
1040 return ret;
1041}
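To make the trade off concrete: for an instruction image of 96 KiB the sparse check above performs just under a thousand target memory reads (one sample every 100 bytes) instead of the roughly 24,576 word reads a full comparison needs, and it gives up after three mismatched samples.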
1042
1043/**
1044 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
1045 * looking at all data.
1046 */
1047static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1048 u32 len)
1049{
1050 u32 val;
1051 u32 save_len = len;
1052 int ret = 0;
1053 u32 errcnt;
1054
1055 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
1056
1057 ret = iwl_grab_nic_access(priv);
1058 if (ret)
1059 return ret;
1060
1061 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
1062
1063 errcnt = 0;
1064 for (; len > 0; len -= sizeof(u32), image++) {
1065 /* read data comes through single port, auto-incr addr */
1066 /* NOTE: Use the debugless read so we don't flood kernel log
1067 * if IWL_DL_IO is set */
1068 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1069 if (val != le32_to_cpu(*image)) {
1070 IWL_ERROR("uCode INST section is invalid at "
1071 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1072 save_len - len, val, le32_to_cpu(*image));
1073 ret = -EIO;
1074 errcnt++;
1075 if (errcnt >= 20)
1076 break;
1077 }
1078 }
1079
1080 iwl_release_nic_access(priv);
1081
1082 if (!errcnt)
1083 IWL_DEBUG_INFO
1084 ("ucode image in INSTRUCTION memory is good\n");
1085
1086 return ret;
1087}
1088
1089/**
1090 * iwl_verify_ucode - determine which instruction image is in SRAM,
1091 * and verify its contents
1092 */
1093int iwl_verify_ucode(struct iwl_priv *priv)
1094{
1095 __le32 *image;
1096 u32 len;
1097 int ret;
1098
1099 /* Try bootstrap */
1100 image = (__le32 *)priv->ucode_boot.v_addr;
1101 len = priv->ucode_boot.len;
1102 ret = iwlcore_verify_inst_sparse(priv, image, len);
1103 if (!ret) {
1104 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
1105 return 0;
1106 }
1107
1108 /* Try initialize */
1109 image = (__le32 *)priv->ucode_init.v_addr;
1110 len = priv->ucode_init.len;
1111 ret = iwlcore_verify_inst_sparse(priv, image, len);
1112 if (!ret) {
1113 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
1114 return 0;
1115 }
1116
1117 /* Try runtime/protocol */
1118 image = (__le32 *)priv->ucode_code.v_addr;
1119 len = priv->ucode_code.len;
1120 ret = iwlcore_verify_inst_sparse(priv, image, len);
1121 if (!ret) {
1122 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
1123 return 0;
1124 }
1125
1126 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1127
1128 /* Since nothing seems to match, show first several data entries in
1129 * instruction SRAM, so maybe visual inspection will give a clue.
1130 * Selection of bootstrap image (vs. other images) is arbitrary. */
1131 image = (__le32 *)priv->ucode_boot.v_addr;
1132 len = priv->ucode_boot.len;
1133 ret = iwl_verify_inst_full(priv, image, len);
1134
1135 return ret;
1136}
1137EXPORT_SYMBOL(iwl_verify_ucode);
1138
1139
1140static const char *desc_lookup(int i)
1141{
1142 switch (i) {
1143 case 1:
1144 return "FAIL";
1145 case 2:
1146 return "BAD_PARAM";
1147 case 3:
1148 return "BAD_CHECKSUM";
1149 case 4:
1150 return "NMI_INTERRUPT";
1151 case 5:
1152 return "SYSASSERT";
1153 case 6:
1154 return "FATAL_ERROR";
1155 }
1156
1157 return "UNKNOWN";
1158}
1159
1160#define ERROR_START_OFFSET (1 * sizeof(u32))
1161#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1162
1163void iwl_dump_nic_error_log(struct iwl_priv *priv)
1164{
1165 u32 data2, line;
1166 u32 desc, time, count, base, data1;
1167 u32 blink1, blink2, ilink1, ilink2;
1168 int ret;
1169
1170 if (priv->ucode_type == UCODE_INIT)
1171 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1172 else
1173 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1174
1175 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1176 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
1177 return;
1178 }
1179
1180 ret = iwl_grab_nic_access(priv);
1181 if (ret) {
1182 IWL_WARNING("Can not read from adapter at this time.\n");
1183 return;
1184 }
1185
1186 count = iwl_read_targ_mem(priv, base);
1187
1188 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1189 IWL_ERROR("Start IWL Error Log Dump:\n");
1190 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
1191 }
1192
1193 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1194 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1195 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1196 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
1197 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
1198 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
1199 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1200 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1201 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1202
1203 IWL_ERROR("Desc Time "
1204 "data1 data2 line\n");
1205 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
1206 desc_lookup(desc), desc, time, data1, data2, line);
1207 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
1208 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1209 ilink1, ilink2);
1210
1211 iwl_release_nic_access(priv);
1212}
1213EXPORT_SYMBOL(iwl_dump_nic_error_log);
1214
1215#define EVENT_START_OFFSET (4 * sizeof(u32))
1216
1217/**
1218 * iwl_print_event_log - Dump the uCode event log to syslog
1219 *
1220 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
1221 */
1222void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1223 u32 num_events, u32 mode)
1224{
1225 u32 i;
1226 u32 base; /* SRAM byte address of event log header */
1227 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1228 u32 ptr; /* SRAM byte address of log data */
1229 u32 ev, time, data; /* event log data */
1230
1231 if (num_events == 0)
1232 return;
1233 if (priv->ucode_type == UCODE_INIT)
1234 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1235 else
1236 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1237
1238 if (mode == 0)
1239 event_size = 2 * sizeof(u32);
1240 else
1241 event_size = 3 * sizeof(u32);
1242
1243 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1244
1245 /* "time" is actually "data" for mode 0 (no timestamp).
1246 * place event id # at far right for easier visual parsing. */
1247 for (i = 0; i < num_events; i++) {
1248 ev = iwl_read_targ_mem(priv, ptr);
1249 ptr += sizeof(u32);
1250 time = iwl_read_targ_mem(priv, ptr);
1251 ptr += sizeof(u32);
1252 if (mode == 0) {
1253 /* data, ev */
1254 IWL_ERROR("EVT_LOG:0x%08x:%04u\n", time, ev);
1255 } else {
1256 data = iwl_read_targ_mem(priv, ptr);
1257 ptr += sizeof(u32);
1258 IWL_ERROR("EVT_LOGT:%010u:0x%08x:%04u\n",
1259 time, data, ev);
1260 }
1261 }
1262}
1263EXPORT_SYMBOL(iwl_print_event_log);
1264
1265
1266void iwl_dump_nic_event_log(struct iwl_priv *priv)
1267{
1268 int ret;
1269 u32 base; /* SRAM byte address of event log header */
1270 u32 capacity; /* event log capacity in # entries */
1271 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1272 u32 num_wraps; /* # times uCode wrapped to top of log */
1273 u32 next_entry; /* index of next entry to be written by uCode */
1274 u32 size; /* # entries that we'll print */
1275
1276 if (priv->ucode_type == UCODE_INIT)
1277 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1278 else
1279 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1280
1281 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1282 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
1283 return;
1284 }
1285
1286 ret = iwl_grab_nic_access(priv);
1287 if (ret) {
1288 IWL_WARNING("Can not read from adapter at this time.\n");
1289 return;
1290 }
1291
1292 /* event log header */
1293 capacity = iwl_read_targ_mem(priv, base);
1294 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1295 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1296 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1297
1298 size = num_wraps ? capacity : next_entry;
1299
1300 /* bail out if nothing in log */
1301 if (size == 0) {
1302 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
1303 iwl_release_nic_access(priv);
1304 return;
1305 }
1306
1307 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
1308 size, num_wraps);
1309
1310 /* if uCode has wrapped back to top of log, start at the oldest entry,
1311 * i.e the next one that uCode would fill. */
1312 if (num_wraps)
1313 iwl_print_event_log(priv, next_entry,
1314 capacity - next_entry, mode);
1315 /* (then/else) start at top of log */
1316 iwl_print_event_log(priv, 0, next_entry, mode);
1317
1318 iwl_release_nic_access(priv);
1319}
1320EXPORT_SYMBOL(iwl_dump_nic_event_log);
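The event log layout implied by the code above: a four word header at the table base (capacity, mode, num_wraps, next_entry), followed at EVENT_START_OFFSET by entries of two words (event id, data) when mode is 0, or three words (event id, timestamp, data) when timestamps are recorded.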
1321
1322void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1323{
1324 struct iwl_ct_kill_config cmd;
1325 unsigned long flags;
1326 int ret = 0;
1327
1328 spin_lock_irqsave(&priv->lock, flags);
1329 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1330 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1331 spin_unlock_irqrestore(&priv->lock, flags);
1332
1333 cmd.critical_temperature_R =
1334 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1335
1336 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1337 sizeof(cmd), &cmd);
1338 if (ret)
1339 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
1340 else
1341 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded, "
1342 "critical temperature is %d\n",
1343 cmd.critical_temperature_R);
1344}
1345EXPORT_SYMBOL(iwl_rf_kill_ct_config);
1346
1347/*
1348 * CARD_STATE_CMD
1349 *
1350 * Use: Sets the device's internal card state to enable, disable, or halt
1351 *
1352 * When in the 'enable' state the card operates as normal.
1353 * When in the 'disable' state, the card enters into a low power mode.
1354 * When in the 'halt' state, the card is shut down and must be fully
1355 * restarted to come back on.
1356 */
1357static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1358{
1359 struct iwl_host_cmd cmd = {
1360 .id = REPLY_CARD_STATE_CMD,
1361 .len = sizeof(u32),
1362 .data = &flags,
1363 .meta.flags = meta_flag,
1364 };
1365
1366 return iwl_send_cmd(priv, &cmd);
1367}
1368
1369void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv)
1370{
1371 unsigned long flags;
1372
1373 if (test_bit(STATUS_RF_KILL_SW, &priv->status))
1374 return;
1375
1376 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO OFF\n");
1377
1378 iwl_scan_cancel(priv);
1379 /* FIXME: This is a workaround for AP */
1380 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
1381 spin_lock_irqsave(&priv->lock, flags);
1382 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
1383 CSR_UCODE_SW_BIT_RFKILL);
1384 spin_unlock_irqrestore(&priv->lock, flags);
1385 /* call the host command only if no hw rf-kill set */
1386 if (!test_bit(STATUS_RF_KILL_HW, &priv->status) &&
1387 iwl_is_ready(priv))
1388 iwl_send_card_state(priv,
1389 CARD_STATE_CMD_DISABLE, 0);
1390 set_bit(STATUS_RF_KILL_SW, &priv->status);
 1392 /* make sure mac80211 stops sending Tx frames */
1392 if (priv->mac80211_registered)
1393 ieee80211_stop_queues(priv->hw);
1394 }
1395}
1396EXPORT_SYMBOL(iwl_radio_kill_sw_disable_radio);
1397
1398int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv)
1399{
1400 unsigned long flags;
1401
1402 if (!test_bit(STATUS_RF_KILL_SW, &priv->status))
1403 return 0;
1404
1405 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO ON\n");
1406
1407 spin_lock_irqsave(&priv->lock, flags);
1408 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1409
 1410 /* If the driver is up it will receive the CARD_STATE_NOTIFICATION
 1411 * and clear the SW rfkill status in that handler; clearing the bit
 1412 * here as well would break the handler. Only when the interface
 1413 * is down can we clear it here, since no further notification
 1414 * will arrive.
 1415 */
1416 if (!priv->is_open)
1417 clear_bit(STATUS_RF_KILL_SW, &priv->status);
1418 spin_unlock_irqrestore(&priv->lock, flags);
1419
1420 /* wake up ucode */
1421 msleep(10);
1422
1423 spin_lock_irqsave(&priv->lock, flags);
1424 iwl_read32(priv, CSR_UCODE_DRV_GP1);
1425 if (!iwl_grab_nic_access(priv))
1426 iwl_release_nic_access(priv);
1427 spin_unlock_irqrestore(&priv->lock, flags);
1428
1429 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
1430 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
1431 "disabled by HW switch\n");
1432 return 0;
1433 }
1434
1435 /* If the driver is already loaded, it will receive
1436 * CARD_STATE_NOTIFICATION notifications and the handler will
1437 * call restart to reload the driver.
1438 */
1439 return 1;
1440}
1441EXPORT_SYMBOL(iwl_radio_kill_sw_enable_radio);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7193d97630dc..db66114f1e56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -70,7 +70,7 @@ struct iwl_host_cmd;
70struct iwl_cmd; 70struct iwl_cmd;
71 71
72 72
73#define IWLWIFI_VERSION "1.2.26k" 73#define IWLWIFI_VERSION "1.3.27k"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation" 74#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation"
75 75
76#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 76#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -86,28 +86,63 @@ struct iwl_hcmd_ops {
86 int (*rxon_assoc)(struct iwl_priv *priv); 86 int (*rxon_assoc)(struct iwl_priv *priv);
87}; 87};
88struct iwl_hcmd_utils_ops { 88struct iwl_hcmd_utils_ops {
89 int (*enqueue_hcmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 89 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
90 u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data);
91 void (*gain_computation)(struct iwl_priv *priv,
92 u32 *average_noise,
93 u16 min_average_noise_antennat_i,
94 u32 min_average_noise);
95 void (*chain_noise_reset)(struct iwl_priv *priv);
96 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
97 __le32 *tx_flags);
90}; 98};
91 99
92struct iwl_lib_ops { 100struct iwl_lib_ops {
93 /* iwlwifi driver (priv) init */
94 int (*init_drv)(struct iwl_priv *priv);
 95 /* set hw dependent parameters */ 101 /* set hw dependent parameters */
96 int (*set_hw_params)(struct iwl_priv *priv); 102 int (*set_hw_params)(struct iwl_priv *priv);
97 103 /* ucode shared memory */
104 int (*alloc_shared_mem)(struct iwl_priv *priv);
105 void (*free_shared_mem)(struct iwl_priv *priv);
106 int (*shared_mem_rx_idx)(struct iwl_priv *priv);
107 /* Handling TX */
98 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv, 108 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
99 struct iwl4965_tx_queue *txq, 109 struct iwl_tx_queue *txq,
100 u16 byte_cnt); 110 u16 byte_cnt);
101 /* nic init */ 111 void (*txq_inval_byte_cnt_tbl)(struct iwl_priv *priv,
102 int (*hw_nic_init)(struct iwl_priv *priv); 112 struct iwl_tx_queue *txq);
113 void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
114 /* aggregations */
115 int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo,
116 int sta_id, int tid, u16 ssn_idx);
117 int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id, u16 ssn_idx,
118 u8 tx_fifo);
119 /* setup Rx handler */
120 void (*rx_handler_setup)(struct iwl_priv *priv);
121 /* setup deferred work */
122 void (*setup_deferred_work)(struct iwl_priv *priv);
123 /* cancel deferred work */
124 void (*cancel_deferred_work)(struct iwl_priv *priv);
125 /* alive notification after init uCode load */
126 void (*init_alive_start)(struct iwl_priv *priv);
103 /* alive notification */ 127 /* alive notification */
104 int (*alive_notify)(struct iwl_priv *priv); 128 int (*alive_notify)(struct iwl_priv *priv);
105 /* check validity of rtc data address */ 129 /* check validity of rtc data address */
106 int (*is_valid_rtc_data_addr)(u32 addr); 130 int (*is_valid_rtc_data_addr)(u32 addr);
107 /* 1st ucode load */ 131 /* 1st ucode load */
108 int (*load_ucode)(struct iwl_priv *priv); 132 int (*load_ucode)(struct iwl_priv *priv);
109 /* rfkill */ 133 /* power management */
110 void (*radio_kill_sw)(struct iwl_priv *priv, int disable_radio); 134 struct {
135 int (*init)(struct iwl_priv *priv);
136 int (*reset)(struct iwl_priv *priv);
137 void (*stop)(struct iwl_priv *priv);
138 void (*config)(struct iwl_priv *priv);
139 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
140 } apm_ops;
141 /* power */
142 int (*set_power)(struct iwl_priv *priv, void *cmd);
143 int (*send_tx_power) (struct iwl_priv *priv);
144 void (*update_chain_flags)(struct iwl_priv *priv);
145 void (*temperature) (struct iwl_priv *priv);
111 /* eeprom operations (as defined in iwl-eeprom.h) */ 146 /* eeprom operations (as defined in iwl-eeprom.h) */
112 struct iwl_eeprom_ops eeprom_ops; 147 struct iwl_eeprom_ops eeprom_ops;
113}; 148};
@@ -124,15 +159,19 @@ struct iwl_mod_params {
124 int debug; /* def: 0 = minimal debug log messages */ 159 int debug; /* def: 0 = minimal debug log messages */
125 int disable_hw_scan; /* def: 0 = use h/w scan */ 160 int disable_hw_scan; /* def: 0 = use h/w scan */
126 int num_of_queues; /* def: HW dependent */ 161 int num_of_queues; /* def: HW dependent */
162 int num_of_ampdu_queues;/* def: HW dependent */
127 int enable_qos; /* def: 1 = use quality of service */ 163 int enable_qos; /* def: 1 = use quality of service */
164 int disable_11n; /* def: 0 = disable 11n capabilities */
128 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 165 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
129 int antenna; /* def: 0 = both antennas (use diversity) */ 166 int antenna; /* def: 0 = both antennas (use diversity) */
167 int restart_fw; /* def: 1 = restart firmware */
130}; 168};
131 169
132struct iwl_cfg { 170struct iwl_cfg {
133 const char *name; 171 const char *name;
134 const char *fw_name; 172 const char *fw_name;
135 unsigned int sku; 173 unsigned int sku;
174 int eeprom_size;
136 const struct iwl_ops *ops; 175 const struct iwl_ops *ops;
137 const struct iwl_mod_params *mod_params; 176 const struct iwl_mod_params *mod_params;
138}; 177};
@@ -143,14 +182,113 @@ struct iwl_cfg {
143 182
144struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 183struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
145 struct ieee80211_ops *hw_ops); 184 struct ieee80211_ops *hw_ops);
185void iwl_hw_detect(struct iwl_priv *priv);
146 186
147void iwlcore_clear_stations_table(struct iwl_priv *priv); 187void iwl_clear_stations_table(struct iwl_priv *priv);
148void iwlcore_reset_qos(struct iwl_priv *priv); 188void iwl_free_calib_results(struct iwl_priv *priv);
149int iwlcore_set_rxon_channel(struct iwl_priv *priv, 189void iwl_reset_qos(struct iwl_priv *priv);
190void iwl_set_rxon_chain(struct iwl_priv *priv);
191int iwl_set_rxon_channel(struct iwl_priv *priv,
150 enum ieee80211_band band, 192 enum ieee80211_band band,
151 u16 channel); 193 u16 channel);
194void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info);
195u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
196 struct ieee80211_ht_info *sta_ht_inf);
197int iwl_hw_nic_init(struct iwl_priv *priv);
198int iwl_setup_mac(struct iwl_priv *priv);
199int iwl_set_hw_params(struct iwl_priv *priv);
200int iwl_init_drv(struct iwl_priv *priv);
201void iwl_uninit_drv(struct iwl_priv *priv);
202/* "keep warm" functions */
203int iwl_kw_init(struct iwl_priv *priv);
204int iwl_kw_alloc(struct iwl_priv *priv);
205void iwl_kw_free(struct iwl_priv *priv);
206
207/*****************************************************
208* RX
209******************************************************/
210void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211int iwl_rx_queue_alloc(struct iwl_priv *priv);
212void iwl_rx_handle(struct iwl_priv *priv);
213int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
214 struct iwl_rx_queue *q);
215void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
216void iwl_rx_replenish(struct iwl_priv *priv);
217int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
218int iwl_rx_agg_start(struct iwl_priv *priv, const u8 *addr, int tid, u16 ssn);
219int iwl_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
220/* FIXME: remove when TX is moved to iwl core */
221int iwl_rx_queue_restock(struct iwl_priv *priv);
222int iwl_rx_queue_space(const struct iwl_rx_queue *q);
223void iwl_rx_allocate(struct iwl_priv *priv);
224void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
225int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
226/* Handlers */
227void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
228 struct iwl_rx_mem_buffer *rxb);
229void iwl_rx_statistics(struct iwl_priv *priv,
230 struct iwl_rx_mem_buffer *rxb);
231
232/* TX helpers */
233
234/*****************************************************
235* TX
236******************************************************/
237int iwl_txq_ctx_reset(struct iwl_priv *priv);
238int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
239/* FIXME: remove when free Tx is fully merged into iwlcore */
240int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
241void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
242int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
243 dma_addr_t addr, u16 len);
244int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
245int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
246int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
247int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
248
249/*****************************************************
250 * TX power
251 ****************************************************/
252int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
253
254/*****************************************************
255 * RF-Kill - kept here rather than in iwl-rfkill.h so it stays available
256 * even when the RF-kill subsystem is not compiled in.
257 ****************************************************/
258void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv);
259int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv);
260
261/*******************************************************************************
262 * Rate
263 ******************************************************************************/
264
265void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
266 struct ieee80211_tx_info *info);
267int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
268
269static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
270{
271 return le32_to_cpu(rate_n_flags) & 0xFF;
272}
273static inline u32 iwl_hw_get_rate_n_flags(__le32 rate_n_flags)
274{
275 return le32_to_cpu(rate_n_flags) & 0x1FFFF;
276}
277static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
278{
279 return cpu_to_le32(flags|(u32)rate);
280}
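A small round trip sketch for the inline helpers above (fragment only; the chosen rate index and antenna flag are arbitrary illustration values):

	__le32 r = iwl_hw_set_rate_n_flags(iwl_rates[IWL_RATE_6M_INDEX].plcp,
					   RATE_MCS_ANT_ABC_MSK);
	u8 plcp = iwl_hw_get_rate(r);		/* low 8 bits: the PLCP value */
	u32 rnf = iwl_hw_get_rate_n_flags(r);	/* low 17 bits: rate plus flags */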
152 281
153int iwl_setup(struct iwl_priv *priv); 282/*******************************************************************************
283 * Scanning
284 ******************************************************************************/
285void iwl_init_scan_params(struct iwl_priv *priv);
286int iwl_scan_cancel(struct iwl_priv *priv);
287int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
288const char *iwl_escape_essid(const char *essid, u8 essid_len);
289int iwl_scan_initiate(struct iwl_priv *priv);
290void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
291void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
154 292
155/***************************************************** 293/*****************************************************
156 * S e n d i n g H o s t C o m m a n d s * 294 * S e n d i n g H o s t C o m m a n d s *
@@ -167,6 +305,17 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
167 int (*callback)(struct iwl_priv *priv, 305 int (*callback)(struct iwl_priv *priv,
168 struct iwl_cmd *cmd, 306 struct iwl_cmd *cmd,
169 struct sk_buff *skb)); 307 struct sk_buff *skb));
308
309int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
310
311/*****************************************************
312* Error Handling Debugging
313******************************************************/
314void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
315 u32 num_events, u32 mode);
316void iwl_dump_nic_error_log(struct iwl_priv *priv);
317void iwl_dump_nic_event_log(struct iwl_priv *priv);
318
170/*************** DRIVER STATUS FUNCTIONS *****/ 319/*************** DRIVER STATUS FUNCTIONS *****/
171 320
172#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ 321#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
@@ -188,6 +337,7 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
188#define STATUS_POWER_PMI 16 337#define STATUS_POWER_PMI 16
189#define STATUS_FW_ERROR 17 338#define STATUS_FW_ERROR 17
190#define STATUS_CONF_PENDING 18 339#define STATUS_CONF_PENDING 18
340#define STATUS_MODE_PENDING 19
191 341
192 342
193static inline int iwl_is_ready(struct iwl_priv *priv) 343static inline int iwl_is_ready(struct iwl_priv *priv)
@@ -209,10 +359,19 @@ static inline int iwl_is_init(struct iwl_priv *priv)
209 return test_bit(STATUS_INIT, &priv->status); 359 return test_bit(STATUS_INIT, &priv->status);
210} 360}
211 361
362static inline int iwl_is_rfkill_sw(struct iwl_priv *priv)
363{
364 return test_bit(STATUS_RF_KILL_SW, &priv->status);
365}
366
367static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
368{
369 return test_bit(STATUS_RF_KILL_HW, &priv->status);
370}
371
212static inline int iwl_is_rfkill(struct iwl_priv *priv) 372static inline int iwl_is_rfkill(struct iwl_priv *priv)
213{ 373{
214 return test_bit(STATUS_RF_KILL_HW, &priv->status) || 374 return iwl_is_rfkill_hw(priv) || iwl_is_rfkill_sw(priv);
215 test_bit(STATUS_RF_KILL_SW, &priv->status);
216} 375}
217 376
218static inline int iwl_is_ready_rf(struct iwl_priv *priv) 377static inline int iwl_is_ready_rf(struct iwl_priv *priv)
@@ -224,23 +383,27 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
224 return iwl_is_ready(priv); 383 return iwl_is_ready(priv);
225} 384}
226 385
227 386extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
228enum iwlcore_card_notify {
229 IWLCORE_INIT_EVT = 0,
230 IWLCORE_START_EVT = 1,
231 IWLCORE_STOP_EVT = 2,
232 IWLCORE_REMOVE_EVT = 3,
233};
234
235int iwlcore_low_level_notify(struct iwl_priv *priv,
236 enum iwlcore_card_notify notify);
237extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags); 387extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags);
238int iwl_send_lq_cmd(struct iwl_priv *priv, 388extern int iwl_verify_ucode(struct iwl_priv *priv);
239 struct iwl_link_quality_cmd *lq, u8 flags); 389extern int iwl_send_lq_cmd(struct iwl_priv *priv,
390 struct iwl_link_quality_cmd *lq, u8 flags);
391extern void iwl_rx_reply_rx(struct iwl_priv *priv,
392 struct iwl_rx_mem_buffer *rxb);
393extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
394 struct iwl_rx_mem_buffer *rxb);
395void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
396 struct iwl_rx_mem_buffer *rxb);
240 397
241static inline int iwl_send_rxon_assoc(struct iwl_priv *priv) 398static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
242{ 399{
243 return priv->cfg->ops->hcmd->rxon_assoc(priv); 400 return priv->cfg->ops->hcmd->rxon_assoc(priv);
244} 401}
245 402
403static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
404 struct iwl_priv *priv, enum ieee80211_band band)
405{
406 return priv->hw->wiphy->bands[band];
407}
408
246#endif /* __iwl_core_h__ */ 409#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 12725796ea5f..545ed692d889 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -87,16 +87,16 @@
87/* EEPROM reads */ 87/* EEPROM reads */
88#define CSR_EEPROM_REG (CSR_BASE+0x02c) 88#define CSR_EEPROM_REG (CSR_BASE+0x02c)
89#define CSR_EEPROM_GP (CSR_BASE+0x030) 89#define CSR_EEPROM_GP (CSR_BASE+0x030)
90#define CSR_GIO_REG (CSR_BASE+0x03C)
90#define CSR_GP_UCODE (CSR_BASE+0x044) 91#define CSR_GP_UCODE (CSR_BASE+0x044)
91#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) 92#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
92#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) 93#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
93#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) 94#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
94#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) 95#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
95#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
96#define CSR_LED_REG (CSR_BASE+0x094) 96#define CSR_LED_REG (CSR_BASE+0x094)
97#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
97 98
98/* Analog phase-lock-loop configuration (3945 only) 99/* Analog phase-lock-loop configuration */
99 * Set bit 24. */
100#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) 100#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
101/* 101/*
102 * Indicates hardware rev, to determine CCK backoff for txpower calculation. 102 * Indicates hardware rev, to determine CCK backoff for txpower calculation.
@@ -107,9 +107,9 @@
107 107
108/* Bits for CSR_HW_IF_CONFIG_REG */ 108/* Bits for CSR_HW_IF_CONFIG_REG */
109#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) 109#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
110#define CSR49_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) 110#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
111#define CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) 111#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
112#define CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) 112#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
113 113
114#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100) 114#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
115#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200) 115#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
@@ -170,6 +170,10 @@
170#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ 170#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
171 CSR_FH_INT_BIT_TX_CHNL0) 171 CSR_FH_INT_BIT_TX_CHNL0)
172 172
173/* GPIO */
174#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
175#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
176#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
173 177
174/* RESET */ 178/* RESET */
175#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) 179#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
@@ -191,6 +195,16 @@
191#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) 195#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
192 196
193 197
198/* HW REV */
199#define CSR_HW_REV_TYPE_MSK (0x00000F0)
200#define CSR_HW_REV_TYPE_3945 (0x00000D0)
201#define CSR_HW_REV_TYPE_4965 (0x0000000)
202#define CSR_HW_REV_TYPE_5300 (0x0000020)
203#define CSR_HW_REV_TYPE_5350 (0x0000030)
204#define CSR_HW_REV_TYPE_5100 (0x0000050)
205#define CSR_HW_REV_TYPE_5150 (0x0000040)
206#define CSR_HW_REV_TYPE_NONE (0x00000F0)
207
194/* EEPROM REG */ 208/* EEPROM REG */
195#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 209#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
196#define CSR_EEPROM_REG_BIT_CMD (0x00000002) 210#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
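The new CSR_HW_REV_TYPE_* values let shared code key off the silicon family read from the hardware revision register. A small decode sketch (the helper name is made up; only the masking comes from the header):

/* Illustrative only: map the masked revision field to a family name. */
static const char *example_hw_rev_name(u32 hw_rev)
{
        switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
        case CSR_HW_REV_TYPE_3945:      return "3945";
        case CSR_HW_REV_TYPE_4965:      return "4965";
        case CSR_HW_REV_TYPE_5100:      return "5100";
        case CSR_HW_REV_TYPE_5150:      return "5150";
        case CSR_HW_REV_TYPE_5300:      return "5300";
        case CSR_HW_REV_TYPE_5350:      return "5350";
        default:                        return "unknown";
        }
}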
@@ -200,17 +214,15 @@
200#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000) 214#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
201#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) 215#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
202 216
217/* CSR GIO */
218#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
219
203/* UCODE DRV GP */ 220/* UCODE DRV GP */
204#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) 221#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
205#define CSR_UCODE_SW_BIT_RFKILL (0x00000002) 222#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
206#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) 223#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
207#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) 224#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
208 225
209/* GPIO */
210#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
211#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
212#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC CSR_GPIO_IN_BIT_AUX_POWER
213
214/* GI Chicken Bits */ 226/* GI Chicken Bits */
215#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) 227#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
216#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) 228#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
@@ -220,6 +232,10 @@
220#define CSR_LED_REG_TRUN_ON (0x78) 232#define CSR_LED_REG_TRUN_ON (0x78)
221#define CSR_LED_REG_TRUN_OFF (0x38) 233#define CSR_LED_REG_TRUN_OFF (0x38)
222 234
235/* ANA_PLL */
236#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
237#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
238
223/*=== HBUS (Host-side Bus) ===*/ 239/*=== HBUS (Host-side Bus) ===*/
224#define HBUS_BASE (0x400) 240#define HBUS_BASE (0x400)
225/* 241/*
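The GPIO power-source values move up next to the other input definitions, and VMAIN is now spelled as a literal rather than an alias of the AUX bit. A hedged sketch of how the bit could be translated into the iwl_pwr_src enum introduced in iwl-dev.h later in this patch (reading the GPIO register itself is not shown, and the helper name is hypothetical):

/* Illustrative only: interpret a raw GPIO input value. */
static enum iwl_pwr_src example_pwr_src_from_gpio(u32 gpio_in)
{
        if ((gpio_in & CSR_GPIO_IN_BIT_AUX_POWER) ==
                                CSR_GPIO_IN_VAL_VMAIN_PWR_SRC)
                return IWL_PWR_SRC_VMAIN;
        return IWL_PWR_SRC_VAUX;
}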
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index c60724c21db8..58384805a494 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -30,37 +30,35 @@
30#define __iwl_debug_h__ 30#define __iwl_debug_h__
31 31
32#ifdef CONFIG_IWLWIFI_DEBUG 32#ifdef CONFIG_IWLWIFI_DEBUG
33extern u32 iwl_debug_level;
34#define IWL_DEBUG(level, fmt, args...) \ 33#define IWL_DEBUG(level, fmt, args...) \
35do { if (iwl_debug_level & (level)) \ 34do { if (priv->debug_level & (level)) \
36 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 35 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
37 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 36 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
38 37
39#define IWL_DEBUG_LIMIT(level, fmt, args...) \ 38#define IWL_DEBUG_LIMIT(level, fmt, args...) \
40do { if ((iwl_debug_level & (level)) && net_ratelimit()) \ 39do { if ((priv->debug_level & (level)) && net_ratelimit()) \
41 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
42 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 41 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
43 42
44static inline void iwl_print_hex_dump(int level, void *p, u32 len)
45{
46 if (!(iwl_debug_level & level))
47 return;
48
49 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
50 p, len, 1);
51}
52
53#ifdef CONFIG_IWLWIFI_DEBUGFS 43#ifdef CONFIG_IWLWIFI_DEBUGFS
54struct iwl_debugfs { 44struct iwl_debugfs {
55 const char *name; 45 const char *name;
56 struct dentry *dir_drv; 46 struct dentry *dir_drv;
57 struct dentry *dir_data; 47 struct dentry *dir_data;
58 struct dir_data_files{ 48 struct dentry *dir_rf;
49 struct dir_data_files {
59 struct dentry *file_sram; 50 struct dentry *file_sram;
51 struct dentry *file_eeprom;
60 struct dentry *file_stations; 52 struct dentry *file_stations;
61 struct dentry *file_rx_statistics; 53 struct dentry *file_rx_statistics;
62 struct dentry *file_tx_statistics; 54 struct dentry *file_tx_statistics;
55 struct dentry *file_log_event;
63 } dbgfs_data_files; 56 } dbgfs_data_files;
57 struct dir_rf_files {
58 struct dentry *file_disable_sensitivity;
59 struct dentry *file_disable_chain_noise;
60 struct dentry *file_disable_tx_power;
61 } dbgfs_rf_files;
64 u32 sram_offset; 62 u32 sram_offset;
65 u32 sram_len; 63 u32 sram_len;
66}; 64};
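The debug macros now read priv->debug_level and print through dev_printk(), so a local priv must be in scope wherever they are expanded. A hedged usage sketch (IWL_DL_INFO is assumed from the driver's existing debug-level bits):

/* Illustrative only: 'priv' is picked up implicitly by the macro. */
static void example_report_temperature(struct iwl_priv *priv)
{
        IWL_DEBUG(IWL_DL_INFO, "temperature is %d K\n", priv->temperature);
}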
@@ -76,9 +74,6 @@ static inline void IWL_DEBUG(int level, const char *fmt, ...)
76static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...) 74static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
77{ 75{
78} 76}
79static inline void iwl_print_hex_dump(int level, void *p, u32 len)
80{
81}
82#endif /* CONFIG_IWLWIFI_DEBUG */ 77#endif /* CONFIG_IWLWIFI_DEBUG */
83 78
84 79
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 9a30e1df311d..ed948dc59b3d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -34,7 +34,7 @@
34#include <net/mac80211.h> 34#include <net/mac80211.h>
35 35
36 36
37#include "iwl-4965.h" 37#include "iwl-dev.h"
38#include "iwl-debug.h" 38#include "iwl-debug.h"
39#include "iwl-core.h" 39#include "iwl-core.h"
40#include "iwl-io.h" 40#include "iwl-io.h"
@@ -55,6 +55,13 @@
55 goto err; \ 55 goto err; \
56} while (0) 56} while (0)
57 57
58#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
59 dbgfs->dbgfs_##parent##_files.file_##name = \
60 debugfs_create_bool(#name, 0644, dbgfs->dir_##parent, ptr); \
61 if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name)) \
62 goto err; \
63} while (0)
64
58#define DEBUGFS_REMOVE(name) do { \ 65#define DEBUGFS_REMOVE(name) do { \
59 debugfs_remove(name); \ 66 debugfs_remove(name); \
60 name = NULL; \ 67 name = NULL; \
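For reference, the DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal) call made later in this file expands to roughly the following (hand expansion, minus the do/while wrapper):

/* Hand-expanded DEBUGFS_ADD_BOOL(disable_sensitivity, rf, ...): */
dbgfs->dbgfs_rf_files.file_disable_sensitivity =
        debugfs_create_bool("disable_sensitivity", 0644, dbgfs->dir_rf,
                            &priv->disable_sens_cal);
if (IS_ERR(dbgfs->dbgfs_rf_files.file_disable_sensitivity))
        goto err;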
@@ -85,6 +92,14 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
85 .open = iwl_dbgfs_open_file_generic, \ 92 .open = iwl_dbgfs_open_file_generic, \
86}; 93};
87 94
95#define DEBUGFS_WRITE_FILE_OPS(name) \
96 DEBUGFS_WRITE_FUNC(name); \
97static const struct file_operations iwl_dbgfs_##name##_ops = { \
98 .write = iwl_dbgfs_##name##_write, \
99 .open = iwl_dbgfs_open_file_generic, \
100};
101
102
88#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 103#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
89 DEBUGFS_READ_FUNC(name); \ 104 DEBUGFS_READ_FUNC(name); \
90 DEBUGFS_WRITE_FUNC(name); \ 105 DEBUGFS_WRITE_FUNC(name); \
@@ -206,7 +221,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
206 size_t count, loff_t *ppos) 221 size_t count, loff_t *ppos)
207{ 222{
208 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 223 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
209 struct iwl4965_station_entry *station; 224 struct iwl_station_entry *station;
210 int max_sta = priv->hw_params.max_stations; 225 int max_sta = priv->hw_params.max_stations;
211 char *buf; 226 char *buf;
212 int i, j, pos = 0; 227 int i, j, pos = 0;
@@ -240,21 +255,18 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
240 pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n"); 255 pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n");
241 pos += scnprintf(buf + pos, bufsz - pos, 256 pos += scnprintf(buf + pos, bufsz - pos,
242 "seq_num\t\ttxq_id"); 257 "seq_num\t\ttxq_id");
243#ifdef CONFIG_IWL4965_HT
244 pos += scnprintf(buf + pos, bufsz - pos, 258 pos += scnprintf(buf + pos, bufsz - pos,
245 "\tframe_count\twait_for_ba\t"); 259 "\tframe_count\twait_for_ba\t");
246 pos += scnprintf(buf + pos, bufsz - pos, 260 pos += scnprintf(buf + pos, bufsz - pos,
247 "start_idx\tbitmap0\t"); 261 "start_idx\tbitmap0\t");
248 pos += scnprintf(buf + pos, bufsz - pos, 262 pos += scnprintf(buf + pos, bufsz - pos,
249 "bitmap1\trate_n_flags"); 263 "bitmap1\trate_n_flags");
250#endif
251 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 264 pos += scnprintf(buf + pos, bufsz - pos, "\n");
252 265
253 for (j = 0; j < MAX_TID_COUNT; j++) { 266 for (j = 0; j < MAX_TID_COUNT; j++) {
254 pos += scnprintf(buf + pos, bufsz - pos, 267 pos += scnprintf(buf + pos, bufsz - pos,
255 "[%d]:\t\t%u", j, 268 "[%d]:\t\t%u", j,
256 station->tid[j].seq_number); 269 station->tid[j].seq_number);
257#ifdef CONFIG_IWL4965_HT
258 pos += scnprintf(buf + pos, bufsz - pos, 270 pos += scnprintf(buf + pos, bufsz - pos,
259 "\t%u\t\t%u\t\t%u\t\t", 271 "\t%u\t\t%u\t\t%u\t\t",
260 station->tid[j].agg.txq_id, 272 station->tid[j].agg.txq_id,
@@ -265,7 +277,6 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
265 station->tid[j].agg.start_idx, 277 station->tid[j].agg.start_idx,
266 (unsigned long long)station->tid[j].agg.bitmap, 278 (unsigned long long)station->tid[j].agg.bitmap,
267 station->tid[j].agg.rate_n_flags); 279 station->tid[j].agg.rate_n_flags);
268#endif
269 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 280 pos += scnprintf(buf + pos, bufsz - pos, "\n");
270 } 281 }
271 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 282 pos += scnprintf(buf + pos, bufsz - pos, "\n");
@@ -277,8 +288,70 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
277 return ret; 288 return ret;
278} 289}
279 290
291static ssize_t iwl_dbgfs_eeprom_read(struct file *file,
292 char __user *user_buf,
293 size_t count,
294 loff_t *ppos)
295{
296 ssize_t ret;
297 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
298 int pos = 0, ofs = 0, buf_size = 0;
299 const u8 *ptr;
300 char *buf;
301 size_t eeprom_len = priv->cfg->eeprom_size;
302 buf_size = 4 * eeprom_len + 256;
303
304 if (eeprom_len % 16) {
305 IWL_ERROR("EEPROM size is not multiple of 16.\n");
306 return -ENODATA;
307 }
308
309 /* 4 characters for byte 0xYY */
310 buf = kzalloc(buf_size, GFP_KERNEL);
311 if (!buf) {
312 IWL_ERROR("Can not allocate Buffer\n");
313 return -ENOMEM;
314 }
315
316 ptr = priv->eeprom;
317 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
318 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
319 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
320 buf_size - pos, 0);
321 pos += strlen(buf);
322 if (buf_size - pos > 0)
323 buf[pos++] = '\n';
324 }
325
326 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
327 kfree(buf);
328 return ret;
329}
330
331static ssize_t iwl_dbgfs_log_event_write(struct file *file,
332 const char __user *user_buf,
333 size_t count, loff_t *ppos)
334{
335 struct iwl_priv *priv = file->private_data;
336 u32 event_log_flag;
337 char buf[8];
338 int buf_size;
339
340 memset(buf, 0, sizeof(buf));
341 buf_size = min(count, sizeof(buf) - 1);
342 if (copy_from_user(buf, user_buf, buf_size))
343 return -EFAULT;
344 if (sscanf(buf, "%d", &event_log_flag) != 1)
345 return -EFAULT;
346 if (event_log_flag == 1)
347 iwl_dump_nic_event_log(priv);
348
349 return count;
350}
280 351
281DEBUGFS_READ_WRITE_FILE_OPS(sram); 352DEBUGFS_READ_WRITE_FILE_OPS(sram);
353DEBUGFS_WRITE_FILE_OPS(log_event);
354DEBUGFS_READ_FILE_OPS(eeprom);
282DEBUGFS_READ_FILE_OPS(stations); 355DEBUGFS_READ_FILE_OPS(stations);
283DEBUGFS_READ_FILE_OPS(rx_statistics); 356DEBUGFS_READ_FILE_OPS(rx_statistics);
284DEBUGFS_READ_FILE_OPS(tx_statistics); 357DEBUGFS_READ_FILE_OPS(tx_statistics);
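Writing the value 1 to the new log_event file calls iwl_dump_nic_event_log(); the eeprom file returns a hex dump of the image cached at probe time. A small userspace sketch (the debugfs path is hypothetical; it depends on the phy name and the driver name passed at registration):

/* Illustrative only: trigger a uCode event-log dump from userspace. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/ieee80211/phy0/iwl4965/data/log_event", "w");

        if (!f)
                return 1;
        fputs("1", f);
        return fclose(f) ? 1 : 0;
}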
@@ -290,6 +363,7 @@ DEBUGFS_READ_FILE_OPS(tx_statistics);
290int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 363int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
291{ 364{
292 struct iwl_debugfs *dbgfs; 365 struct iwl_debugfs *dbgfs;
366 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
293 367
294 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL); 368 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
295 if (!dbgfs) { 369 if (!dbgfs) {
@@ -298,17 +372,23 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
298 372
299 priv->dbgfs = dbgfs; 373 priv->dbgfs = dbgfs;
300 dbgfs->name = name; 374 dbgfs->name = name;
301 dbgfs->dir_drv = debugfs_create_dir(name, NULL); 375 dbgfs->dir_drv = debugfs_create_dir(name, phyd);
302 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){ 376 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){
303 goto err; 377 goto err;
304 } 378 }
305 379
306 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv); 380 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
381 DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
382 DEBUGFS_ADD_FILE(eeprom, data);
307 DEBUGFS_ADD_FILE(sram, data); 383 DEBUGFS_ADD_FILE(sram, data);
384 DEBUGFS_ADD_FILE(log_event, data);
308 DEBUGFS_ADD_FILE(stations, data); 385 DEBUGFS_ADD_FILE(stations, data);
309 DEBUGFS_ADD_FILE(rx_statistics, data); 386 DEBUGFS_ADD_FILE(rx_statistics, data);
310 DEBUGFS_ADD_FILE(tx_statistics, data); 387 DEBUGFS_ADD_FILE(tx_statistics, data);
311 388 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
389 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
390 &priv->disable_chain_noise_cal);
391 DEBUGFS_ADD_BOOL(disable_tx_power, rf, &priv->disable_tx_power_cal);
312 return 0; 392 return 0;
313 393
314err: 394err:
@@ -327,11 +407,17 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
327 if (!(priv->dbgfs)) 407 if (!(priv->dbgfs))
328 return; 408 return;
329 409
410 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_eeprom);
330 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_rx_statistics); 411 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_rx_statistics);
331 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_tx_statistics); 412 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_tx_statistics);
332 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram); 413 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
414 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
333 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations); 415 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
334 DEBUGFS_REMOVE(priv->dbgfs->dir_data); 416 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
417 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
418 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
419 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_tx_power);
420 DEBUGFS_REMOVE(priv->dbgfs->dir_rf);
335 DEBUGFS_REMOVE(priv->dbgfs->dir_drv); 421 DEBUGFS_REMOVE(priv->dbgfs->dir_drv);
336 kfree(priv->dbgfs); 422 kfree(priv->dbgfs);
337 priv->dbgfs = NULL; 423 priv->dbgfs = NULL;
@@ -339,3 +425,4 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
339EXPORT_SYMBOL(iwl_dbgfs_unregister); 425EXPORT_SYMBOL(iwl_dbgfs_unregister);
340 426
341 427
428
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 581b98556c86..4d789e353e3a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -24,13 +24,13 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26/* 26/*
27 * Please use this file (iwl-4965.h) for driver implementation definitions. 27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-4965-commands.h for uCode API definitions. 28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions. 29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */ 30 */
31 31
32#ifndef __iwl_4965_h__ 32#ifndef __iwl_dev_h__
33#define __iwl_4965_h__ 33#define __iwl_dev_h__
34 34
35#include <linux/pci.h> /* for struct pci_device_id */ 35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h> 36#include <linux/kernel.h>
@@ -44,16 +44,18 @@
44#include "iwl-prph.h" 44#include "iwl-prph.h"
45#include "iwl-debug.h" 45#include "iwl-debug.h"
46#include "iwl-led.h" 46#include "iwl-led.h"
47#include "iwl-power.h"
47 48
48/* configuration for the iwl4965 */ 49/* configuration for the iwl4965 */
49extern struct iwl_cfg iwl4965_agn_cfg; 50extern struct iwl_cfg iwl4965_agn_cfg;
51extern struct iwl_cfg iwl5300_agn_cfg;
52extern struct iwl_cfg iwl5100_agn_cfg;
53extern struct iwl_cfg iwl5350_agn_cfg;
54extern struct iwl_cfg iwl5100_bg_cfg;
55extern struct iwl_cfg iwl5100_abg_cfg;
50 56
51/* Change firmware file name, using "-" and incrementing number, 57/* CT-KILL constants */
52 * *only* when uCode interface or architecture changes so that it 58#define CT_KILL_THRESHOLD 110 /* in Celsius */
53 * is not compatible with earlier drivers.
54 * This number will also appear in << 8 position of 1st dword of uCode file */
55#define IWL4965_UCODE_API "-1"
56
57 59
58/* Default noise level to report when noise measurement is not available. 60/* Default noise level to report when noise measurement is not available.
59 * This may be because we're: 61 * This may be because we're:
@@ -68,12 +70,6 @@ extern struct iwl_cfg iwl4965_agn_cfg;
68 * averages within an s8's (used in some apps) range of negative values. */ 70 * averages within an s8's (used in some apps) range of negative values. */
69#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) 71#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
70 72
71enum iwl4965_antenna {
72 IWL_ANTENNA_DIVERSITY,
73 IWL_ANTENNA_MAIN,
74 IWL_ANTENNA_AUX
75};
76
77/* 73/*
78 * RTS threshold here is total size [2347] minus 4 FCS bytes 74 * RTS threshold here is total size [2347] minus 4 FCS bytes
79 * Per spec: 75 * Per spec:
@@ -91,7 +87,7 @@ enum iwl4965_antenna {
91#define DEFAULT_SHORT_RETRY_LIMIT 7U 87#define DEFAULT_SHORT_RETRY_LIMIT 7U
92#define DEFAULT_LONG_RETRY_LIMIT 4U 88#define DEFAULT_LONG_RETRY_LIMIT 4U
93 89
94struct iwl4965_rx_mem_buffer { 90struct iwl_rx_mem_buffer {
95 dma_addr_t dma_addr; 91 dma_addr_t dma_addr;
96 struct sk_buff *skb; 92 struct sk_buff *skb;
97 struct list_head list; 93 struct list_head list;
@@ -102,7 +98,7 @@ struct iwl4965_rx_mem_buffer {
102 * 98 *
103 * Contains common data for Rx and Tx queues 99 * Contains common data for Rx and Tx queues
104 */ 100 */
105struct iwl4965_queue { 101struct iwl_queue {
106 int n_bd; /* number of BDs in this queue */ 102 int n_bd; /* number of BDs in this queue */
107 int write_ptr; /* 1-st empty entry (index) host_w*/ 103 int write_ptr; /* 1-st empty entry (index) host_w*/
108 int read_ptr; /* last used entry (index) host_r*/ 104 int read_ptr; /* last used entry (index) host_r*/
@@ -118,13 +114,12 @@ struct iwl4965_queue {
118#define MAX_NUM_OF_TBS (20) 114#define MAX_NUM_OF_TBS (20)
119 115
120/* One for each TFD */ 116/* One for each TFD */
121struct iwl4965_tx_info { 117struct iwl_tx_info {
122 struct ieee80211_tx_status status;
123 struct sk_buff *skb[MAX_NUM_OF_TBS]; 118 struct sk_buff *skb[MAX_NUM_OF_TBS];
124}; 119};
125 120
126/** 121/**
127 * struct iwl4965_tx_queue - Tx Queue for DMA 122 * struct iwl_tx_queue - Tx Queue for DMA
128 * @q: generic Rx/Tx queue descriptor 123 * @q: generic Rx/Tx queue descriptor
129 * @bd: base of circular buffer of TFDs 124 * @bd: base of circular buffer of TFDs
130 * @cmd: array of command/Tx buffers 125 * @cmd: array of command/Tx buffers
@@ -136,12 +131,12 @@ struct iwl4965_tx_info {
136 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 131 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
137 * descriptors) and required locking structures. 132 * descriptors) and required locking structures.
138 */ 133 */
139struct iwl4965_tx_queue { 134struct iwl_tx_queue {
140 struct iwl4965_queue q; 135 struct iwl_queue q;
141 struct iwl4965_tfd_frame *bd; 136 struct iwl_tfd_frame *bd;
142 struct iwl_cmd *cmd; 137 struct iwl_cmd *cmd;
143 dma_addr_t dma_addr_cmd; 138 dma_addr_t dma_addr_cmd;
144 struct iwl4965_tx_info *txb; 139 struct iwl_tx_info *txb;
145 int need_update; 140 int need_update;
146 int sched_retry; 141 int sched_retry;
147 int active; 142 int active;
@@ -158,50 +153,17 @@ struct iwl4965_channel_tgh_info {
158 s64 last_radar_time; 153 s64 last_radar_time;
159}; 154};
160 155
161/* current Tx power values to use, one for each rate for each channel.
162 * requested power is limited by:
163 * -- regulatory EEPROM limits for this channel
164 * -- hardware capabilities (clip-powers)
165 * -- spectrum management
166 * -- user preference (e.g. iwconfig)
167 * when requested power is set, base power index must also be set. */
168struct iwl4965_channel_power_info {
169 struct iwl4965_tx_power tpc; /* actual radio and DSP gain settings */
170 s8 power_table_index; /* actual (compenst'd) index into gain table */
171 s8 base_power_index; /* gain index for power at factory temp. */
172 s8 requested_power; /* power (dBm) requested for this chnl/rate */
173};
174
175/* current scan Tx power values to use, one for each scan rate for each
176 * channel. */
177struct iwl4965_scan_power_info {
178 struct iwl4965_tx_power tpc; /* actual radio and DSP gain settings */
179 s8 power_table_index; /* actual (compenst'd) index into gain table */
180 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
181};
182
183/* For fat_extension_channel */
184enum {
185 HT_IE_EXT_CHANNEL_NONE = 0,
186 HT_IE_EXT_CHANNEL_ABOVE,
187 HT_IE_EXT_CHANNEL_INVALID,
188 HT_IE_EXT_CHANNEL_BELOW,
189 HT_IE_EXT_CHANNEL_MAX
190};
191
192/* 156/*
193 * One for each channel, holds all channel setup data 157 * One for each channel, holds all channel setup data
194 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant 158 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
195 * with one another! 159 * with one another!
196 */ 160 */
197#define IWL4965_MAX_RATE (33)
198
199struct iwl_channel_info { 161struct iwl_channel_info {
200 struct iwl4965_channel_tgd_info tgd; 162 struct iwl4965_channel_tgd_info tgd;
201 struct iwl4965_channel_tgh_info tgh; 163 struct iwl4965_channel_tgh_info tgh;
202 struct iwl4965_eeprom_channel eeprom; /* EEPROM regulatory limit */ 164 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
203 struct iwl4965_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for 165 struct iwl_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for
204 * FAT channel */ 166 * FAT channel */
205 167
206 u8 channel; /* channel number */ 168 u8 channel; /* channel number */
207 u8 flags; /* flags copied from EEPROM */ 169 u8 flags; /* flags copied from EEPROM */
@@ -214,11 +176,6 @@ struct iwl_channel_info {
214 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ 176 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
215 enum ieee80211_band band; 177 enum ieee80211_band band;
216 178
217 /* Radio/DSP gain settings for each "normal" data Tx rate.
218 * These include, in addition to RF and DSP gain, a few fields for
219 * remembering/modifying gain settings (indexes). */
220 struct iwl4965_channel_power_info power_info[IWL4965_MAX_RATE];
221
222 /* FAT channel info */ 179 /* FAT channel info */
223 s8 fat_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ 180 s8 fat_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
224 s8 fat_curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */ 181 s8 fat_curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */
@@ -226,9 +183,6 @@ struct iwl_channel_info {
226 s8 fat_scan_power; /* (dBm) eeprom, direct scans, any rate */ 183 s8 fat_scan_power; /* (dBm) eeprom, direct scans, any rate */
227 u8 fat_flags; /* flags copied from EEPROM */ 184 u8 fat_flags; /* flags copied from EEPROM */
228 u8 fat_extension_channel; /* HT_IE_EXT_CHANNEL_* */ 185 u8 fat_extension_channel; /* HT_IE_EXT_CHANNEL_* */
229
230 /* Radio/DSP gain settings for each scan rate, for directed scans. */
231 struct iwl4965_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
232}; 186};
233 187
234struct iwl4965_clip_group { 188struct iwl4965_clip_group {
@@ -252,29 +206,9 @@ struct iwl4965_clip_group {
252 206
253/* Power management (not Tx power) structures */ 207/* Power management (not Tx power) structures */
254 208
255struct iwl4965_power_vec_entry { 209enum iwl_pwr_src {
256 struct iwl4965_powertable_cmd cmd; 210 IWL_PWR_SRC_VMAIN,
257 u8 no_dtim; 211 IWL_PWR_SRC_VAUX,
258};
259#define IWL_POWER_RANGE_0 (0)
260#define IWL_POWER_RANGE_1 (1)
261
262#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */
263#define IWL_POWER_INDEX_3 0x03
264#define IWL_POWER_INDEX_5 0x05
265#define IWL_POWER_AC 0x06
266#define IWL_POWER_BATTERY 0x07
267#define IWL_POWER_LIMIT 0x07
268#define IWL_POWER_MASK 0x0F
269#define IWL_POWER_ENABLED 0x10
270#define IWL_POWER_LEVEL(x) ((x) & IWL_POWER_MASK)
271
272struct iwl4965_power_mgr {
273 spinlock_t lock;
274 struct iwl4965_power_vec_entry pwr_range_0[IWL_POWER_AC];
275 struct iwl4965_power_vec_entry pwr_range_1[IWL_POWER_AC];
276 u8 active_index;
277 u32 dtim_val;
278}; 212};
279 213
280#define IEEE80211_DATA_LEN 2304 214#define IEEE80211_DATA_LEN 2304
@@ -282,7 +216,7 @@ struct iwl4965_power_mgr {
282#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) 216#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
283#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) 217#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
284 218
285struct iwl4965_frame { 219struct iwl_frame {
286 union { 220 union {
287 struct ieee80211_hdr frame; 221 struct ieee80211_hdr frame;
288 struct iwl4965_tx_beacon_cmd beacon; 222 struct iwl4965_tx_beacon_cmd beacon;
@@ -328,6 +262,8 @@ struct iwl_cmd_meta {
328 262
329} __attribute__ ((packed)); 263} __attribute__ ((packed));
330 264
265#define IWL_CMD_MAX_PAYLOAD 320
266
331/** 267/**
332 * struct iwl_cmd 268 * struct iwl_cmd
333 * 269 *
@@ -339,8 +275,8 @@ struct iwl_cmd {
339 struct iwl_cmd_meta meta; /* driver data */ 275 struct iwl_cmd_meta meta; /* driver data */
340 struct iwl_cmd_header hdr; /* uCode API */ 276 struct iwl_cmd_header hdr; /* uCode API */
341 union { 277 union {
342 struct iwl4965_addsta_cmd addsta; 278 struct iwl_addsta_cmd addsta;
343 struct iwl4965_led_cmd led; 279 struct iwl_led_cmd led;
344 u32 flags; 280 u32 flags;
345 u8 val8; 281 u8 val8;
346 u16 val16; 282 u16 val16;
@@ -348,12 +284,13 @@ struct iwl_cmd {
348 struct iwl4965_bt_cmd bt; 284 struct iwl4965_bt_cmd bt;
349 struct iwl4965_rxon_time_cmd rxon_time; 285 struct iwl4965_rxon_time_cmd rxon_time;
350 struct iwl4965_powertable_cmd powertable; 286 struct iwl4965_powertable_cmd powertable;
351 struct iwl4965_qosparam_cmd qosparam; 287 struct iwl_qosparam_cmd qosparam;
352 struct iwl4965_tx_cmd tx; 288 struct iwl_tx_cmd tx;
353 struct iwl4965_tx_beacon_cmd tx_beacon; 289 struct iwl4965_tx_beacon_cmd tx_beacon;
354 struct iwl4965_rxon_assoc_cmd rxon_assoc; 290 struct iwl4965_rxon_assoc_cmd rxon_assoc;
291 struct iwl_rem_sta_cmd rm_sta;
355 u8 *indirect; 292 u8 *indirect;
356 u8 payload[360]; 293 u8 payload[IWL_CMD_MAX_PAYLOAD];
357 } __attribute__ ((packed)) cmd; 294 } __attribute__ ((packed)) cmd;
358} __attribute__ ((packed)); 295} __attribute__ ((packed));
359 296
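Replacing the bare 360 with IWL_CMD_MAX_PAYLOAD makes the host-command payload limit a named constant. A hedged sketch of a compile-time guard one could keep next to the command definitions (illustrative, not part of the patch):

/* Illustrative only: break the build if a command outgrows the payload. */
static inline void example_check_cmd_payload(void)
{
        BUILD_BUG_ON(sizeof(struct iwl_addsta_cmd)   > IWL_CMD_MAX_PAYLOAD);
        BUILD_BUG_ON(sizeof(struct iwl_qosparam_cmd) > IWL_CMD_MAX_PAYLOAD);
}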
@@ -378,7 +315,7 @@ struct iwl_host_cmd {
378#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 315#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
379 316
380/** 317/**
381 * struct iwl4965_rx_queue - Rx queue 318 * struct iwl_rx_queue - Rx queue
382 * @processed: Internal index to last handled Rx packet 319 * @processed: Internal index to last handled Rx packet
383 * @read: Shared index to newest available Rx buffer 320 * @read: Shared index to newest available Rx buffer
384 * @write: Shared index to oldest written Rx packet 321 * @write: Shared index to oldest written Rx packet
@@ -387,13 +324,13 @@ struct iwl_host_cmd {
387 * @rx_used: List of Rx buffers with no SKB 324 * @rx_used: List of Rx buffers with no SKB
388 * @need_update: flag to indicate we need to update read/write index 325 * @need_update: flag to indicate we need to update read/write index
389 * 326 *
390 * NOTE: rx_free and rx_used are used as a FIFO for iwl4965_rx_mem_buffers 327 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
391 */ 328 */
392struct iwl4965_rx_queue { 329struct iwl_rx_queue {
393 __le32 *bd; 330 __le32 *bd;
394 dma_addr_t dma_addr; 331 dma_addr_t dma_addr;
395 struct iwl4965_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 332 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
396 struct iwl4965_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 333 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
397 u32 processed; 334 u32 processed;
398 u32 read; 335 u32 read;
399 u32 write; 336 u32 write;
@@ -419,9 +356,8 @@ struct iwl4965_rx_queue {
419#define IWL_INVALID_RATE 0xFF 356#define IWL_INVALID_RATE 0xFF
420#define IWL_INVALID_VALUE -1 357#define IWL_INVALID_VALUE -1
421 358
422#ifdef CONFIG_IWL4965_HT
423/** 359/**
424 * struct iwl4965_ht_agg -- aggregation status while waiting for block-ack 360 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
425 * @txq_id: Tx queue used for Tx attempt 361 * @txq_id: Tx queue used for Tx attempt
426 * @frame_count: # frames attempted by Tx command 362 * @frame_count: # frames attempted by Tx command
427 * @wait_for_ba: Expect block-ack before next Tx reply 363 * @wait_for_ba: Expect block-ack before next Tx reply
@@ -434,7 +370,7 @@ struct iwl4965_rx_queue {
434 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info 370 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
435 * until block ack arrives. 371 * until block ack arrives.
436 */ 372 */
437struct iwl4965_ht_agg { 373struct iwl_ht_agg {
438 u16 txq_id; 374 u16 txq_id;
439 u16 frame_count; 375 u16 frame_count;
440 u16 wait_for_ba; 376 u16 wait_for_ba;
@@ -448,21 +384,17 @@ struct iwl4965_ht_agg {
448 u8 state; 384 u8 state;
449}; 385};
450 386
451#endif /* CONFIG_IWL4965_HT */
452 387
453struct iwl4965_tid_data { 388struct iwl_tid_data {
454 u16 seq_number; 389 u16 seq_number;
455 u16 tfds_in_queue; 390 u16 tfds_in_queue;
456#ifdef CONFIG_IWL4965_HT 391 struct iwl_ht_agg agg;
457 struct iwl4965_ht_agg agg;
458#endif /* CONFIG_IWL4965_HT */
459}; 392};
460 393
461struct iwl4965_hw_key { 394struct iwl_hw_key {
462 enum ieee80211_key_alg alg; 395 enum ieee80211_key_alg alg;
463 int keylen; 396 int keylen;
464 u8 keyidx; 397 u8 keyidx;
465 struct ieee80211_key_conf *conf;
466 u8 key[32]; 398 u8 key[32];
467}; 399};
468 400
@@ -474,7 +406,6 @@ union iwl4965_ht_rate_supp {
474 }; 406 };
475}; 407};
476 408
477#ifdef CONFIG_IWL4965_HT
478#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3) 409#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3)
479#define CFG_HT_MPDU_DENSITY_2USEC (0x5) 410#define CFG_HT_MPDU_DENSITY_2USEC (0x5)
480#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC 411#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC
@@ -497,9 +428,8 @@ struct iwl_ht_info {
497 u8 ht_protection; 428 u8 ht_protection;
498 u8 non_GF_STA_present; 429 u8 non_GF_STA_present;
499}; 430};
500#endif /*CONFIG_IWL4965_HT */
501 431
502union iwl4965_qos_capabity { 432union iwl_qos_capabity {
503 struct { 433 struct {
504 u8 edca_count:4; /* bit 0-3 */ 434 u8 edca_count:4; /* bit 0-3 */
505 u8 q_ack:1; /* bit 4 */ 435 u8 q_ack:1; /* bit 4 */
@@ -520,22 +450,22 @@ union iwl4965_qos_capabity {
520}; 450};
521 451
522/* QoS structures */ 452/* QoS structures */
523struct iwl4965_qos_info { 453struct iwl_qos_info {
524 int qos_enable; 454 int qos_enable;
525 int qos_active; 455 int qos_active;
526 union iwl4965_qos_capabity qos_cap; 456 union iwl_qos_capabity qos_cap;
527 struct iwl4965_qosparam_cmd def_qos_parm; 457 struct iwl_qosparam_cmd def_qos_parm;
528}; 458};
529 459
530#define STA_PS_STATUS_WAKE 0 460#define STA_PS_STATUS_WAKE 0
531#define STA_PS_STATUS_SLEEP 1 461#define STA_PS_STATUS_SLEEP 1
532 462
533struct iwl4965_station_entry { 463struct iwl_station_entry {
534 struct iwl4965_addsta_cmd sta; 464 struct iwl_addsta_cmd sta;
535 struct iwl4965_tid_data tid[MAX_TID_COUNT]; 465 struct iwl_tid_data tid[MAX_TID_COUNT];
536 u8 used; 466 u8 used;
537 u8 ps_status; 467 u8 ps_status;
538 struct iwl4965_hw_key keyinfo; 468 struct iwl_hw_key keyinfo;
539}; 469};
540 470
541/* one for each uCode image (inst/data, boot/init/runtime) */ 471/* one for each uCode image (inst/data, boot/init/runtime) */
@@ -546,7 +476,7 @@ struct fw_desc {
546}; 476};
547 477
548/* uCode file layout */ 478/* uCode file layout */
549struct iwl4965_ucode { 479struct iwl_ucode {
550 __le32 ver; /* major/minor/subminor */ 480 __le32 ver; /* major/minor/subminor */
551 __le32 inst_size; /* bytes of runtime instructions */ 481 __le32 inst_size; /* bytes of runtime instructions */
552 __le32 data_size; /* bytes of runtime data */ 482 __le32 data_size; /* bytes of runtime data */
@@ -556,8 +486,6 @@ struct iwl4965_ucode {
556 u8 data[0]; /* data in same order as "size" elements */ 486 u8 data[0]; /* data in same order as "size" elements */
557}; 487};
558 488
559#define IWL_IBSS_MAC_HASH_SIZE 32
560
561struct iwl4965_ibss_seq { 489struct iwl4965_ibss_seq {
562 u8 mac[ETH_ALEN]; 490 u8 mac[ETH_ALEN];
563 u16 seq_num; 491 u16 seq_num;
@@ -566,20 +494,52 @@ struct iwl4965_ibss_seq {
566 struct list_head list; 494 struct list_head list;
567}; 495};
568 496
497struct iwl_sensitivity_ranges {
498 u16 min_nrg_cck;
499 u16 max_nrg_cck;
500
501 u16 nrg_th_cck;
502 u16 nrg_th_ofdm;
503
504 u16 auto_corr_min_ofdm;
505 u16 auto_corr_min_ofdm_mrc;
506 u16 auto_corr_min_ofdm_x1;
507 u16 auto_corr_min_ofdm_mrc_x1;
508
509 u16 auto_corr_max_ofdm;
510 u16 auto_corr_max_ofdm_mrc;
511 u16 auto_corr_max_ofdm_x1;
512 u16 auto_corr_max_ofdm_mrc_x1;
513
514 u16 auto_corr_max_cck;
515 u16 auto_corr_max_cck_mrc;
516 u16 auto_corr_min_cck;
517 u16 auto_corr_min_cck_mrc;
518};
519
520
521#define IWL_FAT_CHANNEL_52 BIT(IEEE80211_BAND_5GHZ)
522
569/** 523/**
570 * struct iwl_hw_params 524 * struct iwl_hw_params
571 * @max_txq_num: Max # Tx queues supported 525 * @max_txq_num: Max # Tx queues supported
572 * @tx_cmd_len: Size of Tx command (but not including frame itself) 526 * @tx/rx_chains_num: Number of TX/RX chains
573 * @tx_ant_num: Number of TX antennas 527 * @valid_tx/rx_ant: usable antennas
574 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 528 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
575 * @rx_buffer_size:
576 * @max_rxq_log: Log-base-2 of max_rxq_size 529 * @max_rxq_log: Log-base-2 of max_rxq_size
530 * @rx_buf_size: Rx buffer size
577 * @max_stations: 531 * @max_stations:
578 * @bcast_sta_id: 532 * @bcast_sta_id:
533 * @fat_channel: is 40MHz width possible in band 2.4
534 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
535 * @sw_crypto: 0 for hw, 1 for sw
536 * @max_xxx_size: for ucode uses
537 * @ct_kill_threshold: temperature threshold
538 * @struct iwl_sensitivity_ranges: range of sensitivity values
539 * @first_ampdu_q: first HW queue available for ampdu
579 */ 540 */
580struct iwl_hw_params { 541struct iwl_hw_params {
581 u16 max_txq_num; 542 u16 max_txq_num;
582 u16 tx_cmd_len;
583 u8 tx_chains_num; 543 u8 tx_chains_num;
584 u8 rx_chains_num; 544 u8 rx_chains_num;
585 u8 valid_tx_ant; 545 u8 valid_tx_ant;
@@ -590,10 +550,18 @@ struct iwl_hw_params {
590 u32 max_pkt_size; 550 u32 max_pkt_size;
591 u8 max_stations; 551 u8 max_stations;
592 u8 bcast_sta_id; 552 u8 bcast_sta_id;
553 u8 fat_channel;
554 u8 sw_crypto;
555 u32 max_inst_size;
556 u32 max_data_size;
557 u32 max_bsm_size;
558 u32 ct_kill_threshold; /* value in hw-dependent units */
559 const struct iwl_sensitivity_ranges *sens;
560 u8 first_ampdu_q;
593}; 561};
594 562
595#define HT_SHORT_GI_20MHZ_ONLY (1 << 0) 563#define HT_SHORT_GI_20MHZ (1 << 0)
596#define HT_SHORT_GI_40MHZ_ONLY (1 << 1) 564#define HT_SHORT_GI_40MHZ (1 << 1)
597 565
598 566
599#define IWL_RX_HDR(x) ((struct iwl4965_rx_frame_hdr *)(\ 567#define IWL_RX_HDR(x) ((struct iwl4965_rx_frame_hdr *)(\
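The expanded iwl_hw_params lets each device's setup hook hand the shared layer its sensitivity ranges, CT-kill threshold and FAT capability in one place. A hedged sketch of such a hook; every value below is a placeholder rather than a real configuration (the sensitivity numbers merely echo constants removed further down in this file's diff):

/* Illustrative only: placeholder values, not a real device config. */
static const struct iwl_sensitivity_ranges example_sens = {
        .min_nrg_cck            = 97,
        .auto_corr_min_ofdm     = 85,
        .auto_corr_max_cck      = 200,
        /* remaining fields omitted for brevity */
};

static void example_fill_hw_params(struct iwl_priv *priv)
{
        priv->hw_params.fat_channel       = IWL_FAT_CHANNEL_52;
        priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; /* real code converts to hw units */
        priv->hw_params.sens              = &example_sens;
        priv->hw_params.first_ampdu_q     = 10; /* arbitrary */
}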
@@ -612,51 +580,18 @@ struct iwl_hw_params {
612 * for use by iwl-*.c 580 * for use by iwl-*.c
613 * 581 *
614 *****************************************************************************/ 582 *****************************************************************************/
615struct iwl4965_addsta_cmd; 583struct iwl_addsta_cmd;
616extern int iwl4965_send_add_station(struct iwl_priv *priv, 584extern int iwl_send_add_sta(struct iwl_priv *priv,
617 struct iwl4965_addsta_cmd *sta, u8 flags); 585 struct iwl_addsta_cmd *sta, u8 flags);
618extern u8 iwl4965_add_station_flags(struct iwl_priv *priv, const u8 *addr, 586u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
619 int is_ap, u8 flags, void *ht_data); 587 u8 flags, struct ieee80211_ht_info *ht_info);
620extern int iwl4965_is_network_packet(struct iwl_priv *priv,
621 struct ieee80211_hdr *header);
622extern int iwl4965_power_init_handle(struct iwl_priv *priv);
623extern void iwl4965_handle_data_packet_monitor(struct iwl_priv *priv,
624 struct iwl4965_rx_mem_buffer *rxb,
625 void *data, short len,
626 struct ieee80211_rx_status *stats,
627 u16 phy_flags);
628extern int iwl4965_is_duplicate_packet(struct iwl_priv *priv,
629 struct ieee80211_hdr *header);
630extern int iwl4965_rx_queue_alloc(struct iwl_priv *priv);
631extern void iwl4965_rx_queue_reset(struct iwl_priv *priv,
632 struct iwl4965_rx_queue *rxq);
633extern int iwl4965_calc_db_from_ratio(int sig_ratio);
634extern int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm);
635extern int iwl4965_tx_queue_init(struct iwl_priv *priv,
636 struct iwl4965_tx_queue *txq, int count, u32 id);
637extern void iwl4965_rx_replenish(void *data);
638extern void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq);
639extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv, 588extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
640 struct ieee80211_hdr *hdr, 589 struct ieee80211_hdr *hdr,
641 const u8 *dest, int left); 590 const u8 *dest, int left);
642extern int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, 591extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
643 struct iwl4965_rx_queue *q); 592int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
644extern void iwl4965_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
645 u32 decrypt_res,
646 struct ieee80211_rx_status *stats);
647extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr);
648int iwl4965_init_geos(struct iwl_priv *priv);
649void iwl4965_free_geos(struct iwl_priv *priv);
650
651extern const u8 iwl4965_broadcast_addr[ETH_ALEN];
652int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
653 593
654/* 594extern const u8 iwl_bcast_addr[ETH_ALEN];
655 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
656 * call this... todo... fix that.
657*/
658extern u8 iwl4965_sync_station(struct iwl_priv *priv, int sta_id,
659 u16 tx_rate, u8 flags);
660 595
661/****************************************************************************** 596/******************************************************************************
662 * 597 *
@@ -674,96 +609,51 @@ extern u8 iwl4965_sync_station(struct iwl_priv *priv, int sta_id,
674 * iwl4965_mac_ <-- mac80211 callback 609 * iwl4965_mac_ <-- mac80211 callback
675 * 610 *
676 ****************************************************************************/ 611 ****************************************************************************/
677extern void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv); 612extern int iwl_rxq_stop(struct iwl_priv *priv);
678extern void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv); 613extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
679extern void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv);
680extern int iwl4965_hw_rxq_stop(struct iwl_priv *priv);
681extern int iwl4965_hw_set_hw_params(struct iwl_priv *priv);
682extern int iwl4965_hw_nic_init(struct iwl_priv *priv);
683extern int iwl4965_hw_nic_stop_master(struct iwl_priv *priv);
684extern void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
685extern void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv);
686extern int iwl4965_hw_nic_reset(struct iwl_priv *priv);
687extern int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
688 dma_addr_t addr, u16 len);
689extern int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq);
690extern int iwl4965_hw_get_temperature(struct iwl_priv *priv);
691extern int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
692 struct iwl4965_tx_queue *txq);
693extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 614extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
694 struct iwl4965_frame *frame, u8 rate); 615 struct iwl_frame *frame, u8 rate);
695extern int iwl4965_hw_get_rx_read(struct iwl_priv *priv);
696extern void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
697 struct iwl_cmd *cmd,
698 struct ieee80211_tx_control *ctrl,
699 struct ieee80211_hdr *hdr,
700 int sta_id, int tx_id);
701extern int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv);
702extern int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
703extern void iwl4965_hw_rx_statistics(struct iwl_priv *priv,
704 struct iwl4965_rx_mem_buffer *rxb);
705extern void iwl4965_disable_events(struct iwl_priv *priv); 616extern void iwl4965_disable_events(struct iwl_priv *priv);
706extern int iwl4965_get_temperature(const struct iwl_priv *priv);
707
708/**
709 * iwl4965_hw_find_station - Find station id for a given BSSID
710 * @bssid: MAC address of station ID to find
711 *
712 * NOTE: This should not be hardware specific but the code has
713 * not yet been merged into a single common layer for managing the
714 * station tables.
715 */
716extern u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
717 617
718extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel); 618extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel);
719extern int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); 619extern int iwl_queue_space(const struct iwl_queue *q);
720extern int iwl4965_queue_space(const struct iwl4965_queue *q); 620static inline int iwl_queue_used(const struct iwl_queue *q, int i)
621{
622 return q->write_ptr > q->read_ptr ?
623 (i >= q->read_ptr && i < q->write_ptr) :
624 !(i < q->read_ptr && i >= q->write_ptr);
625}
626
627
628static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
629{
630 /* This is for scan command, the big buffer at end of command array */
631 if (is_huge)
632 return q->n_window; /* must be power of 2 */
633
634 /* Otherwise, use normal size buffers */
635 return index & (q->n_window - 1);
636}
637
638
721struct iwl_priv; 639struct iwl_priv;
722 640
723extern void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio);
724/* 641/*
725 * Forward declare iwl-4965.c functions for iwl-base.c 642 * Forward declare iwl-4965.c functions for iwl-base.c
726 */ 643 */
727extern int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv,
728 struct iwl4965_tx_queue *txq,
729 u16 byte_cnt);
730extern void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr,
731 int is_ap);
732extern void iwl4965_set_rxon_chain(struct iwl_priv *priv);
733extern int iwl4965_alive_notify(struct iwl_priv *priv);
734extern void iwl4965_update_rate_scaling(struct iwl_priv *priv, u8 mode);
735extern void iwl4965_chain_noise_reset(struct iwl_priv *priv);
736extern void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags,
737 u8 force);
738extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv); 644extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
739extern void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, 645
740 u32 rate_n_flags,
741 struct ieee80211_tx_control *control);
742
743#ifdef CONFIG_IWL4965_HT
744void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
745 struct ieee80211_ht_info *ht_info,
746 enum ieee80211_band band);
747void iwl4965_set_rxon_ht(struct iwl_priv *priv,
748 struct iwl_ht_info *ht_info);
749void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
750 struct ieee80211_ht_info *sta_ht_inf);
751int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 646int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
752 enum ieee80211_ampdu_mlme_action action, 647 enum ieee80211_ampdu_mlme_action action,
753 const u8 *addr, u16 tid, u16 *ssn); 648 const u8 *addr, u16 tid, u16 *ssn);
754int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id, 649int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
755 u8 tid, int txq_id); 650 u8 tid, int txq_id);
756#else
757static inline void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
758 struct ieee80211_ht_info *ht_info,
759 enum ieee80211_band band) {}
760 651
761#endif /*CONFIG_IWL4965_HT */
762/* Structures, enum, and defines specific to the 4965 */ 652/* Structures, enum, and defines specific to the 4965 */
763 653
764#define IWL4965_KW_SIZE 0x1000 /*4k */ 654#define IWL_KW_SIZE 0x1000 /*4k */
765 655
766struct iwl4965_kw { 656struct iwl_kw {
767 dma_addr_t dma_addr; 657 dma_addr_t dma_addr;
768 void *v_addr; 658 void *v_addr;
769 size_t size; 659 size_t size;
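iwl_queue_used() above has to cope with the write pointer wrapping around the circular buffer: once write_ptr falls below read_ptr, the in-use region is everything outside [write_ptr, read_ptr). A small worked check (illustrative only):

/* Illustrative only: with n_bd = 256, read_ptr = 250, write_ptr = 4,
 * entries 250..255 and 0..3 are in use and 4..249 are free. */
static void example_queue_used_check(void)
{
        struct iwl_queue q = { .n_bd = 256, .read_ptr = 250, .write_ptr = 4 };

        WARN_ON(!iwl_queue_used(&q, 252));      /* wrapped region: in use */
        WARN_ON(!iwl_queue_used(&q, 2));        /* below write_ptr: in use */
        WARN_ON(iwl_queue_used(&q, 100));       /* the gap: free */
}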
@@ -782,13 +672,8 @@ struct iwl4965_kw {
782#define IWL_OPERATION_MODE_MIXED 2 672#define IWL_OPERATION_MODE_MIXED 2
783#define IWL_OPERATION_MODE_20MHZ 3 673#define IWL_OPERATION_MODE_20MHZ 3
784 674
785#define IWL_EXT_CHANNEL_OFFSET_NONE 0 675#define IWL_TX_CRC_SIZE 4
786#define IWL_EXT_CHANNEL_OFFSET_ABOVE 1 676#define IWL_TX_DELIMITER_SIZE 4
787#define IWL_EXT_CHANNEL_OFFSET_RESERVE1 2
788#define IWL_EXT_CHANNEL_OFFSET_BELOW 3
789
790#define NRG_NUM_PREV_STAT_L 20
791#define NUM_RX_CHAINS (3)
792 677
793#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 678#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
794 679
@@ -802,7 +687,6 @@ struct iwl4965_lq_mngr {
802 unsigned long stamp_last; 687 unsigned long stamp_last;
803 u32 flush_time; 688 u32 flush_time;
804 u32 tx_packets; 689 u32 tx_packets;
805 u8 lq_ready;
806}; 690};
807 691
808/* Sensitivity and chain noise calibration */ 692/* Sensitivity and chain noise calibration */
@@ -818,23 +702,8 @@ struct iwl4965_lq_mngr {
818#define MAX_FA_CCK 50 702#define MAX_FA_CCK 50
819#define MIN_FA_CCK 5 703#define MIN_FA_CCK 5
820 704
821#define NRG_MIN_CCK 97
822#define NRG_MAX_CCK 0
823
824#define AUTO_CORR_MIN_OFDM 85
825#define AUTO_CORR_MIN_OFDM_MRC 170
826#define AUTO_CORR_MIN_OFDM_X1 105
827#define AUTO_CORR_MIN_OFDM_MRC_X1 220
828#define AUTO_CORR_MAX_OFDM 120
829#define AUTO_CORR_MAX_OFDM_MRC 210
830#define AUTO_CORR_MAX_OFDM_X1 140
831#define AUTO_CORR_MAX_OFDM_MRC_X1 270
832#define AUTO_CORR_STEP_OFDM 1 705#define AUTO_CORR_STEP_OFDM 1
833 706
834#define AUTO_CORR_MIN_CCK (125)
835#define AUTO_CORR_MAX_CCK (200)
836#define AUTO_CORR_MIN_CCK_MRC 200
837#define AUTO_CORR_MAX_CCK_MRC 400
838#define AUTO_CORR_STEP_CCK 3 707#define AUTO_CORR_STEP_CCK 3
839#define AUTO_CORR_MAX_TH_CCK 160 708#define AUTO_CORR_MAX_TH_CCK 160
840 709
@@ -853,6 +722,9 @@ struct iwl4965_lq_mngr {
853#define IN_BAND_FILTER 0xFF 722#define IN_BAND_FILTER 0xFF
854#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF 723#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
855 724
725#define NRG_NUM_PREV_STAT_L 20
726#define NUM_RX_CHAINS 3
727
856enum iwl4965_false_alarm_state { 728enum iwl4965_false_alarm_state {
857 IWL_FA_TOO_MANY = 0, 729 IWL_FA_TOO_MANY = 0,
858 IWL_FA_TOO_FEW = 1, 730 IWL_FA_TOO_FEW = 1,
@@ -865,11 +737,6 @@ enum iwl4965_chain_noise_state {
865 IWL_CHAIN_NOISE_CALIBRATED = 2, 737 IWL_CHAIN_NOISE_CALIBRATED = 2,
866}; 738};
867 739
868enum iwl4965_sensitivity_state {
869 IWL_SENS_CALIB_ALLOWED = 0,
870 IWL_SENS_CALIB_NEED_REINIT = 1,
871};
872
873enum iwl4965_calib_enabled_state { 740enum iwl4965_calib_enabled_state {
874 IWL_CALIB_DISABLED = 0, /* must be 0 */ 741 IWL_CALIB_DISABLED = 0, /* must be 0 */
875 IWL_CALIB_ENABLED = 1, 742 IWL_CALIB_ENABLED = 1,
@@ -884,8 +751,23 @@ struct statistics_general_data {
884 u32 beacon_energy_c; 751 u32 beacon_energy_c;
885}; 752};
886 753
754struct iwl_calib_results {
755 void *tx_iq_res;
756 void *tx_iq_perd_res;
757 void *lo_res;
758 u32 tx_iq_res_len;
759 u32 tx_iq_perd_res_len;
760 u32 lo_res_len;
761};
762
763enum ucode_type {
764 UCODE_NONE = 0,
765 UCODE_INIT,
766 UCODE_RT
767};
768
887/* Sensitivity calib data */ 769/* Sensitivity calib data */
888struct iwl4965_sensitivity_data { 770struct iwl_sensitivity_data {
889 u32 auto_corr_ofdm; 771 u32 auto_corr_ofdm;
890 u32 auto_corr_ofdm_mrc; 772 u32 auto_corr_ofdm_mrc;
891 u32 auto_corr_ofdm_x1; 773 u32 auto_corr_ofdm_x1;
@@ -909,12 +791,10 @@ struct iwl4965_sensitivity_data {
909 s32 nrg_auto_corr_silence_diff; 791 s32 nrg_auto_corr_silence_diff;
910 u32 num_in_cck_no_fa; 792 u32 num_in_cck_no_fa;
911 u32 nrg_th_ofdm; 793 u32 nrg_th_ofdm;
912
913 u8 state;
914}; 794};
915 795
916/* Chain noise (differential Rx gain) calib data */ 796/* Chain noise (differential Rx gain) calib data */
917struct iwl4965_chain_noise_data { 797struct iwl_chain_noise_data {
918 u8 state; 798 u8 state;
919 u16 beacon_count; 799 u16 beacon_count;
920 u32 chain_noise_a; 800 u32 chain_noise_a;
@@ -960,7 +840,7 @@ struct iwl_priv {
960 bool add_radiotap; 840 bool add_radiotap;
961 841
962 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 842 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
963 struct iwl4965_rx_mem_buffer *rxb); 843 struct iwl_rx_mem_buffer *rxb);
964 844
965 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 845 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
966 846
@@ -985,6 +865,9 @@ struct iwl_priv {
985 s32 temperature; /* degrees Kelvin */ 865 s32 temperature; /* degrees Kelvin */
986 s32 last_temperature; 866 s32 last_temperature;
987 867
868 /* init calibration results */
869 struct iwl_calib_results calib_results;
870
988 /* Scan related variables */ 871 /* Scan related variables */
989 unsigned long last_scan_jiffies; 872 unsigned long last_scan_jiffies;
990 unsigned long next_scan_jiffies; 873 unsigned long next_scan_jiffies;
@@ -995,7 +878,8 @@ struct iwl_priv {
995 int one_direct_scan; 878 int one_direct_scan;
996 u8 direct_ssid_len; 879 u8 direct_ssid_len;
997 u8 direct_ssid[IW_ESSID_MAX_SIZE]; 880 u8 direct_ssid[IW_ESSID_MAX_SIZE];
998 struct iwl4965_scan_cmd *scan; 881 struct iwl_scan_cmd *scan;
882 u32 scan_tx_ant[IEEE80211_NUM_BANDS];
999 883
1000 /* spinlock */ 884 /* spinlock */
1001 spinlock_t lock; /* protect general shared data */ 885 spinlock_t lock; /* protect general shared data */
@@ -1007,6 +891,9 @@ struct iwl_priv {
1007 891
1008 /* pci hardware address support */ 892 /* pci hardware address support */
1009 void __iomem *hw_base; 893 void __iomem *hw_base;
894 u32 hw_rev;
895 u32 hw_wa_rev;
896 u8 rev_id;
1010 897
1011 /* uCode images, save to reload in case of failure */ 898 /* uCode images, save to reload in case of failure */
1012 struct fw_desc ucode_code; /* runtime inst */ 899 struct fw_desc ucode_code; /* runtime inst */
@@ -1015,6 +902,8 @@ struct iwl_priv {
1015 struct fw_desc ucode_init; /* initialization inst */ 902 struct fw_desc ucode_init; /* initialization inst */
1016 struct fw_desc ucode_init_data; /* initialization data */ 903 struct fw_desc ucode_init_data; /* initialization data */
1017 struct fw_desc ucode_boot; /* bootstrap inst */ 904 struct fw_desc ucode_boot; /* bootstrap inst */
905 enum ucode_type ucode_type;
906 u8 ucode_write_complete; /* the image write is complete */
1018 907
1019 908
1020 struct iwl4965_rxon_time_cmd rxon_timing; 909 struct iwl4965_rxon_time_cmd rxon_timing;
@@ -1023,22 +912,22 @@ struct iwl_priv {
1023 * changed via explicit cast within the 912 * changed via explicit cast within the
1024 * routines that actually update the physical 913 * routines that actually update the physical
1025 * hardware */ 914 * hardware */
1026 const struct iwl4965_rxon_cmd active_rxon; 915 const struct iwl_rxon_cmd active_rxon;
1027 struct iwl4965_rxon_cmd staging_rxon; 916 struct iwl_rxon_cmd staging_rxon;
1028 917
1029 int error_recovering; 918 int error_recovering;
1030 struct iwl4965_rxon_cmd recovery_rxon; 919 struct iwl_rxon_cmd recovery_rxon;
1031 920
1032 /* 1st responses from initialize and runtime uCode images. 921 /* 1st responses from initialize and runtime uCode images.
1033 * 4965's initialize alive response contains some calibration data. */ 922 * 4965's initialize alive response contains some calibration data. */
1034 struct iwl4965_init_alive_resp card_alive_init; 923 struct iwl_init_alive_resp card_alive_init;
1035 struct iwl4965_alive_resp card_alive; 924 struct iwl_alive_resp card_alive;
1036#ifdef CONFIG_IWLWIFI_RFKILL 925#ifdef CONFIG_IWLWIFI_RFKILL
1037 struct iwl_rfkill_mngr rfkill_mngr; 926 struct rfkill *rfkill;
1038#endif 927#endif
1039 928
1040#ifdef CONFIG_IWLWIFI_LEDS 929#ifdef CONFIG_IWLWIFI_LEDS
1041 struct iwl4965_led led[IWL_LED_TRG_MAX]; 930 struct iwl_led led[IWL_LED_TRG_MAX];
1042 unsigned long last_blink_time; 931 unsigned long last_blink_time;
1043 u8 last_blink_rate; 932 u8 last_blink_rate;
1044 u8 allow_blinking; 933 u8 allow_blinking;
@@ -1050,17 +939,12 @@ struct iwl_priv {
1050 939
1051 u8 assoc_station_added; 940 u8 assoc_station_added;
1052 u8 use_ant_b_for_management_frame; /* Tx antenna selection */ 941 u8 use_ant_b_for_management_frame; /* Tx antenna selection */
1053 u8 valid_antenna; /* Bit mask of antennas actually connected */
1054#ifdef CONFIG_IWL4965_SENSITIVITY
1055 struct iwl4965_sensitivity_data sensitivity_data;
1056 struct iwl4965_chain_noise_data chain_noise_data;
1057 u8 start_calib; 942 u8 start_calib;
943 struct iwl_sensitivity_data sensitivity_data;
944 struct iwl_chain_noise_data chain_noise_data;
1058 __le16 sensitivity_tbl[HD_TABLE_SIZE]; 945 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1059#endif /*CONFIG_IWL4965_SENSITIVITY*/
1060 946
1061#ifdef CONFIG_IWL4965_HT
1062 struct iwl_ht_info current_ht_config; 947 struct iwl_ht_info current_ht_config;
1063#endif
1064 u8 last_phy_res[100]; 948 u8 last_phy_res[100];
1065 949
1066 /* Rate scaling data */ 950 /* Rate scaling data */
@@ -1075,10 +959,10 @@ struct iwl_priv {
1075 int activity_timer_active; 959 int activity_timer_active;
1076 960
1077 /* Rx and Tx DMA processing queues */ 961 /* Rx and Tx DMA processing queues */
1078 struct iwl4965_rx_queue rxq; 962 struct iwl_rx_queue rxq;
1079 struct iwl4965_tx_queue txq[IWL_MAX_NUM_QUEUES]; 963 struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES];
1080 unsigned long txq_ctx_active_msk; 964 unsigned long txq_ctx_active_msk;
1081 struct iwl4965_kw kw; /* keep warm address */ 965 struct iwl_kw kw; /* keep warm address */
1082 u32 scd_base_addr; /* scheduler sram base address */ 966 u32 scd_base_addr; /* scheduler sram base address */
1083 967
1084 unsigned long status; 968 unsigned long status;
@@ -1092,9 +976,9 @@ struct iwl_priv {
1092 u64 bytes; 976 u64 bytes;
1093 } tx_stats[3], rx_stats[3]; 977 } tx_stats[3], rx_stats[3];
1094 978
1095 struct iwl4965_power_mgr power_data; 979 struct iwl_power_mgr power_data;
1096 980
1097 struct iwl4965_notif_statistics statistics; 981 struct iwl_notif_statistics statistics;
1098 unsigned long last_statistics_time; 982 unsigned long last_statistics_time;
1099 983
1100 /* context information */ 984 /* context information */
@@ -1111,7 +995,7 @@ struct iwl_priv {
1111 /*station table variables */ 995 /*station table variables */
1112 spinlock_t sta_lock; 996 spinlock_t sta_lock;
1113 int num_stations; 997 int num_stations;
1114 struct iwl4965_station_entry stations[IWL_STATION_COUNT]; 998 struct iwl_station_entry stations[IWL_STATION_COUNT];
1115 struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; 999 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1116 u8 default_wep_key; 1000 u8 default_wep_key;
1117 u8 key_mapping_key; 1001 u8 key_mapping_key;
@@ -1122,22 +1006,13 @@ struct iwl_priv {
1122 1006
1123 u8 mac80211_registered; 1007 u8 mac80211_registered;
1124 1008
1125 u32 notif_missed_beacons;
1126
1127 /* Rx'd packet timing information */ 1009 /* Rx'd packet timing information */
1128 u32 last_beacon_time; 1010 u32 last_beacon_time;
1129 u64 last_tsf; 1011 u64 last_tsf;
1130 1012
1131 /* Duplicate packet detection */
1132 u16 last_seq_num;
1133 u16 last_frag_num;
1134 unsigned long last_packet_time;
1135
1136 /* Hash table for finding stations in IBSS network */
1137 struct list_head ibss_mac_hash[IWL_IBSS_MAC_HASH_SIZE];
1138
1139 /* eeprom */ 1013 /* eeprom */
1140 struct iwl4965_eeprom eeprom; 1014 u8 *eeprom;
1015 struct iwl_eeprom_calib_info *calib_info;
1141 1016
1142 enum ieee80211_if_types iw_mode; 1017 enum ieee80211_if_types iw_mode;
1143 1018
@@ -1151,6 +1026,7 @@ struct iwl_priv {
1151 struct iwl_hw_params hw_params; 1026 struct iwl_hw_params hw_params;
1152 /* driver/uCode shared Tx Byte Counts and Rx status */ 1027 /* driver/uCode shared Tx Byte Counts and Rx status */
1153 void *shared_virt; 1028 void *shared_virt;
1029 int rb_closed_offset;
1154 /* Physical Pointer to Tx Byte Counts and Rx status */ 1030 /* Physical Pointer to Tx Byte Counts and Rx status */
1155 dma_addr_t shared_phys; 1031 dma_addr_t shared_phys;
1156 1032
@@ -1160,7 +1036,7 @@ struct iwl_priv {
1160 u16 assoc_capability; 1036 u16 assoc_capability;
1161 u8 ps_mode; 1037 u8 ps_mode;
1162 1038
1163 struct iwl4965_qos_info qos_data; 1039 struct iwl_qos_info qos_data;
1164 1040
1165 struct workqueue_struct *workqueue; 1041 struct workqueue_struct *workqueue;
1166 1042
@@ -1176,20 +1052,16 @@ struct iwl_priv {
1176 struct work_struct report_work; 1052 struct work_struct report_work;
1177 struct work_struct request_scan; 1053 struct work_struct request_scan;
1178 struct work_struct beacon_update; 1054 struct work_struct beacon_update;
1055 struct work_struct set_monitor;
1179 1056
1180 struct tasklet_struct irq_tasklet; 1057 struct tasklet_struct irq_tasklet;
1181 1058
1182 struct delayed_work init_alive_start; 1059 struct delayed_work init_alive_start;
1183 struct delayed_work alive_start; 1060 struct delayed_work alive_start;
1184 struct delayed_work activity_timer;
1185 struct delayed_work thermal_periodic;
1186 struct delayed_work gather_stats;
1187 struct delayed_work scan_check; 1061 struct delayed_work scan_check;
1188 struct delayed_work post_associate; 1062 /* TX Power */
1189 1063 s8 tx_power_user_lmt;
1190#define IWL_DEFAULT_TX_POWER 0x0F 1064 s8 tx_power_channel_lmt;
1191 s8 user_txpower_limit;
1192 s8 max_channel_txpower_limit;
1193 1065
1194#ifdef CONFIG_PM 1066#ifdef CONFIG_PM
1195 u32 pm_state[16]; 1067 u32 pm_state[16];
@@ -1197,6 +1069,7 @@ struct iwl_priv {
1197 1069
1198#ifdef CONFIG_IWLWIFI_DEBUG 1070#ifdef CONFIG_IWLWIFI_DEBUG
1199 /* debugging info */ 1071 /* debugging info */
1072 u32 debug_level;
1200 u32 framecnt_to_us; 1073 u32 framecnt_to_us;
1201 atomic_t restrict_refcnt; 1074 atomic_t restrict_refcnt;
1202#ifdef CONFIG_IWLWIFI_DEBUGFS 1075#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1206,12 +1079,40 @@ struct iwl_priv {
1206#endif /* CONFIG_IWLWIFI_DEBUG */ 1079#endif /* CONFIG_IWLWIFI_DEBUG */
1207 1080
1208 struct work_struct txpower_work; 1081 struct work_struct txpower_work;
1209#ifdef CONFIG_IWL4965_SENSITIVITY 1082 u32 disable_sens_cal;
1210 struct work_struct sensitivity_work; 1083 u32 disable_chain_noise_cal;
1211#endif 1084 u32 disable_tx_power_cal;
1085 struct work_struct run_time_calib_work;
1212 struct timer_list statistics_periodic; 1086 struct timer_list statistics_periodic;
1213}; /*iwl_priv */ 1087}; /*iwl_priv */
1214 1088
1089static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1090{
1091 set_bit(txq_id, &priv->txq_ctx_active_msk);
1092}
1093
1094static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1095{
1096 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1097}
1098
1099#ifdef CONFIG_IWLWIFI_DEBUG
1100const char *iwl_get_tx_fail_reason(u32 status);
1101#else
1102static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
1103#endif
1104
1105
1106static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
1107 int txq_id, int idx)
1108{
1109 if (priv->txq[txq_id].txb[idx].skb[0])
1110 return (struct ieee80211_hdr *)priv->txq[txq_id].
1111 txb[idx].skb[0]->data;
1112 return NULL;
1113}
1114
1115
1215static inline int iwl_is_associated(struct iwl_priv *priv) 1116static inline int iwl_is_associated(struct iwl_priv *priv)
1216{ 1117{
1217 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; 1118 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
@@ -1224,11 +1125,6 @@ static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
1224 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; 1125 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1225} 1126}
1226 1127
1227static inline int is_channel_narrow(const struct iwl_channel_info *ch_info)
1228{
1229 return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0;
1230}
1231
1232static inline int is_channel_radar(const struct iwl_channel_info *ch_info) 1128static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
1233{ 1129{
1234 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; 1130 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
@@ -1254,9 +1150,26 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1254 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; 1150 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1255} 1151}
1256 1152
1153#ifdef CONFIG_IWLWIFI_DEBUG
1154static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
1155 void *p, u32 len)
1156{
1157 if (!(priv->debug_level & level))
1158 return;
1159
1160 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
1161 p, len, 1);
1162}
1163#else
1164static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
1165 void *p, u32 len)
1166{
1167}
1168#endif
1169
1257extern const struct iwl_channel_info *iwl_get_channel_info( 1170extern const struct iwl_channel_info *iwl_get_channel_info(
1258 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel); 1171 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
1259 1172
1260/* Requires full declaration of iwl_priv before including */ 1173/* Requires full declaration of iwl_priv before including */
1261 1174
1262#endif /* __iwl4965_4965_h__ */ 1175#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index a07d5dcb7abc..4a08a1b50979 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -68,8 +68,8 @@
68 68
69#include <net/mac80211.h> 69#include <net/mac80211.h>
70 70
71#include "iwl-4965-commands.h" 71#include "iwl-commands.h"
72#include "iwl-4965.h" 72#include "iwl-dev.h"
73#include "iwl-core.h" 73#include "iwl-core.h"
74#include "iwl-debug.h" 74#include "iwl-debug.h"
75#include "iwl-eeprom.h" 75#include "iwl-eeprom.h"
@@ -193,6 +193,12 @@ void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv)
193} 193}
194EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore); 194EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore);
195 195
196const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
197{
198 BUG_ON(offset >= priv->cfg->eeprom_size);
199 return &priv->eeprom[offset];
200}
201EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
196 202
197/** 203/**
198 * iwl_eeprom_init - read EEPROM contents 204 * iwl_eeprom_init - read EEPROM contents
@@ -203,30 +209,35 @@ EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore);
203 */ 209 */
204int iwl_eeprom_init(struct iwl_priv *priv) 210int iwl_eeprom_init(struct iwl_priv *priv)
205{ 211{
206 u16 *e = (u16 *)&priv->eeprom; 212 u16 *e;
207 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 213 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
208 u32 r; 214 u32 r;
209 int sz = sizeof(priv->eeprom); 215 int sz = priv->cfg->eeprom_size;
210 int ret; 216 int ret;
211 int i; 217 int i;
212 u16 addr; 218 u16 addr;
213 219
214 /* The EEPROM structure has several padding buffers within it 220 /* allocate eeprom */
215 * and when adding new EEPROM maps is subject to programmer errors 221 priv->eeprom = kzalloc(sz, GFP_KERNEL);
216 * which may be very difficult to identify without explicitly 222 if (!priv->eeprom) {
217 * checking the resulting size of the eeprom map. */ 223 ret = -ENOMEM;
218 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); 224 goto alloc_err;
225 }
226 e = (u16 *)priv->eeprom;
219 227
220 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 228 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
229 if (ret < 0) {
221 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); 230 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
222 return -ENOENT; 231 ret = -ENOENT;
232 goto err;
223 } 233 }
224 234
225 /* Make sure driver (instead of uCode) is allowed to read EEPROM */ 235 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
226 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv); 236 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
227 if (ret < 0) { 237 if (ret < 0) {
228 IWL_ERROR("Failed to acquire EEPROM semaphore.\n"); 238 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
229 return -ENOENT; 239 ret = -ENOENT;
240 goto err;
230 } 241 }
231 242
232 /* eeprom is an array of 16bit values */ 243 /* eeprom is an array of 16bit values */
@@ -250,61 +261,98 @@ int iwl_eeprom_init(struct iwl_priv *priv)
250 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); 261 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
251 } 262 }
252 ret = 0; 263 ret = 0;
253
254done: 264done:
255 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv); 265 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
266err:
267 if (ret)
268 kfree(priv->eeprom);
269alloc_err:
256 return ret; 270 return ret;
257} 271}
258EXPORT_SYMBOL(iwl_eeprom_init); 272EXPORT_SYMBOL(iwl_eeprom_init);
259 273
274void iwl_eeprom_free(struct iwl_priv *priv)
275{
276	if (priv->eeprom)
277 kfree(priv->eeprom);
278 priv->eeprom = NULL;
279}
280EXPORT_SYMBOL(iwl_eeprom_free);
281
282int iwl_eeprom_check_version(struct iwl_priv *priv)
283{
284 return priv->cfg->ops->lib->eeprom_ops.check_version(priv);
285}
286EXPORT_SYMBOL(iwl_eeprom_check_version);
287
288const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
289{
290 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
291}
292EXPORT_SYMBOL(iwl_eeprom_query_addr);
293
294u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
295{
296 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
297}
298EXPORT_SYMBOL(iwl_eeprom_query16);
260 299
261void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) 300void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
262{ 301{
263 memcpy(mac, priv->eeprom.mac_address, 6); 302 const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv,
303 EEPROM_MAC_ADDRESS);
304 memcpy(mac, addr, ETH_ALEN);
264} 305}
265EXPORT_SYMBOL(iwl_eeprom_get_mac); 306EXPORT_SYMBOL(iwl_eeprom_get_mac);
266 307
267static void iwl_init_band_reference(const struct iwl_priv *priv, 308static void iwl_init_band_reference(const struct iwl_priv *priv,
268 int band, 309 int eep_band, int *eeprom_ch_count,
269 int *eeprom_ch_count, 310 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const struct iwl4965_eeprom_channel 311 const u8 **eeprom_ch_index)
271 **eeprom_ch_info,
272 const u8 **eeprom_ch_index)
273{ 312{
274 switch (band) { 313 u32 offset = priv->cfg->ops->lib->
314 eeprom_ops.regulatory_bands[eep_band - 1];
315 switch (eep_band) {
275 case 1: /* 2.4GHz band */ 316 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); 317 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
277 *eeprom_ch_info = priv->eeprom.band_1_channels; 318 *eeprom_ch_info = (struct iwl_eeprom_channel *)
319 iwl_eeprom_query_addr(priv, offset);
278 *eeprom_ch_index = iwl_eeprom_band_1; 320 *eeprom_ch_index = iwl_eeprom_band_1;
279 break; 321 break;
280 case 2: /* 4.9GHz band */ 322 case 2: /* 4.9GHz band */
281 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); 323 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
282 *eeprom_ch_info = priv->eeprom.band_2_channels; 324 *eeprom_ch_info = (struct iwl_eeprom_channel *)
325 iwl_eeprom_query_addr(priv, offset);
283 *eeprom_ch_index = iwl_eeprom_band_2; 326 *eeprom_ch_index = iwl_eeprom_band_2;
284 break; 327 break;
285 case 3: /* 5.2GHz band */ 328 case 3: /* 5.2GHz band */
286 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); 329 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
287 *eeprom_ch_info = priv->eeprom.band_3_channels; 330 *eeprom_ch_info = (struct iwl_eeprom_channel *)
331 iwl_eeprom_query_addr(priv, offset);
288 *eeprom_ch_index = iwl_eeprom_band_3; 332 *eeprom_ch_index = iwl_eeprom_band_3;
289 break; 333 break;
290 case 4: /* 5.5GHz band */ 334 case 4: /* 5.5GHz band */
291 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); 335 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
292 *eeprom_ch_info = priv->eeprom.band_4_channels; 336 *eeprom_ch_info = (struct iwl_eeprom_channel *)
337 iwl_eeprom_query_addr(priv, offset);
293 *eeprom_ch_index = iwl_eeprom_band_4; 338 *eeprom_ch_index = iwl_eeprom_band_4;
294 break; 339 break;
295 case 5: /* 5.7GHz band */ 340 case 5: /* 5.7GHz band */
296 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); 341 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
297 *eeprom_ch_info = priv->eeprom.band_5_channels; 342 *eeprom_ch_info = (struct iwl_eeprom_channel *)
343 iwl_eeprom_query_addr(priv, offset);
298 *eeprom_ch_index = iwl_eeprom_band_5; 344 *eeprom_ch_index = iwl_eeprom_band_5;
299 break; 345 break;
300 case 6: /* 2.4GHz FAT channels */ 346 case 6: /* 2.4GHz FAT channels */
301 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); 347 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
302 *eeprom_ch_info = priv->eeprom.band_24_channels; 348 *eeprom_ch_info = (struct iwl_eeprom_channel *)
349 iwl_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwl_eeprom_band_6; 350 *eeprom_ch_index = iwl_eeprom_band_6;
304 break; 351 break;
305 case 7: /* 5 GHz FAT channels */ 352 case 7: /* 5 GHz FAT channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); 353 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
307 *eeprom_ch_info = priv->eeprom.band_52_channels; 354 *eeprom_ch_info = (struct iwl_eeprom_channel *)
355 iwl_eeprom_query_addr(priv, offset);
308 *eeprom_ch_index = iwl_eeprom_band_7; 356 *eeprom_ch_index = iwl_eeprom_band_7;
309 break; 357 break;
310 default: 358 default:
@@ -317,13 +365,13 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
317 ? # x " " : "") 365 ? # x " " : "")
318 366
319/** 367/**
320 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv. 368 * iwl_set_fat_chan_info - Copy fat channel info into driver's priv.
321 * 369 *
322 * Does not set up a command, or touch hardware. 370 * Does not set up a command, or touch hardware.
323 */ 371 */
324static int iwl4965_set_fat_chan_info(struct iwl_priv *priv, 372static int iwl_set_fat_chan_info(struct iwl_priv *priv,
325 enum ieee80211_band band, u16 channel, 373 enum ieee80211_band band, u16 channel,
326 const struct iwl4965_eeprom_channel *eeprom_ch, 374 const struct iwl_eeprom_channel *eeprom_ch,
327 u8 fat_extension_channel) 375 u8 fat_extension_channel)
328{ 376{
329 struct iwl_channel_info *ch_info; 377 struct iwl_channel_info *ch_info;
@@ -334,8 +382,8 @@ static int iwl4965_set_fat_chan_info(struct iwl_priv *priv,
334 if (!is_channel_valid(ch_info)) 382 if (!is_channel_valid(ch_info))
335 return -1; 383 return -1;
336 384
337 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" 385 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
338 " %ddBm): Ad-Hoc %ssupported\n", 386 " Ad-Hoc %ssupported\n",
339 ch_info->channel, 387 ch_info->channel,
340 is_channel_a_band(ch_info) ? 388 is_channel_a_band(ch_info) ?
341 "5.2" : "2.4", 389 "5.2" : "2.4",
@@ -343,7 +391,6 @@ static int iwl4965_set_fat_chan_info(struct iwl_priv *priv,
343 CHECK_AND_PRINT(ACTIVE), 391 CHECK_AND_PRINT(ACTIVE),
344 CHECK_AND_PRINT(RADAR), 392 CHECK_AND_PRINT(RADAR),
345 CHECK_AND_PRINT(WIDE), 393 CHECK_AND_PRINT(WIDE),
346 CHECK_AND_PRINT(NARROW),
347 CHECK_AND_PRINT(DFS), 394 CHECK_AND_PRINT(DFS),
348 eeprom_ch->flags, 395 eeprom_ch->flags,
349 eeprom_ch->max_power_avg, 396 eeprom_ch->max_power_avg,
@@ -372,7 +419,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
372{ 419{
373 int eeprom_ch_count = 0; 420 int eeprom_ch_count = 0;
374 const u8 *eeprom_ch_index = NULL; 421 const u8 *eeprom_ch_index = NULL;
375 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL; 422 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
376 int band, ch; 423 int band, ch;
377 struct iwl_channel_info *ch_info; 424 struct iwl_channel_info *ch_info;
378 425
@@ -381,12 +428,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
381 return 0; 428 return 0;
382 } 429 }
383 430
384 if (priv->eeprom.version < 0x2f) {
385 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
386 priv->eeprom.version);
387 return -EINVAL;
388 }
389
390 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); 431 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
391 432
392 priv->channel_count = 433 priv->channel_count =
@@ -429,6 +470,11 @@ int iwl_init_channel_map(struct iwl_priv *priv)
429 /* Copy the run-time flags so they are there even on 470 /* Copy the run-time flags so they are there even on
430 * invalid channels */ 471 * invalid channels */
431 ch_info->flags = eeprom_ch_info[ch].flags; 472 ch_info->flags = eeprom_ch_info[ch].flags;
473 /* First write that fat is not enabled, and then enable
474 * one by one */
475 ch_info->fat_extension_channel =
476 (IEEE80211_CHAN_NO_FAT_ABOVE |
477 IEEE80211_CHAN_NO_FAT_BELOW);
432 478
433 if (!(is_channel_valid(ch_info))) { 479 if (!(is_channel_valid(ch_info))) {
434 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - " 480 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
@@ -447,8 +493,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
447 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 493 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
448 ch_info->min_power = 0; 494 ch_info->min_power = 0;
449 495
450 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x" 496 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):"
451 " %ddBm): Ad-Hoc %ssupported\n", 497 " Ad-Hoc %ssupported\n",
452 ch_info->channel, 498 ch_info->channel,
453 is_channel_a_band(ch_info) ? 499 is_channel_a_band(ch_info) ?
454 "5.2" : "2.4", 500 "5.2" : "2.4",
@@ -457,7 +503,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
457 CHECK_AND_PRINT_I(ACTIVE), 503 CHECK_AND_PRINT_I(ACTIVE),
458 CHECK_AND_PRINT_I(RADAR), 504 CHECK_AND_PRINT_I(RADAR),
459 CHECK_AND_PRINT_I(WIDE), 505 CHECK_AND_PRINT_I(WIDE),
460 CHECK_AND_PRINT_I(NARROW),
461 CHECK_AND_PRINT_I(DFS), 506 CHECK_AND_PRINT_I(DFS),
462 eeprom_ch_info[ch].flags, 507 eeprom_ch_info[ch].flags,
463 eeprom_ch_info[ch].max_power_avg, 508 eeprom_ch_info[ch].max_power_avg,
@@ -470,8 +515,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
470 /* Set the user_txpower_limit to the highest power 515 /* Set the user_txpower_limit to the highest power
471 * supported by any channel */ 516 * supported by any channel */
472 if (eeprom_ch_info[ch].max_power_avg > 517 if (eeprom_ch_info[ch].max_power_avg >
473 priv->user_txpower_limit) 518 priv->tx_power_user_lmt)
474 priv->user_txpower_limit = 519 priv->tx_power_user_lmt =
475 eeprom_ch_info[ch].max_power_avg; 520 eeprom_ch_info[ch].max_power_avg;
476 521
477 ch_info++; 522 ch_info++;
@@ -494,24 +539,26 @@ int iwl_init_channel_map(struct iwl_priv *priv)
494 for (ch = 0; ch < eeprom_ch_count; ch++) { 539 for (ch = 0; ch < eeprom_ch_count; ch++) {
495 540
496 if ((band == 6) && 541 if ((band == 6) &&
497 ((eeprom_ch_index[ch] == 5) || 542 ((eeprom_ch_index[ch] == 5) ||
498 (eeprom_ch_index[ch] == 6) || 543 (eeprom_ch_index[ch] == 6) ||
499 (eeprom_ch_index[ch] == 7))) 544 (eeprom_ch_index[ch] == 7)))
500 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX; 545 /* both are allowed: above and below */
546 fat_extension_chan = 0;
501 else 547 else
502 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE; 548 fat_extension_chan =
549 IEEE80211_CHAN_NO_FAT_BELOW;
503 550
504 /* Set up driver's info for lower half */ 551 /* Set up driver's info for lower half */
505 iwl4965_set_fat_chan_info(priv, ieeeband, 552 iwl_set_fat_chan_info(priv, ieeeband,
506 eeprom_ch_index[ch], 553 eeprom_ch_index[ch],
507 &(eeprom_ch_info[ch]), 554 &(eeprom_ch_info[ch]),
508 fat_extension_chan); 555 fat_extension_chan);
509 556
510 /* Set up driver's info for upper half */ 557 /* Set up driver's info for upper half */
511 iwl4965_set_fat_chan_info(priv, ieeeband, 558 iwl_set_fat_chan_info(priv, ieeeband,
512 (eeprom_ch_index[ch] + 4), 559 (eeprom_ch_index[ch] + 4),
513 &(eeprom_ch_info[ch]), 560 &(eeprom_ch_info[ch]),
514 HT_IE_EXT_CHANNEL_BELOW); 561 IEEE80211_CHAN_NO_FAT_ABOVE);
515 } 562 }
516 } 563 }
517 564
@@ -520,23 +567,21 @@ int iwl_init_channel_map(struct iwl_priv *priv)
520EXPORT_SYMBOL(iwl_init_channel_map); 567EXPORT_SYMBOL(iwl_init_channel_map);
521 568
522/* 569/*
523 * iwl_free_channel_map - undo allocations in iwl4965_init_channel_map 570 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
524 */ 571 */
525void iwl_free_channel_map(struct iwl_priv *priv) 572void iwl_free_channel_map(struct iwl_priv *priv)
526{ 573{
527 kfree(priv->channel_info); 574 kfree(priv->channel_info);
528 priv->channel_count = 0; 575 priv->channel_count = 0;
529} 576}
530EXPORT_SYMBOL(iwl_free_channel_map);
531 577
532/** 578/**
533 * iwl_get_channel_info - Find driver's private channel info 579 * iwl_get_channel_info - Find driver's private channel info
534 * 580 *
535 * Based on band and channel number. 581 * Based on band and channel number.
536 */ 582 */
537const struct iwl_channel_info *iwl_get_channel_info( 583const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
538 const struct iwl_priv *priv, 584 enum ieee80211_band band, u16 channel)
539 enum ieee80211_band band, u16 channel)
540{ 585{
541 int i; 586 int i;
542 587
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index bd0a042ca77f..d3a2a5b4ac56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -106,7 +106,7 @@ enum {
106 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ 106 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
107 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ 107 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
108 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ 108 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
109 EEPROM_CHANNEL_NARROW = (1 << 6), /* 10 MHz channel (not used) */ 109 /* Bit 6 Reserved (was Narrow Channel) */
110 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ 110 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
111}; 111};
112 112
@@ -116,7 +116,7 @@ enum {
116 116
117/* *regulatory* channel data format in eeprom, one for each channel. 117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for FAT (40 MHz) vs. normal (20 MHz) channels. */ 118 * There are separate entries for FAT (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl4965_eeprom_channel { 119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ 120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ 121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __attribute__ ((packed)); 122} __attribute__ ((packed));
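For orientation: each of these two-byte records can be read straight out of the cached EEPROM image. A minimal sketch, assuming the iwl_eeprom_query_addr() accessor and the EEPROM_REGULATORY_BAND_1_CHANNELS offset introduced later in this patch (the wrapper function name is purely illustrative):

/* Illustrative only: fetch the regulatory record of the first 2.4 GHz
 * channel and return its regulatory power limit. */
static s8 example_band1_first_channel_power(struct iwl_priv *priv)
{
	const struct iwl_eeprom_channel *ch =
		(const struct iwl_eeprom_channel *)
		iwl_eeprom_query_addr(priv, EEPROM_REGULATORY_BAND_1_CHANNELS);

	if (!(ch->flags & EEPROM_CHANNEL_VALID))
		return 0;			/* channel not usable */
	return ch->max_power_avg;		/* dBm, limit 31 */
}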
@@ -131,17 +131,55 @@ struct iwl4965_eeprom_channel {
131 * each of 3 target output levels */ 131 * each of 3 target output levels */
132#define EEPROM_TX_POWER_MEASUREMENTS (3) 132#define EEPROM_TX_POWER_MEASUREMENTS (3)
133 133
134#define EEPROM_4965_TX_POWER_VERSION (2) 134/* 4965 Specific */
135/* 4965 driver does not work with txpower calibration version < 5 */
136#define EEPROM_4965_TX_POWER_VERSION (5)
137#define EEPROM_4965_EEPROM_VERSION (0x2f)
138#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
139#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
140#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
141#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
142
143/* 5000 Specific */
144#define EEPROM_5000_TX_POWER_VERSION (4)
145#define EEPROM_5000_EEPROM_VERSION (0x11A)
146
147/*5000 calibrations */
148#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
149#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL)
150
151/* 5000 links */
152#define EEPROM_5000_LINK_HOST (2*0x64)
153#define EEPROM_5000_LINK_GENERAL (2*0x65)
154#define EEPROM_5000_LINK_REGULATORY (2*0x66)
155#define EEPROM_5000_LINK_CALIBRATION (2*0x67)
156#define EEPROM_5000_LINK_PROCESS_ADJST (2*0x68)
157#define EEPROM_5000_LINK_OTHERS (2*0x69)
158
159/* 5000 regulatory - indirect access */
160#define EEPROM_5000_REG_SKU_ID ((0x02)\
161 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */
162#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\
163 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
164#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\
165 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
166#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\
167 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
168#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\
169 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
170#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\
171 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
172#define EEPROM_5000_REG_BAND_24_FAT_CHANNELS ((0x82)\
173 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
174#define EEPROM_5000_REG_BAND_52_FAT_CHANNELS ((0x92)\
175 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
135 176
136/* 4965 driver does not work with txpower calibration version < 5.
137 * Look for this in calib_version member of struct iwl4965_eeprom. */
138#define EEPROM_TX_POWER_VERSION_NEW (5)
139 177
140/* 2.4 GHz */ 178/* 2.4 GHz */
141extern const u8 iwl_eeprom_band_1[14]; 179extern const u8 iwl_eeprom_band_1[14];
142 180
143/* 181/*
144 * 4965 factory calibration data for one txpower level, on one channel, 182 * factory calibration data for one txpower level, on one channel,
145 * measured on one of the 2 tx chains (radio transmitter and associated 183 * measured on one of the 2 tx chains (radio transmitter and associated
146 * antenna). EEPROM contains: 184 * antenna). EEPROM contains:
147 * 185 *
@@ -154,7 +192,7 @@ extern const u8 iwl_eeprom_band_1[14];
154 * 192 *
155 * 4) RF power amplifier detector level measurement (not used). 193 * 4) RF power amplifier detector level measurement (not used).
156 */ 194 */
157struct iwl4965_eeprom_calib_measure { 195struct iwl_eeprom_calib_measure {
158 u8 temperature; /* Device temperature (Celsius) */ 196 u8 temperature; /* Device temperature (Celsius) */
159 u8 gain_idx; /* Index into gain table */ 197 u8 gain_idx; /* Index into gain table */
160 u8 actual_pow; /* Measured RF output power, half-dBm */ 198 u8 actual_pow; /* Measured RF output power, half-dBm */
@@ -163,22 +201,22 @@ struct iwl4965_eeprom_calib_measure {
163 201
164 202
165/* 203/*
166 * 4965 measurement set for one channel. EEPROM contains: 204 * measurement set for one channel. EEPROM contains:
167 * 205 *
168 * 1) Channel number measured 206 * 1) Channel number measured
169 * 207 *
170 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters 208 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
171 * (a.k.a. "tx chains") (6 measurements altogether) 209 * (a.k.a. "tx chains") (6 measurements altogether)
172 */ 210 */
173struct iwl4965_eeprom_calib_ch_info { 211struct iwl_eeprom_calib_ch_info {
174 u8 ch_num; 212 u8 ch_num;
175 struct iwl4965_eeprom_calib_measure 213 struct iwl_eeprom_calib_measure
176 measurements[EEPROM_TX_POWER_TX_CHAINS] 214 measurements[EEPROM_TX_POWER_TX_CHAINS]
177 [EEPROM_TX_POWER_MEASUREMENTS]; 215 [EEPROM_TX_POWER_MEASUREMENTS];
178} __attribute__ ((packed)); 216} __attribute__ ((packed));
179 217
180/* 218/*
181 * 4965 txpower subband info. 219 * txpower subband info.
182 * 220 *
183 * For each frequency subband, EEPROM contains the following: 221 * For each frequency subband, EEPROM contains the following:
184 * 222 *
@@ -187,16 +225,16 @@ struct iwl4965_eeprom_calib_ch_info {
187 * 225 *
188 * 2) Sample measurement sets for 2 channels close to the range endpoints. 226 * 2) Sample measurement sets for 2 channels close to the range endpoints.
189 */ 227 */
190struct iwl4965_eeprom_calib_subband_info { 228struct iwl_eeprom_calib_subband_info {
191 u8 ch_from; /* channel number of lowest channel in subband */ 229 u8 ch_from; /* channel number of lowest channel in subband */
192 u8 ch_to; /* channel number of highest channel in subband */ 230 u8 ch_to; /* channel number of highest channel in subband */
193 struct iwl4965_eeprom_calib_ch_info ch1; 231 struct iwl_eeprom_calib_ch_info ch1;
194 struct iwl4965_eeprom_calib_ch_info ch2; 232 struct iwl_eeprom_calib_ch_info ch2;
195} __attribute__ ((packed)); 233} __attribute__ ((packed));
196 234
197 235
198/* 236/*
199 * 4965 txpower calibration info. EEPROM contains: 237 * txpower calibration info. EEPROM contains:
200 * 238 *
201 * 1) Factory-measured saturation power levels (maximum levels at which 239 * 1) Factory-measured saturation power levels (maximum levels at which
202 * tx power amplifier can output a signal without too much distortion). 240 * tx power amplifier can output a signal without too much distortion).
@@ -212,55 +250,58 @@ struct iwl4965_eeprom_calib_subband_info {
212 * characteristics of the analog radio circuitry vary with frequency. 250 * characteristics of the analog radio circuitry vary with frequency.
213 * 251 *
214 * Not all sets need to be filled with data; 252 * Not all sets need to be filled with data;
215 * struct iwl4965_eeprom_calib_subband_info contains range of channels 253 * struct iwl_eeprom_calib_subband_info contains range of channels
216 * (0 if unused) for each set of data. 254 * (0 if unused) for each set of data.
217 */ 255 */
218struct iwl4965_eeprom_calib_info { 256struct iwl_eeprom_calib_info {
219 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ 257 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
220 u8 saturation_power52; /* half-dBm */ 258 u8 saturation_power52; /* half-dBm */
221 s16 voltage; /* signed */ 259 s16 voltage; /* signed */
222 struct iwl4965_eeprom_calib_subband_info 260 struct iwl_eeprom_calib_subband_info
223 band_info[EEPROM_TX_POWER_BANDS]; 261 band_info[EEPROM_TX_POWER_BANDS];
224} __attribute__ ((packed)); 262} __attribute__ ((packed));
225 263
226 264
227 265#define ADDRESS_MSK 0x0000FFFF
228/* 266#define INDIRECT_TYPE_MSK 0x000F0000
229 * 4965 EEPROM map 267#define INDIRECT_HOST 0x00010000
230 */ 268#define INDIRECT_GENERAL 0x00020000
231struct iwl4965_eeprom { 269#define INDIRECT_REGULATORY 0x00030000
232 u8 reserved0[16]; 270#define INDIRECT_CALIBRATION 0x00040000
233 u16 device_id; /* abs.ofs: 16 */ 271#define INDIRECT_PROCESS_ADJST 0x00050000
234 u8 reserved1[2]; 272#define INDIRECT_OTHERS 0x00060000
235 u16 pmc; /* abs.ofs: 20 */ 273#define INDIRECT_ADDRESS 0x00100000
236 u8 reserved2[20]; 274
237 u8 mac_address[6]; /* abs.ofs: 42 */ 275/* General */
238 u8 reserved3[58]; 276#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
239 u16 board_revision; /* abs.ofs: 106 */ 277#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
240 u8 reserved4[11]; 278#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
241 u8 board_pba_number[9]; /* abs.ofs: 119 */ 279#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
242 u8 reserved5[8]; 280#define EEPROM_VERSION (2*0x44) /* 2 bytes */
243 u16 version; /* abs.ofs: 136 */ 281#define EEPROM_SKU_CAP (2*0x45) /* 1 byte */
244 u8 sku_cap; /* abs.ofs: 138 */ 282#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 byte */
245 u8 leds_mode; /* abs.ofs: 139 */ 283#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
246 u16 oem_mode; 284#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
247 u16 wowlan_mode; /* abs.ofs: 142 */ 285#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
248 u16 leds_time_interval; /* abs.ofs: 144 */ 286#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 byte */
249 u8 leds_off_time; /* abs.ofs: 146 */ 287#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 byte */
250 u8 leds_on_time; /* abs.ofs: 147 */ 288
251 u8 almgor_m_version; /* abs.ofs: 148 */ 289/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
252 u8 antenna_switch_type; /* abs.ofs: 149 */ 290#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
253 u8 reserved6[8]; 291#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
254 u16 board_revision_4965; /* abs.ofs: 158 */ 292#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
255 u8 reserved7[13]; 293#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
256 u8 board_pba_number_4965[9]; /* abs.ofs: 173 */ 294#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
257 u8 reserved8[10]; 295#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
258 u8 sku_id[4]; /* abs.ofs: 192 */ 296
297#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
298#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
299#define EEPROM_5000_RF_CFG_TYPE_MAX 0x3
259 300
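To show how the EEPROM_RADIO_CONFIG masks above combine with the new iwl_eeprom_query16() accessor, here is a hedged sketch; the function name and the debug messages are illustrative, not part of the patch:

/* Sketch: decode the radio-configuration word from EEPROM. */
static void example_parse_radio_cfg(struct iwl_priv *priv)
{
	u16 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
	u8 rf_type  = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);   /* radio type    */
	u8 valid_tx = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); /* Tx chain mask */
	u8 valid_rx = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); /* Rx chain mask */

	if (rf_type > EEPROM_4965_RF_CFG_TYPE_MAX)
		IWL_ERROR("unsupported radio type 0x%x\n", rf_type);
	IWL_DEBUG_INFO("valid Tx ant 0x%x, valid Rx ant 0x%x\n",
		       valid_tx, valid_rx);
}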
260/* 301/*
261 * Per-channel regulatory data. 302 * Per-channel regulatory data.
262 * 303 *
263 * Each channel that *might* be supported by 3945 or 4965 has a fixed location 304 * Each channel that *might* be supported by iwl has a fixed location
264 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory 305 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
265 * txpower (MSB). 306 * txpower (MSB).
266 * 307 *
@@ -269,40 +310,38 @@ struct iwl4965_eeprom {
269 * 310 *
270 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 311 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
271 */ 312 */
272 u16 band_1_count; /* abs.ofs: 196 */ 313#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
273 struct iwl4965_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */ 314#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
315#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
274 316
275/* 317/*
276 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, 318 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
277 * 5.0 GHz channels 7, 8, 11, 12, 16 319 * 5.0 GHz channels 7, 8, 11, 12, 16
278 * (4915-5080MHz) (none of these is ever supported) 320 * (4915-5080MHz) (none of these is ever supported)
279 */ 321 */
280 u16 band_2_count; /* abs.ofs: 226 */ 322#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
281 struct iwl4965_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ 323#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
282 324
283/* 325/*
284 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 326 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
285 * (5170-5320MHz) 327 * (5170-5320MHz)
286 */ 328 */
287 u16 band_3_count; /* abs.ofs: 254 */ 329#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
288 struct iwl4965_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ 330#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
289 331
290/* 332/*
291 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 333 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
292 * (5500-5700MHz) 334 * (5500-5700MHz)
293 */ 335 */
294 u16 band_4_count; /* abs.ofs: 280 */ 336#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
295 struct iwl4965_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ 337#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
296 338
297/* 339/*
298 * 5.7 GHz channels 145, 149, 153, 157, 161, 165 340 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
299 * (5725-5825MHz) 341 * (5725-5825MHz)
300 */ 342 */
301 u16 band_5_count; /* abs.ofs: 304 */ 343#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
302 struct iwl4965_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ 344#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
303
304 u8 reserved10[2];
305
306 345
307/* 346/*
308 * 2.4 GHz FAT channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) 347 * 2.4 GHz FAT channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
@@ -319,52 +358,35 @@ struct iwl4965_eeprom {
319 * 358 *
320 * NOTE: 4965 does not support FAT channels on 2.4 GHz. 359 * NOTE: 4965 does not support FAT channels on 2.4 GHz.
321 */ 360 */
322 struct iwl4965_eeprom_channel band_24_channels[7]; /* abs.ofs: 320 */ 361#define EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS (2*0xA0) /* 14 bytes */
323 u8 reserved11[2];
324 362
325/* 363/*
326 * 5.2 GHz FAT channels 36 (40), 44 (48), 52 (56), 60 (64), 364 * 5.2 GHz FAT channels 36 (40), 44 (48), 52 (56), 60 (64),
327 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) 365 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
328 */ 366 */
329 struct iwl4965_eeprom_channel band_52_channels[11]; /* abs.ofs: 336 */ 367#define EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS (2*0xA8) /* 22 bytes */
330 u8 reserved12[6];
331
332/*
333 * 4965 driver requires txpower calibration format version 5 or greater.
334 * Driver does not work with txpower calibration version < 5.
335 * This value is simply a 16-bit number, no major/minor versions here.
336 */
337 u16 calib_version; /* abs.ofs: 364 */
338 u8 reserved13[2];
339 u8 reserved14[96]; /* abs.ofs: 368 */
340
341/*
342 * 4965 Txpower calibration data.
343 */
344 struct iwl4965_eeprom_calib_info calib_info; /* abs.ofs: 464 */
345
346 u8 reserved16[140]; /* fill out to full 1024 byte block */
347
348
349} __attribute__ ((packed));
350
351#define IWL_EEPROM_IMAGE_SIZE 1024
352
353/* End of EEPROM */
354 368
355struct iwl_eeprom_ops { 369struct iwl_eeprom_ops {
370 const u32 regulatory_bands[7];
356 int (*verify_signature) (struct iwl_priv *priv); 371 int (*verify_signature) (struct iwl_priv *priv);
357 int (*acquire_semaphore) (struct iwl_priv *priv); 372 int (*acquire_semaphore) (struct iwl_priv *priv);
358 void (*release_semaphore) (struct iwl_priv *priv); 373 void (*release_semaphore) (struct iwl_priv *priv);
374 int (*check_version) (struct iwl_priv *priv);
375 const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset);
359}; 376};
360 377
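To make the ops split concrete, a 4965-family driver would be expected to fill this table roughly as below. The iwlcore_* helpers and the regulatory offsets are the ones declared in this header; iwl4965_eeprom_check_version() stands in for a device-specific hook and is an assumption here:

static struct iwl_eeprom_ops iwl4965_eeprom_ops = {
	.regulatory_bands = {
		EEPROM_REGULATORY_BAND_1_CHANNELS,
		EEPROM_REGULATORY_BAND_2_CHANNELS,
		EEPROM_REGULATORY_BAND_3_CHANNELS,
		EEPROM_REGULATORY_BAND_4_CHANNELS,
		EEPROM_REGULATORY_BAND_5_CHANNELS,
		EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
		EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
	},
	.verify_signature  = iwlcore_eeprom_verify_signature,
	.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
	.release_semaphore = iwlcore_eeprom_release_semaphore,
	.check_version     = iwl4965_eeprom_check_version,	/* device hook */
	.query_addr        = iwlcore_eeprom_query_addr,
};

iwl_init_band_reference() (see iwl-eeprom.c above) indexes regulatory_bands[eep_band - 1], so the seven entries must stay in band order 1..5 followed by the two FAT tables.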
361 378
362void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); 379void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
363int iwl_eeprom_init(struct iwl_priv *priv); 380int iwl_eeprom_init(struct iwl_priv *priv);
381void iwl_eeprom_free(struct iwl_priv *priv);
382int iwl_eeprom_check_version(struct iwl_priv *priv);
383const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
384u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
364 385
365int iwlcore_eeprom_verify_signature(struct iwl_priv *priv); 386int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
366int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); 387int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
367void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); 388void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
389const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
368 390
369int iwl_init_channel_map(struct iwl_priv *priv); 391int iwl_init_channel_map(struct iwl_priv *priv);
370void iwl_free_channel_map(struct iwl_priv *priv); 392void iwl_free_channel_map(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
new file mode 100644
index 000000000000..944642450d3d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -0,0 +1,391 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64/****************************/
65/* Flow Handler Definitions */
66/****************************/
67
68/**
69 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
70 * Addresses are offsets from device's PCI hardware base address.
71 */
72#define FH_MEM_LOWER_BOUND (0x1000)
73#define FH_MEM_UPPER_BOUND (0x1EF0)
74
75/**
76 * Keep-Warm (KW) buffer base address.
77 *
78 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
79 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
80 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
81 * from going into a power-savings mode that would cause higher DRAM latency,
82 * and possible data over/under-runs, before all Tx/Rx is complete.
83 *
84 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
85 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
86 * automatically invokes keep-warm accesses when normal accesses might not
87 * be sufficient to maintain fast DRAM response.
88 *
89 * Bit fields:
90 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
91 */
92#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
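Programming this register reduces to one write of the 4K-aligned bus address shifted down by 4 bits. A minimal sketch, assuming the driver's iwl_write_direct32() I/O helper and a dma_addr field in the keep-warm descriptor (both assumptions, not shown in this patch):

/* Sketch: hand the 4 KB keep-warm buffer to the device. */
static void example_kw_init(struct iwl_priv *priv)
{
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
			   priv->kw.dma_addr >> 4);	/* bits 35:4 */
}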
93
94
95/**
96 * TFD Circular Buffers Base (CBBC) addresses
97 *
98 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
99 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
100 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
101 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
102 * aligned (address bits 0-7 must be 0).
103 *
104 * Bit fields in each pointer register:
105 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
106 */
107#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
108#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
109
110/* Find TFD CB base pointer for given queue (range 0-15). */
111#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
112
113
114/**
115 * Rx SRAM Control and Status Registers (RSCSR)
116 *
117 * These registers provide handshake between driver and 4965 for the Rx queue
118 * (this queue handles *all* command responses, notifications, Rx data, etc.
119 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
120 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
121 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
122 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
123 * mapping between RBDs and RBs.
124 *
125 * Driver must allocate host DRAM memory for the following, and set the
126 * physical address of each into 4965 registers:
127 *
128 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
129 * entries (although any power of 2, up to 4096, is selectable by driver).
130 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
131 * (typically 4K, although 8K or 16K are also selectable by driver).
132 * Driver sets up RB size and number of RBDs in the CB via Rx config
133 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
134 *
135 * Bit fields within one RBD:
136 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
137 *
138 * Driver sets physical address [35:8] of base of RBD circular buffer
139 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
140 *
141 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
142 * (RBs) have been filled, via a "write pointer", actually the index of
143 * the RB's corresponding RBD within the circular buffer. Driver sets
144 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
145 *
146 * Bit fields in lower dword of Rx status buffer (upper dword not used
147 * by driver; see struct iwl4965_shared, val0):
148 * 31-12: Not used by driver
149 * 11- 0: Index of last filled Rx buffer descriptor
150 * (4965 writes, driver reads this value)
151 *
152 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
153 * enter pointers to these RBs into contiguous RBD circular buffer entries,
154 * and update the 4965's "write" index register,
155 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
156 *
157 * This "write" index corresponds to the *next* RBD that the driver will make
158 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
159 * the circular buffer. This value should initially be 0 (before preparing any
160 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
161 * wrap back to 0 at the end of the circular buffer (but don't wrap before
162 * "read" index has advanced past 1! See below).
163 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
164 *
165 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
166 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
167 * to tell the driver the index of the latest filled RBD. The driver must
168 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
169 *
170 * The driver must also internally keep track of a third index, which is the
171 * next RBD to process. When receiving an Rx interrupt, driver should process
172 * all filled but unprocessed RBs up to, but not including, the RB
173 * corresponding to the "read" index. For example, if "read" index becomes "1",
174 * driver may process the RB pointed to by RBD 0. Depending on volume of
175 * traffic, there may be many RBs to process.
176 *
177 * If read index == write index, 4965 thinks there is no room to put new data.
178 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
179 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
180 * and "read" indexes; that is, make sure that there are no more than 254
181 * buffers waiting to be filled.
182 */
183#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
184#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
185#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
186
187/**
188 * Physical base address of 8-byte Rx Status buffer.
189 * Bit fields:
 190 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
191 */
192#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
193
194/**
195 * Physical base address of Rx Buffer Descriptor Circular Buffer.
196 * Bit fields:
 197 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
198 */
199#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
200
201/**
202 * Rx write pointer (index, really!).
203 * Bit fields:
204 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
205 * NOTE: For 256-entry circular buffer, use only bits [7:0].
206 */
207#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
208#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
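Tying the index rules above to this register: after restocking receive buffers the driver publishes its write index rounded down to a multiple of 8. A hedged sketch, assuming iwl_write_direct32() and a 'write' counter in struct iwl_rx_queue (field name assumed):

/* Sketch: publish the Rx write index; hardware expects multiples of 8. */
static void example_rx_queue_update_write_ptr(struct iwl_priv *priv,
					      struct iwl_rx_queue *q)
{
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
}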
209
210
211/**
212 * Rx Config/Status Registers (RCSR)
213 * Rx Config Reg for channel 0 (only channel used)
214 *
215 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
216 * normal operation (see bit fields).
217 *
218 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
219 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
220 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
221 *
222 * Bit fields:
223 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
224 * '10' operate normally
225 * 29-24: reserved
226 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
227 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
228 * 19-18: reserved
229 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
230 * '10' 12K, '11' 16K.
231 * 15-14: reserved
232 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
233 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
234 * typical value 0x10 (about 1/2 msec)
235 * 3- 0: reserved
236 */
237#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
238#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
239#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
240
241#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
242
243#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
 244#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
245#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
246#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
247#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
 248#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
249
250#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20)
251#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT (4)
252#define RX_RB_TIMEOUT (0x10)
253
254#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
255#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
256#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
257
258#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
259#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
262
263#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
264#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
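Putting the fields together, bringing channel 0 up for 256 RBDs of 4 KB each with interrupts routed to the host might look roughly like this. iwl_write_direct32(), the rxq->dma_addr field and the rb_stts_phys status-buffer address are assumptions used only for illustration:

/* Sketch: point the device at the RBD ring and Rx status buffer,
 * then enable Rx DMA on channel 0. */
static void example_rx_hw_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq,
			       dma_addr_t rb_stts_phys)
{
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   rxq->dma_addr >> 8);		/* 256-byte aligned */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rb_stts_phys >> 4);		/* 16-byte aligned */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
			   (8 << FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT) |
			   (RX_RB_TIMEOUT <<
			    FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT));
}

The literal 8 selects 2^8 = 256 RBDs, per the bit-field description above.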
265
266
267/**
268 * Rx Shared Status Registers (RSSR)
269 *
270 * After stopping Rx DMA channel (writing 0 to
271 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
272 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
273 *
274 * Bit fields:
275 * 24: 1 = Channel 0 is idle
276 *
277 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
278 * contain default values that should not be altered by the driver.
279 */
280#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
281#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
282
283#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
284#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
285#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
286 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
287
288#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
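So the Rx-stop sequence described above is one write and one poll. A sketch, assuming the driver's iwl_write_direct32() and iwl_poll_direct_bit() helpers (the 1000 timeout is in whatever units that helper takes, another assumption):

/* Sketch: stop Rx DMA and wait for channel 0 to report idle. */
static int example_rx_stop(struct iwl_priv *priv)
{
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}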
289
290
291/**
292 * Transmit DMA Channel Control/Status Registers (TCSR)
293 *
294 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
295 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
296 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
297 *
298 * To use a Tx DMA channel, driver must initialize its
299 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
300 *
301 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
302 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
303 *
304 * All other bits should be 0.
305 *
306 * Bit fields:
307 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
308 * '10' operate normally
309 * 29- 4: Reserved, set to "0"
310 * 3: Enable internal DMA requests (1, normal operation), disable (0)
311 * 2- 0: Reserved, set to "0"
312 */
313#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
314#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
315
316/* Find Control/Status reg for given Tx DMA/FIFO channel */
317#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
318 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
319
320#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
321#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
322
323#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
324#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
325#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
326
327#define FH_TCSR_CHNL_NUM (7)
328
329#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
330#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
331#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
332
333#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
334#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
335#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
336
337#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
338#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
339#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
340 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
341#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
342 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl + 0x4)
343#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
344 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl + 0x8)
345
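
A one-line sketch of the channel setup just described; 'chnl' and 'base' are
placeholders, and plain writel() again stands in for the driver's own I/O
helpers:

	writel(FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	       FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL,
	       base + FH_TCSR_CHNL_TX_CONFIG_REG(chnl));
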
346/**
347 * Tx Shared Status Registers (TSSR)
348 *
349 * After stopping Tx DMA channel (writing 0 to
350 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
351 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
352 * (channel's buffers empty | no pending requests).
353 *
354 * Bit fields:
355 * 31-24: 1 = Channel buffers empty (channel 7:0)
356 * 23-16: 1 = No pending requests (channel 7:0)
357 */
358#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
359#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
360
361#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
362
363#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24)
364#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
365
366#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \
367 (FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \
368 FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl))
369
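
And the matching stop-and-poll sketch for a Tx channel, under the same
assumptions:

	int timeout = 1000;

	writel(0, base + FH_TCSR_CHNL_TX_CONFIG_REG(chnl));	/* stop channel */
	while (!(readl(base + FH_TSSR_TX_STATUS_REG) &
		 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl)) && --timeout)
		udelay(10);
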
370
371
372#define FH_REGS_LOWER_BOUND (0x1000)
373#define FH_REGS_UPPER_BOUND (0x2000)
374
375/* Tx service channels */
376#define FH_SRVC_CHNL (9)
377#define FH_SRVC_LOWER_BOUND (FH_REGS_LOWER_BOUND + 0x9C8)
378#define FH_SRVC_UPPER_BOUND (FH_REGS_LOWER_BOUND + 0x9D0)
379#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
380 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
381
382/* TFDB Area - TFDs buffer table */
383#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
384#define FH_TFDIB_LOWER_BOUND (FH_REGS_LOWER_BOUND + 0x900)
385#define FH_TFDIB_UPPER_BOUND (FH_REGS_LOWER_BOUND + 0x958)
386#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
387#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
388
 389/* RSCSR: received frame size */
390#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
391
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index fdb27f1cdc08..8fa991b7202a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -31,7 +31,7 @@
31#include <linux/version.h> 31#include <linux/version.h>
32#include <net/mac80211.h> 32#include <net/mac80211.h>
33 33
34#include "iwl-4965.h" /* FIXME: remove */ 34#include "iwl-dev.h" /* FIXME: remove */
35#include "iwl-debug.h" 35#include "iwl-debug.h"
36#include "iwl-eeprom.h" 36#include "iwl-eeprom.h"
37#include "iwl-core.h" 37#include "iwl-core.h"
@@ -56,6 +56,7 @@ const char *get_cmd_string(u8 cmd)
56 IWL_CMD(REPLY_RATE_SCALE); 56 IWL_CMD(REPLY_RATE_SCALE);
57 IWL_CMD(REPLY_LEDS_CMD); 57 IWL_CMD(REPLY_LEDS_CMD);
58 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); 58 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
59 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
59 IWL_CMD(RADAR_NOTIFICATION); 60 IWL_CMD(RADAR_NOTIFICATION);
60 IWL_CMD(REPLY_QUIET_CMD); 61 IWL_CMD(REPLY_QUIET_CMD);
61 IWL_CMD(REPLY_CHANNEL_SWITCH); 62 IWL_CMD(REPLY_CHANNEL_SWITCH);
@@ -89,6 +90,10 @@ const char *get_cmd_string(u8 cmd)
89 IWL_CMD(REPLY_RX_MPDU_CMD); 90 IWL_CMD(REPLY_RX_MPDU_CMD);
90 IWL_CMD(REPLY_RX); 91 IWL_CMD(REPLY_RX);
91 IWL_CMD(REPLY_COMPRESSED_BA); 92 IWL_CMD(REPLY_COMPRESSED_BA);
93 IWL_CMD(CALIBRATION_CFG_CMD);
94 IWL_CMD(CALIBRATION_RES_NOTIFICATION);
95 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
96 IWL_CMD(REPLY_TX_POWER_DBM_CMD);
92 default: 97 default:
93 return "UNKNOWN"; 98 return "UNKNOWN";
94 99
@@ -101,7 +106,7 @@ EXPORT_SYMBOL(get_cmd_string);
101static int iwl_generic_cmd_callback(struct iwl_priv *priv, 106static int iwl_generic_cmd_callback(struct iwl_priv *priv,
102 struct iwl_cmd *cmd, struct sk_buff *skb) 107 struct iwl_cmd *cmd, struct sk_buff *skb)
103{ 108{
104 struct iwl4965_rx_packet *pkt = NULL; 109 struct iwl_rx_packet *pkt = NULL;
105 110
106 if (!skb) { 111 if (!skb) {
107 IWL_ERROR("Error: Response NULL in %s.\n", 112 IWL_ERROR("Error: Response NULL in %s.\n",
@@ -109,7 +114,7 @@ static int iwl_generic_cmd_callback(struct iwl_priv *priv,
109 return 1; 114 return 1;
110 } 115 }
111 116
112 pkt = (struct iwl4965_rx_packet *)skb->data; 117 pkt = (struct iwl_rx_packet *)skb->data;
113 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 118 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
114 IWL_ERROR("Bad return from %s (0x%08X)\n", 119 IWL_ERROR("Bad return from %s (0x%08X)\n",
115 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 120 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
@@ -139,7 +144,7 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
139 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 144 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
140 return -EBUSY; 145 return -EBUSY;
141 146
142 ret = priv->cfg->ops->utils->enqueue_hcmd(priv, cmd); 147 ret = iwl_enqueue_hcmd(priv, cmd);
143 if (ret < 0) { 148 if (ret < 0) {
144 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n", 149 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n",
145 get_cmd_string(cmd->id), ret); 150 get_cmd_string(cmd->id), ret);
@@ -170,7 +175,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
170 if (cmd->meta.flags & CMD_WANT_SKB) 175 if (cmd->meta.flags & CMD_WANT_SKB)
171 cmd->meta.source = &cmd->meta; 176 cmd->meta.source = &cmd->meta;
172 177
173 cmd_idx = priv->cfg->ops->utils->enqueue_hcmd(priv, cmd); 178 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
174 if (cmd_idx < 0) { 179 if (cmd_idx < 0) {
175 ret = cmd_idx; 180 ret = cmd_idx;
176 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n", 181 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index a443472bea62..41eed6793328 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -136,8 +136,8 @@ static inline void iwl_set_bits16(__le16 *dst, u8 pos, u8 len, int val)
136 136
137#define KELVIN_TO_CELSIUS(x) ((x)-273) 137#define KELVIN_TO_CELSIUS(x) ((x)-273)
138#define CELSIUS_TO_KELVIN(x) ((x)+273) 138#define CELSIUS_TO_KELVIN(x) ((x)+273)
139#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
139 140
140#define IEEE80211_CHAN_W_RADAR_DETECT 0x00000010
141 141
142static inline struct ieee80211_conf *ieee80211_get_hw_conf( 142static inline struct ieee80211_conf *ieee80211_get_hw_conf(
143 struct ieee80211_hw *hw) 143 struct ieee80211_hw *hw)
@@ -145,96 +145,6 @@ static inline struct ieee80211_conf *ieee80211_get_hw_conf(
145 return &hw->conf; 145 return &hw->conf;
146} 146}
147 147
148#define QOS_CONTROL_LEN 2
149
150
151static inline int ieee80211_is_management(u16 fc)
152{
153 return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT;
154}
155
156static inline int ieee80211_is_control(u16 fc)
157{
158 return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL;
159}
160
161static inline int ieee80211_is_data(u16 fc)
162{
163 return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA;
164}
165
166static inline int ieee80211_is_back_request(u16 fc)
167{
168 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) &&
169 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ);
170}
171
172static inline int ieee80211_is_probe_response(u16 fc)
173{
174 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
175 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP);
176}
177
178static inline int ieee80211_is_probe_request(u16 fc)
179{
180 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
181 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_REQ);
182}
183
184static inline int ieee80211_is_beacon(u16 fc)
185{
186 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
187 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON);
188}
189
190static inline int ieee80211_is_atim(u16 fc)
191{
192 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
193 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ATIM);
194}
195
196static inline int ieee80211_is_assoc_request(u16 fc)
197{
198 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
199 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ);
200}
201
202static inline int ieee80211_is_assoc_response(u16 fc)
203{
204 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
205 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_RESP);
206}
207
208static inline int ieee80211_is_auth(u16 fc)
209{
210 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
211 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ);
212}
213
214static inline int ieee80211_is_deauth(u16 fc)
215{
216 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
217 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ);
218}
219
220static inline int ieee80211_is_disassoc(u16 fc)
221{
222 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
223 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ);
224}
225
226static inline int ieee80211_is_reassoc_request(u16 fc)
227{
228 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
229 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ);
230}
231
232static inline int ieee80211_is_reassoc_response(u16 fc)
233{
234 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
235 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_RESP);
236}
237
238static inline int iwl_check_bits(unsigned long field, unsigned long mask) 148static inline int iwl_check_bits(unsigned long field, unsigned long mask)
239{ 149{
240 return ((field & mask) == mask) ? 1 : 0; 150 return ((field & mask) == mask) ? 1 : 0;
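
For reference, the IWL_MASK() macro added above sets every bit from 'lo' to
'hi' inclusive; a worked example:

	IWL_MASK(4, 11) == (1 << 11) | ((1 << 11) - (1 << 4))
	                == 0x800 | 0x7F0
	                == 0xFF0	/* bits 4..11, cf. the RB_TIMEOUT mask in iwl-fh.h */
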
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 03fdf5b434a1..899d7a2567a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -39,19 +39,26 @@
39#include <linux/etherdevice.h> 39#include <linux/etherdevice.h>
40#include <asm/unaligned.h> 40#include <asm/unaligned.h>
41 41
42#include "iwl-4965.h" 42#include "iwl-dev.h"
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46 46
47#define IWL_1MB_RATE (128 * 1024) 47#ifdef CONFIG_IWLWIFI_DEBUG
48#define IWL_LED_THRESHOLD (16) 48static const char *led_type_str[] = {
49#define IWL_MAX_BLINK_TBL (10) 49 __stringify(IWL_LED_TRG_TX),
50 __stringify(IWL_LED_TRG_RX),
51 __stringify(IWL_LED_TRG_ASSOC),
52 __stringify(IWL_LED_TRG_RADIO),
53 NULL
54};
55#endif /* CONFIG_IWLWIFI_DEBUG */
56
50 57
51static const struct { 58static const struct {
52 u16 tpt; 59 u16 tpt;
53 u8 on_time; 60 u8 on_time;
54 u8 of_time; 61 u8 off_time;
55} blink_tbl[] = 62} blink_tbl[] =
56{ 63{
57 {300, 25, 25}, 64 {300, 25, 25},
@@ -63,26 +70,31 @@ static const struct {
63 {15, 95, 95 }, 70 {15, 95, 95 },
64 {10, 110, 110}, 71 {10, 110, 110},
65 {5, 130, 130}, 72 {5, 130, 130},
66 {0, 167, 167} 73 {0, 167, 167},
74/* SOLID_ON */
75 {-1, IWL_LED_SOLID, 0}
67}; 76};
68 77
69static int iwl_led_cmd_callback(struct iwl_priv *priv, 78#define IWL_1MB_RATE (128 * 1024)
70 struct iwl_cmd *cmd, struct sk_buff *skb) 79#define IWL_LED_THRESHOLD (16)
80#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
81#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
82
83/* [0-256] -> [0..8] FIXME: we need [0..10] */
84static inline int iwl_brightness_to_idx(enum led_brightness brightness)
71{ 85{
72 return 1; 86 return fls(0x000000FF & (u32)brightness);
73} 87}
74 88
75
76/* Send led command */ 89/* Send led command */
77static int iwl_send_led_cmd(struct iwl_priv *priv, 90static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
78 struct iwl4965_led_cmd *led_cmd)
79{ 91{
80 struct iwl_host_cmd cmd = { 92 struct iwl_host_cmd cmd = {
81 .id = REPLY_LEDS_CMD, 93 .id = REPLY_LEDS_CMD,
82 .len = sizeof(struct iwl4965_led_cmd), 94 .len = sizeof(struct iwl_led_cmd),
83 .data = led_cmd, 95 .data = led_cmd,
84 .meta.flags = CMD_ASYNC, 96 .meta.flags = CMD_ASYNC,
85 .meta.u.callback = iwl_led_cmd_callback 97 .meta.u.callback = NULL,
86 }; 98 };
87 u32 reg; 99 u32 reg;
88 100
@@ -93,33 +105,20 @@ static int iwl_send_led_cmd(struct iwl_priv *priv,
93 return iwl_send_cmd(priv, &cmd); 105 return iwl_send_cmd(priv, &cmd);
94} 106}
95 107
96 108/* Set led pattern command */
97/* Set led on command */
98static int iwl4965_led_on(struct iwl_priv *priv, int led_id)
99{
100 struct iwl4965_led_cmd led_cmd = {
101 .id = led_id,
102 .on = IWL_LED_SOLID,
103 .off = 0,
104 .interval = IWL_DEF_LED_INTRVL
105 };
106 return iwl_send_led_cmd(priv, &led_cmd);
107}
108
109/* Set led on command */
110static int iwl4965_led_pattern(struct iwl_priv *priv, int led_id, 109static int iwl4965_led_pattern(struct iwl_priv *priv, int led_id,
111 enum led_brightness brightness) 110 unsigned int idx)
112{ 111{
113 struct iwl4965_led_cmd led_cmd = { 112 struct iwl_led_cmd led_cmd = {
114 .id = led_id, 113 .id = led_id,
115 .on = brightness,
116 .off = brightness,
117 .interval = IWL_DEF_LED_INTRVL 114 .interval = IWL_DEF_LED_INTRVL
118 }; 115 };
119 if (brightness == LED_FULL) { 116
120 led_cmd.on = IWL_LED_SOLID; 117 BUG_ON(idx > IWL_MAX_BLINK_TBL);
121 led_cmd.off = 0; 118
122 } 119 led_cmd.on = blink_tbl[idx].on_time;
120 led_cmd.off = blink_tbl[idx].off_time;
121
123 return iwl_send_led_cmd(priv, &led_cmd); 122 return iwl_send_led_cmd(priv, &led_cmd);
124} 123}
125 124
@@ -132,10 +131,22 @@ static int iwl4965_led_on_reg(struct iwl_priv *priv, int led_id)
132} 131}
133 132
134#if 0 133#if 0
134/* Set led on command */
135static int iwl4965_led_on(struct iwl_priv *priv, int led_id)
136{
137 struct iwl_led_cmd led_cmd = {
138 .id = led_id,
139 .on = IWL_LED_SOLID,
140 .off = 0,
141 .interval = IWL_DEF_LED_INTRVL
142 };
143 return iwl_send_led_cmd(priv, &led_cmd);
144}
145
135/* Set led off command */ 146/* Set led off command */
136int iwl4965_led_off(struct iwl_priv *priv, int led_id) 147int iwl4965_led_off(struct iwl_priv *priv, int led_id)
137{ 148{
138 struct iwl4965_led_cmd led_cmd = { 149 struct iwl_led_cmd led_cmd = {
139 .id = led_id, 150 .id = led_id,
140 .on = 0, 151 .on = 0,
141 .off = 0, 152 .off = 0,
@@ -155,25 +166,10 @@ static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id)
155 return 0; 166 return 0;
156} 167}
157 168
158/* Set led blink command */
159static int iwl4965_led_not_solid(struct iwl_priv *priv, int led_id,
160 u8 brightness)
161{
162 struct iwl4965_led_cmd led_cmd = {
163 .id = led_id,
164 .on = brightness,
165 .off = brightness,
166 .interval = IWL_DEF_LED_INTRVL
167 };
168
169 return iwl_send_led_cmd(priv, &led_cmd);
170}
171
172
173/* 169/*
174 * brightness call back function for Tx/Rx LED 170 * brightness call back function for Tx/Rx LED
175 */ 171 */
176static int iwl4965_led_associated(struct iwl_priv *priv, int led_id) 172static int iwl_led_associated(struct iwl_priv *priv, int led_id)
177{ 173{
178 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 174 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
179 !test_bit(STATUS_READY, &priv->status)) 175 !test_bit(STATUS_READY, &priv->status))
@@ -189,16 +185,18 @@ static int iwl4965_led_associated(struct iwl_priv *priv, int led_id)
189/* 185/*
190 * brightness call back for association and radio 186 * brightness call back for association and radio
191 */ 187 */
192static void iwl4965_led_brightness_set(struct led_classdev *led_cdev, 188static void iwl_led_brightness_set(struct led_classdev *led_cdev,
193 enum led_brightness brightness) 189 enum led_brightness brightness)
194{ 190{
195 struct iwl4965_led *led = container_of(led_cdev, 191 struct iwl_led *led = container_of(led_cdev, struct iwl_led, led_dev);
196 struct iwl4965_led, led_dev);
197 struct iwl_priv *priv = led->priv; 192 struct iwl_priv *priv = led->priv;
198 193
199 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 194 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
200 return; 195 return;
201 196
197
198 IWL_DEBUG_LED("Led type = %s brightness = %d\n",
199 led_type_str[led->type], brightness);
202 switch (brightness) { 200 switch (brightness) {
203 case LED_FULL: 201 case LED_FULL:
204 if (led->type == IWL_LED_TRG_ASSOC) 202 if (led->type == IWL_LED_TRG_ASSOC)
@@ -215,8 +213,10 @@ static void iwl4965_led_brightness_set(struct led_classdev *led_cdev,
215 led->led_off(priv, IWL_LED_LINK); 213 led->led_off(priv, IWL_LED_LINK);
216 break; 214 break;
217 default: 215 default:
218 if (led->led_pattern) 216 if (led->led_pattern) {
219 led->led_pattern(priv, IWL_LED_LINK, brightness); 217 int idx = iwl_brightness_to_idx(brightness);
218 led->led_pattern(priv, IWL_LED_LINK, idx);
219 }
220 break; 220 break;
221 } 221 }
222} 222}
@@ -226,8 +226,7 @@ static void iwl4965_led_brightness_set(struct led_classdev *led_cdev,
226/* 226/*
227 * Register led class with the system 227 * Register led class with the system
228 */ 228 */
229static int iwl_leds_register_led(struct iwl_priv *priv, 229static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led,
230 struct iwl4965_led *led,
231 enum led_type type, u8 set_led, 230 enum led_type type, u8 set_led,
232 const char *name, char *trigger) 231 const char *name, char *trigger)
233{ 232{
@@ -235,7 +234,7 @@ static int iwl_leds_register_led(struct iwl_priv *priv,
235 int ret; 234 int ret;
236 235
237 led->led_dev.name = name; 236 led->led_dev.name = name;
238 led->led_dev.brightness_set = iwl4965_led_brightness_set; 237 led->led_dev.brightness_set = iwl_led_brightness_set;
239 led->led_dev.default_trigger = trigger; 238 led->led_dev.default_trigger = trigger;
240 239
241 led->priv = priv; 240 led->priv = priv;
@@ -259,32 +258,28 @@ static int iwl_leds_register_led(struct iwl_priv *priv,
259/* 258/*
260 * calculate blink rate according to last 2 sec Tx/Rx activities 259 * calculate blink rate according to last 2 sec Tx/Rx activities
261 */ 260 */
262static inline u8 get_blink_rate(struct iwl_priv *priv) 261static int iwl_get_blink_rate(struct iwl_priv *priv)
263{ 262{
264 int i; 263 int i;
265 u8 blink_rate; 264 u64 current_tpt = priv->tx_stats[2].bytes;
266 u64 current_tpt = priv->tx_stats[2].bytes + priv->rx_stats[2].bytes; 265 /* FIXME: + priv->rx_stats[2].bytes; */
267 s64 tpt = current_tpt - priv->led_tpt; 266 s64 tpt = current_tpt - priv->led_tpt;
268 267
269 if (tpt < 0) /* wrapparound */ 268 if (tpt < 0) /* wrapparound */
270 tpt = -tpt; 269 tpt = -tpt;
271 270
271 IWL_DEBUG_LED("tpt %lld current_tpt %lld\n", tpt, current_tpt);
272 priv->led_tpt = current_tpt; 272 priv->led_tpt = current_tpt;
273 273
274 if (tpt < IWL_LED_THRESHOLD) { 274 if (!priv->allow_blinking)
275 i = IWL_MAX_BLINK_TBL; 275 i = IWL_MAX_BLINK_TBL;
276 } else { 276 else
277 for (i = 0; i < IWL_MAX_BLINK_TBL; i++) 277 for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
278 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE)) 278 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
279 break; 279 break;
280 }
281 /* if 0 frame is transfered */
282 if ((i == IWL_MAX_BLINK_TBL) || !priv->allow_blinking)
283 blink_rate = IWL_LED_SOLID;
284 else
285 blink_rate = blink_tbl[i].on_time;
286 280
287 return blink_rate; 281 IWL_DEBUG_LED("LED BLINK IDX=%d", i);
282 return i;
288} 283}
289 284
290static inline int is_rf_kill(struct iwl_priv *priv) 285static inline int is_rf_kill(struct iwl_priv *priv)
@@ -300,7 +295,7 @@ static inline int is_rf_kill(struct iwl_priv *priv)
300 */ 295 */
301void iwl_leds_background(struct iwl_priv *priv) 296void iwl_leds_background(struct iwl_priv *priv)
302{ 297{
303 u8 blink_rate; 298 u8 blink_idx;
304 299
305 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 300 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
306 priv->last_blink_time = 0; 301 priv->last_blink_time = 0;
@@ -313,9 +308,10 @@ void iwl_leds_background(struct iwl_priv *priv)
313 308
314 if (!priv->allow_blinking) { 309 if (!priv->allow_blinking) {
315 priv->last_blink_time = 0; 310 priv->last_blink_time = 0;
316 if (priv->last_blink_rate != IWL_LED_SOLID) { 311 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
317 priv->last_blink_rate = IWL_LED_SOLID; 312 priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
318 iwl4965_led_on(priv, IWL_LED_LINK); 313 iwl4965_led_pattern(priv, IWL_LED_LINK,
314 IWL_SOLID_BLINK_IDX);
319 } 315 }
320 return; 316 return;
321 } 317 }
@@ -324,21 +320,14 @@ void iwl_leds_background(struct iwl_priv *priv)
324 msecs_to_jiffies(1000))) 320 msecs_to_jiffies(1000)))
325 return; 321 return;
326 322
327 blink_rate = get_blink_rate(priv); 323 blink_idx = iwl_get_blink_rate(priv);
328 324
 329 /* call only if blink rate changes */ 325 /* call only if blink rate changes */
330 if (blink_rate != priv->last_blink_rate) { 326 if (blink_idx != priv->last_blink_rate)
331 if (blink_rate != IWL_LED_SOLID) { 327 iwl4965_led_pattern(priv, IWL_LED_LINK, blink_idx);
332 priv->last_blink_time = jiffies +
333 msecs_to_jiffies(1000);
334 iwl4965_led_not_solid(priv, IWL_LED_LINK, blink_rate);
335 } else {
336 priv->last_blink_time = 0;
337 iwl4965_led_on(priv, IWL_LED_LINK);
338 }
339 }
340 328
341 priv->last_blink_rate = blink_rate; 329 priv->last_blink_time = jiffies;
330 priv->last_blink_rate = blink_idx;
342} 331}
343EXPORT_SYMBOL(iwl_leds_background); 332EXPORT_SYMBOL(iwl_leds_background);
344 333
@@ -362,10 +351,8 @@ int iwl_leds_register(struct iwl_priv *priv)
362 priv->led[IWL_LED_TRG_RADIO].led_off = iwl4965_led_off_reg; 351 priv->led[IWL_LED_TRG_RADIO].led_off = iwl4965_led_off_reg;
363 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL; 352 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
364 353
365 ret = iwl_leds_register_led(priv, 354 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RADIO],
366 &priv->led[IWL_LED_TRG_RADIO], 355 IWL_LED_TRG_RADIO, 1, name, trigger);
367 IWL_LED_TRG_RADIO, 1,
368 name, trigger);
369 if (ret) 356 if (ret)
370 goto exit_fail; 357 goto exit_fail;
371 358
@@ -373,10 +360,9 @@ int iwl_leds_register(struct iwl_priv *priv)
373 snprintf(name, sizeof(name), "iwl-%s:assoc", 360 snprintf(name, sizeof(name), "iwl-%s:assoc",
374 wiphy_name(priv->hw->wiphy)); 361 wiphy_name(priv->hw->wiphy));
375 362
376 ret = iwl_leds_register_led(priv, 363 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC],
377 &priv->led[IWL_LED_TRG_ASSOC], 364 IWL_LED_TRG_ASSOC, 0, name, trigger);
378 IWL_LED_TRG_ASSOC, 0, 365
379 name, trigger);
380 /* for assoc always turn led on */ 366 /* for assoc always turn led on */
381 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl4965_led_on_reg; 367 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl4965_led_on_reg;
382 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl4965_led_on_reg; 368 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl4965_led_on_reg;
@@ -386,31 +372,26 @@ int iwl_leds_register(struct iwl_priv *priv)
386 goto exit_fail; 372 goto exit_fail;
387 373
388 trigger = ieee80211_get_rx_led_name(priv->hw); 374 trigger = ieee80211_get_rx_led_name(priv->hw);
389 snprintf(name, sizeof(name), "iwl-%s:RX", 375 snprintf(name, sizeof(name), "iwl-%s:RX", wiphy_name(priv->hw->wiphy));
390 wiphy_name(priv->hw->wiphy));
391 376
392 377
393 ret = iwl_leds_register_led(priv, 378 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX],
394 &priv->led[IWL_LED_TRG_RX], 379 IWL_LED_TRG_RX, 0, name, trigger);
395 IWL_LED_TRG_RX, 0,
396 name, trigger);
397 380
398 priv->led[IWL_LED_TRG_RX].led_on = iwl4965_led_associated; 381 priv->led[IWL_LED_TRG_RX].led_on = iwl_led_associated;
399 priv->led[IWL_LED_TRG_RX].led_off = iwl4965_led_associated; 382 priv->led[IWL_LED_TRG_RX].led_off = iwl_led_associated;
400 priv->led[IWL_LED_TRG_RX].led_pattern = iwl4965_led_pattern; 383 priv->led[IWL_LED_TRG_RX].led_pattern = iwl4965_led_pattern;
401 384
402 if (ret) 385 if (ret)
403 goto exit_fail; 386 goto exit_fail;
404 387
405 trigger = ieee80211_get_tx_led_name(priv->hw); 388 trigger = ieee80211_get_tx_led_name(priv->hw);
406 snprintf(name, sizeof(name), "iwl-%s:TX", 389 snprintf(name, sizeof(name), "iwl-%s:TX", wiphy_name(priv->hw->wiphy));
407 wiphy_name(priv->hw->wiphy)); 390 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX],
408 ret = iwl_leds_register_led(priv, 391 IWL_LED_TRG_TX, 0, name, trigger);
409 &priv->led[IWL_LED_TRG_TX], 392
410 IWL_LED_TRG_TX, 0, 393 priv->led[IWL_LED_TRG_TX].led_on = iwl_led_associated;
411 name, trigger); 394 priv->led[IWL_LED_TRG_TX].led_off = iwl_led_associated;
412 priv->led[IWL_LED_TRG_TX].led_on = iwl4965_led_associated;
413 priv->led[IWL_LED_TRG_TX].led_off = iwl4965_led_associated;
414 priv->led[IWL_LED_TRG_TX].led_pattern = iwl4965_led_pattern; 395 priv->led[IWL_LED_TRG_TX].led_pattern = iwl4965_led_pattern;
415 396
416 if (ret) 397 if (ret)
@@ -425,7 +406,7 @@ exit_fail:
425EXPORT_SYMBOL(iwl_leds_register); 406EXPORT_SYMBOL(iwl_leds_register);
426 407
427/* unregister led class */ 408/* unregister led class */
428static void iwl_leds_unregister_led(struct iwl4965_led *led, u8 set_led) 409static void iwl_leds_unregister_led(struct iwl_led *led, u8 set_led)
429{ 410{
430 if (!led->registered) 411 if (!led->registered)
431 return; 412 return;
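
For reference, the new iwl_brightness_to_idx() above maps the 0-255 LED
brightness onto a blink-table index with fls(); a few sample values:

	iwl_brightness_to_idx(0)   == fls(0x00) == 0
	iwl_brightness_to_idx(5)   == fls(0x05) == 3
	iwl_brightness_to_idx(64)  == fls(0x40) == 7
	iwl_brightness_to_idx(255) == fls(0xFF) == 8	/* hence "[0-256] -> [0..8]" */
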
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 5bb04128cd65..1980ae5a7e82 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -49,14 +49,13 @@ enum led_type {
49}; 49};
50 50
51 51
52struct iwl4965_led { 52struct iwl_led {
53 struct iwl_priv *priv; 53 struct iwl_priv *priv;
54 struct led_classdev led_dev; 54 struct led_classdev led_dev;
55 55
56 int (*led_on) (struct iwl_priv *priv, int led_id); 56 int (*led_on) (struct iwl_priv *priv, int led_id);
57 int (*led_off) (struct iwl_priv *priv, int led_id); 57 int (*led_off) (struct iwl_priv *priv, int led_id);
58 int (*led_pattern) (struct iwl_priv *priv, int led_id, 58 int (*led_pattern) (struct iwl_priv *priv, int led_id, unsigned int idx);
59 enum led_brightness brightness);
60 59
61 enum led_type type; 60 enum led_type type;
62 unsigned int registered; 61 unsigned int registered;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
new file mode 100644
index 000000000000..2e71803e09ba
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -0,0 +1,423 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-commands.h"
41#include "iwl-debug.h"
42#include "iwl-power.h"
43#include "iwl-helpers.h"
44
45/*
 46 * Setting the power level allows the card to go to sleep when not busy.
 47 * Three factors decide which power level to use; they are listed here
 48 * in order of priority:
 49 * 1- critical_power_setting: set according to card temperature.
 50 * 2- system_power_setting: set by the system PM manager.
 51 * 3- user_power_setting: set by the user, either through sysfs or
 52 * mac80211.
 53 *
 54 * If system_power_setting and user_power_setting are both set to auto,
 55 * the power level is decided according to association status and battery
 56 * status.
57 *
58 */
59
60#define MSEC_TO_USEC 1024
61#define IWL_POWER_RANGE_0_MAX (2)
62#define IWL_POWER_RANGE_1_MAX (10)
63
64
65#define NOSLP __constant_cpu_to_le16(0), 0, 0
66#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
67#define SLP_TOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
68#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
69 __constant_cpu_to_le32(X1), \
70 __constant_cpu_to_le32(X2), \
71 __constant_cpu_to_le32(X3), \
72 __constant_cpu_to_le32(X4)}
73
74#define IWL_POWER_ON_BATTERY IWL_POWER_INDEX_5
75#define IWL_POWER_ON_AC_DISASSOC IWL_POWER_MODE_CAM
76#define IWL_POWER_ON_AC_ASSOC IWL_POWER_MODE_CAM
77
78
79#define IWL_CT_KILL_TEMPERATURE 110
80#define IWL_MIN_POWER_TEMPERATURE 100
81#define IWL_REDUCED_POWER_TEMPERATURE 95
82
83/* default power management (not Tx power) table values */
 84/* for tim 0-2 */
85static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
86 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
87 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
88 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
89 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
90 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
91 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
92};
93
94
95/* for tim = 3-10 */
96static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
97 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
98 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
99 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
100 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
101 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
102 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2}
103};
104
105/* for tim > 10 */
106static struct iwl_power_vec_entry range_2[IWL_POWER_AC] = {
107 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
108 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
109 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
110 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
111 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
112 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
113};
114
115/* decide the right power level according to association status
116 * and battery status
117 */
118static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
119{
120 u16 mode = priv->power_data.user_power_setting;
121
122 switch (priv->power_data.user_power_setting) {
123 case IWL_POWER_AUTO:
124 /* if running on battery */
125 if (priv->power_data.is_battery_active)
126 mode = IWL_POWER_ON_BATTERY;
127 else if (iwl_is_associated(priv))
128 mode = IWL_POWER_ON_AC_ASSOC;
129 else
130 mode = IWL_POWER_ON_AC_DISASSOC;
131 break;
132 case IWL_POWER_BATTERY:
133 mode = IWL_POWER_INDEX_3;
134 break;
135 case IWL_POWER_AC:
136 mode = IWL_POWER_MODE_CAM;
137 break;
138 }
139 return mode;
140}
141
142/* initialize to default */
143static int iwl_power_init_handle(struct iwl_priv *priv)
144{
145 int ret = 0, i;
146 struct iwl_power_mgr *pow_data;
147 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
148 u16 pci_pm;
149
150 IWL_DEBUG_POWER("Initialize power \n");
151
152 pow_data = &(priv->power_data);
153
154 memset(pow_data, 0, sizeof(*pow_data));
155
156 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
157 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
158 memcpy(&pow_data->pwr_range_2[0], &range_2[0], size);
159
160 ret = pci_read_config_word(priv->pci_dev,
161 PCI_LINK_CTRL, &pci_pm);
162 if (ret != 0)
163 return 0;
164 else {
165 struct iwl4965_powertable_cmd *cmd;
166
167 IWL_DEBUG_POWER("adjust power command flags\n");
168
169 for (i = 0; i < IWL_POWER_AC; i++) {
170 cmd = &pow_data->pwr_range_0[i].cmd;
171
172 if (pci_pm & 0x1)
173 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
174 else
175 cmd->flags |= IWL_POWER_PCI_PM_MSK;
176 }
177 }
178 return ret;
179}
180
181/* adjust power command according to dtim period and power level */
182static int iwl_update_power_command(struct iwl_priv *priv,
183 struct iwl4965_powertable_cmd *cmd,
184 u16 mode)
185{
186 int ret = 0, i;
187 u8 skip;
188 u32 max_sleep = 0;
189 struct iwl_power_vec_entry *range;
190 u8 period = 0;
191 struct iwl_power_mgr *pow_data;
192
193 if (mode > IWL_POWER_INDEX_5) {
194 IWL_DEBUG_POWER("Error invalid power mode \n");
195 return -1;
196 }
197 pow_data = &(priv->power_data);
198
199 if (pow_data->dtim_period <= IWL_POWER_RANGE_0_MAX)
200 range = &pow_data->pwr_range_0[0];
201 else if (pow_data->dtim_period <= IWL_POWER_RANGE_1_MAX)
202 range = &pow_data->pwr_range_1[0];
203 else
204 range = &pow_data->pwr_range_2[0];
205
206 period = pow_data->dtim_period;
207 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
208
209 if (period == 0) {
210 period = 1;
211 skip = 0;
212 } else
213 skip = range[mode].no_dtim;
214
215 if (skip == 0) {
216 max_sleep = period;
217 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
218 } else {
219 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
220 max_sleep = le32_to_cpu(slp_itrvl);
221 if (max_sleep == 0xFF)
222 max_sleep = period * (skip + 1);
223 else if (max_sleep > period)
224 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
225 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
226 }
227
228 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
229 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
230 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
231 }
232
233 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
234 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
235 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
236 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
237 le32_to_cpu(cmd->sleep_interval[0]),
238 le32_to_cpu(cmd->sleep_interval[1]),
239 le32_to_cpu(cmd->sleep_interval[2]),
240 le32_to_cpu(cmd->sleep_interval[3]),
241 le32_to_cpu(cmd->sleep_interval[4]));
242
243 return ret;
244}
245
246
247/*
248 * calculate the final power mode index
249 */
250int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
251{
252 struct iwl_power_mgr *setting = &(priv->power_data);
253 int ret = 0;
254 u16 uninitialized_var(final_mode);
255
256 /* If on battery, set to 3,
257 * if plugged into AC power, set to CAM ("continuously aware mode"),
258 * else user level */
259
260 switch (setting->system_power_setting) {
261 case IWL_POWER_AUTO:
262 final_mode = iwl_get_auto_power_mode(priv);
263 break;
264 case IWL_POWER_BATTERY:
265 final_mode = IWL_POWER_INDEX_3;
266 break;
267 case IWL_POWER_AC:
268 final_mode = IWL_POWER_MODE_CAM;
269 break;
270 default:
271 final_mode = setting->system_power_setting;
272 }
273
274 if (setting->critical_power_setting > final_mode)
275 final_mode = setting->critical_power_setting;
276
277	/* driver only supports CAM for non-STA networks */
278 if (priv->iw_mode != IEEE80211_IF_TYPE_STA)
279 final_mode = IWL_POWER_MODE_CAM;
280
281 if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
282 ((setting->power_mode != final_mode) || refresh)) {
283 struct iwl4965_powertable_cmd cmd;
284
285 if (final_mode != IWL_POWER_MODE_CAM)
286 set_bit(STATUS_POWER_PMI, &priv->status);
287
288 iwl_update_power_command(priv, &cmd, final_mode);
289 cmd.keep_alive_beacons = 0;
290
291 if (final_mode == IWL_POWER_INDEX_5)
292 cmd.flags |= IWL_POWER_FAST_PD;
293
294 if (priv->cfg->ops->lib->set_power)
295 ret = priv->cfg->ops->lib->set_power(priv, &cmd);
296
297 if (final_mode == IWL_POWER_MODE_CAM)
298 clear_bit(STATUS_POWER_PMI, &priv->status);
299 else
300 set_bit(STATUS_POWER_PMI, &priv->status);
301
302 if (priv->cfg->ops->lib->update_chain_flags)
303 priv->cfg->ops->lib->update_chain_flags(priv);
304
305 if (!ret)
306 setting->power_mode = final_mode;
307 }
308
309 return ret;
310}
311EXPORT_SYMBOL(iwl_power_update_mode);
312
313/* Allow other iwl code to disable/enable power management.
314 * This is useful for rate scaling to disable PM during heavy
315 * Tx/Rx activity.
316 */
317int iwl_power_disable_management(struct iwl_priv *priv)
318{
319 u16 prev_mode;
320 int ret = 0;
321
322 if (priv->power_data.power_disabled)
323 return -EBUSY;
324
325 prev_mode = priv->power_data.user_power_setting;
326 priv->power_data.user_power_setting = IWL_POWER_MODE_CAM;
327 ret = iwl_power_update_mode(priv, 0);
328 priv->power_data.power_disabled = 1;
329 priv->power_data.user_power_setting = prev_mode;
330
331 return ret;
332}
333EXPORT_SYMBOL(iwl_power_disable_management);
334
335/* Allow other iwl code to disable/enable power management.
336 * This is useful for rate scaling to disable PM during
337 * high-volume activity.
338 */
339int iwl_power_enable_management(struct iwl_priv *priv)
340{
341 int ret = 0;
342
343 priv->power_data.power_disabled = 0;
344 ret = iwl_power_update_mode(priv, 0);
345 return ret;
346}
347EXPORT_SYMBOL(iwl_power_enable_management);
348
349/* set user_power_setting */
350int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode)
351{
352 int ret = 0;
353
354 if (mode > IWL_POWER_LIMIT)
355 return -EINVAL;
356
357 priv->power_data.user_power_setting = mode;
358
359 ret = iwl_power_update_mode(priv, 0);
360
361 return ret;
362}
363EXPORT_SYMBOL(iwl_power_set_user_mode);
364
365
366/* Set system_power_setting. This should be set by the overall
367 * system PM application.
368 */
369int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode)
370{
371 int ret = 0;
372
373 if (mode > IWL_POWER_LIMIT)
374 return -EINVAL;
375
376 priv->power_data.system_power_setting = mode;
377
378 ret = iwl_power_update_mode(priv, 0);
379
380 return ret;
381}
382EXPORT_SYMBOL(iwl_power_set_system_mode);
383
384/* initialize to defaults */
385void iwl_power_initialize(struct iwl_priv *priv)
386{
387
388 iwl_power_init_handle(priv);
389 priv->power_data.user_power_setting = IWL_POWER_AUTO;
390 priv->power_data.power_disabled = 0;
391 priv->power_data.system_power_setting = IWL_POWER_AUTO;
392 priv->power_data.is_battery_active = 0;
393 priv->power_data.power_disabled = 0;
394 priv->power_data.critical_power_setting = 0;
395}
396EXPORT_SYMBOL(iwl_power_initialize);
397
398/* set critical_power_setting according to temperature value */
399int iwl_power_temperature_change(struct iwl_priv *priv)
400{
401 int ret = 0;
402 u16 new_critical = priv->power_data.critical_power_setting;
403 s32 temperature = KELVIN_TO_CELSIUS(priv->last_temperature);
404
405 if (temperature > IWL_CT_KILL_TEMPERATURE)
406 return 0;
407 else if (temperature > IWL_MIN_POWER_TEMPERATURE)
408 new_critical = IWL_POWER_INDEX_5;
409 else if (temperature > IWL_REDUCED_POWER_TEMPERATURE)
410 new_critical = IWL_POWER_INDEX_3;
411 else
412 new_critical = IWL_POWER_MODE_CAM;
413
414 if (new_critical != priv->power_data.critical_power_setting)
415 priv->power_data.critical_power_setting = new_critical;
416
417 if (priv->power_data.critical_power_setting >
418 priv->power_data.power_mode)
419 ret = iwl_power_update_mode(priv, 0);
420
421 return ret;
422}
423EXPORT_SYMBOL(iwl_power_temperature_change);
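
A worked example of the clamping done in iwl_update_power_command() above:
with dtim_period = 3 and mode = IWL_POWER_INDEX_5, range_1[5] carries
SLP_VEC(2, 4, 7, 10, 10) and no_dtim = 2, so skip = 2 and the last interval
(10) is neither 0xFF nor within the period; max_sleep becomes (10 / 3) * 3 = 9
and the vector is clamped to {2, 4, 7, 9, 9}, i.e. with the SLEEP_OVER_DTIM
flag set the card never sleeps past a multiple of the DTIM period.
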
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
new file mode 100644
index 000000000000..b066724a1c2b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -0,0 +1,76 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_power_setting_h__
29#define __iwl_power_setting_h__
30
31#include <net/mac80211.h>
32#include "iwl-commands.h"
33
34struct iwl_priv;
35
36#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */
37#define IWL_POWER_INDEX_3 0x03
38#define IWL_POWER_INDEX_5 0x05
39#define IWL_POWER_AC 0x06
40#define IWL_POWER_BATTERY 0x07
41#define IWL_POWER_AUTO 0x08
42#define IWL_POWER_LIMIT 0x08
43#define IWL_POWER_MASK 0x0F
44#define IWL_POWER_ENABLED 0x10
45
46/* Power management (not Tx power) structures */
47
48struct iwl_power_vec_entry {
49 struct iwl4965_powertable_cmd cmd;
50 u8 no_dtim;
51};
52
53struct iwl_power_mgr {
54 spinlock_t lock;
55 struct iwl_power_vec_entry pwr_range_0[IWL_POWER_AC];
56 struct iwl_power_vec_entry pwr_range_1[IWL_POWER_AC];
57 struct iwl_power_vec_entry pwr_range_2[IWL_POWER_AC];
58 u32 dtim_period;
 59	/* final power level used to calculate the final power command */
60 u8 power_mode;
61 u8 user_power_setting; /* set by user through mac80211 or sysfs */
 62	u8 system_power_setting; /* set by kernel system tools */
63 u8 critical_power_setting; /* set if driver over heated */
64 u8 is_battery_active; /* DC/AC power */
65 u8 power_disabled; /* flag to disable using power saving level */
66};
67
68int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh);
69int iwl_power_disable_management(struct iwl_priv *priv);
70int iwl_power_enable_management(struct iwl_priv *priv);
71int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode);
72int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode);
73void iwl_power_initialize(struct iwl_priv *priv);
74int iwl_power_temperature_change(struct iwl_priv *priv);
75
76#endif /* __iwl_power_setting_h__ */
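
A minimal usage sketch of the new interface; the call sites named in the
comments are hypothetical and only illustrate which entry point each caller
would use:

	iwl_power_initialize(priv);			/* at device bring-up */
	iwl_power_set_user_mode(priv, IWL_POWER_AUTO);	/* e.g. from a sysfs store */
	iwl_power_set_system_mode(priv, IWL_POWER_AC);	/* from a system PM hook */
	iwl_power_temperature_change(priv);		/* after a temperature update */
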
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index c9cf8eef1a90..70d9c7568b98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -239,40 +239,307 @@
239#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C) 239#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
240#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030) 240#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
241 241
242/**
243 * Tx Scheduler
244 *
245 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
246 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
247 * host DRAM. It steers each frame's Tx command (which contains the frame
248 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
249 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
250 * but one DMA channel may take input from several queues.
251 *
252 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows:
253 *
254 * 0 -- EDCA BK (background) frames, lowest priority
255 * 1 -- EDCA BE (best effort) frames, normal priority
256 * 2 -- EDCA VI (video) frames, higher priority
257 * 3 -- EDCA VO (voice) and management frames, highest priority
258 * 4 -- Commands (e.g. RXON, etc.)
259 * 5 -- HCCA short frames
260 * 6 -- HCCA long frames
261 * 7 -- not used by driver (device-internal only)
262 *
263 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
264 * In addition, driver can map queues 7-15 to Tx DMA/FIFO channels 0-3 to
265 * support 11n aggregation via EDCA DMA channels.
266 *
267 * The driver sets up each queue to work in one of two modes:
268 *
269 * 1) Scheduler-Ack, in which the scheduler automatically supports a
270 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
271 * contains TFDs for a unique combination of Recipient Address (RA)
272 * and Traffic Identifier (TID), that is, traffic of a given
273 * Quality-Of-Service (QOS) priority, destined for a single station.
274 *
275 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
276 * each frame within the BA window, including whether it's been transmitted,
277 * and whether it's been acknowledged by the receiving station. The device
278 * automatically processes block-acks received from the receiving STA,
279 * and reschedules un-acked frames to be retransmitted (successful
280 * Tx completion may end up being out-of-order).
281 *
282 * The driver must maintain the queue's Byte Count table in host DRAM
283 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
284 * This mode does not support fragmentation.
285 *
286 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
287 * The device may automatically retry Tx, but will retry only one frame
288 * at a time, until receiving ACK from receiving station, or reaching
289 * retry limit and giving up.
290 *
291 * The command queue (#4) must use this mode!
292 * This mode does not require use of the Byte Count table in host DRAM.
293 *
294 * Driver controls scheduler operation via 3 means:
295 * 1) Scheduler registers
296 * 2) Shared scheduler database in internal 4965 SRAM
297 * 3) Shared data in host DRAM
298 *
299 * Initialization:
300 *
301 * When loading, driver should allocate memory for:
302 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
303 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
304 * (1024 bytes for each queue).
305 *
306 * After receiving "Alive" response from uCode, driver must initialize
307 * the scheduler (especially for queue #4, the command queue, otherwise
308 * the driver can't issue commands!):
309 */
310
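
Illustration only (not part of this patch): a rough sketch of the post-"Alive"
steps listed above, assuming hypothetical prph_read()/prph_write() helpers
that go through the HBUS_TARG_PRPH window:

	u32 scd_base = prph_read(priv, IWL49_SCD_SRAM_BASE_ADDR);

	/* point the scheduler at the 1024-byte-aligned byte-count CB array;
	 * the register takes physical address bits [35:10] */
	prph_write(priv, IWL49_SCD_DRAM_BASE_ADDR, byte_cnt_dma_addr >> 10);

	prph_write(priv, IWL49_SCD_TXFACT, 0xff);	/* enable all 8 DMA channels */
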
311/**
312 * Max Tx window size is the max number of contiguous TFDs that the scheduler
313 * can keep track of at one time when creating block-ack chains of frames.
314 * Note that "64" matches the number of ack bits in a block-ack packet.
315 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
316 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
317 */
318#define SCD_WIN_SIZE 64
319#define SCD_FRAME_LIMIT 64
320
321/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
322#define IWL49_SCD_START_OFFSET 0xa02c00
323
324/*
325 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
326 * Value is valid only after "Alive" response from uCode.
327 */
328#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
329
330/*
331 * Driver may need to update queue-empty bits after changing queue's
332 * write and read pointers (indexes) during (re-)initialization (i.e. when
333 * scheduler is not tracking what's happening).
334 * Bit fields:
335 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
336 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
337 * NOTE: This register is not used by Linux driver.
338 */
339#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
340
341/*
342 * Physical base address of array of byte count (BC) circular buffers (CBs).
343 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
344 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
345 * Others are spaced by 1024 bytes.
346 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
347 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
348 * Bit fields:
349 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
350 */
351#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
352
353/*
354 * Enables any/all Tx DMA/FIFO channels.
355 * Scheduler generates requests for only the active channels.
356 * Set this to 0xff to enable all 8 channels (normal usage).
357 * Bit fields:
358 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
359 */
360#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
361/*
362 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
363 * Initialized and updated by driver as new TFDs are added to queue.
364 * NOTE: If using Block Ack, index must correspond to frame's
365 * Start Sequence Number; index = (SSN & 0xff)
366 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
367 */
368#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
369
370/*
371 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
372 * For FIFO mode, index indicates next frame to transmit.
373 * For Scheduler-ACK mode, index indicates first frame in Tx window.
374 * Initialized by driver, updated by scheduler.
375 */
376#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
377
378/*
379 * Select which queues work in chain mode (1) vs. not (0).
380 * Use chain mode to build chains of aggregated frames.
381 * Bit fields:
382 * 31-16: Reserved
383 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
384 * NOTE: If driver sets up a queue for chain mode, it should also set up
385 * Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
386 */
387#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
388
389/*
390 * Select which queues interrupt driver when scheduler increments
391 * a queue's read pointer (index).
392 * Bit fields:
393 * 31-16: Reserved
394 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
395 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
396 * from Rx queue to read Tx command responses and update Tx queues.
397 */
398#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
399
400/*
401 * Queue search status registers. One for each queue.
402 * Sets up queue mode and assigns queue to Tx DMA channel.
403 * Bit fields:
404 * 19-10: Write mask/enable bits for bits 0-9
405 * 9: Driver should init to "0"
406 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
407 * Driver should init to "1" for aggregation mode, or "0" otherwise.
408 * 7-6: Driver should init to "0"
409 * 5: Window Size Left; indicates whether scheduler can request
410 * another TFD, based on window size, etc. Driver should init
411 * this bit to "1" for aggregation mode, or "0" for non-agg.
412 * 4-1: Tx FIFO to use (range 0-7).
413 * 0: Queue is active (1), not active (0).
414 * Other bits should be written as "0"
415 *
416 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
417 * via SCD_QUEUECHAIN_SEL.
418 */
419#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
420 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
421
422/* Bit field positions */
423#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
424#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
425#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
426#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
427
428/* Write masks */
429#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
430#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
431
432/**
433 * 4965 internal SRAM structures for scheduler, shared with driver ...
434 *
435 * Driver should clear and initialize the following areas after receiving
436 * "Alive" response from 4965 uCode, i.e. after initial
437 * uCode load, or after a uCode load done for error recovery:
438 *
439 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
440 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
441 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
442 *
443 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
444 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
445 * All OFFSET values must be added to this base address.
446 */
447
448/*
449 * Queue context. One 8-byte entry for each of 16 queues.
450 *
451 * Driver should clear this entire area (size 0x80) to 0 after receiving
452 * "Alive" notification from uCode. Additionally, driver should init
453 * each queue's entry as follows:
454 *
455 * LS Dword bit fields:
456 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
457 *
458 * MS Dword bit fields:
459 * 16-22: Frame limit. Driver should init to 10 (0xa).
460 *
461 * Driver should init all other bits to 0.
462 *
463 * Init must be done after driver receives "Alive" response from 4965 uCode,
464 * and when setting up queue for aggregation.
465 */
466#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
467#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
468 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
469
470#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
471#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
472#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
473#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
474
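
A sketch of building the two per-queue context dwords described above from
SCD_WIN_SIZE and SCD_FRAME_LIMIT (writing them to SRAM at
scd_base + IWL49_SCD_CONTEXT_QUEUE_OFFSET(q) is left to the driver's SRAM
helpers, which are not shown here):

	u32 reg1 = (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		   IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK;
	u32 reg2 = (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		   IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK;
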
475/*
476 * Tx Status Bitmap
477 *
478 * Driver should clear this entire area (size 0x100) to 0 after receiving
479 * "Alive" notification from uCode. Area is used only by device itself;
480 * no other support (besides clearing) is required from driver.
481 */
482#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
483
242/* 484/*
243 * 4965 Tx Scheduler registers. 485 * RAxTID to queue translation mapping.
244 * Details are documented in iwl-4965-hw.h 486 *
487 * When queue is in Scheduler-ACK mode, frames placed in a that queue must be
488 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
489 * one QOS priority level destined for one station (for this wireless link,
490 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
491 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
492 * mode, the device ignores the mapping value.
493 *
494 * Bit fields, for each 16-bit map:
495 * 15-9: Reserved, set to 0
496 * 8-4: Index into device's station table for recipient station
497 * 3-0: Traffic ID (tid), range 0-15
498 *
499 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
500 * "Alive" notification from uCode. To update a 16-bit map value, driver
501 * must read a dword-aligned value from device SRAM, replace the 16-bit map
502 * value of interest, and write the dword value back into device SRAM.
245 */ 503 */
246#define IWL49_SCD_BASE (PRPH_BASE + 0xa02c00) 504#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
247 505
248#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_BASE + 0x0) 506/* Find translation table dword to read/write for given queue */
249#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_BASE + 0x4) 507#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
250#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_BASE + 0x10) 508 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
251#define IWL49_SCD_AIT (IWL49_SCD_BASE + 0x18) 509
252#define IWL49_SCD_TXFACT (IWL49_SCD_BASE + 0x1c) 510#define IWL_SCD_TXFIFO_POS_TID (0)
253#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_BASE + 0x24 + (x) * 4) 511#define IWL_SCD_TXFIFO_POS_RA (4)
254#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_BASE + 0x64 + (x) * 4) 512#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
255#define IWL49_SCD_SETQUEUENUM (IWL49_SCD_BASE + 0xa4) 513
256#define IWL49_SCD_SET_TXSTAT_TXED (IWL49_SCD_BASE + 0xa8) 514/* 5000 SCD */
257#define IWL49_SCD_SET_TXSTAT_DONE (IWL49_SCD_BASE + 0xac) 515#define IWL50_SCD_QUEUE_STTS_REG_POS_TXF (0)
258#define IWL49_SCD_SET_TXSTAT_NOT_SCHD (IWL49_SCD_BASE + 0xb0) 516#define IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
259#define IWL49_SCD_DECREASE_CREDIT (IWL49_SCD_BASE + 0xb4) 517#define IWL50_SCD_QUEUE_STTS_REG_POS_WSL (4)
260#define IWL49_SCD_DECREASE_SCREDIT (IWL49_SCD_BASE + 0xb8) 518#define IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
261#define IWL49_SCD_LOAD_CREDIT (IWL49_SCD_BASE + 0xbc) 519#define IWL50_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
262#define IWL49_SCD_LOAD_SCREDIT (IWL49_SCD_BASE + 0xc0) 520
263#define IWL49_SCD_BAR (IWL49_SCD_BASE + 0xc4) 521#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
264#define IWL49_SCD_BAR_DW0 (IWL49_SCD_BASE + 0xc8) 522#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
265#define IWL49_SCD_BAR_DW1 (IWL49_SCD_BASE + 0xcc) 523#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
266#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_BASE + 0xd0) 524#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
267#define IWL49_SCD_QUERY_REQ (IWL49_SCD_BASE + 0xd8) 525#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
268#define IWL49_SCD_QUERY_RES (IWL49_SCD_BASE + 0xdc) 526#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
269#define IWL49_SCD_PENDING_FRAMES (IWL49_SCD_BASE + 0xe0) 527#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
270#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_BASE + 0xe4) 528#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
271#define IWL49_SCD_INTERRUPT_THRESHOLD (IWL49_SCD_BASE + 0xe8) 529
272#define IWL49_SCD_QUERY_MIN_FRAME_SIZE (IWL49_SCD_BASE + 0x100) 530#define IWL50_SCD_CONTEXT_DATA_OFFSET (0x600)
273#define IWL49_SCD_QUEUE_STATUS_BITS(x) (IWL49_SCD_BASE + 0x104 + (x) * 4) 531#define IWL50_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
274 532#define IWL50_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
275/* SP SCD */ 533
534#define IWL50_SCD_CONTEXT_QUEUE_OFFSET(x)\
535 (IWL50_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
536
537#define IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
538 ((IWL50_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
539
540#define IWL50_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
541 (~(1<<IWL_CMD_QUEUE_NUM)))
542
276#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00) 543#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00)
277 544
278#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0) 545#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0)
@@ -287,4 +554,6 @@
287#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108) 554#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108)
288#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4) 555#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4)
289 556
557/*********************** END TX SCHEDULER *************************************/
558
290#endif /* __iwl_prph_h__ */ 559#endif /* __iwl_prph_h__ */
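The RAxTID translation-table comment earlier in this header prescribes a dword-aligned read-modify-write to update one queue's 16-bit map, using IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE() and the RA/TID position and mask defines. A hedged sketch of that procedure follows; iwl_read_targ_mem()/iwl_write_targ_mem() from iwl-io.h and the scd_base_addr value are assumptions about the surrounding driver, and locking/NIC-access handling is omitted.

/* Sketch only: map Scheduler-ACK queue txq_id to one RA/TID pair.
 * Bits 8-4 = station index, bits 3-0 = tid, per the comment above. */
static void example_scd_set_translate_entry(struct iwl_priv *priv,
					    u32 scd_base_addr,
					    int txq_id, u8 sta_id, u8 tid)
{
	u32 addr;
	u32 dw;
	u16 map;

	map = ((sta_id << IWL_SCD_TXFIFO_POS_RA) |
	       (tid << IWL_SCD_TXFIFO_POS_TID)) &
	      IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	/* Each dword holds two 16-bit maps: read, patch one half, write back */
	addr = scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	dw = iwl_read_targ_mem(priv, addr);
	if (txq_id & 0x1)
		dw = (map << 16) | (dw & 0x0000FFFF);
	else
		dw = map | (dw & 0xFFFF0000);
	iwl_write_targ_mem(priv, addr, dw);
}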
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
index 5980a5621cb8..e5e5846e9f25 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
@@ -33,7 +33,7 @@
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34 34
35#include "iwl-eeprom.h" 35#include "iwl-eeprom.h"
36#include "iwl-4965.h" 36#include "iwl-dev.h"
37#include "iwl-core.h" 37#include "iwl-core.h"
38#include "iwl-helpers.h" 38#include "iwl-helpers.h"
39 39
@@ -44,28 +44,31 @@ static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
44 struct iwl_priv *priv = data; 44 struct iwl_priv *priv = data;
45 int err = 0; 45 int err = 0;
46 46
47 if (!priv->rfkill_mngr.rfkill) 47 if (!priv->rfkill)
48 return 0; 48 return 0;
49 49
50 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 50 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
51 return 0; 51 return 0;
52 52
53 IWL_DEBUG_RF_KILL("we recieved soft RFKILL set to state %d\n", state); 53 IWL_DEBUG_RF_KILL("we received soft RFKILL set to state %d\n", state);
54 mutex_lock(&priv->mutex); 54 mutex_lock(&priv->mutex);
55 55
56 switch (state) { 56 switch (state) {
57 case RFKILL_STATE_ON: 57 case RFKILL_STATE_UNBLOCKED:
58 priv->cfg->ops->lib->radio_kill_sw(priv, 0); 58 if (iwl_is_rfkill_hw(priv)) {
59 /* if HW rf-kill is set dont allow ON state */
60 if (iwl_is_rfkill(priv))
61 err = -EBUSY; 59 err = -EBUSY;
60 goto out_unlock;
61 }
62 iwl_radio_kill_sw_enable_radio(priv);
62 break; 63 break;
63 case RFKILL_STATE_OFF: 64 case RFKILL_STATE_SOFT_BLOCKED:
64 priv->cfg->ops->lib->radio_kill_sw(priv, 1); 65 iwl_radio_kill_sw_disable_radio(priv);
65 if (!iwl_is_rfkill(priv)) 66 break;
66 err = -EBUSY; 67 default:
 68			IWL_WARNING("we received unexpected RFKILL state %d\n", state);
67 break; 69 break;
68 } 70 }
71out_unlock:
69 mutex_unlock(&priv->mutex); 72 mutex_unlock(&priv->mutex);
70 73
71 return err; 74 return err;
@@ -79,64 +82,35 @@ int iwl_rfkill_init(struct iwl_priv *priv)
79 BUG_ON(device == NULL); 82 BUG_ON(device == NULL);
80 83
81 IWL_DEBUG_RF_KILL("Initializing RFKILL.\n"); 84 IWL_DEBUG_RF_KILL("Initializing RFKILL.\n");
82 priv->rfkill_mngr.rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN); 85 priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
83 if (!priv->rfkill_mngr.rfkill) { 86 if (!priv->rfkill) {
84 IWL_ERROR("Unable to allocate rfkill device.\n"); 87 IWL_ERROR("Unable to allocate rfkill device.\n");
85 ret = -ENOMEM; 88 ret = -ENOMEM;
86 goto error; 89 goto error;
87 } 90 }
88 91
89 priv->rfkill_mngr.rfkill->name = priv->cfg->name; 92 priv->rfkill->name = priv->cfg->name;
90 priv->rfkill_mngr.rfkill->data = priv; 93 priv->rfkill->data = priv;
91 priv->rfkill_mngr.rfkill->state = RFKILL_STATE_ON; 94 priv->rfkill->state = RFKILL_STATE_UNBLOCKED;
92 priv->rfkill_mngr.rfkill->toggle_radio = iwl_rfkill_soft_rf_kill; 95 priv->rfkill->toggle_radio = iwl_rfkill_soft_rf_kill;
93 priv->rfkill_mngr.rfkill->user_claim_unsupported = 1; 96 priv->rfkill->user_claim_unsupported = 1;
94 97
95 priv->rfkill_mngr.rfkill->dev.class->suspend = NULL; 98 priv->rfkill->dev.class->suspend = NULL;
96 priv->rfkill_mngr.rfkill->dev.class->resume = NULL; 99 priv->rfkill->dev.class->resume = NULL;
97 100
98 priv->rfkill_mngr.input_dev = input_allocate_device(); 101 ret = rfkill_register(priv->rfkill);
99 if (!priv->rfkill_mngr.input_dev) {
100 IWL_ERROR("Unable to allocate rfkill input device.\n");
101 ret = -ENOMEM;
102 goto freed_rfkill;
103 }
104
105 priv->rfkill_mngr.input_dev->name = priv->cfg->name;
106 priv->rfkill_mngr.input_dev->phys = wiphy_name(priv->hw->wiphy);
107 priv->rfkill_mngr.input_dev->id.bustype = BUS_HOST;
108 priv->rfkill_mngr.input_dev->id.vendor = priv->pci_dev->vendor;
109 priv->rfkill_mngr.input_dev->dev.parent = device;
110 priv->rfkill_mngr.input_dev->evbit[0] = BIT(EV_KEY);
111 set_bit(KEY_WLAN, priv->rfkill_mngr.input_dev->keybit);
112
113 ret = rfkill_register(priv->rfkill_mngr.rfkill);
114 if (ret) { 102 if (ret) {
115 IWL_ERROR("Unable to register rfkill: %d\n", ret); 103 IWL_ERROR("Unable to register rfkill: %d\n", ret);
116 goto free_input_dev; 104 goto free_rfkill;
117 }
118
119 ret = input_register_device(priv->rfkill_mngr.input_dev);
120 if (ret) {
121 IWL_ERROR("Unable to register rfkill input device: %d\n", ret);
122 goto unregister_rfkill;
123 } 105 }
124 106
125 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n"); 107 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n");
126 return ret; 108 return ret;
127 109
128unregister_rfkill: 110free_rfkill:
129 rfkill_unregister(priv->rfkill_mngr.rfkill); 111 if (priv->rfkill != NULL)
130 priv->rfkill_mngr.rfkill = NULL; 112 rfkill_free(priv->rfkill);
131 113 priv->rfkill = NULL;
132free_input_dev:
133 input_free_device(priv->rfkill_mngr.input_dev);
134 priv->rfkill_mngr.input_dev = NULL;
135
136freed_rfkill:
137 if (priv->rfkill_mngr.rfkill != NULL)
138 rfkill_free(priv->rfkill_mngr.rfkill);
139 priv->rfkill_mngr.rfkill = NULL;
140 114
141error: 115error:
142 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n"); 116 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n");
@@ -147,27 +121,27 @@ EXPORT_SYMBOL(iwl_rfkill_init);
147void iwl_rfkill_unregister(struct iwl_priv *priv) 121void iwl_rfkill_unregister(struct iwl_priv *priv)
148{ 122{
149 123
150 if (priv->rfkill_mngr.input_dev) 124 if (priv->rfkill)
151 input_unregister_device(priv->rfkill_mngr.input_dev); 125 rfkill_unregister(priv->rfkill);
152 126
153 if (priv->rfkill_mngr.rfkill) 127 priv->rfkill = NULL;
154 rfkill_unregister(priv->rfkill_mngr.rfkill);
155
156 priv->rfkill_mngr.input_dev = NULL;
157 priv->rfkill_mngr.rfkill = NULL;
158} 128}
159EXPORT_SYMBOL(iwl_rfkill_unregister); 129EXPORT_SYMBOL(iwl_rfkill_unregister);
160 130
161/* set rf-kill to the right state. */ 131/* set rf-kill to the right state. */
162void iwl_rfkill_set_hw_state(struct iwl_priv *priv) 132void iwl_rfkill_set_hw_state(struct iwl_priv *priv)
163{ 133{
134 if (!priv->rfkill)
135 return;
164 136
165 if (!priv->rfkill_mngr.rfkill) 137 if (iwl_is_rfkill_hw(priv)) {
138 rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
166 return; 139 return;
140 }
167 141
168 if (!iwl_is_rfkill(priv)) 142 if (!iwl_is_rfkill_sw(priv))
169 priv->rfkill_mngr.rfkill->state = RFKILL_STATE_ON; 143 rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
170 else 144 else
171 priv->rfkill_mngr.rfkill->state = RFKILL_STATE_OFF; 145 rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
172} 146}
173EXPORT_SYMBOL(iwl_rfkill_set_hw_state); 147EXPORT_SYMBOL(iwl_rfkill_set_hw_state);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.h b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
index a7f04b855403..402fd4c781da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.h
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
@@ -31,14 +31,8 @@
31struct iwl_priv; 31struct iwl_priv;
32 32
33#include <linux/rfkill.h> 33#include <linux/rfkill.h>
34#include <linux/input.h>
35
36 34
37#ifdef CONFIG_IWLWIFI_RFKILL 35#ifdef CONFIG_IWLWIFI_RFKILL
38struct iwl_rfkill_mngr {
39 struct rfkill *rfkill;
40 struct input_dev *input_dev;
41};
42 36
43void iwl_rfkill_set_hw_state(struct iwl_priv *priv); 37void iwl_rfkill_set_hw_state(struct iwl_priv *priv);
44void iwl_rfkill_unregister(struct iwl_priv *priv); 38void iwl_rfkill_unregister(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
new file mode 100644
index 000000000000..e2d9afba38a5
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -0,0 +1,1321 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32#include <asm/unaligned.h>
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-calib.h"
39#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/
41/*
42 * Rx theory of operation
43 *
44 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
45 * each of which point to Receive Buffers to be filled by the NIC. These get
46 * used not only for Rx frames, but for any command response or notification
47 * from the NIC. The driver and NIC manage the Rx buffers by means
48 * of indexes into the circular buffer.
49 *
50 * Rx Queue Indexes
51 * The host/firmware share two index registers for managing the Rx buffers.
52 *
53 * The READ index maps to the first position that the firmware may be writing
54 * to -- the driver can read up to (but not including) this position and get
55 * good data.
56 * The READ index is managed by the firmware once the card is enabled.
57 *
58 * The WRITE index maps to the last position the driver has read from -- the
59 * position preceding WRITE is the last slot the firmware can place a packet.
60 *
61 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
62 * WRITE = READ.
63 *
64 * During initialization, the host sets up the READ queue position to the first
65 * INDEX position, and WRITE to the last (READ - 1 wrapped)
66 *
67 * When the firmware places a packet in a buffer, it will advance the READ index
68 * and fire the RX interrupt. The driver can then query the READ index and
69 * process as many packets as possible, moving the WRITE index forward as it
70 * resets the Rx queue buffers with new memory.
71 *
72 * The management in the driver is as follows:
73 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
74 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
75 * to replenish the iwl->rxq->rx_free.
76 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
77 * iwl->rxq is replenished and the READ INDEX is updated (updating the
78 * 'processed' and 'read' driver indexes as well)
79 * + A received packet is processed and handed to the kernel network stack,
80 * detached from the iwl->rxq. The driver 'processed' index is updated.
81 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
82 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
83 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
84 * were enough free buffers and RX_STALLED is set it is cleared.
85 *
86 *
87 * Driver sequence:
88 *
89 * iwl_rx_queue_alloc() Allocates rx_free
90 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
91 * iwl_rx_queue_restock
92 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
93 * queue, updates firmware pointers, and updates
94 * the WRITE index. If insufficient rx_free buffers
95 * are available, schedules iwl_rx_replenish
96 *
97 * -- enable interrupts --
98 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
99 * READ INDEX, detaching the SKB from the pool.
100 * Moves the packet buffer from queue to rx_used.
101 * Calls iwl_rx_queue_restock to refill any empty
102 * slots.
103 * ...
104 *
105 */
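To make the sequence above concrete, here is a hypothetical bring-up wrapper showing how the functions in this file fit together; the wrapper itself is not part of the driver (the real callers live in the per-device init paths), but every call it makes is defined below in iwl-rx.c.

/* Sketch only: wire up the Rx path as described in the comment above. */
static int example_rx_bringup(struct iwl_priv *priv)
{
	int ret;

	if (!priv->rxq.bd) {
		/* First time: allocate the RBD ring and buffer pool */
		ret = iwl_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERROR("Unable to initialize Rx queue\n");
			return ret;
		}
	} else {
		/* Restart: just recycle the existing pool */
		iwl_rx_queue_reset(priv, &priv->rxq);
	}

	/* Fill rx_free with fresh SKBs and restock the hardware queue */
	iwl_rx_replenish(priv);

	/* Program the flow-handler registers for Rx channel 0 */
	return iwl_rx_init(priv, &priv->rxq);
}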
106
107/**
108 * iwl_rx_queue_space - Return number of free slots available in queue.
109 */
110int iwl_rx_queue_space(const struct iwl_rx_queue *q)
111{
112 int s = q->read - q->write;
113 if (s <= 0)
114 s += RX_QUEUE_SIZE;
115 /* keep some buffer to not confuse full and empty queue */
116 s -= 2;
117 if (s < 0)
118 s = 0;
119 return s;
120}
121EXPORT_SYMBOL(iwl_rx_queue_space);
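As a worked example of the arithmetic above (assuming RX_QUEUE_SIZE is 256, the size this driver uses elsewhere): with q->read = 10 and q->write = 200 the raw difference is 10 - 200 = -190, which wraps to -190 + 256 = 66, and the guard band leaves 66 - 2 = 64 usable slots. The 2-slot guard is what keeps a fully restocked queue from looking identical to an empty one.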
122
123/**
124 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
125 */
126int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
127{
128 u32 reg = 0;
129 int ret = 0;
130 unsigned long flags;
131
132 spin_lock_irqsave(&q->lock, flags);
133
134 if (q->need_update == 0)
135 goto exit_unlock;
136
137 /* If power-saving is in use, make sure device is awake */
138 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
139 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
140
141 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
142 iwl_set_bit(priv, CSR_GP_CNTRL,
143 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
144 goto exit_unlock;
145 }
146
147 ret = iwl_grab_nic_access(priv);
148 if (ret)
149 goto exit_unlock;
150
151 /* Device expects a multiple of 8 */
152 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
153 q->write & ~0x7);
154 iwl_release_nic_access(priv);
155
156 /* Else device is assumed to be awake */
157 } else
158 /* Device expects a multiple of 8 */
159 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
160
161
162 q->need_update = 0;
163
164 exit_unlock:
165 spin_unlock_irqrestore(&q->lock, flags);
166 return ret;
167}
168EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
169/**
170 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
171 */
172static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
173 dma_addr_t dma_addr)
174{
175 return cpu_to_le32((u32)(dma_addr >> 8));
176}
177
178/**
179 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
180 *
181 * If there are slots in the RX queue that need to be restocked,
182 * and we have free pre-allocated buffers, fill the ranks as much
183 * as we can, pulling from rx_free.
184 *
185 * This moves the 'write' index forward to catch up with 'processed', and
186 * also updates the memory address in the firmware to reference the new
187 * target buffer.
188 */
189int iwl_rx_queue_restock(struct iwl_priv *priv)
190{
191 struct iwl_rx_queue *rxq = &priv->rxq;
192 struct list_head *element;
193 struct iwl_rx_mem_buffer *rxb;
194 unsigned long flags;
195 int write;
196 int ret = 0;
197
198 spin_lock_irqsave(&rxq->lock, flags);
199 write = rxq->write & ~0x7;
200 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
201 /* Get next free Rx buffer, remove from free list */
202 element = rxq->rx_free.next;
203 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
204 list_del(element);
205
206 /* Point to Rx buffer via next RBD in circular buffer */
207 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
208 rxq->queue[rxq->write] = rxb;
209 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
210 rxq->free_count--;
211 }
212 spin_unlock_irqrestore(&rxq->lock, flags);
213 /* If the pre-allocated buffer pool is dropping low, schedule to
214 * refill it */
215 if (rxq->free_count <= RX_LOW_WATERMARK)
216 queue_work(priv->workqueue, &priv->rx_replenish);
217
218
219 /* If we've added more space for the firmware to place data, tell it.
220 * Increment device's write pointer in multiples of 8. */
221 if ((write != (rxq->write & ~0x7))
222 || (abs(rxq->write - rxq->read) > 7)) {
223 spin_lock_irqsave(&rxq->lock, flags);
224 rxq->need_update = 1;
225 spin_unlock_irqrestore(&rxq->lock, flags);
226 ret = iwl_rx_queue_update_write_ptr(priv, rxq);
227 }
228
229 return ret;
230}
231EXPORT_SYMBOL(iwl_rx_queue_restock);
232
233
234/**
 235 * iwl_rx_replenish - Move all used packets from rx_used to rx_free
236 *
237 * When moving to rx_free an SKB is allocated for the slot.
238 *
239 * Also restock the Rx queue via iwl_rx_queue_restock.
240 * This is called as a scheduled work item (except for during initialization)
241 */
242void iwl_rx_allocate(struct iwl_priv *priv)
243{
244 struct iwl_rx_queue *rxq = &priv->rxq;
245 struct list_head *element;
246 struct iwl_rx_mem_buffer *rxb;
247 unsigned long flags;
248 spin_lock_irqsave(&rxq->lock, flags);
249 while (!list_empty(&rxq->rx_used)) {
250 element = rxq->rx_used.next;
251 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
252
253 /* Alloc a new receive buffer */
254 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
255 __GFP_NOWARN | GFP_ATOMIC);
256 if (!rxb->skb) {
257 if (net_ratelimit())
258 printk(KERN_CRIT DRV_NAME
259 ": Can not allocate SKB buffers\n");
260 /* We don't reschedule replenish work here -- we will
261 * call the restock method and if it still needs
262 * more buffers it will schedule replenish */
263 break;
264 }
265 priv->alloc_rxb_skb++;
266 list_del(element);
267
268 /* Get physical address of RB/SKB */
269 rxb->dma_addr =
270 pci_map_single(priv->pci_dev, rxb->skb->data,
271 priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
272 list_add_tail(&rxb->list, &rxq->rx_free);
273 rxq->free_count++;
274 }
275 spin_unlock_irqrestore(&rxq->lock, flags);
276}
277EXPORT_SYMBOL(iwl_rx_allocate);
278
279void iwl_rx_replenish(struct iwl_priv *priv)
280{
281 unsigned long flags;
282
283 iwl_rx_allocate(priv);
284
285 spin_lock_irqsave(&priv->lock, flags);
286 iwl_rx_queue_restock(priv);
287 spin_unlock_irqrestore(&priv->lock, flags);
288}
289EXPORT_SYMBOL(iwl_rx_replenish);
290
291
292/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 293 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 294 * This free routine walks the list of POOL entries and, if SKB is set to
 295 * non-NULL, it is unmapped and freed.
296 */
297void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
298{
299 int i;
300 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
301 if (rxq->pool[i].skb != NULL) {
302 pci_unmap_single(priv->pci_dev,
303 rxq->pool[i].dma_addr,
304 priv->hw_params.rx_buf_size,
305 PCI_DMA_FROMDEVICE);
306 dev_kfree_skb(rxq->pool[i].skb);
307 }
308 }
309
310 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
311 rxq->dma_addr);
312 rxq->bd = NULL;
313}
314EXPORT_SYMBOL(iwl_rx_queue_free);
315
316int iwl_rx_queue_alloc(struct iwl_priv *priv)
317{
318 struct iwl_rx_queue *rxq = &priv->rxq;
319 struct pci_dev *dev = priv->pci_dev;
320 int i;
321
322 spin_lock_init(&rxq->lock);
323 INIT_LIST_HEAD(&rxq->rx_free);
324 INIT_LIST_HEAD(&rxq->rx_used);
325
326 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
327 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
328 if (!rxq->bd)
329 return -ENOMEM;
330
331 /* Fill the rx_used queue with _all_ of the Rx buffers */
332 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
333 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
334
335 /* Set us so that we have processed and used all buffers, but have
336 * not restocked the Rx queue with fresh buffers */
337 rxq->read = rxq->write = 0;
338 rxq->free_count = 0;
339 rxq->need_update = 0;
340 return 0;
341}
342EXPORT_SYMBOL(iwl_rx_queue_alloc);
343
344void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
345{
346 unsigned long flags;
347 int i;
348 spin_lock_irqsave(&rxq->lock, flags);
349 INIT_LIST_HEAD(&rxq->rx_free);
350 INIT_LIST_HEAD(&rxq->rx_used);
351 /* Fill the rx_used queue with _all_ of the Rx buffers */
352 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
353 /* In the reset function, these buffers may have been allocated
354 * to an SKB, so we need to unmap and free potential storage */
355 if (rxq->pool[i].skb != NULL) {
356 pci_unmap_single(priv->pci_dev,
357 rxq->pool[i].dma_addr,
358 priv->hw_params.rx_buf_size,
359 PCI_DMA_FROMDEVICE);
360 priv->alloc_rxb_skb--;
361 dev_kfree_skb(rxq->pool[i].skb);
362 rxq->pool[i].skb = NULL;
363 }
364 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
365 }
366
367 /* Set us so that we have processed and used all buffers, but have
368 * not restocked the Rx queue with fresh buffers */
369 rxq->read = rxq->write = 0;
370 rxq->free_count = 0;
371 spin_unlock_irqrestore(&rxq->lock, flags);
372}
373EXPORT_SYMBOL(iwl_rx_queue_reset);
374
375int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
376{
377 int ret;
378 unsigned long flags;
379 unsigned int rb_size;
380
381 spin_lock_irqsave(&priv->lock, flags);
382 ret = iwl_grab_nic_access(priv);
383 if (ret) {
384 spin_unlock_irqrestore(&priv->lock, flags);
385 return ret;
386 }
387
388 if (priv->cfg->mod_params->amsdu_size_8K)
389 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
390 else
391 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
392
393 /* Stop Rx DMA */
394 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
395
396 /* Reset driver's Rx queue write index */
397 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
398
399 /* Tell device where to find RBD circular buffer in DRAM */
400 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
401 rxq->dma_addr >> 8);
402
403 /* Tell device where in DRAM to update its Rx status */
404 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
405 (priv->shared_phys + priv->rb_closed_offset) >> 4);
406
407 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
408 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
409 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
410 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
411 rb_size |
412 /* 0x10 << 4 | */
413 (RX_QUEUE_SIZE_LOG <<
414 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
415
416 /*
417 * iwl_write32(priv,CSR_INT_COAL_REG,0);
418 */
419
420 iwl_release_nic_access(priv);
421 spin_unlock_irqrestore(&priv->lock, flags);
422
423 return 0;
424}
425
426int iwl_rxq_stop(struct iwl_priv *priv)
427{
428 int ret;
429 unsigned long flags;
430
431 spin_lock_irqsave(&priv->lock, flags);
432 ret = iwl_grab_nic_access(priv);
433 if (unlikely(ret)) {
434 spin_unlock_irqrestore(&priv->lock, flags);
435 return ret;
436 }
437
438 /* stop Rx DMA */
439 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
440 ret = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
441 (1 << 24), 1000);
442 if (ret < 0)
443 IWL_ERROR("Can't stop Rx DMA.\n");
444
445 iwl_release_nic_access(priv);
446 spin_unlock_irqrestore(&priv->lock, flags);
447
448 return 0;
449}
450EXPORT_SYMBOL(iwl_rxq_stop);
451
452void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
453 struct iwl_rx_mem_buffer *rxb)
454
455{
456 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
457 struct iwl4965_missed_beacon_notif *missed_beacon;
458
459 missed_beacon = &pkt->u.missed_beacon;
460 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
461 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
462 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
463 le32_to_cpu(missed_beacon->total_missed_becons),
464 le32_to_cpu(missed_beacon->num_recvd_beacons),
465 le32_to_cpu(missed_beacon->num_expected_beacons));
466 if (!test_bit(STATUS_SCANNING, &priv->status))
467 iwl_init_sensitivity(priv);
468 }
469}
470EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
471
472int iwl_rx_agg_start(struct iwl_priv *priv, const u8 *addr, int tid, u16 ssn)
473{
474 unsigned long flags;
475 int sta_id;
476
477 sta_id = iwl_find_station(priv, addr);
478 if (sta_id == IWL_INVALID_STATION)
479 return -ENXIO;
480
481 spin_lock_irqsave(&priv->sta_lock, flags);
482 priv->stations[sta_id].sta.station_flags_msk = 0;
483 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
484 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
485 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
486 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
487 spin_unlock_irqrestore(&priv->sta_lock, flags);
488
489 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
490 CMD_ASYNC);
491}
492EXPORT_SYMBOL(iwl_rx_agg_start);
493
494int iwl_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
495{
496 unsigned long flags;
497 int sta_id;
498
499 sta_id = iwl_find_station(priv, addr);
500 if (sta_id == IWL_INVALID_STATION)
501 return -ENXIO;
502
503 spin_lock_irqsave(&priv->sta_lock, flags);
504 priv->stations[sta_id].sta.station_flags_msk = 0;
505 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
506 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
507 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
508 spin_unlock_irqrestore(&priv->sta_lock, flags);
509
510 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
511 CMD_ASYNC);
512}
513EXPORT_SYMBOL(iwl_rx_agg_stop);
514
515
516/* Calculate noise level, based on measurements during network silence just
517 * before arriving beacon. This measurement can be done only if we know
518 * exactly when to expect beacons, therefore only when we're associated. */
519static void iwl_rx_calc_noise(struct iwl_priv *priv)
520{
521 struct statistics_rx_non_phy *rx_info
522 = &(priv->statistics.rx.general);
523 int num_active_rx = 0;
524 int total_silence = 0;
525 int bcn_silence_a =
526 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
527 int bcn_silence_b =
528 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
529 int bcn_silence_c =
530 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
531
532 if (bcn_silence_a) {
533 total_silence += bcn_silence_a;
534 num_active_rx++;
535 }
536 if (bcn_silence_b) {
537 total_silence += bcn_silence_b;
538 num_active_rx++;
539 }
540 if (bcn_silence_c) {
541 total_silence += bcn_silence_c;
542 num_active_rx++;
543 }
544
545 /* Average among active antennas */
546 if (num_active_rx)
547 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
548 else
549 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
550
551 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
552 bcn_silence_a, bcn_silence_b, bcn_silence_c,
553 priv->last_rx_noise);
554}
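A worked example of the averaging above: if the masked beacon-silence values are a = 45, b = 40 and c = 0, only two chains count as active, total_silence is 85, and last_rx_noise becomes 85 / 2 - 107 = 42 - 107 = -65 dBm. With all three silence values zero, the function falls back to IWL_NOISE_MEAS_NOT_AVAILABLE.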
555
556#define REG_RECALIB_PERIOD (60)
557
558void iwl_rx_statistics(struct iwl_priv *priv,
559 struct iwl_rx_mem_buffer *rxb)
560{
561 int change;
562 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
563
564 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
565 (int)sizeof(priv->statistics), pkt->len);
566
567 change = ((priv->statistics.general.temperature !=
568 pkt->u.stats.general.temperature) ||
569 ((priv->statistics.flag &
570 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
571 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
572
573 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
574
575 set_bit(STATUS_STATISTICS, &priv->status);
576
577 /* Reschedule the statistics timer to occur in
578 * REG_RECALIB_PERIOD seconds to ensure we get a
579 * thermal update even if the uCode doesn't give
580 * us one */
581 mod_timer(&priv->statistics_periodic, jiffies +
582 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
583
584 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
585 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
586 iwl_rx_calc_noise(priv);
587 queue_work(priv->workqueue, &priv->run_time_calib_work);
588 }
589
590 iwl_leds_background(priv);
591
592 if (priv->cfg->ops->lib->temperature && change)
593 priv->cfg->ops->lib->temperature(priv);
594}
595EXPORT_SYMBOL(iwl_rx_statistics);
596
597#define PERFECT_RSSI (-20) /* dBm */
598#define WORST_RSSI (-95) /* dBm */
599#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
600
601/* Calculate an indication of rx signal quality (a percentage, not dBm!).
602 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
603 * about formulas used below. */
604static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
605{
606 int sig_qual;
607 int degradation = PERFECT_RSSI - rssi_dbm;
608
609 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
610 * as indicator; formula is (signal dbm - noise dbm).
611 * SNR at or above 40 is a great signal (100%).
612 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
613 * Weakest usable signal is usually 10 - 15 dB SNR. */
614 if (noise_dbm) {
615 if (rssi_dbm - noise_dbm >= 40)
616 return 100;
617 else if (rssi_dbm < noise_dbm)
618 return 0;
619 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
620
621 /* Else use just the signal level.
622 * This formula is a least squares fit of data points collected and
623 * compared with a reference system that had a percentage (%) display
624 * for signal quality. */
625 } else
626 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
627 (15 * RSSI_RANGE + 62 * degradation)) /
628 (RSSI_RANGE * RSSI_RANGE);
629
630 if (sig_qual > 100)
631 sig_qual = 100;
632 else if (sig_qual < 1)
633 sig_qual = 0;
634
635 return sig_qual;
636}
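Two worked examples of the formulas above: with a noise measurement, rssi_dbm = -60 and noise_dbm = -85 give an SNR of 25 dB, so sig_qual = (25 * 5) / 2 = 62%. Without a noise measurement, rssi_dbm = -75 gives degradation = -20 - (-75) = 55 against RSSI_RANGE = 75, so sig_qual = (100 * 75 * 75 - 55 * (15 * 75 + 62 * 55)) / (75 * 75) = 313075 / 5625 = 55% (integer arithmetic).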
637
638#ifdef CONFIG_IWLWIFI_DEBUG
639
640/**
641 * iwl_dbg_report_frame - dump frame to syslog during debug sessions
642 *
643 * You may hack this function to show different aspects of received frames,
644 * including selective frame dumps.
645 * group100 parameter selects whether to show 1 out of 100 good frames.
646 *
647 * TODO: This was originally written for 3945, need to audit for
648 * proper operation with 4965.
649 */
650static void iwl_dbg_report_frame(struct iwl_priv *priv,
651 struct iwl_rx_packet *pkt,
652 struct ieee80211_hdr *header, int group100)
653{
654 u32 to_us;
655 u32 print_summary = 0;
656 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
657 u32 hundred = 0;
658 u32 dataframe = 0;
659 __le16 fc;
660 u16 seq_ctl;
661 u16 channel;
662 u16 phy_flags;
663 int rate_sym;
664 u16 length;
665 u16 status;
666 u16 bcn_tmr;
667 u32 tsf_low;
668 u64 tsf;
669 u8 rssi;
670 u8 agc;
671 u16 sig_avg;
672 u16 noise_diff;
673 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
674 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
675 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
676 u8 *data = IWL_RX_DATA(pkt);
677
678 if (likely(!(priv->debug_level & IWL_DL_RX)))
679 return;
680
681 /* MAC header */
682 fc = header->frame_control;
683 seq_ctl = le16_to_cpu(header->seq_ctrl);
684
685 /* metadata */
686 channel = le16_to_cpu(rx_hdr->channel);
687 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
688 rate_sym = rx_hdr->rate;
689 length = le16_to_cpu(rx_hdr->len);
690
691 /* end-of-frame status and timestamp */
692 status = le32_to_cpu(rx_end->status);
693 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
694 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
695 tsf = le64_to_cpu(rx_end->timestamp);
696
697 /* signal statistics */
698 rssi = rx_stats->rssi;
699 agc = rx_stats->agc;
700 sig_avg = le16_to_cpu(rx_stats->sig_avg);
701 noise_diff = le16_to_cpu(rx_stats->noise_diff);
702
703 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
704
705 /* if data frame is to us and all is good,
706 * (optionally) print summary for only 1 out of every 100 */
707 if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
708 cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
709 dataframe = 1;
710 if (!group100)
711 print_summary = 1; /* print each frame */
712 else if (priv->framecnt_to_us < 100) {
713 priv->framecnt_to_us++;
714 print_summary = 0;
715 } else {
716 priv->framecnt_to_us = 0;
717 print_summary = 1;
718 hundred = 1;
719 }
720 } else {
721 /* print summary for all other frames */
722 print_summary = 1;
723 }
724
725 if (print_summary) {
726 char *title;
727 int rate_idx;
728 u32 bitrate;
729
730 if (hundred)
731 title = "100Frames";
732 else if (ieee80211_has_retry(fc))
733 title = "Retry";
734 else if (ieee80211_is_assoc_resp(fc))
735 title = "AscRsp";
736 else if (ieee80211_is_reassoc_resp(fc))
737 title = "RasRsp";
738 else if (ieee80211_is_probe_resp(fc)) {
739 title = "PrbRsp";
740 print_dump = 1; /* dump frame contents */
741 } else if (ieee80211_is_beacon(fc)) {
742 title = "Beacon";
743 print_dump = 1; /* dump frame contents */
744 } else if (ieee80211_is_atim(fc))
745 title = "ATIM";
746 else if (ieee80211_is_auth(fc))
747 title = "Auth";
748 else if (ieee80211_is_deauth(fc))
749 title = "DeAuth";
750 else if (ieee80211_is_disassoc(fc))
751 title = "DisAssoc";
752 else
753 title = "Frame";
754
755 rate_idx = iwl_hwrate_to_plcp_idx(rate_sym);
756 if (unlikely(rate_idx == -1))
757 bitrate = 0;
758 else
759 bitrate = iwl_rates[rate_idx].ieee / 2;
760
761 /* print frame summary.
762 * MAC addresses show just the last byte (for brevity),
763 * but you can hack it to show more, if you'd like to. */
764 if (dataframe)
765 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
766 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
767 title, le16_to_cpu(fc), header->addr1[5],
768 length, rssi, channel, bitrate);
769 else {
770 /* src/dst addresses assume managed mode */
771 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
772 "src=0x%02x, rssi=%u, tim=%lu usec, "
773 "phy=0x%02x, chnl=%d\n",
774 title, le16_to_cpu(fc), header->addr1[5],
775 header->addr3[5], rssi,
776 tsf_low - priv->scan_start_tsf,
777 phy_flags, channel);
778 }
779 }
780 if (print_dump)
781 iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
782}
783#else
784static inline void iwl_dbg_report_frame(struct iwl_priv *priv,
785 struct iwl_rx_packet *pkt,
786 struct ieee80211_hdr *header,
787 int group100)
788{
789}
790#endif
791
792static void iwl_add_radiotap(struct iwl_priv *priv,
793 struct sk_buff *skb,
794 struct iwl4965_rx_phy_res *rx_start,
795 struct ieee80211_rx_status *stats,
796 u32 ampdu_status)
797{
798 s8 signal = stats->signal;
799 s8 noise = 0;
800 int rate = stats->rate_idx;
801 u64 tsf = stats->mactime;
802 __le16 antenna;
803 __le16 phy_flags_hw = rx_start->phy_flags;
804 struct iwl4965_rt_rx_hdr {
805 struct ieee80211_radiotap_header rt_hdr;
806 __le64 rt_tsf; /* TSF */
807 u8 rt_flags; /* radiotap packet flags */
808 u8 rt_rate; /* rate in 500kb/s */
809 __le16 rt_channelMHz; /* channel in MHz */
810 __le16 rt_chbitmask; /* channel bitfield */
811 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
812 s8 rt_dbmnoise;
813 u8 rt_antenna; /* antenna number */
814 } __attribute__ ((packed)) *iwl4965_rt;
815
816 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
817 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
818 if (net_ratelimit())
819 printk(KERN_ERR "not enough headroom [%d] for "
820 "radiotap head [%zd]\n",
821 skb_headroom(skb), sizeof(*iwl4965_rt));
822 return;
823 }
824
825 /* put radiotap header in front of 802.11 header and data */
826 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
827
828 /* initialise radiotap header */
829 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
830 iwl4965_rt->rt_hdr.it_pad = 0;
831
832 /* total header + data */
833 put_unaligned_le16(sizeof(*iwl4965_rt), &iwl4965_rt->rt_hdr.it_len);
834
835 /* Indicate all the fields we add to the radiotap header */
836 put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
837 (1 << IEEE80211_RADIOTAP_FLAGS) |
838 (1 << IEEE80211_RADIOTAP_RATE) |
839 (1 << IEEE80211_RADIOTAP_CHANNEL) |
840 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
841 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
842 (1 << IEEE80211_RADIOTAP_ANTENNA),
843 &(iwl4965_rt->rt_hdr.it_present));
844
845 /* Zero the flags, we'll add to them as we go */
846 iwl4965_rt->rt_flags = 0;
847
848 put_unaligned_le64(tsf, &iwl4965_rt->rt_tsf);
849
850 iwl4965_rt->rt_dbmsignal = signal;
851 iwl4965_rt->rt_dbmnoise = noise;
852
853 /* Convert the channel frequency and set the flags */
854 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
855 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
856 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
857 &iwl4965_rt->rt_chbitmask);
858 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
859 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
860 &iwl4965_rt->rt_chbitmask);
861 else /* 802.11g */
862 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
863 &iwl4965_rt->rt_chbitmask);
864
865 if (rate == -1)
866 iwl4965_rt->rt_rate = 0;
867 else
868 iwl4965_rt->rt_rate = iwl_rates[rate].ieee;
869
870 /*
871 * "antenna number"
872 *
873 * It seems that the antenna field in the phy flags value
874 * is actually a bitfield. This is undefined by radiotap,
875 * it wants an actual antenna number but I always get "7"
876 * for most legacy frames I receive indicating that the
877 * same frame was received on all three RX chains.
878 *
879 * I think this field should be removed in favour of a
880 * new 802.11n radiotap field "RX chains" that is defined
881 * as a bitmask.
882 */
883 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
884 iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
885
886 /* set the preamble flag if appropriate */
887 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
888 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
889
890 stats->flag |= RX_FLAG_RADIOTAP;
891}
892
893static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
894{
895 /* 0 - mgmt, 1 - cnt, 2 - data */
896 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
897 priv->rx_stats[idx].cnt++;
898 priv->rx_stats[idx].bytes += len;
899}
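The index above works because the IEEE 802.11 frame-control type field sits in bits 2-3: IEEE80211_FTYPE_MGMT (0x0000), IEEE80211_FTYPE_CTL (0x0004) and IEEE80211_FTYPE_DATA (0x0008) shift down to 0, 1 and 2 respectively, matching the "0 - mgmt, 1 - cnt, 2 - data" slots of priv->rx_stats.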
900
901/*
902 * returns non-zero if packet should be dropped
903 */
904static int iwl_set_decrypted_flag(struct iwl_priv *priv,
905 struct ieee80211_hdr *hdr,
906 u32 decrypt_res,
907 struct ieee80211_rx_status *stats)
908{
909 u16 fc = le16_to_cpu(hdr->frame_control);
910
911 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
912 return 0;
913
914 if (!(fc & IEEE80211_FCTL_PROTECTED))
915 return 0;
916
917 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
918 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
919 case RX_RES_STATUS_SEC_TYPE_TKIP:
 920		/* The uCode got a bad phase 1 key but still pushes the packet up;
 921		 * decryption will be done in SW. */
922 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
923 RX_RES_STATUS_BAD_KEY_TTAK)
924 break;
925
926 case RX_RES_STATUS_SEC_TYPE_WEP:
927 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
928 RX_RES_STATUS_BAD_ICV_MIC) {
929 /* bad ICV, the packet is destroyed since the
 930			 * decryption is done in place, so drop it */
931 IWL_DEBUG_RX("Packet destroyed\n");
932 return -1;
933 }
934 case RX_RES_STATUS_SEC_TYPE_CCMP:
935 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
936 RX_RES_STATUS_DECRYPT_OK) {
937 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
938 stats->flag |= RX_FLAG_DECRYPTED;
939 }
940 break;
941
942 default:
943 break;
944 }
945 return 0;
946}
947
948static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
949{
950 u32 decrypt_out = 0;
951
952 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
953 RX_RES_STATUS_STATION_FOUND)
954 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
955 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
956
957 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
958
959 /* packet was not encrypted */
960 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
961 RX_RES_STATUS_SEC_TYPE_NONE)
962 return decrypt_out;
963
964 /* packet was encrypted with unknown alg */
965 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
966 RX_RES_STATUS_SEC_TYPE_ERR)
967 return decrypt_out;
968
969 /* decryption was not done in HW */
970 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
971 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
972 return decrypt_out;
973
974 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
975
976 case RX_RES_STATUS_SEC_TYPE_CCMP:
977 /* alg is CCM: check MIC only */
978 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
979 /* Bad MIC */
980 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
981 else
982 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
983
984 break;
985
986 case RX_RES_STATUS_SEC_TYPE_TKIP:
987 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
988 /* Bad TTAK */
989 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
990 break;
991 }
992 /* fall through if TTAK OK */
993 default:
994 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
995 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
996 else
997 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
998 break;
999 };
1000
1001 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
1002 decrypt_in, decrypt_out);
1003
1004 return decrypt_out;
1005}
1006
1007static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1008 int include_phy,
1009 struct iwl_rx_mem_buffer *rxb,
1010 struct ieee80211_rx_status *stats)
1011{
1012 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1013 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
1014 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
1015 struct ieee80211_hdr *hdr;
1016 u16 len;
1017 __le32 *rx_end;
1018 unsigned int skblen;
1019 u32 ampdu_status;
1020 u32 ampdu_status_legacy;
1021
1022 if (!include_phy && priv->last_phy_res[0])
1023 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
1024
1025 if (!rx_start) {
1026 IWL_ERROR("MPDU frame without a PHY data\n");
1027 return;
1028 }
1029 if (include_phy) {
1030 hdr = (struct ieee80211_hdr *)((u8 *) &rx_start[1] +
1031 rx_start->cfg_phy_cnt);
1032
1033 len = le16_to_cpu(rx_start->byte_count);
1034
1035 rx_end = (__le32 *) ((u8 *) &pkt->u.raw[0] +
1036 sizeof(struct iwl4965_rx_phy_res) +
1037 rx_start->cfg_phy_cnt + len);
1038
1039 } else {
1040 struct iwl4965_rx_mpdu_res_start *amsdu =
1041 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
1042
1043 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
1044 sizeof(struct iwl4965_rx_mpdu_res_start));
1045 len = le16_to_cpu(amsdu->byte_count);
1046 rx_start->byte_count = amsdu->byte_count;
1047 rx_end = (__le32 *) (((u8 *) hdr) + len);
1048 }
1049
1050 ampdu_status = le32_to_cpu(*rx_end);
1051 skblen = ((u8 *) rx_end - (u8 *) &pkt->u.raw[0]) + sizeof(u32);
1052
1053 if (!include_phy) {
1054 /* New status scheme, need to translate */
1055 ampdu_status_legacy = ampdu_status;
1056 ampdu_status = iwl_translate_rx_status(priv, ampdu_status);
1057 }
1058
1059 /* start from MAC */
1060 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
1061 skb_put(rxb->skb, len); /* end where data ends */
1062
1063 /* We only process data packets if the interface is open */
1064 if (unlikely(!priv->is_open)) {
1065 IWL_DEBUG_DROP_LIMIT
1066 ("Dropping packet while interface is not open.\n");
1067 return;
1068 }
1069
1070 hdr = (struct ieee80211_hdr *)rxb->skb->data;
1071
1072 /* in case of HW accelerated crypto and bad decryption, drop */
1073 if (!priv->hw_params.sw_crypto &&
1074 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
1075 return;
1076
1077 if (priv->add_radiotap)
1078 iwl_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
1079
1080 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
1081 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
1082 priv->alloc_rxb_skb--;
1083 rxb->skb = NULL;
1084}
1085
1086/* Calc max signal level (dBm) among 3 possible receivers */
1087static int iwl_calc_rssi(struct iwl_priv *priv,
1088 struct iwl4965_rx_phy_res *rx_resp)
1089{
1090 /* data from PHY/DSP regarding signal strength, etc.,
1091 * contents are always there, not configurable by host. */
1092 struct iwl4965_rx_non_cfg_phy *ncphy =
1093 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
1094 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
1095 >> IWL_AGC_DB_POS;
1096
1097 u32 valid_antennae =
1098 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
1099 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
1100 u8 max_rssi = 0;
1101 u32 i;
1102
1103 /* Find max rssi among 3 possible receivers.
1104 * These values are measured by the digital signal processor (DSP).
1105 * They should stay fairly constant even as the signal strength varies,
1106 * if the radio's automatic gain control (AGC) is working right.
1107 * AGC value (see below) will provide the "interesting" info. */
1108 for (i = 0; i < 3; i++)
1109 if (valid_antennae & (1 << i))
1110 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
1111
1112 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
1113 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
1114 max_rssi, agc);
1115
1116 /* dBm = max_rssi dB - agc dB - constant.
1117 * Higher AGC (higher radio gain) means lower signal. */
1118 return max_rssi - agc - IWL_RSSI_OFFSET;
1119}
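For instance, if the strongest chain reports max_rssi = 50 and the AGC value is 70, and IWL_RSSI_OFFSET is 44 (its value elsewhere in the driver headers; treated as an assumption here), the reported signal level is 50 - 70 - 44 = -64 dBm.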
1120
1121static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1122{
1123 unsigned long flags;
1124
1125 spin_lock_irqsave(&priv->sta_lock, flags);
1126 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
1127 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
1128 priv->stations[sta_id].sta.sta.modify_mask = 0;
1129 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1130 spin_unlock_irqrestore(&priv->sta_lock, flags);
1131
1132 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1133}
1134
1135static void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
1136{
1137 /* FIXME: need locking over ps_status ??? */
1138 u8 sta_id = iwl_find_station(priv, addr);
1139
1140 if (sta_id != IWL_INVALID_STATION) {
1141 u8 sta_awake = priv->stations[sta_id].
1142 ps_status == STA_PS_STATUS_WAKE;
1143
1144 if (sta_awake && ps_bit)
1145 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
1146 else if (!sta_awake && !ps_bit) {
1147 iwl_sta_modify_ps_wake(priv, sta_id);
1148 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
1149 }
1150 }
1151}
1152
1153/* This is necessary only for a number of statistics, see the caller. */
1154static int iwl_is_network_packet(struct iwl_priv *priv,
1155 struct ieee80211_hdr *header)
1156{
1157 /* Filter incoming packets to determine if they are targeted toward
1158 * this network, discarding packets coming from ourselves */
1159 switch (priv->iw_mode) {
1160 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
1161 /* packets to our IBSS update information */
1162 return !compare_ether_addr(header->addr3, priv->bssid);
1163 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
 1164		/* packets from our BSS (the AP) update information */
1165 return !compare_ether_addr(header->addr2, priv->bssid);
1166 default:
1167 return 1;
1168 }
1169}
1170
1171/* Called for REPLY_RX (legacy ABG frames), or
1172 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
1173void iwl_rx_reply_rx(struct iwl_priv *priv,
1174 struct iwl_rx_mem_buffer *rxb)
1175{
1176 struct ieee80211_hdr *header;
1177 struct ieee80211_rx_status rx_status;
1178 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1179 /* Use phy data (Rx signal strength, etc.) contained within
1180 * this rx packet for legacy frames,
1181 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
1182 int include_phy = (pkt->hdr.cmd == REPLY_RX);
1183 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
1184 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
1185 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
1186 __le32 *rx_end;
1187 unsigned int len = 0;
1188 u16 fc;
1189 u8 network_packet;
1190
1191 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
1192 rx_status.freq =
1193 ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel));
1194 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1195 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1196 rx_status.rate_idx =
1197 iwl_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags));
1198 if (rx_status.band == IEEE80211_BAND_5GHZ)
1199 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
1200
1201 rx_status.antenna = 0;
1202 rx_status.flag = 0;
1203 rx_status.flag |= RX_FLAG_TSFT;
1204
1205 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
 1206		IWL_DEBUG_DROP("dsp size out of range [0,20]: %d\n",
1207 rx_start->cfg_phy_cnt);
1208 return;
1209 }
1210
1211 if (!include_phy) {
1212 if (priv->last_phy_res[0])
1213 rx_start = (struct iwl4965_rx_phy_res *)
1214 &priv->last_phy_res[1];
1215 else
1216 rx_start = NULL;
1217 }
1218
1219 if (!rx_start) {
1220 IWL_ERROR("MPDU frame without a PHY data\n");
1221 return;
1222 }
1223
1224 if (include_phy) {
1225 header = (struct ieee80211_hdr *)((u8 *) &rx_start[1]
1226 + rx_start->cfg_phy_cnt);
1227
1228 len = le16_to_cpu(rx_start->byte_count);
1229 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
1230 sizeof(struct iwl4965_rx_phy_res) + len);
1231 } else {
1232 struct iwl4965_rx_mpdu_res_start *amsdu =
1233 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
1234
1235 header = (void *)(pkt->u.raw +
1236 sizeof(struct iwl4965_rx_mpdu_res_start));
1237 len = le16_to_cpu(amsdu->byte_count);
1238 rx_end = (__le32 *) (pkt->u.raw +
1239 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
1240 }
1241
1242 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
1243 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1244 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
1245 le32_to_cpu(*rx_end));
1246 return;
1247 }
1248
1249 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
1250
1251 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1252 rx_status.signal = iwl_calc_rssi(priv, rx_start);
1253
1254 /* Meaningful noise values are available only from beacon statistics,
1255 * which are gathered only when associated, and indicate noise
1256 * only for the associated network channel ...
1257 * Ignore these noise values while scanning (other channels) */
1258 if (iwl_is_associated(priv) &&
1259 !test_bit(STATUS_SCANNING, &priv->status)) {
1260 rx_status.noise = priv->last_rx_noise;
1261 rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
1262 rx_status.noise);
1263 } else {
1264 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
1265 rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
1266 }
1267
1268 /* Reset beacon noise level if not associated. */
1269 if (!iwl_is_associated(priv))
1270 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
1271
1272 /* Set "1" to report good data frames in groups of 100 */
 1273	/* FIXME: need to optimize the call: */
1274 iwl_dbg_report_frame(priv, pkt, header, 1);
1275
1276 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
 1277		rx_status.signal, rx_status.noise, rx_status.qual,
1278 (unsigned long long)rx_status.mactime);
1279
1280 /* Take shortcut when only in monitor mode */
1281 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
1282 iwl_pass_packet_to_mac80211(priv, include_phy,
1283 rxb, &rx_status);
1284 return;
1285 }
1286
1287 network_packet = iwl_is_network_packet(priv, header);
1288 if (network_packet) {
1289 priv->last_rx_rssi = rx_status.signal;
1290 priv->last_beacon_time = priv->ucode_beacon_time;
1291 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
1292 }
1293
1294 fc = le16_to_cpu(header->frame_control);
1295 switch (fc & IEEE80211_FCTL_FTYPE) {
1296 case IEEE80211_FTYPE_MGMT:
1297 case IEEE80211_FTYPE_DATA:
1298 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
1299 iwl_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
1300 header->addr2);
1301 /* fall through */
1302 default:
1303 iwl_pass_packet_to_mac80211(priv, include_phy, rxb,
1304 &rx_status);
1305 break;
1306
1307 }
1308}
1309EXPORT_SYMBOL(iwl_rx_reply_rx);
1310
1311/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1312 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1313void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
1314 struct iwl_rx_mem_buffer *rxb)
1315{
1316 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1317 priv->last_phy_res[0] = 1;
1318 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1319 sizeof(struct iwl4965_rx_phy_res));
1320}
1321EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
new file mode 100644
index 000000000000..efc750d2fc5c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -0,0 +1,931 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Tomas Winkler <tomas.winkler@intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <net/mac80211.h>
29#include <linux/etherdevice.h>
30
31#include "iwl-eeprom.h"
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-io.h"
36#include "iwl-helpers.h"
37
38/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
39 * sending probe req. This should be set long enough to hear probe responses
40 * from more than one AP. */
41#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
42#define IWL_ACTIVE_DWELL_TIME_52 (20)
43
44#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
45#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
46
47/* For faster active scanning, scan will move to the next channel if fewer than
48 * PLCP_QUIET_THRESH packets are heard on this channel within
49 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
50 * time if it's a quiet channel (nothing responded to our probe, and there's
51 * no other traffic).
52 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
53#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
54#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(10) /* msec */
55
56/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
57 * Must be set longer than active dwell time.
58 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
59#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
60#define IWL_PASSIVE_DWELL_TIME_52 (10)
61#define IWL_PASSIVE_DWELL_BASE (100)
62#define IWL_CHANNEL_TUNE_TIME 5
63
64#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
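/* Worked expansion (descriptive note): IWL_SCAN_PROBE_MASK(2)
 *   = cpu_to_le32(BIT(2) | (BIT(2) - BIT(1)))
 *   = cpu_to_le32(0x4 | 0x2) = cpu_to_le32(0x6)
 * i.e. a mask with bits 1..n set (bit 0 clear), matching n_probes = 2. */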
65
66
67static int scan_tx_ant[3] = {
68 RATE_MCS_ANT_A_MSK, RATE_MCS_ANT_B_MSK, RATE_MCS_ANT_C_MSK
69};
70
71
72
73static int iwl_is_empty_essid(const char *essid, int essid_len)
74{
75 /* Single white space is for Linksys APs */
76 if (essid_len == 1 && essid[0] == ' ')
77 return 1;
78
79 /* Otherwise, if the entire essid is 0, we assume it is hidden */
80 while (essid_len) {
81 essid_len--;
82 if (essid[essid_len] != '\0')
83 return 0;
84 }
85
86 return 1;
87}
88
89
90
91const char *iwl_escape_essid(const char *essid, u8 essid_len)
92{
93 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
94 const char *s = essid;
95 char *d = escaped;
96
97 if (iwl_is_empty_essid(essid, essid_len)) {
98 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
99 return escaped;
100 }
101
102 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
103 while (essid_len--) {
104 if (*s == '\0') {
105 *d++ = '\\';
106 *d++ = '0';
107 s++;
108 } else
109 *d++ = *s++;
110 }
111 *d = '\0';
112 return escaped;
113}
114EXPORT_SYMBOL(iwl_escape_essid);
115
116/**
117 * iwl_scan_cancel - Cancel any currently executing HW scan
118 *
119 * NOTE: priv->mutex is not required before calling this function
120 */
121int iwl_scan_cancel(struct iwl_priv *priv)
122{
123 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
124 clear_bit(STATUS_SCANNING, &priv->status);
125 return 0;
126 }
127
128 if (test_bit(STATUS_SCANNING, &priv->status)) {
129 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
130 IWL_DEBUG_SCAN("Queuing scan abort.\n");
131 set_bit(STATUS_SCAN_ABORTING, &priv->status);
132 queue_work(priv->workqueue, &priv->abort_scan);
133
134 } else
135 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
136
137 return test_bit(STATUS_SCANNING, &priv->status);
138 }
139
140 return 0;
141}
142EXPORT_SYMBOL(iwl_scan_cancel);
143/**
144 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
145 * @ms: amount of time to wait (in milliseconds) for scan to abort
146 *
147 * NOTE: priv->mutex must be held before calling this function
148 */
149int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
150{
151 unsigned long now = jiffies;
152 int ret;
153
154 ret = iwl_scan_cancel(priv);
155 if (ret && ms) {
156 mutex_unlock(&priv->mutex);
157 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
158 test_bit(STATUS_SCANNING, &priv->status))
159 msleep(1);
160 mutex_lock(&priv->mutex);
161
162 return test_bit(STATUS_SCANNING, &priv->status);
163 }
164
165 return ret;
166}
167EXPORT_SYMBOL(iwl_scan_cancel_timeout);
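A minimal caller sketch (hypothetical, not part of this patch) of the contract spelled out in the two comments above: iwl_scan_cancel() needs no lock, while iwl_scan_cancel_timeout() expects priv->mutex to be held and briefly drops it while polling for the scan to stop:

	mutex_lock(&priv->mutex);
	if (iwl_scan_cancel_timeout(priv, 100))
		IWL_DEBUG_SCAN("scan still pending after 100 ms\n");
	mutex_unlock(&priv->mutex);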
168
169static int iwl_send_scan_abort(struct iwl_priv *priv)
170{
171 int ret = 0;
172 struct iwl_rx_packet *res;
173 struct iwl_host_cmd cmd = {
174 .id = REPLY_SCAN_ABORT_CMD,
175 .meta.flags = CMD_WANT_SKB,
176 };
177
178 /* If there isn't a scan actively going on in the hardware
179 * then we are in between scan bands and not actually
180 * actively scanning, so don't send the abort command */
181 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
182 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
183 return 0;
184 }
185
186 ret = iwl_send_cmd_sync(priv, &cmd);
187 if (ret) {
188 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
189 return ret;
190 }
191
192 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
193 if (res->u.status != CAN_ABORT_STATUS) {
194 /* The scan abort will return 1 for success or
195 * 2 for "failure". A failure condition can be
196 * due to simply not being in an active scan which
198 * can occur if we send the scan abort before
198 * the microcode has notified us that a scan is
199 * completed. */
200 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
201 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
202 clear_bit(STATUS_SCAN_HW, &priv->status);
203 }
204
205 dev_kfree_skb_any(cmd.meta.u.skb);
206
207 return ret;
208}
209
210
211/* Service response to REPLY_SCAN_CMD (0x80) */
212static void iwl_rx_reply_scan(struct iwl_priv *priv,
213 struct iwl_rx_mem_buffer *rxb)
214{
215#ifdef CONFIG_IWLWIFI_DEBUG
216 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
217 struct iwl_scanreq_notification *notif =
218 (struct iwl_scanreq_notification *)pkt->u.raw;
219
220 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
221#endif
222}
223
224/* Service SCAN_START_NOTIFICATION (0x82) */
225static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
226 struct iwl_rx_mem_buffer *rxb)
227{
228 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
229 struct iwl_scanstart_notification *notif =
230 (struct iwl_scanstart_notification *)pkt->u.raw;
231 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
232 IWL_DEBUG_SCAN("Scan start: "
233 "%d [802.11%s] "
234 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
235 notif->channel,
236 notif->band ? "bg" : "a",
237 le32_to_cpu(notif->tsf_high),
238 le32_to_cpu(notif->tsf_low),
239 notif->status, notif->beacon_timer);
240}
241
242/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
243static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
244 struct iwl_rx_mem_buffer *rxb)
245{
246#ifdef CONFIG_IWLWIFI_DEBUG
247 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
248 struct iwl_scanresults_notification *notif =
249 (struct iwl_scanresults_notification *)pkt->u.raw;
250
251 IWL_DEBUG_SCAN("Scan ch.res: "
252 "%d [802.11%s] "
253 "(TSF: 0x%08X:%08X) - %d "
254 "elapsed=%lu usec (%dms since last)\n",
255 notif->channel,
256 notif->band ? "bg" : "a",
257 le32_to_cpu(notif->tsf_high),
258 le32_to_cpu(notif->tsf_low),
259 le32_to_cpu(notif->statistics[0]),
260 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
261 jiffies_to_msecs(elapsed_jiffies
262 (priv->last_scan_jiffies, jiffies)));
263#endif
264
265 priv->last_scan_jiffies = jiffies;
266 priv->next_scan_jiffies = 0;
267}
268
269/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
270static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
271 struct iwl_rx_mem_buffer *rxb)
272{
273 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
274 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
275
276 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
277 scan_notif->scanned_channels,
278 scan_notif->tsf_low,
279 scan_notif->tsf_high, scan_notif->status);
280
281 /* The HW is no longer scanning */
282 clear_bit(STATUS_SCAN_HW, &priv->status);
283
284 /* The scan completion notification came in, so kill that timer... */
285 cancel_delayed_work(&priv->scan_check);
286
287 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
288 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
289 "2.4" : "5.2",
290 jiffies_to_msecs(elapsed_jiffies
291 (priv->scan_pass_start, jiffies)));
292
293 /* Remove this scanned band from the list of pending
294 * bands to scan; band G precedes band A in scan order,
295 * as seen in iwl_bg_request_scan */
296 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
297 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
298 else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
299 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
300
301 /* If a request to abort was given, or the scan did not succeed
302 * then we reset the scan state machine and terminate,
303 * re-queuing another scan if one has been requested */
304 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
305 IWL_DEBUG_INFO("Aborted scan completed.\n");
306 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
307 } else {
308 /* If there are more bands on this scan pass reschedule */
309 if (priv->scan_bands)
310 goto reschedule;
311 }
312
313 priv->last_scan_jiffies = jiffies;
314 priv->next_scan_jiffies = 0;
315 IWL_DEBUG_INFO("Setting scan to off\n");
316
317 clear_bit(STATUS_SCANNING, &priv->status);
318
319 IWL_DEBUG_INFO("Scan took %dms\n",
320 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
321
322 queue_work(priv->workqueue, &priv->scan_completed);
323
324 return;
325
326reschedule:
327 priv->scan_pass_start = jiffies;
328 queue_work(priv->workqueue, &priv->request_scan);
329}
330
331void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
332{
333 /* scan handlers */
334 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
335 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
336 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
337 iwl_rx_scan_results_notif;
338 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
339 iwl_rx_scan_complete_notif;
340}
341EXPORT_SYMBOL(iwl_setup_rx_scan_handlers);
342
343static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
344 enum ieee80211_band band,
345 u8 n_probes)
346{
347 if (band == IEEE80211_BAND_5GHZ)
348 return IWL_ACTIVE_DWELL_TIME_52 +
349 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
350 else
351 return IWL_ACTIVE_DWELL_TIME_24 +
352 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
353}
354
355static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
356 enum ieee80211_band band)
357{
358 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
359 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
360 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
361
362 if (iwl_is_associated(priv)) {
363 /* If we're associated, we clamp the maximum passive
364 * dwell time to be 98% of the beacon interval (minus
365 * 2 * channel tune time) */
366 passive = priv->beacon_int;
367 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
368 passive = IWL_PASSIVE_DWELL_BASE;
369 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
370 }
371
372 return passive;
373}
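For concreteness, with the constants above and two direct probes (n_probes = 2), these two helpers work out to the following dwell times (illustrative arithmetic only):

	/* 2.4 GHz active:  30 + 3 * (2 + 1) =  39 ms
	 * 5.2 GHz active:  20 + 2 * (2 + 1) =  26 ms
	 * 2.4 GHz passive: 100 + 20         = 120 ms (not associated)
	 * 5.2 GHz passive: 100 + 10         = 110 ms (not associated)
	 * When associated, passive dwell is instead clamped to ~98% of the
	 * beacon interval (capped at IWL_PASSIVE_DWELL_BASE) minus
	 * 2 * IWL_CHANNEL_TUNE_TIME. */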
374
375static int iwl_get_channels_for_scan(struct iwl_priv *priv,
376 enum ieee80211_band band,
377 u8 is_active, u8 n_probes,
378 struct iwl_scan_channel *scan_ch)
379{
380 const struct ieee80211_channel *channels = NULL;
381 const struct ieee80211_supported_band *sband;
382 const struct iwl_channel_info *ch_info;
383 u16 passive_dwell = 0;
384 u16 active_dwell = 0;
385 int added, i;
386 u16 channel;
387
388 sband = iwl_get_hw_mode(priv, band);
389 if (!sband)
390 return 0;
391
392 channels = sband->channels;
393
394 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
395 passive_dwell = iwl_get_passive_dwell_time(priv, band);
396
397 if (passive_dwell <= active_dwell)
398 passive_dwell = active_dwell + 1;
399
400 for (i = 0, added = 0; i < sband->n_channels; i++) {
401 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
402 continue;
403
404 channel =
405 ieee80211_frequency_to_channel(channels[i].center_freq);
406 scan_ch->channel = cpu_to_le16(channel);
407
408 ch_info = iwl_get_channel_info(priv, band, channel);
409 if (!is_channel_valid(ch_info)) {
410 IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
411 channel);
412 continue;
413 }
414
415 if (!is_active || is_channel_passive(ch_info) ||
416 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
417 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
418 else
419 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
420
421 if ((scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) && n_probes)
422 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
423
424 scan_ch->active_dwell = cpu_to_le16(active_dwell);
425 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
426
427 /* Set txpower levels to defaults */
428 scan_ch->dsp_atten = 110;
429
430 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
431 * power level:
432 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
433 */
434 if (band == IEEE80211_BAND_5GHZ)
435 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
436 else
437 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
438
439 IWL_DEBUG_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n",
440 channel, le32_to_cpu(scan_ch->type),
441 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
442 "ACTIVE" : "PASSIVE",
443 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
444 active_dwell : passive_dwell);
445
446 scan_ch++;
447 added++;
448 }
449
450 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
451 return added;
452}
453
454void iwl_init_scan_params(struct iwl_priv *priv)
455{
456 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
457 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = RATE_MCS_ANT_INIT_IND;
458 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
459 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = RATE_MCS_ANT_INIT_IND;
460}
461
462int iwl_scan_initiate(struct iwl_priv *priv)
463{
464 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
465 IWL_ERROR("APs don't scan.\n");
466 return 0;
467 }
468
469 if (!iwl_is_ready_rf(priv)) {
470 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
471 return -EIO;
472 }
473
474 if (test_bit(STATUS_SCANNING, &priv->status)) {
475 IWL_DEBUG_SCAN("Scan already in progress.\n");
476 return -EAGAIN;
477 }
478
479 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
480 IWL_DEBUG_SCAN("Scan request while abort pending. "
481 "Queuing.\n");
482 return -EAGAIN;
483 }
484
485 IWL_DEBUG_INFO("Starting scan...\n");
486 if (priv->cfg->sku & IWL_SKU_G)
487 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
488 if (priv->cfg->sku & IWL_SKU_A)
489 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
490 set_bit(STATUS_SCANNING, &priv->status);
491 priv->scan_start = jiffies;
492 priv->scan_pass_start = priv->scan_start;
493
494 queue_work(priv->workqueue, &priv->request_scan);
495
496 return 0;
497}
498EXPORT_SYMBOL(iwl_scan_initiate);
499
500#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
501
502static void iwl_bg_scan_check(struct work_struct *data)
503{
504 struct iwl_priv *priv =
505 container_of(data, struct iwl_priv, scan_check.work);
506
507 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
508 return;
509
510 mutex_lock(&priv->mutex);
511 if (test_bit(STATUS_SCANNING, &priv->status) ||
512 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
513 IWL_DEBUG(IWL_DL_SCAN, "Scan completion watchdog resetting "
514 "adapter (%dms)\n",
515 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
516
517 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
518 iwl_send_scan_abort(priv);
519 }
520 mutex_unlock(&priv->mutex);
521}
522/**
523 * iwl_supported_rate_to_ie - fill in the supported rates in an IE field
524 *
525 * Return: a bitmap with a bit set for each supported rate inserted in the IE
526 */
527static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate,
528 u16 basic_rate, int *left)
529{
530 u16 ret_rates = 0, bit;
531 int i;
532 u8 *cnt = ie;
533 u8 *rates = ie + 1;
534
535 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
536 if (bit & supported_rate) {
537 ret_rates |= bit;
538 rates[*cnt] = iwl_rates[i].ieee |
539 ((bit & basic_rate) ? 0x80 : 0x00);
540 (*cnt)++;
541 (*left)--;
542 if ((*left <= 0) ||
543 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
544 break;
545 }
546 }
547
548 return ret_rates;
549}
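As an example of the encoding above — assuming the conventional IEEE rate octets (units of 500 kb/s) in the iwl_rates table — feeding in the four CCK rates with 1 and 2 Mb/s marked basic would produce:

	/* ie[] = [count = 4][0x82][0x84][0x0b][0x16]
	 *   0x82 = 1 Mb/s (0x02) | 0x80 basic-rate flag
	 *   0x84 = 2 Mb/s (0x04) | 0x80 basic-rate flag
	 *   0x0b = 5.5 Mb/s, 0x16 = 11 Mb/s (not basic)
	 * and the returned bitmap has the four corresponding rate bits set. */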
550
551
552static void iwl_ht_cap_to_ie(const struct ieee80211_supported_band *sband,
553 u8 *pos, int *left)
554{
555 struct ieee80211_ht_cap *ht_cap;
556
557 if (!sband || !sband->ht_info.ht_supported)
558 return;
559
560 if (*left < sizeof(struct ieee80211_ht_cap))
561 return;
562
563 *pos++ = sizeof(struct ieee80211_ht_cap);
564 ht_cap = (struct ieee80211_ht_cap *) pos;
565
566 ht_cap->cap_info = cpu_to_le16(sband->ht_info.cap);
567 memcpy(ht_cap->supp_mcs_set, sband->ht_info.supp_mcs_set, 16);
568 ht_cap->ampdu_params_info =
569 (sband->ht_info.ampdu_factor & IEEE80211_HT_CAP_AMPDU_FACTOR) |
570 ((sband->ht_info.ampdu_density << 2) &
571 IEEE80211_HT_CAP_AMPDU_DENSITY);
572 *left -= sizeof(struct ieee80211_ht_cap);
573}
574
575/**
576 * iwl_fill_probe_req - fill in all required fields and IEs for a probe request
577 */
578
579static u16 iwl_fill_probe_req(struct iwl_priv *priv,
580 enum ieee80211_band band,
581 struct ieee80211_mgmt *frame,
582 int left)
583{
584 int len = 0;
585 u8 *pos = NULL;
586 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
587 const struct ieee80211_supported_band *sband =
588 iwl_get_hw_mode(priv, band);
589
590
591 /* Make sure there is enough space for the probe request,
592 * two mandatory IEs and the data */
593 left -= 24;
594 if (left < 0)
595 return 0;
596
597 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
598 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
599 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
600 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
601 frame->seq_ctrl = 0;
602
603 len += 24;
604
605 /* ...next IE... */
606 pos = &frame->u.probe_req.variable[0];
607
608 /* fill in our indirect SSID IE */
609 left -= 2;
610 if (left < 0)
611 return 0;
612 *pos++ = WLAN_EID_SSID;
613 *pos++ = 0;
614
615 len += 2;
616
617 /* fill in supported rate */
618 left -= 2;
619 if (left < 0)
620 return 0;
621
622 *pos++ = WLAN_EID_SUPP_RATES;
623 *pos = 0;
624
625 /* exclude 60M rate */
626 active_rates = priv->rates_mask;
627 active_rates &= ~IWL_RATE_60M_MASK;
628
629 active_rate_basic = active_rates & IWL_BASIC_RATES_MASK;
630
631 cck_rates = IWL_CCK_RATES_MASK & active_rates;
632 ret_rates = iwl_supported_rate_to_ie(pos, cck_rates,
633 active_rate_basic, &left);
634 active_rates &= ~ret_rates;
635
636 ret_rates = iwl_supported_rate_to_ie(pos, active_rates,
637 active_rate_basic, &left);
638 active_rates &= ~ret_rates;
639
640 len += 2 + *pos;
641 pos += (*pos) + 1;
642
643 if (active_rates == 0)
644 goto fill_end;
645
646 /* fill in supported extended rate */
647 /* ...next IE... */
648 left -= 2;
649 if (left < 0)
650 return 0;
651 /* ... fill it in... */
652 *pos++ = WLAN_EID_EXT_SUPP_RATES;
653 *pos = 0;
654 iwl_supported_rate_to_ie(pos, active_rates, active_rate_basic, &left);
655 if (*pos > 0) {
656 len += 2 + *pos;
657 pos += (*pos) + 1;
658 } else {
659 pos--;
660 }
661
662 fill_end:
663
664 left -= 2;
665 if (left < 0)
666 return 0;
667
668 *pos++ = WLAN_EID_HT_CAPABILITY;
669 *pos = 0;
670 iwl_ht_cap_to_ie(sband, pos, &left);
671 if (*pos > 0)
672 len += 2 + *pos;
673
674 return (u16)len;
675}
676
677static u32 iwl_scan_tx_ant(struct iwl_priv *priv, enum ieee80211_band band)
678{
679 int i, ind;
680
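	/* Round-robin to the next valid TX antenna/chain for this band so
	 * successive scans spread probe transmissions across chains; e.g.
	 * with valid_tx_ant = 0x3 (chains A and B) the saved index
	 * alternates 0, 1, 0, 1, ... and the matching RATE_MCS_ANT mask
	 * from scan_tx_ant[] is returned each time. */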
681 ind = priv->scan_tx_ant[band];
682 for (i = 0; i < priv->hw_params.tx_chains_num; i++) {
683 ind = (ind+1) >= priv->hw_params.tx_chains_num ? 0 : ind+1;
684 if (priv->hw_params.valid_tx_ant & (1 << ind)) {
685 priv->scan_tx_ant[band] = ind;
686 break;
687 }
688 }
689 IWL_DEBUG_SCAN("select TX ANT = %c\n", 'A' + ind);
690 return scan_tx_ant[ind];
691}
692
693
694static void iwl_bg_request_scan(struct work_struct *data)
695{
696 struct iwl_priv *priv =
697 container_of(data, struct iwl_priv, request_scan);
698 struct iwl_host_cmd cmd = {
699 .id = REPLY_SCAN_CMD,
700 .len = sizeof(struct iwl_scan_cmd),
701 .meta.flags = CMD_SIZE_HUGE,
702 };
703 struct iwl_scan_cmd *scan;
704 struct ieee80211_conf *conf = NULL;
705 int ret = 0;
706 u32 tx_ant;
707 u16 cmd_len;
708 enum ieee80211_band band;
709 u8 n_probes = 2;
710 u8 rx_chain = 0x7; /* bitmap: ABC chains */
711
712 conf = ieee80211_get_hw_conf(priv->hw);
713
714 mutex_lock(&priv->mutex);
715
716 if (!iwl_is_ready(priv)) {
717 IWL_WARNING("request scan called when driver not ready.\n");
718 goto done;
719 }
720
721 /* Make sure the scan wasn't cancelled before this queued work
722 * was given the chance to run... */
723 if (!test_bit(STATUS_SCANNING, &priv->status))
724 goto done;
725
726 /* This should never be called or scheduled if there is currently
727 * a scan active in the hardware. */
728 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
729 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
730 "Ignoring second request.\n");
731 ret = -EIO;
732 goto done;
733 }
734
735 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
736 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
737 goto done;
738 }
739
740 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
741 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
742 goto done;
743 }
744
745 if (iwl_is_rfkill(priv)) {
746 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
747 goto done;
748 }
749
750 if (!test_bit(STATUS_READY, &priv->status)) {
751 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
752 goto done;
753 }
754
755 if (!priv->scan_bands) {
756 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
757 goto done;
758 }
759
760 if (!priv->scan) {
761 priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
762 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
763 if (!priv->scan) {
764 ret = -ENOMEM;
765 goto done;
766 }
767 }
768 scan = priv->scan;
769 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
770
771 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
772 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
773
774 if (iwl_is_associated(priv)) {
775 u16 interval = 0;
776 u32 extra;
777 u32 suspend_time = 100;
778 u32 scan_suspend_time = 100;
779 unsigned long flags;
780
781 IWL_DEBUG_INFO("Scanning while associated...\n");
782
783 spin_lock_irqsave(&priv->lock, flags);
784 interval = priv->beacon_int;
785 spin_unlock_irqrestore(&priv->lock, flags);
786
787 scan->suspend_time = 0;
788 scan->max_out_time = cpu_to_le32(200 * 1024);
789 if (!interval)
790 interval = suspend_time;
791
792 extra = (suspend_time / interval) << 22;
793 scan_suspend_time = (extra |
794 ((suspend_time % interval) * 1024));
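		/* Worked example of the packing above: with suspend_time = 100
		 * and a beacon interval of 100, extra = (100 / 100) << 22 =
		 * 0x400000 and the remainder term is 0, so scan_suspend_time
		 * carries whole beacon intervals in the high bits and the
		 * remainder scaled by 1024 in the low bits. */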
795 scan->suspend_time = cpu_to_le32(scan_suspend_time);
796 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
797 scan_suspend_time, interval);
798 }
799
800 /* We should add the ability for user to lock to PASSIVE ONLY */
801 if (priv->one_direct_scan) {
802 IWL_DEBUG_SCAN("Start direct scan for '%s'\n",
803 iwl_escape_essid(priv->direct_ssid,
804 priv->direct_ssid_len));
805 scan->direct_scan[0].id = WLAN_EID_SSID;
806 scan->direct_scan[0].len = priv->direct_ssid_len;
807 memcpy(scan->direct_scan[0].ssid,
808 priv->direct_ssid, priv->direct_ssid_len);
809 n_probes++;
810 } else if (!iwl_is_associated(priv) && priv->essid_len) {
811 IWL_DEBUG_SCAN("Start direct scan for '%s' (not associated)\n",
812 iwl_escape_essid(priv->essid, priv->essid_len));
813 scan->direct_scan[0].id = WLAN_EID_SSID;
814 scan->direct_scan[0].len = priv->essid_len;
815 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
816 n_probes++;
817 } else {
818 IWL_DEBUG_SCAN("Start indirect scan.\n");
819 }
820
821 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
822 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
823 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
824
825
826 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
827 band = IEEE80211_BAND_2GHZ;
828 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
829 tx_ant = iwl_scan_tx_ant(priv, band);
830 if (priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK)
831 scan->tx_cmd.rate_n_flags =
832 iwl_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
833 tx_ant);
834 else
835 scan->tx_cmd.rate_n_flags =
836 iwl_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
837 tx_ant |
838 RATE_MCS_CCK_MSK);
839 scan->good_CRC_th = 0;
840 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
841 band = IEEE80211_BAND_5GHZ;
842 tx_ant = iwl_scan_tx_ant(priv, band);
843 scan->tx_cmd.rate_n_flags =
844 iwl_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
845 tx_ant);
846 scan->good_CRC_th = IWL_GOOD_CRC_TH;
847
848 /* Force use of chains B and C (0x6) for scan Rx for 4965
849 * Avoid A (0x1) because of its off-channel reception on A-band.
850 * MIMO is not used here, but value is required */
851 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
852 rx_chain = 0x6;
853 } else {
854 IWL_WARNING("Invalid scan band count\n");
855 goto done;
856 }
857
858 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
859 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
860 (rx_chain << RXON_RX_CHAIN_FORCE_SEL_POS) |
861 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
862
863 cmd_len = iwl_fill_probe_req(priv, band,
864 (struct ieee80211_mgmt *)scan->data,
865 IWL_MAX_SCAN_SIZE - sizeof(*scan));
866
867 scan->tx_cmd.len = cpu_to_le16(cmd_len);
868
869 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
870 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
871
872 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
873 RXON_FILTER_BCON_AWARE_MSK);
874
875 scan->channel_count =
876 iwl_get_channels_for_scan(priv, band, 1, /* active */
877 n_probes,
878 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
879
880 if (scan->channel_count == 0) {
881 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
882 goto done;
883 }
884
885 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
886 scan->channel_count * sizeof(struct iwl_scan_channel);
887 cmd.data = scan;
888 scan->len = cpu_to_le16(cmd.len);
889
890 set_bit(STATUS_SCAN_HW, &priv->status);
891 ret = iwl_send_cmd_sync(priv, &cmd);
892 if (ret)
893 goto done;
894
895 queue_delayed_work(priv->workqueue, &priv->scan_check,
896 IWL_SCAN_CHECK_WATCHDOG);
897
898 mutex_unlock(&priv->mutex);
899 return;
900
901 done:
902 /* inform mac80211 scan aborted */
903 queue_work(priv->workqueue, &priv->scan_completed);
904 mutex_unlock(&priv->mutex);
905}
906
907static void iwl_bg_abort_scan(struct work_struct *work)
908{
909 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
910
911 if (!iwl_is_ready(priv))
912 return;
913
914 mutex_lock(&priv->mutex);
915
916 set_bit(STATUS_SCAN_ABORTING, &priv->status);
917 iwl_send_scan_abort(priv);
918
919 mutex_unlock(&priv->mutex);
920}
921
922void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
923{
924 /* FIXME: move here when resolved PENDING
925 * INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); */
926 INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
927 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
928 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
929}
930EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
931
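Taken together, the new file exports a small scan API. A hedged sketch of how a driver front end (e.g. iwl4965-base.c) would wire it up; the surrounding setup context is an assumption, only the called functions come from this patch:

	int ret;

	/* during device setup */
	iwl_setup_rx_scan_handlers(priv);   /* RX handlers for 0x80/0x82/0x83/0x84 */
	iwl_setup_scan_deferred_work(priv); /* request/abort/watchdog work items */
	iwl_init_scan_params(priv);         /* default per-band scan TX antenna */

	/* later, e.g. from the mac80211 hw_scan callback */
	mutex_lock(&priv->mutex);
	ret = iwl_scan_initiate(priv);      /* queues priv->request_scan */
	mutex_unlock(&priv->mutex);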
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index e4fdfaa2b9b2..6d1467d0bd9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -28,17 +28,446 @@
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include <linux/etherdevice.h>
31 32
32#include "iwl-eeprom.h" 33#include "iwl-dev.h"
33#include "iwl-4965.h"
34#include "iwl-core.h" 34#include "iwl-core.h"
35#include "iwl-sta.h" 35#include "iwl-sta.h"
36#include "iwl-io.h"
37#include "iwl-helpers.h" 36#include "iwl-helpers.h"
38#include "iwl-4965.h"
39#include "iwl-sta.h"
40 37
41int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 38
39#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
40#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
41
42u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
43{
44 int i;
45 int start = 0;
46 int ret = IWL_INVALID_STATION;
47 unsigned long flags;
48 DECLARE_MAC_BUF(mac);
49
50 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
51 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
52 start = IWL_STA_ID;
53
54 if (is_broadcast_ether_addr(addr))
55 return priv->hw_params.bcast_sta_id;
56
57 spin_lock_irqsave(&priv->sta_lock, flags);
58 for (i = start; i < priv->hw_params.max_stations; i++)
59 if (priv->stations[i].used &&
60 (!compare_ether_addr(priv->stations[i].sta.sta.addr,
61 addr))) {
62 ret = i;
63 goto out;
64 }
65
66 IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
67 print_mac(mac, addr), priv->num_stations);
68
69 out:
70 spin_unlock_irqrestore(&priv->sta_lock, flags);
71 return ret;
72}
73EXPORT_SYMBOL(iwl_find_station);
74
75int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
76{
77 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
78 return IWL_AP_ID;
79 } else {
80 u8 *da = ieee80211_get_DA(hdr);
81 return iwl_find_station(priv, da);
82 }
83}
84EXPORT_SYMBOL(iwl_get_ra_sta_id);
85
86static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
87{
88 unsigned long flags;
89 DECLARE_MAC_BUF(mac);
90
91 spin_lock_irqsave(&priv->sta_lock, flags);
92
93 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
94 IWL_ERROR("ACTIVATE a non DRIVER active station %d\n", sta_id);
95
96 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
97 IWL_DEBUG_ASSOC("Added STA to Ucode: %s\n",
98 print_mac(mac, priv->stations[sta_id].sta.sta.addr));
99
100 spin_unlock_irqrestore(&priv->sta_lock, flags);
101}
102
103static int iwl_add_sta_callback(struct iwl_priv *priv,
104 struct iwl_cmd *cmd, struct sk_buff *skb)
105{
106 struct iwl_rx_packet *res = NULL;
107 u8 sta_id = cmd->cmd.addsta.sta.sta_id;
108
109 if (!skb) {
110 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
111 return 1;
112 }
113
114 res = (struct iwl_rx_packet *)skb->data;
115 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
116 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
117 res->hdr.flags);
118 return 1;
119 }
120
121 switch (res->u.add_sta.status) {
122 case ADD_STA_SUCCESS_MSK:
123 iwl_sta_ucode_activate(priv, sta_id);
124 /* fall through */
125 default:
126 IWL_DEBUG_HC("Received REPLY_ADD_STA:(0x%08X)\n",
127 res->u.add_sta.status);
128 break;
129 }
130
131 /* We didn't cache the SKB; let the caller free it */
132 return 1;
133}
134
135int iwl_send_add_sta(struct iwl_priv *priv,
136 struct iwl_addsta_cmd *sta, u8 flags)
137{
138 struct iwl_rx_packet *res = NULL;
139 int ret = 0;
140 u8 data[sizeof(*sta)];
141 struct iwl_host_cmd cmd = {
142 .id = REPLY_ADD_STA,
143 .meta.flags = flags,
144 .data = data,
145 };
146
147 if (flags & CMD_ASYNC)
148 cmd.meta.u.callback = iwl_add_sta_callback;
149 else
150 cmd.meta.flags |= CMD_WANT_SKB;
151
152 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
153 ret = iwl_send_cmd(priv, &cmd);
154
155 if (ret || (flags & CMD_ASYNC))
156 return ret;
157
158 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
159 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
160 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
161 res->hdr.flags);
162 ret = -EIO;
163 }
164
165 if (ret == 0) {
166 switch (res->u.add_sta.status) {
167 case ADD_STA_SUCCESS_MSK:
168 iwl_sta_ucode_activate(priv, sta->sta.sta_id);
169 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
170 break;
171 default:
172 ret = -EIO;
173 IWL_WARNING("REPLY_ADD_STA failed\n");
174 break;
175 }
176 }
177
178 priv->alloc_rxb_skb--;
179 dev_kfree_skb_any(cmd.meta.u.skb);
180
181 return ret;
182}
183EXPORT_SYMBOL(iwl_send_add_sta);
184
185static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
186 struct ieee80211_ht_info *sta_ht_inf)
187{
188 __le32 sta_flags;
189 u8 mimo_ps_mode;
190
191 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
192 goto done;
193
194 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
195
196 sta_flags = priv->stations[index].sta.station_flags;
197
198 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
199
200 switch (mimo_ps_mode) {
201 case WLAN_HT_CAP_MIMO_PS_STATIC:
202 sta_flags |= STA_FLG_MIMO_DIS_MSK;
203 break;
204 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
205 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
206 break;
207 case WLAN_HT_CAP_MIMO_PS_DISABLED:
208 break;
209 default:
210 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
211 break;
212 }
213
214 sta_flags |= cpu_to_le32(
215 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
216
217 sta_flags |= cpu_to_le32(
218 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
219
220 if (iwl_is_fat_tx_allowed(priv, sta_ht_inf))
221 sta_flags |= STA_FLG_FAT_EN_MSK;
222 else
223 sta_flags &= ~STA_FLG_FAT_EN_MSK;
224
225 priv->stations[index].sta.station_flags = sta_flags;
226 done:
227 return;
228}
229
230/**
231 * iwl_add_station_flags - Add station to tables in driver and device
232 */
233u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
234 u8 flags, struct ieee80211_ht_info *ht_info)
235{
236 int i;
237 int sta_id = IWL_INVALID_STATION;
238 struct iwl_station_entry *station;
239 unsigned long flags_spin;
240 DECLARE_MAC_BUF(mac);
241
242 spin_lock_irqsave(&priv->sta_lock, flags_spin);
243 if (is_ap)
244 sta_id = IWL_AP_ID;
245 else if (is_broadcast_ether_addr(addr))
246 sta_id = priv->hw_params.bcast_sta_id;
247 else
248 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
249 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
250 addr)) {
251 sta_id = i;
252 break;
253 }
254
255 if (!priv->stations[i].used &&
256 sta_id == IWL_INVALID_STATION)
257 sta_id = i;
258 }
259
260 /* These two conditions have the same outcome, but keep them separate
261 since they have different meanings */
262 if (unlikely(sta_id == IWL_INVALID_STATION)) {
263 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
264 return sta_id;
265 }
266
267 if (priv->stations[sta_id].used &&
268 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
269 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
270 return sta_id;
271 }
272
273
274 station = &priv->stations[sta_id];
275 station->used = IWL_STA_DRIVER_ACTIVE;
276 IWL_DEBUG_ASSOC("Add STA to driver ID %d: %s\n",
277 sta_id, print_mac(mac, addr));
278 priv->num_stations++;
279
280 /* Set up the REPLY_ADD_STA command to send to device */
281 memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
282 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
283 station->sta.mode = 0;
284 station->sta.sta.sta_id = sta_id;
285 station->sta.station_flags = 0;
286
287 /* BCAST station and IBSS stations do not work in HT mode */
288 if (sta_id != priv->hw_params.bcast_sta_id &&
289 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
290 iwl_set_ht_add_station(priv, sta_id, ht_info);
291
292 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
293
294 /* Add station to device's station table */
295 iwl_send_add_sta(priv, &station->sta, flags);
296 return sta_id;
297
298}
299EXPORT_SYMBOL(iwl_add_station_flags);
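The two "used" bits defined at the top of the file track each station entry through a two-step lifecycle; summarizing the flow implemented by the add/remove paths in this file (descriptive, derived from the code above and below):

	/* add:    iwl_add_station_flags()    sets IWL_STA_DRIVER_ACTIVE
	 *         REPLY_ADD_STA response     sets IWL_STA_UCODE_ACTIVE
	 * remove: iwl_remove_station()       clears IWL_STA_DRIVER_ACTIVE and
	 *                                    sends REPLY_REMOVE_STA (async)
	 *         REPLY_REMOVE_STA response  iwl_sta_ucode_deactivate() clears
	 *                                    the uCode bit and wipes the entry */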
300
301static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
302{
303 unsigned long flags;
304 DECLARE_MAC_BUF(mac);
305
306 u8 sta_id = iwl_find_station(priv, addr);
307
308 BUG_ON(sta_id == IWL_INVALID_STATION);
309
310 IWL_DEBUG_ASSOC("Removed STA from Ucode: %s\n",
311 print_mac(mac, addr));
312
313 spin_lock_irqsave(&priv->sta_lock, flags);
314
315 /* The uCode entry must still be active while the driver entry is inactive */
316 if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE)
317 IWL_ERROR("removed non active STA %d\n", sta_id);
318
319 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
320
321 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
322 spin_unlock_irqrestore(&priv->sta_lock, flags);
323}
324
325static int iwl_remove_sta_callback(struct iwl_priv *priv,
326 struct iwl_cmd *cmd, struct sk_buff *skb)
327{
328 struct iwl_rx_packet *res = NULL;
329 const char *addr = cmd->cmd.rm_sta.addr;
330
331 if (!skb) {
332 IWL_ERROR("Error: Response NULL in REPLY_REMOVE_STA.\n");
333 return 1;
334 }
335
336 res = (struct iwl_rx_packet *)skb->data;
337 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
338 IWL_ERROR("Bad return from REPLY_REMOVE_STA (0x%08X)\n",
339 res->hdr.flags);
340 return 1;
341 }
342
343 switch (res->u.rem_sta.status) {
344 case REM_STA_SUCCESS_MSK:
345 iwl_sta_ucode_deactivate(priv, addr);
346 break;
347 default:
348 IWL_ERROR("REPLY_REMOVE_STA failed\n");
349 break;
350 }
351
352 /* We didn't cache the SKB; let the caller free it */
353 return 1;
354}
355
356static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
357 u8 flags)
358{
359 struct iwl_rx_packet *res = NULL;
360 int ret;
361
362 struct iwl_rem_sta_cmd rm_sta_cmd;
363
364 struct iwl_host_cmd cmd = {
365 .id = REPLY_REMOVE_STA,
366 .len = sizeof(struct iwl_rem_sta_cmd),
367 .meta.flags = flags,
368 .data = &rm_sta_cmd,
369 };
370
371 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
372 rm_sta_cmd.num_sta = 1;
373 memcpy(&rm_sta_cmd.addr, addr , ETH_ALEN);
374
375 if (flags & CMD_ASYNC)
376 cmd.meta.u.callback = iwl_remove_sta_callback;
377 else
378 cmd.meta.flags |= CMD_WANT_SKB;
379 ret = iwl_send_cmd(priv, &cmd);
380
381 if (ret || (flags & CMD_ASYNC))
382 return ret;
383
384 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
385 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
386 IWL_ERROR("Bad return from REPLY_REMOVE_STA (0x%08X)\n",
387 res->hdr.flags);
388 ret = -EIO;
389 }
390
391 if (!ret) {
392 switch (res->u.rem_sta.status) {
393 case REM_STA_SUCCESS_MSK:
394 iwl_sta_ucode_deactivate(priv, addr);
395 IWL_DEBUG_ASSOC("REPLY_REMOVE_STA PASSED\n");
396 break;
397 default:
398 ret = -EIO;
399 IWL_ERROR("REPLY_REMOVE_STA failed\n");
400 break;
401 }
402 }
403
404 priv->alloc_rxb_skb--;
405 dev_kfree_skb_any(cmd.meta.u.skb);
406
407 return ret;
408}
409
410/**
411 * iwl_remove_station - Remove driver's knowledge of station.
412 */
413int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
414{
415 int sta_id = IWL_INVALID_STATION;
416 int i, ret = -EINVAL;
417 unsigned long flags;
418 DECLARE_MAC_BUF(mac);
419
420 spin_lock_irqsave(&priv->sta_lock, flags);
421
422 if (is_ap)
423 sta_id = IWL_AP_ID;
424 else if (is_broadcast_ether_addr(addr))
425 sta_id = priv->hw_params.bcast_sta_id;
426 else
427 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
428 if (priv->stations[i].used &&
429 !compare_ether_addr(priv->stations[i].sta.sta.addr,
430 addr)) {
431 sta_id = i;
432 break;
433 }
434
435 if (unlikely(sta_id == IWL_INVALID_STATION))
436 goto out;
437
438 IWL_DEBUG_ASSOC("Removing STA from driver:%d %s\n",
439 sta_id, print_mac(mac, addr));
440
441 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
442 IWL_ERROR("Removing %s but non DRIVER active\n",
443 print_mac(mac, addr));
444 goto out;
445 }
446
447 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
448 IWL_ERROR("Removing %s but non UCODE active\n",
449 print_mac(mac, addr));
450 goto out;
451 }
452
453
454 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
455
456 priv->num_stations--;
457
458 BUG_ON(priv->num_stations < 0);
459
460 spin_unlock_irqrestore(&priv->sta_lock, flags);
461
462 ret = iwl_send_remove_station(priv, addr, CMD_ASYNC);
463 return ret;
464out:
465 spin_unlock_irqrestore(&priv->sta_lock, flags);
466 return ret;
467}
468EXPORT_SYMBOL(iwl_remove_station);
469
470static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
42{ 471{
43 int i; 472 int i;
44 473
@@ -91,6 +520,7 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
91 else 520 else
92 return 0; 521 return 0;
93} 522}
523EXPORT_SYMBOL(iwl_send_static_wepkey_cmd);
94 524
95int iwl_remove_default_wep_key(struct iwl_priv *priv, 525int iwl_remove_default_wep_key(struct iwl_priv *priv,
96 struct ieee80211_key_conf *keyconf) 526 struct ieee80211_key_conf *keyconf)
@@ -107,10 +537,13 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
107 priv->default_wep_key--; 537 priv->default_wep_key--;
108 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); 538 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
109 ret = iwl_send_static_wepkey_cmd(priv, 1); 539 ret = iwl_send_static_wepkey_cmd(priv, 1);
540 IWL_DEBUG_WEP("Remove default WEP key: idx=%d ret=%d\n",
541 keyconf->keyidx, ret);
110 spin_unlock_irqrestore(&priv->sta_lock, flags); 542 spin_unlock_irqrestore(&priv->sta_lock, flags);
111 543
112 return ret; 544 return ret;
113} 545}
546EXPORT_SYMBOL(iwl_remove_default_wep_key);
114 547
115int iwl_set_default_wep_key(struct iwl_priv *priv, 548int iwl_set_default_wep_key(struct iwl_priv *priv,
116 struct ieee80211_key_conf *keyconf) 549 struct ieee80211_key_conf *keyconf)
@@ -118,8 +551,14 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
118 int ret; 551 int ret;
119 unsigned long flags; 552 unsigned long flags;
120 553
554 if (keyconf->keylen != WEP_KEY_LEN_128 &&
555 keyconf->keylen != WEP_KEY_LEN_64) {
556 IWL_DEBUG_WEP("Bad WEP key length %d\n", keyconf->keylen);
557 return -EINVAL;
558 }
559
121 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; 560 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
122 keyconf->hw_key_idx = keyconf->keyidx; 561 keyconf->hw_key_idx = HW_KEY_DEFAULT;
123 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP; 562 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
124 563
125 spin_lock_irqsave(&priv->sta_lock, flags); 564 spin_lock_irqsave(&priv->sta_lock, flags);
@@ -134,10 +573,13 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
134 keyconf->keylen); 573 keyconf->keylen);
135 574
136 ret = iwl_send_static_wepkey_cmd(priv, 0); 575 ret = iwl_send_static_wepkey_cmd(priv, 0);
576 IWL_DEBUG_WEP("Set default WEP key: len=%d idx=%d ret=%d\n",
577 keyconf->keylen, keyconf->keyidx, ret);
137 spin_unlock_irqrestore(&priv->sta_lock, flags); 578 spin_unlock_irqrestore(&priv->sta_lock, flags);
138 579
139 return ret; 580 return ret;
140} 581}
582EXPORT_SYMBOL(iwl_set_default_wep_key);
141 583
142static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv, 584static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
143 struct ieee80211_key_conf *keyconf, 585 struct ieee80211_key_conf *keyconf,
@@ -148,7 +590,6 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
148 int ret; 590 int ret;
149 591
150 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; 592 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
151 keyconf->hw_key_idx = keyconf->keyidx;
152 593
153 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); 594 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
154 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 595 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -172,15 +613,18 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
172 memcpy(&priv->stations[sta_id].sta.key.key[3], 613 memcpy(&priv->stations[sta_id].sta.key.key[3],
173 keyconf->key, keyconf->keylen); 614 keyconf->key, keyconf->keylen);
174 615
175 priv->stations[sta_id].sta.key.key_offset = 616 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
617 == STA_KEY_FLG_NO_ENC)
618 priv->stations[sta_id].sta.key.key_offset =
176 iwl_get_free_ucode_key_index(priv); 619 iwl_get_free_ucode_key_index(priv);
177 priv->stations[sta_id].sta.key.key_flags = key_flags; 620 /* else, we are overriding an existing key => no need to allocate room
621 * in uCode. */
178 622
623 priv->stations[sta_id].sta.key.key_flags = key_flags;
179 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 624 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
180 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 625 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
181 626
182 ret = iwl4965_send_add_station(priv, 627 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
183 &priv->stations[sta_id].sta, CMD_ASYNC);
184 628
185 spin_unlock_irqrestore(&priv->sta_lock, flags); 629 spin_unlock_irqrestore(&priv->sta_lock, flags);
186 630
@@ -202,7 +646,6 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
202 key_flags |= STA_KEY_MULTICAST_MSK; 646 key_flags |= STA_KEY_MULTICAST_MSK;
203 647
204 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 648 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
205 keyconf->hw_key_idx = keyconf->keyidx;
206 649
207 spin_lock_irqsave(&priv->sta_lock, flags); 650 spin_lock_irqsave(&priv->sta_lock, flags);
208 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 651 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
@@ -214,8 +657,13 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
214 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 657 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
215 keyconf->keylen); 658 keyconf->keylen);
216 659
217 priv->stations[sta_id].sta.key.key_offset = 660 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
218 iwl_get_free_ucode_key_index(priv); 661 == STA_KEY_FLG_NO_ENC)
662 priv->stations[sta_id].sta.key.key_offset =
663 iwl_get_free_ucode_key_index(priv);
664 /* else, we are overriding an existing key => no need to allocate room
665 * in uCode. */
666
219 priv->stations[sta_id].sta.key.key_flags = key_flags; 667 priv->stations[sta_id].sta.key.key_flags = key_flags;
220 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 668 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
221 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 669 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -223,8 +671,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
223 spin_unlock_irqrestore(&priv->sta_lock, flags); 671 spin_unlock_irqrestore(&priv->sta_lock, flags);
224 672
225 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); 673 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
226 return iwl4965_send_add_station(priv, 674 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
227 &priv->stations[sta_id].sta, CMD_ASYNC);
228} 675}
229 676
230static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, 677static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -236,15 +683,18 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
236 683
237 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 684 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
238 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 685 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
239 keyconf->hw_key_idx = keyconf->keyidx;
240 686
241 spin_lock_irqsave(&priv->sta_lock, flags); 687 spin_lock_irqsave(&priv->sta_lock, flags);
242 688
243 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 689 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
244 priv->stations[sta_id].keyinfo.conf = keyconf;
245 priv->stations[sta_id].keyinfo.keylen = 16; 690 priv->stations[sta_id].keyinfo.keylen = 16;
246 priv->stations[sta_id].sta.key.key_offset = 691
692 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
693 == STA_KEY_FLG_NO_ENC)
694 priv->stations[sta_id].sta.key.key_offset =
247 iwl_get_free_ucode_key_index(priv); 695 iwl_get_free_ucode_key_index(priv);
696 /* else, we are overriding an existing key => no need to allocate room
697 * in uCode. */
248 698
249 /* This copy is actually not needed: we get the key with each TX */ 699 /* This copy is actually not needed: we get the key with each TX */
250 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); 700 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
@@ -256,54 +706,84 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
256 return ret; 706 return ret;
257} 707}
258 708
259int iwl_remove_dynamic_key(struct iwl_priv *priv, u8 sta_id) 709int iwl_remove_dynamic_key(struct iwl_priv *priv,
710 struct ieee80211_key_conf *keyconf,
711 u8 sta_id)
260{ 712{
261 unsigned long flags; 713 unsigned long flags;
714 int ret = 0;
715 u16 key_flags;
716 u8 keyidx;
262 717
263 priv->key_mapping_key = 0; 718 priv->key_mapping_key--;
264 719
265 spin_lock_irqsave(&priv->sta_lock, flags); 720 spin_lock_irqsave(&priv->sta_lock, flags);
721 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
722 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
723
724 IWL_DEBUG_WEP("Remove dynamic key: idx=%d sta=%d\n",
725 keyconf->keyidx, sta_id);
726
727 if (keyconf->keyidx != keyidx) {
728 /* We need to remove a key with an index different from the one
729 * in the uCode. This means that the key we need to remove has
730 * been replaced by another one with different index.
731 * Don't do anything and return ok
732 */
733 spin_unlock_irqrestore(&priv->sta_lock, flags);
734 return 0;
735 }
736
266 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, 737 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
267 &priv->ucode_key_table)) 738 &priv->ucode_key_table))
268 IWL_ERROR("index %d not used in uCode key table.\n", 739 IWL_ERROR("index %d not used in uCode key table.\n",
269 priv->stations[sta_id].sta.key.key_offset); 740 priv->stations[sta_id].sta.key.key_offset);
270 memset(&priv->stations[sta_id].keyinfo, 0, 741 memset(&priv->stations[sta_id].keyinfo, 0,
271 sizeof(struct iwl4965_hw_key)); 742 sizeof(struct iwl_hw_key));
272 memset(&priv->stations[sta_id].sta.key, 0, 743 memset(&priv->stations[sta_id].sta.key, 0,
273 sizeof(struct iwl4965_keyinfo)); 744 sizeof(struct iwl4965_keyinfo));
274 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 745 priv->stations[sta_id].sta.key.key_flags =
746 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
747 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
275 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 748 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
276 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 749 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
277 spin_unlock_irqrestore(&priv->sta_lock, flags);
278 750
279 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); 751 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
280 return iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0); 752 spin_unlock_irqrestore(&priv->sta_lock, flags);
753 return ret;
281} 754}
755EXPORT_SYMBOL(iwl_remove_dynamic_key);
282 756
283int iwl_set_dynamic_key(struct iwl_priv *priv, 757int iwl_set_dynamic_key(struct iwl_priv *priv,
284 struct ieee80211_key_conf *key, u8 sta_id) 758 struct ieee80211_key_conf *keyconf, u8 sta_id)
285{ 759{
286 int ret; 760 int ret;
287 761
288 priv->key_mapping_key = 1; 762 priv->key_mapping_key++;
763 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
289 764
290 switch (key->alg) { 765 switch (keyconf->alg) {
291 case ALG_CCMP: 766 case ALG_CCMP:
292 ret = iwl_set_ccmp_dynamic_key_info(priv, key, sta_id); 767 ret = iwl_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
293 break; 768 break;
294 case ALG_TKIP: 769 case ALG_TKIP:
295 ret = iwl_set_tkip_dynamic_key_info(priv, key, sta_id); 770 ret = iwl_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
296 break; 771 break;
297 case ALG_WEP: 772 case ALG_WEP:
298 ret = iwl_set_wep_dynamic_key_info(priv, key, sta_id); 773 ret = iwl_set_wep_dynamic_key_info(priv, keyconf, sta_id);
299 break; 774 break;
300 default: 775 default:
301 IWL_ERROR("Unknown alg: %s alg = %d\n", __func__, key->alg); 776 IWL_ERROR("Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
302 ret = -EINVAL; 777 ret = -EINVAL;
303 } 778 }
304 779
780 IWL_DEBUG_WEP("Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n",
781 keyconf->alg, keyconf->keylen, keyconf->keyidx,
782 sta_id, ret);
783
305 return ret; 784 return ret;
306} 785}
786EXPORT_SYMBOL(iwl_set_dynamic_key);
307 787
308#ifdef CONFIG_IWLWIFI_DEBUG 788#ifdef CONFIG_IWLWIFI_DEBUG
309static void iwl_dump_lq_cmd(struct iwl_priv *priv, 789static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -345,11 +825,171 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
345 825
346 iwl_dump_lq_cmd(priv,lq); 826 iwl_dump_lq_cmd(priv,lq);
347 827
348 if (iwl_is_associated(priv) && priv->assoc_station_added && 828 if (iwl_is_associated(priv) && priv->assoc_station_added)
349 priv->lq_mngr.lq_ready)
350 return iwl_send_cmd(priv, &cmd); 829 return iwl_send_cmd(priv, &cmd);
351 830
352 return 0; 831 return 0;
353} 832}
354EXPORT_SYMBOL(iwl_send_lq_cmd); 833EXPORT_SYMBOL(iwl_send_lq_cmd);
355 834
835/**
836 * iwl_sta_init_lq - Initialize a station's hardware rate table
837 *
838 * The uCode's station table contains a table of fallback rates
839 * for automatic fallback during transmission.
840 *
841 * NOTE: This sets up a default set of values. These will be replaced later
842 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
843 * rc80211_simple.
844 *
845 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
846 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
847 * which requires station table entry to exist).
848 */
849static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
850{
851 int i, r;
852 struct iwl_link_quality_cmd link_cmd = {
853 .reserved1 = 0,
854 };
855 u16 rate_flags;
856
857 /* Set up the rate scaling to start at selected rate, fall back
858 * all the way down to 1M in IEEE order, and then spin on 1M */
859 if (is_ap)
860 r = IWL_RATE_54M_INDEX;
861 else if (priv->band == IEEE80211_BAND_5GHZ)
862 r = IWL_RATE_6M_INDEX;
863 else
864 r = IWL_RATE_1M_INDEX;
865
866 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
867 rate_flags = 0;
868 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
869 rate_flags |= RATE_MCS_CCK_MSK;
870
871 /* Use Tx antenna B only */
872 rate_flags |= RATE_MCS_ANT_B_MSK; /*FIXME:RS*/
873
874 link_cmd.rs_table[i].rate_n_flags =
875 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
876 r = iwl4965_get_prev_ieee_rate(r);
877 }
878
879 link_cmd.general_params.single_stream_ant_msk = 2;
880 link_cmd.general_params.dual_stream_ant_msk = 3;
881 link_cmd.agg_params.agg_dis_start_th = 3;
882 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
883
884 /* Update the rate scaling for control frame Tx to AP */
885 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
886
887 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
888 sizeof(link_cmd), &link_cmd, NULL);
889}
890
891/**
892 * iwl_rxon_add_station - add station into station table.
893 *
894 * There is only one AP station, with id = IWL_AP_ID.
895 * NOTE: mutex must be held before calling this function
896 */
897int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
898{
899 u8 sta_id;
900
901 /* Add station to device's station table */
902 struct ieee80211_conf *conf = &priv->hw->conf;
903 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;
904
905 if ((is_ap) &&
906 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
907 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
908 sta_id = iwl_add_station_flags(priv, addr, is_ap,
909 0, cur_ht_config);
910 else
911 sta_id = iwl_add_station_flags(priv, addr, is_ap,
912 0, NULL);
913
914 /* Set up default rate scaling table in device's station table */
915 iwl_sta_init_lq(priv, addr, is_ap);
916
917 return sta_id;
918}
919EXPORT_SYMBOL(iwl_rxon_add_station);
920
921/**
922 * iwl_get_sta_id - Find station's index within station table
923 *
924 * If new IBSS station, create new entry in station table
925 */
926int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
927{
928 int sta_id;
929 u16 fc = le16_to_cpu(hdr->frame_control);
930 DECLARE_MAC_BUF(mac);
931
932 /* If this frame is broadcast or management, use broadcast station id */
933 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
934 is_multicast_ether_addr(hdr->addr1))
935 return priv->hw_params.bcast_sta_id;
936
937 switch (priv->iw_mode) {
938
939 /* If we are a client station in a BSS network, use the special
940 * AP station entry (that's the only station we communicate with) */
941 case IEEE80211_IF_TYPE_STA:
942 return IWL_AP_ID;
943
944 /* If we are an AP, then find the station, or use BCAST */
945 case IEEE80211_IF_TYPE_AP:
946 sta_id = iwl_find_station(priv, hdr->addr1);
947 if (sta_id != IWL_INVALID_STATION)
948 return sta_id;
949 return priv->hw_params.bcast_sta_id;
950
951 /* If this frame is going out to an IBSS network, find the station,
952 * or create a new station table entry */
953 case IEEE80211_IF_TYPE_IBSS:
954 sta_id = iwl_find_station(priv, hdr->addr1);
955 if (sta_id != IWL_INVALID_STATION)
956 return sta_id;
957
958 /* Create new station table entry */
959 sta_id = iwl_add_station_flags(priv, hdr->addr1,
960 0, CMD_ASYNC, NULL);
961
962 if (sta_id != IWL_INVALID_STATION)
963 return sta_id;
964
965 IWL_DEBUG_DROP("Station %s not in station map. "
966 "Defaulting to broadcast...\n",
967 print_mac(mac, hdr->addr1));
968 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
969 return priv->hw_params.bcast_sta_id;
970
971 default:
972 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
973 return priv->hw_params.bcast_sta_id;
974 }
975}
976EXPORT_SYMBOL(iwl_get_sta_id);
977
978/**
979 * iwl_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
980 */
981void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, int sta_id, int tid)
982{
983 unsigned long flags;
984
985 /* Remove "disable" flag, to enable Tx for this TID */
986 spin_lock_irqsave(&priv->sta_lock, flags);
987 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
988 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
989 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
990 spin_unlock_irqrestore(&priv->sta_lock, flags);
991
992 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
993}
994EXPORT_SYMBOL(iwl_sta_modify_enable_tid_tx);
995
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 44f272ecc827..221b93e670a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -29,21 +29,27 @@
29#ifndef __iwl_sta_h__ 29#ifndef __iwl_sta_h__
30#define __iwl_sta_h__ 30#define __iwl_sta_h__
31 31
32#include <net/mac80211.h> 32#define HW_KEY_DYNAMIC 0
33#define HW_KEY_DEFAULT 1
33 34
34#include "iwl-eeprom.h" 35/**
35#include "iwl-core.h" 36 * iwl_find_station - Find station id for a given BSSID
36#include "iwl-4965.h" 37 * @bssid: MAC address of station ID to find
37#include "iwl-io.h" 38 */
38#include "iwl-helpers.h" 39u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid);
39 40
40int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
41int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty); 41int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
42int iwl_remove_default_wep_key(struct iwl_priv *priv, 42int iwl_remove_default_wep_key(struct iwl_priv *priv,
43 struct ieee80211_key_conf *key); 43 struct ieee80211_key_conf *key);
44int iwl_set_default_wep_key(struct iwl_priv *priv, 44int iwl_set_default_wep_key(struct iwl_priv *priv,
45 struct ieee80211_key_conf *key); 45 struct ieee80211_key_conf *key);
46int iwl_remove_dynamic_key(struct iwl_priv *priv, u8 sta_id);
47int iwl_set_dynamic_key(struct iwl_priv *priv, 46int iwl_set_dynamic_key(struct iwl_priv *priv,
48 struct ieee80211_key_conf *key, u8 sta_id); 47 struct ieee80211_key_conf *key, u8 sta_id);
48int iwl_remove_dynamic_key(struct iwl_priv *priv,
49 struct ieee80211_key_conf *key, u8 sta_id);
50int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
51int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
52int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
53void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, int sta_id, int tid);
54int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
49#endif /* __iwl_sta_h__ */ 55#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
new file mode 100644
index 000000000000..9b50b1052b09
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -0,0 +1,1519 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32#include "iwl-eeprom.h"
33#include "iwl-dev.h"
34#include "iwl-core.h"
35#include "iwl-sta.h"
36#include "iwl-io.h"
37#include "iwl-helpers.h"
38
39static const u16 default_tid_to_tx_fifo[] = {
40 IWL_TX_FIFO_AC1,
41 IWL_TX_FIFO_AC0,
42 IWL_TX_FIFO_AC0,
43 IWL_TX_FIFO_AC1,
44 IWL_TX_FIFO_AC2,
45 IWL_TX_FIFO_AC2,
46 IWL_TX_FIFO_AC3,
47 IWL_TX_FIFO_AC3,
48 IWL_TX_FIFO_NONE,
49 IWL_TX_FIFO_NONE,
50 IWL_TX_FIFO_NONE,
51 IWL_TX_FIFO_NONE,
52 IWL_TX_FIFO_NONE,
53 IWL_TX_FIFO_NONE,
54 IWL_TX_FIFO_NONE,
55 IWL_TX_FIFO_NONE,
56 IWL_TX_FIFO_AC3
57};
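
/*
 * Note (illustrative reading of the table above): this appears to follow
 * the standard 802.11e TID-to-AC mapping -- TIDs 0/3 (best effort) use the
 * AC1 FIFO, TIDs 1/2 (background) AC0, TIDs 4/5 (video) AC2 and TIDs 6/7
 * (voice) AC3 -- while the reserved TIDs 8-15 get no Tx FIFO and the
 * trailing AC3 entry presumably covers the non-QoS case.
 */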
58
59
60/**
61 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
62 *
63 * Does NOT advance any TFD circular buffer read/write indexes
64 * Does NOT free the TFD itself (which is within circular buffer)
65 */
66int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
67{
68 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
69 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
70 struct pci_dev *dev = priv->pci_dev;
71 int i;
72 int counter = 0;
73 int index, is_odd;
74
75 /* Host command buffers stay mapped in memory, nothing to clean */
76 if (txq->q.id == IWL_CMD_QUEUE_NUM)
77 return 0;
78
79 /* Sanity check on number of chunks */
80 counter = IWL_GET_BITS(*bd, num_tbs);
81 if (counter > MAX_NUM_OF_TBS) {
82 IWL_ERROR("Too many chunks: %i\n", counter);
83		/* @todo issue fatal error, this is quite a serious situation */
84 return 0;
85 }
86
87 /* Unmap chunks, if any.
88	 * TFD info for odd chunks is in a different format than for even chunks. */
89 for (i = 0; i < counter; i++) {
90 index = i / 2;
91 is_odd = i & 0x1;
92
93 if (is_odd)
94 pci_unmap_single(
95 dev,
96 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
97 (IWL_GET_BITS(bd->pa[index],
98 tb2_addr_hi20) << 16),
99 IWL_GET_BITS(bd->pa[index], tb2_len),
100 PCI_DMA_TODEVICE);
101
102 else if (i > 0)
103 pci_unmap_single(dev,
104 le32_to_cpu(bd->pa[index].tb1_addr),
105 IWL_GET_BITS(bd->pa[index], tb1_len),
106 PCI_DMA_TODEVICE);
107
108 /* Free SKB, if any, for this chunk */
109 if (txq->txb[txq->q.read_ptr].skb[i]) {
110 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
111
112 dev_kfree_skb(skb);
113 txq->txb[txq->q.read_ptr].skb[i] = NULL;
114 }
115 }
116 return 0;
117}
118EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
119
120
121int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
122 dma_addr_t addr, u16 len)
123{
124 int index, is_odd;
125 struct iwl_tfd_frame *tfd = ptr;
126 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
127
128	/* Each TFD can point to a maximum of 20 Tx buffers */
129 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
130 IWL_ERROR("Error can not send more than %d chunks\n",
131 MAX_NUM_OF_TBS);
132 return -EINVAL;
133 }
134
135 index = num_tbs / 2;
136 is_odd = num_tbs & 0x1;
137
138 if (!is_odd) {
139 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
140 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
141 iwl_get_dma_hi_address(addr));
142 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
143 } else {
144 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
145 (u32) (addr & 0xffff));
146 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
147 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
148 }
149
150 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
151
152 return 0;
153}
154EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
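
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * typically chains two buffers into one TFD -- first the Tx command plus
 * MAC header, then the frame payload.  The helper name below is
 * hypothetical and error handling is omitted.
 */
static void example_fill_tfd(struct iwl_priv *priv, struct iwl_tfd_frame *tfd,
			     dma_addr_t cmd_phys, u16 cmd_len,
			     dma_addr_t payload_phys, u16 payload_len)
{
	/* TB 0 (even entry): Tx command + MAC header */
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, cmd_phys, cmd_len);

	/* TB 1 (odd entry): frame payload, if any (NULL frames have none) */
	if (payload_len)
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, payload_phys,
					     payload_len);
}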
155
156/**
157 * iwl_txq_update_write_ptr - Send new write index to hardware
158 */
159int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
160{
161 u32 reg = 0;
162 int ret = 0;
163 int txq_id = txq->q.id;
164
165 if (txq->need_update == 0)
166 return ret;
167
168 /* if we're trying to save power */
169 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
170 /* wake up nic if it's powered down ...
171 * uCode will wake up, and interrupt us again, so next
172 * time we'll skip this part. */
173 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
174
175 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
176 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
177 iwl_set_bit(priv, CSR_GP_CNTRL,
178 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
179 return ret;
180 }
181
182 /* restore this queue's parameters in nic hardware. */
183 ret = iwl_grab_nic_access(priv);
184 if (ret)
185 return ret;
186 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
187 txq->q.write_ptr | (txq_id << 8));
188 iwl_release_nic_access(priv);
189
190 /* else not in power-save mode, uCode will never sleep when we're
191 * trying to tx (during RFKILL, we're not trying to tx). */
192 } else
193 iwl_write32(priv, HBUS_TARG_WRPTR,
194 txq->q.write_ptr | (txq_id << 8));
195
196 txq->need_update = 0;
197
198 return ret;
199}
200EXPORT_SYMBOL(iwl_txq_update_write_ptr);
201
202
203/**
204 * iwl_tx_queue_free - Deallocate DMA queue.
205 * @txq: Transmit queue to deallocate.
206 *
207 * Empty queue by removing and destroying all BD's.
208 * Free all buffers.
209 * 0-fill, but do not free "txq" descriptor structure.
210 */
211static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
212{
213 struct iwl_queue *q = &txq->q;
214 struct pci_dev *dev = priv->pci_dev;
215 int len;
216
217 if (q->n_bd == 0)
218 return;
219
220 /* first, empty all BD's */
221 for (; q->write_ptr != q->read_ptr;
222 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
223 iwl_hw_txq_free_tfd(priv, txq);
224
225 len = sizeof(struct iwl_cmd) * q->n_window;
226 if (q->id == IWL_CMD_QUEUE_NUM)
227 len += IWL_MAX_SCAN_SIZE;
228
229 /* De-alloc array of command/tx buffers */
230 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
231
232 /* De-alloc circular buffer of TFDs */
233 if (txq->q.n_bd)
234 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
235 txq->q.n_bd, txq->bd, txq->q.dma_addr);
236
237 /* De-alloc array of per-TFD driver data */
238 kfree(txq->txb);
239 txq->txb = NULL;
240
241 /* 0-fill queue descriptor structure */
242 memset(txq, 0, sizeof(*txq));
243}
244
245/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
246 * DMA services
247 *
248 * Theory of operation
249 *
250 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
251 * of buffer descriptors, each of which points to one or more data buffers for
252 * the device to read from or fill. Driver and device exchange status of each
253 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
254 * entries in each circular buffer, to protect against confusing empty and full
255 * queue states.
256 *
257 * The device reads or writes the data in the queues via the device's several
258 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
259 *
260 * For Tx queues, there are low mark and high mark limits. If, after queuing
261 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
262 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
263 * > high mark, the Tx queue is resumed.
264 *
265 * See more detailed info in iwl-4965-hw.h.
266 ***************************************************/
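
/*
 * Minimal sketch (illustrative): the wrap helpers referred to above simply
 * step an index around the ring; this is roughly what iwl_queue_inc_wrap()
 * in iwl-helpers.h does, shown here only to make the circular-buffer
 * arithmetic concrete.
 */
static inline int example_inc_wrap(int index, int n_bd)
{
	return ++index == n_bd ? 0 : index;
}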
267
268int iwl_queue_space(const struct iwl_queue *q)
269{
270 int s = q->read_ptr - q->write_ptr;
271
272 if (q->read_ptr > q->write_ptr)
273 s -= q->n_bd;
274
275 if (s <= 0)
276 s += q->n_window;
277 /* keep some reserve to not confuse empty and full situations */
278 s -= 2;
279 if (s < 0)
280 s = 0;
281 return s;
282}
283EXPORT_SYMBOL(iwl_queue_space);
284
285
286/**
287 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
288 */
289static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
290 int count, int slots_num, u32 id)
291{
292 q->n_bd = count;
293 q->n_window = slots_num;
294 q->id = id;
295
296 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
297 * and iwl_queue_dec_wrap are broken. */
298 BUG_ON(!is_power_of_2(count));
299
300 /* slots_num must be power-of-two size, otherwise
301 * get_cmd_index is broken. */
302 BUG_ON(!is_power_of_2(slots_num));
303
304 q->low_mark = q->n_window / 4;
305 if (q->low_mark < 4)
306 q->low_mark = 4;
307
308 q->high_mark = q->n_window / 8;
309 if (q->high_mark < 2)
310 q->high_mark = 2;
311
312 q->write_ptr = q->read_ptr = 0;
313
314 return 0;
315}
316
317/**
318 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
319 */
320static int iwl_tx_queue_alloc(struct iwl_priv *priv,
321 struct iwl_tx_queue *txq, u32 id)
322{
323 struct pci_dev *dev = priv->pci_dev;
324
325 /* Driver private data, only for Tx (not command) queues,
326 * not shared with device. */
327 if (id != IWL_CMD_QUEUE_NUM) {
328 txq->txb = kmalloc(sizeof(txq->txb[0]) *
329 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
330 if (!txq->txb) {
331 IWL_ERROR("kmalloc for auxiliary BD "
332 "structures failed\n");
333 goto error;
334 }
335 } else
336 txq->txb = NULL;
337
338 /* Circular buffer of transmit frame descriptors (TFDs),
339 * shared with device */
340 txq->bd = pci_alloc_consistent(dev,
341 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
342 &txq->q.dma_addr);
343
344 if (!txq->bd) {
345 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
346 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
347 goto error;
348 }
349 txq->q.id = id;
350
351 return 0;
352
353 error:
354 kfree(txq->txb);
355 txq->txb = NULL;
356
357 return -ENOMEM;
358}
359
360/*
361 * Tell nic where to find circular buffer of Tx Frame Descriptors for
362 * given Tx queue, and enable the DMA channel used for that queue.
363 *
364 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
365 * channels supported in hardware.
366 */
367static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
368 struct iwl_tx_queue *txq)
369{
370 int rc;
371 unsigned long flags;
372 int txq_id = txq->q.id;
373
374 spin_lock_irqsave(&priv->lock, flags);
375 rc = iwl_grab_nic_access(priv);
376 if (rc) {
377 spin_unlock_irqrestore(&priv->lock, flags);
378 return rc;
379 }
380
381 /* Circular buffer (TFD queue in DRAM) physical base address */
382 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
383 txq->q.dma_addr >> 8);
384
385 /* Enable DMA channel, using same id as for TFD queue */
386 iwl_write_direct32(
387 priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
388 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
389 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
390 iwl_release_nic_access(priv);
391 spin_unlock_irqrestore(&priv->lock, flags);
392
393 return 0;
394}
395
396/**
397 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
398 */
399static int iwl_tx_queue_init(struct iwl_priv *priv,
400 struct iwl_tx_queue *txq,
401 int slots_num, u32 txq_id)
402{
403 struct pci_dev *dev = priv->pci_dev;
404 int len;
405 int rc = 0;
406
407 /*
408 * Alloc buffer array for commands (Tx or other types of commands).
409 * For the command queue (#4), allocate command space + one big
410	 * command for scan, since the scan command is very large; the system will
411 * not have two scans at the same time, so only one is needed.
412 * For normal Tx queues (all other queues), no super-size command
413 * space is needed.
414 */
415 len = sizeof(struct iwl_cmd) * slots_num;
416 if (txq_id == IWL_CMD_QUEUE_NUM)
417 len += IWL_MAX_SCAN_SIZE;
418 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
419 if (!txq->cmd)
420 return -ENOMEM;
421
422 /* Alloc driver data array and TFD circular buffer */
423 rc = iwl_tx_queue_alloc(priv, txq, txq_id);
424 if (rc) {
425 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
426
427 return -ENOMEM;
428 }
429 txq->need_update = 0;
430
431 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
432 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
433 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
434
435 /* Initialize queue's high/low-water marks, and head/tail indexes */
436 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
437
438 /* Tell device where to find queue */
439 iwl_hw_tx_queue_init(priv, txq);
440
441 return 0;
442}
443/**
444 * iwl_hw_txq_ctx_free - Free TXQ Context
445 *
446 * Destroy all TX DMA queues and structures
447 */
448void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
449{
450 int txq_id;
451
452 /* Tx queues */
453 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
454 iwl_tx_queue_free(priv, &priv->txq[txq_id]);
455
456 /* Keep-warm buffer */
457 iwl_kw_free(priv);
458}
459EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
460
461
462/**
463 * iwl_txq_ctx_reset - Reset TX queue context
464 * Destroys all DMA structures and initializes them again.
465 *
466 * @param priv
467 * @return error code
468 */
469int iwl_txq_ctx_reset(struct iwl_priv *priv)
470{
471 int ret = 0;
472 int txq_id, slots_num;
473 unsigned long flags;
474
475 iwl_kw_free(priv);
476
477 /* Free all tx/cmd queues and keep-warm buffer */
478 iwl_hw_txq_ctx_free(priv);
479
480 /* Alloc keep-warm buffer */
481 ret = iwl_kw_alloc(priv);
482 if (ret) {
483 IWL_ERROR("Keep Warm allocation failed");
484 goto error_kw;
485 }
486 spin_lock_irqsave(&priv->lock, flags);
487 ret = iwl_grab_nic_access(priv);
488 if (unlikely(ret)) {
489 spin_unlock_irqrestore(&priv->lock, flags);
490 goto error_reset;
491 }
492
493 /* Turn off all Tx DMA fifos */
494 priv->cfg->ops->lib->txq_set_sched(priv, 0);
495
496 iwl_release_nic_access(priv);
497 spin_unlock_irqrestore(&priv->lock, flags);
498
499
500 /* Tell nic where to find the keep-warm buffer */
501 ret = iwl_kw_init(priv);
502 if (ret) {
503 IWL_ERROR("kw_init failed\n");
504 goto error_reset;
505 }
506
507 /* Alloc and init all Tx queues, including the command queue (#4) */
508 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
509 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
510 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
511 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
512 txq_id);
513 if (ret) {
514 IWL_ERROR("Tx %d queue init failed\n", txq_id);
515 goto error;
516 }
517 }
518
519 return ret;
520
521 error:
522 iwl_hw_txq_ctx_free(priv);
523 error_reset:
524 iwl_kw_free(priv);
525 error_kw:
526 return ret;
527}
528/**
529 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
530 */
531void iwl_txq_ctx_stop(struct iwl_priv *priv)
532{
533
534 int txq_id;
535 unsigned long flags;
536
537
538 /* Turn off all Tx DMA fifos */
539 spin_lock_irqsave(&priv->lock, flags);
540 if (iwl_grab_nic_access(priv)) {
541 spin_unlock_irqrestore(&priv->lock, flags);
542 return;
543 }
544
545 priv->cfg->ops->lib->txq_set_sched(priv, 0);
546
547 /* Stop each Tx DMA channel, and wait for it to be idle */
548 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
549 iwl_write_direct32(priv,
550 FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
551 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
552 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
553 (txq_id), 200);
554 }
555 iwl_release_nic_access(priv);
556 spin_unlock_irqrestore(&priv->lock, flags);
557
558 /* Deallocate memory for all Tx queues */
559 iwl_hw_txq_ctx_free(priv);
560}
561EXPORT_SYMBOL(iwl_txq_ctx_stop);
562
563/*
564 * Build the basic portion of the REPLY_TX host command.
565 */
566static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
567 struct iwl_tx_cmd *tx_cmd,
568 struct ieee80211_tx_info *info,
569 struct ieee80211_hdr *hdr,
570 int is_unicast, u8 std_id)
571{
572 __le16 fc = hdr->frame_control;
573 __le32 tx_flags = tx_cmd->tx_flags;
574
575 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
576 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
577 tx_flags |= TX_CMD_FLG_ACK_MSK;
578 if (ieee80211_is_mgmt(fc))
579 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
580 if (ieee80211_is_probe_resp(fc) &&
581 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
582 tx_flags |= TX_CMD_FLG_TSF_MSK;
583 } else {
584 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
585 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
586 }
587
588 if (ieee80211_is_back_req(fc))
589 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
590
591
592 tx_cmd->sta_id = std_id;
593 if (ieee80211_has_morefrags(fc))
594 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
595
596 if (ieee80211_is_data_qos(fc)) {
597 u8 *qc = ieee80211_get_qos_ctl(hdr);
598 tx_cmd->tid_tspec = qc[0] & 0xf;
599 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
600 } else {
601 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
602 }
603
604 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
605
606 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
607 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
608
609 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
610 if (ieee80211_is_mgmt(fc)) {
611 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
612 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
613 else
614 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
615 } else {
616 tx_cmd->timeout.pm_frame_timeout = 0;
617 }
618
619 tx_cmd->driver_txop = 0;
620 tx_cmd->tx_flags = tx_flags;
621 tx_cmd->next_frame_len = 0;
622}
623
624#define RTS_HCCA_RETRY_LIMIT 3
625#define RTS_DFAULT_RETRY_LIMIT 60
626
627static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
628 struct iwl_tx_cmd *tx_cmd,
629 struct ieee80211_tx_info *info,
630 __le16 fc, int sta_id,
631 int is_hcca)
632{
633 u8 rts_retry_limit = 0;
634 u8 data_retry_limit = 0;
635 u8 rate_plcp;
636 u16 rate_flags = 0;
637 int rate_idx;
638
639 rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
640 IWL_RATE_COUNT - 1);
641
642 rate_plcp = iwl_rates[rate_idx].plcp;
643
644 rts_retry_limit = (is_hcca) ?
645 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
646
647 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
648 rate_flags |= RATE_MCS_CCK_MSK;
649
650
651 if (ieee80211_is_probe_resp(fc)) {
652 data_retry_limit = 3;
653 if (data_retry_limit < rts_retry_limit)
654 rts_retry_limit = data_retry_limit;
655 } else
656 data_retry_limit = IWL_DEFAULT_TX_RETRY;
657
658 if (priv->data_retry_limit != -1)
659 data_retry_limit = priv->data_retry_limit;
660
661
662 if (ieee80211_is_data(fc)) {
663 tx_cmd->initial_rate_index = 0;
664 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
665 } else {
666 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
667 case cpu_to_le16(IEEE80211_STYPE_AUTH):
668 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
669 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
670 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
671 if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
672 tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
673 tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
674 }
675 break;
676 default:
677 break;
678 }
679
680 /* Alternate between antenna A and B for successive frames */
681 if (priv->use_ant_b_for_management_frame) {
682 priv->use_ant_b_for_management_frame = 0;
683 rate_flags |= RATE_MCS_ANT_B_MSK;
684 } else {
685 priv->use_ant_b_for_management_frame = 1;
686 rate_flags |= RATE_MCS_ANT_A_MSK;
687 }
688 }
689
690 tx_cmd->rts_retry_limit = rts_retry_limit;
691 tx_cmd->data_retry_limit = data_retry_limit;
692 tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
693}
694
695static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
696 struct ieee80211_tx_info *info,
697 struct iwl_tx_cmd *tx_cmd,
698 struct sk_buff *skb_frag,
699 int sta_id)
700{
701 struct ieee80211_key_conf *keyconf = info->control.hw_key;
702
703 switch (keyconf->alg) {
704 case ALG_CCMP:
705 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
706 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
707 if (info->flags & IEEE80211_TX_CTL_AMPDU)
708 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
709 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
710 break;
711
712 case ALG_TKIP:
713 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
714 ieee80211_get_tkip_key(keyconf, skb_frag,
715 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
716 IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
717 break;
718
719 case ALG_WEP:
720 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
721 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
722
723 if (keyconf->keylen == WEP_KEY_LEN_128)
724 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
725
726 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
727
728 IWL_DEBUG_TX("Configuring packet for WEP encryption "
729 "with key %d\n", keyconf->keyidx);
730 break;
731
732 default:
733 printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
734 break;
735 }
736}
737
738static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
739{
740	/* 0 - mgmt, 1 - ctl, 2 - data */
741 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
742 priv->tx_stats[idx].cnt++;
743 priv->tx_stats[idx].bytes += len;
744}
745
746/*
747 * start REPLY_TX command process
748 */
749int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
750{
751 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
752 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
753 struct iwl_tfd_frame *tfd;
754 u32 *control_flags;
755 int txq_id = skb_get_queue_mapping(skb);
756 struct iwl_tx_queue *txq = NULL;
757 struct iwl_queue *q = NULL;
758 dma_addr_t phys_addr;
759 dma_addr_t txcmd_phys;
760 dma_addr_t scratch_phys;
761 struct iwl_cmd *out_cmd = NULL;
762 struct iwl_tx_cmd *tx_cmd;
763 u16 len, idx, len_org;
764 u16 seq_number = 0;
765 u8 id, hdr_len, unicast;
766 u8 sta_id;
767 __le16 fc;
768 u8 wait_write_ptr = 0;
769 u8 tid = 0;
770 u8 *qc = NULL;
771 unsigned long flags;
772 int ret;
773
774 spin_lock_irqsave(&priv->lock, flags);
775 if (iwl_is_rfkill(priv)) {
776 IWL_DEBUG_DROP("Dropping - RF KILL\n");
777 goto drop_unlock;
778 }
779
780 if (!priv->vif) {
781 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
782 goto drop_unlock;
783 }
784
785 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
786 IWL_INVALID_RATE) {
787 IWL_ERROR("ERROR: No TX rate available.\n");
788 goto drop_unlock;
789 }
790
791 unicast = !is_multicast_ether_addr(hdr->addr1);
792 id = 0;
793
794 fc = hdr->frame_control;
795
796#ifdef CONFIG_IWLWIFI_DEBUG
797 if (ieee80211_is_auth(fc))
798 IWL_DEBUG_TX("Sending AUTH frame\n");
799 else if (ieee80211_is_assoc_req(fc))
800 IWL_DEBUG_TX("Sending ASSOC frame\n");
801 else if (ieee80211_is_reassoc_req(fc))
802 IWL_DEBUG_TX("Sending REASSOC frame\n");
803#endif
804
805	/* drop all data frames if we are not associated */
806 if (ieee80211_is_data(fc) &&
807 (!iwl_is_associated(priv) ||
808 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
809 !priv->assoc_station_added)) {
810 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
811 goto drop_unlock;
812 }
813
814 spin_unlock_irqrestore(&priv->lock, flags);
815
816 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc));
817
818 /* Find (or create) index into station table for destination station */
819 sta_id = iwl_get_sta_id(priv, hdr);
820 if (sta_id == IWL_INVALID_STATION) {
821 DECLARE_MAC_BUF(mac);
822
823 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
824 print_mac(mac, hdr->addr1));
825 goto drop;
826 }
827
828 IWL_DEBUG_TX("station Id %d\n", sta_id);
829
830 if (ieee80211_is_data_qos(fc)) {
831 qc = ieee80211_get_qos_ctl(hdr);
832 tid = qc[0] & 0xf;
833 seq_number = priv->stations[sta_id].tid[tid].seq_number &
834 IEEE80211_SCTL_SEQ;
835 hdr->seq_ctrl = cpu_to_le16(seq_number) |
836 (hdr->seq_ctrl &
837 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
838 seq_number += 0x10;
839 /* aggregation is on for this <sta,tid> */
840 if (info->flags & IEEE80211_TX_CTL_AMPDU)
841 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
842 priv->stations[sta_id].tid[tid].tfds_in_queue++;
843 }
844
845 /* Descriptor for chosen Tx queue */
846 txq = &priv->txq[txq_id];
847 q = &txq->q;
848
849 spin_lock_irqsave(&priv->lock, flags);
850
851 /* Set up first empty TFD within this queue's circular TFD buffer */
852 tfd = &txq->bd[q->write_ptr];
853 memset(tfd, 0, sizeof(*tfd));
854 control_flags = (u32 *) tfd;
855 idx = get_cmd_index(q, q->write_ptr, 0);
856
857 /* Set up driver data for this TFD */
858 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
859 txq->txb[q->write_ptr].skb[0] = skb;
860
861 /* Set up first empty entry in queue's array of Tx/cmd buffers */
862 out_cmd = &txq->cmd[idx];
863 tx_cmd = &out_cmd->cmd.tx;
864 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
865 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
866
867 /*
868 * Set up the Tx-command (not MAC!) header.
869 * Store the chosen Tx queue and TFD index within the sequence field;
870 * after Tx, uCode's Tx response will return this value so driver can
871 * locate the frame within the tx queue and do post-tx processing.
872 */
873 out_cmd->hdr.cmd = REPLY_TX;
874 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
875 INDEX_TO_SEQ(q->write_ptr)));
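	/*
	 * Worked example (assuming the usual sequence-field layout, with the
	 * TFD index in the low byte and the queue number starting at bit 8):
	 * txq_id = 2 and write_ptr = 5 give sequence 0x0205, and the Tx
	 * response's SEQ_TO_QUEUE()/SEQ_TO_INDEX() recover 2 and 5 again.
	 */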
876
877 /* Copy MAC header from skb into command buffer */
878 memcpy(tx_cmd->hdr, hdr, hdr_len);
879
880 /*
881 * Use the first empty entry in this queue's command buffer array
882 * to contain the Tx command and MAC header concatenated together
883 * (payload data will be in another buffer).
884 * Size of this varies, due to varying MAC header length.
885 * If end is not dword aligned, we'll have 2 extra bytes at the end
886 * of the MAC header (device reads on dword boundaries).
887 * We'll tell device about this padding later.
888 */
889 len = sizeof(struct iwl_tx_cmd) +
890 sizeof(struct iwl_cmd_header) + hdr_len;
891
892 len_org = len;
893 len = (len + 3) & ~3;
894
895 if (len_org != len)
896 len_org = 1;
897 else
898 len_org = 0;
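	/*
	 * Worked example: if the command header + Tx command + MAC header
	 * come to, say, 62 bytes, len is rounded up to 64 and len_org = 1
	 * records that 2 pad bytes follow the MAC header; the device is told
	 * about them via TX_CMD_FLG_MH_PAD_MSK further below.
	 */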
899
900 /* Physical address of this Tx command's header (not MAC header!),
901 * within command buffer array. */
902 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
903 offsetof(struct iwl_cmd, hdr);
904
905 /* Add buffer containing Tx command and MAC(!) header to TFD's
906 * first entry */
907 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
908
909 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
910 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
911
912 /* Set up TFD's 2nd entry to point directly to remainder of skb,
913 * if any (802.11 null frames have no payload). */
914 len = skb->len - hdr_len;
915 if (len) {
916 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
917 len, PCI_DMA_TODEVICE);
918 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
919 }
920
921 /* Tell NIC about any 2-byte padding after MAC header */
922 if (len_org)
923 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
924
925 /* Total # bytes to be transmitted */
926 len = (u16)skb->len;
927 tx_cmd->len = cpu_to_le16(len);
928 /* TODO need this for burst mode later on */
929 iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);
930
931 /* set is_hcca to 0; it probably will never be implemented */
932 iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
933
934 iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
935
936 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
937 offsetof(struct iwl_tx_cmd, scratch);
938 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
939 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
940
941 if (!ieee80211_has_morefrags(hdr->frame_control)) {
942 txq->need_update = 1;
943 if (qc)
944 priv->stations[sta_id].tid[tid].seq_number = seq_number;
945 } else {
946 wait_write_ptr = 1;
947 txq->need_update = 0;
948 }
949
950 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
951
952 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
953
954 /* Set up entry for this TFD in Tx byte-count array */
955 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
956
957 /* Tell device the write index *just past* this latest filled TFD */
958 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
959 ret = iwl_txq_update_write_ptr(priv, txq);
960 spin_unlock_irqrestore(&priv->lock, flags);
961
962 if (ret)
963 return ret;
964
965 if ((iwl_queue_space(q) < q->high_mark)
966 && priv->mac80211_registered) {
967 if (wait_write_ptr) {
968 spin_lock_irqsave(&priv->lock, flags);
969 txq->need_update = 1;
970 iwl_txq_update_write_ptr(priv, txq);
971 spin_unlock_irqrestore(&priv->lock, flags);
972 }
973
974 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
975 }
976
977 return 0;
978
979drop_unlock:
980 spin_unlock_irqrestore(&priv->lock, flags);
981drop:
982 return -1;
983}
984EXPORT_SYMBOL(iwl_tx_skb);
985
986/*************** HOST COMMAND QUEUE FUNCTIONS *****/
987
988/**
989 * iwl_enqueue_hcmd - enqueue a uCode command
990 * @priv: device private data pointer
991 * @cmd: a pointer to the uCode command structure
992 *
993 * The function returns a value < 0 to indicate that the operation
994 * failed. On success, it returns the index of the command in the
995 * command queue.
996 */
997int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
998{
999 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
1000 struct iwl_queue *q = &txq->q;
1001 struct iwl_tfd_frame *tfd;
1002 u32 *control_flags;
1003 struct iwl_cmd *out_cmd;
1004 u32 idx;
1005 u16 fix_size;
1006 dma_addr_t phys_addr;
1007 int ret;
1008 unsigned long flags;
1009
1010 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
1011 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
1012
1013 /* If any of the command structures end up being larger than
1014	 * the TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
1015 * we will need to increase the size of the TFD entries */
1016 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
1017 !(cmd->meta.flags & CMD_SIZE_HUGE));
1018
1019 if (iwl_is_rfkill(priv)) {
1020 IWL_DEBUG_INFO("Not sending command - RF KILL");
1021 return -EIO;
1022 }
1023
1024 if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
1025 IWL_ERROR("No space for Tx\n");
1026 return -ENOSPC;
1027 }
1028
1029 spin_lock_irqsave(&priv->hcmd_lock, flags);
1030
1031 tfd = &txq->bd[q->write_ptr];
1032 memset(tfd, 0, sizeof(*tfd));
1033
1034 control_flags = (u32 *) tfd;
1035
1036 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
1037 out_cmd = &txq->cmd[idx];
1038
1039 out_cmd->hdr.cmd = cmd->id;
1040 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
1041 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
1042
1043 /* At this point, the out_cmd now has all of the incoming cmd
1044 * information */
1045
1046 out_cmd->hdr.flags = 0;
1047 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
1048 INDEX_TO_SEQ(q->write_ptr));
1049 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
1050 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
1051
1052 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
1053 offsetof(struct iwl_cmd, hdr);
1054 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
1055
1056 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
1057 "%d bytes at %d[%d]:%d\n",
1058 get_cmd_string(out_cmd->hdr.cmd),
1059 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
1060 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
1061
1062 txq->need_update = 1;
1063
1064 /* Set up entry in queue's byte count circular buffer */
1065 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
1066
1067 /* Increment and update queue's write index */
1068 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1069 ret = iwl_txq_update_write_ptr(priv, txq);
1070
1071 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
1072 return ret ? ret : idx;
1073}
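
/*
 * Usage sketch (illustrative): this is roughly how the helpers in
 * iwl-hcmd.c drive iwl_enqueue_hcmd() for a fire-and-forget command.  The
 * wrapper name below is hypothetical and error handling is trimmed.
 */
static int example_send_async(struct iwl_priv *priv, u8 id,
			      const void *data, u16 len)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
		.meta.flags = CMD_ASYNC,
	};

	return iwl_enqueue_hcmd(priv, &cmd);
}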
1074
1075int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1076{
1077 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1078 struct iwl_queue *q = &txq->q;
1079 struct iwl_tx_info *tx_info;
1080 int nfreed = 0;
1081
1082 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1083 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
1084 "is out of range [0-%d] %d %d.\n", txq_id,
1085 index, q->n_bd, q->write_ptr, q->read_ptr);
1086 return 0;
1087 }
1088
1089 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
1090 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1091
1092 tx_info = &txq->txb[txq->q.read_ptr];
1093 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
1094 tx_info->skb[0] = NULL;
1095
1096 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1097 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1098
1099 iwl_hw_txq_free_tfd(priv, txq);
1100 nfreed++;
1101 }
1102 return nfreed;
1103}
1104EXPORT_SYMBOL(iwl_tx_queue_reclaim);
1105
1106
1107/**
1108 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
1109 *
1110 * When FW advances 'R' index, all entries between old and new 'R' index
1111 * need to be reclaimed. As a result, some free space forms. If there is
1112 * enough free space (> low mark), wake the stack that feeds us.
1113 */
1114static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1115{
1116 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1117 struct iwl_queue *q = &txq->q;
1118 int nfreed = 0;
1119
1120 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1121 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
1122 "is out of range [0-%d] %d %d.\n", txq_id,
1123 index, q->n_bd, q->write_ptr, q->read_ptr);
1124 return;
1125 }
1126
1127 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
1128 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1129
1130 if (nfreed > 1) {
1131 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
1132 q->write_ptr, q->read_ptr);
1133 queue_work(priv->workqueue, &priv->restart);
1134 }
1135 nfreed++;
1136 }
1137}
1138
1139/**
1140 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
1141 * @rxb: Rx buffer to reclaim
1142 *
1143 * If an Rx buffer has an async callback associated with it, the callback
1144 * will be executed. The attached skb (if present) will only be freed
1145 * if the callback returns 1.
1146 */
1147void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1148{
1149 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1150 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1151 int txq_id = SEQ_TO_QUEUE(sequence);
1152 int index = SEQ_TO_INDEX(sequence);
1153 int huge = sequence & SEQ_HUGE_FRAME;
1154 int cmd_index;
1155 struct iwl_cmd *cmd;
1156
1157 /* If a Tx command is being handled and it isn't in the actual
1158	 * command queue, then a command routing bug has been introduced
1159 * in the queue management code. */
1160 if (txq_id != IWL_CMD_QUEUE_NUM)
1161 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
1162 txq_id, pkt->hdr.cmd);
1163 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
1164
1165 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
1166 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
1167
1168 /* Input error checking is done when commands are added to queue. */
1169 if (cmd->meta.flags & CMD_WANT_SKB) {
1170 cmd->meta.source->u.skb = rxb->skb;
1171 rxb->skb = NULL;
1172 } else if (cmd->meta.u.callback &&
1173 !cmd->meta.u.callback(priv, cmd, rxb->skb))
1174 rxb->skb = NULL;
1175
1176 iwl_hcmd_queue_reclaim(priv, txq_id, index);
1177
1178 if (!(cmd->meta.flags & CMD_ASYNC)) {
1179 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1180 wake_up_interruptible(&priv->wait_command_queue);
1181 }
1182}
1183EXPORT_SYMBOL(iwl_tx_cmd_complete);
1184
1185/*
1186 * Find first available (lowest unused) Tx Queue, mark it "active".
1187 * Called only when finding queue for aggregation.
1188 * Should never return anything < 7, because queues 0-6 should already
1189 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
1190 */
1191static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
1192{
1193 int txq_id;
1194
1195 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
1196 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
1197 return txq_id;
1198 return -1;
1199}
1200
1201int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1202{
1203 int sta_id;
1204 int tx_fifo;
1205 int txq_id;
1206 int ret;
1207 unsigned long flags;
1208 struct iwl_tid_data *tid_data;
1209 DECLARE_MAC_BUF(mac);
1210
1211 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1212 tx_fifo = default_tid_to_tx_fifo[tid];
1213 else
1214 return -EINVAL;
1215
1216 IWL_WARNING("%s on ra = %s tid = %d\n",
1217 __func__, print_mac(mac, ra), tid);
1218
1219 sta_id = iwl_find_station(priv, ra);
1220 if (sta_id == IWL_INVALID_STATION)
1221 return -ENXIO;
1222
1223 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
1224 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
1225 return -ENXIO;
1226 }
1227
1228 txq_id = iwl_txq_ctx_activate_free(priv);
1229 if (txq_id == -1)
1230 return -ENXIO;
1231
1232 spin_lock_irqsave(&priv->sta_lock, flags);
1233 tid_data = &priv->stations[sta_id].tid[tid];
1234 *ssn = SEQ_TO_SN(tid_data->seq_number);
1235 tid_data->agg.txq_id = txq_id;
1236 spin_unlock_irqrestore(&priv->sta_lock, flags);
1237
1238 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
1239 sta_id, tid, *ssn);
1240 if (ret)
1241 return ret;
1242
1243 if (tid_data->tfds_in_queue == 0) {
1244 printk(KERN_ERR "HW queue is empty\n");
1245 tid_data->agg.state = IWL_AGG_ON;
1246 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
1247 } else {
1248 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
1249 tid_data->tfds_in_queue);
1250 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1251 }
1252 return ret;
1253}
1254EXPORT_SYMBOL(iwl_tx_agg_start);
1255
1256int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1257{
1258 int tx_fifo_id, txq_id, sta_id, ssn = -1;
1259 struct iwl_tid_data *tid_data;
1260 int ret, write_ptr, read_ptr;
1261 unsigned long flags;
1262 DECLARE_MAC_BUF(mac);
1263
1264 if (!ra) {
1265 IWL_ERROR("ra = NULL\n");
1266 return -EINVAL;
1267 }
1268
1269 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1270 tx_fifo_id = default_tid_to_tx_fifo[tid];
1271 else
1272 return -EINVAL;
1273
1274 sta_id = iwl_find_station(priv, ra);
1275
1276 if (sta_id == IWL_INVALID_STATION)
1277 return -ENXIO;
1278
1279 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1280 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
1281
1282 tid_data = &priv->stations[sta_id].tid[tid];
1283 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
1284 txq_id = tid_data->agg.txq_id;
1285 write_ptr = priv->txq[txq_id].q.write_ptr;
1286 read_ptr = priv->txq[txq_id].q.read_ptr;
1287
1288 /* The queue is not empty */
1289 if (write_ptr != read_ptr) {
1290 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
1291 priv->stations[sta_id].tid[tid].agg.state =
1292 IWL_EMPTYING_HW_QUEUE_DELBA;
1293 return 0;
1294 }
1295
1296 IWL_DEBUG_HT("HW queue is empty\n");
1297 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1298
1299 spin_lock_irqsave(&priv->lock, flags);
1300 ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1301 tx_fifo_id);
1302 spin_unlock_irqrestore(&priv->lock, flags);
1303
1304 if (ret)
1305 return ret;
1306
1307 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
1308
1309 return 0;
1310}
1311EXPORT_SYMBOL(iwl_tx_agg_stop);
1312
1313int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1314{
1315 struct iwl_queue *q = &priv->txq[txq_id].q;
1316 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1317 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1318
1319 switch (priv->stations[sta_id].tid[tid].agg.state) {
1320 case IWL_EMPTYING_HW_QUEUE_DELBA:
1321 /* We are reclaiming the last packet of the */
1322 /* aggregated HW queue */
1323 if (txq_id == tid_data->agg.txq_id &&
1324 q->read_ptr == q->write_ptr) {
1325 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1326 int tx_fifo = default_tid_to_tx_fifo[tid];
1327 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
1328 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1329 ssn, tx_fifo);
1330 tid_data->agg.state = IWL_AGG_OFF;
1331 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
1332 }
1333 break;
1334 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1335 /* We are reclaiming the last packet of the queue */
1336 if (tid_data->tfds_in_queue == 0) {
1337 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
1338 tid_data->agg.state = IWL_AGG_ON;
1339 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
1340 }
1341 break;
1342 }
1343 return 0;
1344}
1345EXPORT_SYMBOL(iwl_txq_check_empty);
1346
1347/**
1348 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
1349 *
1350 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1351 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1352 */
1353static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1354 struct iwl_ht_agg *agg,
1355 struct iwl_compressed_ba_resp *ba_resp)
1356
1357{
1358 int i, sh, ack;
1359 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1360 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1361 u64 bitmap;
1362 int successes = 0;
1363 struct ieee80211_tx_info *info;
1364
1365 if (unlikely(!agg->wait_for_ba)) {
1366 IWL_ERROR("Received BA when not expected\n");
1367 return -EINVAL;
1368 }
1369
1370 /* Mark that the expected block-ack response arrived */
1371 agg->wait_for_ba = 0;
1372 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
1373
1374 /* Calculate shift to align block-ack bits with our Tx window bits */
1375 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
1376	if (sh < 0) /* tbd: something is wrong with the indices */
1377 sh += 0x100;
1378
1379 /* don't use 64-bit values for now */
1380 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1381
1382 if (agg->frame_count > (64 - sh)) {
1383 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
1384 return -1;
1385 }
1386
1387 /* check for success or failure according to the
1388 * transmitted bitmap and block-ack bitmap */
1389 bitmap &= agg->bitmap;
1390
1391 /* For each frame attempted in aggregation,
1392 * update driver's record of tx frame's status. */
1393 for (i = 0; i < agg->frame_count ; i++) {
1394 ack = bitmap & (1 << i);
1395 successes += !!ack;
1396 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
1397 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
1398 agg->start_idx + i);
1399 }
1400
1401 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
1402 memset(&info->status, 0, sizeof(info->status));
1403 info->flags = IEEE80211_TX_STAT_ACK;
1404 info->flags |= IEEE80211_TX_STAT_AMPDU;
1405 info->status.ampdu_ack_map = successes;
1406 info->status.ampdu_ack_len = agg->frame_count;
1407 iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1408
1409 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
1410
1411 return 0;
1412}
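
/*
 * Worked example for the shift above (illustrative): if the driver's Tx
 * window starts at agg->start_idx = 5 while the BA's starting sequence
 * maps to index 3, then sh = 2 and the 64-bit BA bitmap is shifted right
 * by 2 so that bit 0 lines up with the first frame the driver sent;
 * ANDing with agg->bitmap then leaves a 1 only for frames that were both
 * part of this aggregate and ACKed.
 */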
1413
1414/**
1415 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1416 *
1417 * Handles block-acknowledge notification from device, which reports success
1418 * of frames sent via aggregation.
1419 */
1420void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1421 struct iwl_rx_mem_buffer *rxb)
1422{
1423 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1424 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1425 int index;
1426 struct iwl_tx_queue *txq = NULL;
1427 struct iwl_ht_agg *agg;
1428 DECLARE_MAC_BUF(mac);
1429
1430 /* "flow" corresponds to Tx queue */
1431 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1432
1433 /* "ssn" is start of block-ack Tx window, corresponds to index
1434 * (in Tx queue's circular buffer) of first TFD/frame in window */
1435 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1436
1437 if (scd_flow >= priv->hw_params.max_txq_num) {
1438 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
1439 return;
1440 }
1441
1442 txq = &priv->txq[scd_flow];
1443 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
1444
1445 /* Find index just before block-ack window */
1446 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1447
1448 /* TODO: Need to get this copy more safely - now good for debug */
1449
1450 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
1451 "sta_id = %d\n",
1452 agg->wait_for_ba,
1453 print_mac(mac, (u8 *) &ba_resp->sta_addr_lo32),
1454 ba_resp->sta_id);
1455 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
1456 "%d, scd_ssn = %d\n",
1457 ba_resp->tid,
1458 ba_resp->seq_ctl,
1459 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1460 ba_resp->scd_flow,
1461 ba_resp->scd_ssn);
1462 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
1463 agg->start_idx,
1464 (unsigned long long)agg->bitmap);
1465
1466 /* Update driver's record of ACK vs. not for each frame in window */
1467 iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1468
1469 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1470 * block-ack window (we assume that they've been successfully
1471 * transmitted ... if not, it's too late anyway). */
1472 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1473 /* calculate mac80211 ampdu sw queue to wake */
1474 int ampdu_q =
1475 scd_flow - priv->hw_params.first_ampdu_q + priv->hw->queues;
1476 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
1477 priv->stations[ba_resp->sta_id].
1478 tid[ba_resp->tid].tfds_in_queue -= freed;
1479 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
1480 priv->mac80211_registered &&
1481 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
1482 ieee80211_wake_queue(priv->hw, ampdu_q);
1483
1484 iwl_txq_check_empty(priv, ba_resp->sta_id,
1485 ba_resp->tid, scd_flow);
1486 }
1487}
1488EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
1489
1490#ifdef CONFIG_IWLWIFI_DEBUG
1491#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
1492
1493const char *iwl_get_tx_fail_reason(u32 status)
1494{
1495 switch (status & TX_STATUS_MSK) {
1496 case TX_STATUS_SUCCESS:
1497 return "SUCCESS";
1498 TX_STATUS_ENTRY(SHORT_LIMIT);
1499 TX_STATUS_ENTRY(LONG_LIMIT);
1500 TX_STATUS_ENTRY(FIFO_UNDERRUN);
1501 TX_STATUS_ENTRY(MGMNT_ABORT);
1502 TX_STATUS_ENTRY(NEXT_FRAG);
1503 TX_STATUS_ENTRY(LIFE_EXPIRE);
1504 TX_STATUS_ENTRY(DEST_PS);
1505 TX_STATUS_ENTRY(ABORTED);
1506 TX_STATUS_ENTRY(BT_RETRY);
1507 TX_STATUS_ENTRY(STA_INVALID);
1508 TX_STATUS_ENTRY(FRAG_DROPPED);
1509 TX_STATUS_ENTRY(TID_DISABLE);
1510 TX_STATUS_ENTRY(FRAME_FLUSHED);
1511 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
1512 TX_STATUS_ENTRY(TX_LOCKED);
1513 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
1514 }
1515
1516 return "UNKNOWN";
1517}
1518EXPORT_SYMBOL(iwl_get_tx_fail_reason);
1519#endif /* CONFIG_IWLWIFI_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 6027e1119c3f..4a22d3fba75b 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -102,16 +102,6 @@ MODULE_VERSION(DRV_VERSION);
102MODULE_AUTHOR(DRV_COPYRIGHT); 102MODULE_AUTHOR(DRV_COPYRIGHT);
103MODULE_LICENSE("GPL"); 103MODULE_LICENSE("GPL");
104 104
105static __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
106{
107 u16 fc = le16_to_cpu(hdr->frame_control);
108 int hdr_len = ieee80211_get_hdrlen(fc);
109
110 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
111 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
112 return NULL;
113}
114
115static const struct ieee80211_supported_band *iwl3945_get_band( 105static const struct ieee80211_supported_band *iwl3945_get_band(
116 struct iwl3945_priv *priv, enum ieee80211_band band) 106 struct iwl3945_priv *priv, enum ieee80211_band band)
117{ 107{
@@ -547,10 +537,20 @@ static inline int iwl3945_is_init(struct iwl3945_priv *priv)
547 return test_bit(STATUS_INIT, &priv->status); 537 return test_bit(STATUS_INIT, &priv->status);
548} 538}
549 539
540static inline int iwl3945_is_rfkill_sw(struct iwl3945_priv *priv)
541{
542 return test_bit(STATUS_RF_KILL_SW, &priv->status);
543}
544
545static inline int iwl3945_is_rfkill_hw(struct iwl3945_priv *priv)
546{
547 return test_bit(STATUS_RF_KILL_HW, &priv->status);
548}
549
550static inline int iwl3945_is_rfkill(struct iwl3945_priv *priv) 550static inline int iwl3945_is_rfkill(struct iwl3945_priv *priv)
551{ 551{
552 return test_bit(STATUS_RF_KILL_HW, &priv->status) || 552 return iwl3945_is_rfkill_hw(priv) ||
553 test_bit(STATUS_RF_KILL_SW, &priv->status); 553 iwl3945_is_rfkill_sw(priv);
554} 554}
555 555
556static inline int iwl3945_is_ready_rf(struct iwl3945_priv *priv) 556static inline int iwl3945_is_ready_rf(struct iwl3945_priv *priv)
@@ -980,7 +980,7 @@ static int iwl3945_full_rxon_required(struct iwl3945_priv *priv)
980{ 980{
981 981
982 /* These items are only settable from the full RXON command */ 982 /* These items are only settable from the full RXON command */
983 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) || 983 if (!(iwl3945_is_associated(priv)) ||
984 compare_ether_addr(priv->staging_rxon.bssid_addr, 984 compare_ether_addr(priv->staging_rxon.bssid_addr,
985 priv->active_rxon.bssid_addr) || 985 priv->active_rxon.bssid_addr) ||
986 compare_ether_addr(priv->staging_rxon.node_addr, 986 compare_ether_addr(priv->staging_rxon.node_addr,
@@ -2035,36 +2035,6 @@ static int iwl3945_send_power_mode(struct iwl3945_priv *priv, u32 mode)
2035 return rc; 2035 return rc;
2036} 2036}
2037 2037
2038int iwl3945_is_network_packet(struct iwl3945_priv *priv, struct ieee80211_hdr *header)
2039{
2040 /* Filter incoming packets to determine if they are targeted toward
2041 * this network, discarding packets coming from ourselves */
2042 switch (priv->iw_mode) {
2043 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2044 /* packets from our adapter are dropped (echo) */
2045 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2046 return 0;
2047 /* {broad,multi}cast packets to our IBSS go through */
2048 if (is_multicast_ether_addr(header->addr1))
2049 return !compare_ether_addr(header->addr3, priv->bssid);
2050 /* packets to our adapter go through */
2051 return !compare_ether_addr(header->addr1, priv->mac_addr);
2052 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2053 /* packets from our adapter are dropped (echo) */
2054 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2055 return 0;
2056 /* {broad,multi}cast packets to our BSS go through */
2057 if (is_multicast_ether_addr(header->addr1))
2058 return !compare_ether_addr(header->addr2, priv->bssid);
2059 /* packets to our adapter go through */
2060 return !compare_ether_addr(header->addr1, priv->mac_addr);
2061 default:
2062 return 1;
2063 }
2064
2065 return 1;
2066}
2067
2068/** 2038/**
2069 * iwl3945_scan_cancel - Cancel any currently executing HW scan 2039 * iwl3945_scan_cancel - Cancel any currently executing HW scan
2070 * 2040 *
@@ -2117,20 +2087,6 @@ static int iwl3945_scan_cancel_timeout(struct iwl3945_priv *priv, unsigned long
2117 return ret; 2087 return ret;
2118} 2088}
2119 2089
2120static void iwl3945_sequence_reset(struct iwl3945_priv *priv)
2121{
2122 /* Reset ieee stats */
2123
2124 /* We don't reset the net_device_stats (ieee->stats) on
2125 * re-association */
2126
2127 priv->last_seq_num = -1;
2128 priv->last_frag_num = -1;
2129 priv->last_packet_time = 0;
2130
2131 iwl3945_scan_cancel(priv);
2132}
2133
2134#define MAX_UCODE_BEACON_INTERVAL 1024 2090#define MAX_UCODE_BEACON_INTERVAL 1024
2135#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA) 2091#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2136 2092
@@ -2322,7 +2278,7 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2322#endif 2278#endif
2323 2279
2324 ch_info = iwl3945_get_channel_info(priv, priv->band, 2280 ch_info = iwl3945_get_channel_info(priv, priv->band,
2325 le16_to_cpu(priv->staging_rxon.channel)); 2281 le16_to_cpu(priv->active_rxon.channel));
2326 2282
2327 if (!ch_info) 2283 if (!ch_info)
2328 ch_info = &priv->channel_info[0]; 2284 ch_info = &priv->channel_info[0];
@@ -2389,12 +2345,13 @@ static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
2389} 2345}
2390 2346
2391static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv, 2347static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2392 struct ieee80211_tx_control *ctl, 2348 struct ieee80211_tx_info *info,
2393 struct iwl3945_cmd *cmd, 2349 struct iwl3945_cmd *cmd,
2394 struct sk_buff *skb_frag, 2350 struct sk_buff *skb_frag,
2395 int last_frag) 2351 int last_frag)
2396{ 2352{
2397 struct iwl3945_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; 2353 struct iwl3945_hw_key *keyinfo =
2354 &priv->stations[info->control.hw_key->hw_key_idx].keyinfo;
2398 2355
2399 switch (keyinfo->alg) { 2356 switch (keyinfo->alg) {
2400 case ALG_CCMP: 2357 case ALG_CCMP:
@@ -2417,7 +2374,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2417 2374
2418 case ALG_WEP: 2375 case ALG_WEP:
2419 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | 2376 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2420 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 2377 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2421 2378
2422 if (keyinfo->keylen == 13) 2379 if (keyinfo->keylen == 13)
2423 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; 2380 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
@@ -2425,7 +2382,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2425 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); 2382 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2426 2383
2427 IWL_DEBUG_TX("Configuring packet for WEP encryption " 2384 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2428 "with key %d\n", ctl->key_idx); 2385 "with key %d\n", info->control.hw_key->hw_key_idx);
2429 break; 2386 break;
2430 2387
2431 default: 2388 default:
@@ -2439,20 +2396,19 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2439 */ 2396 */
2440static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv, 2397static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2441 struct iwl3945_cmd *cmd, 2398 struct iwl3945_cmd *cmd,
2442 struct ieee80211_tx_control *ctrl, 2399 struct ieee80211_tx_info *info,
2443 struct ieee80211_hdr *hdr, 2400 struct ieee80211_hdr *hdr,
2444 int is_unicast, u8 std_id) 2401 int is_unicast, u8 std_id)
2445{ 2402{
2446 __le16 *qc; 2403 __le16 fc = hdr->frame_control;
2447 u16 fc = le16_to_cpu(hdr->frame_control);
2448 __le32 tx_flags = cmd->cmd.tx.tx_flags; 2404 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2449 2405
2450 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2406 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2451 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) { 2407 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
2452 tx_flags |= TX_CMD_FLG_ACK_MSK; 2408 tx_flags |= TX_CMD_FLG_ACK_MSK;
2453 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) 2409 if (ieee80211_is_mgmt(fc))
2454 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 2410 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2455 if (ieee80211_is_probe_response(fc) && 2411 if (ieee80211_is_probe_resp(fc) &&
2456 !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) 2412 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2457 tx_flags |= TX_CMD_FLG_TSF_MSK; 2413 tx_flags |= TX_CMD_FLG_TSF_MSK;
2458 } else { 2414 } else {
@@ -2461,20 +2417,21 @@ static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2461 } 2417 }
2462 2418
2463 cmd->cmd.tx.sta_id = std_id; 2419 cmd->cmd.tx.sta_id = std_id;
2464 if (ieee80211_get_morefrag(hdr)) 2420 if (ieee80211_has_morefrags(fc))
2465 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 2421 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2466 2422
2467 qc = ieee80211_get_qos_ctrl(hdr); 2423 if (ieee80211_is_data_qos(fc)) {
2468 if (qc) { 2424 u8 *qc = ieee80211_get_qos_ctl(hdr);
2469 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf); 2425 cmd->cmd.tx.tid_tspec = qc[0] & 0xf;
2470 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 2426 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2471 } else 2427 } else {
2472 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 2428 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2429 }
2473 2430
2474 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) { 2431 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
2475 tx_flags |= TX_CMD_FLG_RTS_MSK; 2432 tx_flags |= TX_CMD_FLG_RTS_MSK;
2476 tx_flags &= ~TX_CMD_FLG_CTS_MSK; 2433 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2477 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 2434 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
2478 tx_flags &= ~TX_CMD_FLG_RTS_MSK; 2435 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2479 tx_flags |= TX_CMD_FLG_CTS_MSK; 2436 tx_flags |= TX_CMD_FLG_CTS_MSK;
2480 } 2437 }
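The TID extraction just above moves from the old le16-based ieee80211_get_qos_ctrl() helper to the byte pointer returned by ieee80211_get_qos_ctl(). The two forms are equivalent because the QoS Control field is little-endian and the TID sits in its low four bits; a minimal standalone sketch (values here are invented, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical QoS Control field: TID 5, everything else zero */
	uint8_t qos_ctl[2] = { 0x05, 0x00 };

	/* old style: treat the field as a little-endian u16, then mask */
	uint16_t le = (uint16_t)(qos_ctl[0] | (qos_ctl[1] << 8));
	int old_tid = le & 0xf;

	/* new style: the low nibble of the first byte is already the TID */
	int new_tid = qos_ctl[0] & 0xf;

	printf("old=%d new=%d\n", old_tid, new_tid);	/* both print 5 */
	return 0;
}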
@@ -2483,9 +2440,8 @@ static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2483 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; 2440 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2484 2441
2485 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 2442 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2486 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { 2443 if (ieee80211_is_mgmt(fc)) {
2487 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ || 2444 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
2488 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2489 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3); 2445 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
2490 else 2446 else
2491 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2); 2447 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
@@ -2549,6 +2505,11 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2549 iwl3945_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); 2505 iwl3945_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2550 return priv->hw_setting.bcast_sta_id; 2506 return priv->hw_setting.bcast_sta_id;
2551 } 2507 }
2508 /* If we are in monitor mode, use BCAST. This is required for
2509 * packet injection. */
2510 case IEEE80211_IF_TYPE_MNTR:
2511 return priv->hw_setting.bcast_sta_id;
2512
2552 default: 2513 default:
2553 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); 2514 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
2554 return priv->hw_setting.bcast_sta_id; 2515 return priv->hw_setting.bcast_sta_id;
@@ -2558,25 +2519,27 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2558/* 2519/*
2559 * start REPLY_TX command process 2520 * start REPLY_TX command process
2560 */ 2521 */
2561static int iwl3945_tx_skb(struct iwl3945_priv *priv, 2522static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2562 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2563{ 2523{
2564 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2524 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2525 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2565 struct iwl3945_tfd_frame *tfd; 2526 struct iwl3945_tfd_frame *tfd;
2566 u32 *control_flags; 2527 u32 *control_flags;
2567 int txq_id = ctl->queue; 2528 int txq_id = skb_get_queue_mapping(skb);
2568 struct iwl3945_tx_queue *txq = NULL; 2529 struct iwl3945_tx_queue *txq = NULL;
2569 struct iwl3945_queue *q = NULL; 2530 struct iwl3945_queue *q = NULL;
2570 dma_addr_t phys_addr; 2531 dma_addr_t phys_addr;
2571 dma_addr_t txcmd_phys; 2532 dma_addr_t txcmd_phys;
2572 struct iwl3945_cmd *out_cmd = NULL; 2533 struct iwl3945_cmd *out_cmd = NULL;
2573 u16 len, idx, len_org; 2534 u16 len, idx, len_org, hdr_len;
2574 u8 id, hdr_len, unicast; 2535 u8 id;
2536 u8 unicast;
2575 u8 sta_id; 2537 u8 sta_id;
2538 u8 tid = 0;
2576 u16 seq_number = 0; 2539 u16 seq_number = 0;
2577 u16 fc; 2540 __le16 fc;
2578 __le16 *qc;
2579 u8 wait_write_ptr = 0; 2541 u8 wait_write_ptr = 0;
2542 u8 *qc = NULL;
2580 unsigned long flags; 2543 unsigned long flags;
2581 int rc; 2544 int rc;
2582 2545
@@ -2586,12 +2549,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2586 goto drop_unlock; 2549 goto drop_unlock;
2587 } 2550 }
2588 2551
2589 if (!priv->vif) { 2552 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
2590 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
2591 goto drop_unlock;
2592 }
2593
2594 if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) {
2595 IWL_ERROR("ERROR: No TX rate available.\n"); 2553 IWL_ERROR("ERROR: No TX rate available.\n");
2596 goto drop_unlock; 2554 goto drop_unlock;
2597 } 2555 }
@@ -2599,28 +2557,29 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2599 unicast = !is_multicast_ether_addr(hdr->addr1); 2557 unicast = !is_multicast_ether_addr(hdr->addr1);
2600 id = 0; 2558 id = 0;
2601 2559
2602 fc = le16_to_cpu(hdr->frame_control); 2560 fc = hdr->frame_control;
2603 2561
2604#ifdef CONFIG_IWL3945_DEBUG 2562#ifdef CONFIG_IWL3945_DEBUG
2605 if (ieee80211_is_auth(fc)) 2563 if (ieee80211_is_auth(fc))
2606 IWL_DEBUG_TX("Sending AUTH frame\n"); 2564 IWL_DEBUG_TX("Sending AUTH frame\n");
2607 else if (ieee80211_is_assoc_request(fc)) 2565 else if (ieee80211_is_assoc_req(fc))
2608 IWL_DEBUG_TX("Sending ASSOC frame\n"); 2566 IWL_DEBUG_TX("Sending ASSOC frame\n");
2609 else if (ieee80211_is_reassoc_request(fc)) 2567 else if (ieee80211_is_reassoc_req(fc))
2610 IWL_DEBUG_TX("Sending REASSOC frame\n"); 2568 IWL_DEBUG_TX("Sending REASSOC frame\n");
2611#endif 2569#endif
2612 2570
 2613 /* drop all data frames if we are not associated */ 2571 /* drop all data frames if we are not associated */
2614 if ((!iwl3945_is_associated(priv) || 2572 if (ieee80211_is_data(fc) &&
2615 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id)) && 2573 (priv->iw_mode != IEEE80211_IF_TYPE_MNTR) && /* packet injection */
2616 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { 2574 (!iwl3945_is_associated(priv) ||
2575 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id))) {
2617 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n"); 2576 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
2618 goto drop_unlock; 2577 goto drop_unlock;
2619 } 2578 }
2620 2579
2621 spin_unlock_irqrestore(&priv->lock, flags); 2580 spin_unlock_irqrestore(&priv->lock, flags);
2622 2581
2623 hdr_len = ieee80211_get_hdrlen(fc); 2582 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc));
2624 2583
2625 /* Find (or create) index into station table for destination station */ 2584 /* Find (or create) index into station table for destination station */
2626 sta_id = iwl3945_get_sta_id(priv, hdr); 2585 sta_id = iwl3945_get_sta_id(priv, hdr);
@@ -2634,9 +2593,9 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2634 2593
2635 IWL_DEBUG_RATE("station Id %d\n", sta_id); 2594 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2636 2595
2637 qc = ieee80211_get_qos_ctrl(hdr); 2596 if (ieee80211_is_data_qos(fc)) {
2638 if (qc) { 2597 qc = ieee80211_get_qos_ctl(hdr);
2639 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); 2598 tid = qc[0] & 0xf;
2640 seq_number = priv->stations[sta_id].tid[tid].seq_number & 2599 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2641 IEEE80211_SCTL_SEQ; 2600 IEEE80211_SCTL_SEQ;
2642 hdr->seq_ctrl = cpu_to_le16(seq_number) | 2601 hdr->seq_ctrl = cpu_to_le16(seq_number) |
@@ -2660,8 +2619,6 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2660 /* Set up driver data for this TFD */ 2619 /* Set up driver data for this TFD */
2661 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl3945_tx_info)); 2620 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl3945_tx_info));
2662 txq->txb[q->write_ptr].skb[0] = skb; 2621 txq->txb[q->write_ptr].skb[0] = skb;
2663 memcpy(&(txq->txb[q->write_ptr].status.control),
2664 ctl, sizeof(struct ieee80211_tx_control));
2665 2622
2666 /* Init first empty entry in queue's array of Tx/cmd buffers */ 2623 /* Init first empty entry in queue's array of Tx/cmd buffers */
2667 out_cmd = &txq->cmd[idx]; 2624 out_cmd = &txq->cmd[idx];
@@ -2710,8 +2667,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2710 * first entry */ 2667 * first entry */
2711 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 2668 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2712 2669
2713 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) 2670 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
2714 iwl3945_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); 2671 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
2715 2672
2716 /* Set up TFD's 2nd entry to point directly to remainder of skb, 2673 /* Set up TFD's 2nd entry to point directly to remainder of skb,
2717 * if any (802.11 null frames have no payload). */ 2674 * if any (802.11 null frames have no payload). */
@@ -2736,18 +2693,17 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2736 out_cmd->cmd.tx.len = cpu_to_le16(len); 2693 out_cmd->cmd.tx.len = cpu_to_le16(len);
2737 2694
2738 /* TODO need this for burst mode later on */ 2695 /* TODO need this for burst mode later on */
2739 iwl3945_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id); 2696 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, unicast, sta_id);
2740 2697
2741 /* set is_hcca to 0; it probably will never be implemented */ 2698 /* set is_hcca to 0; it probably will never be implemented */
2742 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); 2699 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
2743 2700
2744 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 2701 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2745 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 2702 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
2746 2703
2747 if (!ieee80211_get_morefrag(hdr)) { 2704 if (!ieee80211_has_morefrags(hdr->frame_control)) {
2748 txq->need_update = 1; 2705 txq->need_update = 1;
2749 if (qc) { 2706 if (qc) {
2750 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2751 priv->stations[sta_id].tid[tid].seq_number = seq_number; 2707 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2752 } 2708 }
2753 } else { 2709 } else {
@@ -2759,7 +2715,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2759 sizeof(out_cmd->cmd.tx)); 2715 sizeof(out_cmd->cmd.tx));
2760 2716
2761 iwl3945_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, 2717 iwl3945_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2762 ieee80211_get_hdrlen(fc)); 2718 ieee80211_get_hdrlen(le16_to_cpu(fc)));
2763 2719
2764 /* Tell device the write index *just past* this latest filled TFD */ 2720 /* Tell device the write index *just past* this latest filled TFD */
2765 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 2721 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -2778,7 +2734,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2778 spin_unlock_irqrestore(&priv->lock, flags); 2734 spin_unlock_irqrestore(&priv->lock, flags);
2779 } 2735 }
2780 2736
2781 ieee80211_stop_queue(priv->hw, ctl->queue); 2737 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
2782 } 2738 }
2783 2739
2784 return 0; 2740 return 0;
@@ -2888,7 +2844,8 @@ static void iwl3945_radio_kill_sw(struct iwl3945_priv *priv, int disable_radio)
2888 return; 2844 return;
2889 } 2845 }
2890 2846
2891 queue_work(priv->workqueue, &priv->restart); 2847 if (priv->is_open)
2848 queue_work(priv->workqueue, &priv->restart);
2892 return; 2849 return;
2893} 2850}
2894 2851
@@ -2924,72 +2881,6 @@ void iwl3945_set_decrypted_flag(struct iwl3945_priv *priv, struct sk_buff *skb,
2924 } 2881 }
2925} 2882}
2926 2883
2927#define IWL_PACKET_RETRY_TIME HZ
2928
2929int iwl3945_is_duplicate_packet(struct iwl3945_priv *priv, struct ieee80211_hdr *header)
2930{
2931 u16 sc = le16_to_cpu(header->seq_ctrl);
2932 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2933 u16 frag = sc & IEEE80211_SCTL_FRAG;
2934 u16 *last_seq, *last_frag;
2935 unsigned long *last_time;
2936
2937 switch (priv->iw_mode) {
2938 case IEEE80211_IF_TYPE_IBSS:{
2939 struct list_head *p;
2940 struct iwl3945_ibss_seq *entry = NULL;
2941 u8 *mac = header->addr2;
2942 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
2943
2944 __list_for_each(p, &priv->ibss_mac_hash[index]) {
2945 entry = list_entry(p, struct iwl3945_ibss_seq, list);
2946 if (!compare_ether_addr(entry->mac, mac))
2947 break;
2948 }
2949 if (p == &priv->ibss_mac_hash[index]) {
2950 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2951 if (!entry) {
2952 IWL_ERROR("Cannot malloc new mac entry\n");
2953 return 0;
2954 }
2955 memcpy(entry->mac, mac, ETH_ALEN);
2956 entry->seq_num = seq;
2957 entry->frag_num = frag;
2958 entry->packet_time = jiffies;
2959 list_add(&entry->list, &priv->ibss_mac_hash[index]);
2960 return 0;
2961 }
2962 last_seq = &entry->seq_num;
2963 last_frag = &entry->frag_num;
2964 last_time = &entry->packet_time;
2965 break;
2966 }
2967 case IEEE80211_IF_TYPE_STA:
2968 last_seq = &priv->last_seq_num;
2969 last_frag = &priv->last_frag_num;
2970 last_time = &priv->last_packet_time;
2971 break;
2972 default:
2973 return 0;
2974 }
2975 if ((*last_seq == seq) &&
2976 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
2977 if (*last_frag == frag)
2978 goto drop;
2979 if (*last_frag + 1 != frag)
2980 /* out-of-order fragment */
2981 goto drop;
2982 } else
2983 *last_seq = seq;
2984
2985 *last_frag = frag;
2986 *last_time = jiffies;
2987 return 0;
2988
2989 drop:
2990 return 1;
2991}
2992
2993#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 2884#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
2994 2885
2995#include "iwl-spectrum.h" 2886#include "iwl-spectrum.h"
@@ -3241,7 +3132,7 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
3241 struct sk_buff *beacon; 3132 struct sk_buff *beacon;
3242 3133
3243 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 3134 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
3244 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL); 3135 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
3245 3136
3246 if (!beacon) { 3137 if (!beacon) {
3247 IWL_ERROR("update beacon failed\n"); 3138 IWL_ERROR("update beacon failed\n");
@@ -4848,7 +4739,7 @@ static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
4848 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 4739 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
4849 ch_info->min_power = 0; 4740 ch_info->min_power = 0;
4850 4741
4851 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x" 4742 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
4852 " %ddBm): Ad-Hoc %ssupported\n", 4743 " %ddBm): Ad-Hoc %ssupported\n",
4853 ch_info->channel, 4744 ch_info->channel,
4854 is_channel_a_band(ch_info) ? 4745 is_channel_a_band(ch_info) ?
@@ -4858,7 +4749,6 @@ static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
4858 CHECK_AND_PRINT(ACTIVE), 4749 CHECK_AND_PRINT(ACTIVE),
4859 CHECK_AND_PRINT(RADAR), 4750 CHECK_AND_PRINT(RADAR),
4860 CHECK_AND_PRINT(WIDE), 4751 CHECK_AND_PRINT(WIDE),
4861 CHECK_AND_PRINT(NARROW),
4862 CHECK_AND_PRINT(DFS), 4752 CHECK_AND_PRINT(DFS),
4863 eeprom_ch_info[ch].flags, 4753 eeprom_ch_info[ch].flags,
4864 eeprom_ch_info[ch].max_power_avg, 4754 eeprom_ch_info[ch].max_power_avg,
@@ -4994,9 +4884,6 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4994 if (scan_ch->type & 1) 4884 if (scan_ch->type & 1)
4995 scan_ch->type |= (direct_mask << 1); 4885 scan_ch->type |= (direct_mask << 1);
4996 4886
4997 if (is_channel_narrow(ch_info))
4998 scan_ch->type |= (1 << 7);
4999
5000 scan_ch->active_dwell = cpu_to_le16(active_dwell); 4887 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5001 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 4888 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5002 4889
@@ -5843,7 +5730,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5843 if (iwl3945_is_rfkill(priv)) 5730 if (iwl3945_is_rfkill(priv))
5844 return; 5731 return;
5845 5732
5846 ieee80211_start_queues(priv->hw); 5733 ieee80211_wake_queues(priv->hw);
5847 5734
5848 priv->active_rate = priv->rates_mask; 5735 priv->active_rate = priv->rates_mask;
5849 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 5736 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
@@ -5869,9 +5756,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5869 /* Configure the adapter for unassociated operation */ 5756 /* Configure the adapter for unassociated operation */
5870 iwl3945_commit_rxon(priv); 5757 iwl3945_commit_rxon(priv);
5871 5758
5872 /* At this point, the NIC is initialized and operational */
5873 priv->notif_missed_beacons = 0;
5874
5875 iwl3945_reg_txpower_periodic(priv); 5759 iwl3945_reg_txpower_periodic(priv);
5876 5760
5877 iwl3945_led_register(priv); 5761 iwl3945_led_register(priv);
@@ -5938,7 +5822,9 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
5938 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 5822 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5939 STATUS_GEO_CONFIGURED | 5823 STATUS_GEO_CONFIGURED |
5940 test_bit(STATUS_IN_SUSPEND, &priv->status) << 5824 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5941 STATUS_IN_SUSPEND; 5825 STATUS_IN_SUSPEND |
5826 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
5827 STATUS_EXIT_PENDING;
5942 goto exit; 5828 goto exit;
5943 } 5829 }
5944 5830
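The hunk above widens the set of status bits that survive an early bail-out of __iwl3945_down(); STATUS_EXIT_PENDING is now preserved as well. The test_bit(X, &status) << X idiom rebuilds a word containing only the chosen bits; a rough standalone sketch with made-up bit numbers (not the driver's real STATUS_* values):

#include <stdio.h>

/* hypothetical bit positions */
#define ST_RF_KILL_HW	0
#define ST_EXIT_PENDING	5

static unsigned long keep_bits(unsigned long status)
{
	/* same shape as the driver code: test each bit, shift it back in */
	return (!!(status & (1UL << ST_RF_KILL_HW)) << ST_RF_KILL_HW) |
	       (!!(status & (1UL << ST_EXIT_PENDING)) << ST_EXIT_PENDING);
}

int main(void)
{
	printf("0x%lx\n", keep_bits(0xffUL));	/* 0x21: only the kept bits */
	return 0;
}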
@@ -5953,7 +5839,9 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
5953 test_bit(STATUS_IN_SUSPEND, &priv->status) << 5839 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5954 STATUS_IN_SUSPEND | 5840 STATUS_IN_SUSPEND |
5955 test_bit(STATUS_FW_ERROR, &priv->status) << 5841 test_bit(STATUS_FW_ERROR, &priv->status) <<
5956 STATUS_FW_ERROR; 5842 STATUS_FW_ERROR |
5843 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
5844 STATUS_EXIT_PENDING;
5957 5845
5958 spin_lock_irqsave(&priv->lock, flags); 5846 spin_lock_irqsave(&priv->lock, flags);
5959 iwl3945_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 5847 iwl3945_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -6085,6 +5973,7 @@ static int __iwl3945_up(struct iwl3945_priv *priv)
6085 5973
6086 set_bit(STATUS_EXIT_PENDING, &priv->status); 5974 set_bit(STATUS_EXIT_PENDING, &priv->status);
6087 __iwl3945_down(priv); 5975 __iwl3945_down(priv);
5976 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6088 5977
6089 /* tried to restart and config the device for as long as our 5978 /* tried to restart and config the device for as long as our
6090 * patience could withstand */ 5979 * patience could withstand */
@@ -6152,6 +6041,26 @@ static void iwl3945_bg_rf_kill(struct work_struct *work)
6152 "Kill switch must be turned off for " 6041 "Kill switch must be turned off for "
6153 "wireless networking to work.\n"); 6042 "wireless networking to work.\n");
6154 } 6043 }
6044
6045 mutex_unlock(&priv->mutex);
6046 iwl3945_rfkill_set_hw_state(priv);
6047}
6048
6049static void iwl3945_bg_set_monitor(struct work_struct *work)
6050{
6051 struct iwl3945_priv *priv = container_of(work,
6052 struct iwl3945_priv, set_monitor);
6053
6054 IWL_DEBUG(IWL_DL_STATE, "setting monitor mode\n");
6055
6056 mutex_lock(&priv->mutex);
6057
6058 if (!iwl3945_is_ready(priv))
6059 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
6060 else
6061 if (iwl3945_set_mode(priv, IEEE80211_IF_TYPE_MNTR) != 0)
6062 IWL_ERROR("iwl3945_set_mode() failed\n");
6063
6155 mutex_unlock(&priv->mutex); 6064 mutex_unlock(&priv->mutex);
6156} 6065}
6157 6066
@@ -6388,6 +6297,7 @@ static void iwl3945_bg_up(struct work_struct *data)
6388 mutex_lock(&priv->mutex); 6297 mutex_lock(&priv->mutex);
6389 __iwl3945_up(priv); 6298 __iwl3945_up(priv);
6390 mutex_unlock(&priv->mutex); 6299 mutex_unlock(&priv->mutex);
6300 iwl3945_rfkill_set_hw_state(priv);
6391} 6301}
6392 6302
6393static void iwl3945_bg_restart(struct work_struct *data) 6303static void iwl3945_bg_restart(struct work_struct *data)
@@ -6511,8 +6421,6 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6511 break; 6421 break;
6512 } 6422 }
6513 6423
6514 iwl3945_sequence_reset(priv);
6515
6516 iwl3945_activate_qos(priv, 0); 6424 iwl3945_activate_qos(priv, 0);
6517 6425
6518 /* we have just associated, don't start scan too early */ 6426 /* we have just associated, don't start scan too early */
@@ -6608,6 +6516,8 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
6608 6516
6609 mutex_unlock(&priv->mutex); 6517 mutex_unlock(&priv->mutex);
6610 6518
6519 iwl3945_rfkill_set_hw_state(priv);
6520
6611 if (ret) 6521 if (ret)
6612 goto out_release_irq; 6522 goto out_release_irq;
6613 6523
@@ -6678,8 +6588,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
6678 IWL_DEBUG_MAC80211("leave\n"); 6588 IWL_DEBUG_MAC80211("leave\n");
6679} 6589}
6680 6590
6681static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 6591static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6682 struct ieee80211_tx_control *ctl)
6683{ 6592{
6684 struct iwl3945_priv *priv = hw->priv; 6593 struct iwl3945_priv *priv = hw->priv;
6685 6594
@@ -6692,9 +6601,9 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
6692 } 6601 }
6693 6602
6694 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 6603 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6695 ctl->tx_rate->bitrate); 6604 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
6696 6605
6697 if (iwl3945_tx_skb(priv, skb, ctl)) 6606 if (iwl3945_tx_skb(priv, skb))
6698 dev_kfree_skb_any(skb); 6607 dev_kfree_skb_any(skb);
6699 6608
6700 IWL_DEBUG_MAC80211("leave\n"); 6609 IWL_DEBUG_MAC80211("leave\n");
@@ -6837,7 +6746,7 @@ static void iwl3945_config_ap(struct iwl3945_priv *priv)
6837 return; 6746 return;
6838 6747
6839 /* The following should be done only at AP bring up */ 6748 /* The following should be done only at AP bring up */
6840 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) { 6749 if (!(iwl3945_is_associated(priv))) {
6841 6750
6842 /* RXON - unassoc (to set timing command) */ 6751 /* RXON - unassoc (to set timing command) */
6843 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 6752 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -6886,6 +6795,9 @@ static void iwl3945_config_ap(struct iwl3945_priv *priv)
6886 * clear sta table, add BCAST sta... */ 6795 * clear sta table, add BCAST sta... */
6887} 6796}
6888 6797
6798/* temporary */
6799static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
6800
6889static int iwl3945_mac_config_interface(struct ieee80211_hw *hw, 6801static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6890 struct ieee80211_vif *vif, 6802 struct ieee80211_vif *vif,
6891 struct ieee80211_if_conf *conf) 6803 struct ieee80211_if_conf *conf)
@@ -6903,10 +6815,21 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6903 return 0; 6815 return 0;
6904 } 6816 }
6905 6817
6818 /* handle this temporarily here */
6819 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
6820 conf->changed & IEEE80211_IFCC_BEACON) {
6821 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
6822 if (!beacon)
6823 return -ENOMEM;
6824 rc = iwl3945_mac_beacon_update(hw, beacon);
6825 if (rc)
6826 return rc;
6827 }
6828
6906 /* XXX: this MUST use conf->mac_addr */ 6829 /* XXX: this MUST use conf->mac_addr */
6907 6830
6908 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 6831 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
6909 (!conf->beacon || !conf->ssid_len)) { 6832 (!conf->ssid_len)) {
6910 IWL_DEBUG_MAC80211 6833 IWL_DEBUG_MAC80211
6911 ("Leaving in AP mode because HostAPD is not ready.\n"); 6834 ("Leaving in AP mode because HostAPD is not ready.\n");
6912 return 0; 6835 return 0;
@@ -6938,7 +6861,7 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6938 if (priv->ibss_beacon) 6861 if (priv->ibss_beacon)
6939 dev_kfree_skb(priv->ibss_beacon); 6862 dev_kfree_skb(priv->ibss_beacon);
6940 6863
6941 priv->ibss_beacon = conf->beacon; 6864 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
6942 } 6865 }
6943 6866
6944 if (iwl3945_is_rfkill(priv)) 6867 if (iwl3945_is_rfkill(priv))
@@ -6999,11 +6922,18 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
6999 unsigned int *total_flags, 6922 unsigned int *total_flags,
7000 int mc_count, struct dev_addr_list *mc_list) 6923 int mc_count, struct dev_addr_list *mc_list)
7001{ 6924{
7002 /* 6925 struct iwl3945_priv *priv = hw->priv;
7003 * XXX: dummy 6926
7004 * see also iwl3945_connection_init_rx_config 6927 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) {
7005 */ 6928 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
7006 *total_flags = 0; 6929 IEEE80211_IF_TYPE_MNTR,
6930 changed_flags, *total_flags);
 6931 /* queue work because mac80211 is holding a lock which
6932 * prevents us from issuing (synchronous) f/w cmds */
6933 queue_work(priv->workqueue, &priv->set_monitor);
6934 }
6935 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI |
6936 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
7007} 6937}
7008 6938
7009static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw, 6939static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
@@ -7061,9 +6991,10 @@ static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
7061 rc = -EAGAIN; 6991 rc = -EAGAIN;
7062 goto out_unlock; 6992 goto out_unlock;
7063 } 6993 }
7064 /* if we just finished scan ask for delay */ 6994 /* if we just finished scan ask for delay for a broadcast scan */
7065 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies + 6995 if ((len == 0) && priv->last_scan_jiffies &&
7066 IWL_DELAY_NEXT_SCAN, jiffies)) { 6996 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
6997 jiffies)) {
7067 rc = -EAGAIN; 6998 rc = -EAGAIN;
7068 goto out_unlock; 6999 goto out_unlock;
7069 } 7000 }
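The reworked check above only delays a broadcast scan (len == 0) and still relies on time_after() to ask whether last_scan_jiffies + IWL_DELAY_NEXT_SCAN is still in the future. time_after() is wrap-safe because it compares the signed difference of the two tick values; a small standalone sketch with invented tick numbers:

#include <stdio.h>

/* wrap-safe "a comes after b", same idea as the kernel's time_after() */
static int after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long last_scan = 1000, delay = 300;	/* invented ticks */

	/* deadline still ahead of "now" -> the scan request would be delayed */
	printf("%d\n", after(last_scan + delay, 1200));	/* 1 */
	/* deadline already passed -> the scan may proceed */
	printf("%d\n", after(last_scan + delay, 1400));	/* 0 */
	return 0;
}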
@@ -7150,7 +7081,7 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
7150 return rc; 7081 return rc;
7151} 7082}
7152 7083
7153static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, int queue, 7084static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
7154 const struct ieee80211_tx_queue_params *params) 7085 const struct ieee80211_tx_queue_params *params)
7155{ 7086{
7156 struct iwl3945_priv *priv = hw->priv; 7087 struct iwl3945_priv *priv = hw->priv;
@@ -7224,9 +7155,9 @@ static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
7224 q = &txq->q; 7155 q = &txq->q;
7225 avail = iwl3945_queue_space(q); 7156 avail = iwl3945_queue_space(q);
7226 7157
7227 stats->data[i].len = q->n_window - avail; 7158 stats[i].len = q->n_window - avail;
7228 stats->data[i].limit = q->n_window - q->high_mark; 7159 stats[i].limit = q->n_window - q->high_mark;
7229 stats->data[i].count = q->n_window; 7160 stats[i].count = q->n_window;
7230 7161
7231 } 7162 }
7232 spin_unlock_irqrestore(&priv->lock, flags); 7163 spin_unlock_irqrestore(&priv->lock, flags);
@@ -7315,8 +7246,7 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7315 7246
7316} 7247}
7317 7248
7318static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 7249static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
7319 struct ieee80211_tx_control *control)
7320{ 7250{
7321 struct iwl3945_priv *priv = hw->priv; 7251 struct iwl3945_priv *priv = hw->priv;
7322 unsigned long flags; 7252 unsigned long flags;
@@ -7398,37 +7328,6 @@ static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
7398 7328
7399#endif /* CONFIG_IWL3945_DEBUG */ 7329#endif /* CONFIG_IWL3945_DEBUG */
7400 7330
7401static ssize_t show_rf_kill(struct device *d,
7402 struct device_attribute *attr, char *buf)
7403{
7404 /*
7405 * 0 - RF kill not enabled
7406 * 1 - SW based RF kill active (sysfs)
7407 * 2 - HW based RF kill active
7408 * 3 - Both HW and SW based RF kill active
7409 */
7410 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
7411 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
7412 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
7413
7414 return sprintf(buf, "%i\n", val);
7415}
7416
7417static ssize_t store_rf_kill(struct device *d,
7418 struct device_attribute *attr,
7419 const char *buf, size_t count)
7420{
7421 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
7422
7423 mutex_lock(&priv->mutex);
7424 iwl3945_radio_kill_sw(priv, buf[0] == '1');
7425 mutex_unlock(&priv->mutex);
7426
7427 return count;
7428}
7429
7430static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
7431
7432static ssize_t show_temperature(struct device *d, 7331static ssize_t show_temperature(struct device *d,
7433 struct device_attribute *attr, char *buf) 7332 struct device_attribute *attr, char *buf)
7434{ 7333{
@@ -7879,6 +7778,7 @@ static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv)
7879 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan); 7778 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan);
7880 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill); 7779 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
7881 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 7780 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
7781 INIT_WORK(&priv->set_monitor, iwl3945_bg_set_monitor);
7882 INIT_DELAYED_WORK(&priv->post_associate, iwl3945_bg_post_associate); 7782 INIT_DELAYED_WORK(&priv->post_associate, iwl3945_bg_post_associate);
7883 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 7783 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
7884 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 7784 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
@@ -7913,7 +7813,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
7913#endif 7813#endif
7914 &dev_attr_power_level.attr, 7814 &dev_attr_power_level.attr,
7915 &dev_attr_retry_rate.attr, 7815 &dev_attr_retry_rate.attr,
7916 &dev_attr_rf_kill.attr,
7917 &dev_attr_rs_window.attr, 7816 &dev_attr_rs_window.attr,
7918 &dev_attr_statistics.attr, 7817 &dev_attr_statistics.attr,
7919 &dev_attr_status.attr, 7818 &dev_attr_status.attr,
@@ -7943,7 +7842,6 @@ static struct ieee80211_ops iwl3945_hw_ops = {
7943 .conf_tx = iwl3945_mac_conf_tx, 7842 .conf_tx = iwl3945_mac_conf_tx,
7944 .get_tsf = iwl3945_mac_get_tsf, 7843 .get_tsf = iwl3945_mac_get_tsf,
7945 .reset_tsf = iwl3945_mac_reset_tsf, 7844 .reset_tsf = iwl3945_mac_reset_tsf,
7946 .beacon_update = iwl3945_mac_beacon_update,
7947 .hw_scan = iwl3945_mac_hw_scan 7845 .hw_scan = iwl3945_mac_hw_scan
7948}; 7846};
7949 7847
@@ -7953,7 +7851,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7953 struct iwl3945_priv *priv; 7851 struct iwl3945_priv *priv;
7954 struct ieee80211_hw *hw; 7852 struct ieee80211_hw *hw;
7955 struct iwl_3945_cfg *cfg = (struct iwl_3945_cfg *)(ent->driver_data); 7853 struct iwl_3945_cfg *cfg = (struct iwl_3945_cfg *)(ent->driver_data);
7956 int i;
7957 unsigned long flags; 7854 unsigned long flags;
7958 DECLARE_MAC_BUF(mac); 7855 DECLARE_MAC_BUF(mac);
7959 7856
@@ -8001,17 +7898,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8001 7898
8002 priv->ibss_beacon = NULL; 7899 priv->ibss_beacon = NULL;
8003 7900
8004 /* Tell mac80211 and its clients (e.g. Wireless Extensions) 7901 /* Tell mac80211 our characteristics */
8005 * the range of signal quality values that we'll provide. 7902 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
8006 * Negative values for level/noise indicate that we'll provide dBm. 7903 IEEE80211_HW_SIGNAL_DBM |
8007 * For WE, at least, non-0 values here *enable* display of values 7904 IEEE80211_HW_NOISE_DBM;
8008 * in app (iwconfig). */
8009 hw->max_rssi = -20; /* signal level, negative indicates dBm */
8010 hw->max_noise = -20; /* noise level, negative indicates dBm */
8011 hw->max_signal = 100; /* link quality indication (%) */
8012
8013 /* Tell mac80211 our Tx characteristics */
8014 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
8015 7905
8016 /* 4 EDCA QOS priorities */ 7906 /* 4 EDCA QOS priorities */
8017 hw->queues = 4; 7907 hw->queues = 4;
@@ -8021,9 +7911,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8021 spin_lock_init(&priv->sta_lock); 7911 spin_lock_init(&priv->sta_lock);
8022 spin_lock_init(&priv->hcmd_lock); 7912 spin_lock_init(&priv->hcmd_lock);
8023 7913
8024 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
8025 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
8026
8027 INIT_LIST_HEAD(&priv->free_frames); 7914 INIT_LIST_HEAD(&priv->free_frames);
8028 7915
8029 mutex_init(&priv->mutex); 7916 mutex_init(&priv->mutex);
@@ -8161,6 +8048,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8161 pci_save_state(pdev); 8048 pci_save_state(pdev);
8162 pci_disable_device(pdev); 8049 pci_disable_device(pdev);
8163 8050
8051 err = iwl3945_rfkill_init(priv);
8052 if (err)
8053 IWL_ERROR("Unable to initialize RFKILL system. "
8054 "Ignoring error: %d\n", err);
8055
8164 return 0; 8056 return 0;
8165 8057
8166 out_free_geos: 8058 out_free_geos:
@@ -8191,8 +8083,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8191static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) 8083static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8192{ 8084{
8193 struct iwl3945_priv *priv = pci_get_drvdata(pdev); 8085 struct iwl3945_priv *priv = pci_get_drvdata(pdev);
8194 struct list_head *p, *q;
8195 int i;
8196 unsigned long flags; 8086 unsigned long flags;
8197 8087
8198 if (!priv) 8088 if (!priv)
@@ -8213,16 +8103,9 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8213 8103
8214 iwl_synchronize_irq(priv); 8104 iwl_synchronize_irq(priv);
8215 8105
8216 /* Free MAC hash list for ADHOC */
8217 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
8218 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
8219 list_del(p);
8220 kfree(list_entry(p, struct iwl3945_ibss_seq, list));
8221 }
8222 }
8223
8224 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 8106 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
8225 8107
8108 iwl3945_rfkill_unregister(priv);
8226 iwl3945_dealloc_ucode_pci(priv); 8109 iwl3945_dealloc_ucode_pci(priv);
8227 8110
8228 if (priv->rxq.bd) 8111 if (priv->rxq.bd)
@@ -8252,7 +8135,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8252 8135
8253 iwl3945_free_channel_map(priv); 8136 iwl3945_free_channel_map(priv);
8254 iwl3945_free_geos(priv); 8137 iwl3945_free_geos(priv);
8255 8138 kfree(priv->scan);
8256 if (priv->ibss_beacon) 8139 if (priv->ibss_beacon)
8257 dev_kfree_skb(priv->ibss_beacon); 8140 dev_kfree_skb(priv->ibss_beacon);
8258 8141
@@ -8291,6 +8174,114 @@ static int iwl3945_pci_resume(struct pci_dev *pdev)
8291 8174
8292#endif /* CONFIG_PM */ 8175#endif /* CONFIG_PM */
8293 8176
8177/*************** RFKILL FUNCTIONS **********/
8178#ifdef CONFIG_IWL3945_RFKILL
8179/* software rf-kill from user */
8180static int iwl3945_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
8181{
8182 struct iwl3945_priv *priv = data;
8183 int err = 0;
8184
8185 if (!priv->rfkill)
8186 return 0;
8187
8188 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
8189 return 0;
8190
 8191 IWL_DEBUG_RF_KILL("we received soft RFKILL set to state %d\n", state);
8192 mutex_lock(&priv->mutex);
8193
8194 switch (state) {
8195 case RFKILL_STATE_UNBLOCKED:
8196 if (iwl3945_is_rfkill_hw(priv)) {
8197 err = -EBUSY;
8198 goto out_unlock;
8199 }
8200 iwl3945_radio_kill_sw(priv, 0);
8201 break;
8202 case RFKILL_STATE_SOFT_BLOCKED:
8203 iwl3945_radio_kill_sw(priv, 1);
8204 break;
8205 default:
 8206 IWL_WARNING("we received unexpected RFKILL state %d\n", state);
8207 break;
8208 }
8209out_unlock:
8210 mutex_unlock(&priv->mutex);
8211
8212 return err;
8213}
8214
8215int iwl3945_rfkill_init(struct iwl3945_priv *priv)
8216{
8217 struct device *device = wiphy_dev(priv->hw->wiphy);
8218 int ret = 0;
8219
8220 BUG_ON(device == NULL);
8221
8222 IWL_DEBUG_RF_KILL("Initializing RFKILL.\n");
8223 priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
8224 if (!priv->rfkill) {
8225 IWL_ERROR("Unable to allocate rfkill device.\n");
8226 ret = -ENOMEM;
8227 goto error;
8228 }
8229
8230 priv->rfkill->name = priv->cfg->name;
8231 priv->rfkill->data = priv;
8232 priv->rfkill->state = RFKILL_STATE_UNBLOCKED;
8233 priv->rfkill->toggle_radio = iwl3945_rfkill_soft_rf_kill;
8234 priv->rfkill->user_claim_unsupported = 1;
8235
8236 priv->rfkill->dev.class->suspend = NULL;
8237 priv->rfkill->dev.class->resume = NULL;
8238
8239 ret = rfkill_register(priv->rfkill);
8240 if (ret) {
8241 IWL_ERROR("Unable to register rfkill: %d\n", ret);
8242 goto freed_rfkill;
8243 }
8244
8245 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n");
8246 return ret;
8247
8248freed_rfkill:
8249 if (priv->rfkill != NULL)
8250 rfkill_free(priv->rfkill);
8251 priv->rfkill = NULL;
8252
8253error:
 8254 IWL_DEBUG_RF_KILL("RFKILL initialization failed.\n");
8255 return ret;
8256}
8257
8258void iwl3945_rfkill_unregister(struct iwl3945_priv *priv)
8259{
8260 if (priv->rfkill)
8261 rfkill_unregister(priv->rfkill);
8262
8263 priv->rfkill = NULL;
8264}
8265
8266/* set rf-kill to the right state. */
8267void iwl3945_rfkill_set_hw_state(struct iwl3945_priv *priv)
8268{
8269
8270 if (!priv->rfkill)
8271 return;
8272
8273 if (iwl3945_is_rfkill_hw(priv)) {
8274 rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
8275 return;
8276 }
8277
8278 if (!iwl3945_is_rfkill_sw(priv))
8279 rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
8280 else
8281 rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
8282}
8283#endif
8284
8294/***************************************************************************** 8285/*****************************************************************************
8295 * 8286 *
8296 * driver and module entry point 8287 * driver and module entry point
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index 0bd55bb19739..71f5da3fe5c4 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -46,14 +46,13 @@
46#include <asm/div64.h> 46#include <asm/div64.h>
47 47
48#include "iwl-eeprom.h" 48#include "iwl-eeprom.h"
49#include "iwl-4965.h" 49#include "iwl-dev.h"
50#include "iwl-core.h" 50#include "iwl-core.h"
51#include "iwl-io.h" 51#include "iwl-io.h"
52#include "iwl-helpers.h" 52#include "iwl-helpers.h"
53#include "iwl-sta.h" 53#include "iwl-sta.h"
54#include "iwl-calib.h"
54 55
55static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv,
56 struct iwl4965_tx_queue *txq);
57 56
58/****************************************************************************** 57/******************************************************************************
59 * 58 *
@@ -88,292 +87,6 @@ MODULE_VERSION(DRV_VERSION);
88MODULE_AUTHOR(DRV_COPYRIGHT); 87MODULE_AUTHOR(DRV_COPYRIGHT);
89MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
90 89
91__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
92{
93 u16 fc = le16_to_cpu(hdr->frame_control);
94 int hdr_len = ieee80211_get_hdrlen(fc);
95
96 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
97 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
98 return NULL;
99}
100
101static const struct ieee80211_supported_band *iwl4965_get_hw_mode(
102 struct iwl_priv *priv, enum ieee80211_band band)
103{
104 return priv->hw->wiphy->bands[band];
105}
106
107static int iwl4965_is_empty_essid(const char *essid, int essid_len)
108{
109 /* Single white space is for Linksys APs */
110 if (essid_len == 1 && essid[0] == ' ')
111 return 1;
112
113 /* Otherwise, if the entire essid is 0, we assume it is hidden */
114 while (essid_len) {
115 essid_len--;
116 if (essid[essid_len] != '\0')
117 return 0;
118 }
119
120 return 1;
121}
122
123static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
124{
125 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
126 const char *s = essid;
127 char *d = escaped;
128
129 if (iwl4965_is_empty_essid(essid, essid_len)) {
130 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
131 return escaped;
132 }
133
134 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
135 while (essid_len--) {
136 if (*s == '\0') {
137 *d++ = '\\';
138 *d++ = '0';
139 s++;
140 } else
141 *d++ = *s++;
142 }
143 *d = '\0';
144 return escaped;
145}
146
147/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
148 * DMA services
149 *
150 * Theory of operation
151 *
152 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
153 * of buffer descriptors, each of which points to one or more data buffers for
154 * the device to read from or fill. Driver and device exchange status of each
155 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
156 * entries in each circular buffer, to protect against confusing empty and full
157 * queue states.
158 *
159 * The device reads or writes the data in the queues via the device's several
160 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
161 *
 162 * For Tx queues, there are low-mark and high-mark limits. If, after queuing
 163 * the packet for Tx, free space becomes less than the low mark, the Tx queue
 164 * is stopped. When reclaiming packets (on the 'tx done' IRQ), if free space
 165 * becomes greater than the high mark, the Tx queue is resumed.
166 *
167 * The 4965 operates with up to 17 queues: One receive queue, one transmit
168 * queue (#4) for sending commands to the device firmware, and 15 other
169 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
170 *
171 * See more detailed info in iwl-4965-hw.h.
172 ***************************************************/
173
174int iwl4965_queue_space(const struct iwl4965_queue *q)
175{
176 int s = q->read_ptr - q->write_ptr;
177
178 if (q->read_ptr > q->write_ptr)
179 s -= q->n_bd;
180
181 if (s <= 0)
182 s += q->n_window;
183 /* keep some reserve to not confuse empty and full situations */
184 s -= 2;
185 if (s < 0)
186 s = 0;
187 return s;
188}
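The free-space rule described in the "Theory of operation" comment (and implemented by iwl4965_queue_space() above) keeps two entries in reserve so an empty ring is never mistaken for a full one. A minimal standalone sketch of the same arithmetic, using made-up pointer values and a queue whose descriptor count equals its window size:

#include <stdio.h>

static int queue_space(int read_ptr, int write_ptr, int n_bd, int n_window)
{
	int s = read_ptr - write_ptr;

	if (read_ptr > write_ptr)
		s -= n_bd;		/* write pointer has wrapped around */
	if (s <= 0)
		s += n_window;
	s -= 2;				/* keep the 2-entry reserve */
	return s < 0 ? 0 : s;
}

int main(void)
{
	printf("%d\n", queue_space(0, 0, 256, 256));	/* empty ring: 254 */
	printf("%d\n", queue_space(3, 1, 256, 256));	/* almost full: 0 */
	return 0;
}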
189
190
191static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
192{
193 return q->write_ptr > q->read_ptr ?
194 (i >= q->read_ptr && i < q->write_ptr) :
195 !(i < q->read_ptr && i >= q->write_ptr);
196}
197
198static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
199{
200 /* This is for scan command, the big buffer at end of command array */
201 if (is_huge)
202 return q->n_window; /* must be power of 2 */
203
204 /* Otherwise, use normal size buffers */
205 return index & (q->n_window - 1);
206}
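get_cmd_index() above relies on n_window being a power of two, so masking with (n_window - 1) is the same as taking the index modulo the window, and the single oversized scan command lives in the extra slot at index n_window. A short standalone sketch with hypothetical sizes:

#include <stdio.h>
#include <stdint.h>

static int cmd_index(uint32_t index, uint32_t n_window, int is_huge)
{
	if (is_huge)
		return (int)n_window;		/* the one big scan slot */
	return (int)(index & (n_window - 1));	/* == index % n_window */
}

int main(void)
{
	printf("%d\n", cmd_index(35, 32, 0));	/* 3  (35 mod 32) */
	printf("%d\n", cmd_index(35, 32, 1));	/* 32 (scan slot) */
	return 0;
}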
207
208/**
209 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
210 */
211static int iwl4965_queue_init(struct iwl_priv *priv, struct iwl4965_queue *q,
212 int count, int slots_num, u32 id)
213{
214 q->n_bd = count;
215 q->n_window = slots_num;
216 q->id = id;
217
218 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
219 * and iwl_queue_dec_wrap are broken. */
220 BUG_ON(!is_power_of_2(count));
221
222 /* slots_num must be power-of-two size, otherwise
223 * get_cmd_index is broken. */
224 BUG_ON(!is_power_of_2(slots_num));
225
226 q->low_mark = q->n_window / 4;
227 if (q->low_mark < 4)
228 q->low_mark = 4;
229
230 q->high_mark = q->n_window / 8;
231 if (q->high_mark < 2)
232 q->high_mark = 2;
233
234 q->write_ptr = q->read_ptr = 0;
235
236 return 0;
237}
238
239/**
240 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
241 */
242static int iwl4965_tx_queue_alloc(struct iwl_priv *priv,
243 struct iwl4965_tx_queue *txq, u32 id)
244{
245 struct pci_dev *dev = priv->pci_dev;
246
247 /* Driver private data, only for Tx (not command) queues,
248 * not shared with device. */
249 if (id != IWL_CMD_QUEUE_NUM) {
250 txq->txb = kmalloc(sizeof(txq->txb[0]) *
251 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
252 if (!txq->txb) {
253 IWL_ERROR("kmalloc for auxiliary BD "
254 "structures failed\n");
255 goto error;
256 }
257 } else
258 txq->txb = NULL;
259
260 /* Circular buffer of transmit frame descriptors (TFDs),
261 * shared with device */
262 txq->bd = pci_alloc_consistent(dev,
263 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
264 &txq->q.dma_addr);
265
266 if (!txq->bd) {
267 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
268 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
269 goto error;
270 }
271 txq->q.id = id;
272
273 return 0;
274
275 error:
276 if (txq->txb) {
277 kfree(txq->txb);
278 txq->txb = NULL;
279 }
280
281 return -ENOMEM;
282}
283
284/**
285 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
286 */
287int iwl4965_tx_queue_init(struct iwl_priv *priv,
288 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
289{
290 struct pci_dev *dev = priv->pci_dev;
291 int len;
292 int rc = 0;
293
294 /*
295 * Alloc buffer array for commands (Tx or other types of commands).
296 * For the command queue (#4), allocate command space + one big
 297 * command for scan, since the scan command is very large; the system will
298 * not have two scans at the same time, so only one is needed.
299 * For normal Tx queues (all other queues), no super-size command
300 * space is needed.
301 */
302 len = sizeof(struct iwl_cmd) * slots_num;
303 if (txq_id == IWL_CMD_QUEUE_NUM)
304 len += IWL_MAX_SCAN_SIZE;
305 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
306 if (!txq->cmd)
307 return -ENOMEM;
308
309 /* Alloc driver data array and TFD circular buffer */
310 rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
311 if (rc) {
312 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
313
314 return -ENOMEM;
315 }
316 txq->need_update = 0;
317
318 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
319 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
320 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
321
322 /* Initialize queue's high/low-water marks, and head/tail indexes */
323 iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
324
325 /* Tell device where to find queue */
326 iwl4965_hw_tx_queue_init(priv, txq);
327
328 return 0;
329}
330
331/**
332 * iwl4965_tx_queue_free - Deallocate DMA queue.
333 * @txq: Transmit queue to deallocate.
334 *
335 * Empty queue by removing and destroying all BD's.
336 * Free all buffers.
337 * 0-fill, but do not free "txq" descriptor structure.
338 */
339void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
340{
341 struct iwl4965_queue *q = &txq->q;
342 struct pci_dev *dev = priv->pci_dev;
343 int len;
344
345 if (q->n_bd == 0)
346 return;
347
348 /* first, empty all BD's */
349 for (; q->write_ptr != q->read_ptr;
350 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
351 iwl4965_hw_txq_free_tfd(priv, txq);
352
353 len = sizeof(struct iwl_cmd) * q->n_window;
354 if (q->id == IWL_CMD_QUEUE_NUM)
355 len += IWL_MAX_SCAN_SIZE;
356
357 /* De-alloc array of command/tx buffers */
358 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
359
360 /* De-alloc circular buffer of TFDs */
361 if (txq->q.n_bd)
362 pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
363 txq->q.n_bd, txq->bd, txq->q.dma_addr);
364
365 /* De-alloc array of per-TFD driver data */
366 if (txq->txb) {
367 kfree(txq->txb);
368 txq->txb = NULL;
369 }
370
371 /* 0-fill queue descriptor structure */
372 memset(txq, 0, sizeof(*txq));
373}
374
375const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
376
377/*************** STATION TABLE MANAGEMENT **** 90/*************** STATION TABLE MANAGEMENT ****
378 * mac80211 should be examined to determine if sta_info is duplicating 91 * mac80211 should be examined to determine if sta_info is duplicating
379 * the functionality provided here 92 * the functionality provided here
@@ -381,213 +94,11 @@ const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
381 94
382/**************************************************************/ 95/**************************************************************/
383 96
384#if 0 /* temporary disable till we add real remove station */
385/**
386 * iwl4965_remove_station - Remove driver's knowledge of station.
387 *
388 * NOTE: This does not remove station from device's station table.
389 */
390static u8 iwl4965_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
391{
392 int index = IWL_INVALID_STATION;
393 int i;
394 unsigned long flags;
395
396 spin_lock_irqsave(&priv->sta_lock, flags);
397
398 if (is_ap)
399 index = IWL_AP_ID;
400 else if (is_broadcast_ether_addr(addr))
401 index = priv->hw_params.bcast_sta_id;
402 else
403 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
404 if (priv->stations[i].used &&
405 !compare_ether_addr(priv->stations[i].sta.sta.addr,
406 addr)) {
407 index = i;
408 break;
409 }
410
411 if (unlikely(index == IWL_INVALID_STATION))
412 goto out;
413
414 if (priv->stations[index].used) {
415 priv->stations[index].used = 0;
416 priv->num_stations--;
417 }
418
419 BUG_ON(priv->num_stations < 0);
420
421out:
422 spin_unlock_irqrestore(&priv->sta_lock, flags);
423 return 0;
424}
425#endif
426
427/**
428 * iwl4965_add_station_flags - Add station to tables in driver and device
429 */
430u8 iwl4965_add_station_flags(struct iwl_priv *priv, const u8 *addr,
431 int is_ap, u8 flags, void *ht_data)
432{
433 int i;
434 int index = IWL_INVALID_STATION;
435 struct iwl4965_station_entry *station;
436 unsigned long flags_spin;
437 DECLARE_MAC_BUF(mac);
438
439 spin_lock_irqsave(&priv->sta_lock, flags_spin);
440 if (is_ap)
441 index = IWL_AP_ID;
442 else if (is_broadcast_ether_addr(addr))
443 index = priv->hw_params.bcast_sta_id;
444 else
445 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
446 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
447 addr)) {
448 index = i;
449 break;
450 }
451
452 if (!priv->stations[i].used &&
453 index == IWL_INVALID_STATION)
454 index = i;
455 }
456
457
458 /* These two conditions have the same outcome, but keep them separate
459 since they have different meanings */
460 if (unlikely(index == IWL_INVALID_STATION)) {
461 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
462 return index;
463 }
464
465 if (priv->stations[index].used &&
466 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
467 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
468 return index;
469 }
470
471
472 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
473 station = &priv->stations[index];
474 station->used = 1;
475 priv->num_stations++;
476
477 /* Set up the REPLY_ADD_STA command to send to device */
478 memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
479 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
480 station->sta.mode = 0;
481 station->sta.sta.sta_id = index;
482 station->sta.station_flags = 0;
483
484#ifdef CONFIG_IWL4965_HT
485 /* BCAST station and IBSS stations do not work in HT mode */
486 if (index != priv->hw_params.bcast_sta_id &&
487 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
488 iwl4965_set_ht_add_station(priv, index,
489 (struct ieee80211_ht_info *) ht_data);
490#endif /*CONFIG_IWL4965_HT*/
491
492 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
493
494 /* Add station to device's station table */
495 iwl4965_send_add_station(priv, &station->sta, flags);
496 return index;
497
498}
499
500
501
502/*************** HOST COMMAND QUEUE FUNCTIONS *****/
503
504/**
505 * iwl4965_enqueue_hcmd - enqueue a uCode command
506 * @priv: device private data point
507 * @cmd: a point to the ucode command structure
508 *
 509 * The function returns a value < 0 to indicate that the operation has
 510 * failed. On success, it returns the index (> 0) of the command in the
 511 * command queue.
512 */
513int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
514{
515 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
516 struct iwl4965_queue *q = &txq->q;
517 struct iwl4965_tfd_frame *tfd;
518 u32 *control_flags;
519 struct iwl_cmd *out_cmd;
520 u32 idx;
521 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
522 dma_addr_t phys_addr;
523 int ret;
524 unsigned long flags;
525
526 /* If any of the command structures end up being larger than
 527 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
528 * we will need to increase the size of the TFD entries */
529 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
530 !(cmd->meta.flags & CMD_SIZE_HUGE));
531
532 if (iwl_is_rfkill(priv)) {
533 IWL_DEBUG_INFO("Not sending command - RF KILL");
534 return -EIO;
535 }
536
537 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
538 IWL_ERROR("No space for Tx\n");
539 return -ENOSPC;
540 }
541
542 spin_lock_irqsave(&priv->hcmd_lock, flags);
543
544 tfd = &txq->bd[q->write_ptr];
545 memset(tfd, 0, sizeof(*tfd));
546
547 control_flags = (u32 *) tfd;
548 97
549 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
550 out_cmd = &txq->cmd[idx];
551
552 out_cmd->hdr.cmd = cmd->id;
553 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
554 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
555
556 /* At this point, the out_cmd now has all of the incoming cmd
557 * information */
558
559 out_cmd->hdr.flags = 0;
560 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
561 INDEX_TO_SEQ(q->write_ptr));
562 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
563 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
564
565 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
566 offsetof(struct iwl_cmd, hdr);
567 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
568
569 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
570 "%d bytes at %d[%d]:%d\n",
571 get_cmd_string(out_cmd->hdr.cmd),
572 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
573 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
574
575 txq->need_update = 1;
576
577 /* Set up entry in queue's byte count circular buffer */
578 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
579
580 /* Increment and update queue's write index */
581 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
582 ret = iwl4965_tx_queue_update_write_ptr(priv, txq);
583
584 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
585 return ret ? ret : idx;
586}
587 98
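For orientation, a minimal usage sketch of the host-command path documented above, assuming only what appears elsewhere in this file (struct iwl_host_cmd's id/len/data/meta.flags fields and the iwl_send_cmd_sync() helper); REPLY_EXAMPLE_CMD and struct example_cmd are placeholders, not real driver symbols:

/* Hedged sketch, not part of this patch: a typical synchronous caller
 * fills a struct iwl_host_cmd and lets the send helper reach
 * iwl4965_enqueue_hcmd(); any negative return means the command failed. */
static int example_send_host_cmd(struct iwl_priv *priv,
				 struct example_cmd *body)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_EXAMPLE_CMD,	/* placeholder command ID */
		.len = sizeof(*body),
		.data = body,
		.meta.flags = 0,		/* synchronous, no SKB wanted */
	};
	int ret;

	ret = iwl_send_cmd_sync(priv, &cmd);
	if (ret < 0)
		IWL_ERROR("example command failed (%d)\n", ret);
	return ret;
}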
588static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) 99static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
589{ 100{
590 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon; 101 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
591 102
592 if (hw_decrypt) 103 if (hw_decrypt)
593 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; 104 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
@@ -597,45 +108,13 @@ static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
597} 108}
598 109
599/** 110/**
600 * iwl4965_rxon_add_station - add station into station table.
601 *
602 * there is only one AP station with id = IWL_AP_ID
603 * NOTE: mutex must be held before calling this function
604 */
605static int iwl4965_rxon_add_station(struct iwl_priv *priv,
606 const u8 *addr, int is_ap)
607{
608 u8 sta_id;
609
610 /* Add station to device's station table */
611#ifdef CONFIG_IWL4965_HT
612 struct ieee80211_conf *conf = &priv->hw->conf;
613 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;
614
615 if ((is_ap) &&
616 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
617 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
618 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
619 0, cur_ht_config);
620 else
621#endif /* CONFIG_IWL4965_HT */
622 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
623 0, NULL);
624
625 /* Set up default rate scaling table in device's station table */
626 iwl4965_add_station(priv, addr, is_ap);
627
628 return sta_id;
629}
630
631/**
632 * iwl4965_check_rxon_cmd - validate the RXON structure 111
633 * 112 *
634 * NOTE: This is really only useful during development and can eventually 113 * NOTE: This is really only useful during development and can eventually
635 * be #ifdef'd out once the driver is stable and folks aren't actively 114 * be #ifdef'd out once the driver is stable and folks aren't actively
636 * making changes 115 * making changes
637 */ 116 */
638static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon) 117static int iwl4965_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
639{ 118{
640 int error = 0; 119 int error = 0;
641 int counter = 1; 120 int counter = 1;
@@ -713,7 +192,7 @@ static int iwl4965_full_rxon_required(struct iwl_priv *priv)
713{ 192{
714 193
715 /* These items are only settable from the full RXON command */ 194 /* These items are only settable from the full RXON command */
716 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) || 195 if (!(iwl_is_associated(priv)) ||
717 compare_ether_addr(priv->staging_rxon.bssid_addr, 196 compare_ether_addr(priv->staging_rxon.bssid_addr,
718 priv->active_rxon.bssid_addr) || 197 priv->active_rxon.bssid_addr) ||
719 compare_ether_addr(priv->staging_rxon.node_addr, 198 compare_ether_addr(priv->staging_rxon.node_addr,
@@ -760,18 +239,23 @@ static int iwl4965_full_rxon_required(struct iwl_priv *priv)
760static int iwl4965_commit_rxon(struct iwl_priv *priv) 239static int iwl4965_commit_rxon(struct iwl_priv *priv)
761{ 240{
762 /* cast away the const for active_rxon in this function */ 241 /* cast away the const for active_rxon in this function */
763 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 242 struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
764 DECLARE_MAC_BUF(mac); 243 DECLARE_MAC_BUF(mac);
765 int rc = 0; 244 int ret;
245 bool new_assoc =
246 !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);
766 247
767 if (!iwl_is_alive(priv)) 248 if (!iwl_is_alive(priv))
768 return -1; 249 return -EBUSY;
769 250
770 /* always get timestamp with Rx frame */ 251 /* always get timestamp with Rx frame */
771 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; 252 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
253 /* allow CTS-to-self if possible. This is relevant only for
254 * 5000, but will not damage 4965 */
255 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
772 256
773 rc = iwl4965_check_rxon_cmd(&priv->staging_rxon); 257 ret = iwl4965_check_rxon_cmd(&priv->staging_rxon);
774 if (rc) { 258 if (ret) {
775 IWL_ERROR("Invalid RXON configuration. Not committing.\n"); 259 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
776 return -EINVAL; 260 return -EINVAL;
777 } 261 }
@@ -780,49 +264,37 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
780 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter 264 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
781 * and other flags for the current radio configuration. */ 265 * and other flags for the current radio configuration. */
782 if (!iwl4965_full_rxon_required(priv)) { 266 if (!iwl4965_full_rxon_required(priv)) {
783 rc = iwl_send_rxon_assoc(priv); 267 ret = iwl_send_rxon_assoc(priv);
784 if (rc) { 268 if (ret) {
785 IWL_ERROR("Error setting RXON_ASSOC " 269 IWL_ERROR("Error setting RXON_ASSOC (%d)\n", ret);
786 "configuration (%d).\n", rc); 270 return ret;
787 return rc;
788 } 271 }
789 272
790 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 273 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
791
792 return 0; 274 return 0;
793 } 275 }
794 276
795 /* station table will be cleared */ 277 /* station table will be cleared */
796 priv->assoc_station_added = 0; 278 priv->assoc_station_added = 0;
797 279
798#ifdef CONFIG_IWL4965_SENSITIVITY
799 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
800 if (!priv->error_recovering)
801 priv->start_calib = 0;
802
803 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
804#endif /* CONFIG_IWL4965_SENSITIVITY */
805
806 /* If we are currently associated and the new config requires 280 /* If we are currently associated and the new config requires
807 * an RXON_ASSOC and the new config wants the associated mask enabled, 281 * an RXON_ASSOC and the new config wants the associated mask enabled,
808 * we must clear the associated bit from the active configuration 282 * we must clear the associated bit from the active configuration
809 * before we apply the new config */ 283 * before we apply the new config */
810 if (iwl_is_associated(priv) && 284 if (iwl_is_associated(priv) && new_assoc) {
811 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
812 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); 285 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
813 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 286 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
814 287
815 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 288 ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
816 sizeof(struct iwl4965_rxon_cmd), 289 sizeof(struct iwl_rxon_cmd),
817 &priv->active_rxon); 290 &priv->active_rxon);
818 291
819 /* If the mask clearing failed then we set 292 /* If the mask clearing failed then we set
820 * active_rxon back to what it was previously */ 293 * active_rxon back to what it was previously */
821 if (rc) { 294 if (ret) {
822 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; 295 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
823 IWL_ERROR("Error clearing ASSOC_MSK on current " 296 IWL_ERROR("Error clearing ASSOC_MSK (%d)\n", ret);
824 "configuration (%d).\n", rc); 297 return ret;
825 return rc;
826 } 298 }
827 } 299 }
828 300
@@ -830,65 +302,87 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
830 "* with%s RXON_FILTER_ASSOC_MSK\n" 302 "* with%s RXON_FILTER_ASSOC_MSK\n"
831 "* channel = %d\n" 303 "* channel = %d\n"
832 "* bssid = %s\n", 304 "* bssid = %s\n",
833 ((priv->staging_rxon.filter_flags & 305 (new_assoc ? "" : "out"),
834 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
835 le16_to_cpu(priv->staging_rxon.channel), 306 le16_to_cpu(priv->staging_rxon.channel),
836 print_mac(mac, priv->staging_rxon.bssid_addr)); 307 print_mac(mac, priv->staging_rxon.bssid_addr));
837 308
838 iwl4965_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto); 309 iwl4965_set_rxon_hwcrypto(priv, !priv->hw_params.sw_crypto);
839 /* Apply the new configuration */ 310
840 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 311 /* Apply the new configuration
841 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon); 312 * RXON unassoc clears the station table in uCode, send it before
842 if (rc) { 313 * we add the bcast station. If assoc bit is set, we will send RXON
843 IWL_ERROR("Error setting new configuration (%d).\n", rc); 314 * after having added the bcast and bssid station.
844 return rc; 315 */
316 if (!new_assoc) {
317 ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
318 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
319 if (ret) {
320 IWL_ERROR("Error setting new RXON (%d)\n", ret);
321 return ret;
322 }
323 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
845 } 324 }
846 325
847 iwlcore_clear_stations_table(priv); 326 iwl_clear_stations_table(priv);
848 327
849#ifdef CONFIG_IWL4965_SENSITIVITY
850 if (!priv->error_recovering) 328 if (!priv->error_recovering)
851 priv->start_calib = 0; 329 priv->start_calib = 0;
852 330
853 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
854 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
855#endif /* CONFIG_IWL4965_SENSITIVITY */
856
857 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
858
859 /* If we issue a new RXON command which required a tune then we must
860 * send a new TXPOWER command or we won't be able to Tx any frames */
861 rc = iwl4965_hw_reg_send_txpower(priv);
862 if (rc) {
863 IWL_ERROR("Error setting Tx power (%d).\n", rc);
864 return rc;
865 }
866
867 /* Add the broadcast address so we can send broadcast frames */ 331 /* Add the broadcast address so we can send broadcast frames */
868 if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) == 332 if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) ==
869 IWL_INVALID_STATION) { 333 IWL_INVALID_STATION) {
870 IWL_ERROR("Error adding BROADCAST address for transmit.\n"); 334 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
871 return -EIO; 335 return -EIO;
872 } 336 }
873 337
874 /* If we have set the ASSOC_MSK and we are in BSS mode then 338 /* If we have set the ASSOC_MSK and we are in BSS mode then
875 * add the IWL_AP_ID to the station rate table */ 339 * add the IWL_AP_ID to the station rate table */
876 if (iwl_is_associated(priv) && 340 if (new_assoc) {
877 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { 341 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
878 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1) 342 ret = iwl_rxon_add_station(priv,
879 == IWL_INVALID_STATION) { 343 priv->active_rxon.bssid_addr, 1);
880 IWL_ERROR("Error adding AP address for transmit.\n"); 344 if (ret == IWL_INVALID_STATION) {
881 return -EIO; 345 IWL_ERROR("Error adding AP address for TX.\n");
346 return -EIO;
347 }
348 priv->assoc_station_added = 1;
349 if (priv->default_wep_key &&
350 iwl_send_static_wepkey_cmd(priv, 0))
351 IWL_ERROR("Could not send WEP static key.\n");
882 } 352 }
883 priv->assoc_station_added = 1; 353
884 if (priv->default_wep_key && 354 /* Apply the new configuration
885 iwl_send_static_wepkey_cmd(priv, 0)) 355 * RXON assoc doesn't clear the station table in uCode,
886 IWL_ERROR("Could not send WEP static key.\n"); 356 */
357 ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
358 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
359 if (ret) {
360 IWL_ERROR("Error setting new RXON (%d)\n", ret);
361 return ret;
362 }
363 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
364 }
365
366 iwl_init_sensitivity(priv);
367
368 /* If we issue a new RXON command which required a tune then we must
369 * send a new TXPOWER command or we won't be able to Tx any frames */
370 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
371 if (ret) {
372 IWL_ERROR("Error sending TX power (%d)\n", ret);
373 return ret;
887 } 374 }
888 375
889 return 0; 376 return 0;
890} 377}
891 378
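A condensed sketch of the ordering that the rewritten iwl4965_commit_rxon() above enforces; the example_* helpers are placeholders standing in for the calls made in the function, and error handling is omitted:

/* Illustrative only -- placeholder helpers, no error handling. */
static void example_commit_rxon_order(struct iwl_priv *priv, bool new_assoc)
{
	if (!new_assoc)
		example_send_rxon(priv);	/* unassoc RXON clears uCode's station table */

	example_add_bcast_station(priv);	/* add bcast only after any unassoc RXON */

	if (new_assoc) {
		example_add_ap_station(priv);	/* AP station + static WEP key first ... */
		example_send_rxon(priv);	/* ... then assoc RXON, which keeps them */
	}

	iwl_init_sensitivity(priv);
	example_send_tx_power(priv);		/* a retune needs a fresh TXPOWER command */
}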
379void iwl4965_update_chain_flags(struct iwl_priv *priv)
380{
381
382 iwl_set_rxon_chain(priv);
383 iwl4965_commit_rxon(priv);
384}
385
892static int iwl4965_send_bt_config(struct iwl_priv *priv) 386static int iwl4965_send_bt_config(struct iwl_priv *priv)
893{ 387{
894 struct iwl4965_bt_cmd bt_cmd = { 388 struct iwl4965_bt_cmd bt_cmd = {
@@ -903,155 +397,7 @@ static int iwl4965_send_bt_config(struct iwl_priv *priv)
903 sizeof(struct iwl4965_bt_cmd), &bt_cmd); 397 sizeof(struct iwl4965_bt_cmd), &bt_cmd);
904} 398}
905 399
906static int iwl4965_send_scan_abort(struct iwl_priv *priv) 400static void iwl_clear_free_frames(struct iwl_priv *priv)
907{
908 int rc = 0;
909 struct iwl4965_rx_packet *res;
910 struct iwl_host_cmd cmd = {
911 .id = REPLY_SCAN_ABORT_CMD,
912 .meta.flags = CMD_WANT_SKB,
913 };
914
915 /* If there isn't a scan actively going on in the hardware
916 * then we are in between scan bands and not actually
917 * actively scanning, so don't send the abort command */
918 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
919 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
920 return 0;
921 }
922
923 rc = iwl_send_cmd_sync(priv, &cmd);
924 if (rc) {
925 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
926 return rc;
927 }
928
929 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
930 if (res->u.status != CAN_ABORT_STATUS) {
931 /* The scan abort will return 1 for success or
932 * 2 for "failure". A failure condition can be
933 * due to simply not being in an active scan which
934 * can occur if we send the scan abort before the
935 * microcode has notified us that a scan has
936 * completed. */
937 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
938 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
939 clear_bit(STATUS_SCAN_HW, &priv->status);
940 }
941
942 dev_kfree_skb_any(cmd.meta.u.skb);
943
944 return rc;
945}
946
947static int iwl4965_card_state_sync_callback(struct iwl_priv *priv,
948 struct iwl_cmd *cmd,
949 struct sk_buff *skb)
950{
951 return 1;
952}
953
954/*
955 * CARD_STATE_CMD
956 *
957 * Use: Sets the device's internal card state to enable, disable, or halt
958 *
959 * When in the 'enable' state the card operates as normal.
960 * When in the 'disable' state, the card enters into a low power mode.
961 * When in the 'halt' state, the card is shut down and must be fully
962 * restarted to come back on.
963 */
964static int iwl4965_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
965{
966 struct iwl_host_cmd cmd = {
967 .id = REPLY_CARD_STATE_CMD,
968 .len = sizeof(u32),
969 .data = &flags,
970 .meta.flags = meta_flag,
971 };
972
973 if (meta_flag & CMD_ASYNC)
974 cmd.meta.u.callback = iwl4965_card_state_sync_callback;
975
976 return iwl_send_cmd(priv, &cmd);
977}
978
979static int iwl4965_add_sta_sync_callback(struct iwl_priv *priv,
980 struct iwl_cmd *cmd, struct sk_buff *skb)
981{
982 struct iwl4965_rx_packet *res = NULL;
983
984 if (!skb) {
985 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
986 return 1;
987 }
988
989 res = (struct iwl4965_rx_packet *)skb->data;
990 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
991 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
992 res->hdr.flags);
993 return 1;
994 }
995
996 switch (res->u.add_sta.status) {
997 case ADD_STA_SUCCESS_MSK:
998 break;
999 default:
1000 break;
1001 }
1002
1003 /* We didn't cache the SKB; let the caller free it */
1004 return 1;
1005}
1006
1007int iwl4965_send_add_station(struct iwl_priv *priv,
1008 struct iwl4965_addsta_cmd *sta, u8 flags)
1009{
1010 struct iwl4965_rx_packet *res = NULL;
1011 int rc = 0;
1012 struct iwl_host_cmd cmd = {
1013 .id = REPLY_ADD_STA,
1014 .len = sizeof(struct iwl4965_addsta_cmd),
1015 .meta.flags = flags,
1016 .data = sta,
1017 };
1018
1019 if (flags & CMD_ASYNC)
1020 cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
1021 else
1022 cmd.meta.flags |= CMD_WANT_SKB;
1023
1024 rc = iwl_send_cmd(priv, &cmd);
1025
1026 if (rc || (flags & CMD_ASYNC))
1027 return rc;
1028
1029 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1030 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1031 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1032 res->hdr.flags);
1033 rc = -EIO;
1034 }
1035
1036 if (rc == 0) {
1037 switch (res->u.add_sta.status) {
1038 case ADD_STA_SUCCESS_MSK:
1039 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1040 break;
1041 default:
1042 rc = -EIO;
1043 IWL_WARNING("REPLY_ADD_STA failed\n");
1044 break;
1045 }
1046 }
1047
1048 priv->alloc_rxb_skb--;
1049 dev_kfree_skb_any(cmd.meta.u.skb);
1050
1051 return rc;
1052}
1053
1054static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1055{ 401{
1056 struct list_head *element; 402 struct list_head *element;
1057 403
@@ -1061,7 +407,7 @@ static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1061 while (!list_empty(&priv->free_frames)) { 407 while (!list_empty(&priv->free_frames)) {
1062 element = priv->free_frames.next; 408 element = priv->free_frames.next;
1063 list_del(element); 409 list_del(element);
1064 kfree(list_entry(element, struct iwl4965_frame, list)); 410 kfree(list_entry(element, struct iwl_frame, list));
1065 priv->frames_count--; 411 priv->frames_count--;
1066 } 412 }
1067 413
@@ -1072,9 +418,9 @@ static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1072 } 418 }
1073} 419}
1074 420
1075static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl_priv *priv) 421static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
1076{ 422{
1077 struct iwl4965_frame *frame; 423 struct iwl_frame *frame;
1078 struct list_head *element; 424 struct list_head *element;
1079 if (list_empty(&priv->free_frames)) { 425 if (list_empty(&priv->free_frames)) {
1080 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 426 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
@@ -1089,10 +435,10 @@ static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
1089 435
1090 element = priv->free_frames.next; 436 element = priv->free_frames.next;
1091 list_del(element); 437 list_del(element);
1092 return list_entry(element, struct iwl4965_frame, list); 438 return list_entry(element, struct iwl_frame, list);
1093} 439}
1094 440
1095static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl4965_frame *frame) 441static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
1096{ 442{
1097 memset(frame, 0, sizeof(*frame)); 443 memset(frame, 0, sizeof(*frame));
1098 list_add(&frame->list, &priv->free_frames); 444 list_add(&frame->list, &priv->free_frames);
@@ -1116,27 +462,39 @@ unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
1116 return priv->ibss_beacon->len; 462 return priv->ibss_beacon->len;
1117} 463}
1118 464
1119static u8 iwl4965_rate_get_lowest_plcp(int rate_mask) 465static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv)
1120{ 466{
1121 u8 i; 467 int i;
468 int rate_mask;
469
470 /* Set rate mask */
471 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
472 rate_mask = priv->active_rate_basic & 0xF;
473 else
474 rate_mask = priv->active_rate_basic & 0xFF0;
1122 475
476 /* Find lowest valid rate */
1123 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; 477 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1124 i = iwl4965_rates[i].next_ieee) { 478 i = iwl_rates[i].next_ieee) {
1125 if (rate_mask & (1 << i)) 479 if (rate_mask & (1 << i))
1126 return iwl4965_rates[i].plcp; 480 return iwl_rates[i].plcp;
1127 } 481 }
1128 482
1129 return IWL_RATE_INVALID; 483 /* No valid rate was found. Assign the lowest one */
484 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
485 return IWL_RATE_1M_PLCP;
486 else
487 return IWL_RATE_6M_PLCP;
1130} 488}
1131 489
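A small, self-contained illustration of the selection above: the basic-rate bitmap is reduced to its CCK bits (0-3) on 2.4 GHz or its OFDM bits (4-11) on 5 GHz, the lowest set bit wins, and an empty mask falls back to 1 Mb/s or 6 Mb/s. The toy below assumes that bit layout and borrows the PLCP values from the iwl_rates table; it is not driver code.

/* Toy demonstration of the lowest-basic-rate selection (assumed layout). */
#include <stdio.h>

static unsigned lowest_basic_plcp(unsigned active_rate_basic, int band_24g)
{
	/* 1, 2, 5.5, 11 Mb/s (CCK) followed by 6..54 Mb/s (OFDM) */
	static const unsigned plcp[12] = {
		10, 20, 55, 110, 13, 15, 5, 7, 9, 11, 1, 3
	};
	unsigned mask = band_24g ? (active_rate_basic & 0xF)
				 : (active_rate_basic & 0xFF0);
	unsigned i;

	for (i = 0; i < 12; i++)
		if (mask & (1u << i))
			return plcp[i];		/* lowest rate whose bit is set */

	return band_24g ? 10 : 13;		/* fall back to 1M / 6M PLCP */
}

int main(void)
{
	/* basic rates 2M + 11M on 2.4 GHz: the beacon goes out at 2M */
	printf("plcp = %u\n", lowest_basic_plcp(0x0A, 1));
	return 0;
}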
1132static int iwl4965_send_beacon_cmd(struct iwl_priv *priv) 490static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1133{ 491{
1134 struct iwl4965_frame *frame; 492 struct iwl_frame *frame;
1135 unsigned int frame_size; 493 unsigned int frame_size;
1136 int rc; 494 int rc;
1137 u8 rate; 495 u8 rate;
1138 496
1139 frame = iwl4965_get_free_frame(priv); 497 frame = iwl_get_free_frame(priv);
1140 498
1141 if (!frame) { 499 if (!frame) {
1142 IWL_ERROR("Could not obtain free frame buffer for beacon " 500 IWL_ERROR("Could not obtain free frame buffer for beacon "
@@ -1144,23 +502,14 @@ static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1144 return -ENOMEM; 502 return -ENOMEM;
1145 } 503 }
1146 504
1147 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { 505 rate = iwl4965_rate_get_lowest_plcp(priv);
1148 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
1149 0xFF0);
1150 if (rate == IWL_INVALID_RATE)
1151 rate = IWL_RATE_6M_PLCP;
1152 } else {
1153 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1154 if (rate == IWL_INVALID_RATE)
1155 rate = IWL_RATE_1M_PLCP;
1156 }
1157 506
1158 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate); 507 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);
1159 508
1160 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 509 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1161 &frame->u.cmd[0]); 510 &frame->u.cmd[0]);
1162 511
1163 iwl4965_free_frame(priv, frame); 512 iwl_free_frame(priv, frame);
1164 513
1165 return rc; 514 return rc;
1166} 515}
@@ -1171,184 +520,69 @@ static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1171 * 520 *
1172 ******************************************************************************/ 521 ******************************************************************************/
1173 522
1174static void iwl4965_unset_hw_params(struct iwl_priv *priv) 523static void iwl4965_ht_conf(struct iwl_priv *priv,
1175{ 524 struct ieee80211_bss_conf *bss_conf)
1176 if (priv->shared_virt)
1177 pci_free_consistent(priv->pci_dev,
1178 sizeof(struct iwl4965_shared),
1179 priv->shared_virt,
1180 priv->shared_phys);
1181}
1182
1183/**
1184 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
1185 *
1186 * return : set the bit for each supported rate insert in ie
1187 */
1188static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1189 u16 basic_rate, int *left)
1190{
1191 u16 ret_rates = 0, bit;
1192 int i;
1193 u8 *cnt = ie;
1194 u8 *rates = ie + 1;
1195
1196 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1197 if (bit & supported_rate) {
1198 ret_rates |= bit;
1199 rates[*cnt] = iwl4965_rates[i].ieee |
1200 ((bit & basic_rate) ? 0x80 : 0x00);
1201 (*cnt)++;
1202 (*left)--;
1203 if ((*left <= 0) ||
1204 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
1205 break;
1206 }
1207 }
1208
1209 return ret_rates;
1210}
1211
1212/**
1213 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
1214 */
1215static u16 iwl4965_fill_probe_req(struct iwl_priv *priv,
1216 enum ieee80211_band band,
1217 struct ieee80211_mgmt *frame,
1218 int left, int is_direct)
1219{ 525{
1220 int len = 0; 526 struct ieee80211_ht_info *ht_conf = bss_conf->ht_conf;
1221 u8 *pos = NULL; 527 struct ieee80211_ht_bss_info *ht_bss_conf = bss_conf->ht_bss_conf;
1222 u16 active_rates, ret_rates, cck_rates, active_rate_basic; 528 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
1223#ifdef CONFIG_IWL4965_HT
1224 const struct ieee80211_supported_band *sband =
1225 iwl4965_get_hw_mode(priv, band);
1226#endif /* CONFIG_IWL4965_HT */
1227
1228 /* Make sure there is enough space for the probe request,
1229 * two mandatory IEs and the data */
1230 left -= 24;
1231 if (left < 0)
1232 return 0;
1233 len += 24;
1234 529
1235 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 530 IWL_DEBUG_MAC80211("enter: \n");
1236 memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN);
1237 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1238 memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN);
1239 frame->seq_ctrl = 0;
1240 531
1241 /* fill in our indirect SSID IE */ 532 iwl_conf->is_ht = bss_conf->assoc_ht;
1242 /* ...next IE... */
1243 533
1244 left -= 2; 534 if (!iwl_conf->is_ht)
1245 if (left < 0) 535 return;
1246 return 0;
1247 len += 2;
1248 pos = &(frame->u.probe_req.variable[0]);
1249 *pos++ = WLAN_EID_SSID;
1250 *pos++ = 0;
1251
1252 /* fill in our direct SSID IE... */
1253 if (is_direct) {
1254 /* ...next IE... */
1255 left -= 2 + priv->essid_len;
1256 if (left < 0)
1257 return 0;
1258 /* ... fill it in... */
1259 *pos++ = WLAN_EID_SSID;
1260 *pos++ = priv->essid_len;
1261 memcpy(pos, priv->essid, priv->essid_len);
1262 pos += priv->essid_len;
1263 len += 2 + priv->essid_len;
1264 }
1265
1266 /* fill in supported rate */
1267 /* ...next IE... */
1268 left -= 2;
1269 if (left < 0)
1270 return 0;
1271 536
1272 /* ... fill it in... */ 537 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
1273 *pos++ = WLAN_EID_SUPP_RATES;
1274 *pos = 0;
1275 538
1276 /* exclude 60M rate */ 539 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
1277 active_rates = priv->rates_mask; 540 iwl_conf->sgf |= HT_SHORT_GI_20MHZ;
1278 active_rates &= ~IWL_RATE_60M_MASK; 541 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
542 iwl_conf->sgf |= HT_SHORT_GI_40MHZ;
1279 543
1280 active_rate_basic = active_rates & IWL_BASIC_RATES_MASK; 544 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
545 iwl_conf->max_amsdu_size =
546 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
1281 547
1282 cck_rates = IWL_CCK_RATES_MASK & active_rates; 548 iwl_conf->supported_chan_width =
1283 ret_rates = iwl4965_supported_rate_to_ie(pos, cck_rates, 549 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
1284 active_rate_basic, &left); 550 iwl_conf->extension_chan_offset =
1285 active_rates &= ~ret_rates; 551 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
552 /* If neither above nor below channel is supplied, disable FAT channel */
553 if (iwl_conf->extension_chan_offset != IEEE80211_HT_IE_CHA_SEC_ABOVE &&
554 iwl_conf->extension_chan_offset != IEEE80211_HT_IE_CHA_SEC_BELOW) {
555 iwl_conf->extension_chan_offset = IEEE80211_HT_IE_CHA_SEC_NONE;
556 iwl_conf->supported_chan_width = 0;
557 }
1286 558
1287 ret_rates = iwl4965_supported_rate_to_ie(pos, active_rates, 559 iwl_conf->tx_mimo_ps_mode =
1288 active_rate_basic, &left); 560 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
1289 active_rates &= ~ret_rates; 561 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
1290 562
1291 len += 2 + *pos; 563 iwl_conf->control_channel = ht_bss_conf->primary_channel;
1292 pos += (*pos) + 1; 564 iwl_conf->tx_chan_width =
1293 if (active_rates == 0) 565 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
1294 goto fill_end; 566 iwl_conf->ht_protection =
567 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
568 iwl_conf->non_GF_STA_present =
569 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
1295 570
1296 /* fill in supported extended rate */ 571 IWL_DEBUG_MAC80211("control channel %d\n", iwl_conf->control_channel);
1297 /* ...next IE... */ 572 IWL_DEBUG_MAC80211("leave\n");
1298 left -= 2;
1299 if (left < 0)
1300 return 0;
1301 /* ... fill it in... */
1302 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1303 *pos = 0;
1304 iwl4965_supported_rate_to_ie(pos, active_rates,
1305 active_rate_basic, &left);
1306 if (*pos > 0)
1307 len += 2 + *pos;
1308
1309#ifdef CONFIG_IWL4965_HT
1310 if (sband && sband->ht_info.ht_supported) {
1311 struct ieee80211_ht_cap *ht_cap;
1312 pos += (*pos) + 1;
1313 *pos++ = WLAN_EID_HT_CAPABILITY;
1314 *pos++ = sizeof(struct ieee80211_ht_cap);
1315 ht_cap = (struct ieee80211_ht_cap *)pos;
1316 ht_cap->cap_info = cpu_to_le16(sband->ht_info.cap);
1317 memcpy(ht_cap->supp_mcs_set, sband->ht_info.supp_mcs_set, 16);
1318 ht_cap->ampdu_params_info =(sband->ht_info.ampdu_factor &
1319 IEEE80211_HT_CAP_AMPDU_FACTOR) |
1320 ((sband->ht_info.ampdu_density << 2) &
1321 IEEE80211_HT_CAP_AMPDU_DENSITY);
1322 len += 2 + sizeof(struct ieee80211_ht_cap);
1323 }
1324#endif /*CONFIG_IWL4965_HT */
1325
1326 fill_end:
1327 return (u16)len;
1328} 573}
1329 574
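A hedged aside on the FAT gate in iwl4965_ht_conf() above: unless the HT IE reports the secondary channel explicitly above or below the primary, the extension offset is forced to 'none' and 40 MHz operation is dropped. The standalone sketch below assumes the usual 802.11n encoding (none = 0, above = 1, below = 3) purely for illustration:

/* Standalone illustration of the 40 MHz (FAT) gate; encodings assumed. */
#include <stdio.h>

enum sec_chan { SEC_NONE = 0, SEC_ABOVE = 1, SEC_BELOW = 3 };

static void apply_fat_gate(int *ext_offset, int *use_40mhz)
{
	if (*ext_offset != SEC_ABOVE && *ext_offset != SEC_BELOW) {
		*ext_offset = SEC_NONE;
		*use_40mhz = 0;			/* fall back to 20 MHz channels */
	}
}

int main(void)
{
	int offset = SEC_NONE, wide = 1;

	apply_fat_gate(&offset, &wide);
	printf("offset=%d 40mhz=%d\n", offset, wide);	/* prints: offset=0 40mhz=0 */
	return 0;
}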
1330/* 575/*
1331 * QoS support 576 * QoS support
1332*/ 577*/
1333static int iwl4965_send_qos_params_command(struct iwl_priv *priv, 578static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
1334 struct iwl4965_qosparam_cmd *qos)
1335{
1336
1337 return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1338 sizeof(struct iwl4965_qosparam_cmd), qos);
1339}
1340
1341static void iwl4965_activate_qos(struct iwl_priv *priv, u8 force)
1342{ 579{
1343 unsigned long flags;
1344
1345 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 580 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1346 return; 581 return;
1347 582
1348 if (!priv->qos_data.qos_enable) 583 if (!priv->qos_data.qos_enable)
1349 return; 584 return;
1350 585
1351 spin_lock_irqsave(&priv->lock, flags);
1352 priv->qos_data.def_qos_parm.qos_flags = 0; 586 priv->qos_data.def_qos_parm.qos_flags = 0;
1353 587
1354 if (priv->qos_data.qos_cap.q_AP.queue_request && 588 if (priv->qos_data.qos_cap.q_AP.queue_request &&
@@ -1359,323 +593,18 @@ static void iwl4965_activate_qos(struct iwl_priv *priv, u8 force)
1359 priv->qos_data.def_qos_parm.qos_flags |= 593 priv->qos_data.def_qos_parm.qos_flags |=
1360 QOS_PARAM_FLG_UPDATE_EDCA_MSK; 594 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
1361 595
1362#ifdef CONFIG_IWL4965_HT
1363 if (priv->current_ht_config.is_ht) 596 if (priv->current_ht_config.is_ht)
1364 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; 597 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
1365#endif /* CONFIG_IWL4965_HT */
1366
1367 spin_unlock_irqrestore(&priv->lock, flags);
1368 598
1369 if (force || iwl_is_associated(priv)) { 599 if (force || iwl_is_associated(priv)) {
1370 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n", 600 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
1371 priv->qos_data.qos_active, 601 priv->qos_data.qos_active,
1372 priv->qos_data.def_qos_parm.qos_flags); 602 priv->qos_data.def_qos_parm.qos_flags);
1373 603
1374 iwl4965_send_qos_params_command(priv, 604 iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
1375 &(priv->qos_data.def_qos_parm)); 605 sizeof(struct iwl_qosparam_cmd),
1376 } 606 &priv->qos_data.def_qos_parm, NULL);
1377}
1378
1379/*
1380 * Power management (not Tx power!) functions
1381 */
1382#define MSEC_TO_USEC 1024
1383
1384#define NOSLP __constant_cpu_to_le16(0), 0, 0
1385#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
1386#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
1387#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
1388 __constant_cpu_to_le32(X1), \
1389 __constant_cpu_to_le32(X2), \
1390 __constant_cpu_to_le32(X3), \
1391 __constant_cpu_to_le32(X4)}
1392
1393
1394/* default power management (not Tx power) table values */
1395/* for tim 0-10 */
1396static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
1397 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1398 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
1399 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
1400 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
1401 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
1402 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
1403};
1404
1405/* for tim > 10 */
1406static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
1407 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1408 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
1409 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
1410 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
1411 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
1412 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
1413 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
1414 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
1415 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
1416 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
1417};
1418
1419int iwl4965_power_init_handle(struct iwl_priv *priv)
1420{
1421 int rc = 0, i;
1422 struct iwl4965_power_mgr *pow_data;
1423 int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
1424 u16 pci_pm;
1425
1426 IWL_DEBUG_POWER("Initialize power \n");
1427
1428 pow_data = &(priv->power_data);
1429
1430 memset(pow_data, 0, sizeof(*pow_data));
1431
1432 pow_data->active_index = IWL_POWER_RANGE_0;
1433 pow_data->dtim_val = 0xffff;
1434
1435 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
1436 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
1437
1438 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
1439 if (rc != 0)
1440 return 0;
1441 else {
1442 struct iwl4965_powertable_cmd *cmd;
1443
1444 IWL_DEBUG_POWER("adjust power command flags\n");
1445
1446 for (i = 0; i < IWL_POWER_AC; i++) {
1447 cmd = &pow_data->pwr_range_0[i].cmd;
1448
1449 if (pci_pm & 0x1)
1450 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
1451 else
1452 cmd->flags |= IWL_POWER_PCI_PM_MSK;
1453 }
1454 } 607 }
1455 return rc;
1456}
1457
1458static int iwl4965_update_power_cmd(struct iwl_priv *priv,
1459 struct iwl4965_powertable_cmd *cmd, u32 mode)
1460{
1461 int rc = 0, i;
1462 u8 skip;
1463 u32 max_sleep = 0;
1464 struct iwl4965_power_vec_entry *range;
1465 u8 period = 0;
1466 struct iwl4965_power_mgr *pow_data;
1467
1468 if (mode > IWL_POWER_INDEX_5) {
1469 IWL_DEBUG_POWER("Error invalid power mode \n");
1470 return -1;
1471 }
1472 pow_data = &(priv->power_data);
1473
1474 if (pow_data->active_index == IWL_POWER_RANGE_0)
1475 range = &pow_data->pwr_range_0[0];
1476 else
1477 range = &pow_data->pwr_range_1[1];
1478
1479 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
1480
1481#ifdef IWL_MAC80211_DISABLE
1482 if (priv->assoc_network != NULL) {
1483 unsigned long flags;
1484
1485 period = priv->assoc_network->tim.tim_period;
1486 }
1487#endif /*IWL_MAC80211_DISABLE */
1488 skip = range[mode].no_dtim;
1489
1490 if (period == 0) {
1491 period = 1;
1492 skip = 0;
1493 }
1494
1495 if (skip == 0) {
1496 max_sleep = period;
1497 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1498 } else {
1499 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1500 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1501 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1502 }
1503
1504 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
1505 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1506 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1507 }
1508
1509 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1510 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1511 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1512 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1513 le32_to_cpu(cmd->sleep_interval[0]),
1514 le32_to_cpu(cmd->sleep_interval[1]),
1515 le32_to_cpu(cmd->sleep_interval[2]),
1516 le32_to_cpu(cmd->sleep_interval[3]),
1517 le32_to_cpu(cmd->sleep_interval[4]));
1518
1519 return rc;
1520}
1521
1522static int iwl4965_send_power_mode(struct iwl_priv *priv, u32 mode)
1523{
1524 u32 uninitialized_var(final_mode);
1525 int rc;
1526 struct iwl4965_powertable_cmd cmd;
1527
1528 /* If on battery, set to 3,
1529 * if plugged into AC power, set to CAM ("continuously aware mode"),
1530 * else user level */
1531 switch (mode) {
1532 case IWL_POWER_BATTERY:
1533 final_mode = IWL_POWER_INDEX_3;
1534 break;
1535 case IWL_POWER_AC:
1536 final_mode = IWL_POWER_MODE_CAM;
1537 break;
1538 default:
1539 final_mode = mode;
1540 break;
1541 }
1542
1543 cmd.keep_alive_beacons = 0;
1544
1545 iwl4965_update_power_cmd(priv, &cmd, final_mode);
1546
1547 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
1548
1549 if (final_mode == IWL_POWER_MODE_CAM)
1550 clear_bit(STATUS_POWER_PMI, &priv->status);
1551 else
1552 set_bit(STATUS_POWER_PMI, &priv->status);
1553
1554 return rc;
1555}
1556
1557int iwl4965_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
1558{
1559 /* Filter incoming packets to determine if they are targeted toward
1560 * this network, discarding packets coming from ourselves */
1561 switch (priv->iw_mode) {
1562 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
1563 /* packets from our adapter are dropped (echo) */
1564 if (!compare_ether_addr(header->addr2, priv->mac_addr))
1565 return 0;
1566 /* {broad,multi}cast packets to our IBSS go through */
1567 if (is_multicast_ether_addr(header->addr1))
1568 return !compare_ether_addr(header->addr3, priv->bssid);
1569 /* packets to our adapter go through */
1570 return !compare_ether_addr(header->addr1, priv->mac_addr);
1571 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
1572 /* packets from our adapter are dropped (echo) */
1573 if (!compare_ether_addr(header->addr3, priv->mac_addr))
1574 return 0;
1575 /* {broad,multi}cast packets to our BSS go through */
1576 if (is_multicast_ether_addr(header->addr1))
1577 return !compare_ether_addr(header->addr2, priv->bssid);
1578 /* packets to our adapter go through */
1579 return !compare_ether_addr(header->addr1, priv->mac_addr);
1580 default:
1581 break;
1582 }
1583
1584 return 1;
1585}
1586
1587#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
1588
1589static const char *iwl4965_get_tx_fail_reason(u32 status)
1590{
1591 switch (status & TX_STATUS_MSK) {
1592 case TX_STATUS_SUCCESS:
1593 return "SUCCESS";
1594 TX_STATUS_ENTRY(SHORT_LIMIT);
1595 TX_STATUS_ENTRY(LONG_LIMIT);
1596 TX_STATUS_ENTRY(FIFO_UNDERRUN);
1597 TX_STATUS_ENTRY(MGMNT_ABORT);
1598 TX_STATUS_ENTRY(NEXT_FRAG);
1599 TX_STATUS_ENTRY(LIFE_EXPIRE);
1600 TX_STATUS_ENTRY(DEST_PS);
1601 TX_STATUS_ENTRY(ABORTED);
1602 TX_STATUS_ENTRY(BT_RETRY);
1603 TX_STATUS_ENTRY(STA_INVALID);
1604 TX_STATUS_ENTRY(FRAG_DROPPED);
1605 TX_STATUS_ENTRY(TID_DISABLE);
1606 TX_STATUS_ENTRY(FRAME_FLUSHED);
1607 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
1608 TX_STATUS_ENTRY(TX_LOCKED);
1609 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
1610 }
1611
1612 return "UNKNOWN";
1613}
1614
1615/**
1616 * iwl4965_scan_cancel - Cancel any currently executing HW scan
1617 *
1618 * NOTE: priv->mutex is not required before calling this function
1619 */
1620static int iwl4965_scan_cancel(struct iwl_priv *priv)
1621{
1622 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1623 clear_bit(STATUS_SCANNING, &priv->status);
1624 return 0;
1625 }
1626
1627 if (test_bit(STATUS_SCANNING, &priv->status)) {
1628 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1629 IWL_DEBUG_SCAN("Queuing scan abort.\n");
1630 set_bit(STATUS_SCAN_ABORTING, &priv->status);
1631 queue_work(priv->workqueue, &priv->abort_scan);
1632
1633 } else
1634 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
1635
1636 return test_bit(STATUS_SCANNING, &priv->status);
1637 }
1638
1639 return 0;
1640}
1641
1642/**
1643 * iwl4965_scan_cancel_timeout - Cancel any currently executing HW scan
1644 * @ms: amount of time to wait (in milliseconds) for scan to abort
1645 *
1646 * NOTE: priv->mutex must be held before calling this function
1647 */
1648static int iwl4965_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
1649{
1650 unsigned long now = jiffies;
1651 int ret;
1652
1653 ret = iwl4965_scan_cancel(priv);
1654 if (ret && ms) {
1655 mutex_unlock(&priv->mutex);
1656 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
1657 test_bit(STATUS_SCANNING, &priv->status))
1658 msleep(1);
1659 mutex_lock(&priv->mutex);
1660
1661 return test_bit(STATUS_SCANNING, &priv->status);
1662 }
1663
1664 return ret;
1665}
1666
1667static void iwl4965_sequence_reset(struct iwl_priv *priv)
1668{
1669 /* Reset ieee stats */
1670
1671 /* We don't reset the net_device_stats (ieee->stats) on
1672 * re-association */
1673
1674 priv->last_seq_num = -1;
1675 priv->last_frag_num = -1;
1676 priv->last_packet_time = 0;
1677
1678 iwl4965_scan_cancel(priv);
1679} 608}
1680 609
1681#define MAX_UCODE_BEACON_INTERVAL 4096 610#define MAX_UCODE_BEACON_INTERVAL 4096
@@ -1750,46 +679,8 @@ static void iwl4965_setup_rxon_timing(struct iwl_priv *priv)
1750 le16_to_cpu(priv->rxon_timing.atim_window)); 679 le16_to_cpu(priv->rxon_timing.atim_window));
1751} 680}
1752 681
1753static int iwl4965_scan_initiate(struct iwl_priv *priv) 682static void iwl_set_flags_for_band(struct iwl_priv *priv,
1754{ 683 enum ieee80211_band band)
1755 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
1756 IWL_ERROR("APs don't scan.\n");
1757 return 0;
1758 }
1759
1760 if (!iwl_is_ready_rf(priv)) {
1761 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
1762 return -EIO;
1763 }
1764
1765 if (test_bit(STATUS_SCANNING, &priv->status)) {
1766 IWL_DEBUG_SCAN("Scan already in progress.\n");
1767 return -EAGAIN;
1768 }
1769
1770 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1771 IWL_DEBUG_SCAN("Scan request while abort pending. "
1772 "Queuing.\n");
1773 return -EAGAIN;
1774 }
1775
1776 IWL_DEBUG_INFO("Starting scan...\n");
1777 if (priv->cfg->sku & IWL_SKU_G)
1778 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
1779 if (priv->cfg->sku & IWL_SKU_A)
1780 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
1781 set_bit(STATUS_SCANNING, &priv->status);
1782 priv->scan_start = jiffies;
1783 priv->scan_pass_start = priv->scan_start;
1784
1785 queue_work(priv->workqueue, &priv->request_scan);
1786
1787 return 0;
1788}
1789
1790
1791static void iwl4965_set_flags_for_phymode(struct iwl_priv *priv,
1792 enum ieee80211_band band)
1793{ 684{
1794 if (band == IEEE80211_BAND_5GHZ) { 685 if (band == IEEE80211_BAND_5GHZ) {
1795 priv->staging_rxon.flags &= 686 priv->staging_rxon.flags &=
@@ -1858,7 +749,7 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
1858#endif 749#endif
1859 750
1860 ch_info = iwl_get_channel_info(priv, priv->band, 751 ch_info = iwl_get_channel_info(priv, priv->band,
1861 le16_to_cpu(priv->staging_rxon.channel)); 752 le16_to_cpu(priv->active_rxon.channel));
1862 753
1863 if (!ch_info) 754 if (!ch_info)
1864 ch_info = &priv->channel_info[0]; 755 ch_info = &priv->channel_info[0];
@@ -1874,7 +765,7 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
1874 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); 765 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
1875 priv->band = ch_info->band; 766 priv->band = ch_info->band;
1876 767
1877 iwl4965_set_flags_for_phymode(priv, priv->band); 768 iwl_set_flags_for_band(priv, priv->band);
1878 769
1879 priv->staging_rxon.ofdm_basic_rates = 770 priv->staging_rxon.ofdm_basic_rates =
1880 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 771 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
@@ -1887,38 +778,24 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
1887 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN); 778 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
1888 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff; 779 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
1889 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff; 780 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
1890 iwl4965_set_rxon_chain(priv); 781 iwl_set_rxon_chain(priv);
1891} 782}
1892 783
1893static int iwl4965_set_mode(struct iwl_priv *priv, int mode) 784static int iwl4965_set_mode(struct iwl_priv *priv, int mode)
1894{ 785{
1895 if (mode == IEEE80211_IF_TYPE_IBSS) {
1896 const struct iwl_channel_info *ch_info;
1897
1898 ch_info = iwl_get_channel_info(priv,
1899 priv->band,
1900 le16_to_cpu(priv->staging_rxon.channel));
1901
1902 if (!ch_info || !is_channel_ibss(ch_info)) {
1903 IWL_ERROR("channel %d not IBSS channel\n",
1904 le16_to_cpu(priv->staging_rxon.channel));
1905 return -EINVAL;
1906 }
1907 }
1908
1909 priv->iw_mode = mode; 786 priv->iw_mode = mode;
1910 787
1911 iwl4965_connection_init_rx_config(priv); 788 iwl4965_connection_init_rx_config(priv);
1912 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 789 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
1913 790
1914 iwlcore_clear_stations_table(priv); 791 iwl_clear_stations_table(priv);
1915 792
1916 /* don't commit rxon if rf-kill is on */ 793 /* don't commit rxon if rf-kill is on */
1917 if (!iwl_is_ready_rf(priv)) 794 if (!iwl_is_ready_rf(priv))
1918 return -EAGAIN; 795 return -EAGAIN;
1919 796
1920 cancel_delayed_work(&priv->scan_check); 797 cancel_delayed_work(&priv->scan_check);
1921 if (iwl4965_scan_cancel_timeout(priv, 100)) { 798 if (iwl_scan_cancel_timeout(priv, 100)) {
1922 IWL_WARNING("Aborted scan still in progress after 100ms\n"); 799 IWL_WARNING("Aborted scan still in progress after 100ms\n");
1923 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); 800 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
1924 return -EAGAIN; 801 return -EAGAIN;
@@ -1929,448 +806,13 @@ static int iwl4965_set_mode(struct iwl_priv *priv, int mode)
1929 return 0; 806 return 0;
1930} 807}
1931 808
1932static void iwl4965_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
1933 struct ieee80211_tx_control *ctl,
1934 struct iwl_cmd *cmd,
1935 struct sk_buff *skb_frag,
1936 int sta_id)
1937{
1938 struct iwl4965_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
1939 struct iwl_wep_key *wepkey;
1940 int keyidx = 0;
1941
1942 BUG_ON(ctl->key_idx > 3);
1943
1944 switch (keyinfo->alg) {
1945 case ALG_CCMP:
1946 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
1947 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
1948 if (ctl->flags & IEEE80211_TXCTL_AMPDU)
1949 cmd->cmd.tx.tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
1950 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
1951 break;
1952
1953 case ALG_TKIP:
1954 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
1955 ieee80211_get_tkip_key(keyinfo->conf, skb_frag,
1956 IEEE80211_TKIP_P2_KEY, cmd->cmd.tx.key);
1957 IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
1958 break;
1959
1960 case ALG_WEP:
1961 wepkey = &priv->wep_keys[ctl->key_idx];
1962 cmd->cmd.tx.sec_ctl = 0;
1963 if (priv->default_wep_key) {
1964 /* the WEP key was sent as static */
1965 keyidx = ctl->key_idx;
1966 memcpy(&cmd->cmd.tx.key[3], wepkey->key,
1967 wepkey->key_size);
1968 if (wepkey->key_size == WEP_KEY_LEN_128)
1969 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
1970 } else {
1971 /* the WEP key was sent as dynamic */
1972 keyidx = keyinfo->keyidx;
1973 memcpy(&cmd->cmd.tx.key[3], keyinfo->key,
1974 keyinfo->keylen);
1975 if (keyinfo->keylen == WEP_KEY_LEN_128)
1976 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
1977 }
1978
1979 cmd->cmd.tx.sec_ctl |= (TX_CMD_SEC_WEP |
1980 (keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
1981
1982 IWL_DEBUG_TX("Configuring packet for WEP encryption "
1983 "with key %d\n", keyidx);
1984 break;
1985
1986 default:
1987 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
1988 break;
1989 }
1990}
1991
1992/*
1993 * handle build REPLY_TX command notification.
1994 */
1995static void iwl4965_build_tx_cmd_basic(struct iwl_priv *priv,
1996 struct iwl_cmd *cmd,
1997 struct ieee80211_tx_control *ctrl,
1998 struct ieee80211_hdr *hdr,
1999 int is_unicast, u8 std_id)
2000{
2001 __le16 *qc;
2002 u16 fc = le16_to_cpu(hdr->frame_control);
2003 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2004
2005 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2006 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2007 tx_flags |= TX_CMD_FLG_ACK_MSK;
2008 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2009 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2010 if (ieee80211_is_probe_response(fc) &&
2011 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2012 tx_flags |= TX_CMD_FLG_TSF_MSK;
2013 } else {
2014 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2015 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2016 }
2017
2018 if (ieee80211_is_back_request(fc))
2019 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
2020
2021
2022 cmd->cmd.tx.sta_id = std_id;
2023 if (ieee80211_get_morefrag(hdr))
2024 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2025
2026 qc = ieee80211_get_qos_ctrl(hdr);
2027 if (qc) {
2028 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2029 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2030 } else
2031 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2032
2033 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2034 tx_flags |= TX_CMD_FLG_RTS_MSK;
2035 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2036 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2037 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2038 tx_flags |= TX_CMD_FLG_CTS_MSK;
2039 }
2040
2041 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2042 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2043
2044 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2045 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2046 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2047 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2048 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
2049 else
2050 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
2051 } else {
2052 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2053 }
2054
2055 cmd->cmd.tx.driver_txop = 0;
2056 cmd->cmd.tx.tx_flags = tx_flags;
2057 cmd->cmd.tx.next_frame_len = 0;
2058}
2059static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
2060{
2061 /* 0 - mgmt, 1 - cnt, 2 - data */
2062 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
2063 priv->tx_stats[idx].cnt++;
2064 priv->tx_stats[idx].bytes += len;
2065}
2066/**
2067 * iwl4965_get_sta_id - Find station's index within station table
2068 *
2069 * If new IBSS station, create new entry in station table
2070 */
2071static int iwl4965_get_sta_id(struct iwl_priv *priv,
2072 struct ieee80211_hdr *hdr)
2073{
2074 int sta_id;
2075 u16 fc = le16_to_cpu(hdr->frame_control);
2076 DECLARE_MAC_BUF(mac);
2077
2078 /* If this frame is broadcast or management, use broadcast station id */
2079 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2080 is_multicast_ether_addr(hdr->addr1))
2081 return priv->hw_params.bcast_sta_id;
2082
2083 switch (priv->iw_mode) {
2084
2085 /* If we are a client station in a BSS network, use the special
2086 * AP station entry (that's the only station we communicate with) */
2087 case IEEE80211_IF_TYPE_STA:
2088 return IWL_AP_ID;
2089
2090 /* If we are an AP, then find the station, or use BCAST */
2091 case IEEE80211_IF_TYPE_AP:
2092 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
2093 if (sta_id != IWL_INVALID_STATION)
2094 return sta_id;
2095 return priv->hw_params.bcast_sta_id;
2096
2097 /* If this frame is going out to an IBSS network, find the station,
2098 * or create a new station table entry */
2099 case IEEE80211_IF_TYPE_IBSS:
2100 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
2101 if (sta_id != IWL_INVALID_STATION)
2102 return sta_id;
2103
2104 /* Create new station table entry */
2105 sta_id = iwl4965_add_station_flags(priv, hdr->addr1,
2106 0, CMD_ASYNC, NULL);
2107
2108 if (sta_id != IWL_INVALID_STATION)
2109 return sta_id;
2110
2111 IWL_DEBUG_DROP("Station %s not in station map. "
2112 "Defaulting to broadcast...\n",
2113 print_mac(mac, hdr->addr1));
2114 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2115 return priv->hw_params.bcast_sta_id;
2116
2117 default:
2118 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
2119 return priv->hw_params.bcast_sta_id;
2120 }
2121}
2122
2123/*
2124 * start REPLY_TX command process
2125 */
2126static int iwl4965_tx_skb(struct iwl_priv *priv,
2127 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2128{
2129 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2130 struct iwl4965_tfd_frame *tfd;
2131 u32 *control_flags;
2132 int txq_id = ctl->queue;
2133 struct iwl4965_tx_queue *txq = NULL;
2134 struct iwl4965_queue *q = NULL;
2135 dma_addr_t phys_addr;
2136 dma_addr_t txcmd_phys;
2137 dma_addr_t scratch_phys;
2138 struct iwl_cmd *out_cmd = NULL;
2139 u16 len, idx, len_org;
2140 u8 id, hdr_len, unicast;
2141 u8 sta_id;
2142 u16 seq_number = 0;
2143 u16 fc;
2144 __le16 *qc;
2145 u8 wait_write_ptr = 0;
2146 unsigned long flags;
2147 int rc;
2148
2149 spin_lock_irqsave(&priv->lock, flags);
2150 if (iwl_is_rfkill(priv)) {
2151 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2152 goto drop_unlock;
2153 }
2154
2155 if (!priv->vif) {
2156 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
2157 goto drop_unlock;
2158 }
2159
2160 if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) {
2161 IWL_ERROR("ERROR: No TX rate available.\n");
2162 goto drop_unlock;
2163 }
2164
2165 unicast = !is_multicast_ether_addr(hdr->addr1);
2166 id = 0;
2167
2168 fc = le16_to_cpu(hdr->frame_control);
2169
2170#ifdef CONFIG_IWLWIFI_DEBUG
2171 if (ieee80211_is_auth(fc))
2172 IWL_DEBUG_TX("Sending AUTH frame\n");
2173 else if (ieee80211_is_assoc_request(fc))
2174 IWL_DEBUG_TX("Sending ASSOC frame\n");
2175 else if (ieee80211_is_reassoc_request(fc))
2176 IWL_DEBUG_TX("Sending REASSOC frame\n");
2177#endif
2178
2179 /* drop all data frames if we are not associated */
2180 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
2181 (!iwl_is_associated(priv) ||
2182 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
2183 !priv->assoc_station_added)) {
2184 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2185 goto drop_unlock;
2186 }
2187
2188 spin_unlock_irqrestore(&priv->lock, flags);
2189
2190 hdr_len = ieee80211_get_hdrlen(fc);
2191
2192 /* Find (or create) index into station table for destination station */
2193 sta_id = iwl4965_get_sta_id(priv, hdr);
2194 if (sta_id == IWL_INVALID_STATION) {
2195 DECLARE_MAC_BUF(mac);
2196
2197 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2198 print_mac(mac, hdr->addr1));
2199 goto drop;
2200 }
2201
2202 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2203
2204 qc = ieee80211_get_qos_ctrl(hdr);
2205 if (qc) {
2206 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2207 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2208 IEEE80211_SCTL_SEQ;
2209 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2210 (hdr->seq_ctrl &
2211 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2212 seq_number += 0x10;
2213#ifdef CONFIG_IWL4965_HT
2214 /* aggregation is on for this <sta,tid> */
2215 if (ctl->flags & IEEE80211_TXCTL_AMPDU)
2216 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
2217 priv->stations[sta_id].tid[tid].tfds_in_queue++;
2218#endif /* CONFIG_IWL4965_HT */
2219 }
2220
2221 /* Descriptor for chosen Tx queue */
2222 txq = &priv->txq[txq_id];
2223 q = &txq->q;
2224
2225 spin_lock_irqsave(&priv->lock, flags);
2226
2227 /* Set up first empty TFD within this queue's circular TFD buffer */
2228 tfd = &txq->bd[q->write_ptr];
2229 memset(tfd, 0, sizeof(*tfd));
2230 control_flags = (u32 *) tfd;
2231 idx = get_cmd_index(q, q->write_ptr, 0);
2232
2233 /* Set up driver data for this TFD */
2234 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
2235 txq->txb[q->write_ptr].skb[0] = skb;
2236 memcpy(&(txq->txb[q->write_ptr].status.control),
2237 ctl, sizeof(struct ieee80211_tx_control));
2238
2239 /* Set up first empty entry in queue's array of Tx/cmd buffers */
2240 out_cmd = &txq->cmd[idx];
2241 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2242 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2243
2244 /*
2245 * Set up the Tx-command (not MAC!) header.
2246 * Store the chosen Tx queue and TFD index within the sequence field;
2247 * after Tx, uCode's Tx response will return this value so driver can
2248 * locate the frame within the tx queue and do post-tx processing.
2249 */
2250 out_cmd->hdr.cmd = REPLY_TX;
2251 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2252 INDEX_TO_SEQ(q->write_ptr)));
2253
2254 /* Copy MAC header from skb into command buffer */
2255 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2256
2257 /*
2258 * Use the first empty entry in this queue's command buffer array
2259 * to contain the Tx command and MAC header concatenated together
2260 * (payload data will be in another buffer).
2261 * Size of this varies, due to varying MAC header length.
2262 * If end is not dword aligned, we'll have 2 extra bytes at the end
2263 * of the MAC header (device reads on dword boundaries).
2264 * We'll tell device about this padding later.
2265 */
2266 len = priv->hw_params.tx_cmd_len +
2267 sizeof(struct iwl_cmd_header) + hdr_len;
2268
2269 len_org = len;
2270 len = (len + 3) & ~3;
2271
2272 if (len_org != len)
2273 len_org = 1;
2274 else
2275 len_org = 0;
2276
2277 /* Physical address of this Tx command's header (not MAC header!),
2278 * within command buffer array. */
2279 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2280 offsetof(struct iwl_cmd, hdr);
2281
2282 /* Add buffer containing Tx command and MAC(!) header to TFD's
2283 * first entry */
2284 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2285
2286 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
2287 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, sta_id);
2288
2289 /* Set up TFD's 2nd entry to point directly to remainder of skb,
2290 * if any (802.11 null frames have no payload). */
2291 len = skb->len - hdr_len;
2292 if (len) {
2293 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2294 len, PCI_DMA_TODEVICE);
2295 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2296 }
2297
2298 /* Tell 4965 about any 2-byte padding after MAC header */
2299 if (len_org)
2300 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2301
2302 /* Total # bytes to be transmitted */
2303 len = (u16)skb->len;
2304 out_cmd->cmd.tx.len = cpu_to_le16(len);
2305
2306 /* TODO need this for burst mode later on */
2307 iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
2308
2309 /* set is_hcca to 0; it probably will never be implemented */
2310 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
2311
2312 iwl_update_tx_stats(priv, fc, len);
2313
2314 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
2315 offsetof(struct iwl4965_tx_cmd, scratch);
2316 out_cmd->cmd.tx.dram_lsb_ptr = cpu_to_le32(scratch_phys);
2317 out_cmd->cmd.tx.dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
2318
2319 if (!ieee80211_get_morefrag(hdr)) {
2320 txq->need_update = 1;
2321 if (qc) {
2322 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2323 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2324 }
2325 } else {
2326 wait_write_ptr = 1;
2327 txq->need_update = 0;
2328 }
2329
2330 iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
2331 sizeof(out_cmd->cmd.tx));
2332
2333 iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2334 ieee80211_get_hdrlen(fc));
2335
2336 /* Set up entry for this TFD in Tx byte-count array */
2337 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
2338
2339 /* Tell device the write index *just past* this latest filled TFD */
2340 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
2341 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
2342 spin_unlock_irqrestore(&priv->lock, flags);
2343
2344 if (rc)
2345 return rc;
2346
2347 if ((iwl4965_queue_space(q) < q->high_mark)
2348 && priv->mac80211_registered) {
2349 if (wait_write_ptr) {
2350 spin_lock_irqsave(&priv->lock, flags);
2351 txq->need_update = 1;
2352 iwl4965_tx_queue_update_write_ptr(priv, txq);
2353 spin_unlock_irqrestore(&priv->lock, flags);
2354 }
2355
2356 ieee80211_stop_queue(priv->hw, ctl->queue);
2357 }
2358
2359 return 0;
2360
2361drop_unlock:
2362 spin_unlock_irqrestore(&priv->lock, flags);
2363drop:
2364 return -1;
2365}
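/*
 * Editor's sketch (not part of the patch): the function above packs the
 * chosen Tx queue and the TFD index into the command header's 16-bit
 * sequence field, and rounds the Tx-command + MAC-header length up to a
 * dword boundary.  A minimal standalone model of both steps, assuming the
 * queue number sits in bits 8..12 and the index in bits 0..7 (the layout
 * implied by QUEUE_TO_SEQ/INDEX_TO_SEQ and SEQ_TO_QUEUE/SEQ_TO_INDEX):
 */
#include <stdint.h>

static inline uint16_t queue_index_to_seq(int txq_id, int idx)
{
	return (uint16_t)(((txq_id & 0x1f) << 8) | (idx & 0xff));
}

static inline int seq_to_queue(uint16_t seq)  { return (seq >> 8) & 0x1f; }
static inline int seq_to_index(uint16_t seq)  { return seq & 0xff; }

/* round a byte count up to the next dword; *pad is the 0-3 bytes of
 * padding the device must later be told about (TX_CMD_FLG_MH_PAD_MSK) */
static inline unsigned int dword_align(unsigned int len, unsigned int *pad)
{
	unsigned int aligned = (len + 3) & ~3u;

	*pad = aligned - len;
	return aligned;
}

/* e.g. queue_index_to_seq(4, 0x2a) == 0x042a, and the Tx response's
 * sequence field lets seq_to_queue()/seq_to_index() find the frame again */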
2366
2367static void iwl4965_set_rate(struct iwl_priv *priv) 809static void iwl4965_set_rate(struct iwl_priv *priv)
2368{ 810{
2369 const struct ieee80211_supported_band *hw = NULL; 811 const struct ieee80211_supported_band *hw = NULL;
2370 struct ieee80211_rate *rate; 812 struct ieee80211_rate *rate;
2371 int i; 813 int i;
2372 814
2373 hw = iwl4965_get_hw_mode(priv, priv->band); 815 hw = iwl_get_hw_mode(priv, priv->band);
2374 if (!hw) { 816 if (!hw) {
2375 IWL_ERROR("Failed to set rate: unable to get hw mode\n"); 817 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
2376 return; 818 return;
@@ -2411,169 +853,6 @@ static void iwl4965_set_rate(struct iwl_priv *priv)
2411 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 853 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2412} 854}
2413 855
2414void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
2415{
2416 unsigned long flags;
2417
2418 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2419 return;
2420
2421 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2422 disable_radio ? "OFF" : "ON");
2423
2424 if (disable_radio) {
2425 iwl4965_scan_cancel(priv);
2426 /* FIXME: This is a workaround for AP */
2427 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
2428 spin_lock_irqsave(&priv->lock, flags);
2429 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2430 CSR_UCODE_SW_BIT_RFKILL);
2431 spin_unlock_irqrestore(&priv->lock, flags);
2432 /* call the host command only if no hw rf-kill set */
2433 if (!test_bit(STATUS_RF_KILL_HW, &priv->status) &&
2434 iwl_is_ready(priv))
2435 iwl4965_send_card_state(priv,
2436 CARD_STATE_CMD_DISABLE,
2437 0);
2438 set_bit(STATUS_RF_KILL_SW, &priv->status);
2439
2440 /* make sure mac80211 stops sending Tx frames */
2441 if (priv->mac80211_registered)
2442 ieee80211_stop_queues(priv->hw);
2443 }
2444 return;
2445 }
2446
2447 spin_lock_irqsave(&priv->lock, flags);
2448 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2449
2450 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2451 spin_unlock_irqrestore(&priv->lock, flags);
2452
2453 /* wake up ucode */
2454 msleep(10);
2455
2456 spin_lock_irqsave(&priv->lock, flags);
2457 iwl_read32(priv, CSR_UCODE_DRV_GP1);
2458 if (!iwl_grab_nic_access(priv))
2459 iwl_release_nic_access(priv);
2460 spin_unlock_irqrestore(&priv->lock, flags);
2461
2462 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2463 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
2464 "disabled by HW switch\n");
2465 return;
2466 }
2467
2468 queue_work(priv->workqueue, &priv->restart);
2469 return;
2470}
2471
2472void iwl4965_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
2473 u32 decrypt_res, struct ieee80211_rx_status *stats)
2474{
2475 u16 fc =
2476 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2477
2478 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2479 return;
2480
2481 if (!(fc & IEEE80211_FCTL_PROTECTED))
2482 return;
2483
2484 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2485 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2486 case RX_RES_STATUS_SEC_TYPE_TKIP:
2487 /* The uCode got a bad phase 1 key and pushes the packet up;
2488 * decryption will be done in SW. */
2489 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2490 RX_RES_STATUS_BAD_KEY_TTAK)
2491 break;
2492
2493 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2494 RX_RES_STATUS_BAD_ICV_MIC)
2495 stats->flag |= RX_FLAG_MMIC_ERROR;
2496 case RX_RES_STATUS_SEC_TYPE_WEP:
2497 case RX_RES_STATUS_SEC_TYPE_CCMP:
2498 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2499 RX_RES_STATUS_DECRYPT_OK) {
2500 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2501 stats->flag |= RX_FLAG_DECRYPTED;
2502 }
2503 break;
2504
2505 default:
2506 break;
2507 }
2508}
2509
2510
2511#define IWL_PACKET_RETRY_TIME HZ
2512
2513int iwl4965_is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
2514{
2515 u16 sc = le16_to_cpu(header->seq_ctrl);
2516 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2517 u16 frag = sc & IEEE80211_SCTL_FRAG;
2518 u16 *last_seq, *last_frag;
2519 unsigned long *last_time;
2520
2521 switch (priv->iw_mode) {
2522 case IEEE80211_IF_TYPE_IBSS:{
2523 struct list_head *p;
2524 struct iwl4965_ibss_seq *entry = NULL;
2525 u8 *mac = header->addr2;
2526 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
2527
2528 __list_for_each(p, &priv->ibss_mac_hash[index]) {
2529 entry = list_entry(p, struct iwl4965_ibss_seq, list);
2530 if (!compare_ether_addr(entry->mac, mac))
2531 break;
2532 }
2533 if (p == &priv->ibss_mac_hash[index]) {
2534 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2535 if (!entry) {
2536 IWL_ERROR("Cannot malloc new mac entry\n");
2537 return 0;
2538 }
2539 memcpy(entry->mac, mac, ETH_ALEN);
2540 entry->seq_num = seq;
2541 entry->frag_num = frag;
2542 entry->packet_time = jiffies;
2543 list_add(&entry->list, &priv->ibss_mac_hash[index]);
2544 return 0;
2545 }
2546 last_seq = &entry->seq_num;
2547 last_frag = &entry->frag_num;
2548 last_time = &entry->packet_time;
2549 break;
2550 }
2551 case IEEE80211_IF_TYPE_STA:
2552 last_seq = &priv->last_seq_num;
2553 last_frag = &priv->last_frag_num;
2554 last_time = &priv->last_packet_time;
2555 break;
2556 default:
2557 return 0;
2558 }
2559 if ((*last_seq == seq) &&
2560 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
2561 if (*last_frag == frag)
2562 goto drop;
2563 if (*last_frag + 1 != frag)
2564 /* out-of-order fragment */
2565 goto drop;
2566 } else
2567 *last_seq = seq;
2568
2569 *last_frag = frag;
2570 *last_time = jiffies;
2571 return 0;
2572
2573 drop:
2574 return 1;
2575}
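/*
 * Editor's sketch (not driver code): the duplicate test above reduced to a
 * single per-peer state record.  A frame is dropped as a retry when it
 * repeats the last sequence number inside the retry window and its fragment
 * number is either identical or not the expected next fragment.  The struct
 * and constant names below are invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define PACKET_RETRY_SECS 1	/* stand-in for IWL_PACKET_RETRY_TIME (HZ) */

struct dup_state {
	uint16_t last_seq;
	uint16_t last_frag;
	time_t   last_time;
};

static bool is_duplicate(struct dup_state *st, uint16_t seq_ctrl, time_t now)
{
	uint16_t seq  = (seq_ctrl & 0xfff0) >> 4;	/* IEEE80211_SCTL_SEQ  */
	uint16_t frag = seq_ctrl & 0x000f;		/* IEEE80211_SCTL_FRAG */

	if (st->last_seq == seq && now < st->last_time + PACKET_RETRY_SECS) {
		if (st->last_frag == frag)
			return true;		/* exact retransmission  */
		if (st->last_frag + 1 != frag)
			return true;		/* out-of-order fragment */
	} else {
		st->last_seq = seq;
	}
	st->last_frag = frag;
	st->last_time = now;
	return false;
}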
2576
2577#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 856#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
2578 857
2579#include "iwl-spectrum.h" 858#include "iwl-spectrum.h"
@@ -2632,7 +911,7 @@ static int iwl4965_get_measurement(struct iwl_priv *priv,
2632 u8 type) 911 u8 type)
2633{ 912{
2634 struct iwl4965_spectrum_cmd spectrum; 913 struct iwl4965_spectrum_cmd spectrum;
2635 struct iwl4965_rx_packet *res; 914 struct iwl_rx_packet *res;
2636 struct iwl_host_cmd cmd = { 915 struct iwl_host_cmd cmd = {
2637 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 916 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2638 .data = (void *)&spectrum, 917 .data = (void *)&spectrum,
@@ -2677,7 +956,7 @@ static int iwl4965_get_measurement(struct iwl_priv *priv,
2677 if (rc) 956 if (rc)
2678 return rc; 957 return rc;
2679 958
2680 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data; 959 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
2681 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 960 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
2682 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); 961 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
2683 rc = -EIO; 962 rc = -EIO;
@@ -2707,352 +986,16 @@ static int iwl4965_get_measurement(struct iwl_priv *priv,
2707} 986}
2708#endif 987#endif
2709 988
2710static void iwl4965_txstatus_to_ieee(struct iwl_priv *priv,
2711 struct iwl4965_tx_info *tx_sta)
2712{
2713
2714 tx_sta->status.ack_signal = 0;
2715 tx_sta->status.excessive_retries = 0;
2716 tx_sta->status.queue_length = 0;
2717 tx_sta->status.queue_number = 0;
2718
2719 if (in_interrupt())
2720 ieee80211_tx_status_irqsafe(priv->hw,
2721 tx_sta->skb[0], &(tx_sta->status));
2722 else
2723 ieee80211_tx_status(priv->hw,
2724 tx_sta->skb[0], &(tx_sta->status));
2725
2726 tx_sta->skb[0] = NULL;
2727}
2728
2729/**
2730 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
2731 *
2732 * When FW advances 'R' index, all entries between old and new 'R' index
2733 * need to be reclaimed. As result, some free space forms. If there is
2734 * enough free space (> low mark), wake the stack that feeds us.
2735 */
2736int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
2737{
2738 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
2739 struct iwl4965_queue *q = &txq->q;
2740 int nfreed = 0;
2741
2742 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
2743 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
2744 "is out of range [0-%d] %d %d.\n", txq_id,
2745 index, q->n_bd, q->write_ptr, q->read_ptr);
2746 return 0;
2747 }
2748
2749 for (index = iwl_queue_inc_wrap(index, q->n_bd);
2750 q->read_ptr != index;
2751 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
2752 if (txq_id != IWL_CMD_QUEUE_NUM) {
2753 iwl4965_txstatus_to_ieee(priv,
2754 &(txq->txb[txq->q.read_ptr]));
2755 iwl4965_hw_txq_free_tfd(priv, txq);
2756 } else if (nfreed > 1) {
2757 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
2758 q->write_ptr, q->read_ptr);
2759 queue_work(priv->workqueue, &priv->restart);
2760 }
2761 nfreed++;
2762 }
2763
2764/* if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
2765 (txq_id != IWL_CMD_QUEUE_NUM) &&
2766 priv->mac80211_registered)
2767 ieee80211_wake_queue(priv->hw, txq_id); */
2768
2769
2770 return nfreed;
2771}
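/*
 * Editor's sketch: the reclaim loop above is simply "advance read_ptr with
 * wrap-around until it passes the index the firmware reports as done".
 * A minimal model with an invented queue struct; the real driver also hands
 * each reclaimed frame's status back to mac80211 and frees its TFD.
 */
struct toy_queue {
	int n_bd;	/* ring size (number of buffer descriptors) */
	int read_ptr;	/* next entry the driver will reclaim        */
	int write_ptr;	/* next entry the driver will fill           */
};

static inline int queue_inc_wrap(int idx, int n_bd)
{
	return (idx + 1 >= n_bd) ? 0 : idx + 1;
}

/* reclaim every entry from read_ptr up to and including 'index';
 * returns how many entries were freed */
static int toy_queue_reclaim(struct toy_queue *q, int index)
{
	int nfreed = 0;

	for (index = queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = queue_inc_wrap(q->read_ptr, q->n_bd))
		nfreed++;

	return nfreed;
}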
2772
2773static int iwl4965_is_tx_success(u32 status)
2774{
2775 status &= TX_STATUS_MSK;
2776 return (status == TX_STATUS_SUCCESS)
2777 || (status == TX_STATUS_DIRECT_DONE);
2778}
2779
2780/****************************************************************************** 989/******************************************************************************
2781 * 990 *
2782 * Generic RX handler implementations 991 * Generic RX handler implementations
2783 * 992 *
2784 ******************************************************************************/ 993 ******************************************************************************/
2785#ifdef CONFIG_IWL4965_HT 994static void iwl_rx_reply_alive(struct iwl_priv *priv,
2786 995 struct iwl_rx_mem_buffer *rxb)
2787static inline int iwl4965_get_ra_sta_id(struct iwl_priv *priv,
2788 struct ieee80211_hdr *hdr)
2789{ 996{
2790 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) 997 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
2791 return IWL_AP_ID; 998 struct iwl_alive_resp *palive;
2792 else {
2793 u8 *da = ieee80211_get_DA(hdr);
2794 return iwl4965_hw_find_station(priv, da);
2795 }
2796}
2797
2798static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
2799 struct iwl_priv *priv, int txq_id, int idx)
2800{
2801 if (priv->txq[txq_id].txb[idx].skb[0])
2802 return (struct ieee80211_hdr *)priv->txq[txq_id].
2803 txb[idx].skb[0]->data;
2804 return NULL;
2805}
2806
2807static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
2808{
2809 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
2810 tx_resp->frame_count);
2811 return le32_to_cpu(*scd_ssn) & MAX_SN;
2812
2813}
2814
2815/**
2816 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
2817 */
2818static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2819 struct iwl4965_ht_agg *agg,
2820 struct iwl4965_tx_resp_agg *tx_resp,
2821 u16 start_idx)
2822{
2823 u16 status;
2824 struct agg_tx_status *frame_status = &tx_resp->status;
2825 struct ieee80211_tx_status *tx_status = NULL;
2826 struct ieee80211_hdr *hdr = NULL;
2827 int i, sh;
2828 int txq_id, idx;
2829 u16 seq;
2830
2831 if (agg->wait_for_ba)
2832 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
2833
2834 agg->frame_count = tx_resp->frame_count;
2835 agg->start_idx = start_idx;
2836 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
2837 agg->bitmap = 0;
2838
2839 /* # frames attempted by Tx command */
2840 if (agg->frame_count == 1) {
2841 /* Only one frame was attempted; no block-ack will arrive */
2842 status = le16_to_cpu(frame_status[0].status);
2843 seq = le16_to_cpu(frame_status[0].sequence);
2844 idx = SEQ_TO_INDEX(seq);
2845 txq_id = SEQ_TO_QUEUE(seq);
2846
2847 /* FIXME: code repetition */
2848 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
2849 agg->frame_count, agg->start_idx, idx);
2850
2851 tx_status = &(priv->txq[txq_id].txb[idx].status);
2852 tx_status->retry_count = tx_resp->failure_frame;
2853 tx_status->queue_number = status & 0xff;
2854 tx_status->queue_length = tx_resp->failure_rts;
2855 tx_status->control.flags &= ~IEEE80211_TXCTL_AMPDU;
2856 tx_status->flags = iwl4965_is_tx_success(status)?
2857 IEEE80211_TX_STATUS_ACK : 0;
2858 iwl4965_hwrate_to_tx_control(priv,
2859 le32_to_cpu(tx_resp->rate_n_flags),
2860 &tx_status->control);
2861 /* FIXME: code repetition end */
2862
2863 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
2864 status & 0xff, tx_resp->failure_frame);
2865 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
2866 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
2867
2868 agg->wait_for_ba = 0;
2869 } else {
2870 /* Two or more frames were attempted; expect block-ack */
2871 u64 bitmap = 0;
2872 int start = agg->start_idx;
2873
2874 /* Construct bit-map of pending frames within Tx window */
2875 for (i = 0; i < agg->frame_count; i++) {
2876 u16 sc;
2877 status = le16_to_cpu(frame_status[i].status);
2878 seq = le16_to_cpu(frame_status[i].sequence);
2879 idx = SEQ_TO_INDEX(seq);
2880 txq_id = SEQ_TO_QUEUE(seq);
2881
2882 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
2883 AGG_TX_STATE_ABORT_MSK))
2884 continue;
2885
2886 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
2887 agg->frame_count, txq_id, idx);
2888
2889 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
2890
2891 sc = le16_to_cpu(hdr->seq_ctrl);
2892 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
2893 IWL_ERROR("BUG_ON idx doesn't match seq control"
2894 " idx=%d, seq_idx=%d, seq=%d\n",
2895 idx, SEQ_TO_SN(sc),
2896 hdr->seq_ctrl);
2897 return -1;
2898 }
2899
2900 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
2901 i, idx, SEQ_TO_SN(sc));
2902
2903 sh = idx - start;
2904 if (sh > 64) {
2905 sh = (start - idx) + 0xff;
2906 bitmap = bitmap << sh;
2907 sh = 0;
2908 start = idx;
2909 } else if (sh < -64)
2910 sh = 0xff - (start - idx);
2911 else if (sh < 0) {
2912 sh = start - idx;
2913 start = idx;
2914 bitmap = bitmap << sh;
2915 sh = 0;
2916 }
2917 bitmap |= (1 << sh);
2918 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
2919 start, (u32)(bitmap & 0xFFFFFFFF));
2920 }
2921
2922 agg->bitmap = bitmap;
2923 agg->start_idx = start;
2924 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
2925 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
2926 agg->frame_count, agg->start_idx,
2927 (unsigned long long)agg->bitmap);
2928
2929 if (bitmap)
2930 agg->wait_for_ba = 1;
2931 }
2932 return 0;
2933}
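/*
 * Editor's sketch: the loop above builds a bitmap of frames still waiting
 * for a block-ack, one bit per frame, positioned relative to the first
 * frame's TFD index and re-based whenever a frame lands before the current
 * window start.  A simplified model of the common case that ignores the
 * re-basing and uses a 64-bit shift so the offset stays well defined; the
 * names are invented.
 */
#include <stdint.h>

/* mark frame 'idx' as pending relative to window start 'start',
 * in a ring of 256 TFD slots */
static inline uint64_t agg_mark_pending(uint64_t bitmap, int start, int idx)
{
	int sh = (idx - start) & 0xff;	/* offset within the 256-entry ring */

	if (sh < 64)			/* only a 64-frame window fits the map */
		bitmap |= 1ULL << sh;
	return bitmap;
}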
2934#endif
2935
2936/**
2937 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
2938 */
2939static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2940 struct iwl4965_rx_mem_buffer *rxb)
2941{
2942 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
2943 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2944 int txq_id = SEQ_TO_QUEUE(sequence);
2945 int index = SEQ_TO_INDEX(sequence);
2946 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
2947 struct ieee80211_tx_status *tx_status;
2948 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
2949 u32 status = le32_to_cpu(tx_resp->status);
2950#ifdef CONFIG_IWL4965_HT
2951 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
2952 struct ieee80211_hdr *hdr;
2953 __le16 *qc;
2954#endif
2955
2956 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
2957 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
2958 "is out of range [0-%d] %d %d\n", txq_id,
2959 index, txq->q.n_bd, txq->q.write_ptr,
2960 txq->q.read_ptr);
2961 return;
2962 }
2963
2964#ifdef CONFIG_IWL4965_HT
2965 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, index);
2966 qc = ieee80211_get_qos_ctrl(hdr);
2967
2968 if (qc)
2969 tid = le16_to_cpu(*qc) & 0xf;
2970
2971 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
2972 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
2973 IWL_ERROR("Station not known\n");
2974 return;
2975 }
2976
2977 if (txq->sched_retry) {
2978 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2979 struct iwl4965_ht_agg *agg = NULL;
2980
2981 if (!qc)
2982 return;
2983
2984 agg = &priv->stations[sta_id].tid[tid].agg;
2985
2986 iwl4965_tx_status_reply_tx(priv, agg,
2987 (struct iwl4965_tx_resp_agg *)tx_resp, index);
2988
2989 if ((tx_resp->frame_count == 1) &&
2990 !iwl4965_is_tx_success(status)) {
2991 /* TODO: send BAR */
2992 }
2993
2994 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2995 int freed;
2996 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2997 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
2998 "%d index %d\n", scd_ssn , index);
2999 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
3000 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3001
3002 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3003 txq_id >= 0 && priv->mac80211_registered &&
3004 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
3005 ieee80211_wake_queue(priv->hw, txq_id);
3006
3007 iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
3008 }
3009 } else {
3010#endif /* CONFIG_IWL4965_HT */
3011 tx_status = &(txq->txb[txq->q.read_ptr].status);
3012
3013 tx_status->retry_count = tx_resp->failure_frame;
3014 tx_status->queue_number = status;
3015 tx_status->queue_length = tx_resp->bt_kill_count;
3016 tx_status->queue_length |= tx_resp->failure_rts;
3017 tx_status->flags =
3018 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
3019 iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
3020 &tx_status->control);
3021
3022 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
3023 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
3024 status, le32_to_cpu(tx_resp->rate_n_flags),
3025 tx_resp->failure_frame);
3026
3027 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3028 if (index != -1) {
3029#ifdef CONFIG_IWL4965_HT
3030 int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
3031
3032 if (tid != MAX_TID_COUNT)
3033 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3034 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3035 (txq_id >= 0) &&
3036 priv->mac80211_registered)
3037 ieee80211_wake_queue(priv->hw, txq_id);
3038 if (tid != MAX_TID_COUNT)
3039 iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
3040#endif
3041 }
3042#ifdef CONFIG_IWL4965_HT
3043 }
3044#endif /* CONFIG_IWL4965_HT */
3045
3046 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3047 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3048}
3049
3050
3051static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3052 struct iwl4965_rx_mem_buffer *rxb)
3053{
3054 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3055 struct iwl4965_alive_resp *palive;
3056 struct delayed_work *pwork; 999 struct delayed_work *pwork;
3057 1000
3058 palive = &pkt->u.alive_frame; 1001 palive = &pkt->u.alive_frame;
@@ -3066,12 +1009,12 @@ static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3066 IWL_DEBUG_INFO("Initialization Alive received.\n"); 1009 IWL_DEBUG_INFO("Initialization Alive received.\n");
3067 memcpy(&priv->card_alive_init, 1010 memcpy(&priv->card_alive_init,
3068 &pkt->u.alive_frame, 1011 &pkt->u.alive_frame,
3069 sizeof(struct iwl4965_init_alive_resp)); 1012 sizeof(struct iwl_init_alive_resp));
3070 pwork = &priv->init_alive_start; 1013 pwork = &priv->init_alive_start;
3071 } else { 1014 } else {
3072 IWL_DEBUG_INFO("Runtime Alive received.\n"); 1015 IWL_DEBUG_INFO("Runtime Alive received.\n");
3073 memcpy(&priv->card_alive, &pkt->u.alive_frame, 1016 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3074 sizeof(struct iwl4965_alive_resp)); 1017 sizeof(struct iwl_alive_resp));
3075 pwork = &priv->alive_start; 1018 pwork = &priv->alive_start;
3076 } 1019 }
3077 1020
@@ -3084,19 +1027,10 @@ static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3084 IWL_WARNING("uCode did not respond OK.\n"); 1027 IWL_WARNING("uCode did not respond OK.\n");
3085} 1028}
3086 1029
3087static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv,
3088 struct iwl4965_rx_mem_buffer *rxb)
3089{
3090 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3091
3092 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3093 return;
3094}
3095
3096static void iwl4965_rx_reply_error(struct iwl_priv *priv, 1030static void iwl4965_rx_reply_error(struct iwl_priv *priv,
3097 struct iwl4965_rx_mem_buffer *rxb) 1031 struct iwl_rx_mem_buffer *rxb)
3098{ 1032{
3099 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1033 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3100 1034
3101 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " 1035 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3102 "seq 0x%04X ser 0x%08X\n", 1036 "seq 0x%04X ser 0x%08X\n",
@@ -3109,10 +1043,10 @@ static void iwl4965_rx_reply_error(struct iwl_priv *priv,
3109 1043
3110#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 1044#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3111 1045
3112static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb) 1046static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
3113{ 1047{
3114 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1048 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3115 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon; 1049 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
3116 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif); 1050 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
3117 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", 1051 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3118 le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); 1052 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
@@ -3121,15 +1055,15 @@ static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *
3121} 1055}
3122 1056
3123static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv, 1057static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
3124 struct iwl4965_rx_mem_buffer *rxb) 1058 struct iwl_rx_mem_buffer *rxb)
3125{ 1059{
3126#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 1060#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
3127 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1061 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3128 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif); 1062 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
3129 1063
3130 if (!report->state) { 1064 if (!report->state) {
3131 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, 1065 IWL_DEBUG(IWL_DL_11H,
3132 "Spectrum Measure Notification: Start\n"); 1066 "Spectrum Measure Notification: Start\n");
3133 return; 1067 return;
3134 } 1068 }
3135 1069
@@ -3139,10 +1073,10 @@ static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
3139} 1073}
3140 1074
3141static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv, 1075static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
3142 struct iwl4965_rx_mem_buffer *rxb) 1076 struct iwl_rx_mem_buffer *rxb)
3143{ 1077{
3144#ifdef CONFIG_IWLWIFI_DEBUG 1078#ifdef CONFIG_IWLWIFI_DEBUG
3145 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1079 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3146 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif); 1080 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
3147 IWL_DEBUG_RX("sleep mode: %d, src: %d\n", 1081 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3148 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 1082 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
@@ -3150,13 +1084,13 @@ static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
3150} 1084}
3151 1085
3152static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 1086static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3153 struct iwl4965_rx_mem_buffer *rxb) 1087 struct iwl_rx_mem_buffer *rxb)
3154{ 1088{
3155 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1089 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3156 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " 1090 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3157 "notification for %s:\n", 1091 "notification for %s:\n",
3158 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); 1092 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3159 iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); 1093 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
3160} 1094}
3161 1095
3162static void iwl4965_bg_beacon_update(struct work_struct *work) 1096static void iwl4965_bg_beacon_update(struct work_struct *work)
@@ -3166,7 +1100,7 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
3166 struct sk_buff *beacon; 1100 struct sk_buff *beacon;
3167 1101
3168 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 1102 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
3169 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL); 1103 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
3170 1104
3171 if (!beacon) { 1105 if (!beacon) {
3172 IWL_ERROR("update beacon failed\n"); 1106 IWL_ERROR("update beacon failed\n");
@@ -3184,17 +1118,37 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
3184 iwl4965_send_beacon_cmd(priv); 1118 iwl4965_send_beacon_cmd(priv);
3185} 1119}
3186 1120
1121/**
1122 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
1123 *
1124 * This callback is provided in order to send a statistics request.
1125 *
1126 * This timer function is continually reset to execute within
1127 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
1128 * was received. We need to ensure we receive the statistics in order
1129 * to update the temperature used for calibrating the TXPOWER.
1130 */
1131static void iwl4965_bg_statistics_periodic(unsigned long data)
1132{
1133 struct iwl_priv *priv = (struct iwl_priv *)data;
1134
1135 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1136 return;
1137
1138 iwl_send_statistics_request(priv, CMD_ASYNC);
1139}
1140
3187static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, 1141static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
3188 struct iwl4965_rx_mem_buffer *rxb) 1142 struct iwl_rx_mem_buffer *rxb)
3189{ 1143{
3190#ifdef CONFIG_IWLWIFI_DEBUG 1144#ifdef CONFIG_IWLWIFI_DEBUG
3191 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1145 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3192 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status); 1146 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3193 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 1147 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
3194 1148
3195 IWL_DEBUG_RX("beacon status %x retries %d iss %d " 1149 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3196 "tsf %d %d rate %d\n", 1150 "tsf %d %d rate %d\n",
3197 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, 1151 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
3198 beacon->beacon_notify_hdr.failure_frame, 1152 beacon->beacon_notify_hdr.failure_frame,
3199 le32_to_cpu(beacon->ibss_mgr_status), 1153 le32_to_cpu(beacon->ibss_mgr_status),
3200 le32_to_cpu(beacon->high_tsf), 1154 le32_to_cpu(beacon->high_tsf),
@@ -3206,129 +1160,12 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
3206 queue_work(priv->workqueue, &priv->beacon_update); 1160 queue_work(priv->workqueue, &priv->beacon_update);
3207} 1161}
3208 1162
3209/* Service response to REPLY_SCAN_CMD (0x80) */
3210static void iwl4965_rx_reply_scan(struct iwl_priv *priv,
3211 struct iwl4965_rx_mem_buffer *rxb)
3212{
3213#ifdef CONFIG_IWLWIFI_DEBUG
3214 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3215 struct iwl4965_scanreq_notification *notif =
3216 (struct iwl4965_scanreq_notification *)pkt->u.raw;
3217
3218 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3219#endif
3220}
3221
3222/* Service SCAN_START_NOTIFICATION (0x82) */
3223static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv,
3224 struct iwl4965_rx_mem_buffer *rxb)
3225{
3226 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3227 struct iwl4965_scanstart_notification *notif =
3228 (struct iwl4965_scanstart_notification *)pkt->u.raw;
3229 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3230 IWL_DEBUG_SCAN("Scan start: "
3231 "%d [802.11%s] "
3232 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3233 notif->channel,
3234 notif->band ? "bg" : "a",
3235 notif->tsf_high,
3236 notif->tsf_low, notif->status, notif->beacon_timer);
3237}
3238
3239/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3240static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv,
3241 struct iwl4965_rx_mem_buffer *rxb)
3242{
3243 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3244 struct iwl4965_scanresults_notification *notif =
3245 (struct iwl4965_scanresults_notification *)pkt->u.raw;
3246
3247 IWL_DEBUG_SCAN("Scan ch.res: "
3248 "%d [802.11%s] "
3249 "(TSF: 0x%08X:%08X) - %d "
3250 "elapsed=%lu usec (%dms since last)\n",
3251 notif->channel,
3252 notif->band ? "bg" : "a",
3253 le32_to_cpu(notif->tsf_high),
3254 le32_to_cpu(notif->tsf_low),
3255 le32_to_cpu(notif->statistics[0]),
3256 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3257 jiffies_to_msecs(elapsed_jiffies
3258 (priv->last_scan_jiffies, jiffies)));
3259
3260 priv->last_scan_jiffies = jiffies;
3261 priv->next_scan_jiffies = 0;
3262}
3263
3264/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
3265static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv,
3266 struct iwl4965_rx_mem_buffer *rxb)
3267{
3268 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3269 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
3270
3271 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3272 scan_notif->scanned_channels,
3273 scan_notif->tsf_low,
3274 scan_notif->tsf_high, scan_notif->status);
3275
3276 /* The HW is no longer scanning */
3277 clear_bit(STATUS_SCAN_HW, &priv->status);
3278
3279 /* The scan completion notification came in, so kill that timer... */
3280 cancel_delayed_work(&priv->scan_check);
3281
3282 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
3283 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
3284 "2.4" : "5.2",
3285 jiffies_to_msecs(elapsed_jiffies
3286 (priv->scan_pass_start, jiffies)));
3287
3288 /* Remove this scanned band from the list of pending
3289 * bands to scan; band G precedes A in order of scanning,
3290 * as seen in iwl_bg_request_scan */
3291 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
3292 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
3293 else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
3294 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
3295
3296 /* If a request to abort was given, or the scan did not succeed
3297 * then we reset the scan state machine and terminate,
3298 * re-queuing another scan if one has been requested */
3299 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3300 IWL_DEBUG_INFO("Aborted scan completed.\n");
3301 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
3302 } else {
3303 /* If there are more bands on this scan pass reschedule */
3304 if (priv->scan_bands)
3305 goto reschedule;
3306 }
3307
3308 priv->last_scan_jiffies = jiffies;
3309 priv->next_scan_jiffies = 0;
3310 IWL_DEBUG_INFO("Setting scan to off\n");
3311
3312 clear_bit(STATUS_SCANNING, &priv->status);
3313
3314 IWL_DEBUG_INFO("Scan took %dms\n",
3315 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
3316
3317 queue_work(priv->workqueue, &priv->scan_completed);
3318
3319 return;
3320
3321reschedule:
3322 priv->scan_pass_start = jiffies;
3323 queue_work(priv->workqueue, &priv->request_scan);
3324}
3325
3326/* Handle notification from uCode that card's power state is changing 1163/* Handle notification from uCode that card's power state is changing
3327 * due to software, hardware, or critical temperature RFKILL */ 1164 * due to software, hardware, or critical temperature RFKILL */
3328static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, 1165static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
3329 struct iwl4965_rx_mem_buffer *rxb) 1166 struct iwl_rx_mem_buffer *rxb)
3330{ 1167{
3331 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1168 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3332 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 1169 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
3333 unsigned long status = priv->status; 1170 unsigned long status = priv->status;
3334 1171
@@ -3383,7 +1220,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
3383 clear_bit(STATUS_RF_KILL_SW, &priv->status); 1220 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3384 1221
3385 if (!(flags & RXON_CARD_DISABLED)) 1222 if (!(flags & RXON_CARD_DISABLED))
3386 iwl4965_scan_cancel(priv); 1223 iwl_scan_cancel(priv);
3387 1224
3388 if ((test_bit(STATUS_RF_KILL_HW, &status) != 1225 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
3389 test_bit(STATUS_RF_KILL_HW, &priv->status)) || 1226 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
@@ -3403,10 +1240,9 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
3403 * This function chains into the hardware specific files for them to setup 1240 * This function chains into the hardware specific files for them to setup
3404 * any hardware specific handlers as well. 1241 * any hardware specific handlers as well.
3405 */ 1242 */
3406static void iwl4965_setup_rx_handlers(struct iwl_priv *priv) 1243static void iwl_setup_rx_handlers(struct iwl_priv *priv)
3407{ 1244{
3408 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive; 1245 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
3409 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
3410 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error; 1246 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
3411 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa; 1247 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
3412 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 1248 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
@@ -3421,500 +1257,47 @@ static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
3421 * statistics request from the host as well as for the periodic 1257 * statistics request from the host as well as for the periodic
3422 * statistics notifications (after received beacons) from the uCode. 1258 * statistics notifications (after received beacons) from the uCode.
3423 */ 1259 */
3424 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics; 1260 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_rx_statistics;
3425 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics; 1261 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
3426
3427 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
3428 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
3429 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
3430 iwl4965_rx_scan_results_notif;
3431 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
3432 iwl4965_rx_scan_complete_notif;
3433 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
3434 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
3435
3436 /* Set up hardware specific Rx handlers */
3437 iwl4965_hw_rx_handler_setup(priv);
3438}
3439
3440/**
3441 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3442 * @rxb: Rx buffer to reclaim
3443 *
3444 * If an Rx buffer has an async callback associated with it, the callback
3445 * will be executed. The attached skb (if present) will only be freed
3446 * if the callback returns 1.
3447 */
3448static void iwl4965_tx_cmd_complete(struct iwl_priv *priv,
3449 struct iwl4965_rx_mem_buffer *rxb)
3450{
3451 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
3452 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3453 int txq_id = SEQ_TO_QUEUE(sequence);
3454 int index = SEQ_TO_INDEX(sequence);
3455 int huge = sequence & SEQ_HUGE_FRAME;
3456 int cmd_index;
3457 struct iwl_cmd *cmd;
3458
3459 /* If a Tx command is being handled and it isn't in the actual
3460 * command queue then a command routing bug has been introduced
3461 * in the queue management code. */
3462 if (txq_id != IWL_CMD_QUEUE_NUM)
3463 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
3464 txq_id, pkt->hdr.cmd);
3465 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3466
3467 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3468 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3469
3470 /* Input error checking is done when commands are added to queue. */
3471 if (cmd->meta.flags & CMD_WANT_SKB) {
3472 cmd->meta.source->u.skb = rxb->skb;
3473 rxb->skb = NULL;
3474 } else if (cmd->meta.u.callback &&
3475 !cmd->meta.u.callback(priv, cmd, rxb->skb))
3476 rxb->skb = NULL;
3477
3478 iwl4965_tx_queue_reclaim(priv, txq_id, index);
3479
3480 if (!(cmd->meta.flags & CMD_ASYNC)) {
3481 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3482 wake_up_interruptible(&priv->wait_command_queue);
3483 }
3484}
3485
3486/************************** RX-FUNCTIONS ****************************/
3487/*
3488 * Rx theory of operation
3489 *
3490 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
3491 * each of which points to a Receive Buffer to be filled by the 4965. These get
3492 * used not only for Rx frames, but for any command response or notification
3493 * from the 4965. The driver and 4965 manage the Rx buffers by means
3494 * of indexes into the circular buffer.
3495 *
3496 * Rx Queue Indexes
3497 * The host/firmware share two index registers for managing the Rx buffers.
3498 *
3499 * The READ index maps to the first position that the firmware may be writing
3500 * to -- the driver can read up to (but not including) this position and get
3501 * good data.
3502 * The READ index is managed by the firmware once the card is enabled.
3503 *
3504 * The WRITE index maps to the last position the driver has read from -- the
3505 * position preceding WRITE is the last slot the firmware can place a packet.
3506 *
3507 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3508 * WRITE = READ.
3509 *
3510 * During initialization, the host sets up the READ queue position to the first
3511 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3512 *
3513 * When the firmware places a packet in a buffer, it will advance the READ index
3514 * and fire the RX interrupt. The driver can then query the READ index and
3515 * process as many packets as possible, moving the WRITE index forward as it
3516 * resets the Rx queue buffers with new memory.
3517 *
3518 * The management in the driver is as follows:
3519 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
3520 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3521 * to replenish the iwl->rxq->rx_free.
3522 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
3523 * iwl->rxq is replenished and the READ INDEX is updated (updating the
3524 * 'processed' and 'read' driver indexes as well)
3525 * + A received packet is processed and handed to the kernel network stack,
3526 * detached from the iwl->rxq. The driver 'processed' index is updated.
3527 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
3528 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
3529 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
3530 * were enough free buffers and RX_STALLED is set it is cleared.
3531 *
3532 *
3533 * Driver sequence:
3534 *
3535 * iwl4965_rx_queue_alloc() Allocates rx_free
3536 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
3537 * iwl4965_rx_queue_restock
3538 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
3539 * queue, updates firmware pointers, and updates
3540 * the WRITE index. If insufficient rx_free buffers
3541 * are available, schedules iwl4965_rx_replenish
3542 *
3543 * -- enable interrupts --
3544 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
3545 * READ INDEX, detaching the SKB from the pool.
3546 * Moves the packet buffer from queue to rx_used.
3547 * Calls iwl4965_rx_queue_restock to refill any empty
3548 * slots.
3549 * ...
3550 *
3551 */
3552
3553/**
3554 * iwl4965_rx_queue_space - Return number of free slots available in queue.
3555 */
3556static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
3557{
3558 int s = q->read - q->write;
3559 if (s <= 0)
3560 s += RX_QUEUE_SIZE;
3561 /* keep some slack so a full queue is never confused with an empty one */
3562 s -= 2;
3563 if (s < 0)
3564 s = 0;
3565 return s;
3566}
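/*
 * Editor's note: a standalone copy of the free-slot computation above with
 * concrete values, to make the two-slot slack visible.  With a 256-entry
 * ring, read == write (the post-reset state) leaves 254 restockable slots,
 * and read two ahead of write leaves none, so a completely restocked ring
 * is never mistaken for an empty one.
 */
#include <stdio.h>

#define TOY_RX_QUEUE_SIZE 256

static int toy_rx_queue_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += TOY_RX_QUEUE_SIZE;
	s -= 2;				/* keep two slots of slack */
	return (s < 0) ? 0 : s;
}

int main(void)
{
	printf("%d %d\n", toy_rx_queue_space(0, 0),	/* 254 */
			  toy_rx_queue_space(10, 8));	/* 0   */
	return 0;
}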
3567
3568/**
3569 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
3570 */
3571int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl4965_rx_queue *q)
3572{
3573 u32 reg = 0;
3574 int rc = 0;
3575 unsigned long flags;
3576
3577 spin_lock_irqsave(&q->lock, flags);
3578
3579 if (q->need_update == 0)
3580 goto exit_unlock;
3581
3582 /* If power-saving is in use, make sure device is awake */
3583 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3584 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
3585
3586 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3587 iwl_set_bit(priv, CSR_GP_CNTRL,
3588 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3589 goto exit_unlock;
3590 }
3591
3592 rc = iwl_grab_nic_access(priv);
3593 if (rc)
3594 goto exit_unlock;
3595
3596 /* Device expects a multiple of 8 */
3597 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
3598 q->write & ~0x7);
3599 iwl_release_nic_access(priv);
3600
3601 /* Else device is assumed to be awake */
3602 } else
3603 /* Device expects a multiple of 8 */
3604 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
3605 1262
1263 iwl_setup_rx_scan_handlers(priv);
3606 1264
3607 q->need_update = 0; 1265 /* status change handler */
3608 1266 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
3609 exit_unlock:
3610 spin_unlock_irqrestore(&q->lock, flags);
3611 return rc;
3612}
3613
3614/**
3615 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
3616 */
3617static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
3618 dma_addr_t dma_addr)
3619{
3620 return cpu_to_le32((u32)(dma_addr >> 8));
3621}
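/*
 * Editor's note: the helper above stores only (dma_addr >> 8) in each RBD,
 * which implies Rx buffers start on a 256-byte boundary and lets a 32-bit
 * RBD cover up to a 40-bit bus address.  A sketch of the round trip:
 */
#include <stdint.h>

static inline uint32_t dma_to_rbd(uint64_t dma_addr) { return (uint32_t)(dma_addr >> 8); }
static inline uint64_t rbd_to_dma(uint32_t rbd)      { return (uint64_t)rbd << 8; }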
3622
3623
3624/**
3625 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
3626 *
3627 * If there are slots in the RX queue that need to be restocked,
3628 * and we have free pre-allocated buffers, fill the ranks as much
3629 * as we can, pulling from rx_free.
3630 *
3631 * This moves the 'write' index forward to catch up with 'processed', and
3632 * also updates the memory address in the firmware to reference the new
3633 * target buffer.
3634 */
3635static int iwl4965_rx_queue_restock(struct iwl_priv *priv)
3636{
3637 struct iwl4965_rx_queue *rxq = &priv->rxq;
3638 struct list_head *element;
3639 struct iwl4965_rx_mem_buffer *rxb;
3640 unsigned long flags;
3641 int write, rc;
3642
3643 spin_lock_irqsave(&rxq->lock, flags);
3644 write = rxq->write & ~0x7;
3645 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
3646 /* Get next free Rx buffer, remove from free list */
3647 element = rxq->rx_free.next;
3648 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
3649 list_del(element);
3650
3651 /* Point to Rx buffer via next RBD in circular buffer */
3652 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
3653 rxq->queue[rxq->write] = rxb;
3654 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
3655 rxq->free_count--;
3656 }
3657 spin_unlock_irqrestore(&rxq->lock, flags);
3658 /* If the pre-allocated buffer pool is dropping low, schedule to
3659 * refill it */
3660 if (rxq->free_count <= RX_LOW_WATERMARK)
3661 queue_work(priv->workqueue, &priv->rx_replenish);
3662
3663
3664 /* If we've added more space for the firmware to place data, tell it.
3665 * Increment device's write pointer in multiples of 8. */
3666 if ((write != (rxq->write & ~0x7))
3667 || (abs(rxq->write - rxq->read) > 7)) {
3668 spin_lock_irqsave(&rxq->lock, flags);
3669 rxq->need_update = 1;
3670 spin_unlock_irqrestore(&rxq->lock, flags);
3671 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
3672 if (rc)
3673 return rc;
3674 }
3675
3676 return 0;
3677}
3678
3679/**
3680 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
3681 *
3682 * When moving to rx_free an SKB is allocated for the slot.
3683 *
3684 * Also restock the Rx queue via iwl4965_rx_queue_restock.
3685 * This is called as a scheduled work item (except for during initialization)
3686 */
3687static void iwl4965_rx_allocate(struct iwl_priv *priv)
3688{
3689 struct iwl4965_rx_queue *rxq = &priv->rxq;
3690 struct list_head *element;
3691 struct iwl4965_rx_mem_buffer *rxb;
3692 unsigned long flags;
3693 spin_lock_irqsave(&rxq->lock, flags);
3694 while (!list_empty(&rxq->rx_used)) {
3695 element = rxq->rx_used.next;
3696 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
3697
3698 /* Alloc a new receive buffer */
3699 rxb->skb =
3700 alloc_skb(priv->hw_params.rx_buf_size,
3701 __GFP_NOWARN | GFP_ATOMIC);
3702 if (!rxb->skb) {
3703 if (net_ratelimit())
3704 printk(KERN_CRIT DRV_NAME
3705 ": Can not allocate SKB buffers\n");
3706 /* We don't reschedule replenish work here -- we will
3707 * call the restock method and if it still needs
3708 * more buffers it will schedule replenish */
3709 break;
3710 }
3711 priv->alloc_rxb_skb++;
3712 list_del(element);
3713 1267
3714 /* Get physical address of RB/SKB */ 1268 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
3715 rxb->dma_addr = 1269 iwl_rx_missed_beacon_notif;
3716 pci_map_single(priv->pci_dev, rxb->skb->data, 1270 /* Rx handlers */
3717 priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE); 1271 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
3718 list_add_tail(&rxb->list, &rxq->rx_free); 1272 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
3719 rxq->free_count++; 1273 /* block ack */
3720 } 1274 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba;
3721 spin_unlock_irqrestore(&rxq->lock, flags); 1275 /* Set up hardware specific Rx handlers */
1276 priv->cfg->ops->lib->rx_handler_setup(priv);
3722} 1277}
3723 1278
3724/* 1279/*
3725 * this should be called while priv->lock is locked 1280 * this should be called while priv->lock is locked
3726*/ 1281*/
3727static void __iwl4965_rx_replenish(void *data) 1282static void __iwl_rx_replenish(struct iwl_priv *priv)
3728{ 1283{
3729 struct iwl_priv *priv = data; 1284 iwl_rx_allocate(priv);
3730 1285 iwl_rx_queue_restock(priv);
3731 iwl4965_rx_allocate(priv);
3732 iwl4965_rx_queue_restock(priv);
3733} 1286}
3734 1287
3735 1288
3736void iwl4965_rx_replenish(void *data)
3737{
3738 struct iwl_priv *priv = data;
3739 unsigned long flags;
3740
3741 iwl4965_rx_allocate(priv);
3742
3743 spin_lock_irqsave(&priv->lock, flags);
3744 iwl4965_rx_queue_restock(priv);
3745 spin_unlock_irqrestore(&priv->lock, flags);
3746}
3747
3748/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
3749 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
3750 * This free routine walks the list of POOL entries and, if the SKB is
3751 * non-NULL, it is unmapped and freed.
3752 */
3753static void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
3754{
3755 int i;
3756 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3757 if (rxq->pool[i].skb != NULL) {
3758 pci_unmap_single(priv->pci_dev,
3759 rxq->pool[i].dma_addr,
3760 priv->hw_params.rx_buf_size,
3761 PCI_DMA_FROMDEVICE);
3762 dev_kfree_skb(rxq->pool[i].skb);
3763 }
3764 }
3765
3766 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
3767 rxq->dma_addr);
3768 rxq->bd = NULL;
3769}
3770
3771int iwl4965_rx_queue_alloc(struct iwl_priv *priv)
3772{
3773 struct iwl4965_rx_queue *rxq = &priv->rxq;
3774 struct pci_dev *dev = priv->pci_dev;
3775 int i;
3776
3777 spin_lock_init(&rxq->lock);
3778 INIT_LIST_HEAD(&rxq->rx_free);
3779 INIT_LIST_HEAD(&rxq->rx_used);
3780
3781 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
3782 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
3783 if (!rxq->bd)
3784 return -ENOMEM;
3785
3786 /* Fill the rx_used queue with _all_ of the Rx buffers */
3787 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
3788 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3789
3790 /* Set us so that we have processed and used all buffers, but have
3791 * not restocked the Rx queue with fresh buffers */
3792 rxq->read = rxq->write = 0;
3793 rxq->free_count = 0;
3794 rxq->need_update = 0;
3795 return 0;
3796}
3797
3798void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
3799{
3800 unsigned long flags;
3801 int i;
3802 spin_lock_irqsave(&rxq->lock, flags);
3803 INIT_LIST_HEAD(&rxq->rx_free);
3804 INIT_LIST_HEAD(&rxq->rx_used);
3805 /* Fill the rx_used queue with _all_ of the Rx buffers */
3806 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3807 /* In the reset function, these buffers may have been allocated
3808 * to an SKB, so we need to unmap and free potential storage */
3809 if (rxq->pool[i].skb != NULL) {
3810 pci_unmap_single(priv->pci_dev,
3811 rxq->pool[i].dma_addr,
3812 priv->hw_params.rx_buf_size,
3813 PCI_DMA_FROMDEVICE);
3814 priv->alloc_rxb_skb--;
3815 dev_kfree_skb(rxq->pool[i].skb);
3816 rxq->pool[i].skb = NULL;
3817 }
3818 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3819 }
3820
3821 /* Set us so that we have processed and used all buffers, but have
3822 * not restocked the Rx queue with fresh buffers */
3823 rxq->read = rxq->write = 0;
3824 rxq->free_count = 0;
3825 spin_unlock_irqrestore(&rxq->lock, flags);
3826}
3827
3828/* Convert linear signal-to-noise ratio into dB */
3829static u8 ratio2dB[100] = {
3830/* 0 1 2 3 4 5 6 7 8 9 */
3831 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
3832 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
3833 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
3834 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
3835 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
3836 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
3837 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
3838 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
3839 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
3840 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
3841};
3842
3843/* Calculates a relative dB value from a ratio of linear
3844 * (i.e. not dB) signal levels.
3845 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
3846int iwl4965_calc_db_from_ratio(int sig_ratio)
3847{
3848 /* 1000:1 or higher just report as 60 dB */
3849 if (sig_ratio >= 1000)
3850 return 60;
3851
3852 /* 100:1 or higher, divide by 10 and use table,
3853 * add 20 dB to make up for divide by 10 */
3854 if (sig_ratio >= 100)
3855 return (20 + (int)ratio2dB[sig_ratio/10]);
3856
3857 /* We shouldn't see this */
3858 if (sig_ratio < 1)
3859 return 0;
3860
3861 /* Use table for ratios 1:1 - 99:1 */
3862 return (int)ratio2dB[sig_ratio];
3863}
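/*
 * Editor's note: ratio2dB[] is approximately 20*log10(ratio) (a voltage
 * ratio expressed in dB), and the >= 100 branch reuses it by factoring out
 * a power of ten, i.e. adding 20 dB.  A small cross-check, assuming only
 * libm:
 */
#include <math.h>
#include <stdio.h>

static int ratio_to_db(int sig_ratio)
{
	if (sig_ratio < 1)
		return 0;
	if (sig_ratio >= 1000)
		return 60;
	return (int)lround(20.0 * log10((double)sig_ratio));
}

int main(void)
{
	/* the table gives 6, 20, 26 for ratios 2, 10, 20, and the >= 100
	 * branch returns 20 + ratio2dB[10] = 40 for a 100:1 ratio */
	printf("%d %d %d %d\n", ratio_to_db(2), ratio_to_db(10),
	       ratio_to_db(20), ratio_to_db(100));
	return 0;
}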
3864
3865#define PERFECT_RSSI (-20) /* dBm */
3866#define WORST_RSSI (-95) /* dBm */
3867#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
3868
3869/* Calculate an indication of rx signal quality (a percentage, not dBm!).
3870 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
3871 * about formulas used below. */
3872int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
3873{
3874 int sig_qual;
3875 int degradation = PERFECT_RSSI - rssi_dbm;
3876
3877 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
3878 * as indicator; formula is (signal dbm - noise dbm).
3879 * SNR at or above 40 is a great signal (100%).
3880 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
3881 * Weakest usable signal is usually 10 - 15 dB SNR. */
3882 if (noise_dbm) {
3883 if (rssi_dbm - noise_dbm >= 40)
3884 return 100;
3885 else if (rssi_dbm < noise_dbm)
3886 return 0;
3887 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
3888
3889 /* Else use just the signal level.
3890 * This formula is a least squares fit of data points collected and
3891 * compared with a reference system that had a percentage (%) display
3892 * for signal quality. */
3893 } else
3894 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
3895 (15 * RSSI_RANGE + 62 * degradation)) /
3896 (RSSI_RANGE * RSSI_RANGE);
3897
3898 if (sig_qual > 100)
3899 sig_qual = 100;
3900 else if (sig_qual < 1)
3901 sig_qual = 0;
3902
3903 return sig_qual;
3904}
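/*
 * Editor's note: a worked example of the two branches above.  With a noise
 * figure, quality is a clipped linear map of SNR (100% at 40 dB SNR);
 * without one, it is the quadratic fit of RSSI degradation below -20 dBm.
 */
#include <stdio.h>

int main(void)
{
	const int range = (-20) - (-95);	/* PERFECT_RSSI - WORST_RSSI = 75 */
	int snr, degradation;

	/* SNR branch: -60 dBm signal over -90 dBm noise -> 30 dB SNR -> 75% */
	snr = -60 - (-90);
	printf("snr branch:      %d%%\n", (snr * 5) / 2);

	/* no-noise branch: -60 dBm alone, 40 dB below PERFECT_RSSI -> 74% */
	degradation = -20 - (-60);
	printf("no-noise branch: %d%%\n",
	       (100 * range * range -
		degradation * (15 * range + 62 * degradation)) /
	       (range * range));
	return 0;
}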
3905
3906/** 1289/**
3907 * iwl4965_rx_handle - Main entry function for receiving responses from uCode 1290 * iwl_rx_handle - Main entry function for receiving responses from uCode
3908 * 1291 *
3909 * Uses the priv->rx_handlers callback function array to invoke 1292 * Uses the priv->rx_handlers callback function array to invoke
3910 * the appropriate handlers, including command responses, 1293 * the appropriate handlers, including command responses,
3911 * frame-received notifications, and other notifications. 1294 * frame-received notifications, and other notifications.
3912 */ 1295 */
3913static void iwl4965_rx_handle(struct iwl_priv *priv) 1296void iwl_rx_handle(struct iwl_priv *priv)
3914{ 1297{
3915 struct iwl4965_rx_mem_buffer *rxb; 1298 struct iwl_rx_mem_buffer *rxb;
3916 struct iwl4965_rx_packet *pkt; 1299 struct iwl_rx_packet *pkt;
3917 struct iwl4965_rx_queue *rxq = &priv->rxq; 1300 struct iwl_rx_queue *rxq = &priv->rxq;
3918 u32 r, i; 1301 u32 r, i;
3919 int reclaim; 1302 int reclaim;
3920 unsigned long flags; 1303 unsigned long flags;
@@ -3923,14 +1306,14 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3923 1306
3924 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1307 /* uCode's read index (stored in shared DRAM) indicates the last Rx
3925 * buffer that the driver may process (last buffer filled by ucode). */ 1308 * buffer that the driver may process (last buffer filled by ucode). */
3926 r = iwl4965_hw_get_rx_read(priv); 1309 r = priv->cfg->ops->lib->shared_mem_rx_idx(priv);
3927 i = rxq->read; 1310 i = rxq->read;
3928 1311
3929 /* Rx interrupt, but nothing sent from uCode */ 1312 /* Rx interrupt, but nothing sent from uCode */
3930 if (i == r) 1313 if (i == r)
3931 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); 1314 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d\n", r, i);
3932 1315
3933 if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2)) 1316 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
3934 fill_rx = 1; 1317 fill_rx = 1;
3935 1318
3936 while (i != r) { 1319 while (i != r) {
@@ -3946,7 +1329,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3946 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 1329 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
3947 priv->hw_params.rx_buf_size, 1330 priv->hw_params.rx_buf_size,
3948 PCI_DMA_FROMDEVICE); 1331 PCI_DMA_FROMDEVICE);
3949 pkt = (struct iwl4965_rx_packet *)rxb->skb->data; 1332 pkt = (struct iwl_rx_packet *)rxb->skb->data;
3950 1333
3951 /* Reclaim a command buffer only if this packet is a response 1334 /* Reclaim a command buffer only if this packet is a response
3952 * to a (driver-originated) command. 1335 * to a (driver-originated) command.
@@ -3965,13 +1348,12 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3965 * handle those that need handling via function in 1348 * handle those that need handling via function in
3966 * rx_handlers table. See iwl4965_setup_rx_handlers() */ 1349 * rx_handlers table. See iwl4965_setup_rx_handlers() */
3967 if (priv->rx_handlers[pkt->hdr.cmd]) { 1350 if (priv->rx_handlers[pkt->hdr.cmd]) {
3968 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, 1351 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d, %s, 0x%02x\n", r,
3969 "r = %d, i = %d, %s, 0x%02x\n", r, i, 1352 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3970 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3971 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 1353 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3972 } else { 1354 } else {
3973 /* No handling needed */ 1355 /* No handling needed */
3974 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, 1356 IWL_DEBUG(IWL_DL_RX,
3975 "r %d i %d No handler needed for %s, 0x%02x\n", 1357 "r %d i %d No handler needed for %s, 0x%02x\n",
3976 r, i, get_cmd_string(pkt->hdr.cmd), 1358 r, i, get_cmd_string(pkt->hdr.cmd),
3977 pkt->hdr.cmd); 1359 pkt->hdr.cmd);
@@ -3982,7 +1364,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3982 * fire off the (possibly) blocking iwl_send_cmd() 1364 * fire off the (possibly) blocking iwl_send_cmd()
3983 * as we reclaim the driver command queue */ 1365 * as we reclaim the driver command queue */
3984 if (rxb && rxb->skb) 1366 if (rxb && rxb->skb)
3985 iwl4965_tx_cmd_complete(priv, rxb); 1367 iwl_tx_cmd_complete(priv, rxb);
3986 else 1368 else
3987 IWL_WARNING("Claim null rxb?\n"); 1369 IWL_WARNING("Claim null rxb?\n");
3988 } 1370 }
@@ -4009,7 +1391,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
4009 count++; 1391 count++;
4010 if (count >= 8) { 1392 if (count >= 8) {
4011 priv->rxq.read = i; 1393 priv->rxq.read = i;
4012 __iwl4965_rx_replenish(priv); 1394 __iwl_rx_replenish(priv);
4013 count = 0; 1395 count = 0;
4014 } 1396 }
4015 } 1397 }
@@ -4017,62 +1399,17 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
4017 1399
4018 /* Backtrack one entry */ 1400 /* Backtrack one entry */
4019 priv->rxq.read = i; 1401 priv->rxq.read = i;
4020 iwl4965_rx_queue_restock(priv); 1402 iwl_rx_queue_restock(priv);
4021}
4022
4023/**
4024 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware
4025 */
4026static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv,
4027 struct iwl4965_tx_queue *txq)
4028{
4029 u32 reg = 0;
4030 int rc = 0;
4031 int txq_id = txq->q.id;
4032
4033 if (txq->need_update == 0)
4034 return rc;
4035
4036 /* if we're trying to save power */
4037 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4038 /* wake up nic if it's powered down ...
4039 * uCode will wake up, and interrupt us again, so next
4040 * time we'll skip this part. */
4041 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4042
4043 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4044 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
4045 iwl_set_bit(priv, CSR_GP_CNTRL,
4046 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4047 return rc;
4048 }
4049
4050 /* restore this queue's parameters in nic hardware. */
4051 rc = iwl_grab_nic_access(priv);
4052 if (rc)
4053 return rc;
4054 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
4055 txq->q.write_ptr | (txq_id << 8));
4056 iwl_release_nic_access(priv);
4057
4058 /* else not in power-save mode, uCode will never sleep when we're
4059 * trying to tx (during RFKILL, we're not trying to tx). */
4060 } else
4061 iwl_write32(priv, HBUS_TARG_WRPTR,
4062 txq->q.write_ptr | (txq_id << 8));
4063
4064 txq->need_update = 0;
4065
4066 return rc;
4067} 1403}
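As a small illustration (not driver code) of the write-pointer helper removed above (its callers below now use iwl_txq_update_write_ptr()): the value programmed into HBUS_TARG_WRPTR packs the queue's write index into the low byte and the TX queue id into bits 8 and up. The numbers in main() are made up.

#include <stdint.h>
#include <stdio.h>

static uint32_t hbus_wrptr_val(uint32_t write_ptr, uint32_t txq_id)
{
	/* write index in the low 8 bits, queue id shifted into bits 8+ */
	return write_ptr | (txq_id << 8);
}

int main(void)
{
	printf("queue 4, index 0x2a -> 0x%04x\n", hbus_wrptr_val(0x2a, 4)); /* 0x042a */
	return 0;
}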
4068 1404
4069#ifdef CONFIG_IWLWIFI_DEBUG 1405#ifdef CONFIG_IWLWIFI_DEBUG
4070static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon) 1406static void iwl4965_print_rx_config_cmd(struct iwl_priv *priv)
4071{ 1407{
1408 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
4072 DECLARE_MAC_BUF(mac); 1409 DECLARE_MAC_BUF(mac);
4073 1410
4074 IWL_DEBUG_RADIO("RX CONFIG:\n"); 1411 IWL_DEBUG_RADIO("RX CONFIG:\n");
4075 iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); 1412 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4076 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); 1413 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4077 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); 1414 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4078 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", 1415 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
@@ -4118,173 +1455,6 @@ static inline void iwl4965_disable_interrupts(struct iwl_priv *priv)
4118 IWL_DEBUG_ISR("Disabled interrupts\n"); 1455 IWL_DEBUG_ISR("Disabled interrupts\n");
4119} 1456}
4120 1457
4121static const char *desc_lookup(int i)
4122{
4123 switch (i) {
4124 case 1:
4125 return "FAIL";
4126 case 2:
4127 return "BAD_PARAM";
4128 case 3:
4129 return "BAD_CHECKSUM";
4130 case 4:
4131 return "NMI_INTERRUPT";
4132 case 5:
4133 return "SYSASSERT";
4134 case 6:
4135 return "FATAL_ERROR";
4136 }
4137
4138 return "UNKNOWN";
4139}
4140
4141#define ERROR_START_OFFSET (1 * sizeof(u32))
4142#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4143
4144static void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
4145{
4146 u32 data2, line;
4147 u32 desc, time, count, base, data1;
4148 u32 blink1, blink2, ilink1, ilink2;
4149 int rc;
4150
4151 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4152
4153 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
4154 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4155 return;
4156 }
4157
4158 rc = iwl_grab_nic_access(priv);
4159 if (rc) {
4160 IWL_WARNING("Can not read from adapter at this time.\n");
4161 return;
4162 }
4163
4164 count = iwl_read_targ_mem(priv, base);
4165
4166 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4167 IWL_ERROR("Start IWL Error Log Dump:\n");
4168 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
4169 }
4170
4171 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
4172 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
4173 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
4174 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
4175 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
4176 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
4177 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
4178 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
4179 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
4180
4181 IWL_ERROR("Desc Time "
4182 "data1 data2 line\n");
4183 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4184 desc_lookup(desc), desc, time, data1, data2, line);
4185 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4186 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4187 ilink1, ilink2);
4188
4189 iwl_release_nic_access(priv);
4190}
4191
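The word offsets read by iwl4965_dump_nic_error_log() above imply roughly the following SRAM layout. The struct below only illustrates those offsets; it is not taken from the firmware interface, and words 2 and 10, which the dump does not read, are labelled unknown here on that assumption.

#include <stdint.h>

struct error_event_table_sketch {
	uint32_t count;		/* word 0: number of error entries */
	uint32_t desc;		/* word 1: error type, see desc_lookup() */
	uint32_t unknown1;	/* word 2: not read by the dump above */
	uint32_t blink1;	/* word 3 */
	uint32_t blink2;	/* word 4 */
	uint32_t ilink1;	/* word 5 */
	uint32_t ilink2;	/* word 6 */
	uint32_t data1;		/* word 7 */
	uint32_t data2;		/* word 8 */
	uint32_t line;		/* word 9: source line of the assert */
	uint32_t unknown2;	/* word 10: not read by the dump above */
	uint32_t time;		/* word 11: uCode timestamp */
};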
4192#define EVENT_START_OFFSET (4 * sizeof(u32))
4193
4194/**
4195 * iwl4965_print_event_log - Dump error event log to syslog
4196 *
4197 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
4198 */
4199static void iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
4200 u32 num_events, u32 mode)
4201{
4202 u32 i;
4203 u32 base; /* SRAM byte address of event log header */
4204 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4205 u32 ptr; /* SRAM byte address of log data */
4206 u32 ev, time, data; /* event log data */
4207
4208 if (num_events == 0)
4209 return;
4210
4211 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4212
4213 if (mode == 0)
4214 event_size = 2 * sizeof(u32);
4215 else
4216 event_size = 3 * sizeof(u32);
4217
4218 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4219
4220 /* "time" is actually "data" for mode 0 (no timestamp).
4221 * place event id # at far right for easier visual parsing. */
4222 for (i = 0; i < num_events; i++) {
4223 ev = iwl_read_targ_mem(priv, ptr);
4224 ptr += sizeof(u32);
4225 time = iwl_read_targ_mem(priv, ptr);
4226 ptr += sizeof(u32);
4227 if (mode == 0)
4228 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4229 else {
4230 data = iwl_read_targ_mem(priv, ptr);
4231 ptr += sizeof(u32);
4232 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4233 }
4234 }
4235}
4236
4237static void iwl4965_dump_nic_event_log(struct iwl_priv *priv)
4238{
4239 int rc;
4240 u32 base; /* SRAM byte address of event log header */
4241 u32 capacity; /* event log capacity in # entries */
4242 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4243 u32 num_wraps; /* # times uCode wrapped to top of log */
4244 u32 next_entry; /* index of next entry to be written by uCode */
4245 u32 size; /* # entries that we'll print */
4246
4247 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4248 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
4249 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4250 return;
4251 }
4252
4253 rc = iwl_grab_nic_access(priv);
4254 if (rc) {
4255 IWL_WARNING("Can not read from adapter at this time.\n");
4256 return;
4257 }
4258
4259 /* event log header */
4260 capacity = iwl_read_targ_mem(priv, base);
4261 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
4262 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
4263 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
4264
4265 size = num_wraps ? capacity : next_entry;
4266
4267 /* bail out if nothing in log */
4268 if (size == 0) {
4269 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
4270 iwl_release_nic_access(priv);
4271 return;
4272 }
4273
4274 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
4275 size, num_wraps);
4276
4277 /* if uCode has wrapped back to top of log, start at the oldest entry,
4278 * i.e the next one that uCode would fill. */
4279 if (num_wraps)
4280 iwl4965_print_event_log(priv, next_entry,
4281 capacity - next_entry, mode);
4282
4283 /* (then/else) start at top of log */
4284 iwl4965_print_event_log(priv, 0, next_entry, mode);
4285
4286 iwl_release_nic_access(priv);
4287}
4288 1458
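The reads in iwl4965_dump_nic_event_log() and iwl4965_print_event_log() above imply the following event-log layout: a four-word header at the base address, then fixed-size records starting at EVENT_START_OFFSET, two words per record without timestamps (mode 0) or three with (mode 1). The structs below only illustrate those offsets and are not a firmware-interface definition.

#include <stdint.h>

struct event_log_hdr_sketch {
	uint32_t capacity;	/* max # of entries the ring can hold */
	uint32_t mode;		/* 0: no timestamp, 1: timestamp recorded */
	uint32_t num_wraps;	/* # times uCode wrapped to top of log */
	uint32_t next_entry;	/* index of the next entry uCode will write */
};

struct event_rec_mode0_sketch {	/* mode 0 record */
	uint32_t event_id;
	uint32_t data;
};

struct event_rec_mode1_sketch {	/* mode 1 record */
	uint32_t event_id;
	uint32_t timestamp;
	uint32_t data;
};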
4289/** 1459/**
4290 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card 1460 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
@@ -4298,10 +1468,10 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
4298 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1468 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4299 1469
4300#ifdef CONFIG_IWLWIFI_DEBUG 1470#ifdef CONFIG_IWLWIFI_DEBUG
4301 if (iwl_debug_level & IWL_DL_FW_ERRORS) { 1471 if (priv->debug_level & IWL_DL_FW_ERRORS) {
4302 iwl4965_dump_nic_error_log(priv); 1472 iwl_dump_nic_error_log(priv);
4303 iwl4965_dump_nic_event_log(priv); 1473 iwl_dump_nic_event_log(priv);
4304 iwl4965_print_rx_config_cmd(&priv->staging_rxon); 1474 iwl4965_print_rx_config_cmd(priv);
4305 } 1475 }
4306#endif 1476#endif
4307 1477
@@ -4312,7 +1482,7 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
4312 clear_bit(STATUS_READY, &priv->status); 1482 clear_bit(STATUS_READY, &priv->status);
4313 1483
4314 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { 1484 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4315 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, 1485 IWL_DEBUG(IWL_DL_FW_ERRORS,
4316 "Restarting adapter due to uCode error.\n"); 1486 "Restarting adapter due to uCode error.\n");
4317 1487
4318 if (iwl_is_associated(priv)) { 1488 if (iwl_is_associated(priv)) {
@@ -4320,7 +1490,8 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
4320 sizeof(priv->recovery_rxon)); 1490 sizeof(priv->recovery_rxon));
4321 priv->error_recovering = 1; 1491 priv->error_recovering = 1;
4322 } 1492 }
4323 queue_work(priv->workqueue, &priv->restart); 1493 if (priv->cfg->mod_params->restart_fw)
1494 queue_work(priv->workqueue, &priv->restart);
4324 } 1495 }
4325} 1496}
4326 1497
@@ -4333,7 +1504,7 @@ static void iwl4965_error_recovery(struct iwl_priv *priv)
4333 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1504 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4334 iwl4965_commit_rxon(priv); 1505 iwl4965_commit_rxon(priv);
4335 1506
4336 iwl4965_rxon_add_station(priv, priv->bssid, 1); 1507 iwl_rxon_add_station(priv, priv->bssid, 1);
4337 1508
4338 spin_lock_irqsave(&priv->lock, flags); 1509 spin_lock_irqsave(&priv->lock, flags);
4339 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id); 1510 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
@@ -4365,7 +1536,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4365 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); 1536 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
4366 1537
4367#ifdef CONFIG_IWLWIFI_DEBUG 1538#ifdef CONFIG_IWLWIFI_DEBUG
4368 if (iwl_debug_level & IWL_DL_ISR) { 1539 if (priv->debug_level & IWL_DL_ISR) {
4369 /* just for debug */ 1540 /* just for debug */
4370 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1541 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4371 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 1542 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -4399,7 +1570,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4399 } 1570 }
4400 1571
4401#ifdef CONFIG_IWLWIFI_DEBUG 1572#ifdef CONFIG_IWLWIFI_DEBUG
4402 if (iwl_debug_level & (IWL_DL_ISR)) { 1573 if (priv->debug_level & (IWL_DL_ISR)) {
4403 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1574 /* NIC fires this, but we don't use it, redundant with WAKEUP */
4404 if (inta & CSR_INT_BIT_SCD) 1575 if (inta & CSR_INT_BIT_SCD)
4405 IWL_DEBUG_ISR("Scheduler finished to transmit " 1576 IWL_DEBUG_ISR("Scheduler finished to transmit "
@@ -4420,18 +1591,15 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4420 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 1591 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4421 hw_rf_kill = 1; 1592 hw_rf_kill = 1;
4422 1593
4423 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR, 1594 IWL_DEBUG(IWL_DL_RF_KILL, "RF_KILL bit toggled to %s.\n",
4424 "RF_KILL bit toggled to %s.\n",
4425 hw_rf_kill ? "disable radio":"enable radio"); 1595 hw_rf_kill ? "disable radio":"enable radio");
4426 1596
4427 /* Queue restart only if RF_KILL switch was set to "kill" 1597 /* The driver only loads uCode once, when the interface is set up.
4428 * when we loaded driver, and is now set to "enable". 1598 * It also refuses to load the firmware while RFKILL is asserted,
4429 * After we're Alive, RF_KILL gets handled by 1599 * so there is no need to restart the driver from this handler.
4430 * iwl4965_rx_card_state_notif() */ 1600 */
4431 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) { 1601 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status))
4432 clear_bit(STATUS_RF_KILL_HW, &priv->status); 1602 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4433 queue_work(priv->workqueue, &priv->restart);
4434 }
4435 1603
4436 handled |= CSR_INT_BIT_RF_KILL; 1604 handled |= CSR_INT_BIT_RF_KILL;
4437 } 1605 }
@@ -4453,13 +1621,13 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4453 /* uCode wakes up after power-down sleep */ 1621 /* uCode wakes up after power-down sleep */
4454 if (inta & CSR_INT_BIT_WAKEUP) { 1622 if (inta & CSR_INT_BIT_WAKEUP) {
4455 IWL_DEBUG_ISR("Wakeup interrupt\n"); 1623 IWL_DEBUG_ISR("Wakeup interrupt\n");
4456 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq); 1624 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
4457 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]); 1625 iwl_txq_update_write_ptr(priv, &priv->txq[0]);
4458 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]); 1626 iwl_txq_update_write_ptr(priv, &priv->txq[1]);
4459 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]); 1627 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
4460 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]); 1628 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
4461 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]); 1629 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
4462 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]); 1630 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
4463 1631
4464 handled |= CSR_INT_BIT_WAKEUP; 1632 handled |= CSR_INT_BIT_WAKEUP;
4465 } 1633 }
@@ -4468,13 +1636,16 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4468 * Rx "responses" (frame-received notification), and other 1636 * Rx "responses" (frame-received notification), and other
4469 * notifications from uCode come through here*/ 1637 * notifications from uCode come through here*/
4470 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1638 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
4471 iwl4965_rx_handle(priv); 1639 iwl_rx_handle(priv);
4472 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1640 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4473 } 1641 }
4474 1642
4475 if (inta & CSR_INT_BIT_FH_TX) { 1643 if (inta & CSR_INT_BIT_FH_TX) {
4476 IWL_DEBUG_ISR("Tx interrupt\n"); 1644 IWL_DEBUG_ISR("Tx interrupt\n");
4477 handled |= CSR_INT_BIT_FH_TX; 1645 handled |= CSR_INT_BIT_FH_TX;
1646 /* FH has finished writing; wake up the waiter */
1647 priv->ucode_write_complete = 1;
1648 wake_up_interruptible(&priv->wait_command_queue);
4478 } 1649 }
4479 1650
4480 if (inta & ~handled) 1651 if (inta & ~handled)
@@ -4492,7 +1663,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4492 iwl4965_enable_interrupts(priv); 1663 iwl4965_enable_interrupts(priv);
4493 1664
4494#ifdef CONFIG_IWLWIFI_DEBUG 1665#ifdef CONFIG_IWLWIFI_DEBUG
4495 if (iwl_debug_level & (IWL_DL_ISR)) { 1666 if (priv->debug_level & (IWL_DL_ISR)) {
4496 inta = iwl_read32(priv, CSR_INT); 1667 inta = iwl_read32(priv, CSR_INT);
4497 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1668 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4498 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1669 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
@@ -4561,297 +1732,6 @@ static irqreturn_t iwl4965_isr(int irq, void *data)
4561 return IRQ_NONE; 1732 return IRQ_NONE;
4562} 1733}
4563 1734
4564/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
4565 * sending probe req. This should be set long enough to hear probe responses
4566 * from more than one AP. */
4567#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
4568#define IWL_ACTIVE_DWELL_TIME_52 (10)
4569
4570/* For faster active scanning, scan will move to the next channel if fewer than
4571 * PLCP_QUIET_THRESH packets are heard on this channel within
4572 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
4573 * time if it's a quiet channel (nothing responded to our probe, and there's
4574 * no other traffic).
4575 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
4576#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
4577#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
4578
4579/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
4580 * Must be set longer than active dwell time.
4581 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
4582#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
4583#define IWL_PASSIVE_DWELL_TIME_52 (10)
4584#define IWL_PASSIVE_DWELL_BASE (100)
4585#define IWL_CHANNEL_TUNE_TIME 5
4586
4587static inline u16 iwl4965_get_active_dwell_time(struct iwl_priv *priv,
4588 enum ieee80211_band band)
4589{
4590 if (band == IEEE80211_BAND_5GHZ)
4591 return IWL_ACTIVE_DWELL_TIME_52;
4592 else
4593 return IWL_ACTIVE_DWELL_TIME_24;
4594}
4595
4596static u16 iwl4965_get_passive_dwell_time(struct iwl_priv *priv,
4597 enum ieee80211_band band)
4598{
4599 u16 active = iwl4965_get_active_dwell_time(priv, band);
4600 u16 passive = (band != IEEE80211_BAND_5GHZ) ?
4601 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
4602 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
4603
4604 if (iwl_is_associated(priv)) {
4605 /* If we're associated, we clamp the maximum passive
4606 * dwell time to be 98% of the beacon interval (minus
4607 * 2 * channel tune time) */
4608 passive = priv->beacon_int;
4609 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
4610 passive = IWL_PASSIVE_DWELL_BASE;
4611 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
4612 }
4613
4614 if (passive <= active)
4615 passive = active + 1;
4616
4617 return passive;
4618}
4619
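A quick user-space check (not driver code) of the dwell-time clamping above for the 2.4 GHz band: an unassociated scan gets the full 120 ms passive dwell, while an associated station with a typical beacon interval of 100 is clamped to 98% of it minus twice the channel tune time, i.e. 88 ms.

#include <stdio.h>

#define ACTIVE_DWELL_24		20
#define PASSIVE_DWELL_24	20
#define PASSIVE_DWELL_BASE	100
#define CHANNEL_TUNE_TIME	5

/* Mirror of the 2.4 GHz path of iwl4965_get_passive_dwell_time() above */
static unsigned int passive_dwell_24(int associated, unsigned int beacon_int)
{
	unsigned int active = ACTIVE_DWELL_24;
	unsigned int passive = PASSIVE_DWELL_BASE + PASSIVE_DWELL_24;

	if (associated) {
		passive = beacon_int;
		if (passive > PASSIVE_DWELL_BASE || !passive)
			passive = PASSIVE_DWELL_BASE;
		passive = (passive * 98) / 100 - CHANNEL_TUNE_TIME * 2;
	}
	if (passive <= active)
		passive = active + 1;
	return passive;
}

int main(void)
{
	printf("unassociated: %u ms\n", passive_dwell_24(0, 0));   /* 120 */
	printf("associated:   %u ms\n", passive_dwell_24(1, 100)); /* 88 */
	return 0;
}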
4620static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
4621 enum ieee80211_band band,
4622 u8 is_active, u8 direct_mask,
4623 struct iwl4965_scan_channel *scan_ch)
4624{
4625 const struct ieee80211_channel *channels = NULL;
4626 const struct ieee80211_supported_band *sband;
4627 const struct iwl_channel_info *ch_info;
4628 u16 passive_dwell = 0;
4629 u16 active_dwell = 0;
4630 int added, i;
4631
4632 sband = iwl4965_get_hw_mode(priv, band);
4633 if (!sband)
4634 return 0;
4635
4636 channels = sband->channels;
4637
4638 active_dwell = iwl4965_get_active_dwell_time(priv, band);
4639 passive_dwell = iwl4965_get_passive_dwell_time(priv, band);
4640
4641 for (i = 0, added = 0; i < sband->n_channels; i++) {
4642 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4643 continue;
4644
4645 scan_ch->channel = ieee80211_frequency_to_channel(channels[i].center_freq);
4646
4647 ch_info = iwl_get_channel_info(priv, band, scan_ch->channel);
4648 if (!is_channel_valid(ch_info)) {
4649 IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
4650 scan_ch->channel);
4651 continue;
4652 }
4653
4654 if (!is_active || is_channel_passive(ch_info) ||
4655 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
4656 scan_ch->type = 0; /* passive */
4657 else
4658 scan_ch->type = 1; /* active */
4659
4660 if (scan_ch->type & 1)
4661 scan_ch->type |= (direct_mask << 1);
4662
4663 if (is_channel_narrow(ch_info))
4664 scan_ch->type |= (1 << 7);
4665
4666 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4667 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
4668
4669 /* Set txpower levels to defaults */
4670 scan_ch->tpc.dsp_atten = 110;
4671 /* scan_pwr_info->tpc.dsp_atten; */
4672
4673 /*scan_pwr_info->tpc.tx_gain; */
4674 if (band == IEEE80211_BAND_5GHZ)
4675 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
4676 else {
4677 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
4678 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
4679 * power level:
4680 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
4681 */
4682 }
4683
4684 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
4685 scan_ch->channel,
4686 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
4687 (scan_ch->type & 1) ?
4688 active_dwell : passive_dwell);
4689
4690 scan_ch++;
4691 added++;
4692 }
4693
4694 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
4695 return added;
4696}
4697
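For illustration only (not driver code), the scan_ch->type encoding built above: bit 0 selects active scanning, the direct-probe mask occupies the bits immediately above it, and bit 7 flags a narrow channel.

#include <stdint.h>
#include <stdio.h>

static uint8_t scan_channel_type(int active, uint8_t direct_mask, int narrow)
{
	uint8_t type = active ? 1 : 0;	/* bit 0: 1 = active, 0 = passive */

	if (type & 1)
		type |= (uint8_t)(direct_mask << 1);	/* direct probe bits */
	if (narrow)
		type |= 1 << 7;				/* narrow channel */
	return type;
}

int main(void)
{
	/* active scan, direct probe mask 0x1, regular-width channel -> 0x03 */
	printf("type = 0x%02x\n", scan_channel_type(1, 0x1, 0));
	return 0;
}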
4698static void iwl4965_init_hw_rates(struct iwl_priv *priv,
4699 struct ieee80211_rate *rates)
4700{
4701 int i;
4702
4703 for (i = 0; i < IWL_RATE_COUNT; i++) {
4704 rates[i].bitrate = iwl4965_rates[i].ieee * 5;
4705 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4706 rates[i].hw_value_short = i;
4707 rates[i].flags = 0;
4708 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
4709 /*
4710 * If CCK != 1M then set short preamble rate flag.
4711 */
4712 rates[i].flags |=
4713 (iwl4965_rates[i].plcp == IWL_RATE_1M_PLCP) ?
4714 0 : IEEE80211_RATE_SHORT_PREAMBLE;
4715 }
4716 }
4717}
4718
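The * 5 above converts rate units: the driver's "ieee" values appear to be in 500 kb/s units (as in the 802.11 Supported Rates element), while mac80211's ieee80211_rate.bitrate is in 100 kb/s units. A tiny user-space check of that assumption:

#include <stdio.h>

int main(void)
{
	/* ieee values for 1, 2, 5.5, 11, 6 and 54 Mb/s, assuming 500 kb/s units */
	static const int ieee[] = { 2, 4, 11, 22, 12, 108 };
	int i;

	for (i = 0; i < 6; i++)
		printf("ieee %3d -> bitrate %3d (%.1f Mb/s)\n",
		       ieee[i], ieee[i] * 5, ieee[i] * 5 / 10.0);
	return 0;
}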
4719/**
4720 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
4721 */
4722int iwl4965_init_geos(struct iwl_priv *priv)
4723{
4724 struct iwl_channel_info *ch;
4725 struct ieee80211_supported_band *sband;
4726 struct ieee80211_channel *channels;
4727 struct ieee80211_channel *geo_ch;
4728 struct ieee80211_rate *rates;
4729 int i = 0;
4730
4731 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4732 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
4733 IWL_DEBUG_INFO("Geography modes already initialized.\n");
4734 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4735 return 0;
4736 }
4737
4738 channels = kzalloc(sizeof(struct ieee80211_channel) *
4739 priv->channel_count, GFP_KERNEL);
4740 if (!channels)
4741 return -ENOMEM;
4742
4743 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
4744 GFP_KERNEL);
4745 if (!rates) {
4746 kfree(channels);
4747 return -ENOMEM;
4748 }
4749
4750 /* 5.2GHz channels start after the 2.4GHz channels */
4751 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4752 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
4753 /* just OFDM */
4754 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
4755 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
4756
4757 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_5GHZ);
4758
4759 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4760 sband->channels = channels;
4761 /* OFDM & CCK */
4762 sband->bitrates = rates;
4763 sband->n_bitrates = IWL_RATE_COUNT;
4764
4765 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_2GHZ);
4766
4767 priv->ieee_channels = channels;
4768 priv->ieee_rates = rates;
4769
4770 iwl4965_init_hw_rates(priv, rates);
4771
4772 for (i = 0; i < priv->channel_count; i++) {
4773 ch = &priv->channel_info[i];
4774
4775 /* FIXME: might be removed if scan is OK */
4776 if (!is_channel_valid(ch))
4777 continue;
4778
4779 if (is_channel_a_band(ch))
4780 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4781 else
4782 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4783
4784 geo_ch = &sband->channels[sband->n_channels++];
4785
4786 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
4787 geo_ch->max_power = ch->max_power_avg;
4788 geo_ch->max_antenna_gain = 0xff;
4789 geo_ch->hw_value = ch->channel;
4790
4791 if (is_channel_valid(ch)) {
4792 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
4793 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
4794
4795 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
4796 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
4797
4798 if (ch->flags & EEPROM_CHANNEL_RADAR)
4799 geo_ch->flags |= IEEE80211_CHAN_RADAR;
4800
4801 if (ch->max_power_avg > priv->max_channel_txpower_limit)
4802 priv->max_channel_txpower_limit =
4803 ch->max_power_avg;
4804 } else {
4805 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
4806 }
4807
4808 /* Save flags for reg domain usage */
4809 geo_ch->orig_flags = geo_ch->flags;
4810
4811 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4812 ch->channel, geo_ch->center_freq,
4813 is_channel_a_band(ch) ? "5.2" : "2.4",
4814 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4815 "restricted" : "valid",
4816 geo_ch->flags);
4817 }
4818
4819 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4820 priv->cfg->sku & IWL_SKU_A) {
4821 printk(KERN_INFO DRV_NAME
4822 ": Incorrectly detected BG card as ABG. Please send "
4823 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
4824 priv->pci_dev->device, priv->pci_dev->subsystem_device);
4825 priv->cfg->sku &= ~IWL_SKU_A;
4826 }
4827
4828 printk(KERN_INFO DRV_NAME
4829 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
4830 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4831 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
4832
4833 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4834 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4835 &priv->bands[IEEE80211_BAND_2GHZ];
4836 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4837 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4838 &priv->bands[IEEE80211_BAND_5GHZ];
4839
4840 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4841
4842 return 0;
4843}
4844
4845/*
4846 * iwl4965_free_geos - undo allocations in iwl4965_init_geos
4847 */
4848void iwl4965_free_geos(struct iwl_priv *priv)
4849{
4850 kfree(priv->ieee_channels);
4851 kfree(priv->ieee_rates);
4852 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4853}
4854
4855/****************************************************************************** 1735/******************************************************************************
4856 * 1736 *
4857 * uCode download functions 1737 * uCode download functions
@@ -4868,146 +1748,6 @@ static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
4868 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); 1748 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
4869} 1749}
4870 1750
4871/**
4872 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
4873 * looking at all data.
4874 */
4875static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
4876 u32 len)
4877{
4878 u32 val;
4879 u32 save_len = len;
4880 int rc = 0;
4881 u32 errcnt;
4882
4883 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4884
4885 rc = iwl_grab_nic_access(priv);
4886 if (rc)
4887 return rc;
4888
4889 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
4890
4891 errcnt = 0;
4892 for (; len > 0; len -= sizeof(u32), image++) {
4893 /* read data comes through single port, auto-incr addr */
4894 /* NOTE: Use the debugless read so we don't flood kernel log
4895 * if IWL_DL_IO is set */
4896 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4897 if (val != le32_to_cpu(*image)) {
4898 IWL_ERROR("uCode INST section is invalid at "
4899 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4900 save_len - len, val, le32_to_cpu(*image));
4901 rc = -EIO;
4902 errcnt++;
4903 if (errcnt >= 20)
4904 break;
4905 }
4906 }
4907
4908 iwl_release_nic_access(priv);
4909
4910 if (!errcnt)
4911 IWL_DEBUG_INFO
4912 ("ucode image in INSTRUCTION memory is good\n");
4913
4914 return rc;
4915}
4916
4917
4918/**
4919 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
4920 * using sample data 100 bytes apart. If these sample points are good,
4921 * it's a pretty good bet that everything between them is good, too.
4922 */
4923static int iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
4924{
4925 u32 val;
4926 int rc = 0;
4927 u32 errcnt = 0;
4928 u32 i;
4929
4930 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4931
4932 rc = iwl_grab_nic_access(priv);
4933 if (rc)
4934 return rc;
4935
4936 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
4937 /* read data comes through single port, auto-incr addr */
4938 /* NOTE: Use the debugless read so we don't flood kernel log
4939 * if IWL_DL_IO is set */
4940 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
4941 i + RTC_INST_LOWER_BOUND);
4942 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4943 if (val != le32_to_cpu(*image)) {
4944#if 0 /* Enable this if you want to see details */
4945 IWL_ERROR("uCode INST section is invalid at "
4946 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4947 i, val, *image);
4948#endif
4949 rc = -EIO;
4950 errcnt++;
4951 if (errcnt >= 3)
4952 break;
4953 }
4954 }
4955
4956 iwl_release_nic_access(priv);
4957
4958 return rc;
4959}
4960
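A user-space sketch of the sampling strategy in iwl4965_verify_inst_sparse() above, assuming flat host and device buffers instead of the indirect HBUS_TARG_MEM reads; the 100-byte stride and the three-error cutoff mirror the original, but the buffers in main() are made up.

#include <stdint.h>
#include <stdio.h>

static int verify_sparse(const uint32_t *host, const uint32_t *dev, uint32_t len)
{
	uint32_t i, errcnt = 0;

	/* compare one 32-bit word every 100 bytes, give up after 3 mismatches */
	for (i = 0; i < len; i += 100)
		if (dev[i / sizeof(uint32_t)] != host[i / sizeof(uint32_t)] &&
		    ++errcnt >= 3)
			break;

	return errcnt ? -1 : 0;
}

int main(void)
{
	uint32_t img[256] = { 0 }, copy[256] = { 0 };

	copy[25] = 0xdeadbeef;	/* corrupt the word at byte offset 100 */
	printf("clean: %d, corrupted: %d\n",
	       verify_sparse(img, img, sizeof(img)),
	       verify_sparse(img, copy, sizeof(img)));
	return 0;
}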
4961
4962/**
4963 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
4964 * and verify its contents
4965 */
4966static int iwl4965_verify_ucode(struct iwl_priv *priv)
4967{
4968 __le32 *image;
4969 u32 len;
4970 int rc = 0;
4971
4972 /* Try bootstrap */
4973 image = (__le32 *)priv->ucode_boot.v_addr;
4974 len = priv->ucode_boot.len;
4975 rc = iwl4965_verify_inst_sparse(priv, image, len);
4976 if (rc == 0) {
4977 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
4978 return 0;
4979 }
4980
4981 /* Try initialize */
4982 image = (__le32 *)priv->ucode_init.v_addr;
4983 len = priv->ucode_init.len;
4984 rc = iwl4965_verify_inst_sparse(priv, image, len);
4985 if (rc == 0) {
4986 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
4987 return 0;
4988 }
4989
4990 /* Try runtime/protocol */
4991 image = (__le32 *)priv->ucode_code.v_addr;
4992 len = priv->ucode_code.len;
4993 rc = iwl4965_verify_inst_sparse(priv, image, len);
4994 if (rc == 0) {
4995 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
4996 return 0;
4997 }
4998
4999 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
5000
5001 /* Since nothing seems to match, show first several data entries in
5002 * instruction SRAM, so maybe visual inspection will give a clue.
5003 * Selection of bootstrap image (vs. other images) is arbitrary. */
5004 image = (__le32 *)priv->ucode_boot.v_addr;
5005 len = priv->ucode_boot.len;
5006 rc = iwl4965_verify_inst_full(priv, image, len);
5007
5008 return rc;
5009}
5010
5011static void iwl4965_nic_start(struct iwl_priv *priv) 1751static void iwl4965_nic_start(struct iwl_priv *priv)
5012{ 1752{
5013 /* Remove all resets to allow NIC to operate */ 1753 /* Remove all resets to allow NIC to operate */
@@ -5022,7 +1762,7 @@ static void iwl4965_nic_start(struct iwl_priv *priv)
5022 */ 1762 */
5023static int iwl4965_read_ucode(struct iwl_priv *priv) 1763static int iwl4965_read_ucode(struct iwl_priv *priv)
5024{ 1764{
5025 struct iwl4965_ucode *ucode; 1765 struct iwl_ucode *ucode;
5026 int ret; 1766 int ret;
5027 const struct firmware *ucode_raw; 1767 const struct firmware *ucode_raw;
5028 const char *name = priv->cfg->fw_name; 1768 const char *name = priv->cfg->fw_name;
@@ -5083,34 +1823,34 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
5083 } 1823 }
5084 1824
5085 /* Verify that uCode images will fit in card's SRAM */ 1825 /* Verify that uCode images will fit in card's SRAM */
5086 if (inst_size > IWL_MAX_INST_SIZE) { 1826 if (inst_size > priv->hw_params.max_inst_size) {
5087 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n", 1827 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
5088 inst_size); 1828 inst_size);
5089 ret = -EINVAL; 1829 ret = -EINVAL;
5090 goto err_release; 1830 goto err_release;
5091 } 1831 }
5092 1832
5093 if (data_size > IWL_MAX_DATA_SIZE) { 1833 if (data_size > priv->hw_params.max_data_size) {
5094 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n", 1834 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
5095 data_size); 1835 data_size);
5096 ret = -EINVAL; 1836 ret = -EINVAL;
5097 goto err_release; 1837 goto err_release;
5098 } 1838 }
5099 if (init_size > IWL_MAX_INST_SIZE) { 1839 if (init_size > priv->hw_params.max_inst_size) {
5100 IWL_DEBUG_INFO 1840 IWL_DEBUG_INFO
5101 ("uCode init instr len %d too large to fit in\n", 1841 ("uCode init instr len %d too large to fit in\n",
5102 init_size); 1842 init_size);
5103 ret = -EINVAL; 1843 ret = -EINVAL;
5104 goto err_release; 1844 goto err_release;
5105 } 1845 }
5106 if (init_data_size > IWL_MAX_DATA_SIZE) { 1846 if (init_data_size > priv->hw_params.max_data_size) {
5107 IWL_DEBUG_INFO 1847 IWL_DEBUG_INFO
5108 ("uCode init data len %d too large to fit in\n", 1848 ("uCode init data len %d too large to fit in\n",
5109 init_data_size); 1849 init_data_size);
5110 ret = -EINVAL; 1850 ret = -EINVAL;
5111 goto err_release; 1851 goto err_release;
5112 } 1852 }
5113 if (boot_size > IWL_MAX_BSM_SIZE) { 1853 if (boot_size > priv->hw_params.max_bsm_size) {
5114 IWL_DEBUG_INFO 1854 IWL_DEBUG_INFO
5115 ("uCode boot instr len %d too large to fit in\n", 1855 ("uCode boot instr len %d too large to fit in\n",
5116 boot_size); 1856 boot_size);
@@ -5211,111 +1951,12 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
5211 return ret; 1951 return ret;
5212} 1952}
5213 1953
5214
5215/**
5216 * iwl4965_set_ucode_ptrs - Set uCode address location
5217 *
5218 * Tell initialization uCode where to find runtime uCode.
5219 *
5220 * BSM registers initially contain pointers to initialization uCode.
5221 * We need to replace them to load runtime uCode inst and data,
5222 * and to save runtime data when powering down.
5223 */
5224static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
5225{
5226 dma_addr_t pinst;
5227 dma_addr_t pdata;
5228 int rc = 0;
5229 unsigned long flags;
5230
5231 /* bits 35:4 for 4965 */
5232 pinst = priv->ucode_code.p_addr >> 4;
5233 pdata = priv->ucode_data_backup.p_addr >> 4;
5234
5235 spin_lock_irqsave(&priv->lock, flags);
5236 rc = iwl_grab_nic_access(priv);
5237 if (rc) {
5238 spin_unlock_irqrestore(&priv->lock, flags);
5239 return rc;
5240 }
5241
5242 /* Tell bootstrap uCode where to find image to load */
5243 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5244 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5245 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
5246 priv->ucode_data.len);
5247
5248 /* Inst bytecount must be last to set up, bit 31 signals uCode
5249 * that all new ptr/size info is in place */
5250 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
5251 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
5252
5253 iwl_release_nic_access(priv);
5254
5255 spin_unlock_irqrestore(&priv->lock, flags);
5256
5257 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
5258
5259 return rc;
5260}
5261
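A small illustration (not driver code) of the pointer encoding in iwl4965_set_ucode_ptrs() above: per the "bits 35:4" comment, the BSM registers carry DRAM address bits 35:4, so a 16-byte-aligned DMA address is programmed shifted right by four. The address below is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = 0x1234560f0ULL;		/* 16-byte aligned */
	uint32_t bsm_val = (uint32_t)(dma_addr >> 4);	/* bits 35:4 */

	printf("DMA 0x%09llx -> BSM pointer 0x%08x\n",
	       (unsigned long long)dma_addr, bsm_val);
	return 0;
}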
5262/**
5263 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
5264 *
5265 * Called after REPLY_ALIVE notification received from "initialize" uCode.
5266 *
5267 * The 4965 "initialize" ALIVE reply contains calibration data for:
5268 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
5269 * (3945 does not contain this data).
5270 *
5271 * Tell "initialize" uCode to go ahead and load the runtime uCode.
5272*/
5273static void iwl4965_init_alive_start(struct iwl_priv *priv)
5274{
5275 /* Check alive response for "valid" sign from uCode */
5276 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
5277 /* We had an error bringing up the hardware, so take it
5278 * all the way back down so we can try again */
5279 IWL_DEBUG_INFO("Initialize Alive failed.\n");
5280 goto restart;
5281 }
5282
5283 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
5284 * This is a paranoid check, because we would not have gotten the
5285 * "initialize" alive if code weren't properly loaded. */
5286 if (iwl4965_verify_ucode(priv)) {
5287 /* Runtime instruction load was bad;
5288 * take it all the way back down so we can try again */
5289 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
5290 goto restart;
5291 }
5292
5293 /* Calculate temperature */
5294 priv->temperature = iwl4965_get_temperature(priv);
5295
5296 /* Send pointers to protocol/runtime uCode image ... init code will
5297 * load and launch runtime uCode, which will send us another "Alive"
5298 * notification. */
5299 IWL_DEBUG_INFO("Initialization Alive received.\n");
5300 if (iwl4965_set_ucode_ptrs(priv)) {
5301 /* Runtime instruction load won't happen;
5302 * take it all the way back down so we can try again */
5303 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
5304 goto restart;
5305 }
5306 return;
5307
5308 restart:
5309 queue_work(priv->workqueue, &priv->restart);
5310}
5311
5312
5313/** 1954/**
5314 * iwl4965_alive_start - called after REPLY_ALIVE notification received 1955 * iwl_alive_start - called after REPLY_ALIVE notification received
5315 * from protocol/runtime uCode (initialization uCode's 1956 * from protocol/runtime uCode (initialization uCode's
5316 * Alive gets handled by iwl4965_init_alive_start()). 1957 * Alive gets handled by iwl_init_alive_start()).
5317 */ 1958 */
5318static void iwl4965_alive_start(struct iwl_priv *priv) 1959static void iwl_alive_start(struct iwl_priv *priv)
5319{ 1960{
5320 int ret = 0; 1961 int ret = 0;
5321 1962
@@ -5331,15 +1972,14 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5331 /* Initialize uCode has loaded Runtime uCode ... verify inst image. 1972 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5332 * This is a paranoid check, because we would not have gotten the 1973 * This is a paranoid check, because we would not have gotten the
5333 * "runtime" alive if code weren't properly loaded. */ 1974 * "runtime" alive if code weren't properly loaded. */
5334 if (iwl4965_verify_ucode(priv)) { 1975 if (iwl_verify_ucode(priv)) {
5335 /* Runtime instruction load was bad; 1976 /* Runtime instruction load was bad;
5336 * take it all the way back down so we can try again */ 1977 * take it all the way back down so we can try again */
5337 IWL_DEBUG_INFO("Bad runtime uCode load.\n"); 1978 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
5338 goto restart; 1979 goto restart;
5339 } 1980 }
5340 1981
5341 iwlcore_clear_stations_table(priv); 1982 iwl_clear_stations_table(priv);
5342
5343 ret = priv->cfg->ops->lib->alive_notify(priv); 1983 ret = priv->cfg->ops->lib->alive_notify(priv);
5344 if (ret) { 1984 if (ret) {
5345 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n", 1985 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
@@ -5350,22 +1990,17 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5350 /* After the ALIVE response, we can send host commands to 4965 uCode */ 1990 /* After the ALIVE response, we can send host commands to 4965 uCode */
5351 set_bit(STATUS_ALIVE, &priv->status); 1991 set_bit(STATUS_ALIVE, &priv->status);
5352 1992
5353 /* Clear out the uCode error bit if it is set */
5354 clear_bit(STATUS_FW_ERROR, &priv->status);
5355
5356 if (iwl_is_rfkill(priv)) 1993 if (iwl_is_rfkill(priv))
5357 return; 1994 return;
5358 1995
5359 ieee80211_start_queues(priv->hw); 1996 ieee80211_wake_queues(priv->hw);
5360 1997
5361 priv->active_rate = priv->rates_mask; 1998 priv->active_rate = priv->rates_mask;
5362 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 1999 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
5363 2000
5364 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
5365
5366 if (iwl_is_associated(priv)) { 2001 if (iwl_is_associated(priv)) {
5367 struct iwl4965_rxon_cmd *active_rxon = 2002 struct iwl_rxon_cmd *active_rxon =
5368 (struct iwl4965_rxon_cmd *)(&priv->active_rxon); 2003 (struct iwl_rxon_cmd *)&priv->active_rxon;
5369 2004
5370 memcpy(&priv->staging_rxon, &priv->active_rxon, 2005 memcpy(&priv->staging_rxon, &priv->active_rxon,
5371 sizeof(priv->staging_rxon)); 2006 sizeof(priv->staging_rxon));
@@ -5379,13 +2014,13 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5379 /* Configure Bluetooth device coexistence support */ 2014 /* Configure Bluetooth device coexistence support */
5380 iwl4965_send_bt_config(priv); 2015 iwl4965_send_bt_config(priv);
5381 2016
2017 iwl_reset_run_time_calib(priv);
2018
5382 /* Configure the adapter for unassociated operation */ 2019 /* Configure the adapter for unassociated operation */
5383 iwl4965_commit_rxon(priv); 2020 iwl4965_commit_rxon(priv);
5384 2021
5385 /* At this point, the NIC is initialized and operational */ 2022 /* At this point, the NIC is initialized and operational */
5386 priv->notif_missed_beacons = 0; 2023 iwl_rf_kill_ct_config(priv);
5387
5388 iwl4965_rf_kill_ct_config(priv);
5389 2024
5390 iwl_leds_register(priv); 2025 iwl_leds_register(priv);
5391 2026
@@ -5396,34 +2031,33 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5396 if (priv->error_recovering) 2031 if (priv->error_recovering)
5397 iwl4965_error_recovery(priv); 2032 iwl4965_error_recovery(priv);
5398 2033
5399 iwlcore_low_level_notify(priv, IWLCORE_START_EVT); 2034 iwl_power_update_mode(priv, 1);
5400 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC); 2035 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
2036
2037 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
2038 iwl4965_set_mode(priv, priv->iw_mode);
2039
5401 return; 2040 return;
5402 2041
5403 restart: 2042 restart:
5404 queue_work(priv->workqueue, &priv->restart); 2043 queue_work(priv->workqueue, &priv->restart);
5405} 2044}
5406 2045
5407static void iwl4965_cancel_deferred_work(struct iwl_priv *priv); 2046static void iwl_cancel_deferred_work(struct iwl_priv *priv);
5408 2047
5409static void __iwl4965_down(struct iwl_priv *priv) 2048static void __iwl4965_down(struct iwl_priv *priv)
5410{ 2049{
5411 unsigned long flags; 2050 unsigned long flags;
5412 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 2051 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
5413 struct ieee80211_conf *conf = NULL;
5414 2052
5415 IWL_DEBUG_INFO(DRV_NAME " is going down\n"); 2053 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
5416 2054
5417 conf = ieee80211_get_hw_conf(priv->hw);
5418
5419 if (!exit_pending) 2055 if (!exit_pending)
5420 set_bit(STATUS_EXIT_PENDING, &priv->status); 2056 set_bit(STATUS_EXIT_PENDING, &priv->status);
5421 2057
5422 iwl_leds_unregister(priv); 2058 iwl_leds_unregister(priv);
5423 2059
5424 iwlcore_low_level_notify(priv, IWLCORE_STOP_EVT); 2060 iwl_clear_stations_table(priv);
5425
5426 iwlcore_clear_stations_table(priv);
5427 2061
5428 /* Unblock any waiting calls */ 2062 /* Unblock any waiting calls */
5429 wake_up_interruptible_all(&priv->wait_command_queue); 2063 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -5455,7 +2089,9 @@ static void __iwl4965_down(struct iwl_priv *priv)
5455 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 2089 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5456 STATUS_GEO_CONFIGURED | 2090 STATUS_GEO_CONFIGURED |
5457 test_bit(STATUS_IN_SUSPEND, &priv->status) << 2091 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5458 STATUS_IN_SUSPEND; 2092 STATUS_IN_SUSPEND |
2093 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2094 STATUS_EXIT_PENDING;
5459 goto exit; 2095 goto exit;
5460 } 2096 }
5461 2097
@@ -5470,15 +2106,17 @@ static void __iwl4965_down(struct iwl_priv *priv)
5470 test_bit(STATUS_IN_SUSPEND, &priv->status) << 2106 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5471 STATUS_IN_SUSPEND | 2107 STATUS_IN_SUSPEND |
5472 test_bit(STATUS_FW_ERROR, &priv->status) << 2108 test_bit(STATUS_FW_ERROR, &priv->status) <<
5473 STATUS_FW_ERROR; 2109 STATUS_FW_ERROR |
2110 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2111 STATUS_EXIT_PENDING;
5474 2112
5475 spin_lock_irqsave(&priv->lock, flags); 2113 spin_lock_irqsave(&priv->lock, flags);
5476 iwl_clear_bit(priv, CSR_GP_CNTRL, 2114 iwl_clear_bit(priv, CSR_GP_CNTRL,
5477 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2115 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5478 spin_unlock_irqrestore(&priv->lock, flags); 2116 spin_unlock_irqrestore(&priv->lock, flags);
5479 2117
5480 iwl4965_hw_txq_ctx_stop(priv); 2118 iwl_txq_ctx_stop(priv);
5481 iwl4965_hw_rxq_stop(priv); 2119 iwl_rxq_stop(priv);
5482 2120
5483 spin_lock_irqsave(&priv->lock, flags); 2121 spin_lock_irqsave(&priv->lock, flags);
5484 if (!iwl_grab_nic_access(priv)) { 2122 if (!iwl_grab_nic_access(priv)) {
@@ -5490,19 +2128,19 @@ static void __iwl4965_down(struct iwl_priv *priv)
5490 2128
5491 udelay(5); 2129 udelay(5);
5492 2130
5493 iwl4965_hw_nic_stop_master(priv); 2131 /* FIXME: apm_ops.suspend(priv) */
5494 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 2132 priv->cfg->ops->lib->apm_ops.reset(priv);
5495 iwl4965_hw_nic_reset(priv); 2133 priv->cfg->ops->lib->free_shared_mem(priv);
5496 2134
5497 exit: 2135 exit:
5498 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp)); 2136 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
5499 2137
5500 if (priv->ibss_beacon) 2138 if (priv->ibss_beacon)
5501 dev_kfree_skb(priv->ibss_beacon); 2139 dev_kfree_skb(priv->ibss_beacon);
5502 priv->ibss_beacon = NULL; 2140 priv->ibss_beacon = NULL;
5503 2141
5504 /* clear out any free frames */ 2142 /* clear out any free frames */
5505 iwl4965_clear_free_frames(priv); 2143 iwl_clear_free_frames(priv);
5506} 2144}
5507 2145
5508static void iwl4965_down(struct iwl_priv *priv) 2146static void iwl4965_down(struct iwl_priv *priv)
@@ -5511,7 +2149,7 @@ static void iwl4965_down(struct iwl_priv *priv)
5511 __iwl4965_down(priv); 2149 __iwl4965_down(priv);
5512 mutex_unlock(&priv->mutex); 2150 mutex_unlock(&priv->mutex);
5513 2151
5514 iwl4965_cancel_deferred_work(priv); 2152 iwl_cancel_deferred_work(priv);
5515} 2153}
5516 2154
5517#define MAX_HW_RESTARTS 5 2155#define MAX_HW_RESTARTS 5
@@ -5526,13 +2164,6 @@ static int __iwl4965_up(struct iwl_priv *priv)
5526 return -EIO; 2164 return -EIO;
5527 } 2165 }
5528 2166
5529 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
5530 IWL_WARNING("Radio disabled by SW RF kill (module "
5531 "parameter)\n");
5532 iwl_rfkill_set_hw_state(priv);
5533 return -ENODEV;
5534 }
5535
5536 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 2167 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
5537 IWL_ERROR("ucode not available for device bringup\n"); 2168 IWL_ERROR("ucode not available for device bringup\n");
5538 return -EIO; 2169 return -EIO;
@@ -5542,19 +2173,25 @@ static int __iwl4965_up(struct iwl_priv *priv)
5542 if (iwl_read32(priv, CSR_GP_CNTRL) & 2173 if (iwl_read32(priv, CSR_GP_CNTRL) &
5543 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2174 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5544 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2175 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5545 else { 2176 else
5546 set_bit(STATUS_RF_KILL_HW, &priv->status); 2177 set_bit(STATUS_RF_KILL_HW, &priv->status);
5547 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) { 2178
5548 iwl_rfkill_set_hw_state(priv); 2179 if (!test_bit(STATUS_IN_SUSPEND, &priv->status) &&
5549 IWL_WARNING("Radio disabled by HW RF Kill switch\n"); 2180 iwl_is_rfkill(priv)) {
5550 return -ENODEV; 2181 IWL_WARNING("Radio disabled by %s RF Kill switch\n",
5551 } 2182 test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW");
2183 return -ENODEV;
5552 } 2184 }
5553 2185
5554 iwl_rfkill_set_hw_state(priv);
5555 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2186 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
5556 2187
5557 ret = priv->cfg->ops->lib->hw_nic_init(priv); 2188 ret = priv->cfg->ops->lib->alloc_shared_mem(priv);
2189 if (ret) {
2190 IWL_ERROR("Unable to allocate shared memory\n");
2191 return ret;
2192 }
2193
2194 ret = iwl_hw_nic_init(priv);
5558 if (ret) { 2195 if (ret) {
5559 IWL_ERROR("Unable to init nic\n"); 2196 IWL_ERROR("Unable to init nic\n");
5560 return ret; 2197 return ret;
@@ -5580,12 +2217,13 @@ static int __iwl4965_up(struct iwl_priv *priv)
5580 priv->ucode_data.len); 2217 priv->ucode_data.len);
5581 2218
5582 /* We return success when we resume from suspend and rf_kill is on. */ 2219 /* We return success when we resume from suspend and rf_kill is on. */
5583 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) 2220 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
2221 test_bit(STATUS_RF_KILL_SW, &priv->status))
5584 return 0; 2222 return 0;
5585 2223
5586 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2224 for (i = 0; i < MAX_HW_RESTARTS; i++) {
5587 2225
5588 iwlcore_clear_stations_table(priv); 2226 iwl_clear_stations_table(priv);
5589 2227
5590 /* load bootstrap state machine, 2228 /* load bootstrap state machine,
5591 * load bootstrap program into processor's memory, 2229 * load bootstrap program into processor's memory,
@@ -5597,6 +2235,9 @@ static int __iwl4965_up(struct iwl_priv *priv)
5597 continue; 2235 continue;
5598 } 2236 }
5599 2237
2238 /* Clear out the uCode error bit if it is set */
2239 clear_bit(STATUS_FW_ERROR, &priv->status);
2240
5600 /* start card; "initialize" will load runtime ucode */ 2241 /* start card; "initialize" will load runtime ucode */
5601 iwl4965_nic_start(priv); 2242 iwl4965_nic_start(priv);
5602 2243
@@ -5607,6 +2248,7 @@ static int __iwl4965_up(struct iwl_priv *priv)
5607 2248
5608 set_bit(STATUS_EXIT_PENDING, &priv->status); 2249 set_bit(STATUS_EXIT_PENDING, &priv->status);
5609 __iwl4965_down(priv); 2250 __iwl4965_down(priv);
2251 clear_bit(STATUS_EXIT_PENDING, &priv->status);
5610 2252
5611 /* tried to restart and config the device for as long as our 2253 /* tried to restart and config the device for as long as our
5612 * patience could withstand */ 2254 * patience could withstand */
@@ -5621,7 +2263,7 @@ static int __iwl4965_up(struct iwl_priv *priv)
5621 * 2263 *
5622 *****************************************************************************/ 2264 *****************************************************************************/
5623 2265
5624static void iwl4965_bg_init_alive_start(struct work_struct *data) 2266static void iwl_bg_init_alive_start(struct work_struct *data)
5625{ 2267{
5626 struct iwl_priv *priv = 2268 struct iwl_priv *priv =
5627 container_of(data, struct iwl_priv, init_alive_start.work); 2269 container_of(data, struct iwl_priv, init_alive_start.work);
@@ -5630,11 +2272,11 @@ static void iwl4965_bg_init_alive_start(struct work_struct *data)
5630 return; 2272 return;
5631 2273
5632 mutex_lock(&priv->mutex); 2274 mutex_lock(&priv->mutex);
5633 iwl4965_init_alive_start(priv); 2275 priv->cfg->ops->lib->init_alive_start(priv);
5634 mutex_unlock(&priv->mutex); 2276 mutex_unlock(&priv->mutex);
5635} 2277}
5636 2278
5637static void iwl4965_bg_alive_start(struct work_struct *data) 2279static void iwl_bg_alive_start(struct work_struct *data)
5638{ 2280{
5639 struct iwl_priv *priv = 2281 struct iwl_priv *priv =
5640 container_of(data, struct iwl_priv, alive_start.work); 2282 container_of(data, struct iwl_priv, alive_start.work);
@@ -5643,7 +2285,7 @@ static void iwl4965_bg_alive_start(struct work_struct *data)
5643 return; 2285 return;
5644 2286
5645 mutex_lock(&priv->mutex); 2287 mutex_lock(&priv->mutex);
5646 iwl4965_alive_start(priv); 2288 iwl_alive_start(priv);
5647 mutex_unlock(&priv->mutex); 2289 mutex_unlock(&priv->mutex);
5648} 2290}
5649 2291
@@ -5659,7 +2301,7 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
5659 mutex_lock(&priv->mutex); 2301 mutex_lock(&priv->mutex);
5660 2302
5661 if (!iwl_is_rfkill(priv)) { 2303 if (!iwl_is_rfkill(priv)) {
5662 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, 2304 IWL_DEBUG(IWL_DL_RF_KILL,
5663 "HW and/or SW RF Kill no longer active, restarting " 2305 "HW and/or SW RF Kill no longer active, restarting "
5664 "device\n"); 2306 "device\n");
5665 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 2307 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -5677,239 +2319,53 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
5677 "Kill switch must be turned off for " 2319 "Kill switch must be turned off for "
5678 "wireless networking to work.\n"); 2320 "wireless networking to work.\n");
5679 } 2321 }
5680 iwl_rfkill_set_hw_state(priv);
5681
5682 mutex_unlock(&priv->mutex); 2322 mutex_unlock(&priv->mutex);
2323 iwl_rfkill_set_hw_state(priv);
5683} 2324}
5684 2325
5685#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 2326static void iwl4965_bg_set_monitor(struct work_struct *work)
5686
5687static void iwl4965_bg_scan_check(struct work_struct *data)
5688{ 2327{
5689 struct iwl_priv *priv = 2328 struct iwl_priv *priv = container_of(work,
5690 container_of(data, struct iwl_priv, scan_check.work); 2329 struct iwl_priv, set_monitor);
2330 int ret;
5691 2331
5692 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2332 IWL_DEBUG(IWL_DL_STATE, "setting monitor mode\n");
5693 return;
5694 2333
5695 mutex_lock(&priv->mutex); 2334 mutex_lock(&priv->mutex);
5696 if (test_bit(STATUS_SCANNING, &priv->status) ||
5697 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5698 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
5699 "Scan completion watchdog resetting adapter (%dms)\n",
5700 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
5701 2335
5702 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 2336 ret = iwl4965_set_mode(priv, IEEE80211_IF_TYPE_MNTR);
5703 iwl4965_send_scan_abort(priv); 2337
2338 if (ret) {
2339 if (ret == -EAGAIN)
2340 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
2341 else
2342 IWL_ERROR("iwl4965_set_mode() failed ret = %d\n", ret);
5704 } 2343 }
2344
5705 mutex_unlock(&priv->mutex); 2345 mutex_unlock(&priv->mutex);
5706} 2346}
5707 2347
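The scan watchdog removed above expresses its interval in jiffies, so IWL_SCAN_CHECK_WATCHDOG (7 * HZ) is seven seconds regardless of the tick rate the kernel was built with, and jiffies_to_msecs() converts it back for the log message. A small userspace stand-in for that arithmetic (HZ is fixed at 250 here purely for the example):

#include <stdio.h>

#define HZ 250				/* example tick rate; the real value is a kernel config choice */
#define SCAN_CHECK_WATCHDOG (7 * HZ)	/* mirrors IWL_SCAN_CHECK_WATCHDOG above */

/* Userspace stand-in for jiffies_to_msecs(); exact for multiples of HZ. */
static unsigned int jiffies_to_msecs_demo(unsigned long j)
{
	return (unsigned int)(j * 1000 / HZ);
}

int main(void)
{
	/* prints 7000 -- the watchdog fires 7 s after the scan was issued,
	 * whatever HZ happens to be */
	printf("%u\n", jiffies_to_msecs_demo(SCAN_CHECK_WATCHDOG));
	return 0;
}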
5708static void iwl4965_bg_request_scan(struct work_struct *data) 2348static void iwl_bg_run_time_calib_work(struct work_struct *work)
5709{ 2349{
5710 struct iwl_priv *priv = 2350 struct iwl_priv *priv = container_of(work, struct iwl_priv,
5711 container_of(data, struct iwl_priv, request_scan); 2351 run_time_calib_work);
5712 struct iwl_host_cmd cmd = {
5713 .id = REPLY_SCAN_CMD,
5714 .len = sizeof(struct iwl4965_scan_cmd),
5715 .meta.flags = CMD_SIZE_HUGE,
5716 };
5717 struct iwl4965_scan_cmd *scan;
5718 struct ieee80211_conf *conf = NULL;
5719 u16 cmd_len;
5720 enum ieee80211_band band;
5721 u8 direct_mask;
5722 int ret = 0;
5723
5724 conf = ieee80211_get_hw_conf(priv->hw);
5725 2352
5726 mutex_lock(&priv->mutex); 2353 mutex_lock(&priv->mutex);
5727 2354
5728 if (!iwl_is_ready(priv)) { 2355 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
5729 IWL_WARNING("request scan called when driver not ready.\n"); 2356 test_bit(STATUS_SCANNING, &priv->status)) {
5730 goto done; 2357 mutex_unlock(&priv->mutex);
5731 } 2358 return;
5732
5733 /* Make sure the scan wasn't cancelled before this queued work
5734 * was given the chance to run... */
5735 if (!test_bit(STATUS_SCANNING, &priv->status))
5736 goto done;
5737
5738 /* This should never be called or scheduled if there is currently
5739 * a scan active in the hardware. */
5740 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
5741 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
5742 "Ignoring second request.\n");
5743 ret = -EIO;
5744 goto done;
5745 }
5746
5747 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5748 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5749 goto done;
5750 }
5751
5752 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5753 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5754 goto done;
5755 }
5756
5757 if (iwl_is_rfkill(priv)) {
5758 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5759 goto done;
5760 }
5761
5762 if (!test_bit(STATUS_READY, &priv->status)) {
5763 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
5764 goto done;
5765 }
5766
5767 if (!priv->scan_bands) {
5768 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
5769 goto done;
5770 }
5771
5772 if (!priv->scan) {
5773 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) +
5774 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
5775 if (!priv->scan) {
5776 ret = -ENOMEM;
5777 goto done;
5778 }
5779 } 2359 }
5780 scan = priv->scan;
5781 memset(scan, 0, sizeof(struct iwl4965_scan_cmd) + IWL_MAX_SCAN_SIZE);
5782 2360
5783 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 2361 if (priv->start_calib) {
5784 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 2362 iwl_chain_noise_calibration(priv, &priv->statistics);
5785 2363
5786 if (iwl_is_associated(priv)) { 2364 iwl_sensitivity_calibration(priv, &priv->statistics);
5787 u16 interval = 0;
5788 u32 extra;
5789 u32 suspend_time = 100;
5790 u32 scan_suspend_time = 100;
5791 unsigned long flags;
5792
5793 IWL_DEBUG_INFO("Scanning while associated...\n");
5794
5795 spin_lock_irqsave(&priv->lock, flags);
5796 interval = priv->beacon_int;
5797 spin_unlock_irqrestore(&priv->lock, flags);
5798
5799 scan->suspend_time = 0;
5800 scan->max_out_time = cpu_to_le32(200 * 1024);
5801 if (!interval)
5802 interval = suspend_time;
5803
5804 extra = (suspend_time / interval) << 22;
5805 scan_suspend_time = (extra |
5806 ((suspend_time % interval) * 1024));
5807 scan->suspend_time = cpu_to_le32(scan_suspend_time);
5808 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
5809 scan_suspend_time, interval);
5810 }
5811
5812 /* We should add the ability for user to lock to PASSIVE ONLY */
5813 if (priv->one_direct_scan) {
5814 IWL_DEBUG_SCAN
5815 ("Kicking off one direct scan for '%s'\n",
5816 iwl4965_escape_essid(priv->direct_ssid,
5817 priv->direct_ssid_len));
5818 scan->direct_scan[0].id = WLAN_EID_SSID;
5819 scan->direct_scan[0].len = priv->direct_ssid_len;
5820 memcpy(scan->direct_scan[0].ssid,
5821 priv->direct_ssid, priv->direct_ssid_len);
5822 direct_mask = 1;
5823 } else if (!iwl_is_associated(priv) && priv->essid_len) {
5824 IWL_DEBUG_SCAN
5825 ("Kicking off one direct scan for '%s' when not associated\n",
5826 iwl4965_escape_essid(priv->essid, priv->essid_len));
5827 scan->direct_scan[0].id = WLAN_EID_SSID;
5828 scan->direct_scan[0].len = priv->essid_len;
5829 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
5830 direct_mask = 1;
5831 } else {
5832 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
5833 direct_mask = 0;
5834 } 2365 }
5835 2366
5836 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
5837 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
5838 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
5839
5840
5841 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
5842 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
5843 scan->tx_cmd.rate_n_flags =
5844 iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
5845 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
5846
5847 scan->good_CRC_th = 0;
5848 band = IEEE80211_BAND_2GHZ;
5849 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
5850 scan->tx_cmd.rate_n_flags =
5851 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
5852 RATE_MCS_ANT_B_MSK);
5853 scan->good_CRC_th = IWL_GOOD_CRC_TH;
5854 band = IEEE80211_BAND_5GHZ;
5855 } else {
5856 IWL_WARNING("Invalid scan band count\n");
5857 goto done;
5858 }
5859
5860 /* We don't build a direct scan probe request; the uCode will do
5861 * that based on the direct_mask added to each channel entry */
5862 cmd_len = iwl4965_fill_probe_req(priv, band,
5863 (struct ieee80211_mgmt *)scan->data,
5864 IWL_MAX_SCAN_SIZE - sizeof(*scan), 0);
5865
5866 scan->tx_cmd.len = cpu_to_le16(cmd_len);
5867 /* select Rx chains */
5868
5869 /* Force use of chains B and C (0x6) for scan Rx.
5870 * Avoid A (0x1) because of its off-channel reception on A-band.
5871 * MIMO is not used here, but value is required to make uCode happy. */
5872 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
5873 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
5874 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
5875 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
5876
5877 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
5878 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
5879
5880 if (direct_mask)
5881 scan->channel_count =
5882 iwl4965_get_channels_for_scan(
5883 priv, band, 1, /* active */
5884 direct_mask,
5885 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
5886 else
5887 scan->channel_count =
5888 iwl4965_get_channels_for_scan(
5889 priv, band, 0, /* passive */
5890 direct_mask,
5891 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
5892
5893 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
5894 scan->channel_count * sizeof(struct iwl4965_scan_channel);
5895 cmd.data = scan;
5896 scan->len = cpu_to_le16(cmd.len);
5897
5898 set_bit(STATUS_SCAN_HW, &priv->status);
5899 ret = iwl_send_cmd_sync(priv, &cmd);
5900 if (ret)
5901 goto done;
5902
5903 queue_delayed_work(priv->workqueue, &priv->scan_check,
5904 IWL_SCAN_CHECK_WATCHDOG);
5905
5906 mutex_unlock(&priv->mutex); 2367 mutex_unlock(&priv->mutex);
5907 return; 2368 return;
5908
5909 done:
5910 /* inform mac80211 scan aborted */
5911 queue_work(priv->workqueue, &priv->scan_completed);
5912 mutex_unlock(&priv->mutex);
5913} 2369}
5914 2370
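One detail worth calling out in the scan builder removed above (this series moves it into iwl-scan.c): when scanning while associated, the pause between channel bursts is packed into a single 32-bit suspend_time field, with the whole number of beacon intervals above bit 22 and the remainder scaled by 1024 in the low bits. A standalone illustration of just that arithmetic; pack_suspend_time() and the example values are inventions of this sketch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the packing in the removed scan builder: quotient of
 * suspend_time / beacon interval above bit 22, remainder * 1024 below. */
static uint32_t pack_suspend_time(uint32_t suspend_time, uint32_t interval)
{
	uint32_t extra = (suspend_time / interval) << 22;

	return extra | ((suspend_time % interval) * 1024);
}

int main(void)
{
	/* example values: suspend 100, beacon interval 100 */
	printf("0x%08" PRIX32 "\n", pack_suspend_time(100, 100));
	return 0;
}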
5915static void iwl4965_bg_up(struct work_struct *data) 2371static void iwl4965_bg_up(struct work_struct *data)
@@ -5922,6 +2378,7 @@ static void iwl4965_bg_up(struct work_struct *data)
5922 mutex_lock(&priv->mutex); 2378 mutex_lock(&priv->mutex);
5923 __iwl4965_up(priv); 2379 __iwl4965_up(priv);
5924 mutex_unlock(&priv->mutex); 2380 mutex_unlock(&priv->mutex);
2381 iwl_rfkill_set_hw_state(priv);
5925} 2382}
5926 2383
5927static void iwl4965_bg_restart(struct work_struct *data) 2384static void iwl4965_bg_restart(struct work_struct *data)
@@ -5944,7 +2401,7 @@ static void iwl4965_bg_rx_replenish(struct work_struct *data)
5944 return; 2401 return;
5945 2402
5946 mutex_lock(&priv->mutex); 2403 mutex_lock(&priv->mutex);
5947 iwl4965_rx_replenish(priv); 2404 iwl_rx_replenish(priv);
5948 mutex_unlock(&priv->mutex); 2405 mutex_unlock(&priv->mutex);
5949} 2406}
5950 2407
@@ -5955,6 +2412,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
5955 struct ieee80211_conf *conf = NULL; 2412 struct ieee80211_conf *conf = NULL;
5956 int ret = 0; 2413 int ret = 0;
5957 DECLARE_MAC_BUF(mac); 2414 DECLARE_MAC_BUF(mac);
2415 unsigned long flags;
5958 2416
5959 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2417 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
5960 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); 2418 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
@@ -5973,7 +2431,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
5973 if (!priv->vif || !priv->is_open) 2431 if (!priv->vif || !priv->is_open)
5974 return; 2432 return;
5975 2433
5976 iwl4965_scan_cancel_timeout(priv, 200); 2434 iwl_scan_cancel_timeout(priv, 200);
5977 2435
5978 conf = ieee80211_get_hw_conf(priv->hw); 2436 conf = ieee80211_get_hw_conf(priv->hw);
5979 2437
@@ -5990,11 +2448,10 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
5990 2448
5991 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2449 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
5992 2450
5993#ifdef CONFIG_IWL4965_HT
5994 if (priv->current_ht_config.is_ht) 2451 if (priv->current_ht_config.is_ht)
5995 iwl4965_set_rxon_ht(priv, &priv->current_ht_config); 2452 iwl_set_rxon_ht(priv, &priv->current_ht_config);
5996#endif /* CONFIG_IWL4965_HT*/ 2453
5997 iwl4965_set_rxon_chain(priv); 2454 iwl_set_rxon_chain(priv);
5998 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 2455 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
5999 2456
6000 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", 2457 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
@@ -6020,17 +2477,14 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
6020 2477
6021 switch (priv->iw_mode) { 2478 switch (priv->iw_mode) {
6022 case IEEE80211_IF_TYPE_STA: 2479 case IEEE80211_IF_TYPE_STA:
6023 iwl4965_rate_scale_init(priv->hw, IWL_AP_ID);
6024 break; 2480 break;
6025 2481
6026 case IEEE80211_IF_TYPE_IBSS: 2482 case IEEE80211_IF_TYPE_IBSS:
6027 2483
6028 /* clear out the station table */ 2484 /* assume default assoc id */
6029 iwlcore_clear_stations_table(priv); 2485 priv->assoc_id = 1;
6030 2486
6031 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0); 2487 iwl_rxon_add_station(priv, priv->bssid, 0);
6032 iwl4965_rxon_add_station(priv, priv->bssid, 0);
6033 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
6034 iwl4965_send_beacon_cmd(priv); 2488 iwl4965_send_beacon_cmd(priv);
6035 2489
6036 break; 2490 break;
@@ -6041,58 +2495,30 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
6041 break; 2495 break;
6042 } 2496 }
6043 2497
6044 iwl4965_sequence_reset(priv);
6045
6046#ifdef CONFIG_IWL4965_SENSITIVITY
6047 /* Enable Rx differential gain and sensitivity calibrations */ 2498 /* Enable Rx differential gain and sensitivity calibrations */
6048 iwl4965_chain_noise_reset(priv); 2499 iwl_chain_noise_reset(priv);
6049 priv->start_calib = 1; 2500 priv->start_calib = 1;
6050#endif /* CONFIG_IWL4965_SENSITIVITY */
6051 2501
6052 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 2502 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
6053 priv->assoc_station_added = 1; 2503 priv->assoc_station_added = 1;
6054 2504
6055 iwl4965_activate_qos(priv, 0); 2505 spin_lock_irqsave(&priv->lock, flags);
2506 iwl_activate_qos(priv, 0);
2507 spin_unlock_irqrestore(&priv->lock, flags);
6056 2508
2509 iwl_power_update_mode(priv, 0);
6057 /* we have just associated, don't start scan too early */ 2510 /* we have just associated, don't start scan too early */
6058 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 2511 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
6059} 2512}
6060 2513
6061
6062static void iwl4965_bg_post_associate(struct work_struct *data)
6063{
6064 struct iwl_priv *priv = container_of(data, struct iwl_priv,
6065 post_associate.work);
6066
6067 mutex_lock(&priv->mutex);
6068 iwl4965_post_associate(priv);
6069 mutex_unlock(&priv->mutex);
6070
6071}
6072
6073static void iwl4965_bg_abort_scan(struct work_struct *work)
6074{
6075 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
6076
6077 if (!iwl_is_ready(priv))
6078 return;
6079
6080 mutex_lock(&priv->mutex);
6081
6082 set_bit(STATUS_SCAN_ABORTING, &priv->status);
6083 iwl4965_send_scan_abort(priv);
6084
6085 mutex_unlock(&priv->mutex);
6086}
6087
6088static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf); 2514static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf);
6089 2515
6090static void iwl4965_bg_scan_completed(struct work_struct *work) 2516static void iwl_bg_scan_completed(struct work_struct *work)
6091{ 2517{
6092 struct iwl_priv *priv = 2518 struct iwl_priv *priv =
6093 container_of(work, struct iwl_priv, scan_completed); 2519 container_of(work, struct iwl_priv, scan_completed);
6094 2520
6095 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n"); 2521 IWL_DEBUG_SCAN("SCAN complete scan\n");
6096 2522
6097 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2523 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6098 return; 2524 return;
@@ -6105,7 +2531,7 @@ static void iwl4965_bg_scan_completed(struct work_struct *work)
6105 /* Since setting the TXPOWER may have been deferred while 2531 /* Since setting the TXPOWER may have been deferred while
6106 * performing the scan, fire one off */ 2532 * performing the scan, fire one off */
6107 mutex_lock(&priv->mutex); 2533 mutex_lock(&priv->mutex);
6108 iwl4965_hw_reg_send_txpower(priv); 2534 iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
6109 mutex_unlock(&priv->mutex); 2535 mutex_unlock(&priv->mutex);
6110} 2536}
6111 2537
@@ -6115,7 +2541,7 @@ static void iwl4965_bg_scan_completed(struct work_struct *work)
6115 * 2541 *
6116 *****************************************************************************/ 2542 *****************************************************************************/
6117 2543
6118#define UCODE_READY_TIMEOUT (2 * HZ) 2544#define UCODE_READY_TIMEOUT (4 * HZ)
6119 2545
6120static int iwl4965_mac_start(struct ieee80211_hw *hw) 2546static int iwl4965_mac_start(struct ieee80211_hw *hw)
6121{ 2547{
@@ -6141,7 +2567,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
6141 /* we should be verifying the device is ready to be opened */ 2567 /* we should be verifying the device is ready to be opened */
6142 mutex_lock(&priv->mutex); 2568 mutex_lock(&priv->mutex);
6143 2569
6144 memset(&priv->staging_rxon, 0, sizeof(struct iwl4965_rxon_cmd)); 2570 memset(&priv->staging_rxon, 0, sizeof(struct iwl_rxon_cmd));
6145 /* fetch ucode file from disk, alloc and copy to bus-master buffers ... 2571 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
6146 * ucode filename and max sizes are card-specific. */ 2572 * ucode filename and max sizes are card-specific. */
6147 2573
@@ -6158,6 +2584,8 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
6158 2584
6159 mutex_unlock(&priv->mutex); 2585 mutex_unlock(&priv->mutex);
6160 2586
2587 iwl_rfkill_set_hw_state(priv);
2588
6161 if (ret) 2589 if (ret)
6162 goto out_release_irq; 2590 goto out_release_irq;
6163 2591
@@ -6166,15 +2594,15 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
6166 if (test_bit(STATUS_IN_SUSPEND, &priv->status)) 2594 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
6167 return 0; 2595 return 0;
6168 2596
6169 /* Wait for START_ALIVE from ucode. Otherwise callbacks from 2597 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
6170 * mac80211 will not be run successfully. */ 2598 * mac80211 will not be run successfully. */
6171 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 2599 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
6172 test_bit(STATUS_READY, &priv->status), 2600 test_bit(STATUS_READY, &priv->status),
6173 UCODE_READY_TIMEOUT); 2601 UCODE_READY_TIMEOUT);
6174 if (!ret) { 2602 if (!ret) {
6175 if (!test_bit(STATUS_READY, &priv->status)) { 2603 if (!test_bit(STATUS_READY, &priv->status)) {
6176 IWL_ERROR("Wait for START_ALIVE timeout after %dms.\n", 2604 IWL_ERROR("START_ALIVE timeout after %dms.\n",
6177 jiffies_to_msecs(UCODE_READY_TIMEOUT)); 2605 jiffies_to_msecs(UCODE_READY_TIMEOUT));
6178 ret = -ETIMEDOUT; 2606 ret = -ETIMEDOUT;
6179 goto out_release_irq; 2607 goto out_release_irq;
6180 } 2608 }
@@ -6212,8 +2640,7 @@ static void iwl4965_mac_stop(struct ieee80211_hw *hw)
6212 * RXON_FILTER_ASSOC_MSK BIT 2640 * RXON_FILTER_ASSOC_MSK BIT
6213 */ 2641 */
6214 mutex_lock(&priv->mutex); 2642 mutex_lock(&priv->mutex);
6215 iwl4965_scan_cancel_timeout(priv, 100); 2643 iwl_scan_cancel_timeout(priv, 100);
6216 cancel_delayed_work(&priv->post_associate);
6217 mutex_unlock(&priv->mutex); 2644 mutex_unlock(&priv->mutex);
6218 } 2645 }
6219 2646
@@ -6228,8 +2655,7 @@ static void iwl4965_mac_stop(struct ieee80211_hw *hw)
6228 IWL_DEBUG_MAC80211("leave\n"); 2655 IWL_DEBUG_MAC80211("leave\n");
6229} 2656}
6230 2657
6231static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 2658static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6232 struct ieee80211_tx_control *ctl)
6233{ 2659{
6234 struct iwl_priv *priv = hw->priv; 2660 struct iwl_priv *priv = hw->priv;
6235 2661
@@ -6242,9 +2668,9 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
6242 } 2668 }
6243 2669
6244 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2670 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6245 ctl->tx_rate->bitrate); 2671 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
6246 2672
6247 if (iwl4965_tx_skb(priv, skb, ctl)) 2673 if (iwl_tx_skb(priv, skb))
6248 dev_kfree_skb_any(skb); 2674 dev_kfree_skb_any(skb);
6249 2675
6250 IWL_DEBUG_MAC80211("leave\n"); 2676 IWL_DEBUG_MAC80211("leave\n");
@@ -6277,8 +2703,9 @@ static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
6277 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 2703 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
6278 } 2704 }
6279 2705
6280 if (iwl_is_ready(priv)) 2706 if (iwl4965_set_mode(priv, conf->type) == -EAGAIN)
6281 iwl4965_set_mode(priv, conf->type); 2707 /* we are not ready, will run again when ready */
2708 set_bit(STATUS_MODE_PENDING, &priv->status);
6282 2709
6283 mutex_unlock(&priv->mutex); 2710 mutex_unlock(&priv->mutex);
6284 2711
@@ -6299,12 +2726,21 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6299 const struct iwl_channel_info *ch_info; 2726 const struct iwl_channel_info *ch_info;
6300 unsigned long flags; 2727 unsigned long flags;
6301 int ret = 0; 2728 int ret = 0;
2729 u16 channel;
6302 2730
6303 mutex_lock(&priv->mutex); 2731 mutex_lock(&priv->mutex);
6304 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 2732 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
6305 2733
6306 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP); 2734 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
6307 2735
2736 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) {
2737 IWL_DEBUG_MAC80211("leave - RF-KILL - waiting for uCode\n");
2738 goto out;
2739 }
2740
2741 if (!conf->radio_enabled)
2742 iwl_radio_kill_sw_disable_radio(priv);
2743
6308 if (!iwl_is_ready(priv)) { 2744 if (!iwl_is_ready(priv)) {
6309 IWL_DEBUG_MAC80211("leave - not ready\n"); 2745 IWL_DEBUG_MAC80211("leave - not ready\n");
6310 ret = -EIO; 2746 ret = -EIO;
@@ -6319,33 +2755,37 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6319 return 0; 2755 return 0;
6320 } 2756 }
6321 2757
6322 spin_lock_irqsave(&priv->lock, flags); 2758 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
6323 2759 ch_info = iwl_get_channel_info(priv, conf->channel->band, channel);
6324 ch_info = iwl_get_channel_info(priv, conf->channel->band,
6325 ieee80211_frequency_to_channel(conf->channel->center_freq));
6326 if (!is_channel_valid(ch_info)) { 2760 if (!is_channel_valid(ch_info)) {
6327 IWL_DEBUG_MAC80211("leave - invalid channel\n"); 2761 IWL_DEBUG_MAC80211("leave - invalid channel\n");
6328 spin_unlock_irqrestore(&priv->lock, flags);
6329 ret = -EINVAL; 2762 ret = -EINVAL;
6330 goto out; 2763 goto out;
6331 } 2764 }
6332 2765
6333#ifdef CONFIG_IWL4965_HT 2766 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
2767 !is_channel_ibss(ch_info)) {
2768 IWL_ERROR("channel %d in band %d not IBSS channel\n",
2769 conf->channel->hw_value, conf->channel->band);
2770 ret = -EINVAL;
2771 goto out;
2772 }
2773
2774 spin_lock_irqsave(&priv->lock, flags);
2775
6334 /* if we are switching from ht to 2.4 clear flags 2776 /* if we are switching from ht to 2.4 clear flags
6335 * from any ht related info since 2.4 does not 2777 * from any ht related info since 2.4 does not
6336 * support ht */ 2778 * support ht */
6337 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel->hw_value) 2779 if ((le16_to_cpu(priv->staging_rxon.channel) != channel)
6338#ifdef IEEE80211_CONF_CHANNEL_SWITCH 2780#ifdef IEEE80211_CONF_CHANNEL_SWITCH
6339 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) 2781 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
6340#endif 2782#endif
6341 ) 2783 )
6342 priv->staging_rxon.flags = 0; 2784 priv->staging_rxon.flags = 0;
6343#endif /* CONFIG_IWL4965_HT */
6344 2785
6345 iwlcore_set_rxon_channel(priv, conf->channel->band, 2786 iwl_set_rxon_channel(priv, conf->channel->band, channel);
6346 ieee80211_frequency_to_channel(conf->channel->center_freq));
6347 2787
6348 iwl4965_set_flags_for_phymode(priv, conf->channel->band); 2788 iwl_set_flags_for_band(priv, conf->channel->band);
6349 2789
6350 /* The list of supported rates and rate mask can be different 2790 /* The list of supported rates and rate mask can be different
6351 * for each band; since the band may have changed, reset 2791 * for each band; since the band may have changed, reset
@@ -6361,9 +2801,6 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6361 } 2801 }
6362#endif 2802#endif
6363 2803
6364 if (priv->cfg->ops->lib->radio_kill_sw)
6365 priv->cfg->ops->lib->radio_kill_sw(priv, !conf->radio_enabled);
6366
6367 if (!conf->radio_enabled) { 2804 if (!conf->radio_enabled) {
6368 IWL_DEBUG_MAC80211("leave - radio disabled\n"); 2805 IWL_DEBUG_MAC80211("leave - radio disabled\n");
6369 goto out; 2806 goto out;
@@ -6375,6 +2812,11 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6375 goto out; 2812 goto out;
6376 } 2813 }
6377 2814
2815 IWL_DEBUG_MAC80211("TX Power old=%d new=%d\n",
2816 priv->tx_power_user_lmt, conf->power_level);
2817
2818 iwl_set_tx_power(priv, conf->power_level, false);
2819
6378 iwl4965_set_rate(priv); 2820 iwl4965_set_rate(priv);
6379 2821
6380 if (memcmp(&priv->active_rxon, 2822 if (memcmp(&priv->active_rxon,
@@ -6394,12 +2836,13 @@ out:
6394static void iwl4965_config_ap(struct iwl_priv *priv) 2836static void iwl4965_config_ap(struct iwl_priv *priv)
6395{ 2837{
6396 int ret = 0; 2838 int ret = 0;
2839 unsigned long flags;
6397 2840
6398 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2841 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6399 return; 2842 return;
6400 2843
6401 /* The following should be done only at AP bring up */ 2844 /* The following should be done only at AP bring up */
6402 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) { 2845 if (!(iwl_is_associated(priv))) {
6403 2846
6404 /* RXON - unassoc (to set timing command) */ 2847 /* RXON - unassoc (to set timing command) */
6405 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2848 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -6414,7 +2857,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
6414 IWL_WARNING("REPLY_RXON_TIMING failed - " 2857 IWL_WARNING("REPLY_RXON_TIMING failed - "
6415 "Attempting to continue.\n"); 2858 "Attempting to continue.\n");
6416 2859
6417 iwl4965_set_rxon_chain(priv); 2860 iwl_set_rxon_chain(priv);
6418 2861
6419 /* FIXME: what should be the assoc_id for AP? */ 2862 /* FIXME: what should be the assoc_id for AP? */
6420 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 2863 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
@@ -6441,8 +2884,10 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
6441 /* restore RXON assoc */ 2884 /* restore RXON assoc */
6442 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2885 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
6443 iwl4965_commit_rxon(priv); 2886 iwl4965_commit_rxon(priv);
6444 iwl4965_activate_qos(priv, 1); 2887 spin_lock_irqsave(&priv->lock, flags);
6445 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0); 2888 iwl_activate_qos(priv, 1);
2889 spin_unlock_irqrestore(&priv->lock, flags);
2890 iwl_rxon_add_station(priv, iwl_bcast_addr, 0);
6446 } 2891 }
6447 iwl4965_send_beacon_cmd(priv); 2892 iwl4965_send_beacon_cmd(priv);
6448 2893
@@ -6451,6 +2896,9 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
6451 * clear sta table, add BCAST sta... */ 2896 * clear sta table, add BCAST sta... */
6452} 2897}
6453 2898
2899/* temporary */
2900static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
2901
6454static int iwl4965_mac_config_interface(struct ieee80211_hw *hw, 2902static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
6455 struct ieee80211_vif *vif, 2903 struct ieee80211_vif *vif,
6456 struct ieee80211_if_conf *conf) 2904 struct ieee80211_if_conf *conf)
@@ -6468,8 +2916,18 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
6468 return 0; 2916 return 0;
6469 } 2917 }
6470 2918
2919 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
2920 conf->changed & IEEE80211_IFCC_BEACON) {
2921 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2922 if (!beacon)
2923 return -ENOMEM;
2924 rc = iwl4965_mac_beacon_update(hw, beacon);
2925 if (rc)
2926 return rc;
2927 }
2928
6471 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 2929 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
6472 (!conf->beacon || !conf->ssid_len)) { 2930 (!conf->ssid_len)) {
6473 IWL_DEBUG_MAC80211 2931 IWL_DEBUG_MAC80211
6474 ("Leaving in AP mode because HostAPD is not ready.\n"); 2932 ("Leaving in AP mode because HostAPD is not ready.\n");
6475 return 0; 2933 return 0;
@@ -6501,7 +2959,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
6501 if (priv->ibss_beacon) 2959 if (priv->ibss_beacon)
6502 dev_kfree_skb(priv->ibss_beacon); 2960 dev_kfree_skb(priv->ibss_beacon);
6503 2961
6504 priv->ibss_beacon = conf->beacon; 2962 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
6505 } 2963 }
6506 2964
6507 if (iwl_is_rfkill(priv)) 2965 if (iwl_is_rfkill(priv))
@@ -6511,7 +2969,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
6511 !is_multicast_ether_addr(conf->bssid)) { 2969 !is_multicast_ether_addr(conf->bssid)) {
6512 /* If there is currently a HW scan going on in the background 2970 /* If there is currently a HW scan going on in the background
6513 * then we need to cancel it else the RXON below will fail. */ 2971 * then we need to cancel it else the RXON below will fail. */
6514 if (iwl4965_scan_cancel_timeout(priv, 100)) { 2972 if (iwl_scan_cancel_timeout(priv, 100)) {
6515 IWL_WARNING("Aborted scan still in progress " 2973 IWL_WARNING("Aborted scan still in progress "
6516 "after 100ms\n"); 2974 "after 100ms\n");
6517 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); 2975 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
@@ -6531,12 +2989,12 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
6531 else { 2989 else {
6532 rc = iwl4965_commit_rxon(priv); 2990 rc = iwl4965_commit_rxon(priv);
6533 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) 2991 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
6534 iwl4965_rxon_add_station( 2992 iwl_rxon_add_station(
6535 priv, priv->active_rxon.bssid_addr, 1); 2993 priv, priv->active_rxon.bssid_addr, 1);
6536 } 2994 }
6537 2995
6538 } else { 2996 } else {
6539 iwl4965_scan_cancel_timeout(priv, 100); 2997 iwl_scan_cancel_timeout(priv, 100);
6540 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2998 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6541 iwl4965_commit_rxon(priv); 2999 iwl4965_commit_rxon(priv);
6542 } 3000 }
@@ -6562,11 +3020,18 @@ static void iwl4965_configure_filter(struct ieee80211_hw *hw,
6562 unsigned int *total_flags, 3020 unsigned int *total_flags,
6563 int mc_count, struct dev_addr_list *mc_list) 3021 int mc_count, struct dev_addr_list *mc_list)
6564{ 3022{
6565 /* 3023 struct iwl_priv *priv = hw->priv;
6566 * XXX: dummy 3024
6567 * see also iwl4965_connection_init_rx_config 3025 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) {
6568 */ 3026 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
6569 *total_flags = 0; 3027 IEEE80211_IF_TYPE_MNTR,
3028 changed_flags, *total_flags);
3029 /* queue work 'cuz mac80211 is holding a lock which
3030 * prevents us from issuing (synchronous) f/w cmds */
3031 queue_work(priv->workqueue, &priv->set_monitor);
3032 }
3033 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI |
3034 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
6570} 3035}
6571 3036
6572static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw, 3037static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
@@ -6579,8 +3044,7 @@ static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
6579 mutex_lock(&priv->mutex); 3044 mutex_lock(&priv->mutex);
6580 3045
6581 if (iwl_is_ready_rf(priv)) { 3046 if (iwl_is_ready_rf(priv)) {
6582 iwl4965_scan_cancel_timeout(priv, 100); 3047 iwl_scan_cancel_timeout(priv, 100);
6583 cancel_delayed_work(&priv->post_associate);
6584 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3048 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6585 iwl4965_commit_rxon(priv); 3049 iwl4965_commit_rxon(priv);
6586 } 3050 }
@@ -6596,64 +3060,6 @@ static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
6596 3060
6597} 3061}
6598 3062
6599
6600#ifdef CONFIG_IWL4965_HT
6601static void iwl4965_ht_conf(struct iwl_priv *priv,
6602 struct ieee80211_bss_conf *bss_conf)
6603{
6604 struct ieee80211_ht_info *ht_conf = bss_conf->ht_conf;
6605 struct ieee80211_ht_bss_info *ht_bss_conf = bss_conf->ht_bss_conf;
6606 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
6607
6608 IWL_DEBUG_MAC80211("enter: \n");
6609
6610 iwl_conf->is_ht = bss_conf->assoc_ht;
6611
6612 if (!iwl_conf->is_ht)
6613 return;
6614
6615 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6616
6617 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
6618 iwl_conf->sgf |= 0x1;
6619 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
6620 iwl_conf->sgf |= 0x2;
6621
6622 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
6623 iwl_conf->max_amsdu_size =
6624 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
6625
6626 iwl_conf->supported_chan_width =
6627 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
6628 iwl_conf->extension_chan_offset =
6629 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
6630 /* If no above or below channel supplied disable FAT channel */
6631 if (iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_ABOVE &&
6632 iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_BELOW)
6633 iwl_conf->supported_chan_width = 0;
6634
6635 iwl_conf->tx_mimo_ps_mode =
6636 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6637 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
6638
6639 iwl_conf->control_channel = ht_bss_conf->primary_channel;
6640 iwl_conf->tx_chan_width =
6641 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
6642 iwl_conf->ht_protection =
6643 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
6644 iwl_conf->non_GF_STA_present =
6645 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
6646
6647 IWL_DEBUG_MAC80211("control channel %d\n", iwl_conf->control_channel);
6648 IWL_DEBUG_MAC80211("leave\n");
6649}
6650#else
6651static inline void iwl4965_ht_conf(struct iwl_priv *priv,
6652 struct ieee80211_bss_conf *bss_conf)
6653{
6654}
6655#endif
6656
6657#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 3063#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
6658static void iwl4965_bss_info_changed(struct ieee80211_hw *hw, 3064static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
6659 struct ieee80211_vif *vif, 3065 struct ieee80211_vif *vif,
@@ -6684,7 +3090,7 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
6684 if (changes & BSS_CHANGED_HT) { 3090 if (changes & BSS_CHANGED_HT) {
6685 IWL_DEBUG_MAC80211("HT %d\n", bss_conf->assoc_ht); 3091 IWL_DEBUG_MAC80211("HT %d\n", bss_conf->assoc_ht);
6686 iwl4965_ht_conf(priv, bss_conf); 3092 iwl4965_ht_conf(priv, bss_conf);
6687 iwl4965_set_rxon_chain(priv); 3093 iwl_set_rxon_chain(priv);
6688 } 3094 }
6689 3095
6690 if (changes & BSS_CHANGED_ASSOC) { 3096 if (changes & BSS_CHANGED_ASSOC) {
@@ -6751,7 +3157,7 @@ static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
6751 } 3157 }
6752 if (len) { 3158 if (len) {
6753 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ", 3159 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
6754 iwl4965_escape_essid(ssid, len), (int)len); 3160 iwl_escape_essid(ssid, len), (int)len);
6755 3161
6756 priv->one_direct_scan = 1; 3162 priv->one_direct_scan = 1;
6757 priv->direct_ssid_len = (u8) 3163 priv->direct_ssid_len = (u8)
@@ -6760,7 +3166,7 @@ static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
6760 } else 3166 } else
6761 priv->one_direct_scan = 0; 3167 priv->one_direct_scan = 0;
6762 3168
6763 rc = iwl4965_scan_initiate(priv); 3169 rc = iwl_scan_initiate(priv);
6764 3170
6765 IWL_DEBUG_MAC80211("leave\n"); 3171 IWL_DEBUG_MAC80211("leave\n");
6766 3172
@@ -6784,14 +3190,14 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
6784 3190
6785 IWL_DEBUG_MAC80211("enter\n"); 3191 IWL_DEBUG_MAC80211("enter\n");
6786 3192
6787 sta_id = iwl4965_hw_find_station(priv, addr); 3193 sta_id = iwl_find_station(priv, addr);
6788 if (sta_id == IWL_INVALID_STATION) { 3194 if (sta_id == IWL_INVALID_STATION) {
6789 IWL_DEBUG_MAC80211("leave - %s not in station map.\n", 3195 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
6790 print_mac(mac, addr)); 3196 print_mac(mac, addr));
6791 return; 3197 return;
6792 } 3198 }
6793 3199
6794 iwl4965_scan_cancel_timeout(priv, 100); 3200 iwl_scan_cancel_timeout(priv, 100);
6795 3201
6796 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); 3202 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
6797 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 3203 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -6812,7 +3218,7 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
6812 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 3218 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
6813 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 3219 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
6814 3220
6815 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 3221 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
6816 3222
6817 spin_unlock_irqrestore(&priv->sta_lock, flags); 3223 spin_unlock_irqrestore(&priv->sta_lock, flags);
6818 3224
@@ -6831,7 +3237,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6831 3237
6832 IWL_DEBUG_MAC80211("enter\n"); 3238 IWL_DEBUG_MAC80211("enter\n");
6833 3239
6834 if (priv->cfg->mod_params->sw_crypto) { 3240 if (priv->hw_params.sw_crypto) {
6835 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); 3241 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
6836 return -EOPNOTSUPP; 3242 return -EOPNOTSUPP;
6837 } 3243 }
@@ -6840,7 +3246,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6840 /* only support pairwise keys */ 3246 /* only support pairwise keys */
6841 return -EOPNOTSUPP; 3247 return -EOPNOTSUPP;
6842 3248
6843 sta_id = iwl4965_hw_find_station(priv, addr); 3249 sta_id = iwl_find_station(priv, addr);
6844 if (sta_id == IWL_INVALID_STATION) { 3250 if (sta_id == IWL_INVALID_STATION) {
6845 IWL_DEBUG_MAC80211("leave - %s not in station map.\n", 3251 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
6846 print_mac(mac, addr)); 3252 print_mac(mac, addr));
@@ -6849,7 +3255,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6849 } 3255 }
6850 3256
6851 mutex_lock(&priv->mutex); 3257 mutex_lock(&priv->mutex);
6852 iwl4965_scan_cancel_timeout(priv, 100); 3258 iwl_scan_cancel_timeout(priv, 100);
6853 mutex_unlock(&priv->mutex); 3259 mutex_unlock(&priv->mutex);
6854 3260
6855 /* If we are getting WEP group key and we didn't receive any key mapping 3261 /* If we are getting WEP group key and we didn't receive any key mapping
@@ -6861,7 +3267,8 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6861 if (cmd == SET_KEY) 3267 if (cmd == SET_KEY)
6862 is_default_wep_key = !priv->key_mapping_key; 3268 is_default_wep_key = !priv->key_mapping_key;
6863 else 3269 else
6864 is_default_wep_key = priv->default_wep_key; 3270 is_default_wep_key =
3271 (key->hw_key_idx == HW_KEY_DEFAULT);
6865 } 3272 }
6866 3273
6867 switch (cmd) { 3274 switch (cmd) {
@@ -6877,7 +3284,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6877 if (is_default_wep_key) 3284 if (is_default_wep_key)
6878 ret = iwl_remove_default_wep_key(priv, key); 3285 ret = iwl_remove_default_wep_key(priv, key);
6879 else 3286 else
6880 ret = iwl_remove_dynamic_key(priv, sta_id); 3287 ret = iwl_remove_dynamic_key(priv, key, sta_id);
6881 3288
6882 IWL_DEBUG_MAC80211("disable hwcrypto key\n"); 3289 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
6883 break; 3290 break;
@@ -6890,7 +3297,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6890 return ret; 3297 return ret;
6891} 3298}
6892 3299
6893static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue, 3300static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
6894 const struct ieee80211_tx_queue_params *params) 3301 const struct ieee80211_tx_queue_params *params)
6895{ 3302{
6896 struct iwl_priv *priv = hw->priv; 3303 struct iwl_priv *priv = hw->priv;
@@ -6927,15 +3334,12 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
6927 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; 3334 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
6928 priv->qos_data.qos_active = 1; 3335 priv->qos_data.qos_active = 1;
6929 3336
6930 spin_unlock_irqrestore(&priv->lock, flags);
6931
6932 mutex_lock(&priv->mutex);
6933 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 3337 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
6934 iwl4965_activate_qos(priv, 1); 3338 iwl_activate_qos(priv, 1);
6935 else if (priv->assoc_id && iwl_is_associated(priv)) 3339 else if (priv->assoc_id && iwl_is_associated(priv))
6936 iwl4965_activate_qos(priv, 0); 3340 iwl_activate_qos(priv, 0);
6937 3341
6938 mutex_unlock(&priv->mutex); 3342 spin_unlock_irqrestore(&priv->lock, flags);
6939 3343
6940 IWL_DEBUG_MAC80211("leave\n"); 3344 IWL_DEBUG_MAC80211("leave\n");
6941 return 0; 3345 return 0;
@@ -6946,8 +3350,8 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
6946{ 3350{
6947 struct iwl_priv *priv = hw->priv; 3351 struct iwl_priv *priv = hw->priv;
6948 int i, avail; 3352 int i, avail;
6949 struct iwl4965_tx_queue *txq; 3353 struct iwl_tx_queue *txq;
6950 struct iwl4965_queue *q; 3354 struct iwl_queue *q;
6951 unsigned long flags; 3355 unsigned long flags;
6952 3356
6953 IWL_DEBUG_MAC80211("enter\n"); 3357 IWL_DEBUG_MAC80211("enter\n");
@@ -6962,11 +3366,11 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
6962 for (i = 0; i < AC_NUM; i++) { 3366 for (i = 0; i < AC_NUM; i++) {
6963 txq = &priv->txq[i]; 3367 txq = &priv->txq[i];
6964 q = &txq->q; 3368 q = &txq->q;
6965 avail = iwl4965_queue_space(q); 3369 avail = iwl_queue_space(q);
6966 3370
6967 stats->data[i].len = q->n_window - avail; 3371 stats[i].len = q->n_window - avail;
6968 stats->data[i].limit = q->n_window - q->high_mark; 3372 stats[i].limit = q->n_window - q->high_mark;
6969 stats->data[i].count = q->n_window; 3373 stats[i].count = q->n_window;
6970 3374
6971 } 3375 }
6972 spin_unlock_irqrestore(&priv->lock, flags); 3376 spin_unlock_irqrestore(&priv->lock, flags);
@@ -6979,14 +3383,9 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
6979static int iwl4965_mac_get_stats(struct ieee80211_hw *hw, 3383static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
6980 struct ieee80211_low_level_stats *stats) 3384 struct ieee80211_low_level_stats *stats)
6981{ 3385{
6982 IWL_DEBUG_MAC80211("enter\n"); 3386 struct iwl_priv *priv = hw->priv;
6983 IWL_DEBUG_MAC80211("leave\n");
6984
6985 return 0;
6986}
6987 3387
6988static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw) 3388 priv = hw->priv;
6989{
6990 IWL_DEBUG_MAC80211("enter\n"); 3389 IWL_DEBUG_MAC80211("enter\n");
6991 IWL_DEBUG_MAC80211("leave\n"); 3390 IWL_DEBUG_MAC80211("leave\n");
6992 3391
@@ -7001,16 +3400,11 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7001 mutex_lock(&priv->mutex); 3400 mutex_lock(&priv->mutex);
7002 IWL_DEBUG_MAC80211("enter\n"); 3401 IWL_DEBUG_MAC80211("enter\n");
7003 3402
7004 priv->lq_mngr.lq_ready = 0;
7005#ifdef CONFIG_IWL4965_HT
7006 spin_lock_irqsave(&priv->lock, flags); 3403 spin_lock_irqsave(&priv->lock, flags);
7007 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info)); 3404 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
7008 spin_unlock_irqrestore(&priv->lock, flags); 3405 spin_unlock_irqrestore(&priv->lock, flags);
7009#endif /* CONFIG_IWL4965_HT */
7010
7011 iwlcore_reset_qos(priv);
7012 3406
7013 cancel_delayed_work(&priv->post_associate); 3407 iwl_reset_qos(priv);
7014 3408
7015 spin_lock_irqsave(&priv->lock, flags); 3409 spin_lock_irqsave(&priv->lock, flags);
7016 priv->assoc_id = 0; 3410 priv->assoc_id = 0;
@@ -7040,11 +3434,13 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7040 * clear RXON_FILTER_ASSOC_MSK bit 3434 * clear RXON_FILTER_ASSOC_MSK bit
7041 */ 3435 */
7042 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 3436 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
7043 iwl4965_scan_cancel_timeout(priv, 100); 3437 iwl_scan_cancel_timeout(priv, 100);
7044 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3438 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7045 iwl4965_commit_rxon(priv); 3439 iwl4965_commit_rxon(priv);
7046 } 3440 }
7047 3441
3442 iwl_power_update_mode(priv, 0);
3443
7048 /* Per mac80211.h: This is only used in IBSS mode... */ 3444 /* Per mac80211.h: This is only used in IBSS mode... */
7049 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 3445 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
7050 3446
@@ -7060,11 +3456,11 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7060 IWL_DEBUG_MAC80211("leave\n"); 3456 IWL_DEBUG_MAC80211("leave\n");
7061} 3457}
7062 3458
7063static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 3459static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
7064 struct ieee80211_tx_control *control)
7065{ 3460{
7066 struct iwl_priv *priv = hw->priv; 3461 struct iwl_priv *priv = hw->priv;
7067 unsigned long flags; 3462 unsigned long flags;
3463 __le64 timestamp;
7068 3464
7069 mutex_lock(&priv->mutex); 3465 mutex_lock(&priv->mutex);
7070 IWL_DEBUG_MAC80211("enter\n"); 3466 IWL_DEBUG_MAC80211("enter\n");
@@ -7089,13 +3485,15 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7089 priv->ibss_beacon = skb; 3485 priv->ibss_beacon = skb;
7090 3486
7091 priv->assoc_id = 0; 3487 priv->assoc_id = 0;
3488 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
3489 priv->timestamp = le64_to_cpu(timestamp) + (priv->beacon_int * 1000);
7092 3490
7093 IWL_DEBUG_MAC80211("leave\n"); 3491 IWL_DEBUG_MAC80211("leave\n");
7094 spin_unlock_irqrestore(&priv->lock, flags); 3492 spin_unlock_irqrestore(&priv->lock, flags);
7095 3493
7096 iwlcore_reset_qos(priv); 3494 iwl_reset_qos(priv);
7097 3495
7098 queue_work(priv->workqueue, &priv->post_associate.work); 3496 iwl4965_post_associate(priv);
7099 3497
7100 mutex_unlock(&priv->mutex); 3498 mutex_unlock(&priv->mutex);
7101 3499
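The reworked IBSS beacon-update path above also records a timing reference: it reads the 64-bit little-endian TSF timestamp out of the beacon's management header and stores it plus beacon_int * 1000. A userspace sketch of just that conversion and offset, with le64toh() standing in for the kernel's le64_to_cpu() and made-up input values:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration of the bookkeeping done above: byte-swap the on-air
 * little-endian TSF, then add the beacon interval scaled by 1000. */
static uint64_t next_timestamp(uint64_t tsf_le, uint16_t beacon_int)
{
	return le64toh(tsf_le) + (uint64_t)beacon_int * 1000;
}

int main(void)
{
	/* example only: TSF 123456789, beacon interval 100 */
	printf("%llu\n",
	       (unsigned long long)next_timestamp(htole64(123456789ULL), 100));
	return 0;
}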
@@ -7118,13 +3516,18 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7118 * See the level definitions in iwl for details. 3516 * See the level definitions in iwl for details.
7119 */ 3517 */
7120 3518
7121static ssize_t show_debug_level(struct device_driver *d, char *buf) 3519static ssize_t show_debug_level(struct device *d,
3520 struct device_attribute *attr, char *buf)
7122{ 3521{
7123 return sprintf(buf, "0x%08X\n", iwl_debug_level); 3522 struct iwl_priv *priv = d->driver_data;
3523
3524 return sprintf(buf, "0x%08X\n", priv->debug_level);
7124} 3525}
7125static ssize_t store_debug_level(struct device_driver *d, 3526static ssize_t store_debug_level(struct device *d,
3527 struct device_attribute *attr,
7126 const char *buf, size_t count) 3528 const char *buf, size_t count)
7127{ 3529{
3530 struct iwl_priv *priv = d->driver_data;
7128 char *p = (char *)buf; 3531 char *p = (char *)buf;
7129 u32 val; 3532 u32 val;
7130 3533
@@ -7133,17 +3536,49 @@ static ssize_t store_debug_level(struct device_driver *d,
7133 printk(KERN_INFO DRV_NAME 3536 printk(KERN_INFO DRV_NAME
7134 ": %s is not in hex or decimal form.\n", buf); 3537 ": %s is not in hex or decimal form.\n", buf);
7135 else 3538 else
7136 iwl_debug_level = val; 3539 priv->debug_level = val;
7137 3540
7138 return strnlen(buf, count); 3541 return strnlen(buf, count);
7139} 3542}
7140 3543
7141static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, 3544static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
7142 show_debug_level, store_debug_level); 3545 show_debug_level, store_debug_level);
3546
7143 3547
7144#endif /* CONFIG_IWLWIFI_DEBUG */ 3548#endif /* CONFIG_IWLWIFI_DEBUG */
7145 3549
7146 3550
3551static ssize_t show_version(struct device *d,
3552 struct device_attribute *attr, char *buf)
3553{
3554 struct iwl_priv *priv = d->driver_data;
3555 struct iwl_alive_resp *palive = &priv->card_alive;
3556 ssize_t pos = 0;
3557 u16 eeprom_ver;
3558
3559 if (palive->is_valid)
3560 pos += sprintf(buf + pos,
3561 "fw version: 0x%01X.0x%01X.0x%01X.0x%01X\n"
3562 "fw type: 0x%01X 0x%01X\n",
3563 palive->ucode_major, palive->ucode_minor,
3564 palive->sw_rev[0], palive->sw_rev[1],
3565 palive->ver_type, palive->ver_subtype);
3566 else
3567 pos += sprintf(buf + pos, "fw not loaded\n");
3568
3569 if (priv->eeprom) {
3570 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
3571 pos += sprintf(buf + pos, "EEPROM version: 0x%x\n",
3572 eeprom_ver);
3573 } else {
 3574		pos += sprintf(buf + pos, "EEPROM not initialized\n");
3575 }
3576
3577 return pos;
3578}
3579
3580static DEVICE_ATTR(version, S_IWUSR | S_IRUGO, show_version, NULL);
3581
7147static ssize_t show_temperature(struct device *d, 3582static ssize_t show_temperature(struct device *d,
7148 struct device_attribute *attr, char *buf) 3583 struct device_attribute *attr, char *buf)
7149{ 3584{
@@ -7152,7 +3587,7 @@ static ssize_t show_temperature(struct device *d,
7152 if (!iwl_is_alive(priv)) 3587 if (!iwl_is_alive(priv))
7153 return -EAGAIN; 3588 return -EAGAIN;
7154 3589
7155 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv)); 3590 return sprintf(buf, "%d\n", priv->temperature);
7156} 3591}
7157 3592
7158static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); 3593static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
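The sysfs changes in this hunk and the ones around it follow one recipe: what used to be a driver-wide DRIVER_ATTR becomes a per-device DEVICE_ATTR whose show/store handlers recover the private structure from the struct device. A compact sketch of that recipe with placeholder names (foo_priv, foo_show_temperature); the registration call is shown only as a comment because it belongs in the probe path:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct foo_priv {
	int temperature;
};

static ssize_t foo_show_temperature(struct device *d,
				    struct device_attribute *attr, char *buf)
{
	/* per-device private data, set with dev_set_drvdata() at probe time */
	struct foo_priv *priv = dev_get_drvdata(d);

	return sprintf(buf, "%d\n", priv->temperature);
}

static DEVICE_ATTR(temperature, S_IRUGO, foo_show_temperature, NULL);

/* exposed from probe() with something like:
 *	device_create_file(dev, &dev_attr_temperature);
 */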
@@ -7170,7 +3605,7 @@ static ssize_t show_tx_power(struct device *d,
7170 struct device_attribute *attr, char *buf) 3605 struct device_attribute *attr, char *buf)
7171{ 3606{
7172 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; 3607 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7173 return sprintf(buf, "%d\n", priv->user_txpower_limit); 3608 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
7174} 3609}
7175 3610
7176static ssize_t store_tx_power(struct device *d, 3611static ssize_t store_tx_power(struct device *d,
@@ -7186,7 +3621,7 @@ static ssize_t store_tx_power(struct device *d,
7186 printk(KERN_INFO DRV_NAME 3621 printk(KERN_INFO DRV_NAME
7187 ": %s is not in decimal form.\n", buf); 3622 ": %s is not in decimal form.\n", buf);
7188 else 3623 else
7189 iwl4965_hw_reg_set_txpower(priv, val); 3624 iwl_set_tx_power(priv, val, false);
7190 3625
7191 return count; 3626 return count;
7192} 3627}
@@ -7211,7 +3646,7 @@ static ssize_t store_flags(struct device *d,
7211 mutex_lock(&priv->mutex); 3646 mutex_lock(&priv->mutex);
7212 if (le32_to_cpu(priv->staging_rxon.flags) != flags) { 3647 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
7213 /* Cancel any currently running scans... */ 3648 /* Cancel any currently running scans... */
7214 if (iwl4965_scan_cancel_timeout(priv, 100)) 3649 if (iwl_scan_cancel_timeout(priv, 100))
7215 IWL_WARNING("Could not cancel scan.\n"); 3650 IWL_WARNING("Could not cancel scan.\n");
7216 else { 3651 else {
7217 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", 3652 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
@@ -7246,7 +3681,7 @@ static ssize_t store_filter_flags(struct device *d,
7246 mutex_lock(&priv->mutex); 3681 mutex_lock(&priv->mutex);
7247 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { 3682 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
7248 /* Cancel any currently running scans... */ 3683 /* Cancel any currently running scans... */
7249 if (iwl4965_scan_cancel_timeout(priv, 100)) 3684 if (iwl_scan_cancel_timeout(priv, 100))
7250 IWL_WARNING("Could not cancel scan.\n"); 3685 IWL_WARNING("Could not cancel scan.\n");
7251 else { 3686 else {
7252 IWL_DEBUG_INFO("Committing rxon.filter_flags = " 3687 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
@@ -7376,20 +3811,11 @@ static ssize_t store_power_level(struct device *d,
7376 goto out; 3811 goto out;
7377 } 3812 }
7378 3813
7379 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) 3814 rc = iwl_power_set_user_mode(priv, mode);
7380 mode = IWL_POWER_AC; 3815 if (rc) {
7381 else 3816 IWL_DEBUG_MAC80211("failed setting power mode.\n");
7382 mode |= IWL_POWER_ENABLED; 3817 goto out;
7383
7384 if (mode != priv->power_mode) {
7385 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
7386 if (rc) {
7387 IWL_DEBUG_MAC80211("failed setting power mode.\n");
7388 goto out;
7389 }
7390 priv->power_mode = mode;
7391 } 3818 }
7392
7393 rc = count; 3819 rc = count;
7394 3820
7395 out: 3821 out:
@@ -7419,7 +3845,7 @@ static ssize_t show_power_level(struct device *d,
7419 struct device_attribute *attr, char *buf) 3845 struct device_attribute *attr, char *buf)
7420{ 3846{
7421 struct iwl_priv *priv = dev_get_drvdata(d); 3847 struct iwl_priv *priv = dev_get_drvdata(d);
7422 int level = IWL_POWER_LEVEL(priv->power_mode); 3848 int level = priv->power_data.power_mode;
7423 char *p = buf; 3849 char *p = buf;
7424 3850
7425 p += sprintf(p, "%d ", level); 3851 p += sprintf(p, "%d ", level);
@@ -7437,14 +3863,14 @@ static ssize_t show_power_level(struct device *d,
7437 timeout_duration[level - 1] / 1000, 3863 timeout_duration[level - 1] / 1000,
7438 period_duration[level - 1] / 1000); 3864 period_duration[level - 1] / 1000);
7439 } 3865 }
7440 3866/*
7441 if (!(priv->power_mode & IWL_POWER_ENABLED)) 3867 if (!(priv->power_mode & IWL_POWER_ENABLED))
7442 p += sprintf(p, " OFF\n"); 3868 p += sprintf(p, " OFF\n");
7443 else 3869 else
7444 p += sprintf(p, " \n"); 3870 p += sprintf(p, " \n");
7445 3871*/
3872 p += sprintf(p, " \n");
7446 return (p - buf + 1); 3873 return (p - buf + 1);
7447
7448} 3874}
7449 3875
7450static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, 3876static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
@@ -7453,8 +3879,62 @@ static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
7453static ssize_t show_channels(struct device *d, 3879static ssize_t show_channels(struct device *d,
7454 struct device_attribute *attr, char *buf) 3880 struct device_attribute *attr, char *buf)
7455{ 3881{
7456 /* all this shit doesn't belong into sysfs anyway */ 3882
7457 return 0; 3883 struct iwl_priv *priv = dev_get_drvdata(d);
3884 struct ieee80211_channel *channels = NULL;
3885 const struct ieee80211_supported_band *supp_band = NULL;
3886 int len = 0, i;
3887 int count = 0;
3888
3889 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
3890 return -EAGAIN;
3891
3892 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
3893 channels = supp_band->channels;
3894 count = supp_band->n_channels;
3895
3896 len += sprintf(&buf[len],
3897 "Displaying %d channels in 2.4GHz band "
3898 "(802.11bg):\n", count);
3899
3900 for (i = 0; i < count; i++)
3901 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
3902 ieee80211_frequency_to_channel(
3903 channels[i].center_freq),
3904 channels[i].max_power,
3905 channels[i].flags & IEEE80211_CHAN_RADAR ?
3906 " (IEEE 802.11h required)" : "",
3907 (!(channels[i].flags & IEEE80211_CHAN_NO_IBSS)
3908 || (channels[i].flags &
3909 IEEE80211_CHAN_RADAR)) ? "" :
3910 ", IBSS",
3911 channels[i].flags &
3912 IEEE80211_CHAN_PASSIVE_SCAN ?
3913 "passive only" : "active/passive");
3914
3915 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
3916 channels = supp_band->channels;
3917 count = supp_band->n_channels;
3918
3919 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
3920 "(802.11a):\n", count);
3921
3922 for (i = 0; i < count; i++)
3923 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
3924 ieee80211_frequency_to_channel(
3925 channels[i].center_freq),
3926 channels[i].max_power,
3927 channels[i].flags & IEEE80211_CHAN_RADAR ?
3928 " (IEEE 802.11h required)" : "",
3929 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
3930 || (channels[i].flags &
3931 IEEE80211_CHAN_RADAR)) ? "" :
3932 ", IBSS",
3933 channels[i].flags &
3934 IEEE80211_CHAN_PASSIVE_SCAN ?
3935 "passive only" : "active/passive");
3936
3937 return len;
7458} 3938}
7459 3939
7460static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 3940static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
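show_channels() above leans on ieee80211_frequency_to_channel() to print a channel number next to each center frequency. For reference, a userspace re-implementation of that mapping for the two bands the attribute prints (2484 MHz / channel 14 handled specially); this is an illustration of the arithmetic, not the mac80211 source:

#include <stdio.h>

static int freq_to_channel(int mhz)
{
	if (mhz == 2484)
		return 14;		/* Japanese channel 14 special case */
	if (mhz < 2484)
		return (mhz - 2407) / 5;	/* 2.4 GHz band */
	return (mhz - 5000) / 5;		/* 5 GHz band */
}

int main(void)
{
	/* prints "1 36": 2412 MHz is channel 1, 5180 MHz is channel 36 */
	printf("%d %d\n", freq_to_channel(2412), freq_to_channel(5180));
	return 0;
}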
@@ -7463,7 +3943,7 @@ static ssize_t show_statistics(struct device *d,
7463 struct device_attribute *attr, char *buf) 3943 struct device_attribute *attr, char *buf)
7464{ 3944{
7465 struct iwl_priv *priv = dev_get_drvdata(d); 3945 struct iwl_priv *priv = dev_get_drvdata(d);
7466 u32 size = sizeof(struct iwl4965_notif_statistics); 3946 u32 size = sizeof(struct iwl_notif_statistics);
7467 u32 len = 0, ofs = 0; 3947 u32 len = 0, ofs = 0;
7468 u8 *data = (u8 *) & priv->statistics; 3948 u8 *data = (u8 *) & priv->statistics;
7469 int rc = 0; 3949 int rc = 0;
@@ -7497,44 +3977,6 @@ static ssize_t show_statistics(struct device *d,
7497 3977
7498static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); 3978static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
7499 3979
7500static ssize_t show_antenna(struct device *d,
7501 struct device_attribute *attr, char *buf)
7502{
7503 struct iwl_priv *priv = dev_get_drvdata(d);
7504
7505 if (!iwl_is_alive(priv))
7506 return -EAGAIN;
7507
7508 return sprintf(buf, "%d\n", priv->antenna);
7509}
7510
7511static ssize_t store_antenna(struct device *d,
7512 struct device_attribute *attr,
7513 const char *buf, size_t count)
7514{
7515 int ant;
7516 struct iwl_priv *priv = dev_get_drvdata(d);
7517
7518 if (count == 0)
7519 return 0;
7520
7521 if (sscanf(buf, "%1i", &ant) != 1) {
7522 IWL_DEBUG_INFO("not in hex or decimal form.\n");
7523 return count;
7524 }
7525
7526 if ((ant >= 0) && (ant <= 2)) {
7527 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
7528 priv->antenna = (enum iwl4965_antenna)ant;
7529 } else
7530 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
7531
7532
7533 return count;
7534}
7535
7536static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
7537
7538static ssize_t show_status(struct device *d, 3980static ssize_t show_status(struct device *d,
7539 struct device_attribute *attr, char *buf) 3981 struct device_attribute *attr, char *buf)
7540{ 3982{
@@ -7546,41 +3988,13 @@ static ssize_t show_status(struct device *d,
7546 3988
7547static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 3989static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
7548 3990
7549static ssize_t dump_error_log(struct device *d,
7550 struct device_attribute *attr,
7551 const char *buf, size_t count)
7552{
7553 char *p = (char *)buf;
7554
7555 if (p[0] == '1')
7556 iwl4965_dump_nic_error_log((struct iwl_priv *)d->driver_data);
7557
7558 return strnlen(buf, count);
7559}
7560
7561static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
7562
7563static ssize_t dump_event_log(struct device *d,
7564 struct device_attribute *attr,
7565 const char *buf, size_t count)
7566{
7567 char *p = (char *)buf;
7568
7569 if (p[0] == '1')
7570 iwl4965_dump_nic_event_log((struct iwl_priv *)d->driver_data);
7571
7572 return strnlen(buf, count);
7573}
7574
7575static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
7576
7577/***************************************************************************** 3991/*****************************************************************************
7578 * 3992 *
7579 * driver setup and teardown 3993 * driver setup and teardown
7580 * 3994 *
7581 *****************************************************************************/ 3995 *****************************************************************************/
7582 3996
7583static void iwl4965_setup_deferred_work(struct iwl_priv *priv) 3997static void iwl_setup_deferred_work(struct iwl_priv *priv)
7584{ 3998{
7585 priv->workqueue = create_workqueue(DRV_NAME); 3999 priv->workqueue = create_workqueue(DRV_NAME);
7586 4000
@@ -7589,38 +4003,42 @@ static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
7589 INIT_WORK(&priv->up, iwl4965_bg_up); 4003 INIT_WORK(&priv->up, iwl4965_bg_up);
7590 INIT_WORK(&priv->restart, iwl4965_bg_restart); 4004 INIT_WORK(&priv->restart, iwl4965_bg_restart);
7591 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish); 4005 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
7592 INIT_WORK(&priv->scan_completed, iwl4965_bg_scan_completed);
7593 INIT_WORK(&priv->request_scan, iwl4965_bg_request_scan);
7594 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
7595 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill); 4006 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
7596 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update); 4007 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
7597 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate); 4008 INIT_WORK(&priv->set_monitor, iwl4965_bg_set_monitor);
7598 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start); 4009 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
7599 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start); 4010 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
7600 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check); 4011 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
4012
4013 /* FIXME : remove when resolved PENDING */
4014 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
4015 iwl_setup_scan_deferred_work(priv);
4016
4017 if (priv->cfg->ops->lib->setup_deferred_work)
4018 priv->cfg->ops->lib->setup_deferred_work(priv);
7601 4019
7602 iwl4965_hw_setup_deferred_work(priv); 4020 init_timer(&priv->statistics_periodic);
4021 priv->statistics_periodic.data = (unsigned long)priv;
4022 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
7603 4023
7604 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 4024 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
7605 iwl4965_irq_tasklet, (unsigned long)priv); 4025 iwl4965_irq_tasklet, (unsigned long)priv);
7606} 4026}
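The new iwl_setup_deferred_work() above wires up three deferred-execution mechanisms: plain work items and delayed work queued on a private workqueue, plus a kernel timer for the periodic statistics poll. The sketch below shows how those pieces fit together using the same generation of APIs as this diff (create_workqueue(), INIT_WORK(), INIT_DELAYED_WORK(), and the pre-timer_setup() init_timer() with .data/.function); every demo_* name is hypothetical.

#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;	/* stands in for priv->workqueue */
static struct work_struct demo_work;
static struct delayed_work demo_dwork;
static struct timer_list demo_timer;

static void demo_work_fn(struct work_struct *w)
{
	/* runs in process context on demo_wq */
}

static void demo_timer_fn(unsigned long data)
{
	/* timer callbacks run in softirq context: do little here,
	 * push the real work onto the workqueue, then re-arm. */
	queue_work(demo_wq, &demo_work);
	mod_timer(&demo_timer, jiffies + HZ);
}

static void demo_setup_deferred_work(void)
{
	demo_wq = create_workqueue("demo");

	INIT_WORK(&demo_work, demo_work_fn);
	INIT_DELAYED_WORK(&demo_dwork, demo_work_fn);

	/* old-style timer init, matching statistics_periodic above */
	init_timer(&demo_timer);
	demo_timer.data = 0;
	demo_timer.function = demo_timer_fn;
}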
7607 4027
7608static void iwl4965_cancel_deferred_work(struct iwl_priv *priv) 4028static void iwl_cancel_deferred_work(struct iwl_priv *priv)
7609{ 4029{
7610 iwl4965_hw_cancel_deferred_work(priv); 4030 if (priv->cfg->ops->lib->cancel_deferred_work)
4031 priv->cfg->ops->lib->cancel_deferred_work(priv);
7611 4032
7612 cancel_delayed_work_sync(&priv->init_alive_start); 4033 cancel_delayed_work_sync(&priv->init_alive_start);
7613 cancel_delayed_work(&priv->scan_check); 4034 cancel_delayed_work(&priv->scan_check);
7614 cancel_delayed_work(&priv->alive_start); 4035 cancel_delayed_work(&priv->alive_start);
7615 cancel_delayed_work(&priv->post_associate);
7616 cancel_work_sync(&priv->beacon_update); 4036 cancel_work_sync(&priv->beacon_update);
4037 del_timer_sync(&priv->statistics_periodic);
7617} 4038}
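iwl_cancel_deferred_work() is the inverse of the setup above; the _sync variants wait for a handler that is already running, while plain cancel_delayed_work() only removes a pending item. A short sketch of the matching teardown for the hypothetical demo_* objects from the previous sketch:

static void demo_cancel_deferred_work(void)
{
	/* stop the re-arming timer first so it cannot queue new work,
	 * then wait for anything already running to finish */
	del_timer_sync(&demo_timer);
	cancel_work_sync(&demo_work);
	cancel_delayed_work_sync(&demo_dwork);

	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
}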
7618 4039
7619static struct attribute *iwl4965_sysfs_entries[] = { 4040static struct attribute *iwl4965_sysfs_entries[] = {
7620 &dev_attr_antenna.attr,
7621 &dev_attr_channels.attr, 4041 &dev_attr_channels.attr,
7622 &dev_attr_dump_errors.attr,
7623 &dev_attr_dump_events.attr,
7624 &dev_attr_flags.attr, 4042 &dev_attr_flags.attr,
7625 &dev_attr_filter_flags.attr, 4043 &dev_attr_filter_flags.attr,
7626#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 4044#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
@@ -7633,6 +4051,10 @@ static struct attribute *iwl4965_sysfs_entries[] = {
7633 &dev_attr_status.attr, 4051 &dev_attr_status.attr,
7634 &dev_attr_temperature.attr, 4052 &dev_attr_temperature.attr,
7635 &dev_attr_tx_power.attr, 4053 &dev_attr_tx_power.attr,
4054#ifdef CONFIG_IWLWIFI_DEBUG
4055 &dev_attr_debug_level.attr,
4056#endif
4057 &dev_attr_version.attr,
7636 4058
7637 NULL 4059 NULL
7638}; 4060};
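The iwl4965_sysfs_entries[] array above is the usual NULL-terminated attribute list: it is wrapped in a struct attribute_group and registered in a single sysfs_create_group() call during probe, then torn down with sysfs_remove_group() (both calls appear further down in this file's diff). A minimal sketch of that plumbing, reusing the hypothetical dev_attr_hw_rev from the earlier DEVICE_ATTR() sketch:

static struct attribute *demo_sysfs_entries[] = {
	&dev_attr_hw_rev.attr,	/* from the DEVICE_ATTR() sketch above */
	NULL			/* the list must be NULL-terminated */
};

static struct attribute_group demo_attribute_group = {
	.name = NULL,		/* create the files directly in the device directory */
	.attrs = demo_sysfs_entries,
};

/* probe:  err = sysfs_create_group(&pdev->dev.kobj, &demo_attribute_group);
 * remove: sysfs_remove_group(&pdev->dev.kobj, &demo_attribute_group);       */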
@@ -7656,13 +4078,9 @@ static struct ieee80211_ops iwl4965_hw_ops = {
7656 .get_stats = iwl4965_mac_get_stats, 4078 .get_stats = iwl4965_mac_get_stats,
7657 .get_tx_stats = iwl4965_mac_get_tx_stats, 4079 .get_tx_stats = iwl4965_mac_get_tx_stats,
7658 .conf_tx = iwl4965_mac_conf_tx, 4080 .conf_tx = iwl4965_mac_conf_tx,
7659 .get_tsf = iwl4965_mac_get_tsf,
7660 .reset_tsf = iwl4965_mac_reset_tsf, 4081 .reset_tsf = iwl4965_mac_reset_tsf,
7661 .beacon_update = iwl4965_mac_beacon_update,
7662 .bss_info_changed = iwl4965_bss_info_changed, 4082 .bss_info_changed = iwl4965_bss_info_changed,
7663#ifdef CONFIG_IWL4965_HT
7664 .ampdu_action = iwl4965_mac_ampdu_action, 4083 .ampdu_action = iwl4965_mac_ampdu_action,
7665#endif /* CONFIG_IWL4965_HT */
7666 .hw_scan = iwl4965_mac_hw_scan 4084 .hw_scan = iwl4965_mac_hw_scan
7667}; 4085};
7668 4086
@@ -7682,7 +4100,9 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7682 /* Disabling hardware scan means that mac80211 will perform scans 4100 /* Disabling hardware scan means that mac80211 will perform scans
7683 * "the hard way", rather than using device's scan. */ 4101 * "the hard way", rather than using device's scan. */
7684 if (cfg->mod_params->disable_hw_scan) { 4102 if (cfg->mod_params->disable_hw_scan) {
7685 IWL_DEBUG_INFO("Disabling hw_scan\n"); 4103 if (cfg->mod_params->debug & IWL_DL_INFO)
4104 dev_printk(KERN_DEBUG, &(pdev->dev),
4105 "Disabling hw_scan\n");
7686 iwl4965_hw_ops.hw_scan = NULL; 4106 iwl4965_hw_ops.hw_scan = NULL;
7687 } 4107 }
7688 4108
@@ -7701,7 +4121,7 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7701 priv->pci_dev = pdev; 4121 priv->pci_dev = pdev;
7702 4122
7703#ifdef CONFIG_IWLWIFI_DEBUG 4123#ifdef CONFIG_IWLWIFI_DEBUG
7704 iwl_debug_level = priv->cfg->mod_params->debug; 4124 priv->debug_level = priv->cfg->mod_params->debug;
7705 atomic_set(&priv->restrict_refcnt, 0); 4125 atomic_set(&priv->restrict_refcnt, 0);
7706#endif 4126#endif
7707 4127
@@ -7715,13 +4135,19 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7715 4135
7716 pci_set_master(pdev); 4136 pci_set_master(pdev);
7717 4137
7718 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 4138 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
7719 if (!err) 4139 if (!err)
7720 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 4140 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
4141 if (err) {
4142 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4143 if (!err)
4144 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
4145 /* both attempts failed: */
7721 if (err) { 4146 if (err) {
7722 printk(KERN_WARNING DRV_NAME 4147 printk(KERN_WARNING "%s: No suitable DMA available.\n",
7723 ": No suitable DMA available.\n"); 4148 DRV_NAME);
7724 goto out_pci_disable_device; 4149 goto out_pci_disable_device;
4150 }
7725 } 4151 }
7726 4152
7727 err = pci_request_regions(pdev, DRV_NAME); 4153 err = pci_request_regions(pdev, DRV_NAME);
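The DMA setup above now asks for 64-bit addressing first and only falls back to the old 32-bit mask when the platform refuses; both the streaming and the consistent mask have to be set, and the device is rejected only when both attempts fail. A condensed sketch of that fallback, using the same pci_set_dma_mask()/pci_set_consistent_dma_mask() calls and DMA_*BIT_MASK constants shown in the hunk (demo_set_dma_mask() itself is a hypothetical helper):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int demo_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	/* prefer 64-bit DMA addressing ... */
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	if (err) {
		/* ... and fall back to 32-bit if that is not available */
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_32BIT_MASK);
	}

	return err;	/* non-zero: no usable DMA mask at all */
}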
@@ -7747,31 +4173,31 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7747 (unsigned long long) pci_resource_len(pdev, 0)); 4173 (unsigned long long) pci_resource_len(pdev, 0));
7748 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); 4174 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
7749 4175
4176 iwl_hw_detect(priv);
7750 printk(KERN_INFO DRV_NAME 4177 printk(KERN_INFO DRV_NAME
7751 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name); 4178 ": Detected Intel Wireless WiFi Link %s REV=0x%X\n",
4179 priv->cfg->name, priv->hw_rev);
7752 4180
7753 /***************** 4181 /* amp init */
7754 * 4. Read EEPROM 4182 err = priv->cfg->ops->lib->apm_ops.init(priv);
7755 *****************/
7756 /* nic init */
7757 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
7758 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
7759
7760 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7761 err = iwl_poll_bit(priv, CSR_GP_CNTRL,
7762 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7763 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
7764 if (err < 0) { 4183 if (err < 0) {
7765 IWL_DEBUG_INFO("Failed to init the card\n"); 4184 IWL_DEBUG_INFO("Failed to init APMG\n");
7766 goto out_iounmap; 4185 goto out_iounmap;
7767 } 4186 }
4187 /*****************
4188 * 4. Read EEPROM
4189 *****************/
7768 /* Read the EEPROM */ 4190 /* Read the EEPROM */
7769 err = iwl_eeprom_init(priv); 4191 err = iwl_eeprom_init(priv);
7770 if (err) { 4192 if (err) {
7771 IWL_ERROR("Unable to init EEPROM\n"); 4193 IWL_ERROR("Unable to init EEPROM\n");
7772 goto out_iounmap; 4194 goto out_iounmap;
7773 } 4195 }
7774 /* MAC Address location in EEPROM same for 3945/4965 */ 4196 err = iwl_eeprom_check_version(priv);
4197 if (err)
4198 goto out_iounmap;
4199
4200 /* extract MAC Address */
7775 iwl_eeprom_get_mac(priv, priv->mac_addr); 4201 iwl_eeprom_get_mac(priv, priv->mac_addr);
7776 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr)); 4202 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
7777 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 4203 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
@@ -7779,19 +4205,18 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7779 /************************ 4205 /************************
7780 * 5. Setup HW constants 4206 * 5. Setup HW constants
7781 ************************/ 4207 ************************/
7782 /* Device-specific setup */ 4208 if (iwl_set_hw_params(priv)) {
7783 if (priv->cfg->ops->lib->set_hw_params(priv)) {
7784 IWL_ERROR("failed to set hw parameters\n"); 4209 IWL_ERROR("failed to set hw parameters\n");
7785 goto out_iounmap; 4210 goto out_free_eeprom;
7786 } 4211 }
7787 4212
7788 /******************* 4213 /*******************
7789 * 6. Setup hw/priv 4214 * 6. Setup priv
7790 *******************/ 4215 *******************/
7791 4216
7792 err = iwl_setup(priv); 4217 err = iwl_init_drv(priv);
7793 if (err) 4218 if (err)
7794 goto out_unset_hw_params; 4219 goto out_free_eeprom;
7795 /* At this point both hw and priv are initialized. */ 4220 /* At this point both hw and priv are initialized. */
7796 4221
7797 /********************************** 4222 /**********************************
@@ -7804,9 +4229,6 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7804 IWL_DEBUG_INFO("Radio disabled.\n"); 4229 IWL_DEBUG_INFO("Radio disabled.\n");
7805 } 4230 }
7806 4231
7807 if (priv->cfg->mod_params->enable_qos)
7808 priv->qos_data.qos_enable = 1;
7809
7810 /******************** 4232 /********************
7811 * 8. Setup services 4233 * 8. Setup services
7812 ********************/ 4234 ********************/
@@ -7817,17 +4239,12 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7817 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group); 4239 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7818 if (err) { 4240 if (err) {
7819 IWL_ERROR("failed to create sysfs device attributes\n"); 4241 IWL_ERROR("failed to create sysfs device attributes\n");
7820 goto out_unset_hw_params; 4242 goto out_uninit_drv;
7821 } 4243 }
7822 4244
7823 err = iwl_dbgfs_register(priv, DRV_NAME);
7824 if (err) {
7825 IWL_ERROR("failed to create debugfs files\n");
7826 goto out_remove_sysfs;
7827 }
7828 4245
7829 iwl4965_setup_deferred_work(priv); 4246 iwl_setup_deferred_work(priv);
7830 iwl4965_setup_rx_handlers(priv); 4247 iwl_setup_rx_handlers(priv);
7831 4248
7832 /******************** 4249 /********************
7833 * 9. Conclude 4250 * 9. Conclude
@@ -7835,14 +4252,31 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7835 pci_save_state(pdev); 4252 pci_save_state(pdev);
7836 pci_disable_device(pdev); 4253 pci_disable_device(pdev);
7837 4254
7838 /* notify iwlcore to init */ 4255 /**********************************
7839 iwlcore_low_level_notify(priv, IWLCORE_INIT_EVT); 4256 * 10. Setup and register mac80211
4257 **********************************/
4258
4259 err = iwl_setup_mac(priv);
4260 if (err)
4261 goto out_remove_sysfs;
4262
4263 err = iwl_dbgfs_register(priv, DRV_NAME);
4264 if (err)
4265 IWL_ERROR("failed to create debugfs files\n");
4266
4267 err = iwl_rfkill_init(priv);
4268 if (err)
4269 IWL_ERROR("Unable to initialize RFKILL system. "
4270 "Ignoring error: %d\n", err);
4271 iwl_power_initialize(priv);
7840 return 0; 4272 return 0;
7841 4273
7842 out_remove_sysfs: 4274 out_remove_sysfs:
7843 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group); 4275 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7844 out_unset_hw_params: 4276 out_uninit_drv:
7845 iwl4965_unset_hw_params(priv); 4277 iwl_uninit_drv(priv);
4278 out_free_eeprom:
4279 iwl_eeprom_free(priv);
7846 out_iounmap: 4280 out_iounmap:
7847 pci_iounmap(pdev, priv->hw_base); 4281 pci_iounmap(pdev, priv->hw_base);
7848 out_pci_release_regions: 4282 out_pci_release_regions:
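The relabelled error path above (out_uninit_drv, out_free_eeprom, out_iounmap, ...) is the standard probe unwinding idiom: each label undoes only the steps that had already succeeded, in reverse order of acquisition, so a failure at any point leaves no resources behind. A stripped-down sketch of the idiom; demo_init_hw() is a hypothetical stand-in for the driver-specific steps:

#include <linux/pci.h>

static int demo_init_hw(struct pci_dev *pdev)
{
	return 0;	/* placeholder for hardware bring-up */
}

static int demo_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;		/* nothing acquired yet */

	err = pci_request_regions(pdev, "demo");
	if (err)
		goto out_disable;

	err = demo_init_hw(pdev);
	if (err)
		goto out_release;

	return 0;

 out_release:				/* unwind in reverse order */
	pci_release_regions(pdev);
 out_disable:
	pci_disable_device(pdev);
	return err;
}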
@@ -7859,8 +4293,6 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7859static void __devexit iwl4965_pci_remove(struct pci_dev *pdev) 4293static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7860{ 4294{
7861 struct iwl_priv *priv = pci_get_drvdata(pdev); 4295 struct iwl_priv *priv = pci_get_drvdata(pdev);
7862 struct list_head *p, *q;
7863 int i;
7864 unsigned long flags; 4296 unsigned long flags;
7865 4297
7866 if (!priv) 4298 if (!priv)
@@ -7868,6 +4300,9 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7868 4300
7869 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); 4301 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
7870 4302
4303 iwl_dbgfs_unregister(priv);
4304 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
4305
7871 if (priv->mac80211_registered) { 4306 if (priv->mac80211_registered) {
7872 ieee80211_unregister_hw(priv->hw); 4307 ieee80211_unregister_hw(priv->hw);
7873 priv->mac80211_registered = 0; 4308 priv->mac80211_registered = 0;
@@ -7886,26 +4321,15 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7886 4321
7887 iwl_synchronize_irq(priv); 4322 iwl_synchronize_irq(priv);
7888 4323
7889 /* Free MAC hash list for ADHOC */ 4324 iwl_rfkill_unregister(priv);
7890 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
7891 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
7892 list_del(p);
7893 kfree(list_entry(p, struct iwl4965_ibss_seq, list));
7894 }
7895 }
7896
7897 iwlcore_low_level_notify(priv, IWLCORE_REMOVE_EVT);
7898 iwl_dbgfs_unregister(priv);
7899 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7900
7901 iwl4965_dealloc_ucode_pci(priv); 4325 iwl4965_dealloc_ucode_pci(priv);
7902 4326
7903 if (priv->rxq.bd) 4327 if (priv->rxq.bd)
7904 iwl4965_rx_queue_free(priv, &priv->rxq); 4328 iwl_rx_queue_free(priv, &priv->rxq);
7905 iwl4965_hw_txq_ctx_free(priv); 4329 iwl_hw_txq_ctx_free(priv);
7906 4330
7907 iwl4965_unset_hw_params(priv); 4331 iwl_clear_stations_table(priv);
7908 iwlcore_clear_stations_table(priv); 4332 iwl_eeprom_free(priv);
7909 4333
7910 4334
7911 /*netif_stop_queue(dev); */ 4335 /*netif_stop_queue(dev); */
@@ -7922,8 +4346,7 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7922 pci_disable_device(pdev); 4346 pci_disable_device(pdev);
7923 pci_set_drvdata(pdev, NULL); 4347 pci_set_drvdata(pdev, NULL);
7924 4348
7925 iwl_free_channel_map(priv); 4349 iwl_uninit_drv(priv);
7926 iwl4965_free_geos(priv);
7927 4350
7928 if (priv->ibss_beacon) 4351 if (priv->ibss_beacon)
7929 dev_kfree_skb(priv->ibss_beacon); 4352 dev_kfree_skb(priv->ibss_beacon);
@@ -7973,6 +4396,19 @@ static int iwl4965_pci_resume(struct pci_dev *pdev)
7973static struct pci_device_id iwl_hw_card_ids[] = { 4396static struct pci_device_id iwl_hw_card_ids[] = {
7974 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, 4397 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
7975 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 4398 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
4399#ifdef CONFIG_IWL5000
4400 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)},
4401 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)},
4402 {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)},
4403 {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)},
4404 {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)},
4405 {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)},
4406 {IWL_PCI_DEVICE(0x4232, PCI_ANY_ID, iwl5100_agn_cfg)},
4407 {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)},
4408 {IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)},
4409 {IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)},
4410 {IWL_PCI_DEVICE(0x423A, PCI_ANY_ID, iwl5350_agn_cfg)},
4411#endif /* CONFIG_IWL5000 */
7976 {0} 4412 {0}
7977}; 4413};
7978MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 4414MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
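In the extended iwl_hw_card_ids[] table, the 5100 entries with specific subsystem IDs (0x1205, 0x1206, ...) sit before the PCI_ANY_ID catch-alls for the same device IDs. That ordering matters: the PCI core walks the table and the first matching entry wins, so the more specific rows must come first or they would never be reached. A small sketch of the rule with placeholder IDs (the driver_data slot, which the real table uses for the cfg pointer, is left at 0 here):

#include <linux/pci.h>
#include <linux/module.h>

/* fields: vendor, device, subvendor, subdevice, class, class_mask, driver_data */
static struct pci_device_id demo_ids[] = {
	/* exact subsystem match first ... */
	{ PCI_VENDOR_ID_INTEL, 0x4232, PCI_ANY_ID, 0x1205, 0, 0, 0 },
	/* ... then the catch-all for the same device ID */
	{ PCI_VENDOR_ID_INTEL, 0x4232, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, demo_ids);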
@@ -8006,20 +4442,9 @@ static int __init iwl4965_init(void)
8006 IWL_ERROR("Unable to initialize PCI module\n"); 4442 IWL_ERROR("Unable to initialize PCI module\n");
8007 goto error_register; 4443 goto error_register;
8008 } 4444 }
8009#ifdef CONFIG_IWLWIFI_DEBUG
8010 ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level);
8011 if (ret) {
8012 IWL_ERROR("Unable to create driver sysfs file\n");
8013 goto error_debug;
8014 }
8015#endif
8016 4445
8017 return ret; 4446 return ret;
8018 4447
8019#ifdef CONFIG_IWLWIFI_DEBUG
8020error_debug:
8021 pci_unregister_driver(&iwl_driver);
8022#endif
8023error_register: 4448error_register:
8024 iwl4965_rate_control_unregister(); 4449 iwl4965_rate_control_unregister();
8025 return ret; 4450 return ret;
@@ -8027,9 +4452,6 @@ error_register:
8027 4452
8028static void __exit iwl4965_exit(void) 4453static void __exit iwl4965_exit(void)
8029{ 4454{
8030#ifdef CONFIG_IWLWIFI_DEBUG
8031 driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level);
8032#endif
8033 pci_unregister_driver(&iwl_driver); 4455 pci_unregister_driver(&iwl_driver);
8034 iwl4965_rate_control_unregister(); 4456 iwl4965_rate_control_unregister();
8035} 4457}
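iwl4965_init() and iwl4965_exit() above keep module setup and teardown symmetric: the rate-control algorithm is registered before the PCI driver, the error path undoes it again, and exit releases both in reverse order. A compact sketch of that lifecycle; the demo_* names and the rate-control stubs are hypothetical, and demo_pci_driver stands in for the real iwl_driver:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver demo_pci_driver = {
	.name = "demo",		/* minimal driver object for the sketch */
};

static int demo_rate_control_register(void)    { return 0; }	/* stub */
static void demo_rate_control_unregister(void) { }		/* stub */

static int __init demo_init(void)
{
	int ret;

	ret = demo_rate_control_register();
	if (ret)
		return ret;

	ret = pci_register_driver(&demo_pci_driver);
	if (ret)
		goto err_unregister_rc;		/* undo the earlier step */

	return 0;

 err_unregister_rc:
	demo_rate_control_unregister();
	return ret;
}

static void __exit demo_exit(void)
{
	/* reverse order of demo_init() */
	pci_unregister_driver(&demo_pci_driver);
	demo_rate_control_unregister();
}

module_init(demo_init);
module_exit(demo_exit);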